From 62d6eb2881d6c9d46bca72335322bd4921f62840 Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Mon, 24 Jun 2024 10:37:52 +0200 Subject: [PATCH 1/6] lama --- nevergrad/optimization/recastlib.py | 44565 ++++++++++++++++++++++++++ 1 file changed, 44565 insertions(+) diff --git a/nevergrad/optimization/recastlib.py b/nevergrad/optimization/recastlib.py index 3b5c5bb3b..fc807145f 100644 --- a/nevergrad/optimization/recastlib.py +++ b/nevergrad/optimization/recastlib.py @@ -62,6 +62,7 @@ def __init__( "Powell", ] or "NLOPT" in method + or "LLAMA" in method or "DS" in method or "BFGS" in method ), f"Unknown method '{method}'" @@ -115,6 +116,17 @@ def ax_obj(p): if res.f < best_res: best_res = res.f best_x = res.x + elif weakself.method[:5] == "LLAMA": + method_name = weakself.method[5:] + + def five_objective_function(x): + return objective_function(10.0 * x - 5.0) + + val, best_x = lama_register[method_name](budget)(five_objective_function) + best_x = 10.0 * best_x - 5.0 + if weakself._normalizer is not None: + best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32)) + elif weakself.method[:2] == "DS": import directsearch # type: ignore @@ -996,6 +1008,7 @@ def _evaluate(self, X, out, *args, **kwargs): DS3p = NonObjectOptimizer(method="DS3p").set_name("DS3p", register=True) DSsubspace = NonObjectOptimizer(method="DSsubspace").set_name("DSsubspace", register=True) DSproba = NonObjectOptimizer(method="DSproba").set_name("DSproba", register=True) + # DSproba2 = NonObjectOptimizer(method="PDS2").set_name("DSproba2", register=True) # DSproba3 = NonObjectOptimizer(method="PDS3").set_name("DSproba3", register=True) # DSproba4 = NonObjectOptimizer(method="PDS4").set_name("DSproba4", register=True) @@ -1004,3 +1017,44555 @@ def _evaluate(self, X, out, *args, **kwargs): # DSproba7 = NonObjectOptimizer(method="PDS7").set_name("DSproba7", register=True) # DSproba8 = NonObjectOptimizer(method="PDS8").set_name("DSproba8", register=True) # DSproba9 = NonObjectOptimizer(method="PDS9").set_name("DSproba9", register=True) + +# from nevergrad.optimization.lama.StrategicDifferentialEvolution import StrategicDifferentialEvolution +# lama_register["StrategicDifferentialEvolution"] = StrategicDifferentialEvolution +# LLAMAStrategicDifferentialEvolution = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution").set_name("LLAMAStrategicDifferentialEvolution", register=True) + + +###### LLAMA ####### +lama_register = {} + +try: + from nevergrad.optimization.lama.AADCCS import AADCCS + + lama_register["AADCCS"] = AADCCS + LLAMAAADCCS = NonObjectOptimizer(method="LLAMAAADCCS").set_name("LLAMAAADCCS", register=True) +except Exception as e: + print("AADCCS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AADEHLS import AADEHLS + + lama_register["AADEHLS"] = AADEHLS + LLAMAAADEHLS = NonObjectOptimizer(method="LLAMAAADEHLS").set_name("LLAMAAADEHLS", register=True) +except Exception as e: + print("AADEHLS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AADMEM import AADMEM + + lama_register["AADMEM"] = AADMEM + LLAMAAADMEM = NonObjectOptimizer(method="LLAMAAADMEM").set_name("LLAMAAADMEM", register=True) +except Exception as e: + print("AADMEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AAES import AAES + + lama_register["AAES"] = AAES + LLAMAAAES = NonObjectOptimizer(method="LLAMAAAES").set_name("LLAMAAAES", register=True) +except Exception as e: + print("AAES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ACDE import ACDE + + lama_register["ACDE"] = ACDE + LLAMAACDE = NonObjectOptimizer(method="LLAMAACDE").set_name("LLAMAACDE", register=True) +except Exception as e: + print("ACDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ACMDEOBD import ACMDEOBD + + lama_register["ACMDEOBD"] = ACMDEOBD + LLAMAACMDEOBD = NonObjectOptimizer(method="LLAMAACMDEOBD").set_name("LLAMAACMDEOBD", register=True) +except Exception as e: + print("ACMDEOBD can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADAEDA import ADAEDA + + lama_register["ADAEDA"] = ADAEDA + LLAMAADAEDA = NonObjectOptimizer(method="LLAMAADAEDA").set_name("LLAMAADAEDA", register=True) +except Exception as e: + print("ADAEDA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADCE import ADCE + + lama_register["ADCE"] = ADCE + LLAMAADCE = NonObjectOptimizer(method="LLAMAADCE").set_name("LLAMAADCE", register=True) +except Exception as e: + print("ADCE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEA import ADEA + + lama_register["ADEA"] = ADEA + LLAMAADEA = NonObjectOptimizer(method="LLAMAADEA").set_name("LLAMAADEA", register=True) +except Exception as e: + print("ADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEAS import ADEAS + + lama_register["ADEAS"] = ADEAS + LLAMAADEAS = NonObjectOptimizer(method="LLAMAADEAS").set_name("LLAMAADEAS", register=True) +except Exception as e: + print("ADEAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADECMS import ADECMS + + lama_register["ADECMS"] = ADECMS + LLAMAADECMS = NonObjectOptimizer(method="LLAMAADECMS").set_name("LLAMAADECMS", register=True) +except Exception as e: + print("ADECMS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEDCA import ADEDCA + + lama_register["ADEDCA"] = ADEDCA + LLAMAADEDCA = NonObjectOptimizer(method="LLAMAADEDCA").set_name("LLAMAADEDCA", register=True) +except Exception as e: + print("ADEDCA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEDE import ADEDE + + lama_register["ADEDE"] = ADEDE + LLAMAADEDE = NonObjectOptimizer(method="LLAMAADEDE").set_name("LLAMAADEDE", register=True) +except Exception as e: + print("ADEDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEDLR import ADEDLR + + lama_register["ADEDLR"] = ADEDLR + LLAMAADEDLR = NonObjectOptimizer(method="LLAMAADEDLR").set_name("LLAMAADEDLR", register=True) +except Exception as e: + print("ADEDLR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEDM import ADEDM + + lama_register["ADEDM"] = ADEDM + LLAMAADEDM = NonObjectOptimizer(method="LLAMAADEDM").set_name("LLAMAADEDM", register=True) +except Exception as e: + print("ADEDM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEEM import ADEEM + + lama_register["ADEEM"] = ADEEM + LLAMAADEEM = NonObjectOptimizer(method="LLAMAADEEM").set_name("LLAMAADEEM", register=True) +except Exception as e: + print("ADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEGE import ADEGE + + lama_register["ADEGE"] = ADEGE + LLAMAADEGE = NonObjectOptimizer(method="LLAMAADEGE").set_name("LLAMAADEGE", register=True) +except Exception as e: + print("ADEGE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEGM import ADEGM + + lama_register["ADEGM"] = ADEGM + LLAMAADEGM = NonObjectOptimizer(method="LLAMAADEGM").set_name("LLAMAADEGM", register=True) +except Exception as e: + print("ADEGM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEGS import ADEGS + + lama_register["ADEGS"] = ADEGS + LLAMAADEGS = NonObjectOptimizer(method="LLAMAADEGS").set_name("LLAMAADEGS", register=True) +except Exception as e: + print("ADEGS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEM import ADEM + + lama_register["ADEM"] = ADEM + LLAMAADEM = NonObjectOptimizer(method="LLAMAADEM").set_name("LLAMAADEM", register=True) +except Exception as e: + print("ADEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEMSC import ADEMSC + + lama_register["ADEMSC"] = ADEMSC + LLAMAADEMSC = NonObjectOptimizer(method="LLAMAADEMSC").set_name("LLAMAADEMSC", register=True) +except Exception as e: + print("ADEMSC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEPF import ADEPF + + lama_register["ADEPF"] = ADEPF + LLAMAADEPF = NonObjectOptimizer(method="LLAMAADEPF").set_name("LLAMAADEPF", register=True) +except Exception as e: + print("ADEPF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEPM import ADEPM + + lama_register["ADEPM"] = ADEPM + LLAMAADEPM = NonObjectOptimizer(method="LLAMAADEPM").set_name("LLAMAADEPM", register=True) +except Exception as e: + print("ADEPM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEPMC import ADEPMC + + lama_register["ADEPMC"] = ADEPMC + LLAMAADEPMC = NonObjectOptimizer(method="LLAMAADEPMC").set_name("LLAMAADEPMC", register=True) +except Exception as e: + print("ADEPMC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEPMI import ADEPMI + + lama_register["ADEPMI"] = ADEPMI + LLAMAADEPMI = NonObjectOptimizer(method="LLAMAADEPMI").set_name("LLAMAADEPMI", register=True) +except Exception as e: + print("ADEPMI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADEPR import ADEPR + + lama_register["ADEPR"] = ADEPR + LLAMAADEPR = NonObjectOptimizer(method="LLAMAADEPR").set_name("LLAMAADEPR", register=True) +except Exception as e: + print("ADEPR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADES import ADES + + lama_register["ADES"] = ADES + LLAMAADES = NonObjectOptimizer(method="LLAMAADES").set_name("LLAMAADES", register=True) +except Exception as e: + print("ADES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADESA import ADESA + + lama_register["ADESA"] = ADESA + LLAMAADESA = NonObjectOptimizer(method="LLAMAADESA").set_name("LLAMAADESA", register=True) +except Exception as e: + print("ADESA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADE_FPC import ADE_FPC + + lama_register["ADE_FPC"] = ADE_FPC + LLAMAADE_FPC = NonObjectOptimizer(method="LLAMAADE_FPC").set_name("LLAMAADE_FPC", register=True) +except Exception as e: + print("ADE_FPC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADGD import ADGD + + lama_register["ADGD"] = ADGD + LLAMAADGD = NonObjectOptimizer(method="LLAMAADGD").set_name("LLAMAADGD", register=True) +except Exception as e: + print("ADGD can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADGE import ADGE + + lama_register["ADGE"] = ADGE + LLAMAADGE = NonObjectOptimizer(method="LLAMAADGE").set_name("LLAMAADGE", register=True) +except Exception as e: + print("ADGE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADMDE import ADMDE + + lama_register["ADMDE"] = ADMDE + LLAMAADMDE = NonObjectOptimizer(method="LLAMAADMDE").set_name("LLAMAADMDE", register=True) +except Exception as e: + print("ADMDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADMEMS import ADMEMS + + lama_register["ADMEMS"] = ADMEMS + LLAMAADMEMS = NonObjectOptimizer(method="LLAMAADMEMS").set_name("LLAMAADMEMS", register=True) +except Exception as e: + print("ADMEMS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADSDiffEvo import ADSDiffEvo + + lama_register["ADSDiffEvo"] = ADSDiffEvo + LLAMAADSDiffEvo = NonObjectOptimizer(method="LLAMAADSDiffEvo").set_name("LLAMAADSDiffEvo", register=True) +except Exception as e: + print("ADSDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADSEA import ADSEA + + lama_register["ADSEA"] = ADSEA + LLAMAADSEA = NonObjectOptimizer(method="LLAMAADSEA").set_name("LLAMAADSEA", register=True) +except Exception as e: + print("ADSEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ADSEAPlus import ADSEAPlus + + lama_register["ADSEAPlus"] = ADSEAPlus + LLAMAADSEAPlus = NonObjectOptimizer(method="LLAMAADSEAPlus").set_name("LLAMAADSEAPlus", register=True) +except Exception as e: + print("ADSEAPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGBES import AGBES + + lama_register["AGBES"] = AGBES + LLAMAAGBES = NonObjectOptimizer(method="LLAMAAGBES").set_name("LLAMAAGBES", register=True) +except Exception as e: + print("AGBES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGCES import AGCES + + lama_register["AGCES"] = AGCES + LLAMAAGCES = NonObjectOptimizer(method="LLAMAAGCES").set_name("LLAMAAGCES", register=True) +except Exception as e: + print("AGCES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGDE import AGDE + + lama_register["AGDE"] = AGDE + LLAMAAGDE = NonObjectOptimizer(method="LLAMAAGDE").set_name("LLAMAAGDE", register=True) +except Exception as e: + print("AGDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGDELS import AGDELS + + lama_register["AGDELS"] = AGDELS + LLAMAAGDELS = NonObjectOptimizer(method="LLAMAAGDELS").set_name("LLAMAAGDELS", register=True) +except Exception as e: + print("AGDELS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGDiffEvo import AGDiffEvo + + lama_register["AGDiffEvo"] = AGDiffEvo + LLAMAAGDiffEvo = NonObjectOptimizer(method="LLAMAAGDiffEvo").set_name("LLAMAAGDiffEvo", register=True) +except Exception as e: + print("AGDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGEA import AGEA + + lama_register["AGEA"] = AGEA + LLAMAAGEA = NonObjectOptimizer(method="LLAMAAGEA").set_name("LLAMAAGEA", register=True) +except Exception as e: + print("AGEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGESA import AGESA + + lama_register["AGESA"] = AGESA + LLAMAAGESA = NonObjectOptimizer(method="LLAMAAGESA").set_name("LLAMAAGESA", register=True) +except Exception as e: + print("AGESA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGGE import AGGE + + lama_register["AGGE"] = AGGE + LLAMAAGGE = NonObjectOptimizer(method="LLAMAAGGE").set_name("LLAMAAGGE", register=True) +except Exception as e: + print("AGGE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGGES import AGGES + + lama_register["AGGES"] = AGGES + LLAMAAGGES = NonObjectOptimizer(method="LLAMAAGGES").set_name("LLAMAAGGES", register=True) +except Exception as e: + print("AGGES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AGIDE import AGIDE + + lama_register["AGIDE"] = AGIDE + LLAMAAGIDE = NonObjectOptimizer(method="LLAMAAGIDE").set_name("LLAMAAGIDE", register=True) +except Exception as e: + print("AGIDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AHDEMI import AHDEMI + + lama_register["AHDEMI"] = AHDEMI + LLAMAAHDEMI = NonObjectOptimizer(method="LLAMAAHDEMI").set_name("LLAMAAHDEMI", register=True) +except Exception as e: + print("AHDEMI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ALDEEM import ALDEEM + + lama_register["ALDEEM"] = ALDEEM + LLAMAALDEEM = NonObjectOptimizer(method="LLAMAALDEEM").set_name("LLAMAALDEEM", register=True) +except Exception as e: + print("ALDEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ALES import ALES + + lama_register["ALES"] = ALES + LLAMAALES = NonObjectOptimizer(method="LLAMAALES").set_name("LLAMAALES", register=True) +except Exception as e: + print("ALES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ALSS import ALSS + + lama_register["ALSS"] = ALSS + LLAMAALSS = NonObjectOptimizer(method="LLAMAALSS").set_name("LLAMAALSS", register=True) +except Exception as e: + print("ALSS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AMDE import AMDE + + lama_register["AMDE"] = AMDE + LLAMAAMDE = NonObjectOptimizer(method="LLAMAAMDE").set_name("LLAMAAMDE", register=True) +except Exception as e: + print("AMDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AMES import AMES + + lama_register["AMES"] = AMES + LLAMAAMES = NonObjectOptimizer(method="LLAMAAMES").set_name("LLAMAAMES", register=True) +except Exception as e: + print("AMES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AMSDiffEvo import AMSDiffEvo + + lama_register["AMSDiffEvo"] = AMSDiffEvo + LLAMAAMSDiffEvo = NonObjectOptimizer(method="LLAMAAMSDiffEvo").set_name("LLAMAAMSDiffEvo", register=True) +except Exception as e: + print("AMSDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AMSEA import AMSEA + + lama_register["AMSEA"] = AMSEA + LLAMAAMSEA = NonObjectOptimizer(method="LLAMAAMSEA").set_name("LLAMAAMSEA", register=True) +except Exception as e: + print("AMSEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AN_MDEPSO import AN_MDEPSO + + lama_register["AN_MDEPSO"] = AN_MDEPSO + LLAMAAN_MDEPSO = NonObjectOptimizer(method="LLAMAAN_MDEPSO").set_name("LLAMAAN_MDEPSO", register=True) +except Exception as e: + print("AN_MDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.APBES import APBES + + lama_register["APBES"] = APBES + LLAMAAPBES = NonObjectOptimizer(method="LLAMAAPBES").set_name("LLAMAAPBES", register=True) +except Exception as e: + print("APBES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.APDE import APDE + + lama_register["APDE"] = APDE + LLAMAAPDE = NonObjectOptimizer(method="LLAMAAPDE").set_name("LLAMAAPDE", register=True) +except Exception as e: + print("APDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.APDETL import APDETL + + lama_register["APDETL"] = APDETL + LLAMAAPDETL = NonObjectOptimizer(method="LLAMAAPDETL").set_name("LLAMAAPDETL", register=True) +except Exception as e: + print("APDETL can not be imported: ", e) + +try: + from nevergrad.optimization.lama.APES import APES + + lama_register["APES"] = APES + LLAMAAPES = NonObjectOptimizer(method="LLAMAAPES").set_name("LLAMAAPES", register=True) +except Exception as e: + print("APES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AQAPSO_LS_DIW import AQAPSO_LS_DIW + + lama_register["AQAPSO_LS_DIW"] = AQAPSO_LS_DIW + LLAMAAQAPSO_LS_DIW = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW").set_name( + "LLAMAAQAPSO_LS_DIW", register=True + ) +except Exception as e: + print("AQAPSO_LS_DIW can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AQAPSO_LS_DIW_AP import AQAPSO_LS_DIW_AP + + lama_register["AQAPSO_LS_DIW_AP"] = AQAPSO_LS_DIW_AP + LLAMAAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP").set_name( + "LLAMAAQAPSO_LS_DIW_AP", register=True + ) +except Exception as e: + print("AQAPSO_LS_DIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ARDLS import ARDLS + + lama_register["ARDLS"] = ARDLS + LLAMAARDLS = NonObjectOptimizer(method="LLAMAARDLS").set_name("LLAMAARDLS", register=True) +except Exception as e: + print("ARDLS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ARESM import ARESM + + lama_register["ARESM"] = ARESM + LLAMAARESM = NonObjectOptimizer(method="LLAMAARESM").set_name("LLAMAARESM", register=True) +except Exception as e: + print("ARESM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ARISA import ARISA + + lama_register["ARISA"] = ARISA + LLAMAARISA = NonObjectOptimizer(method="LLAMAARISA").set_name("LLAMAARISA", register=True) +except Exception as e: + print("ARISA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ASADEA import ASADEA + + lama_register["ASADEA"] = ASADEA + LLAMAASADEA = NonObjectOptimizer(method="LLAMAASADEA").set_name("LLAMAASADEA", register=True) +except Exception as e: + print("ASADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ASO import ASO + + lama_register["ASO"] = ASO + LLAMAASO = NonObjectOptimizer(method="LLAMAASO").set_name("LLAMAASO", register=True) +except Exception as e: + print("ASO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AVDE import AVDE + + lama_register["AVDE"] = AVDE + LLAMAAVDE = NonObjectOptimizer(method="LLAMAAVDE").set_name("LLAMAAVDE", register=True) +except Exception as e: + print("AVDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AcceleratedAdaptivePrecisionCrossoverEvolution import ( + AcceleratedAdaptivePrecisionCrossoverEvolution, + ) + + lama_register["AcceleratedAdaptivePrecisionCrossoverEvolution"] = ( + AcceleratedAdaptivePrecisionCrossoverEvolution + ) + LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution" + ).set_name("LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution", register=True) +except Exception as e: + print("AcceleratedAdaptivePrecisionCrossoverEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveAnnealingDifferentialEvolution import ( + AdaptiveAnnealingDifferentialEvolution, + ) + + lama_register["AdaptiveAnnealingDifferentialEvolution"] = AdaptiveAnnealingDifferentialEvolution + LLAMAAdaptiveAnnealingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveAnnealingDifferentialEvolution" + ).set_name("LLAMAAdaptiveAnnealingDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveAnnealingDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveArchiveDE import AdaptiveArchiveDE + + lama_register["AdaptiveArchiveDE"] = AdaptiveArchiveDE + LLAMAAdaptiveArchiveDE = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE").set_name( + "LLAMAAdaptiveArchiveDE", register=True + ) +except Exception as e: + print("AdaptiveArchiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCMADiffEvoPSO import AdaptiveCMADiffEvoPSO + + lama_register["AdaptiveCMADiffEvoPSO"] = AdaptiveCMADiffEvoPSO + LLAMAAdaptiveCMADiffEvoPSO = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO").set_name( + "LLAMAAdaptiveCMADiffEvoPSO", register=True + ) +except Exception as e: + print("AdaptiveCMADiffEvoPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveChaoticFireworksOptimization import ( + AdaptiveChaoticFireworksOptimization, + ) + + lama_register["AdaptiveChaoticFireworksOptimization"] = AdaptiveChaoticFireworksOptimization + LLAMAAdaptiveChaoticFireworksOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveChaoticFireworksOptimization" + ).set_name("LLAMAAdaptiveChaoticFireworksOptimization", register=True) +except Exception as e: + print("AdaptiveChaoticFireworksOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveClusterBasedHybridOptimization import ( + AdaptiveClusterBasedHybridOptimization, + ) + + lama_register["AdaptiveClusterBasedHybridOptimization"] = AdaptiveClusterBasedHybridOptimization + LLAMAAdaptiveClusterBasedHybridOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveClusterBasedHybridOptimization" + ).set_name("LLAMAAdaptiveClusterBasedHybridOptimization", register=True) +except Exception as e: + print("AdaptiveClusterBasedHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveClusterHybridOptimizationV5 import ( + AdaptiveClusterHybridOptimizationV5, + ) + + lama_register["AdaptiveClusterHybridOptimizationV5"] = AdaptiveClusterHybridOptimizationV5 + LLAMAAdaptiveClusterHybridOptimizationV5 = NonObjectOptimizer( + method="LLAMAAdaptiveClusterHybridOptimizationV5" + ).set_name("LLAMAAdaptiveClusterHybridOptimizationV5", register=True) +except Exception as e: + print("AdaptiveClusterHybridOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveClusteredDifferentialEvolutionV2 import ( + AdaptiveClusteredDifferentialEvolutionV2, + ) + + lama_register["AdaptiveClusteredDifferentialEvolutionV2"] = AdaptiveClusteredDifferentialEvolutionV2 + LLAMAAdaptiveClusteredDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveClusteredDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveClusteredDifferentialEvolutionV2", register=True) +except Exception as e: + print("AdaptiveClusteredDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCohortHarmonizationOptimization import ( + AdaptiveCohortHarmonizationOptimization, + ) + + lama_register["AdaptiveCohortHarmonizationOptimization"] = AdaptiveCohortHarmonizationOptimization + LLAMAAdaptiveCohortHarmonizationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveCohortHarmonizationOptimization" + ).set_name("LLAMAAdaptiveCohortHarmonizationOptimization", register=True) +except Exception as e: + print("AdaptiveCohortHarmonizationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCohortMemeticAlgorithm import AdaptiveCohortMemeticAlgorithm + + lama_register["AdaptiveCohortMemeticAlgorithm"] = AdaptiveCohortMemeticAlgorithm + LLAMAAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCohortMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCohortMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveCohortMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveControlledMemoryAnnealing import ( + AdaptiveControlledMemoryAnnealing, + ) + + lama_register["AdaptiveControlledMemoryAnnealing"] = AdaptiveControlledMemoryAnnealing + LLAMAAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveControlledMemoryAnnealing" + ).set_name("LLAMAAdaptiveControlledMemoryAnnealing", register=True) +except Exception as e: + print("AdaptiveControlledMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialEvolution import ( + AdaptiveCooperativeDifferentialEvolution, + ) + + lama_register["AdaptiveCooperativeDifferentialEvolution"] = AdaptiveCooperativeDifferentialEvolution + LLAMAAdaptiveCooperativeDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCooperativeDifferentialEvolution" + ).set_name("LLAMAAdaptiveCooperativeDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveCooperativeDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialMemeticAlgorithm import ( + AdaptiveCooperativeDifferentialMemeticAlgorithm, + ) + + lama_register["AdaptiveCooperativeDifferentialMemeticAlgorithm"] = ( + AdaptiveCooperativeDifferentialMemeticAlgorithm + ) + LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveCooperativeDifferentialMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceGradientSearch import AdaptiveCovarianceGradientSearch + + lama_register["AdaptiveCovarianceGradientSearch"] = AdaptiveCovarianceGradientSearch + LLAMAAdaptiveCovarianceGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceGradientSearch" + ).set_name("LLAMAAdaptiveCovarianceGradientSearch", register=True) +except Exception as e: + print("AdaptiveCovarianceGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolution import ( + AdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["AdaptiveCovarianceMatrixDifferentialEvolution"] = ( + AdaptiveCovarianceMatrixDifferentialEvolution + ) + LLAMAAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching import ( + AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching, + ) + + lama_register["AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching"] = ( + AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching + ) + LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching" + ).set_name( + "LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching", register=True + ) +except Exception as e: + print( + "AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolution import ( + AdaptiveCovarianceMatrixEvolution, + ) + + lama_register["AdaptiveCovarianceMatrixEvolution"] = AdaptiveCovarianceMatrixEvolution + LLAMAAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionStrategy import ( + AdaptiveCovarianceMatrixEvolutionStrategy, + ) + + lama_register["AdaptiveCovarianceMatrixEvolutionStrategy"] = AdaptiveCovarianceMatrixEvolutionStrategy + LLAMAAdaptiveCovarianceMatrixEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation import ( + AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation, + ) + + lama_register["AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation"] = ( + AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation + ) + LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptation import ( + AdaptiveCovarianceMatrixSelfAdaptation, + ) + + lama_register["AdaptiveCovarianceMatrixSelfAdaptation"] = AdaptiveCovarianceMatrixSelfAdaptation + LLAMAAdaptiveCovarianceMatrixSelfAdaptation = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation" + ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptation", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixSelfAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptationV2 import ( + AdaptiveCovarianceMatrixSelfAdaptationV2, + ) + + lama_register["AdaptiveCovarianceMatrixSelfAdaptationV2"] = AdaptiveCovarianceMatrixSelfAdaptationV2 + LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2" + ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2", register=True) +except Exception as e: + print("AdaptiveCovarianceMatrixSelfAdaptationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCrossoverDEPSO import AdaptiveCrossoverDEPSO + + lama_register["AdaptiveCrossoverDEPSO"] = AdaptiveCrossoverDEPSO + LLAMAAdaptiveCrossoverDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO").set_name( + "LLAMAAdaptiveCrossoverDEPSO", register=True + ) +except Exception as e: + print("AdaptiveCrossoverDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCrossoverElitistStrategyV6 import ( + AdaptiveCrossoverElitistStrategyV6, + ) + + lama_register["AdaptiveCrossoverElitistStrategyV6"] = AdaptiveCrossoverElitistStrategyV6 + LLAMAAdaptiveCrossoverElitistStrategyV6 = NonObjectOptimizer( + method="LLAMAAdaptiveCrossoverElitistStrategyV6" + ).set_name("LLAMAAdaptiveCrossoverElitistStrategyV6", register=True) +except Exception as e: + print("AdaptiveCrossoverElitistStrategyV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCrossoverSearch import AdaptiveCrossoverSearch + + lama_register["AdaptiveCrossoverSearch"] = AdaptiveCrossoverSearch + LLAMAAdaptiveCrossoverSearch = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch").set_name( + "LLAMAAdaptiveCrossoverSearch", register=True + ) +except Exception as e: + print("AdaptiveCrossoverSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalCooperativeSearch import ( + AdaptiveCulturalCooperativeSearch, + ) + + lama_register["AdaptiveCulturalCooperativeSearch"] = AdaptiveCulturalCooperativeSearch + LLAMAAdaptiveCulturalCooperativeSearch = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalCooperativeSearch" + ).set_name("LLAMAAdaptiveCulturalCooperativeSearch", register=True) +except Exception as e: + print("AdaptiveCulturalCooperativeSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialEvolution import ( + AdaptiveCulturalDifferentialEvolution, + ) + + lama_register["AdaptiveCulturalDifferentialEvolution"] = AdaptiveCulturalDifferentialEvolution + LLAMAAdaptiveCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalDifferentialEvolution" + ).set_name("LLAMAAdaptiveCulturalDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveCulturalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialMemeticEvolution import ( + AdaptiveCulturalDifferentialMemeticEvolution, + ) + + lama_register["AdaptiveCulturalDifferentialMemeticEvolution"] = ( + AdaptiveCulturalDifferentialMemeticEvolution + ) + LLAMAAdaptiveCulturalDifferentialMemeticEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution" + ).set_name("LLAMAAdaptiveCulturalDifferentialMemeticEvolution", register=True) +except Exception as e: + print("AdaptiveCulturalDifferentialMemeticEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionStrategy import ( + AdaptiveCulturalEvolutionStrategy, + ) + + lama_register["AdaptiveCulturalEvolutionStrategy"] = AdaptiveCulturalEvolutionStrategy + LLAMAAdaptiveCulturalEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalEvolutionStrategy" + ).set_name("LLAMAAdaptiveCulturalEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveCulturalEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionaryAlgorithm import ( + AdaptiveCulturalEvolutionaryAlgorithm, + ) + + lama_register["AdaptiveCulturalEvolutionaryAlgorithm"] = AdaptiveCulturalEvolutionaryAlgorithm + LLAMAAdaptiveCulturalEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveCulturalEvolutionaryAlgorithm", register=True) +except Exception as e: + print("AdaptiveCulturalEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalMemeticAlgorithm import AdaptiveCulturalMemeticAlgorithm + + lama_register["AdaptiveCulturalMemeticAlgorithm"] = AdaptiveCulturalMemeticAlgorithm + LLAMAAdaptiveCulturalMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCulturalMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveCulturalMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveCulturalMemeticDifferentialEvolution import ( + AdaptiveCulturalMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveCulturalMemeticDifferentialEvolution"] = ( + AdaptiveCulturalMemeticDifferentialEvolution + ) + LLAMAAdaptiveCulturalMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveCulturalMemeticDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveCulturalMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDEPSOOptimizer import AdaptiveDEPSOOptimizer + + lama_register["AdaptiveDEPSOOptimizer"] = AdaptiveDEPSOOptimizer + LLAMAAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer").set_name( + "LLAMAAdaptiveDEPSOOptimizer", register=True + ) +except Exception as e: + print("AdaptiveDEPSOOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDEWithElitismAndLocalSearch import ( + AdaptiveDEWithElitismAndLocalSearch, + ) + + lama_register["AdaptiveDEWithElitismAndLocalSearch"] = AdaptiveDEWithElitismAndLocalSearch + LLAMAAdaptiveDEWithElitismAndLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDEWithElitismAndLocalSearch" + ).set_name("LLAMAAdaptiveDEWithElitismAndLocalSearch", register=True) +except Exception as e: + print("AdaptiveDEWithElitismAndLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDEWithOrthogonalCrossover import ( + AdaptiveDEWithOrthogonalCrossover, + ) + + lama_register["AdaptiveDEWithOrthogonalCrossover"] = AdaptiveDEWithOrthogonalCrossover + LLAMAAdaptiveDEWithOrthogonalCrossover = NonObjectOptimizer( + method="LLAMAAdaptiveDEWithOrthogonalCrossover" + ).set_name("LLAMAAdaptiveDEWithOrthogonalCrossover", register=True) +except Exception as e: + print("AdaptiveDEWithOrthogonalCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDecayOptimizer import AdaptiveDecayOptimizer + + lama_register["AdaptiveDecayOptimizer"] = AdaptiveDecayOptimizer + LLAMAAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer").set_name( + "LLAMAAdaptiveDecayOptimizer", register=True + ) +except Exception as e: + print("AdaptiveDecayOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialCrossover import AdaptiveDifferentialCrossover + + lama_register["AdaptiveDifferentialCrossover"] = AdaptiveDifferentialCrossover + LLAMAAdaptiveDifferentialCrossover = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialCrossover" + ).set_name("LLAMAAdaptiveDifferentialCrossover", register=True) +except Exception as e: + print("AdaptiveDifferentialCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolution import AdaptiveDifferentialEvolution + + lama_register["AdaptiveDifferentialEvolution"] = AdaptiveDifferentialEvolution + LLAMAAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolution" + ).set_name("LLAMAAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionHarmonySearch import ( + AdaptiveDifferentialEvolutionHarmonySearch, + ) + + lama_register["AdaptiveDifferentialEvolutionHarmonySearch"] = AdaptiveDifferentialEvolutionHarmonySearch + LLAMAAdaptiveDifferentialEvolutionHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionHarmonySearch", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionOptimizer import ( + AdaptiveDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveDifferentialEvolutionOptimizer"] = AdaptiveDifferentialEvolutionOptimizer + LLAMAAdaptiveDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPSO import AdaptiveDifferentialEvolutionPSO + + lama_register["AdaptiveDifferentialEvolutionPSO"] = AdaptiveDifferentialEvolutionPSO + LLAMAAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionPSO" + ).set_name("LLAMAAdaptiveDifferentialEvolutionPSO", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPlus import ( + AdaptiveDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveDifferentialEvolutionPlus"] = AdaptiveDifferentialEvolutionPlus + LLAMAAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveDifferentialEvolutionPlus", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( + AdaptiveDifferentialEvolutionWithAdaptivePerturbation, + ) + + lama_register["AdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( + AdaptiveDifferentialEvolutionWithAdaptivePerturbation + ) + LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( + AdaptiveDifferentialEvolutionWithBayesianLocalSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( + AdaptiveDifferentialEvolutionWithBayesianLocalSearch + ) + LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation import ( + AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation, + ) + + lama_register["AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation"] = ( + AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation + ) + LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( + AdaptiveDifferentialEvolutionWithDynamicPopulationV2, + ) + + lama_register["AdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( + AdaptiveDifferentialEvolutionWithDynamicPopulationV2 + ) + LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGradientBoost import ( + AdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["AdaptiveDifferentialEvolutionWithGradientBoost"] = ( + AdaptiveDifferentialEvolutionWithGradientBoost + ) + LLAMAAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGuidedSearch import ( + AdaptiveDifferentialEvolutionWithGuidedSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithGuidedSearch"] = ( + AdaptiveDifferentialEvolutionWithGuidedSearch + ) + LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithGuidedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithLocalSearch import ( + AdaptiveDifferentialEvolutionWithLocalSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithLocalSearch"] = ( + AdaptiveDifferentialEvolutionWithLocalSearch + ) + LLAMAAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithMemeticSearch import ( + AdaptiveDifferentialEvolutionWithMemeticSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithMemeticSearch"] = ( + AdaptiveDifferentialEvolutionWithMemeticSearch + ) + LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithSurrogateAssistance import ( + AdaptiveDifferentialEvolutionWithSurrogateAssistance, + ) + + lama_register["AdaptiveDifferentialEvolutionWithSurrogateAssistance"] = ( + AdaptiveDifferentialEvolutionWithSurrogateAssistance + ) + LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance", register=True) +except Exception as e: + print("AdaptiveDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialHarmonySearch import ( + AdaptiveDifferentialHarmonySearch, + ) + + lama_register["AdaptiveDifferentialHarmonySearch"] = AdaptiveDifferentialHarmonySearch + LLAMAAdaptiveDifferentialHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialHarmonySearch" + ).set_name("LLAMAAdaptiveDifferentialHarmonySearch", register=True) +except Exception as e: + print("AdaptiveDifferentialHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialMemeticAlgorithm import ( + AdaptiveDifferentialMemeticAlgorithm, + ) + + lama_register["AdaptiveDifferentialMemeticAlgorithm"] = AdaptiveDifferentialMemeticAlgorithm + LLAMAAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialMemeticAlgorithm" + ).set_name("LLAMAAdaptiveDifferentialMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumEvolution import ( + AdaptiveDifferentialQuantumEvolution, + ) + + lama_register["AdaptiveDifferentialQuantumEvolution"] = AdaptiveDifferentialQuantumEvolution + LLAMAAdaptiveDifferentialQuantumEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialQuantumEvolution" + ).set_name("LLAMAAdaptiveDifferentialQuantumEvolution", register=True) +except Exception as e: + print("AdaptiveDifferentialQuantumEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumMetaheuristic import ( + AdaptiveDifferentialQuantumMetaheuristic, + ) + + lama_register["AdaptiveDifferentialQuantumMetaheuristic"] = AdaptiveDifferentialQuantumMetaheuristic + LLAMAAdaptiveDifferentialQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialQuantumMetaheuristic" + ).set_name("LLAMAAdaptiveDifferentialQuantumMetaheuristic", register=True) +except Exception as e: + print("AdaptiveDifferentialQuantumMetaheuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDifferentialSpiralSearch import AdaptiveDifferentialSpiralSearch + + lama_register["AdaptiveDifferentialSpiralSearch"] = AdaptiveDifferentialSpiralSearch + LLAMAAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMAAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: + print("AdaptiveDifferentialSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDimensionalClimbingEvolutionStrategy import ( + AdaptiveDimensionalClimbingEvolutionStrategy, + ) + + lama_register["AdaptiveDimensionalClimbingEvolutionStrategy"] = ( + AdaptiveDimensionalClimbingEvolutionStrategy + ) + LLAMAAdaptiveDimensionalClimbingEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy" + ).set_name("LLAMAAdaptiveDimensionalClimbingEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveDimensionalClimbingEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDimensionalCrossoverEvolver import ( + AdaptiveDimensionalCrossoverEvolver, + ) + + lama_register["AdaptiveDimensionalCrossoverEvolver"] = AdaptiveDimensionalCrossoverEvolver + LLAMAAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( + method="LLAMAAdaptiveDimensionalCrossoverEvolver" + ).set_name("LLAMAAdaptiveDimensionalCrossoverEvolver", register=True) +except Exception as e: + print("AdaptiveDimensionalCrossoverEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDirectionalBiasQuorumOptimization import ( + AdaptiveDirectionalBiasQuorumOptimization, + ) + + lama_register["AdaptiveDirectionalBiasQuorumOptimization"] = AdaptiveDirectionalBiasQuorumOptimization + LLAMAAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMAAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: + print("AdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDirectionalSearch import AdaptiveDirectionalSearch + + lama_register["AdaptiveDirectionalSearch"] = AdaptiveDirectionalSearch + LLAMAAdaptiveDirectionalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch").set_name( + "LLAMAAdaptiveDirectionalSearch", register=True + ) +except Exception as e: + print("AdaptiveDirectionalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDivergenceClusteringSearch import ( + AdaptiveDivergenceClusteringSearch, + ) + + lama_register["AdaptiveDivergenceClusteringSearch"] = AdaptiveDivergenceClusteringSearch + LLAMAAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDivergenceClusteringSearch" + ).set_name("LLAMAAdaptiveDivergenceClusteringSearch", register=True) +except Exception as e: + print("AdaptiveDivergenceClusteringSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiverseHybridOptimizer import AdaptiveDiverseHybridOptimizer + + lama_register["AdaptiveDiverseHybridOptimizer"] = AdaptiveDiverseHybridOptimizer + LLAMAAdaptiveDiverseHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDiverseHybridOptimizer" + ).set_name("LLAMAAdaptiveDiverseHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveDiverseHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversifiedEvolutionStrategy import ( + AdaptiveDiversifiedEvolutionStrategy, + ) + + lama_register["AdaptiveDiversifiedEvolutionStrategy"] = AdaptiveDiversifiedEvolutionStrategy + LLAMAAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedEvolutionStrategy" + ).set_name("LLAMAAdaptiveDiversifiedEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearch import AdaptiveDiversifiedHarmonySearch + + lama_register["AdaptiveDiversifiedHarmonySearch"] = AdaptiveDiversifiedHarmonySearch + LLAMAAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedHarmonySearch" + ).set_name("LLAMAAdaptiveDiversifiedHarmonySearch", register=True) +except Exception as e: + print("AdaptiveDiversifiedHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearchOptimizer import ( + AdaptiveDiversifiedHarmonySearchOptimizer, + ) + + lama_register["AdaptiveDiversifiedHarmonySearchOptimizer"] = AdaptiveDiversifiedHarmonySearchOptimizer + LLAMAAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAAdaptiveDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: + print("AdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversifiedSearch import AdaptiveDiversifiedSearch + + lama_register["AdaptiveDiversifiedSearch"] = AdaptiveDiversifiedSearch + LLAMAAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch").set_name( + "LLAMAAdaptiveDiversifiedSearch", register=True + ) +except Exception as e: + print("AdaptiveDiversifiedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialHybrid import ( + AdaptiveDiversityDifferentialHybrid, + ) + + lama_register["AdaptiveDiversityDifferentialHybrid"] = AdaptiveDiversityDifferentialHybrid + LLAMAAdaptiveDiversityDifferentialHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityDifferentialHybrid" + ).set_name("LLAMAAdaptiveDiversityDifferentialHybrid", register=True) +except Exception as e: + print("AdaptiveDiversityDifferentialHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialMemeticHybrid import ( + AdaptiveDiversityDifferentialMemeticHybrid, + ) + + lama_register["AdaptiveDiversityDifferentialMemeticHybrid"] = AdaptiveDiversityDifferentialMemeticHybrid + LLAMAAdaptiveDiversityDifferentialMemeticHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid" + ).set_name("LLAMAAdaptiveDiversityDifferentialMemeticHybrid", register=True) +except Exception as e: + print("AdaptiveDiversityDifferentialMemeticHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversityMaintainedDifferentialEvolution import ( + AdaptiveDiversityMaintainedDifferentialEvolution, + ) + + lama_register["AdaptiveDiversityMaintainedDifferentialEvolution"] = ( + AdaptiveDiversityMaintainedDifferentialEvolution + ) + LLAMAAdaptiveDiversityMaintainedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution" + ).set_name("LLAMAAdaptiveDiversityMaintainedDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveDiversityMaintainedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversityMaintainingGradientEvolution import ( + AdaptiveDiversityMaintainingGradientEvolution, + ) + + lama_register["AdaptiveDiversityMaintainingGradientEvolution"] = ( + AdaptiveDiversityMaintainingGradientEvolution + ) + LLAMAAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityMaintainingGradientEvolution" + ).set_name("LLAMAAdaptiveDiversityMaintainingGradientEvolution", register=True) +except Exception as e: + print("AdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDiversityPSO import AdaptiveDiversityPSO + + lama_register["AdaptiveDiversityPSO"] = AdaptiveDiversityPSO + LLAMAAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO").set_name( + "LLAMAAdaptiveDiversityPSO", register=True + ) +except Exception as e: + print("AdaptiveDiversityPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDolphinPodOptimization import AdaptiveDolphinPodOptimization + + lama_register["AdaptiveDolphinPodOptimization"] = AdaptiveDolphinPodOptimization + LLAMAAdaptiveDolphinPodOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDolphinPodOptimization" + ).set_name("LLAMAAdaptiveDolphinPodOptimization", register=True) +except Exception as e: + print("AdaptiveDolphinPodOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualPhaseDifferentialEvolution import ( + AdaptiveDualPhaseDifferentialEvolution, + ) + + lama_register["AdaptiveDualPhaseDifferentialEvolution"] = AdaptiveDualPhaseDifferentialEvolution + LLAMAAdaptiveDualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseDifferentialEvolution" + ).set_name("LLAMAAdaptiveDualPhaseDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveDualPhaseDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualPhaseEvolutionarySwarmOptimization import ( + AdaptiveDualPhaseEvolutionarySwarmOptimization, + ) + + lama_register["AdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( + AdaptiveDualPhaseEvolutionarySwarmOptimization + ) + LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization" + ).set_name("LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) +except Exception as e: + print("AdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( + AdaptiveDualPhaseOptimizationWithDynamicParameterControl, + ) + + lama_register["AdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( + AdaptiveDualPhaseOptimizationWithDynamicParameterControl + ) + LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl" + ).set_name("LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) +except Exception as e: + print("AdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualPhaseStrategy import AdaptiveDualPhaseStrategy + + lama_register["AdaptiveDualPhaseStrategy"] = AdaptiveDualPhaseStrategy + LLAMAAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy").set_name( + "LLAMAAdaptiveDualPhaseStrategy", register=True + ) +except Exception as e: + print("AdaptiveDualPhaseStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualPopulationDE_LS import AdaptiveDualPopulationDE_LS + + lama_register["AdaptiveDualPopulationDE_LS"] = AdaptiveDualPopulationDE_LS + LLAMAAdaptiveDualPopulationDE_LS = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS").set_name( + "LLAMAAdaptiveDualPopulationDE_LS", register=True + ) +except Exception as e: + print("AdaptiveDualPopulationDE_LS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDualStrategyOptimizer import AdaptiveDualStrategyOptimizer + + lama_register["AdaptiveDualStrategyOptimizer"] = AdaptiveDualStrategyOptimizer + LLAMAAdaptiveDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDualStrategyOptimizer" + ).set_name("LLAMAAdaptiveDualStrategyOptimizer", register=True) +except Exception as e: + print("AdaptiveDualStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicDE import AdaptiveDynamicDE + + lama_register["AdaptiveDynamicDE"] = AdaptiveDynamicDE + LLAMAAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE").set_name( + "LLAMAAdaptiveDynamicDE", register=True + ) +except Exception as e: + print("AdaptiveDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicDifferentialEvolution import ( + AdaptiveDynamicDifferentialEvolution, + ) + + lama_register["AdaptiveDynamicDifferentialEvolution"] = AdaptiveDynamicDifferentialEvolution + LLAMAAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDifferentialEvolution" + ).set_name("LLAMAAdaptiveDynamicDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseEnhancedStrategyV20 import ( + AdaptiveDynamicDualPhaseEnhancedStrategyV20, + ) + + lama_register["AdaptiveDynamicDualPhaseEnhancedStrategyV20"] = AdaptiveDynamicDualPhaseEnhancedStrategyV20 + LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20" + ).set_name("LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20", register=True) +except Exception as e: + print("AdaptiveDynamicDualPhaseEnhancedStrategyV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseStrategyV11 import ( + AdaptiveDynamicDualPhaseStrategyV11, + ) + + lama_register["AdaptiveDynamicDualPhaseStrategyV11"] = AdaptiveDynamicDualPhaseStrategyV11 + LLAMAAdaptiveDynamicDualPhaseStrategyV11 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDualPhaseStrategyV11" + ).set_name("LLAMAAdaptiveDynamicDualPhaseStrategyV11", register=True) +except Exception as e: + print("AdaptiveDynamicDualPhaseStrategyV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicEvolutionStrategy import AdaptiveDynamicEvolutionStrategy + + lama_register["AdaptiveDynamicEvolutionStrategy"] = AdaptiveDynamicEvolutionStrategy + LLAMAAdaptiveDynamicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicEvolutionStrategy" + ).set_name("LLAMAAdaptiveDynamicEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveDynamicEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithm import ( + AdaptiveDynamicExplorationExploitationAlgorithm, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithm"] = ( + AdaptiveDynamicExplorationExploitationAlgorithm + ) + LLAMAAdaptiveDynamicExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithm", register=True) +except Exception as e: + print("AdaptiveDynamicExplorationExploitationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV2 import ( + AdaptiveDynamicExplorationExploitationAlgorithmV2, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV2"] = ( + AdaptiveDynamicExplorationExploitationAlgorithmV2 + ) + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2", register=True) +except Exception as e: + print("AdaptiveDynamicExplorationExploitationAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV3 import ( + AdaptiveDynamicExplorationExploitationAlgorithmV3, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV3"] = ( + AdaptiveDynamicExplorationExploitationAlgorithmV3 + ) + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3", register=True) +except Exception as e: + print("AdaptiveDynamicExplorationExploitationAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicExplorationOptimization import ( + AdaptiveDynamicExplorationOptimization, + ) + + lama_register["AdaptiveDynamicExplorationOptimization"] = AdaptiveDynamicExplorationOptimization + LLAMAAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: + print("AdaptiveDynamicExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithm import AdaptiveDynamicFireworkAlgorithm + + lama_register["AdaptiveDynamicFireworkAlgorithm"] = AdaptiveDynamicFireworkAlgorithm + LLAMAAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithmRedesigned import ( + AdaptiveDynamicFireworkAlgorithmRedesigned, + ) + + lama_register["AdaptiveDynamicFireworkAlgorithmRedesigned"] = AdaptiveDynamicFireworkAlgorithmRedesigned + LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned" + ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned", register=True) +except Exception as e: + print("AdaptiveDynamicFireworkAlgorithmRedesigned can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicFireworkDifferentialEvolutionV4 import ( + AdaptiveDynamicFireworkDifferentialEvolutionV4, + ) + + lama_register["AdaptiveDynamicFireworkDifferentialEvolutionV4"] = ( + AdaptiveDynamicFireworkDifferentialEvolutionV4 + ) + LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4" + ).set_name("LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4", register=True) +except Exception as e: + print("AdaptiveDynamicFireworkDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicHarmonySearch import AdaptiveDynamicHarmonySearch + + lama_register["AdaptiveDynamicHarmonySearch"] = AdaptiveDynamicHarmonySearch + LLAMAAdaptiveDynamicHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHarmonySearch" + ).set_name("LLAMAAdaptiveDynamicHarmonySearch", register=True) +except Exception as e: + print("AdaptiveDynamicHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizationV2 import ( + AdaptiveDynamicHybridOptimizationV2, + ) + + lama_register["AdaptiveDynamicHybridOptimizationV2"] = AdaptiveDynamicHybridOptimizationV2 + LLAMAAdaptiveDynamicHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHybridOptimizationV2" + ).set_name("LLAMAAdaptiveDynamicHybridOptimizationV2", register=True) +except Exception as e: + print("AdaptiveDynamicHybridOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizer import AdaptiveDynamicHybridOptimizer + + lama_register["AdaptiveDynamicHybridOptimizer"] = AdaptiveDynamicHybridOptimizer + LLAMAAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMAAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + AdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) + + lama_register["AdaptiveDynamicMemeticEvolutionaryAlgorithm"] = AdaptiveDynamicMemeticEvolutionaryAlgorithm + LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("AdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicMultiStrategyDifferentialEvolution import ( + AdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["AdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + AdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveDynamicQuantumSwarmOptimization import ( + AdaptiveDynamicQuantumSwarmOptimization, + ) + + lama_register["AdaptiveDynamicQuantumSwarmOptimization"] = AdaptiveDynamicQuantumSwarmOptimization + LLAMAAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicQuantumSwarmOptimization" + ).set_name("LLAMAAdaptiveDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteCovarianceMatrixMemeticSearch import ( + AdaptiveEliteCovarianceMatrixMemeticSearch, + ) + + lama_register["AdaptiveEliteCovarianceMatrixMemeticSearch"] = AdaptiveEliteCovarianceMatrixMemeticSearch + LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch" + ).set_name("LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch", register=True) +except Exception as e: + print("AdaptiveEliteCovarianceMatrixMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteDifferentialEvolution import ( + AdaptiveEliteDifferentialEvolution, + ) + + lama_register["AdaptiveEliteDifferentialEvolution"] = AdaptiveEliteDifferentialEvolution + LLAMAAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveEliteDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteDiverseHybridOptimizer import ( + AdaptiveEliteDiverseHybridOptimizer, + ) + + lama_register["AdaptiveEliteDiverseHybridOptimizer"] = AdaptiveEliteDiverseHybridOptimizer + LLAMAAdaptiveEliteDiverseHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteDiverseHybridOptimizer" + ).set_name("LLAMAAdaptiveEliteDiverseHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveEliteDiverseHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_LS_v2 import AdaptiveEliteGuidedDE_LS_v2 + + lama_register["AdaptiveEliteGuidedDE_LS_v2"] = AdaptiveEliteGuidedDE_LS_v2 + LLAMAAdaptiveEliteGuidedDE_LS_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2").set_name( + "LLAMAAdaptiveEliteGuidedDE_LS_v2", register=True + ) +except Exception as e: + print("AdaptiveEliteGuidedDE_LS_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_v2 import AdaptiveEliteGuidedDE_v2 + + lama_register["AdaptiveEliteGuidedDE_v2"] = AdaptiveEliteGuidedDE_v2 + LLAMAAdaptiveEliteGuidedDE_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2").set_name( + "LLAMAAdaptiveEliteGuidedDE_v2", register=True + ) +except Exception as e: + print("AdaptiveEliteGuidedDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE import AdaptiveEliteGuidedMutationDE + + lama_register["AdaptiveEliteGuidedMutationDE"] = AdaptiveEliteGuidedMutationDE + LLAMAAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE", register=True) +except Exception as e: + print("AdaptiveEliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v3 import AdaptiveEliteGuidedMutationDE_v3 + + lama_register["AdaptiveEliteGuidedMutationDE_v3"] = AdaptiveEliteGuidedMutationDE_v3 + LLAMAAdaptiveEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE_v3" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v3", register=True) +except Exception as e: + print("AdaptiveEliteGuidedMutationDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v4 import AdaptiveEliteGuidedMutationDE_v4 + + lama_register["AdaptiveEliteGuidedMutationDE_v4"] = AdaptiveEliteGuidedMutationDE_v4 + LLAMAAdaptiveEliteGuidedMutationDE_v4 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE_v4" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v4", register=True) +except Exception as e: + print("AdaptiveEliteGuidedMutationDE_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteGuidedRestartDE import AdaptiveEliteGuidedRestartDE + + lama_register["AdaptiveEliteGuidedRestartDE"] = AdaptiveEliteGuidedRestartDE + LLAMAAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedRestartDE" + ).set_name("LLAMAAdaptiveEliteGuidedRestartDE", register=True) +except Exception as e: + print("AdaptiveEliteGuidedRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteHybridOptimizer import AdaptiveEliteHybridOptimizer + + lama_register["AdaptiveEliteHybridOptimizer"] = AdaptiveEliteHybridOptimizer + LLAMAAdaptiveEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteHybridOptimizer" + ).set_name("LLAMAAdaptiveEliteHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveEliteHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteMemeticDifferentialEvolution import ( + AdaptiveEliteMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveEliteMemeticDifferentialEvolution"] = AdaptiveEliteMemeticDifferentialEvolution + LLAMAAdaptiveEliteMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteMemeticDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveEliteMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizer import AdaptiveEliteMemeticOptimizer + + lama_register["AdaptiveEliteMemeticOptimizer"] = AdaptiveEliteMemeticOptimizer + LLAMAAdaptiveEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizer" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizer", register=True) +except Exception as e: + print("AdaptiveEliteMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV5 import AdaptiveEliteMemeticOptimizerV5 + + lama_register["AdaptiveEliteMemeticOptimizerV5"] = AdaptiveEliteMemeticOptimizerV5 + LLAMAAdaptiveEliteMemeticOptimizerV5 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizerV5" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV5", register=True) +except Exception as e: + print("AdaptiveEliteMemeticOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV6 import AdaptiveEliteMemeticOptimizerV6 + + lama_register["AdaptiveEliteMemeticOptimizerV6"] = AdaptiveEliteMemeticOptimizerV6 + LLAMAAdaptiveEliteMemeticOptimizerV6 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizerV6" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV6", register=True) +except Exception as e: + print("AdaptiveEliteMemeticOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEliteMultiStrategyDifferentialEvolution import ( + AdaptiveEliteMultiStrategyDifferentialEvolution, + ) + + lama_register["AdaptiveEliteMultiStrategyDifferentialEvolution"] = ( + AdaptiveEliteMultiStrategyDifferentialEvolution + ) + LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveElitistDE import AdaptiveElitistDE + + lama_register["AdaptiveElitistDE"] = AdaptiveElitistDE + LLAMAAdaptiveElitistDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE").set_name( + "LLAMAAdaptiveElitistDE", register=True + ) +except Exception as e: + print("AdaptiveElitistDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveElitistDE_v3 import AdaptiveElitistDE_v3 + + lama_register["AdaptiveElitistDE_v3"] = AdaptiveElitistDE_v3 + LLAMAAdaptiveElitistDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3").set_name( + "LLAMAAdaptiveElitistDE_v3", register=True + ) +except Exception as e: + print("AdaptiveElitistDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveElitistMutationDE import AdaptiveElitistMutationDE + + lama_register["AdaptiveElitistMutationDE"] = AdaptiveElitistMutationDE + LLAMAAdaptiveElitistMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE").set_name( + "LLAMAAdaptiveElitistMutationDE", register=True + ) +except Exception as e: + print("AdaptiveElitistMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveElitistPopulationStrategy import ( + AdaptiveElitistPopulationStrategy, + ) + + lama_register["AdaptiveElitistPopulationStrategy"] = AdaptiveElitistPopulationStrategy + LLAMAAdaptiveElitistPopulationStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveElitistPopulationStrategy" + ).set_name("LLAMAAdaptiveElitistPopulationStrategy", register=True) +except Exception as e: + print("AdaptiveElitistPopulationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveElitistQuasiRandomDEGradientAnnealing import ( + AdaptiveElitistQuasiRandomDEGradientAnnealing, + ) + + lama_register["AdaptiveElitistQuasiRandomDEGradientAnnealing"] = ( + AdaptiveElitistQuasiRandomDEGradientAnnealing + ) + LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing" + ).set_name("LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: + print("AdaptiveElitistQuasiRandomDEGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm import ( + AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm, + ) + + lama_register["AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm"] = ( + AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm + ) + LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch import ( + AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch, + ) + + lama_register["AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( + AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch + ) + LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch" + ).set_name("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) +except Exception as e: + print("AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch import ( + AdaptiveEnhancedEvolutionaryFireworksSearch, + ) + + lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch"] = AdaptiveEnhancedEvolutionaryFireworksSearch + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: + print("AdaptiveEnhancedEvolutionaryFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch_v2 import ( + AdaptiveEnhancedEvolutionaryFireworksSearch_v2, + ) + + lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch_v2"] = ( + AdaptiveEnhancedEvolutionaryFireworksSearch_v2 + ) + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2" + ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2", register=True) +except Exception as e: + print("AdaptiveEnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedExplorationGravitationalSwarmOptimization import ( + AdaptiveEnhancedExplorationGravitationalSwarmOptimization, + ) + + lama_register["AdaptiveEnhancedExplorationGravitationalSwarmOptimization"] = ( + AdaptiveEnhancedExplorationGravitationalSwarmOptimization + ) + LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization" + ).set_name("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveEnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithm import ( + AdaptiveEnhancedFireworkAlgorithm, + ) + + lama_register["AdaptiveEnhancedFireworkAlgorithm"] = AdaptiveEnhancedFireworkAlgorithm + LLAMAAdaptiveEnhancedFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveEnhancedFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( + AdaptiveEnhancedFireworkAlgorithmWithLocalSearch, + ) + + lama_register["AdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( + AdaptiveEnhancedFireworkAlgorithmWithLocalSearch + ) + LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: + print("AdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGradientGuidedHybridPSO import ( + AdaptiveEnhancedGradientGuidedHybridPSO, + ) + + lama_register["AdaptiveEnhancedGradientGuidedHybridPSO"] = AdaptiveEnhancedGradientGuidedHybridPSO + LLAMAAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: + print("AdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligence import ( + AdaptiveEnhancedGravitationalSwarmIntelligence, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligence"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligence + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV18 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV18, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV18"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV18 + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV2 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV2, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV2"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV2 + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV22 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV22, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV22"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV22 + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV29 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV29, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV29"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV29 + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligenceV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV33 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV33, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV33"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV33 + ) + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33", register=True) +except Exception as e: + print("AdaptiveEnhancedGravitationalSwarmIntelligenceV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonicFireworkAlgorithm import ( + AdaptiveEnhancedHarmonicFireworkAlgorithm, + ) + + lama_register["AdaptiveEnhancedHarmonicFireworkAlgorithm"] = AdaptiveEnhancedHarmonicFireworkAlgorithm + LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveEnhancedHarmonicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch import ( + AdaptiveEnhancedHarmonyFireworksSearch, + ) + + lama_register["AdaptiveEnhancedHarmonyFireworksSearch"] = AdaptiveEnhancedHarmonyFireworksSearch + LLAMAAdaptiveEnhancedHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch" + ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch", register=True) +except Exception as e: + print("AdaptiveEnhancedHarmonyFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch_v2 import ( + AdaptiveEnhancedHarmonyFireworksSearch_v2, + ) + + lama_register["AdaptiveEnhancedHarmonyFireworksSearch_v2"] = AdaptiveEnhancedHarmonyFireworksSearch_v2 + LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2" + ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2", register=True) +except Exception as e: + print("AdaptiveEnhancedHarmonyFireworksSearch_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration import ( + AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration, + ) + + lama_register["AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration"] = ( + AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration + ) + LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration" + ).set_name("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration", register=True) +except Exception as e: + print("AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticDifferentialEvolution import ( + AdaptiveEnhancedMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveEnhancedMemeticDifferentialEvolution"] = ( + AdaptiveEnhancedMemeticDifferentialEvolution + ) + LLAMAAdaptiveEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveEnhancedMemeticDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveEnhancedMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 import ( + AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3, + ) + + lama_register["AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3"] = ( + AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 + ) + LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3" + ).set_name("LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3", register=True) +except Exception as e: + print("AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv10 import AdaptiveEnhancedMetaNetAQAPSOv10 + + lama_register["AdaptiveEnhancedMetaNetAQAPSOv10"] = AdaptiveEnhancedMetaNetAQAPSOv10 + LLAMAAdaptiveEnhancedMetaNetAQAPSOv10 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10" + ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv10", register=True) +except Exception as e: + print("AdaptiveEnhancedMetaNetAQAPSOv10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv11 import AdaptiveEnhancedMetaNetAQAPSOv11 + + lama_register["AdaptiveEnhancedMetaNetAQAPSOv11"] = AdaptiveEnhancedMetaNetAQAPSOv11 + LLAMAAdaptiveEnhancedMetaNetAQAPSOv11 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11" + ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv11", register=True) +except Exception as e: + print("AdaptiveEnhancedMetaNetAQAPSOv11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseDifferentialEvolution import ( + AdaptiveEnhancedMultiPhaseDifferentialEvolution, + ) + + lama_register["AdaptiveEnhancedMultiPhaseDifferentialEvolution"] = ( + AdaptiveEnhancedMultiPhaseDifferentialEvolution + ) + LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution" + ).set_name("LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveEnhancedMultiPhaseDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseOptimizationAlgorithm import ( + AdaptiveEnhancedMultiPhaseOptimizationAlgorithm, + ) + + lama_register["AdaptiveEnhancedMultiPhaseOptimizationAlgorithm"] = ( + AdaptiveEnhancedMultiPhaseOptimizationAlgorithm + ) + LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm", register=True) +except Exception as e: + print("AdaptiveEnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedQGSA_v7 import AdaptiveEnhancedQGSA_v7 + + lama_register["AdaptiveEnhancedQGSA_v7"] = AdaptiveEnhancedQGSA_v7 + LLAMAAdaptiveEnhancedQGSA_v7 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7").set_name( + "LLAMAAdaptiveEnhancedQGSA_v7", register=True + ) +except Exception as e: + print("AdaptiveEnhancedQGSA_v7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumHarmonySearch import ( + AdaptiveEnhancedQuantumHarmonySearch, + ) + + lama_register["AdaptiveEnhancedQuantumHarmonySearch"] = AdaptiveEnhancedQuantumHarmonySearch + LLAMAAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedQuantumHarmonySearch" + ).set_name("LLAMAAdaptiveEnhancedQuantumHarmonySearch", register=True) +except Exception as e: + print("AdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumSimulatedAnnealing import ( + AdaptiveEnhancedQuantumSimulatedAnnealing, + ) + + lama_register["AdaptiveEnhancedQuantumSimulatedAnnealing"] = AdaptiveEnhancedQuantumSimulatedAnnealing + LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveEnhancedQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 + ) + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11", register=True) +except Exception as e: + print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 + ) + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14", register=True) +except Exception as e: + print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 + ) + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28", register=True) +except Exception as e: + print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEnsembleMemeticAlgorithm import AdaptiveEnsembleMemeticAlgorithm + + lama_register["AdaptiveEnsembleMemeticAlgorithm"] = AdaptiveEnsembleMemeticAlgorithm + LLAMAAdaptiveEnsembleMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnsembleMemeticAlgorithm" + ).set_name("LLAMAAdaptiveEnsembleMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveEnsembleMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialOptimization import ( + AdaptiveEvolutionaryDifferentialOptimization, + ) + + lama_register["AdaptiveEvolutionaryDifferentialOptimization"] = ( + AdaptiveEvolutionaryDifferentialOptimization + ) + LLAMAAdaptiveEvolutionaryDifferentialOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryDifferentialOptimization" + ).set_name("LLAMAAdaptiveEvolutionaryDifferentialOptimization", register=True) +except Exception as e: + print("AdaptiveEvolutionaryDifferentialOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialPopulationStrategy import ( + AdaptiveEvolutionaryDifferentialPopulationStrategy, + ) + + lama_register["AdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( + AdaptiveEvolutionaryDifferentialPopulationStrategy + ) + LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy" + ).set_name("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) +except Exception as e: + print("AdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEvolutionaryFireworksSearch_v1 import ( + AdaptiveEvolutionaryFireworksSearch_v1, + ) + + lama_register["AdaptiveEvolutionaryFireworksSearch_v1"] = AdaptiveEvolutionaryFireworksSearch_v1 + LLAMAAdaptiveEvolutionaryFireworksSearch_v1 = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1" + ).set_name("LLAMAAdaptiveEvolutionaryFireworksSearch_v1", register=True) +except Exception as e: + print("AdaptiveEvolutionaryFireworksSearch_v1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveEvolutionaryGradientSearch import ( + AdaptiveEvolutionaryGradientSearch, + ) + + lama_register["AdaptiveEvolutionaryGradientSearch"] = AdaptiveEvolutionaryGradientSearch + LLAMAAdaptiveEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryGradientSearch" + ).set_name("LLAMAAdaptiveEvolutionaryGradientSearch", register=True) +except Exception as e: + print("AdaptiveEvolutionaryGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveExplorationEvolutionStrategy import ( + AdaptiveExplorationEvolutionStrategy, + ) + + lama_register["AdaptiveExplorationEvolutionStrategy"] = AdaptiveExplorationEvolutionStrategy + LLAMAAdaptiveExplorationEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationEvolutionStrategy" + ).set_name("LLAMAAdaptiveExplorationEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveExplorationEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveExplorationExploitationDifferentialEvolution import ( + AdaptiveExplorationExploitationDifferentialEvolution, + ) + + lama_register["AdaptiveExplorationExploitationDifferentialEvolution"] = ( + AdaptiveExplorationExploitationDifferentialEvolution + ) + LLAMAAdaptiveExplorationExploitationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution" + ).set_name("LLAMAAdaptiveExplorationExploitationDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveExplorationExploitationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveExplorationExploitationHybridAlgorithm import ( + AdaptiveExplorationExploitationHybridAlgorithm, + ) + + lama_register["AdaptiveExplorationExploitationHybridAlgorithm"] = ( + AdaptiveExplorationExploitationHybridAlgorithm + ) + LLAMAAdaptiveExplorationExploitationHybridAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm" + ).set_name("LLAMAAdaptiveExplorationExploitationHybridAlgorithm", register=True) +except Exception as e: + print("AdaptiveExplorationExploitationHybridAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveExploratoryOptimizer import AdaptiveExploratoryOptimizer + + lama_register["AdaptiveExploratoryOptimizer"] = AdaptiveExploratoryOptimizer + LLAMAAdaptiveExploratoryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveExploratoryOptimizer" + ).set_name("LLAMAAdaptiveExploratoryOptimizer", register=True) +except Exception as e: + print("AdaptiveExploratoryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFeedbackControlStrategyV61 import ( + AdaptiveFeedbackControlStrategyV61, + ) + + lama_register["AdaptiveFeedbackControlStrategyV61"] = AdaptiveFeedbackControlStrategyV61 + LLAMAAdaptiveFeedbackControlStrategyV61 = NonObjectOptimizer( + method="LLAMAAdaptiveFeedbackControlStrategyV61" + ).set_name("LLAMAAdaptiveFeedbackControlStrategyV61", register=True) +except Exception as e: + print("AdaptiveFeedbackControlStrategyV61 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFeedbackEnhancedMemoryStrategyV71 import ( + AdaptiveFeedbackEnhancedMemoryStrategyV71, + ) + + lama_register["AdaptiveFeedbackEnhancedMemoryStrategyV71"] = AdaptiveFeedbackEnhancedMemoryStrategyV71 + LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71 = NonObjectOptimizer( + method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71" + ).set_name("LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71", register=True) +except Exception as e: + print("AdaptiveFeedbackEnhancedMemoryStrategyV71 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmEnhanced import ( + AdaptiveFireworkAlgorithmEnhanced, + ) + + lama_register["AdaptiveFireworkAlgorithmEnhanced"] = AdaptiveFireworkAlgorithmEnhanced + LLAMAAdaptiveFireworkAlgorithmEnhanced = NonObjectOptimizer( + method="LLAMAAdaptiveFireworkAlgorithmEnhanced" + ).set_name("LLAMAAdaptiveFireworkAlgorithmEnhanced", register=True) +except Exception as e: + print("AdaptiveFireworkAlgorithmEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmOptimization import ( + AdaptiveFireworkAlgorithmOptimization, + ) + + lama_register["AdaptiveFireworkAlgorithmOptimization"] = AdaptiveFireworkAlgorithmOptimization + LLAMAAdaptiveFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveFireworkAlgorithmOptimization" + ).set_name("LLAMAAdaptiveFireworkAlgorithmOptimization", register=True) +except Exception as e: + print("AdaptiveFireworkAlgorithmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFireworksEnhancedHarmonySearch import ( + AdaptiveFireworksEnhancedHarmonySearch, + ) + + lama_register["AdaptiveFireworksEnhancedHarmonySearch"] = AdaptiveFireworksEnhancedHarmonySearch + LLAMAAdaptiveFireworksEnhancedHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveFireworksEnhancedHarmonySearch" + ).set_name("LLAMAAdaptiveFireworksEnhancedHarmonySearch", register=True) +except Exception as e: + print("AdaptiveFireworksEnhancedHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFocusedEvolutionStrategy import AdaptiveFocusedEvolutionStrategy + + lama_register["AdaptiveFocusedEvolutionStrategy"] = AdaptiveFocusedEvolutionStrategy + LLAMAAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveFocusedEvolutionStrategy" + ).set_name("LLAMAAdaptiveFocusedEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveFocusedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveFuzzyDynamicDE import AdaptiveFuzzyDynamicDE + + lama_register["AdaptiveFuzzyDynamicDE"] = AdaptiveFuzzyDynamicDE + LLAMAAdaptiveFuzzyDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE").set_name( + "LLAMAAdaptiveFuzzyDynamicDE", register=True + ) +except Exception as e: + print("AdaptiveFuzzyDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGaussianSearch import AdaptiveGaussianSearch + + lama_register["AdaptiveGaussianSearch"] = AdaptiveGaussianSearch + LLAMAAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch").set_name( + "LLAMAAdaptiveGaussianSearch", register=True + ) +except Exception as e: + print("AdaptiveGaussianSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGlobalLocalSearchStrategyV62 import ( + AdaptiveGlobalLocalSearchStrategyV62, + ) + + lama_register["AdaptiveGlobalLocalSearchStrategyV62"] = AdaptiveGlobalLocalSearchStrategyV62 + LLAMAAdaptiveGlobalLocalSearchStrategyV62 = NonObjectOptimizer( + method="LLAMAAdaptiveGlobalLocalSearchStrategyV62" + ).set_name("LLAMAAdaptiveGlobalLocalSearchStrategyV62", register=True) +except Exception as e: + print("AdaptiveGlobalLocalSearchStrategyV62 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientAssistedEvolution import ( + AdaptiveGradientAssistedEvolution, + ) + + lama_register["AdaptiveGradientAssistedEvolution"] = AdaptiveGradientAssistedEvolution + LLAMAAdaptiveGradientAssistedEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientAssistedEvolution" + ).set_name("LLAMAAdaptiveGradientAssistedEvolution", register=True) +except Exception as e: + print("AdaptiveGradientAssistedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBalancedCrossoverPSO import ( + AdaptiveGradientBalancedCrossoverPSO, + ) + + lama_register["AdaptiveGradientBalancedCrossoverPSO"] = AdaptiveGradientBalancedCrossoverPSO + LLAMAAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMAAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: + print("AdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBalancedEvolutionStrategy import ( + AdaptiveGradientBalancedEvolutionStrategy, + ) + + lama_register["AdaptiveGradientBalancedEvolutionStrategy"] = AdaptiveGradientBalancedEvolutionStrategy + LLAMAAdaptiveGradientBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBalancedEvolutionStrategy" + ).set_name("LLAMAAdaptiveGradientBalancedEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveGradientBalancedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingPlus import ( + AdaptiveGradientBoostedMemoryAnnealingPlus, + ) + + lama_register["AdaptiveGradientBoostedMemoryAnnealingPlus"] = AdaptiveGradientBoostedMemoryAnnealingPlus + LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus", register=True) +except Exception as e: + print("AdaptiveGradientBoostedMemoryAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl import ( + AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl, + ) + + lama_register["AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl"] = ( + AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl + ) + LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl", register=True) +except Exception as e: + print("AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryExploration import ( + AdaptiveGradientBoostedMemoryExploration, + ) + + lama_register["AdaptiveGradientBoostedMemoryExploration"] = AdaptiveGradientBoostedMemoryExploration + LLAMAAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryExploration" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryExploration", register=True) +except Exception as e: + print("AdaptiveGradientBoostedMemoryExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemorySimulatedAnnealing import ( + AdaptiveGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( + AdaptiveGradientBoostedMemorySimulatedAnnealing + ) + LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientClusteringEvolution import ( + AdaptiveGradientClusteringEvolution, + ) + + lama_register["AdaptiveGradientClusteringEvolution"] = AdaptiveGradientClusteringEvolution + LLAMAAdaptiveGradientClusteringEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientClusteringEvolution" + ).set_name("LLAMAAdaptiveGradientClusteringEvolution", register=True) +except Exception as e: + print("AdaptiveGradientClusteringEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientCrossoverOptimizer import ( + AdaptiveGradientCrossoverOptimizer, + ) + + lama_register["AdaptiveGradientCrossoverOptimizer"] = AdaptiveGradientCrossoverOptimizer + LLAMAAdaptiveGradientCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGradientCrossoverOptimizer" + ).set_name("LLAMAAdaptiveGradientCrossoverOptimizer", register=True) +except Exception as e: + print("AdaptiveGradientCrossoverOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolution import ( + AdaptiveGradientDifferentialEvolution, + ) + + lama_register["AdaptiveGradientDifferentialEvolution"] = AdaptiveGradientDifferentialEvolution + LLAMAAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionEnhanced import ( + AdaptiveGradientDifferentialEvolutionEnhanced, + ) + + lama_register["AdaptiveGradientDifferentialEvolutionEnhanced"] = ( + AdaptiveGradientDifferentialEvolutionEnhanced + ) + LLAMAAdaptiveGradientDifferentialEvolutionEnhanced = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionEnhanced", register=True) +except Exception as e: + print("AdaptiveGradientDifferentialEvolutionEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionPlus import ( + AdaptiveGradientDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveGradientDifferentialEvolutionPlus"] = AdaptiveGradientDifferentialEvolutionPlus + LLAMAAdaptiveGradientDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionPlus", register=True) +except Exception as e: + print("AdaptiveGradientDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientDifferentialHybrid import ( + AdaptiveGradientDifferentialHybrid, + ) + + lama_register["AdaptiveGradientDifferentialHybrid"] = AdaptiveGradientDifferentialHybrid + LLAMAAdaptiveGradientDifferentialHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialHybrid" + ).set_name("LLAMAAdaptiveGradientDifferentialHybrid", register=True) +except Exception as e: + print("AdaptiveGradientDifferentialHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientEnhancedExplorationPSO import ( + AdaptiveGradientEnhancedExplorationPSO, + ) + + lama_register["AdaptiveGradientEnhancedExplorationPSO"] = AdaptiveGradientEnhancedExplorationPSO + LLAMAAdaptiveGradientEnhancedExplorationPSO = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedExplorationPSO" + ).set_name("LLAMAAdaptiveGradientEnhancedExplorationPSO", register=True) +except Exception as e: + print("AdaptiveGradientEnhancedExplorationPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientEnhancedMultiPhaseAnnealing import ( + AdaptiveGradientEnhancedMultiPhaseAnnealing, + ) + + lama_register["AdaptiveGradientEnhancedMultiPhaseAnnealing"] = AdaptiveGradientEnhancedMultiPhaseAnnealing + LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing" + ).set_name("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing", register=True) +except Exception as e: + print("AdaptiveGradientEnhancedMultiPhaseAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientEnhancedRAMEDS import AdaptiveGradientEnhancedRAMEDS + + lama_register["AdaptiveGradientEnhancedRAMEDS"] = AdaptiveGradientEnhancedRAMEDS + LLAMAAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedRAMEDS" + ).set_name("LLAMAAdaptiveGradientEnhancedRAMEDS", register=True) +except Exception as e: + print("AdaptiveGradientEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientEvolution import AdaptiveGradientEvolution + + lama_register["AdaptiveGradientEvolution"] = AdaptiveGradientEvolution + LLAMAAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution").set_name( + "LLAMAAdaptiveGradientEvolution", register=True + ) +except Exception as e: + print("AdaptiveGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientExploration import AdaptiveGradientExploration + + lama_register["AdaptiveGradientExploration"] = AdaptiveGradientExploration + LLAMAAdaptiveGradientExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration").set_name( + "LLAMAAdaptiveGradientExploration", register=True + ) +except Exception as e: + print("AdaptiveGradientExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientExplorationV2 import AdaptiveGradientExplorationV2 + + lama_register["AdaptiveGradientExplorationV2"] = AdaptiveGradientExplorationV2 + LLAMAAdaptiveGradientExplorationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveGradientExplorationV2" + ).set_name("LLAMAAdaptiveGradientExplorationV2", register=True) +except Exception as e: + print("AdaptiveGradientExplorationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientGuidedEvolution import AdaptiveGradientGuidedEvolution + + lama_register["AdaptiveGradientGuidedEvolution"] = AdaptiveGradientGuidedEvolution + LLAMAAdaptiveGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientGuidedEvolution" + ).set_name("LLAMAAdaptiveGradientGuidedEvolution", register=True) +except Exception as e: + print("AdaptiveGradientGuidedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientInformedPSO import AdaptiveGradientInformedPSO + + lama_register["AdaptiveGradientInformedPSO"] = AdaptiveGradientInformedPSO + LLAMAAdaptiveGradientInformedPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO").set_name( + "LLAMAAdaptiveGradientInformedPSO", register=True + ) +except Exception as e: + print("AdaptiveGradientInformedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientSampling import AdaptiveGradientSampling + + lama_register["AdaptiveGradientSampling"] = AdaptiveGradientSampling + LLAMAAdaptiveGradientSampling = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling").set_name( + "LLAMAAdaptiveGradientSampling", register=True + ) +except Exception as e: + print("AdaptiveGradientSampling can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGradientSearch import AdaptiveGradientSearch + + lama_register["AdaptiveGradientSearch"] = AdaptiveGradientSearch + LLAMAAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch").set_name( + "LLAMAAdaptiveGradientSearch", register=True + ) +except Exception as e: + print("AdaptiveGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligence import ( + AdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligence"] = AdaptiveGravitationalSwarmIntelligence + LLAMAAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV15 import ( + AdaptiveGravitationalSwarmIntelligenceV15, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligenceV15"] = AdaptiveGravitationalSwarmIntelligenceV15 + LLAMAAdaptiveGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV15", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligenceV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV2 import ( + AdaptiveGravitationalSwarmIntelligenceV2, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligenceV2"] = AdaptiveGravitationalSwarmIntelligenceV2 + LLAMAAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV26 import ( + AdaptiveGravitationalSwarmIntelligenceV26, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligenceV26"] = AdaptiveGravitationalSwarmIntelligenceV26 + LLAMAAdaptiveGravitationalSwarmIntelligenceV26 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV26", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligenceV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV3 import ( + AdaptiveGravitationalSwarmIntelligenceV3, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligenceV3"] = AdaptiveGravitationalSwarmIntelligenceV3 + LLAMAAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV4 import ( + AdaptiveGravitationalSwarmIntelligenceV4, + ) + + lama_register["AdaptiveGravitationalSwarmIntelligenceV4"] = AdaptiveGravitationalSwarmIntelligenceV4 + LLAMAAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( + AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, + ) + + lama_register["AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( + AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + ) + LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" + ).set_name("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) +except Exception as e: + print("AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGuidedCulturalSearch import AdaptiveGuidedCulturalSearch + + lama_register["AdaptiveGuidedCulturalSearch"] = AdaptiveGuidedCulturalSearch + LLAMAAdaptiveGuidedCulturalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedCulturalSearch" + ).set_name("LLAMAAdaptiveGuidedCulturalSearch", register=True) +except Exception as e: + print("AdaptiveGuidedCulturalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGuidedDifferentialEvolution import ( + AdaptiveGuidedDifferentialEvolution, + ) + + lama_register["AdaptiveGuidedDifferentialEvolution"] = AdaptiveGuidedDifferentialEvolution + LLAMAAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedDifferentialEvolution" + ).set_name("LLAMAAdaptiveGuidedDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveGuidedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGuidedEvolutionStrategy import AdaptiveGuidedEvolutionStrategy + + lama_register["AdaptiveGuidedEvolutionStrategy"] = AdaptiveGuidedEvolutionStrategy + LLAMAAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedEvolutionStrategy" + ).set_name("LLAMAAdaptiveGuidedEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveGuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGuidedHybridOptimizer import AdaptiveGuidedHybridOptimizer + + lama_register["AdaptiveGuidedHybridOptimizer"] = AdaptiveGuidedHybridOptimizer + LLAMAAdaptiveGuidedHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedHybridOptimizer" + ).set_name("LLAMAAdaptiveGuidedHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveGuidedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveGuidedMutationOptimizer import AdaptiveGuidedMutationOptimizer + + lama_register["AdaptiveGuidedMutationOptimizer"] = AdaptiveGuidedMutationOptimizer + LLAMAAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: + print("AdaptiveGuidedMutationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicFireworkAlgorithm import ( + AdaptiveHarmonicFireworkAlgorithm, + ) + + lama_register["AdaptiveHarmonicFireworkAlgorithm"] = AdaptiveHarmonicFireworkAlgorithm + LLAMAAdaptiveHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveHarmonicFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveHarmonicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicSearchOptimizer import AdaptiveHarmonicSearchOptimizer + + lama_register["AdaptiveHarmonicSearchOptimizer"] = AdaptiveHarmonicSearchOptimizer + LLAMAAdaptiveHarmonicSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSearchOptimizer" + ).set_name("LLAMAAdaptiveHarmonicSearchOptimizer", register=True) +except Exception as e: + print("AdaptiveHarmonicSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimization import ( + AdaptiveHarmonicSwarmOptimization, + ) + + lama_register["AdaptiveHarmonicSwarmOptimization"] = AdaptiveHarmonicSwarmOptimization + LLAMAAdaptiveHarmonicSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimization" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveHarmonicSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV2 import ( + AdaptiveHarmonicSwarmOptimizationV2, + ) + + lama_register["AdaptiveHarmonicSwarmOptimizationV2"] = AdaptiveHarmonicSwarmOptimizationV2 + LLAMAAdaptiveHarmonicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHarmonicSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV3 import ( + AdaptiveHarmonicSwarmOptimizationV3, + ) + + lama_register["AdaptiveHarmonicSwarmOptimizationV3"] = AdaptiveHarmonicSwarmOptimizationV3 + LLAMAAdaptiveHarmonicSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimizationV3" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV3", register=True) +except Exception as e: + print("AdaptiveHarmonicSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV12 import AdaptiveHarmonicTabuSearchV12 + + lama_register["AdaptiveHarmonicTabuSearchV12"] = AdaptiveHarmonicTabuSearchV12 + LLAMAAdaptiveHarmonicTabuSearchV12 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV12" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV12", register=True) +except Exception as e: + print("AdaptiveHarmonicTabuSearchV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV17 import AdaptiveHarmonicTabuSearchV17 + + lama_register["AdaptiveHarmonicTabuSearchV17"] = AdaptiveHarmonicTabuSearchV17 + LLAMAAdaptiveHarmonicTabuSearchV17 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV17" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV17", register=True) +except Exception as e: + print("AdaptiveHarmonicTabuSearchV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV20 import AdaptiveHarmonicTabuSearchV20 + + lama_register["AdaptiveHarmonicTabuSearchV20"] = AdaptiveHarmonicTabuSearchV20 + LLAMAAdaptiveHarmonicTabuSearchV20 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV20" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV20", register=True) +except Exception as e: + print("AdaptiveHarmonicTabuSearchV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV8 import AdaptiveHarmonicTabuSearchV8 + + lama_register["AdaptiveHarmonicTabuSearchV8"] = AdaptiveHarmonicTabuSearchV8 + LLAMAAdaptiveHarmonicTabuSearchV8 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV8" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV8", register=True) +except Exception as e: + print("AdaptiveHarmonicTabuSearchV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyFireworksAlgorithm import ( + AdaptiveHarmonyFireworksAlgorithm, + ) + + lama_register["AdaptiveHarmonyFireworksAlgorithm"] = AdaptiveHarmonyFireworksAlgorithm + LLAMAAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyFireworksAlgorithm" + ).set_name("LLAMAAdaptiveHarmonyFireworksAlgorithm", register=True) +except Exception as e: + print("AdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithm import AdaptiveHarmonyMemeticAlgorithm + + lama_register["AdaptiveHarmonyMemeticAlgorithm"] = AdaptiveHarmonyMemeticAlgorithm + LLAMAAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticAlgorithm" + ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithmV15 import ( + AdaptiveHarmonyMemeticAlgorithmV15, + ) + + lama_register["AdaptiveHarmonyMemeticAlgorithmV15"] = AdaptiveHarmonyMemeticAlgorithmV15 + LLAMAAdaptiveHarmonyMemeticAlgorithmV15 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15" + ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithmV15", register=True) +except Exception as e: + print("AdaptiveHarmonyMemeticAlgorithmV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV2 import ( + AdaptiveHarmonyMemeticOptimizationV2, + ) + + lama_register["AdaptiveHarmonyMemeticOptimizationV2"] = AdaptiveHarmonyMemeticOptimizationV2 + LLAMAAdaptiveHarmonyMemeticOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHarmonyMemeticOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV27 import ( + AdaptiveHarmonyMemeticOptimizationV27, + ) + + lama_register["AdaptiveHarmonyMemeticOptimizationV27"] = AdaptiveHarmonyMemeticOptimizationV27 + LLAMAAdaptiveHarmonyMemeticOptimizationV27 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticOptimizationV27" + ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV27", register=True) +except Exception as e: + print("AdaptiveHarmonyMemeticOptimizationV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticSearchV2 import AdaptiveHarmonyMemeticSearchV2 + + lama_register["AdaptiveHarmonyMemeticSearchV2"] = AdaptiveHarmonyMemeticSearchV2 + LLAMAAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticSearchV2" + ).set_name("LLAMAAdaptiveHarmonyMemeticSearchV2", register=True) +except Exception as e: + print("AdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchOptimizerV2 import AdaptiveHarmonySearchOptimizerV2 + + lama_register["AdaptiveHarmonySearchOptimizerV2"] = AdaptiveHarmonySearchOptimizerV2 + LLAMAAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: + print("AdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithCuckooInspiration import ( + AdaptiveHarmonySearchWithCuckooInspiration, + ) + + lama_register["AdaptiveHarmonySearchWithCuckooInspiration"] = AdaptiveHarmonySearchWithCuckooInspiration + LLAMAAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration" + ).set_name("LLAMAAdaptiveHarmonySearchWithCuckooInspiration", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 import ( + AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2, + ) + + lama_register["AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2"] = ( + AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 + ) + LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlight import ( + AdaptiveHarmonySearchWithImprovedLevyFlight, + ) + + lama_register["AdaptiveHarmonySearchWithImprovedLevyFlight"] = AdaptiveHarmonySearchWithImprovedLevyFlight + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight" + ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlightInspiration import ( + AdaptiveHarmonySearchWithImprovedLevyFlightInspiration, + ) + + lama_register["AdaptiveHarmonySearchWithImprovedLevyFlightInspiration"] = ( + AdaptiveHarmonySearchWithImprovedLevyFlightInspiration + ) + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration" + ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithImprovedLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLevyFlightImprovement import ( + AdaptiveHarmonySearchWithLevyFlightImprovement, + ) + + lama_register["AdaptiveHarmonySearchWithLevyFlightImprovement"] = ( + AdaptiveHarmonySearchWithLevyFlightImprovement + ) + LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement" + ).set_name("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithLevyFlightImprovement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimization import ( + AdaptiveHarmonySearchWithLocalOptimization, + ) + + lama_register["AdaptiveHarmonySearchWithLocalOptimization"] = AdaptiveHarmonySearchWithLocalOptimization + LLAMAAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimization" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimization", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationImproved import ( + AdaptiveHarmonySearchWithLocalOptimizationImproved, + ) + + lama_register["AdaptiveHarmonySearchWithLocalOptimizationImproved"] = ( + AdaptiveHarmonySearchWithLocalOptimizationImproved + ) + LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithLocalOptimizationImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationV2 import ( + AdaptiveHarmonySearchWithLocalOptimizationV2, + ) + + lama_register["AdaptiveHarmonySearchWithLocalOptimizationV2"] = ( + AdaptiveHarmonySearchWithLocalOptimizationV2 + ) + LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithLocalOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithSimulatedAnnealing import ( + AdaptiveHarmonySearchWithSimulatedAnnealing, + ) + + lama_register["AdaptiveHarmonySearchWithSimulatedAnnealing"] = AdaptiveHarmonySearchWithSimulatedAnnealing + LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing" + ).set_name("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHarmonyTabuOptimization import AdaptiveHarmonyTabuOptimization + + lama_register["AdaptiveHarmonyTabuOptimization"] = AdaptiveHarmonyTabuOptimization + LLAMAAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyTabuOptimization" + ).set_name("LLAMAAdaptiveHarmonyTabuOptimization", register=True) +except Exception as e: + print("AdaptiveHarmonyTabuOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridAlgorithm import AdaptiveHybridAlgorithm + + lama_register["AdaptiveHybridAlgorithm"] = AdaptiveHybridAlgorithm + LLAMAAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm").set_name( + "LLAMAAdaptiveHybridAlgorithm", register=True + ) +except Exception as e: + print("AdaptiveHybridAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithGradientBoost import ( + AdaptiveHybridAnnealingWithGradientBoost, + ) + + lama_register["AdaptiveHybridAnnealingWithGradientBoost"] = AdaptiveHybridAnnealingWithGradientBoost + LLAMAAdaptiveHybridAnnealingWithGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveHybridAnnealingWithGradientBoost" + ).set_name("LLAMAAdaptiveHybridAnnealingWithGradientBoost", register=True) +except Exception as e: + print("AdaptiveHybridAnnealingWithGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithMemoryRefinement import ( + AdaptiveHybridAnnealingWithMemoryRefinement, + ) + + lama_register["AdaptiveHybridAnnealingWithMemoryRefinement"] = AdaptiveHybridAnnealingWithMemoryRefinement + LLAMAAdaptiveHybridAnnealingWithMemoryRefinement = NonObjectOptimizer( + method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement" + ).set_name("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement", register=True) +except Exception as e: + print("AdaptiveHybridAnnealingWithMemoryRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridCMAESDE import AdaptiveHybridCMAESDE + + lama_register["AdaptiveHybridCMAESDE"] = AdaptiveHybridCMAESDE + LLAMAAdaptiveHybridCMAESDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE").set_name( + "LLAMAAdaptiveHybridCMAESDE", register=True + ) +except Exception as e: + print("AdaptiveHybridCMAESDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 import ( + AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 + ) + LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: + print("AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridCulturalOptimizer import AdaptiveHybridCulturalOptimizer + + lama_register["AdaptiveHybridCulturalOptimizer"] = AdaptiveHybridCulturalOptimizer + LLAMAAdaptiveHybridCulturalOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHybridCulturalOptimizer" + ).set_name("LLAMAAdaptiveHybridCulturalOptimizer", register=True) +except Exception as e: + print("AdaptiveHybridCulturalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridDEPSOWithDynamicRestart import ( + AdaptiveHybridDEPSOWithDynamicRestart, + ) + + lama_register["AdaptiveHybridDEPSOWithDynamicRestart"] = AdaptiveHybridDEPSOWithDynamicRestart + LLAMAAdaptiveHybridDEPSOWithDynamicRestart = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart" + ).set_name("LLAMAAdaptiveHybridDEPSOWithDynamicRestart", register=True) +except Exception as e: + print("AdaptiveHybridDEPSOWithDynamicRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridDEWithIntensifiedLocalSearch import ( + AdaptiveHybridDEWithIntensifiedLocalSearch, + ) + + lama_register["AdaptiveHybridDEWithIntensifiedLocalSearch"] = AdaptiveHybridDEWithIntensifiedLocalSearch + LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch" + ).set_name("LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch", register=True) +except Exception as e: + print("AdaptiveHybridDEWithIntensifiedLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridDifferentialEvolution import ( + AdaptiveHybridDifferentialEvolution, + ) + + lama_register["AdaptiveHybridDifferentialEvolution"] = AdaptiveHybridDifferentialEvolution + LLAMAAdaptiveHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDifferentialEvolution" + ).set_name("LLAMAAdaptiveHybridDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveHybridDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridEvolutionStrategyV5 import ( + AdaptiveHybridEvolutionStrategyV5, + ) + + lama_register["AdaptiveHybridEvolutionStrategyV5"] = AdaptiveHybridEvolutionStrategyV5 + LLAMAAdaptiveHybridEvolutionStrategyV5 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridEvolutionStrategyV5" + ).set_name("LLAMAAdaptiveHybridEvolutionStrategyV5", register=True) +except Exception as e: + print("AdaptiveHybridEvolutionStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridFireworkAlgorithm import AdaptiveHybridFireworkAlgorithm + + lama_register["AdaptiveHybridFireworkAlgorithm"] = AdaptiveHybridFireworkAlgorithm + LLAMAAdaptiveHybridFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHybridFireworkAlgorithm" + ).set_name("LLAMAAdaptiveHybridFireworkAlgorithm", register=True) +except Exception as e: + print("AdaptiveHybridFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridGradientAnnealingWithVariableMemory import ( + AdaptiveHybridGradientAnnealingWithVariableMemory, + ) + + lama_register["AdaptiveHybridGradientAnnealingWithVariableMemory"] = ( + AdaptiveHybridGradientAnnealingWithVariableMemory + ) + LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory = NonObjectOptimizer( + method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory" + ).set_name("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory", register=True) +except Exception as e: + print("AdaptiveHybridGradientAnnealingWithVariableMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridHarmonySearch import AdaptiveHybridHarmonySearch + + lama_register["AdaptiveHybridHarmonySearch"] = AdaptiveHybridHarmonySearch + LLAMAAdaptiveHybridHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch").set_name( + "LLAMAAdaptiveHybridHarmonySearch", register=True + ) +except Exception as e: + print("AdaptiveHybridHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridMetaOptimizer import AdaptiveHybridMetaOptimizer + + lama_register["AdaptiveHybridMetaOptimizer"] = AdaptiveHybridMetaOptimizer + LLAMAAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer").set_name( + "LLAMAAdaptiveHybridMetaOptimizer", register=True + ) +except Exception as e: + print("AdaptiveHybridMetaOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridOptimization import AdaptiveHybridOptimization + + lama_register["AdaptiveHybridOptimization"] = AdaptiveHybridOptimization + LLAMAAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization").set_name( + "LLAMAAdaptiveHybridOptimization", register=True + ) +except Exception as e: + print("AdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridOptimizationV2 import AdaptiveHybridOptimizationV2 + + lama_register["AdaptiveHybridOptimizationV2"] = AdaptiveHybridOptimizationV2 + LLAMAAdaptiveHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridOptimizationV2" + ).set_name("LLAMAAdaptiveHybridOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHybridOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridOptimizationV3 import AdaptiveHybridOptimizationV3 + + lama_register["AdaptiveHybridOptimizationV3"] = AdaptiveHybridOptimizationV3 + LLAMAAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridOptimizationV3" + ).set_name("LLAMAAdaptiveHybridOptimizationV3", register=True) +except Exception as e: + print("AdaptiveHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridOptimizer import AdaptiveHybridOptimizer + + lama_register["AdaptiveHybridOptimizer"] = AdaptiveHybridOptimizer + LLAMAAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer").set_name( + "LLAMAAdaptiveHybridOptimizer", register=True + ) +except Exception as e: + print("AdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolution import ( + AdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + AdaptiveHybridParticleSwarmDifferentialEvolution + ) + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( + AdaptiveHybridParticleSwarmDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( + AdaptiveHybridParticleSwarmDifferentialEvolutionPlus + ) + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) +except Exception as e: + print("AdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridQuasiRandomGradientDE import ( + AdaptiveHybridQuasiRandomGradientDE, + ) + + lama_register["AdaptiveHybridQuasiRandomGradientDE"] = AdaptiveHybridQuasiRandomGradientDE + LLAMAAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMAAdaptiveHybridQuasiRandomGradientDE" + ).set_name("LLAMAAdaptiveHybridQuasiRandomGradientDE", register=True) +except Exception as e: + print("AdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridRecombinativeStrategy import ( + AdaptiveHybridRecombinativeStrategy, + ) + + lama_register["AdaptiveHybridRecombinativeStrategy"] = AdaptiveHybridRecombinativeStrategy + LLAMAAdaptiveHybridRecombinativeStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveHybridRecombinativeStrategy" + ).set_name("LLAMAAdaptiveHybridRecombinativeStrategy", register=True) +except Exception as e: + print("AdaptiveHybridRecombinativeStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridSearchOptimizer import AdaptiveHybridSearchOptimizer + + lama_register["AdaptiveHybridSearchOptimizer"] = AdaptiveHybridSearchOptimizer + LLAMAAdaptiveHybridSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHybridSearchOptimizer" + ).set_name("LLAMAAdaptiveHybridSearchOptimizer", register=True) +except Exception as e: + print("AdaptiveHybridSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHybridSwarmEvolutionOptimization import ( + AdaptiveHybridSwarmEvolutionOptimization, + ) + + lama_register["AdaptiveHybridSwarmEvolutionOptimization"] = AdaptiveHybridSwarmEvolutionOptimization + LLAMAAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHybridSwarmEvolutionOptimization" + ).set_name("LLAMAAdaptiveHybridSwarmEvolutionOptimization", register=True) +except Exception as e: + print("AdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveHyperQuantumStateCrossoverOptimizationV2 import ( + AdaptiveHyperQuantumStateCrossoverOptimizationV2, + ) + + lama_register["AdaptiveHyperQuantumStateCrossoverOptimizationV2"] = ( + AdaptiveHyperQuantumStateCrossoverOptimizationV2 + ) + LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2" + ).set_name("LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2", register=True) +except Exception as e: + print("AdaptiveHyperQuantumStateCrossoverOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveIncrementalCrossoverEnhancement import ( + AdaptiveIncrementalCrossoverEnhancement, + ) + + lama_register["AdaptiveIncrementalCrossoverEnhancement"] = AdaptiveIncrementalCrossoverEnhancement + LLAMAAdaptiveIncrementalCrossoverEnhancement = NonObjectOptimizer( + method="LLAMAAdaptiveIncrementalCrossoverEnhancement" + ).set_name("LLAMAAdaptiveIncrementalCrossoverEnhancement", register=True) +except Exception as e: + print("AdaptiveIncrementalCrossoverEnhancement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveInertiaHybridOptimizer import AdaptiveInertiaHybridOptimizer + + lama_register["AdaptiveInertiaHybridOptimizer"] = AdaptiveInertiaHybridOptimizer + LLAMAAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaHybridOptimizer" + ).set_name("LLAMAAdaptiveInertiaHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveInertiaHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveInertiaParticleOptimizer import AdaptiveInertiaParticleOptimizer + + lama_register["AdaptiveInertiaParticleOptimizer"] = AdaptiveInertiaParticleOptimizer + LLAMAAdaptiveInertiaParticleOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaParticleOptimizer" + ).set_name("LLAMAAdaptiveInertiaParticleOptimizer", register=True) +except Exception as e: + print("AdaptiveInertiaParticleOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveInertiaParticleSwarmOptimization import ( + AdaptiveInertiaParticleSwarmOptimization, + ) + + lama_register["AdaptiveInertiaParticleSwarmOptimization"] = AdaptiveInertiaParticleSwarmOptimization + LLAMAAdaptiveInertiaParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveInertiaParticleSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveInertiaParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLearningDifferentialEvolutionOptimizer import ( + AdaptiveLearningDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveLearningDifferentialEvolutionOptimizer"] = ( + AdaptiveLearningDifferentialEvolutionOptimizer + ) + LLAMAAdaptiveLearningDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveLearningDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("AdaptiveLearningDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( + AdaptiveLevyDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["AdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( + AdaptiveLevyDiversifiedMetaHeuristicAlgorithm + ) + LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: + print("AdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLevyHarmonySearch import AdaptiveLevyHarmonySearch + + lama_register["AdaptiveLevyHarmonySearch"] = AdaptiveLevyHarmonySearch + LLAMAAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch").set_name( + "LLAMAAdaptiveLevyHarmonySearch", register=True + ) +except Exception as e: + print("AdaptiveLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing import ( + AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing, + ) + + lama_register["AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing"] = ( + AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing + ) + LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLocalSearchOptimizer import AdaptiveLocalSearchOptimizer + + lama_register["AdaptiveLocalSearchOptimizer"] = AdaptiveLocalSearchOptimizer + LLAMAAdaptiveLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchOptimizer" + ).set_name("LLAMAAdaptiveLocalSearchOptimizer", register=True) +except Exception as e: + print("AdaptiveLocalSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveLocalSearchQuantumSimulatedAnnealing import ( + AdaptiveLocalSearchQuantumSimulatedAnnealing, + ) + + lama_register["AdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( + AdaptiveLocalSearchQuantumSimulatedAnnealing + ) + LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticAlgorithm import AdaptiveMemeticAlgorithm + + lama_register["AdaptiveMemeticAlgorithm"] = AdaptiveMemeticAlgorithm + LLAMAAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm").set_name( + "LLAMAAdaptiveMemeticAlgorithm", register=True + ) +except Exception as e: + print("AdaptiveMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer import ( + AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer"] = ( + AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer + ) + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer import ( + AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer, + ) + + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer"] = ( + AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer + ) + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer" + ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolution import ( + AdaptiveMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveMemeticDifferentialEvolution"] = AdaptiveMemeticDifferentialEvolution + LLAMAAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionOptimizer import ( + AdaptiveMemeticDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionOptimizer"] = ( + AdaptiveMemeticDifferentialEvolutionOptimizer + ) + LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV2 import ( + AdaptiveMemeticDifferentialEvolutionV2, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV2"] = AdaptiveMemeticDifferentialEvolutionV2 + LLAMAAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV2", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV3 import ( + AdaptiveMemeticDifferentialEvolutionV3, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV3"] = AdaptiveMemeticDifferentialEvolutionV3 + LLAMAAdaptiveMemeticDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV3" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV3", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV4 import ( + AdaptiveMemeticDifferentialEvolutionV4, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV4"] = AdaptiveMemeticDifferentialEvolutionV4 + LLAMAAdaptiveMemeticDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV4" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV4", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV5 import ( + AdaptiveMemeticDifferentialEvolutionV5, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV5"] = AdaptiveMemeticDifferentialEvolutionV5 + LLAMAAdaptiveMemeticDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV5" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV5", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV6 import ( + AdaptiveMemeticDifferentialEvolutionV6, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV6"] = AdaptiveMemeticDifferentialEvolutionV6 + LLAMAAdaptiveMemeticDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV6" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV6", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV7 import ( + AdaptiveMemeticDifferentialEvolutionV7, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionV7"] = AdaptiveMemeticDifferentialEvolutionV7 + LLAMAAdaptiveMemeticDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV7" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV7", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR import ( + AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR"] = ( + AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR + ) + LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance import ( + AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance"] = ( + AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance + ) + LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialQuantumSearch import ( + AdaptiveMemeticDifferentialQuantumSearch, + ) + + lama_register["AdaptiveMemeticDifferentialQuantumSearch"] = AdaptiveMemeticDifferentialQuantumSearch + LLAMAAdaptiveMemeticDifferentialQuantumSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialQuantumSearch" + ).set_name("LLAMAAdaptiveMemeticDifferentialQuantumSearch", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialQuantumSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialSearch import ( + AdaptiveMemeticDifferentialSearch, + ) + + lama_register["AdaptiveMemeticDifferentialSearch"] = AdaptiveMemeticDifferentialSearch + LLAMAAdaptiveMemeticDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialSearch" + ).set_name("LLAMAAdaptiveMemeticDifferentialSearch", register=True) +except Exception as e: + print("AdaptiveMemeticDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticDiverseOptimizer import AdaptiveMemeticDiverseOptimizer + + lama_register["AdaptiveMemeticDiverseOptimizer"] = AdaptiveMemeticDiverseOptimizer + LLAMAAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticDiverseOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionStrategy import AdaptiveMemeticEvolutionStrategy + + lama_register["AdaptiveMemeticEvolutionStrategy"] = AdaptiveMemeticEvolutionStrategy + LLAMAAdaptiveMemeticEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionStrategy" + ).set_name("LLAMAAdaptiveMemeticEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveMemeticEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryAlgorithm import ( + AdaptiveMemeticEvolutionaryAlgorithm, + ) + + lama_register["AdaptiveMemeticEvolutionaryAlgorithm"] = AdaptiveMemeticEvolutionaryAlgorithm + LLAMAAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("AdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryOptimizer import ( + AdaptiveMemeticEvolutionaryOptimizer, + ) + + lama_register["AdaptiveMemeticEvolutionaryOptimizer"] = AdaptiveMemeticEvolutionaryOptimizer + LLAMAAdaptiveMemeticEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveMemeticEvolutionaryOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionarySearch import ( + AdaptiveMemeticEvolutionarySearch, + ) + + lama_register["AdaptiveMemeticEvolutionarySearch"] = AdaptiveMemeticEvolutionarySearch + LLAMAAdaptiveMemeticEvolutionarySearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionarySearch" + ).set_name("LLAMAAdaptiveMemeticEvolutionarySearch", register=True) +except Exception as e: + print("AdaptiveMemeticEvolutionarySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimization import ( + AdaptiveMemeticHarmonyOptimization, + ) + + lama_register["AdaptiveMemeticHarmonyOptimization"] = AdaptiveMemeticHarmonyOptimization + LLAMAAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHarmonyOptimization" + ).set_name("LLAMAAdaptiveMemeticHarmonyOptimization", register=True) +except Exception as e: + print("AdaptiveMemeticHarmonyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimizationV5 import ( + AdaptiveMemeticHarmonyOptimizationV5, + ) + + lama_register["AdaptiveMemeticHarmonyOptimizationV5"] = AdaptiveMemeticHarmonyOptimizationV5 + LLAMAAdaptiveMemeticHarmonyOptimizationV5 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHarmonyOptimizationV5" + ).set_name("LLAMAAdaptiveMemeticHarmonyOptimizationV5", register=True) +except Exception as e: + print("AdaptiveMemeticHarmonyOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticHybridOptimizer import AdaptiveMemeticHybridOptimizer + + lama_register["AdaptiveMemeticHybridOptimizer"] = AdaptiveMemeticHybridOptimizer + LLAMAAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveMemeticHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticOptimizer import AdaptiveMemeticOptimizer + + lama_register["AdaptiveMemeticOptimizer"] = AdaptiveMemeticOptimizer + LLAMAAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer").set_name( + "LLAMAAdaptiveMemeticOptimizer", register=True + ) +except Exception as e: + print("AdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticOptimizerV2 import AdaptiveMemeticOptimizerV2 + + lama_register["AdaptiveMemeticOptimizerV2"] = AdaptiveMemeticOptimizerV2 + LLAMAAdaptiveMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2").set_name( + "LLAMAAdaptiveMemeticOptimizerV2", register=True + ) +except Exception as e: + print("AdaptiveMemeticOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemeticParticleSwarmOptimization import ( + AdaptiveMemeticParticleSwarmOptimization, + ) + + lama_register["AdaptiveMemeticParticleSwarmOptimization"] = AdaptiveMemeticParticleSwarmOptimization + LLAMAAdaptiveMemeticParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveMemeticParticleSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveMemeticParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryAssistedStrategyV41 import ( + AdaptiveMemoryAssistedStrategyV41, + ) + + lama_register["AdaptiveMemoryAssistedStrategyV41"] = AdaptiveMemoryAssistedStrategyV41 + LLAMAAdaptiveMemoryAssistedStrategyV41 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryAssistedStrategyV41" + ).set_name("LLAMAAdaptiveMemoryAssistedStrategyV41", register=True) +except Exception as e: + print("AdaptiveMemoryAssistedStrategyV41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedDualStrategyV45 import ( + AdaptiveMemoryEnhancedDualStrategyV45, + ) + + lama_register["AdaptiveMemoryEnhancedDualStrategyV45"] = AdaptiveMemoryEnhancedDualStrategyV45 + LLAMAAdaptiveMemoryEnhancedDualStrategyV45 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45" + ).set_name("LLAMAAdaptiveMemoryEnhancedDualStrategyV45", register=True) +except Exception as e: + print("AdaptiveMemoryEnhancedDualStrategyV45 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedSearch import AdaptiveMemoryEnhancedSearch + + lama_register["AdaptiveMemoryEnhancedSearch"] = AdaptiveMemoryEnhancedSearch + LLAMAAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMAAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: + print("AdaptiveMemoryEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedStrategyV42 import ( + AdaptiveMemoryEnhancedStrategyV42, + ) + + lama_register["AdaptiveMemoryEnhancedStrategyV42"] = AdaptiveMemoryEnhancedStrategyV42 + LLAMAAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedStrategyV42" + ).set_name("LLAMAAdaptiveMemoryEnhancedStrategyV42", register=True) +except Exception as e: + print("AdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryEvolutionaryOptimizer import ( + AdaptiveMemoryEvolutionaryOptimizer, + ) + + lama_register["AdaptiveMemoryEvolutionaryOptimizer"] = AdaptiveMemoryEvolutionaryOptimizer + LLAMAAdaptiveMemoryEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveMemoryEvolutionaryOptimizer", register=True) +except Exception as e: + print("AdaptiveMemoryEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealing import AdaptiveMemoryGradientAnnealing + + lama_register["AdaptiveMemoryGradientAnnealing"] = AdaptiveMemoryGradientAnnealing + LLAMAAdaptiveMemoryGradientAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealing" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealing", register=True) +except Exception as e: + print("AdaptiveMemoryGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingPlus import ( + AdaptiveMemoryGradientAnnealingPlus, + ) + + lama_register["AdaptiveMemoryGradientAnnealingPlus"] = AdaptiveMemoryGradientAnnealingPlus + LLAMAAdaptiveMemoryGradientAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealingPlus" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealingPlus", register=True) +except Exception as e: + print("AdaptiveMemoryGradientAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingWithExplorationBoost import ( + AdaptiveMemoryGradientAnnealingWithExplorationBoost, + ) + + lama_register["AdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( + AdaptiveMemoryGradientAnnealingWithExplorationBoost + ) + LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) +except Exception as e: + print("AdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryGradientSimulatedAnnealing import ( + AdaptiveMemoryGradientSimulatedAnnealing, + ) + + lama_register["AdaptiveMemoryGradientSimulatedAnnealing"] = AdaptiveMemoryGradientSimulatedAnnealing + LLAMAAdaptiveMemoryGradientSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing" + ).set_name("LLAMAAdaptiveMemoryGradientSimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveMemoryGradientSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryGuidedEvolutionStrategyV57 import ( + AdaptiveMemoryGuidedEvolutionStrategyV57, + ) + + lama_register["AdaptiveMemoryGuidedEvolutionStrategyV57"] = AdaptiveMemoryGuidedEvolutionStrategyV57 + LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57" + ).set_name("LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57", register=True) +except Exception as e: + print("AdaptiveMemoryGuidedEvolutionStrategyV57 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryHybridAnnealing import AdaptiveMemoryHybridAnnealing + + lama_register["AdaptiveMemoryHybridAnnealing"] = AdaptiveMemoryHybridAnnealing + LLAMAAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryHybridAnnealing" + ).set_name("LLAMAAdaptiveMemoryHybridAnnealing", register=True) +except Exception as e: + print("AdaptiveMemoryHybridAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO import AdaptiveMemoryHybridDEPSO + + lama_register["AdaptiveMemoryHybridDEPSO"] = AdaptiveMemoryHybridDEPSO + LLAMAAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO").set_name( + "LLAMAAdaptiveMemoryHybridDEPSO", register=True + ) +except Exception as e: + print("AdaptiveMemoryHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO_V2 import AdaptiveMemoryHybridDEPSO_V2 + + lama_register["AdaptiveMemoryHybridDEPSO_V2"] = AdaptiveMemoryHybridDEPSO_V2 + LLAMAAdaptiveMemoryHybridDEPSO_V2 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryHybridDEPSO_V2" + ).set_name("LLAMAAdaptiveMemoryHybridDEPSO_V2", register=True) +except Exception as e: + print("AdaptiveMemoryHybridDEPSO_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemoryParticleDifferentialSearch import ( + AdaptiveMemoryParticleDifferentialSearch, + ) + + lama_register["AdaptiveMemoryParticleDifferentialSearch"] = AdaptiveMemoryParticleDifferentialSearch + LLAMAAdaptiveMemoryParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryParticleDifferentialSearch" + ).set_name("LLAMAAdaptiveMemoryParticleDifferentialSearch", register=True) +except Exception as e: + print("AdaptiveMemoryParticleDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemorySelfTuningStrategyV60 import ( + AdaptiveMemorySelfTuningStrategyV60, + ) + + lama_register["AdaptiveMemorySelfTuningStrategyV60"] = AdaptiveMemorySelfTuningStrategyV60 + LLAMAAdaptiveMemorySelfTuningStrategyV60 = NonObjectOptimizer( + method="LLAMAAdaptiveMemorySelfTuningStrategyV60" + ).set_name("LLAMAAdaptiveMemorySelfTuningStrategyV60", register=True) +except Exception as e: + print("AdaptiveMemorySelfTuningStrategyV60 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMemorySimulatedAnnealing import AdaptiveMemorySimulatedAnnealing + + lama_register["AdaptiveMemorySimulatedAnnealing"] = AdaptiveMemorySimulatedAnnealing + LLAMAAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSO import AdaptiveMetaNetAQAPSO + + lama_register["AdaptiveMetaNetAQAPSO"] = AdaptiveMetaNetAQAPSO + LLAMAAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO").set_name( + "LLAMAAdaptiveMetaNetAQAPSO", register=True + ) +except Exception as e: + print("AdaptiveMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSOv13 import AdaptiveMetaNetAQAPSOv13 + + lama_register["AdaptiveMetaNetAQAPSOv13"] = AdaptiveMetaNetAQAPSOv13 + LLAMAAdaptiveMetaNetAQAPSOv13 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13").set_name( + "LLAMAAdaptiveMetaNetAQAPSOv13", register=True + ) +except Exception as e: + print("AdaptiveMetaNetAQAPSOv13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMetaNetPSO_v3 import AdaptiveMetaNetPSO_v3 + + lama_register["AdaptiveMetaNetPSO_v3"] = AdaptiveMetaNetPSO_v3 + LLAMAAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3").set_name( + "LLAMAAdaptiveMetaNetPSO_v3", register=True + ) +except Exception as e: + print("AdaptiveMetaNetPSO_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMetaNetPSOv3 import AdaptiveMetaNetPSOv3 + + lama_register["AdaptiveMetaNetPSOv3"] = AdaptiveMetaNetPSOv3 + LLAMAAdaptiveMetaNetPSOv3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3").set_name( + "LLAMAAdaptiveMetaNetPSOv3", register=True + ) +except Exception as e: + print("AdaptiveMetaNetPSOv3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMetaheuristicOptimization import ( + AdaptiveMetaheuristicOptimization, + ) + + lama_register["AdaptiveMetaheuristicOptimization"] = AdaptiveMetaheuristicOptimization + LLAMAAdaptiveMetaheuristicOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMetaheuristicOptimization" + ).set_name("LLAMAAdaptiveMetaheuristicOptimization", register=True) +except Exception as e: + print("AdaptiveMetaheuristicOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMomentumOptimization import AdaptiveMomentumOptimization + + lama_register["AdaptiveMomentumOptimization"] = AdaptiveMomentumOptimization + LLAMAAdaptiveMomentumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMomentumOptimization" + ).set_name("LLAMAAdaptiveMomentumOptimization", register=True) +except Exception as e: + print("AdaptiveMomentumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiExplorationAlgorithm import ( + AdaptiveMultiExplorationAlgorithm, + ) + + lama_register["AdaptiveMultiExplorationAlgorithm"] = AdaptiveMultiExplorationAlgorithm + LLAMAAdaptiveMultiExplorationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveMultiExplorationAlgorithm" + ).set_name("LLAMAAdaptiveMultiExplorationAlgorithm", register=True) +except Exception as e: + print("AdaptiveMultiExplorationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiMemorySimulatedAnnealing import ( + AdaptiveMultiMemorySimulatedAnnealing, + ) + + lama_register["AdaptiveMultiMemorySimulatedAnnealing"] = AdaptiveMultiMemorySimulatedAnnealing + LLAMAAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMultiMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveMultiMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiOperatorDifferentialEvolution import ( + AdaptiveMultiOperatorDifferentialEvolution, + ) + + lama_register["AdaptiveMultiOperatorDifferentialEvolution"] = AdaptiveMultiOperatorDifferentialEvolution + LLAMAAdaptiveMultiOperatorDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiOperatorDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveMultiOperatorDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiOperatorSearch import AdaptiveMultiOperatorSearch + + lama_register["AdaptiveMultiOperatorSearch"] = AdaptiveMultiOperatorSearch + LLAMAAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch").set_name( + "LLAMAAdaptiveMultiOperatorSearch", register=True + ) +except Exception as e: + print("AdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV2 import AdaptiveMultiOperatorSearchV2 + + lama_register["AdaptiveMultiOperatorSearchV2"] = AdaptiveMultiOperatorSearchV2 + LLAMAAdaptiveMultiOperatorSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorSearchV2" + ).set_name("LLAMAAdaptiveMultiOperatorSearchV2", register=True) +except Exception as e: + print("AdaptiveMultiOperatorSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV3 import AdaptiveMultiOperatorSearchV3 + + lama_register["AdaptiveMultiOperatorSearchV3"] = AdaptiveMultiOperatorSearchV3 + LLAMAAdaptiveMultiOperatorSearchV3 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorSearchV3" + ).set_name("LLAMAAdaptiveMultiOperatorSearchV3", register=True) +except Exception as e: + print("AdaptiveMultiOperatorSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealing import AdaptiveMultiPhaseAnnealing + + lama_register["AdaptiveMultiPhaseAnnealing"] = AdaptiveMultiPhaseAnnealing + LLAMAAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing").set_name( + "LLAMAAdaptiveMultiPhaseAnnealing", register=True + ) +except Exception as e: + print("AdaptiveMultiPhaseAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealingV2 import AdaptiveMultiPhaseAnnealingV2 + + lama_register["AdaptiveMultiPhaseAnnealingV2"] = AdaptiveMultiPhaseAnnealingV2 + LLAMAAdaptiveMultiPhaseAnnealingV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPhaseAnnealingV2" + ).set_name("LLAMAAdaptiveMultiPhaseAnnealingV2", register=True) +except Exception as e: + print("AdaptiveMultiPhaseAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiPhaseOptimization import AdaptiveMultiPhaseOptimization + + lama_register["AdaptiveMultiPhaseOptimization"] = AdaptiveMultiPhaseOptimization + LLAMAAdaptiveMultiPhaseOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPhaseOptimization" + ).set_name("LLAMAAdaptiveMultiPhaseOptimization", register=True) +except Exception as e: + print("AdaptiveMultiPhaseOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiPopulationDifferentialEvolution import ( + AdaptiveMultiPopulationDifferentialEvolution, + ) + + lama_register["AdaptiveMultiPopulationDifferentialEvolution"] = ( + AdaptiveMultiPopulationDifferentialEvolution + ) + LLAMAAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPopulationDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiPopulationDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStageOptimization import AdaptiveMultiStageOptimization + + lama_register["AdaptiveMultiStageOptimization"] = AdaptiveMultiStageOptimization + LLAMAAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStageOptimization" + ).set_name("LLAMAAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("AdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategicOptimizer import AdaptiveMultiStrategicOptimizer + + lama_register["AdaptiveMultiStrategicOptimizer"] = AdaptiveMultiStrategicOptimizer + LLAMAAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategicOptimizer" + ).set_name("LLAMAAdaptiveMultiStrategicOptimizer", register=True) +except Exception as e: + print("AdaptiveMultiStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyDE import AdaptiveMultiStrategyDE + + lama_register["AdaptiveMultiStrategyDE"] = AdaptiveMultiStrategyDE + LLAMAAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE").set_name( + "LLAMAAdaptiveMultiStrategyDE", register=True + ) +except Exception as e: + print("AdaptiveMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyDEWithMemory import ( + AdaptiveMultiStrategyDEWithMemory, + ) + + lama_register["AdaptiveMultiStrategyDEWithMemory"] = AdaptiveMultiStrategyDEWithMemory + LLAMAAdaptiveMultiStrategyDEWithMemory = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDEWithMemory" + ).set_name("LLAMAAdaptiveMultiStrategyDEWithMemory", register=True) +except Exception as e: + print("AdaptiveMultiStrategyDEWithMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolution import ( + AdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["AdaptiveMultiStrategyDifferentialEvolution"] = AdaptiveMultiStrategyDifferentialEvolution + LLAMAAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolutionPlus import ( + AdaptiveMultiStrategyDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveMultiStrategyDifferentialEvolutionPlus"] = ( + AdaptiveMultiStrategyDifferentialEvolutionPlus + ) + LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus", register=True) +except Exception as e: + print("AdaptiveMultiStrategyDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizer import AdaptiveMultiStrategyOptimizer + + lama_register["AdaptiveMultiStrategyOptimizer"] = AdaptiveMultiStrategyOptimizer + LLAMAAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: + print("AdaptiveMultiStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizerV2 import AdaptiveMultiStrategyOptimizerV2 + + lama_register["AdaptiveMultiStrategyOptimizerV2"] = AdaptiveMultiStrategyOptimizerV2 + LLAMAAdaptiveMultiStrategyOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyOptimizerV2" + ).set_name("LLAMAAdaptiveMultiStrategyOptimizerV2", register=True) +except Exception as e: + print("AdaptiveMultiStrategyOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveNicheDifferentialParticleSwarmOptimizer import ( + AdaptiveNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["AdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( + AdaptiveNicheDifferentialParticleSwarmOptimizer + ) + LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("AdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveNichingDE_PSO import AdaptiveNichingDE_PSO + + lama_register["AdaptiveNichingDE_PSO"] = AdaptiveNichingDE_PSO + LLAMAAdaptiveNichingDE_PSO = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO").set_name( + "LLAMAAdaptiveNichingDE_PSO", register=True + ) +except Exception as e: + print("AdaptiveNichingDE_PSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolution import ( + AdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["AdaptiveOppositionBasedDifferentialEvolution"] = ( + AdaptiveOppositionBasedDifferentialEvolution + ) + LLAMAAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolutionImproved import ( + AdaptiveOppositionBasedDifferentialEvolutionImproved, + ) + + lama_register["AdaptiveOppositionBasedDifferentialEvolutionImproved"] = ( + AdaptiveOppositionBasedDifferentialEvolutionImproved + ) + LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved" + ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved", register=True) +except Exception as e: + print("AdaptiveOppositionBasedDifferentialEvolutionImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE import ( + AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE, + ) + + lama_register["AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE"] = ( + AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE + ) + LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE" + ).set_name("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE", register=True) +except Exception as e: + print("AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveOrthogonalDifferentialEvolution import ( + AdaptiveOrthogonalDifferentialEvolution, + ) + + lama_register["AdaptiveOrthogonalDifferentialEvolution"] = AdaptiveOrthogonalDifferentialEvolution + LLAMAAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveOscillatoryCrossoverDifferentialEvolution import ( + AdaptiveOscillatoryCrossoverDifferentialEvolution, + ) + + lama_register["AdaptiveOscillatoryCrossoverDifferentialEvolution"] = ( + AdaptiveOscillatoryCrossoverDifferentialEvolution + ) + LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution" + ).set_name("LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveOscillatoryCrossoverDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveParticleDifferentialSearch import ( + AdaptiveParticleDifferentialSearch, + ) + + lama_register["AdaptiveParticleDifferentialSearch"] = AdaptiveParticleDifferentialSearch + LLAMAAdaptiveParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveParticleDifferentialSearch" + ).set_name("LLAMAAdaptiveParticleDifferentialSearch", register=True) +except Exception as e: + print("AdaptiveParticleDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveParticleSwarmOptimization import ( + AdaptiveParticleSwarmOptimization, + ) + + lama_register["AdaptiveParticleSwarmOptimization"] = AdaptiveParticleSwarmOptimization + LLAMAAdaptiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveParticleSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePerturbationDifferentialEvolution import ( + AdaptivePerturbationDifferentialEvolution, + ) + + lama_register["AdaptivePerturbationDifferentialEvolution"] = AdaptivePerturbationDifferentialEvolution + LLAMAAdaptivePerturbationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePerturbationDifferentialEvolution" + ).set_name("LLAMAAdaptivePerturbationDifferentialEvolution", register=True) +except Exception as e: + print("AdaptivePerturbationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePopulationDifferentialEvolutionOptimizer import ( + AdaptivePopulationDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptivePopulationDifferentialEvolutionOptimizer"] = ( + AdaptivePopulationDifferentialEvolutionOptimizer + ) + LLAMAAdaptivePopulationDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptivePopulationDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("AdaptivePopulationDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( + AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( + AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + ) + LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: + print("AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePopulationMemeticOptimizer import ( + AdaptivePopulationMemeticOptimizer, + ) + + lama_register["AdaptivePopulationMemeticOptimizer"] = AdaptivePopulationMemeticOptimizer + LLAMAAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationMemeticOptimizer" + ).set_name("LLAMAAdaptivePopulationMemeticOptimizer", register=True) +except Exception as e: + print("AdaptivePopulationMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePopulationResizingOptimizer import ( + AdaptivePopulationResizingOptimizer, + ) + + lama_register["AdaptivePopulationResizingOptimizer"] = AdaptivePopulationResizingOptimizer + LLAMAAdaptivePopulationResizingOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationResizingOptimizer" + ).set_name("LLAMAAdaptivePopulationResizingOptimizer", register=True) +except Exception as e: + print("AdaptivePopulationResizingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionCohortOptimizationV3 import ( + AdaptivePrecisionCohortOptimizationV3, + ) + + lama_register["AdaptivePrecisionCohortOptimizationV3"] = AdaptivePrecisionCohortOptimizationV3 + LLAMAAdaptivePrecisionCohortOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionCohortOptimizationV3" + ).set_name("LLAMAAdaptivePrecisionCohortOptimizationV3", register=True) +except Exception as e: + print("AdaptivePrecisionCohortOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionControlDifferentialEvolution import ( + AdaptivePrecisionControlDifferentialEvolution, + ) + + lama_register["AdaptivePrecisionControlDifferentialEvolution"] = ( + AdaptivePrecisionControlDifferentialEvolution + ) + LLAMAAdaptivePrecisionControlDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionControlDifferentialEvolution" + ).set_name("LLAMAAdaptivePrecisionControlDifferentialEvolution", register=True) +except Exception as e: + print("AdaptivePrecisionControlDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionCrossoverEvolution import ( + AdaptivePrecisionCrossoverEvolution, + ) + + lama_register["AdaptivePrecisionCrossoverEvolution"] = AdaptivePrecisionCrossoverEvolution + LLAMAAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionCrossoverEvolution" + ).set_name("LLAMAAdaptivePrecisionCrossoverEvolution", register=True) +except Exception as e: + print("AdaptivePrecisionCrossoverEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionDifferentialEvolution import ( + AdaptivePrecisionDifferentialEvolution, + ) + + lama_register["AdaptivePrecisionDifferentialEvolution"] = AdaptivePrecisionDifferentialEvolution + LLAMAAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDifferentialEvolution" + ).set_name("LLAMAAdaptivePrecisionDifferentialEvolution", register=True) +except Exception as e: + print("AdaptivePrecisionDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionDivideSearch import AdaptivePrecisionDivideSearch + + lama_register["AdaptivePrecisionDivideSearch"] = AdaptivePrecisionDivideSearch + LLAMAAdaptivePrecisionDivideSearch = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDivideSearch" + ).set_name("LLAMAAdaptivePrecisionDivideSearch", register=True) +except Exception as e: + print("AdaptivePrecisionDivideSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionDynamicMemoryStrategyV48 import ( + AdaptivePrecisionDynamicMemoryStrategyV48, + ) + + lama_register["AdaptivePrecisionDynamicMemoryStrategyV48"] = AdaptivePrecisionDynamicMemoryStrategyV48 + LLAMAAdaptivePrecisionDynamicMemoryStrategyV48 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48" + ).set_name("LLAMAAdaptivePrecisionDynamicMemoryStrategyV48", register=True) +except Exception as e: + print("AdaptivePrecisionDynamicMemoryStrategyV48 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionEvolutionStrategy import ( + AdaptivePrecisionEvolutionStrategy, + ) + + lama_register["AdaptivePrecisionEvolutionStrategy"] = AdaptivePrecisionEvolutionStrategy + LLAMAAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionEvolutionStrategy" + ).set_name("LLAMAAdaptivePrecisionEvolutionStrategy", register=True) +except Exception as e: + print("AdaptivePrecisionEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionFocalStrategy import AdaptivePrecisionFocalStrategy + + lama_register["AdaptivePrecisionFocalStrategy"] = AdaptivePrecisionFocalStrategy + LLAMAAdaptivePrecisionFocalStrategy = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionFocalStrategy" + ).set_name("LLAMAAdaptivePrecisionFocalStrategy", register=True) +except Exception as e: + print("AdaptivePrecisionFocalStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionHybridSearch import AdaptivePrecisionHybridSearch + + lama_register["AdaptivePrecisionHybridSearch"] = AdaptivePrecisionHybridSearch + LLAMAAdaptivePrecisionHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionHybridSearch" + ).set_name("LLAMAAdaptivePrecisionHybridSearch", register=True) +except Exception as e: + print("AdaptivePrecisionHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionMemoryStrategyV47 import ( + AdaptivePrecisionMemoryStrategyV47, + ) + + lama_register["AdaptivePrecisionMemoryStrategyV47"] = AdaptivePrecisionMemoryStrategyV47 + LLAMAAdaptivePrecisionMemoryStrategyV47 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionMemoryStrategyV47" + ).set_name("LLAMAAdaptivePrecisionMemoryStrategyV47", register=True) +except Exception as e: + print("AdaptivePrecisionMemoryStrategyV47 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionRotationalClimbOptimizer import ( + AdaptivePrecisionRotationalClimbOptimizer, + ) + + lama_register["AdaptivePrecisionRotationalClimbOptimizer"] = AdaptivePrecisionRotationalClimbOptimizer + LLAMAAdaptivePrecisionRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionRotationalClimbOptimizer" + ).set_name("LLAMAAdaptivePrecisionRotationalClimbOptimizer", register=True) +except Exception as e: + print("AdaptivePrecisionRotationalClimbOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionSearch import AdaptivePrecisionSearch + + lama_register["AdaptivePrecisionSearch"] = AdaptivePrecisionSearch + LLAMAAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch").set_name( + "LLAMAAdaptivePrecisionSearch", register=True + ) +except Exception as e: + print("AdaptivePrecisionSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptivePrecisionStrategicOptimizer import ( + AdaptivePrecisionStrategicOptimizer, + ) + + lama_register["AdaptivePrecisionStrategicOptimizer"] = AdaptivePrecisionStrategicOptimizer + LLAMAAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionStrategicOptimizer" + ).set_name("LLAMAAdaptivePrecisionStrategicOptimizer", register=True) +except Exception as e: + print("AdaptivePrecisionStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQGSA import AdaptiveQGSA + + lama_register["AdaptiveQGSA"] = AdaptiveQGSA + LLAMAAdaptiveQGSA = NonObjectOptimizer(method="LLAMAAdaptiveQGSA").set_name( + "LLAMAAdaptiveQGSA", register=True + ) +except Exception as e: + print("AdaptiveQGSA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQGSA_EC import AdaptiveQGSA_EC + + lama_register["AdaptiveQGSA_EC"] = AdaptiveQGSA_EC + LLAMAAdaptiveQGSA_EC = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC").set_name( + "LLAMAAdaptiveQGSA_EC", register=True + ) +except Exception as e: + print("AdaptiveQGSA_EC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDE import AdaptiveQuantumAnnealingDE + + lama_register["AdaptiveQuantumAnnealingDE"] = AdaptiveQuantumAnnealingDE + LLAMAAdaptiveQuantumAnnealingDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE").set_name( + "LLAMAAdaptiveQuantumAnnealingDE", register=True + ) +except Exception as e: + print("AdaptiveQuantumAnnealingDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDEv2 import AdaptiveQuantumAnnealingDEv2 + + lama_register["AdaptiveQuantumAnnealingDEv2"] = AdaptiveQuantumAnnealingDEv2 + LLAMAAdaptiveQuantumAnnealingDEv2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumAnnealingDEv2" + ).set_name("LLAMAAdaptiveQuantumAnnealingDEv2", register=True) +except Exception as e: + print("AdaptiveQuantumAnnealingDEv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumCognitionOptimizerV3 import ( + AdaptiveQuantumCognitionOptimizerV3, + ) + + lama_register["AdaptiveQuantumCognitionOptimizerV3"] = AdaptiveQuantumCognitionOptimizerV3 + LLAMAAdaptiveQuantumCognitionOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumCognitionOptimizerV3" + ).set_name("LLAMAAdaptiveQuantumCognitionOptimizerV3", register=True) +except Exception as e: + print("AdaptiveQuantumCognitionOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumCrossoverOptimizer import ( + AdaptiveQuantumCrossoverOptimizer, + ) + + lama_register["AdaptiveQuantumCrossoverOptimizer"] = AdaptiveQuantumCrossoverOptimizer + LLAMAAdaptiveQuantumCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumCrossoverOptimizer" + ).set_name("LLAMAAdaptiveQuantumCrossoverOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumCrossoverOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolution import ( + AdaptiveQuantumDifferentialEvolution, + ) + + lama_register["AdaptiveQuantumDifferentialEvolution"] = AdaptiveQuantumDifferentialEvolution + LLAMAAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionPlus import ( + AdaptiveQuantumDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionPlus"] = AdaptiveQuantumDifferentialEvolutionPlus + LLAMAAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionPlus", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionV2 import ( + AdaptiveQuantumDifferentialEvolutionV2, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionV2"] = AdaptiveQuantumDifferentialEvolutionV2 + LLAMAAdaptiveQuantumDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionV2", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( + AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" + ).set_name( + "LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True + ) +except Exception as e: + print( + "AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( + AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch import ( + AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory import ( + AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory"] = ( + AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement import ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement"] = ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement" + ).set_name( + "LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement", register=True + ) +except Exception as e: + print( + "AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch import ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch + ) + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch", register=True) +except Exception as e: + print("AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDiversityEnhancerV7 import ( + AdaptiveQuantumDiversityEnhancerV7, + ) + + lama_register["AdaptiveQuantumDiversityEnhancerV7"] = AdaptiveQuantumDiversityEnhancerV7 + LLAMAAdaptiveQuantumDiversityEnhancerV7 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDiversityEnhancerV7" + ).set_name("LLAMAAdaptiveQuantumDiversityEnhancerV7", register=True) +except Exception as e: + print("AdaptiveQuantumDiversityEnhancerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumDynamicTuningOptimizer import ( + AdaptiveQuantumDynamicTuningOptimizer, + ) + + lama_register["AdaptiveQuantumDynamicTuningOptimizer"] = AdaptiveQuantumDynamicTuningOptimizer + LLAMAAdaptiveQuantumDynamicTuningOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDynamicTuningOptimizer" + ).set_name("LLAMAAdaptiveQuantumDynamicTuningOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumDynamicTuningOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumEliteDifferentialEvolution import ( + AdaptiveQuantumEliteDifferentialEvolution, + ) + + lama_register["AdaptiveQuantumEliteDifferentialEvolution"] = AdaptiveQuantumEliteDifferentialEvolution + LLAMAAdaptiveQuantumEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEliteDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuantumEliteDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveQuantumEliteDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumEliteMemeticOptimizer import ( + AdaptiveQuantumEliteMemeticOptimizer, + ) + + lama_register["AdaptiveQuantumEliteMemeticOptimizer"] = AdaptiveQuantumEliteMemeticOptimizer + LLAMAAdaptiveQuantumEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEliteMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumEliteMemeticOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumEliteMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumEntropyDE import AdaptiveQuantumEntropyDE + + lama_register["AdaptiveQuantumEntropyDE"] = AdaptiveQuantumEntropyDE + LLAMAAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE").set_name( + "LLAMAAdaptiveQuantumEntropyDE", register=True + ) +except Exception as e: + print("AdaptiveQuantumEntropyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumEvolutionStrategy import AdaptiveQuantumEvolutionStrategy + + lama_register["AdaptiveQuantumEvolutionStrategy"] = AdaptiveQuantumEvolutionStrategy + LLAMAAdaptiveQuantumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEvolutionStrategy" + ).set_name("LLAMAAdaptiveQuantumEvolutionStrategy", register=True) +except Exception as e: + print("AdaptiveQuantumEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumEvolvedDiversityExplorerV15 import ( + AdaptiveQuantumEvolvedDiversityExplorerV15, + ) + + lama_register["AdaptiveQuantumEvolvedDiversityExplorerV15"] = AdaptiveQuantumEvolvedDiversityExplorerV15 + LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15" + ).set_name("LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15", register=True) +except Exception as e: + print("AdaptiveQuantumEvolvedDiversityExplorerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch import ( + AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch, + ) + + lama_register["AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch"] = ( + AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch + ) + LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch" + ).set_name("LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch", register=True) +except Exception as e: + print("AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedMemeticSearch import ( + AdaptiveQuantumGradientBoostedMemeticSearch, + ) + + lama_register["AdaptiveQuantumGradientBoostedMemeticSearch"] = AdaptiveQuantumGradientBoostedMemeticSearch + LLAMAAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMAAdaptiveQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: + print("AdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientEnhancedOptimizer import ( + AdaptiveQuantumGradientEnhancedOptimizer, + ) + + lama_register["AdaptiveQuantumGradientEnhancedOptimizer"] = AdaptiveQuantumGradientEnhancedOptimizer + LLAMAAdaptiveQuantumGradientEnhancedOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientEnhancedOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumGradientEnhancedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimization import ( + AdaptiveQuantumGradientExplorationOptimization, + ) + + lama_register["AdaptiveQuantumGradientExplorationOptimization"] = ( + AdaptiveQuantumGradientExplorationOptimization + ) + LLAMAAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientExplorationOptimization" + ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimizationV2 import ( + AdaptiveQuantumGradientExplorationOptimizationV2, + ) + + lama_register["AdaptiveQuantumGradientExplorationOptimizationV2"] = ( + AdaptiveQuantumGradientExplorationOptimizationV2 + ) + LLAMAAdaptiveQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimizationV2", register=True) +except Exception as e: + print("AdaptiveQuantumGradientExplorationOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientHybridOptimizer import ( + AdaptiveQuantumGradientHybridOptimizer, + ) + + lama_register["AdaptiveQuantumGradientHybridOptimizer"] = AdaptiveQuantumGradientHybridOptimizer + LLAMAAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientHybridOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumGradientOptimizer import AdaptiveQuantumGradientOptimizer + + lama_register["AdaptiveQuantumGradientOptimizer"] = AdaptiveQuantumGradientOptimizer + LLAMAAdaptiveQuantumGradientOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumHarmonizedPSO import AdaptiveQuantumHarmonizedPSO + + lama_register["AdaptiveQuantumHarmonizedPSO"] = AdaptiveQuantumHarmonizedPSO + LLAMAAdaptiveQuantumHarmonizedPSO = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHarmonizedPSO" + ).set_name("LLAMAAdaptiveQuantumHarmonizedPSO", register=True) +except Exception as e: + print("AdaptiveQuantumHarmonizedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumHybridOptimizer import AdaptiveQuantumHybridOptimizer + + lama_register["AdaptiveQuantumHybridOptimizer"] = AdaptiveQuantumHybridOptimizer + LLAMAAdaptiveQuantumHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHybridOptimizer" + ).set_name("LLAMAAdaptiveQuantumHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumHybridSearchV2 import AdaptiveQuantumHybridSearchV2 + + lama_register["AdaptiveQuantumHybridSearchV2"] = AdaptiveQuantumHybridSearchV2 + LLAMAAdaptiveQuantumHybridSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHybridSearchV2" + ).set_name("LLAMAAdaptiveQuantumHybridSearchV2", register=True) +except Exception as e: + print("AdaptiveQuantumHybridSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumInfluencedMemeticAlgorithm import ( + AdaptiveQuantumInfluencedMemeticAlgorithm, + ) + + lama_register["AdaptiveQuantumInfluencedMemeticAlgorithm"] = AdaptiveQuantumInfluencedMemeticAlgorithm + LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm" + ).set_name("LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm", register=True) +except Exception as e: + print("AdaptiveQuantumInfluencedMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumInformedDifferentialStrategy import ( + AdaptiveQuantumInformedDifferentialStrategy, + ) + + lama_register["AdaptiveQuantumInformedDifferentialStrategy"] = AdaptiveQuantumInformedDifferentialStrategy + LLAMAAdaptiveQuantumInformedDifferentialStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInformedDifferentialStrategy" + ).set_name("LLAMAAdaptiveQuantumInformedDifferentialStrategy", register=True) +except Exception as e: + print("AdaptiveQuantumInformedDifferentialStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumInformedGradientEnhancer import ( + AdaptiveQuantumInformedGradientEnhancer, + ) + + lama_register["AdaptiveQuantumInformedGradientEnhancer"] = AdaptiveQuantumInformedGradientEnhancer + LLAMAAdaptiveQuantumInformedGradientEnhancer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInformedGradientEnhancer" + ).set_name("LLAMAAdaptiveQuantumInformedGradientEnhancer", register=True) +except Exception as e: + print("AdaptiveQuantumInformedGradientEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLeapOptimizer import AdaptiveQuantumLeapOptimizer + + lama_register["AdaptiveQuantumLeapOptimizer"] = AdaptiveQuantumLeapOptimizer + LLAMAAdaptiveQuantumLeapOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLeapOptimizer" + ).set_name("LLAMAAdaptiveQuantumLeapOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumLeapOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialEnhancedOptimizer import ( + AdaptiveQuantumLevyDifferentialEnhancedOptimizer, + ) + + lama_register["AdaptiveQuantumLevyDifferentialEnhancedOptimizer"] = ( + AdaptiveQuantumLevyDifferentialEnhancedOptimizer + ) + LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDifferentialEnhancedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizer import ( + AdaptiveQuantumLevyDifferentialOptimizer, + ) + + lama_register["AdaptiveQuantumLevyDifferentialOptimizer"] = AdaptiveQuantumLevyDifferentialOptimizer + LLAMAAdaptiveQuantumLevyDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizerV2 import ( + AdaptiveQuantumLevyDifferentialOptimizerV2, + ) + + lama_register["AdaptiveQuantumLevyDifferentialOptimizerV2"] = AdaptiveQuantumLevyDifferentialOptimizerV2 + LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDifferentialOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 import ( + AdaptiveQuantumLevyDifferentialSwarmOptimizationV2, + ) + + lama_register["AdaptiveQuantumLevyDifferentialSwarmOptimizationV2"] = ( + AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 + ) + LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicOptimization import ( + AdaptiveQuantumLevyDynamicOptimization, + ) + + lama_register["AdaptiveQuantumLevyDynamicOptimization"] = AdaptiveQuantumLevyDynamicOptimization + LLAMAAdaptiveQuantumLevyDynamicOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDynamicOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimization import ( + AdaptiveQuantumLevyDynamicSwarmOptimization, + ) + + lama_register["AdaptiveQuantumLevyDynamicSwarmOptimization"] = AdaptiveQuantumLevyDynamicSwarmOptimization + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDynamicSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimizationV2 import ( + AdaptiveQuantumLevyDynamicSwarmOptimizationV2, + ) + + lama_register["AdaptiveQuantumLevyDynamicSwarmOptimizationV2"] = ( + AdaptiveQuantumLevyDynamicSwarmOptimizationV2 + ) + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2", register=True) +except Exception as e: + print("AdaptiveQuantumLevyDynamicSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyEnhancedDifferentialOptimizer import ( + AdaptiveQuantumLevyEnhancedDifferentialOptimizer, + ) + + lama_register["AdaptiveQuantumLevyEnhancedDifferentialOptimizer"] = ( + AdaptiveQuantumLevyEnhancedDifferentialOptimizer + ) + LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizer import ( + AdaptiveQuantumLevyMemeticOptimizer, + ) + + lama_register["AdaptiveQuantumLevyMemeticOptimizer"] = AdaptiveQuantumLevyMemeticOptimizer + LLAMAAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizerV2 import ( + AdaptiveQuantumLevyMemeticOptimizerV2, + ) + + lama_register["AdaptiveQuantumLevyMemeticOptimizerV2"] = AdaptiveQuantumLevyMemeticOptimizerV2 + LLAMAAdaptiveQuantumLevyMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizerV2", register=True) +except Exception as e: + print("AdaptiveQuantumLevyMemeticOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevySwarmOptimization import ( + AdaptiveQuantumLevySwarmOptimization, + ) + + lama_register["AdaptiveQuantumLevySwarmOptimization"] = AdaptiveQuantumLevySwarmOptimization + LLAMAAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevySwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumLevySwarmOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLevyTreeOptimization import ( + AdaptiveQuantumLevyTreeOptimization, + ) + + lama_register["AdaptiveQuantumLevyTreeOptimization"] = AdaptiveQuantumLevyTreeOptimization + LLAMAAdaptiveQuantumLevyTreeOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyTreeOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyTreeOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumLevyTreeOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumLocalSearch import AdaptiveQuantumLocalSearch + + lama_register["AdaptiveQuantumLocalSearch"] = AdaptiveQuantumLocalSearch + LLAMAAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch").set_name( + "LLAMAAdaptiveQuantumLocalSearch", register=True + ) +except Exception as e: + print("AdaptiveQuantumLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticEvolutionaryOptimizer import ( + AdaptiveQuantumMemeticEvolutionaryOptimizer, + ) + + lama_register["AdaptiveQuantumMemeticEvolutionaryOptimizer"] = AdaptiveQuantumMemeticEvolutionaryOptimizer + LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticGradientBoost import ( + AdaptiveQuantumMemeticGradientBoost, + ) + + lama_register["AdaptiveQuantumMemeticGradientBoost"] = AdaptiveQuantumMemeticGradientBoost + LLAMAAdaptiveQuantumMemeticGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticGradientBoost" + ).set_name("LLAMAAdaptiveQuantumMemeticGradientBoost", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizer import AdaptiveQuantumMemeticOptimizer + + lama_register["AdaptiveQuantumMemeticOptimizer"] = AdaptiveQuantumMemeticOptimizer + LLAMAAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerPlus import ( + AdaptiveQuantumMemeticOptimizerPlus, + ) + + lama_register["AdaptiveQuantumMemeticOptimizerPlus"] = AdaptiveQuantumMemeticOptimizerPlus + LLAMAAdaptiveQuantumMemeticOptimizerPlus = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerPlus" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerPlus", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticOptimizerPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV2 import ( + AdaptiveQuantumMemeticOptimizerV2, + ) + + lama_register["AdaptiveQuantumMemeticOptimizerV2"] = AdaptiveQuantumMemeticOptimizerV2 + LLAMAAdaptiveQuantumMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV2", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV3 import ( + AdaptiveQuantumMemeticOptimizerV3, + ) + + lama_register["AdaptiveQuantumMemeticOptimizerV3"] = AdaptiveQuantumMemeticOptimizerV3 + LLAMAAdaptiveQuantumMemeticOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerV3" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV3", register=True) +except Exception as e: + print("AdaptiveQuantumMemeticOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumMetaheuristic import AdaptiveQuantumMetaheuristic + + lama_register["AdaptiveQuantumMetaheuristic"] = AdaptiveQuantumMetaheuristic + LLAMAAdaptiveQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMetaheuristic" + ).set_name("LLAMAAdaptiveQuantumMetaheuristic", register=True) +except Exception as e: + print("AdaptiveQuantumMetaheuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumPSO import AdaptiveQuantumPSO + + lama_register["AdaptiveQuantumPSO"] = AdaptiveQuantumPSO + LLAMAAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO").set_name( + "LLAMAAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("AdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumPSOEnhanced import AdaptiveQuantumPSOEnhanced + + lama_register["AdaptiveQuantumPSOEnhanced"] = AdaptiveQuantumPSOEnhanced + LLAMAAdaptiveQuantumPSOEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced").set_name( + "LLAMAAdaptiveQuantumPSOEnhanced", register=True + ) +except Exception as e: + print("AdaptiveQuantumPSOEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumParticleDifferentialSwarm import ( + AdaptiveQuantumParticleDifferentialSwarm, + ) + + lama_register["AdaptiveQuantumParticleDifferentialSwarm"] = AdaptiveQuantumParticleDifferentialSwarm + LLAMAAdaptiveQuantumParticleDifferentialSwarm = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumParticleDifferentialSwarm" + ).set_name("LLAMAAdaptiveQuantumParticleDifferentialSwarm", register=True) +except Exception as e: + print("AdaptiveQuantumParticleDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumParticleSwarmOptimization import ( + AdaptiveQuantumParticleSwarmOptimization, + ) + + lama_register["AdaptiveQuantumParticleSwarmOptimization"] = AdaptiveQuantumParticleSwarmOptimization + LLAMAAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumParticleSwarmOptimization", register=True) +except Exception as e: + print("AdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumResonanceOptimizer import ( + AdaptiveQuantumResonanceOptimizer, + ) + + lama_register["AdaptiveQuantumResonanceOptimizer"] = AdaptiveQuantumResonanceOptimizer + LLAMAAdaptiveQuantumResonanceOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumResonanceOptimizer" + ).set_name("LLAMAAdaptiveQuantumResonanceOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumResonanceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumStrategicOptimizer import ( + AdaptiveQuantumStrategicOptimizer, + ) + + lama_register["AdaptiveQuantumStrategicOptimizer"] = AdaptiveQuantumStrategicOptimizer + LLAMAAdaptiveQuantumStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumStrategicOptimizer" + ).set_name("LLAMAAdaptiveQuantumStrategicOptimizer", register=True) +except Exception as e: + print("AdaptiveQuantumStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizationV2 import ( + AdaptiveQuantumSwarmOptimizationV2, + ) + + lama_register["AdaptiveQuantumSwarmOptimizationV2"] = AdaptiveQuantumSwarmOptimizationV2 + LLAMAAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("AdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizerV2 import AdaptiveQuantumSwarmOptimizerV2 + + lama_register["AdaptiveQuantumSwarmOptimizerV2"] = AdaptiveQuantumSwarmOptimizerV2 + LLAMAAdaptiveQuantumSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSwarmOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumSwarmOptimizerV2", register=True) +except Exception as e: + print("AdaptiveQuantumSwarmOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuantumSymbioticStrategy import AdaptiveQuantumSymbioticStrategy + + lama_register["AdaptiveQuantumSymbioticStrategy"] = AdaptiveQuantumSymbioticStrategy + LLAMAAdaptiveQuantumSymbioticStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSymbioticStrategy" + ).set_name("LLAMAAdaptiveQuantumSymbioticStrategy", register=True) +except Exception as e: + print("AdaptiveQuantumSymbioticStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuasiGradientEvolution import AdaptiveQuasiGradientEvolution + + lama_register["AdaptiveQuasiGradientEvolution"] = AdaptiveQuasiGradientEvolution + LLAMAAdaptiveQuasiGradientEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiGradientEvolution" + ).set_name("LLAMAAdaptiveQuasiGradientEvolution", register=True) +except Exception as e: + print("AdaptiveQuasiGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuasiRandomEnhancedDifferentialEvolution import ( + AdaptiveQuasiRandomEnhancedDifferentialEvolution, + ) + + lama_register["AdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( + AdaptiveQuasiRandomEnhancedDifferentialEvolution + ) + LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuasiRandomGradientDE import AdaptiveQuasiRandomGradientDE + + lama_register["AdaptiveQuasiRandomGradientDE"] = AdaptiveQuasiRandomGradientDE + LLAMAAdaptiveQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiRandomGradientDE" + ).set_name("LLAMAAdaptiveQuasiRandomGradientDE", register=True) +except Exception as e: + print("AdaptiveQuasiRandomGradientDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveQuorumWithStrategicMutation import ( + AdaptiveQuorumWithStrategicMutation, + ) + + lama_register["AdaptiveQuorumWithStrategicMutation"] = AdaptiveQuorumWithStrategicMutation + LLAMAAdaptiveQuorumWithStrategicMutation = NonObjectOptimizer( + method="LLAMAAdaptiveQuorumWithStrategicMutation" + ).set_name("LLAMAAdaptiveQuorumWithStrategicMutation", register=True) +except Exception as e: + print("AdaptiveQuorumWithStrategicMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRefinedGradientBoostedAnnealing import ( + AdaptiveRefinedGradientBoostedAnnealing, + ) + + lama_register["AdaptiveRefinedGradientBoostedAnnealing"] = AdaptiveRefinedGradientBoostedAnnealing + LLAMAAdaptiveRefinedGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveRefinedGradientBoostedAnnealing" + ).set_name("LLAMAAdaptiveRefinedGradientBoostedAnnealing", register=True) +except Exception as e: + print("AdaptiveRefinedGradientBoostedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRefinedHybridPSO_DE import AdaptiveRefinedHybridPSO_DE + + lama_register["AdaptiveRefinedHybridPSO_DE"] = AdaptiveRefinedHybridPSO_DE + LLAMAAdaptiveRefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE").set_name( + "LLAMAAdaptiveRefinedHybridPSO_DE", register=True + ) +except Exception as e: + print("AdaptiveRefinedHybridPSO_DE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRefinementEvolutiveStrategy import ( + AdaptiveRefinementEvolutiveStrategy, + ) + + lama_register["AdaptiveRefinementEvolutiveStrategy"] = AdaptiveRefinementEvolutiveStrategy + LLAMAAdaptiveRefinementEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveRefinementEvolutiveStrategy" + ).set_name("LLAMAAdaptiveRefinementEvolutiveStrategy", register=True) +except Exception as e: + print("AdaptiveRefinementEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRefinementPSO import AdaptiveRefinementPSO + + lama_register["AdaptiveRefinementPSO"] = AdaptiveRefinementPSO + LLAMAAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO").set_name( + "LLAMAAdaptiveRefinementPSO", register=True + ) +except Exception as e: + print("AdaptiveRefinementPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRefinementSearchStrategyV30 import ( + AdaptiveRefinementSearchStrategyV30, + ) + + lama_register["AdaptiveRefinementSearchStrategyV30"] = AdaptiveRefinementSearchStrategyV30 + LLAMAAdaptiveRefinementSearchStrategyV30 = NonObjectOptimizer( + method="LLAMAAdaptiveRefinementSearchStrategyV30" + ).set_name("LLAMAAdaptiveRefinementSearchStrategyV30", register=True) +except Exception as e: + print("AdaptiveRefinementSearchStrategyV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveResilientQuantumCrossoverStrategy import ( + AdaptiveResilientQuantumCrossoverStrategy, + ) + + lama_register["AdaptiveResilientQuantumCrossoverStrategy"] = AdaptiveResilientQuantumCrossoverStrategy + LLAMAAdaptiveResilientQuantumCrossoverStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveResilientQuantumCrossoverStrategy" + ).set_name("LLAMAAdaptiveResilientQuantumCrossoverStrategy", register=True) +except Exception as e: + print("AdaptiveResilientQuantumCrossoverStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRestartDE import AdaptiveRestartDE + + lama_register["AdaptiveRestartDE"] = AdaptiveRestartDE + LLAMAAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE").set_name( + "LLAMAAdaptiveRestartDE", register=True + ) +except Exception as e: + print("AdaptiveRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRestartHybridOptimizer import AdaptiveRestartHybridOptimizer + + lama_register["AdaptiveRestartHybridOptimizer"] = AdaptiveRestartHybridOptimizer + LLAMAAdaptiveRestartHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveRestartHybridOptimizer" + ).set_name("LLAMAAdaptiveRestartHybridOptimizer", register=True) +except Exception as e: + print("AdaptiveRestartHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveRotationalClimbOptimizer import AdaptiveRotationalClimbOptimizer + + lama_register["AdaptiveRotationalClimbOptimizer"] = AdaptiveRotationalClimbOptimizer + LLAMAAdaptiveRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveRotationalClimbOptimizer" + ).set_name("LLAMAAdaptiveRotationalClimbOptimizer", register=True) +except Exception as e: + print("AdaptiveRotationalClimbOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSigmaCrossoverEvolution import AdaptiveSigmaCrossoverEvolution + + lama_register["AdaptiveSigmaCrossoverEvolution"] = AdaptiveSigmaCrossoverEvolution + LLAMAAdaptiveSigmaCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSigmaCrossoverEvolution" + ).set_name("LLAMAAdaptiveSigmaCrossoverEvolution", register=True) +except Exception as e: + print("AdaptiveSigmaCrossoverEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSimulatedAnnealing import AdaptiveSimulatedAnnealing + + lama_register["AdaptiveSimulatedAnnealing"] = AdaptiveSimulatedAnnealing + LLAMAAdaptiveSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing").set_name( + "LLAMAAdaptiveSimulatedAnnealing", register=True + ) +except Exception as e: + print("AdaptiveSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingSearch import AdaptiveSimulatedAnnealingSearch + + lama_register["AdaptiveSimulatedAnnealingSearch"] = AdaptiveSimulatedAnnealingSearch + LLAMAAdaptiveSimulatedAnnealingSearch = NonObjectOptimizer( + method="LLAMAAdaptiveSimulatedAnnealingSearch" + ).set_name("LLAMAAdaptiveSimulatedAnnealingSearch", register=True) +except Exception as e: + print("AdaptiveSimulatedAnnealingSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingWithSmartMemory import ( + AdaptiveSimulatedAnnealingWithSmartMemory, + ) + + lama_register["AdaptiveSimulatedAnnealingWithSmartMemory"] = AdaptiveSimulatedAnnealingWithSmartMemory + LLAMAAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: + print("AdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSineCosineDifferentialEvolution import ( + AdaptiveSineCosineDifferentialEvolution, + ) + + lama_register["AdaptiveSineCosineDifferentialEvolution"] = AdaptiveSineCosineDifferentialEvolution + LLAMAAdaptiveSineCosineDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSineCosineDifferentialEvolution" + ).set_name("LLAMAAdaptiveSineCosineDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveSineCosineDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSinusoidalDifferentialSwarm import ( + AdaptiveSinusoidalDifferentialSwarm, + ) + + lama_register["AdaptiveSinusoidalDifferentialSwarm"] = AdaptiveSinusoidalDifferentialSwarm + LLAMAAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: + print("AdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSpatialExplorationOptimizer import ( + AdaptiveSpatialExplorationOptimizer, + ) + + lama_register["AdaptiveSpatialExplorationOptimizer"] = AdaptiveSpatialExplorationOptimizer + LLAMAAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveSpatialExplorationOptimizer" + ).set_name("LLAMAAdaptiveSpatialExplorationOptimizer", register=True) +except Exception as e: + print("AdaptiveSpatialExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSpiralGradientSearch import AdaptiveSpiralGradientSearch + + lama_register["AdaptiveSpiralGradientSearch"] = AdaptiveSpiralGradientSearch + LLAMAAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveSpiralGradientSearch" + ).set_name("LLAMAAdaptiveSpiralGradientSearch", register=True) +except Exception as e: + print("AdaptiveSpiralGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveStepSearch import AdaptiveStepSearch + + lama_register["AdaptiveStepSearch"] = AdaptiveStepSearch + LLAMAAdaptiveStepSearch = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch").set_name( + "LLAMAAdaptiveStepSearch", register=True + ) +except Exception as e: + print("AdaptiveStepSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveStochasticGradientQuorumOptimization import ( + AdaptiveStochasticGradientQuorumOptimization, + ) + + lama_register["AdaptiveStochasticGradientQuorumOptimization"] = ( + AdaptiveStochasticGradientQuorumOptimization + ) + LLAMAAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveStochasticGradientQuorumOptimization" + ).set_name("LLAMAAdaptiveStochasticGradientQuorumOptimization", register=True) +except Exception as e: + print("AdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveStochasticHybridEvolution import ( + AdaptiveStochasticHybridEvolution, + ) + + lama_register["AdaptiveStochasticHybridEvolution"] = AdaptiveStochasticHybridEvolution + LLAMAAdaptiveStochasticHybridEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveStochasticHybridEvolution" + ).set_name("LLAMAAdaptiveStochasticHybridEvolution", register=True) +except Exception as e: + print("AdaptiveStochasticHybridEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveStochasticTunneling import AdaptiveStochasticTunneling + + lama_register["AdaptiveStochasticTunneling"] = AdaptiveStochasticTunneling + LLAMAAdaptiveStochasticTunneling = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling").set_name( + "LLAMAAdaptiveStochasticTunneling", register=True + ) +except Exception as e: + print("AdaptiveStochasticTunneling can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveStrategicExplorationOptimizer import ( + AdaptiveStrategicExplorationOptimizer, + ) + + lama_register["AdaptiveStrategicExplorationOptimizer"] = AdaptiveStrategicExplorationOptimizer + LLAMAAdaptiveStrategicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveStrategicExplorationOptimizer" + ).set_name("LLAMAAdaptiveStrategicExplorationOptimizer", register=True) +except Exception as e: + print("AdaptiveStrategicExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSwarmDifferentialEvolution import ( + AdaptiveSwarmDifferentialEvolution, + ) + + lama_register["AdaptiveSwarmDifferentialEvolution"] = AdaptiveSwarmDifferentialEvolution + LLAMAAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: + print("AdaptiveSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSwarmGradientOptimization import ( + AdaptiveSwarmGradientOptimization, + ) + + lama_register["AdaptiveSwarmGradientOptimization"] = AdaptiveSwarmGradientOptimization + LLAMAAdaptiveSwarmGradientOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmGradientOptimization" + ).set_name("LLAMAAdaptiveSwarmGradientOptimization", register=True) +except Exception as e: + print("AdaptiveSwarmGradientOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSwarmHarmonicOptimizationV4 import ( + AdaptiveSwarmHarmonicOptimizationV4, + ) + + lama_register["AdaptiveSwarmHarmonicOptimizationV4"] = AdaptiveSwarmHarmonicOptimizationV4 + LLAMAAdaptiveSwarmHarmonicOptimizationV4 = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmHarmonicOptimizationV4" + ).set_name("LLAMAAdaptiveSwarmHarmonicOptimizationV4", register=True) +except Exception as e: + print("AdaptiveSwarmHarmonicOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveSwarmHybridOptimization import AdaptiveSwarmHybridOptimization + + lama_register["AdaptiveSwarmHybridOptimization"] = AdaptiveSwarmHybridOptimization + LLAMAAdaptiveSwarmHybridOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmHybridOptimization" + ).set_name("LLAMAAdaptiveSwarmHybridOptimization", register=True) +except Exception as e: + print("AdaptiveSwarmHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdaptiveThresholdDifferentialStrategy import ( + AdaptiveThresholdDifferentialStrategy, + ) + + lama_register["AdaptiveThresholdDifferentialStrategy"] = AdaptiveThresholdDifferentialStrategy + LLAMAAdaptiveThresholdDifferentialStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveThresholdDifferentialStrategy" + ).set_name("LLAMAAdaptiveThresholdDifferentialStrategy", register=True) +except Exception as e: + print("AdaptiveThresholdDifferentialStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveDifferentialEvolution import ( + AdvancedAdaptiveDifferentialEvolution, + ) + + lama_register["AdvancedAdaptiveDifferentialEvolution"] = AdvancedAdaptiveDifferentialEvolution + LLAMAAdvancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAAdvancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("AdvancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveDualPhaseStrategy import ( + AdvancedAdaptiveDualPhaseStrategy, + ) + + lama_register["AdvancedAdaptiveDualPhaseStrategy"] = AdvancedAdaptiveDualPhaseStrategy + LLAMAAdvancedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDualPhaseStrategy" + ).set_name("LLAMAAdvancedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: + print("AdvancedAdaptiveDualPhaseStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMemoryStrategyV64 import ( + AdvancedAdaptiveDynamicMemoryStrategyV64, + ) + + lama_register["AdvancedAdaptiveDynamicMemoryStrategyV64"] = AdvancedAdaptiveDynamicMemoryStrategyV64 + LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64" + ).set_name("LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64", register=True) +except Exception as e: + print("AdvancedAdaptiveDynamicMemoryStrategyV64 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationExploitationAlgorithm import ( + AdvancedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["AdvancedAdaptiveExplorationExploitationAlgorithm"] = ( + AdvancedAdaptiveExplorationExploitationAlgorithm + ) + LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: + print("AdvancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationOptimizationAlgorithm import ( + AdvancedAdaptiveExplorationOptimizationAlgorithm, + ) + + lama_register["AdvancedAdaptiveExplorationOptimizationAlgorithm"] = ( + AdvancedAdaptiveExplorationOptimizationAlgorithm + ) + LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm", register=True) +except Exception as e: + print("AdvancedAdaptiveExplorationOptimizationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveFireworkAlgorithm import ( + AdvancedAdaptiveFireworkAlgorithm, + ) + + lama_register["AdvancedAdaptiveFireworkAlgorithm"] = AdvancedAdaptiveFireworkAlgorithm + LLAMAAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("AdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveGlobalClimbingOptimizerV6 import ( + AdvancedAdaptiveGlobalClimbingOptimizerV6, + ) + + lama_register["AdvancedAdaptiveGlobalClimbingOptimizerV6"] = AdvancedAdaptiveGlobalClimbingOptimizerV6 + LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6" + ).set_name("LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6", register=True) +except Exception as e: + print("AdvancedAdaptiveGlobalClimbingOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveGradientBoostedMemoryExploration import ( + AdvancedAdaptiveGradientBoostedMemoryExploration, + ) + + lama_register["AdvancedAdaptiveGradientBoostedMemoryExploration"] = ( + AdvancedAdaptiveGradientBoostedMemoryExploration + ) + LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration" + ).set_name("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration", register=True) +except Exception as e: + print("AdvancedAdaptiveGradientBoostedMemoryExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveGradientHybridOptimizer import ( + AdvancedAdaptiveGradientHybridOptimizer, + ) + + lama_register["AdvancedAdaptiveGradientHybridOptimizer"] = AdvancedAdaptiveGradientHybridOptimizer + LLAMAAdvancedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGradientHybridOptimizer" + ).set_name("LLAMAAdvancedAdaptiveGradientHybridOptimizer", register=True) +except Exception as e: + print("AdvancedAdaptiveGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV56 import ( + AdvancedAdaptiveMemoryEnhancedStrategyV56, + ) + + lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV56"] = AdvancedAdaptiveMemoryEnhancedStrategyV56 + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56" + ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56", register=True) +except Exception as e: + print("AdvancedAdaptiveMemoryEnhancedStrategyV56 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV73 import ( + AdvancedAdaptiveMemoryEnhancedStrategyV73, + ) + + lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV73"] = AdvancedAdaptiveMemoryEnhancedStrategyV73 + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73" + ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73", register=True) +except Exception as e: + print("AdvancedAdaptiveMemoryEnhancedStrategyV73 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryGuidedStrategyV77 import ( + AdvancedAdaptiveMemoryGuidedStrategyV77, + ) + + lama_register["AdvancedAdaptiveMemoryGuidedStrategyV77"] = AdvancedAdaptiveMemoryGuidedStrategyV77 + LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77" + ).set_name("LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77", register=True) +except Exception as e: + print("AdvancedAdaptiveMemoryGuidedStrategyV77 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveMemorySimulatedAnnealing import ( + AdvancedAdaptiveMemorySimulatedAnnealing, + ) + + lama_register["AdvancedAdaptiveMemorySimulatedAnnealing"] = AdvancedAdaptiveMemorySimulatedAnnealing + LLAMAAdvancedAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdvancedAdaptiveMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptivePSO import AdvancedAdaptivePSO + + lama_register["AdvancedAdaptivePSO"] = AdvancedAdaptivePSO + LLAMAAdvancedAdaptivePSO = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO").set_name( + "LLAMAAdvancedAdaptivePSO", register=True + ) +except Exception as e: + print("AdvancedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumEntropyDE import AdvancedAdaptiveQuantumEntropyDE + + lama_register["AdvancedAdaptiveQuantumEntropyDE"] = AdvancedAdaptiveQuantumEntropyDE + LLAMAAdvancedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumEntropyDE" + ).set_name("LLAMAAdvancedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: + print("AdvancedAdaptiveQuantumEntropyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumLevyOptimizer import ( + AdvancedAdaptiveQuantumLevyOptimizer, + ) + + lama_register["AdvancedAdaptiveQuantumLevyOptimizer"] = AdvancedAdaptiveQuantumLevyOptimizer + LLAMAAdvancedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAAdvancedAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: + print("AdvancedAdaptiveQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV1 import ( + AdvancedAdaptiveQuantumSwarmOptimizationV1, + ) + + lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV1"] = AdvancedAdaptiveQuantumSwarmOptimizationV1 + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1" + ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1", register=True) +except Exception as e: + print("AdvancedAdaptiveQuantumSwarmOptimizationV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV2 import ( + AdvancedAdaptiveQuantumSwarmOptimizationV2, + ) + + lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV2"] = AdvancedAdaptiveQuantumSwarmOptimizationV2 + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("AdvancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAdaptiveStrategyOptimizer import ( + AdvancedAdaptiveStrategyOptimizer, + ) + + lama_register["AdvancedAdaptiveStrategyOptimizer"] = AdvancedAdaptiveStrategyOptimizer + LLAMAAdvancedAdaptiveStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveStrategyOptimizer" + ).set_name("LLAMAAdvancedAdaptiveStrategyOptimizer", register=True) +except Exception as e: + print("AdvancedAdaptiveStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedAttenuatedAdaptiveEvolver import ( + AdvancedAttenuatedAdaptiveEvolver, + ) + + lama_register["AdvancedAttenuatedAdaptiveEvolver"] = AdvancedAttenuatedAdaptiveEvolver + LLAMAAdvancedAttenuatedAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAAdvancedAttenuatedAdaptiveEvolver" + ).set_name("LLAMAAdvancedAttenuatedAdaptiveEvolver", register=True) +except Exception as e: + print("AdvancedAttenuatedAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedBalancedAdaptiveElitistStrategyV2 import ( + AdvancedBalancedAdaptiveElitistStrategyV2, + ) + + lama_register["AdvancedBalancedAdaptiveElitistStrategyV2"] = AdvancedBalancedAdaptiveElitistStrategyV2 + LLAMAAdvancedBalancedAdaptiveElitistStrategyV2 = NonObjectOptimizer( + method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2" + ).set_name("LLAMAAdvancedBalancedAdaptiveElitistStrategyV2", register=True) +except Exception as e: + print("AdvancedBalancedAdaptiveElitistStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedBalancedExplorationOptimizer import ( + AdvancedBalancedExplorationOptimizer, + ) + + lama_register["AdvancedBalancedExplorationOptimizer"] = AdvancedBalancedExplorationOptimizer + LLAMAAdvancedBalancedExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedBalancedExplorationOptimizer" + ).set_name("LLAMAAdvancedBalancedExplorationOptimizer", register=True) +except Exception as e: + print("AdvancedBalancedExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRate import ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRate, + ) + + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRate"] = ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRate + ) + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate" + ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate", register=True) +except Exception as e: + print("AdvancedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 import ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2, + ) + + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2"] = ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 + ) + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2" + ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2", register=True) +except Exception as e: + print("AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDifferentialParticleSwarmOptimization import ( + AdvancedDifferentialParticleSwarmOptimization, + ) + + lama_register["AdvancedDifferentialParticleSwarmOptimization"] = ( + AdvancedDifferentialParticleSwarmOptimization + ) + LLAMAAdvancedDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialParticleSwarmOptimization" + ).set_name("LLAMAAdvancedDifferentialParticleSwarmOptimization", register=True) +except Exception as e: + print("AdvancedDifferentialParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDimensionalCyclicCrossoverEvolver import ( + AdvancedDimensionalCyclicCrossoverEvolver, + ) + + lama_register["AdvancedDimensionalCyclicCrossoverEvolver"] = AdvancedDimensionalCyclicCrossoverEvolver + LLAMAAdvancedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( + method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver" + ).set_name("LLAMAAdvancedDimensionalCyclicCrossoverEvolver", register=True) +except Exception as e: + print("AdvancedDimensionalCyclicCrossoverEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDimensionalFeedbackEvolver import ( + AdvancedDimensionalFeedbackEvolver, + ) + + lama_register["AdvancedDimensionalFeedbackEvolver"] = AdvancedDimensionalFeedbackEvolver + LLAMAAdvancedDimensionalFeedbackEvolver = NonObjectOptimizer( + method="LLAMAAdvancedDimensionalFeedbackEvolver" + ).set_name("LLAMAAdvancedDimensionalFeedbackEvolver", register=True) +except Exception as e: + print("AdvancedDimensionalFeedbackEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDiversityAdaptiveDE import AdvancedDiversityAdaptiveDE + + lama_register["AdvancedDiversityAdaptiveDE"] = AdvancedDiversityAdaptiveDE + LLAMAAdvancedDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE").set_name( + "LLAMAAdvancedDiversityAdaptiveDE", register=True + ) +except Exception as e: + print("AdvancedDiversityAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDiversityDE import AdvancedDiversityDE + + lama_register["AdvancedDiversityDE"] = AdvancedDiversityDE + LLAMAAdvancedDiversityDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE").set_name( + "LLAMAAdvancedDiversityDE", register=True + ) +except Exception as e: + print("AdvancedDiversityDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDualStrategyAdaptiveDE import AdvancedDualStrategyAdaptiveDE + + lama_register["AdvancedDualStrategyAdaptiveDE"] = AdvancedDualStrategyAdaptiveDE + LLAMAAdvancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedDualStrategyAdaptiveDE" + ).set_name("LLAMAAdvancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("AdvancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDualStrategyHybridDE import AdvancedDualStrategyHybridDE + + lama_register["AdvancedDualStrategyHybridDE"] = AdvancedDualStrategyHybridDE + LLAMAAdvancedDualStrategyHybridDE = NonObjectOptimizer( + method="LLAMAAdvancedDualStrategyHybridDE" + ).set_name("LLAMAAdvancedDualStrategyHybridDE", register=True) +except Exception as e: + print("AdvancedDualStrategyHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridOptimizer import ( + AdvancedDynamicAdaptiveHybridOptimizer, + ) + + lama_register["AdvancedDynamicAdaptiveHybridOptimizer"] = AdvancedDynamicAdaptiveHybridOptimizer + LLAMAAdvancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAAdvancedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("AdvancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicCrowdedDE import AdvancedDynamicCrowdedDE + + lama_register["AdvancedDynamicCrowdedDE"] = AdvancedDynamicCrowdedDE + LLAMAAdvancedDynamicCrowdedDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE").set_name( + "LLAMAAdvancedDynamicCrowdedDE", register=True + ) +except Exception as e: + print("AdvancedDynamicCrowdedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicDualPhaseStrategyV37 import ( + AdvancedDynamicDualPhaseStrategyV37, + ) + + lama_register["AdvancedDynamicDualPhaseStrategyV37"] = AdvancedDynamicDualPhaseStrategyV37 + LLAMAAdvancedDynamicDualPhaseStrategyV37 = NonObjectOptimizer( + method="LLAMAAdvancedDynamicDualPhaseStrategyV37" + ).set_name("LLAMAAdvancedDynamicDualPhaseStrategyV37", register=True) +except Exception as e: + print("AdvancedDynamicDualPhaseStrategyV37 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicExplorationOptimizer import ( + AdvancedDynamicExplorationOptimizer, + ) + + lama_register["AdvancedDynamicExplorationOptimizer"] = AdvancedDynamicExplorationOptimizer + LLAMAAdvancedDynamicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicExplorationOptimizer" + ).set_name("LLAMAAdvancedDynamicExplorationOptimizer", register=True) +except Exception as e: + print("AdvancedDynamicExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicFireworkAlgorithm import AdvancedDynamicFireworkAlgorithm + + lama_register["AdvancedDynamicFireworkAlgorithm"] = AdvancedDynamicFireworkAlgorithm + LLAMAAdvancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedDynamicFireworkAlgorithm" + ).set_name("LLAMAAdvancedDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("AdvancedDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicGradientBoostedMemorySimulatedAnnealing import ( + AdvancedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdvancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + AdvancedDynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdvancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicHybridOptimization import ( + AdvancedDynamicHybridOptimization, + ) + + lama_register["AdvancedDynamicHybridOptimization"] = AdvancedDynamicHybridOptimization + LLAMAAdvancedDynamicHybridOptimization = NonObjectOptimizer( + method="LLAMAAdvancedDynamicHybridOptimization" + ).set_name("LLAMAAdvancedDynamicHybridOptimization", register=True) +except Exception as e: + print("AdvancedDynamicHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicHybridOptimizer import AdvancedDynamicHybridOptimizer + + lama_register["AdvancedDynamicHybridOptimizer"] = AdvancedDynamicHybridOptimizer + LLAMAAdvancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicHybridOptimizer" + ).set_name("LLAMAAdvancedDynamicHybridOptimizer", register=True) +except Exception as e: + print("AdvancedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicMultimodalSimulatedAnnealing import ( + AdvancedDynamicMultimodalSimulatedAnnealing, + ) + + lama_register["AdvancedDynamicMultimodalSimulatedAnnealing"] = AdvancedDynamicMultimodalSimulatedAnnealing + LLAMAAdvancedDynamicMultimodalSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing" + ).set_name("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing", register=True) +except Exception as e: + print("AdvancedDynamicMultimodalSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedDynamicStrategyAdaptiveDE import ( + AdvancedDynamicStrategyAdaptiveDE, + ) + + lama_register["AdvancedDynamicStrategyAdaptiveDE"] = AdvancedDynamicStrategyAdaptiveDE + LLAMAAdvancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedDynamicStrategyAdaptiveDE" + ).set_name("LLAMAAdvancedDynamicStrategyAdaptiveDE", register=True) +except Exception as e: + print("AdvancedDynamicStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEliteAdaptiveCrowdingHybridOptimizer import ( + AdvancedEliteAdaptiveCrowdingHybridOptimizer, + ) + + lama_register["AdvancedEliteAdaptiveCrowdingHybridOptimizer"] = ( + AdvancedEliteAdaptiveCrowdingHybridOptimizer + ) + LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer" + ).set_name("LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer", register=True) +except Exception as e: + print("AdvancedEliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEliteDynamicHybridOptimizer import ( + AdvancedEliteDynamicHybridOptimizer, + ) + + lama_register["AdvancedEliteDynamicHybridOptimizer"] = AdvancedEliteDynamicHybridOptimizer + LLAMAAdvancedEliteDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEliteDynamicHybridOptimizer" + ).set_name("LLAMAAdvancedEliteDynamicHybridOptimizer", register=True) +except Exception as e: + print("AdvancedEliteDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveFireworkAlgorithm import ( + AdvancedEnhancedAdaptiveFireworkAlgorithm, + ) + + lama_register["AdvancedEnhancedAdaptiveFireworkAlgorithm"] = AdvancedEnhancedAdaptiveFireworkAlgorithm + LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("AdvancedEnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveMetaNetAQAPSO import ( + AdvancedEnhancedAdaptiveMetaNetAQAPSO, + ) + + lama_register["AdvancedEnhancedAdaptiveMetaNetAQAPSO"] = AdvancedEnhancedAdaptiveMetaNetAQAPSO + LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: + print("AdvancedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 import ( + AdvancedEnhancedDifferentialEvolutionLocalSearch_v55, + ) + + lama_register["AdvancedEnhancedDifferentialEvolutionLocalSearch_v55"] = ( + AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 + ) + LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55" + ).set_name("LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55", register=True) +except Exception as e: + print("AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedEnhancedGuidedMassQGSA_v69 import ( + AdvancedEnhancedEnhancedGuidedMassQGSA_v69, + ) + + lama_register["AdvancedEnhancedEnhancedGuidedMassQGSA_v69"] = AdvancedEnhancedEnhancedGuidedMassQGSA_v69 + LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69" + ).set_name("LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69", register=True) +except Exception as e: + print("AdvancedEnhancedEnhancedGuidedMassQGSA_v69 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedGuidedMassQGSA_v65 import ( + AdvancedEnhancedGuidedMassQGSA_v65, + ) + + lama_register["AdvancedEnhancedGuidedMassQGSA_v65"] = AdvancedEnhancedGuidedMassQGSA_v65 + LLAMAAdvancedEnhancedGuidedMassQGSA_v65 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65" + ).set_name("LLAMAAdvancedEnhancedGuidedMassQGSA_v65", register=True) +except Exception as e: + print("AdvancedEnhancedGuidedMassQGSA_v65 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizer import ( + AdvancedEnhancedHybridMetaHeuristicOptimizer, + ) + + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizer"] = ( + AdvancedEnhancedHybridMetaHeuristicOptimizer + ) + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: + print("AdvancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizerV16 import ( + AdvancedEnhancedHybridMetaHeuristicOptimizerV16, + ) + + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizerV16"] = ( + AdvancedEnhancedHybridMetaHeuristicOptimizerV16 + ) + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16" + ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16", register=True) +except Exception as e: + print("AdvancedEnhancedHybridMetaHeuristicOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedExplorativeConvergenceEnhancer import ( + AdvancedExplorativeConvergenceEnhancer, + ) + + lama_register["AdvancedExplorativeConvergenceEnhancer"] = AdvancedExplorativeConvergenceEnhancer + LLAMAAdvancedExplorativeConvergenceEnhancer = NonObjectOptimizer( + method="LLAMAAdvancedExplorativeConvergenceEnhancer" + ).set_name("LLAMAAdvancedExplorativeConvergenceEnhancer", register=True) +except Exception as e: + print("AdvancedExplorativeConvergenceEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedFireworkAlgorithmWithAdaptiveMutation import ( + AdvancedFireworkAlgorithmWithAdaptiveMutation, + ) + + lama_register["AdvancedFireworkAlgorithmWithAdaptiveMutation"] = ( + AdvancedFireworkAlgorithmWithAdaptiveMutation + ) + LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( + method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation" + ).set_name("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation", register=True) +except Exception as e: + print("AdvancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedFocusedAdaptiveOptimizer import AdvancedFocusedAdaptiveOptimizer + + lama_register["AdvancedFocusedAdaptiveOptimizer"] = AdvancedFocusedAdaptiveOptimizer + LLAMAAdvancedFocusedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedFocusedAdaptiveOptimizer" + ).set_name("LLAMAAdvancedFocusedAdaptiveOptimizer", register=True) +except Exception as e: + print("AdvancedFocusedAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedGlobalClimbingOptimizerV4 import ( + AdvancedGlobalClimbingOptimizerV4, + ) + + lama_register["AdvancedGlobalClimbingOptimizerV4"] = AdvancedGlobalClimbingOptimizerV4 + LLAMAAdvancedGlobalClimbingOptimizerV4 = NonObjectOptimizer( + method="LLAMAAdvancedGlobalClimbingOptimizerV4" + ).set_name("LLAMAAdvancedGlobalClimbingOptimizerV4", register=True) +except Exception as e: + print("AdvancedGlobalClimbingOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedGlobalStructureAwareOptimizerV3 import ( + AdvancedGlobalStructureAwareOptimizerV3, + ) + + lama_register["AdvancedGlobalStructureAwareOptimizerV3"] = AdvancedGlobalStructureAwareOptimizerV3 + LLAMAAdvancedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdvancedGlobalStructureAwareOptimizerV3" + ).set_name("LLAMAAdvancedGlobalStructureAwareOptimizerV3", register=True) +except Exception as e: + print("AdvancedGlobalStructureAwareOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration import ( + AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration, + ) + + lama_register["AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration"] = ( + AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration + ) + LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration = NonObjectOptimizer( + method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration" + ).set_name("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration", register=True) +except Exception as e: + print("AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategy import ( + AdvancedGradientEvolutionStrategy, + ) + + lama_register["AdvancedGradientEvolutionStrategy"] = AdvancedGradientEvolutionStrategy + LLAMAAdvancedGradientEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdvancedGradientEvolutionStrategy" + ).set_name("LLAMAAdvancedGradientEvolutionStrategy", register=True) +except Exception as e: + print("AdvancedGradientEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategyV2 import ( + AdvancedGradientEvolutionStrategyV2, + ) + + lama_register["AdvancedGradientEvolutionStrategyV2"] = AdvancedGradientEvolutionStrategyV2 + LLAMAAdvancedGradientEvolutionStrategyV2 = NonObjectOptimizer( + method="LLAMAAdvancedGradientEvolutionStrategyV2" + ).set_name("LLAMAAdvancedGradientEvolutionStrategyV2", register=True) +except Exception as e: + print("AdvancedGradientEvolutionStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHarmonyMemeticOptimization import ( + AdvancedHarmonyMemeticOptimization, + ) + + lama_register["AdvancedHarmonyMemeticOptimization"] = AdvancedHarmonyMemeticOptimization + LLAMAAdvancedHarmonyMemeticOptimization = NonObjectOptimizer( + method="LLAMAAdvancedHarmonyMemeticOptimization" + ).set_name("LLAMAAdvancedHarmonyMemeticOptimization", register=True) +except Exception as e: + print("AdvancedHarmonyMemeticOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHarmonySearch import AdvancedHarmonySearch + + lama_register["AdvancedHarmonySearch"] = AdvancedHarmonySearch + LLAMAAdvancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch").set_name( + "LLAMAAdvancedHarmonySearch", register=True + ) +except Exception as e: + print("AdvancedHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridAdaptiveDE import AdvancedHybridAdaptiveDE + + lama_register["AdvancedHybridAdaptiveDE"] = AdvancedHybridAdaptiveDE + LLAMAAdvancedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE").set_name( + "LLAMAAdvancedHybridAdaptiveDE", register=True + ) +except Exception as e: + print("AdvancedHybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridAdaptiveOptimization import ( + AdvancedHybridAdaptiveOptimization, + ) + + lama_register["AdvancedHybridAdaptiveOptimization"] = AdvancedHybridAdaptiveOptimization + LLAMAAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAAdvancedHybridAdaptiveOptimization" + ).set_name("LLAMAAdvancedHybridAdaptiveOptimization", register=True) +except Exception as e: + print("AdvancedHybridAdaptiveOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( + AdvancedHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["AdvancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 + ) + LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: + print("AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithAdaptiveRestarts import ( + AdvancedHybridDEPSOWithAdaptiveRestarts, + ) + + lama_register["AdvancedHybridDEPSOWithAdaptiveRestarts"] = AdvancedHybridDEPSOWithAdaptiveRestarts + LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts" + ).set_name("LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts", register=True) +except Exception as e: + print("AdvancedHybridDEPSOWithAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithDynamicAdaptationAndRestart import ( + AdvancedHybridDEPSOWithDynamicAdaptationAndRestart, + ) + + lama_register["AdvancedHybridDEPSOWithDynamicAdaptationAndRestart"] = ( + AdvancedHybridDEPSOWithDynamicAdaptationAndRestart + ) + LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart = NonObjectOptimizer( + method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart" + ).set_name("LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart", register=True) +except Exception as e: + print("AdvancedHybridDEPSOWithDynamicAdaptationAndRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridExplorationExploitationOptimizer import ( + AdvancedHybridExplorationExploitationOptimizer, + ) + + lama_register["AdvancedHybridExplorationExploitationOptimizer"] = ( + AdvancedHybridExplorationExploitationOptimizer + ) + LLAMAAdvancedHybridExplorationExploitationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedHybridExplorationExploitationOptimizer" + ).set_name("LLAMAAdvancedHybridExplorationExploitationOptimizer", register=True) +except Exception as e: + print("AdvancedHybridExplorationExploitationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridLocalOptimizationDE import ( + AdvancedHybridLocalOptimizationDE, + ) + + lama_register["AdvancedHybridLocalOptimizationDE"] = AdvancedHybridLocalOptimizationDE + LLAMAAdvancedHybridLocalOptimizationDE = NonObjectOptimizer( + method="LLAMAAdvancedHybridLocalOptimizationDE" + ).set_name("LLAMAAdvancedHybridLocalOptimizationDE", register=True) +except Exception as e: + print("AdvancedHybridLocalOptimizationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridMetaHeuristicOptimizer import ( + AdvancedHybridMetaHeuristicOptimizer, + ) + + lama_register["AdvancedHybridMetaHeuristicOptimizer"] = AdvancedHybridMetaHeuristicOptimizer + LLAMAAdvancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: + print("AdvancedHybridMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridMetaheuristic import AdvancedHybridMetaheuristic + + lama_register["AdvancedHybridMetaheuristic"] = AdvancedHybridMetaheuristic + LLAMAAdvancedHybridMetaheuristic = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic").set_name( + "LLAMAAdvancedHybridMetaheuristic", register=True + ) +except Exception as e: + print("AdvancedHybridMetaheuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridOptimization import AdvancedHybridOptimization + + lama_register["AdvancedHybridOptimization"] = AdvancedHybridOptimization + LLAMAAdvancedHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization").set_name( + "LLAMAAdvancedHybridOptimization", register=True + ) +except Exception as e: + print("AdvancedHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridOptimizer import AdvancedHybridOptimizer + + lama_register["AdvancedHybridOptimizer"] = AdvancedHybridOptimizer + LLAMAAdvancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer").set_name( + "LLAMAAdvancedHybridOptimizer", register=True + ) +except Exception as e: + print("AdvancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridQuantumAdaptiveDE import AdvancedHybridQuantumAdaptiveDE + + lama_register["AdvancedHybridQuantumAdaptiveDE"] = AdvancedHybridQuantumAdaptiveDE + LLAMAAdvancedHybridQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedHybridQuantumAdaptiveDE" + ).set_name("LLAMAAdvancedHybridQuantumAdaptiveDE", register=True) +except Exception as e: + print("AdvancedHybridQuantumAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithAdaptiveMemory import ( + AdvancedHybridSimulatedAnnealingWithAdaptiveMemory, + ) + + lama_register["AdvancedHybridSimulatedAnnealingWithAdaptiveMemory"] = ( + AdvancedHybridSimulatedAnnealingWithAdaptiveMemory + ) + LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory = NonObjectOptimizer( + method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory" + ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory", register=True) +except Exception as e: + print("AdvancedHybridSimulatedAnnealingWithAdaptiveMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithGuidedExploration import ( + AdvancedHybridSimulatedAnnealingWithGuidedExploration, + ) + + lama_register["AdvancedHybridSimulatedAnnealingWithGuidedExploration"] = ( + AdvancedHybridSimulatedAnnealingWithGuidedExploration + ) + LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration = NonObjectOptimizer( + method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration" + ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration", register=True) +except Exception as e: + print("AdvancedHybridSimulatedAnnealingWithGuidedExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedImprovedMetaHeuristicOptimizer import ( + AdvancedImprovedMetaHeuristicOptimizer, + ) + + lama_register["AdvancedImprovedMetaHeuristicOptimizer"] = AdvancedImprovedMetaHeuristicOptimizer + LLAMAAdvancedImprovedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedImprovedMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedImprovedMetaHeuristicOptimizer", register=True) +except Exception as e: + print("AdvancedImprovedMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV5 import ( + AdvancedIslandEvolutionStrategyV5, + ) + + lama_register["AdvancedIslandEvolutionStrategyV5"] = AdvancedIslandEvolutionStrategyV5 + LLAMAAdvancedIslandEvolutionStrategyV5 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV5" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV5", register=True) +except Exception as e: + print("AdvancedIslandEvolutionStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV8 import ( + AdvancedIslandEvolutionStrategyV8, + ) + + lama_register["AdvancedIslandEvolutionStrategyV8"] = AdvancedIslandEvolutionStrategyV8 + LLAMAAdvancedIslandEvolutionStrategyV8 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV8" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV8", register=True) +except Exception as e: + print("AdvancedIslandEvolutionStrategyV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV9 import ( + AdvancedIslandEvolutionStrategyV9, + ) + + lama_register["AdvancedIslandEvolutionStrategyV9"] = AdvancedIslandEvolutionStrategyV9 + LLAMAAdvancedIslandEvolutionStrategyV9 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV9" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV9", register=True) +except Exception as e: + print("AdvancedIslandEvolutionStrategyV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMemeticQuantumDifferentialOptimizer import ( + AdvancedMemeticQuantumDifferentialOptimizer, + ) + + lama_register["AdvancedMemeticQuantumDifferentialOptimizer"] = AdvancedMemeticQuantumDifferentialOptimizer + LLAMAAdvancedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer" + ).set_name("LLAMAAdvancedMemeticQuantumDifferentialOptimizer", register=True) +except Exception as e: + print("AdvancedMemeticQuantumDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMemoryAdaptiveStrategyV50 import ( + AdvancedMemoryAdaptiveStrategyV50, + ) + + lama_register["AdvancedMemoryAdaptiveStrategyV50"] = AdvancedMemoryAdaptiveStrategyV50 + LLAMAAdvancedMemoryAdaptiveStrategyV50 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryAdaptiveStrategyV50" + ).set_name("LLAMAAdvancedMemoryAdaptiveStrategyV50", register=True) +except Exception as e: + print("AdvancedMemoryAdaptiveStrategyV50 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMemoryEnhancedHybridOptimizer import ( + AdvancedMemoryEnhancedHybridOptimizer, + ) + + lama_register["AdvancedMemoryEnhancedHybridOptimizer"] = AdvancedMemoryEnhancedHybridOptimizer + LLAMAAdvancedMemoryEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMemoryEnhancedHybridOptimizer" + ).set_name("LLAMAAdvancedMemoryEnhancedHybridOptimizer", register=True) +except Exception as e: + print("AdvancedMemoryEnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMemoryGuidedAdaptiveStrategyV68 import ( + AdvancedMemoryGuidedAdaptiveStrategyV68, + ) + + lama_register["AdvancedMemoryGuidedAdaptiveStrategyV68"] = AdvancedMemoryGuidedAdaptiveStrategyV68 + LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68" + ).set_name("LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68", register=True) +except Exception as e: + print("AdvancedMemoryGuidedAdaptiveStrategyV68 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMemoryGuidedDualStrategyV80 import ( + AdvancedMemoryGuidedDualStrategyV80, + ) + + lama_register["AdvancedMemoryGuidedDualStrategyV80"] = AdvancedMemoryGuidedDualStrategyV80 + LLAMAAdvancedMemoryGuidedDualStrategyV80 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryGuidedDualStrategyV80" + ).set_name("LLAMAAdvancedMemoryGuidedDualStrategyV80", register=True) +except Exception as e: + print("AdvancedMemoryGuidedDualStrategyV80 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMultiModalAdaptiveOptimizer import ( + AdvancedMultiModalAdaptiveOptimizer, + ) + + lama_register["AdvancedMultiModalAdaptiveOptimizer"] = AdvancedMultiModalAdaptiveOptimizer + LLAMAAdvancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMultiModalAdaptiveOptimizer" + ).set_name("LLAMAAdvancedMultiModalAdaptiveOptimizer", register=True) +except Exception as e: + print("AdvancedMultiModalAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedMultiStrategySelfAdaptiveDE import ( + AdvancedMultiStrategySelfAdaptiveDE, + ) + + lama_register["AdvancedMultiStrategySelfAdaptiveDE"] = AdvancedMultiStrategySelfAdaptiveDE + LLAMAAdvancedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAAdvancedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: + print("AdvancedMultiStrategySelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedNicheDifferentialParticleSwarmOptimizer import ( + AdvancedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["AdvancedNicheDifferentialParticleSwarmOptimizer"] = ( + AdvancedNicheDifferentialParticleSwarmOptimizer + ) + LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("AdvancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: + print("AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedOptimalHybridDifferentialAnnealingOptimizer import ( + AdvancedOptimalHybridDifferentialAnnealingOptimizer, + ) + + lama_register["AdvancedOptimalHybridDifferentialAnnealingOptimizer"] = ( + AdvancedOptimalHybridDifferentialAnnealingOptimizer + ) + LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer" + ).set_name("LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer", register=True) +except Exception as e: + print("AdvancedOptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedParallelDifferentialEvolution import ( + AdvancedParallelDifferentialEvolution, + ) + + lama_register["AdvancedParallelDifferentialEvolution"] = AdvancedParallelDifferentialEvolution + LLAMAAdvancedParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedParallelDifferentialEvolution" + ).set_name("LLAMAAdvancedParallelDifferentialEvolution", register=True) +except Exception as e: + print("AdvancedParallelDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedPrecisionEvolver import AdvancedPrecisionEvolver + + lama_register["AdvancedPrecisionEvolver"] = AdvancedPrecisionEvolver + LLAMAAdvancedPrecisionEvolver = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver").set_name( + "LLAMAAdvancedPrecisionEvolver", register=True + ) +except Exception as e: + print("AdvancedPrecisionEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedPrecisionGuidedStrategy import AdvancedPrecisionGuidedStrategy + + lama_register["AdvancedPrecisionGuidedStrategy"] = AdvancedPrecisionGuidedStrategy + LLAMAAdvancedPrecisionGuidedStrategy = NonObjectOptimizer( + method="LLAMAAdvancedPrecisionGuidedStrategy" + ).set_name("LLAMAAdvancedPrecisionGuidedStrategy", register=True) +except Exception as e: + print("AdvancedPrecisionGuidedStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumCognitionTrajectoryOptimizerV29 import ( + AdvancedQuantumCognitionTrajectoryOptimizerV29, + ) + + lama_register["AdvancedQuantumCognitionTrajectoryOptimizerV29"] = ( + AdvancedQuantumCognitionTrajectoryOptimizerV29 + ) + LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29 = NonObjectOptimizer( + method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29" + ).set_name("LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29", register=True) +except Exception as e: + print("AdvancedQuantumCognitionTrajectoryOptimizerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumControlledDiversityStrategy import ( + AdvancedQuantumControlledDiversityStrategy, + ) + + lama_register["AdvancedQuantumControlledDiversityStrategy"] = AdvancedQuantumControlledDiversityStrategy + LLAMAAdvancedQuantumControlledDiversityStrategy = NonObjectOptimizer( + method="LLAMAAdvancedQuantumControlledDiversityStrategy" + ).set_name("LLAMAAdvancedQuantumControlledDiversityStrategy", register=True) +except Exception as e: + print("AdvancedQuantumControlledDiversityStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumCrossoverOptimizer import ( + AdvancedQuantumCrossoverOptimizer, + ) + + lama_register["AdvancedQuantumCrossoverOptimizer"] = AdvancedQuantumCrossoverOptimizer + LLAMAAdvancedQuantumCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumCrossoverOptimizer" + ).set_name("LLAMAAdvancedQuantumCrossoverOptimizer", register=True) +except Exception as e: + print("AdvancedQuantumCrossoverOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart import ( + AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart, + ) + + lama_register["AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart"] = ( + AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart + ) + LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart = NonObjectOptimizer( + method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart" + ).set_name( + "LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart", register=True + ) +except Exception as e: + print( + "AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.AdvancedQuantumGradientDescent import AdvancedQuantumGradientDescent + + lama_register["AdvancedQuantumGradientDescent"] = AdvancedQuantumGradientDescent + LLAMAAdvancedQuantumGradientDescent = NonObjectOptimizer( + method="LLAMAAdvancedQuantumGradientDescent" + ).set_name("LLAMAAdvancedQuantumGradientDescent", register=True) +except Exception as e: + print("AdvancedQuantumGradientDescent can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumGradientExplorationOptimization import ( + AdvancedQuantumGradientExplorationOptimization, + ) + + lama_register["AdvancedQuantumGradientExplorationOptimization"] = ( + AdvancedQuantumGradientExplorationOptimization + ) + LLAMAAdvancedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumGradientExplorationOptimization" + ).set_name("LLAMAAdvancedQuantumGradientExplorationOptimization", register=True) +except Exception as e: + print("AdvancedQuantumGradientExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumHarmonicFeedbackOptimizer import ( + AdvancedQuantumHarmonicFeedbackOptimizer, + ) + + lama_register["AdvancedQuantumHarmonicFeedbackOptimizer"] = AdvancedQuantumHarmonicFeedbackOptimizer + LLAMAAdvancedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAAdvancedQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: + print("AdvancedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumInfusedAdaptiveStrategyV3 import ( + AdvancedQuantumInfusedAdaptiveStrategyV3, + ) + + lama_register["AdvancedQuantumInfusedAdaptiveStrategyV3"] = AdvancedQuantumInfusedAdaptiveStrategyV3 + LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3 = NonObjectOptimizer( + method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3" + ).set_name("LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3", register=True) +except Exception as e: + print("AdvancedQuantumInfusedAdaptiveStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumMemeticDifferentialEvolution import ( + AdvancedQuantumMemeticDifferentialEvolution, + ) + + lama_register["AdvancedQuantumMemeticDifferentialEvolution"] = AdvancedQuantumMemeticDifferentialEvolution + LLAMAAdvancedQuantumMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedQuantumMemeticDifferentialEvolution" + ).set_name("LLAMAAdvancedQuantumMemeticDifferentialEvolution", register=True) +except Exception as e: + print("AdvancedQuantumMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumStateCrossoverOptimization import ( + AdvancedQuantumStateCrossoverOptimization, + ) + + lama_register["AdvancedQuantumStateCrossoverOptimization"] = AdvancedQuantumStateCrossoverOptimization + LLAMAAdvancedQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumStateCrossoverOptimization" + ).set_name("LLAMAAdvancedQuantumStateCrossoverOptimization", register=True) +except Exception as e: + print("AdvancedQuantumStateCrossoverOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumSwarmOptimization import AdvancedQuantumSwarmOptimization + + lama_register["AdvancedQuantumSwarmOptimization"] = AdvancedQuantumSwarmOptimization + LLAMAAdvancedQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumSwarmOptimization" + ).set_name("LLAMAAdvancedQuantumSwarmOptimization", register=True) +except Exception as e: + print("AdvancedQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedQuantumVelocityOptimizer import AdvancedQuantumVelocityOptimizer + + lama_register["AdvancedQuantumVelocityOptimizer"] = AdvancedQuantumVelocityOptimizer + LLAMAAdvancedQuantumVelocityOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumVelocityOptimizer" + ).set_name("LLAMAAdvancedQuantumVelocityOptimizer", register=True) +except Exception as e: + print("AdvancedQuantumVelocityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRAMEDSv6 import AdvancedRAMEDSv6 + + lama_register["AdvancedRAMEDSv6"] = AdvancedRAMEDSv6 + LLAMAAdvancedRAMEDSv6 = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6").set_name( + "LLAMAAdvancedRAMEDSv6", register=True + ) +except Exception as e: + print("AdvancedRAMEDSv6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedAdaptiveMemoryEnhancedSearch import ( + AdvancedRefinedAdaptiveMemoryEnhancedSearch, + ) + + lama_register["AdvancedRefinedAdaptiveMemoryEnhancedSearch"] = AdvancedRefinedAdaptiveMemoryEnhancedSearch + LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: + print("AdvancedRefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: + print("AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: + print("AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( + AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( + AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer + ) + LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedAnnealing import ( + AdvancedRefinedGradientBoostedAnnealing, + ) + + lama_register["AdvancedRefinedGradientBoostedAnnealing"] = AdvancedRefinedGradientBoostedAnnealing + LLAMAAdvancedRefinedGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedAnnealing", register=True) +except Exception as e: + print("AdvancedRefinedGradientBoostedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemoryAnnealing import ( + AdvancedRefinedGradientBoostedMemoryAnnealing, + ) + + lama_register["AdvancedRefinedGradientBoostedMemoryAnnealing"] = ( + AdvancedRefinedGradientBoostedMemoryAnnealing + ) + LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("AdvancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemorySimulatedAnnealing import ( + AdvancedRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdvancedRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + AdvancedRefinedGradientBoostedMemorySimulatedAnnealing + ) + LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("AdvancedRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedHybridEvolutionaryAnnealingOptimizer import ( + AdvancedRefinedHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["AdvancedRefinedHybridEvolutionaryAnnealingOptimizer"] = ( + AdvancedRefinedHybridEvolutionaryAnnealingOptimizer + ) + LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: + print("AdvancedRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 import ( + AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51, + ) + + lama_register["AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51"] = ( + AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 + ) + LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 = NonObjectOptimizer( + method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51" + ).set_name("LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51", register=True) +except Exception as e: + print("AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedRAMEDSPro import AdvancedRefinedRAMEDSPro + + lama_register["AdvancedRefinedRAMEDSPro"] = AdvancedRefinedRAMEDSPro + LLAMAAdvancedRefinedRAMEDSPro = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro").set_name( + "LLAMAAdvancedRefinedRAMEDSPro", register=True + ) +except Exception as e: + print("AdvancedRefinedRAMEDSPro can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedSpiralSearchOptimizer import ( + AdvancedRefinedSpiralSearchOptimizer, + ) + + lama_register["AdvancedRefinedSpiralSearchOptimizer"] = AdvancedRefinedSpiralSearchOptimizer + LLAMAAdvancedRefinedSpiralSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedSpiralSearchOptimizer" + ).set_name("LLAMAAdvancedRefinedSpiralSearchOptimizer", register=True) +except Exception as e: + print("AdvancedRefinedSpiralSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 import ( + AdvancedRefinedUltraEvolutionaryGradientOptimizerV29, + ) + + lama_register["AdvancedRefinedUltraEvolutionaryGradientOptimizerV29"] = ( + AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 + ) + LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29 = NonObjectOptimizer( + method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29" + ).set_name("LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29", register=True) +except Exception as e: + print("AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v2 import AdvancedSelfAdaptiveDE_v2 + + lama_register["AdvancedSelfAdaptiveDE_v2"] = AdvancedSelfAdaptiveDE_v2 + LLAMAAdvancedSelfAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2").set_name( + "LLAMAAdvancedSelfAdaptiveDE_v2", register=True + ) +except Exception as e: + print("AdvancedSelfAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v3 import AdvancedSelfAdaptiveDE_v3 + + lama_register["AdvancedSelfAdaptiveDE_v3"] = AdvancedSelfAdaptiveDE_v3 + LLAMAAdvancedSelfAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3").set_name( + "LLAMAAdvancedSelfAdaptiveDE_v3", register=True + ) +except Exception as e: + print("AdvancedSelfAdaptiveDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedSpatialAdaptiveConvergenceOptimizer import ( + AdvancedSpatialAdaptiveConvergenceOptimizer, + ) + + lama_register["AdvancedSpatialAdaptiveConvergenceOptimizer"] = AdvancedSpatialAdaptiveConvergenceOptimizer + LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer" + ).set_name("LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer", register=True) +except Exception as e: + print("AdvancedSpatialAdaptiveConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedSpatialGradientOptimizer import AdvancedSpatialGradientOptimizer + + lama_register["AdvancedSpatialGradientOptimizer"] = AdvancedSpatialGradientOptimizer + LLAMAAdvancedSpatialGradientOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedSpatialGradientOptimizer" + ).set_name("LLAMAAdvancedSpatialGradientOptimizer", register=True) +except Exception as e: + print("AdvancedSpatialGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AdvancedStrategicHybridDE import AdvancedStrategicHybridDE + + lama_register["AdvancedStrategicHybridDE"] = AdvancedStrategicHybridDE + LLAMAAdvancedStrategicHybridDE = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE").set_name( + "LLAMAAdvancedStrategicHybridDE", register=True + ) +except Exception as e: + print("AdvancedStrategicHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ArchiveEnhancedAdaptiveDE import ArchiveEnhancedAdaptiveDE + + lama_register["ArchiveEnhancedAdaptiveDE"] = ArchiveEnhancedAdaptiveDE + LLAMAArchiveEnhancedAdaptiveDE = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE").set_name( + "LLAMAArchiveEnhancedAdaptiveDE", register=True + ) +except Exception as e: + print("ArchiveEnhancedAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.AttenuatedAdaptiveEvolver import AttenuatedAdaptiveEvolver + + lama_register["AttenuatedAdaptiveEvolver"] = AttenuatedAdaptiveEvolver + LLAMAAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver").set_name( + "LLAMAAttenuatedAdaptiveEvolver", register=True + ) +except Exception as e: + print("AttenuatedAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedAdaptiveMemeticDE import BalancedAdaptiveMemeticDE + + lama_register["BalancedAdaptiveMemeticDE"] = BalancedAdaptiveMemeticDE + LLAMABalancedAdaptiveMemeticDE = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE").set_name( + "LLAMABalancedAdaptiveMemeticDE", register=True + ) +except Exception as e: + print("BalancedAdaptiveMemeticDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedCulturalDifferentialEvolution import ( + BalancedCulturalDifferentialEvolution, + ) + + lama_register["BalancedCulturalDifferentialEvolution"] = BalancedCulturalDifferentialEvolution + LLAMABalancedCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMABalancedCulturalDifferentialEvolution" + ).set_name("LLAMABalancedCulturalDifferentialEvolution", register=True) +except Exception as e: + print("BalancedCulturalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedDualStrategyAdaptiveDE import BalancedDualStrategyAdaptiveDE + + lama_register["BalancedDualStrategyAdaptiveDE"] = BalancedDualStrategyAdaptiveDE + LLAMABalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMABalancedDualStrategyAdaptiveDE" + ).set_name("LLAMABalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("BalancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedDynamicQuantumLevySwarm import BalancedDynamicQuantumLevySwarm + + lama_register["BalancedDynamicQuantumLevySwarm"] = BalancedDynamicQuantumLevySwarm + LLAMABalancedDynamicQuantumLevySwarm = NonObjectOptimizer( + method="LLAMABalancedDynamicQuantumLevySwarm" + ).set_name("LLAMABalancedDynamicQuantumLevySwarm", register=True) +except Exception as e: + print("BalancedDynamicQuantumLevySwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedQuantumLevyDifferentialSearch import ( + BalancedQuantumLevyDifferentialSearch, + ) + + lama_register["BalancedQuantumLevyDifferentialSearch"] = BalancedQuantumLevyDifferentialSearch + LLAMABalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMABalancedQuantumLevyDifferentialSearch" + ).set_name("LLAMABalancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: + print("BalancedQuantumLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BalancedQuantumLevySwarmOptimization import ( + BalancedQuantumLevySwarmOptimization, + ) + + lama_register["BalancedQuantumLevySwarmOptimization"] = BalancedQuantumLevySwarmOptimization + LLAMABalancedQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMABalancedQuantumLevySwarmOptimization" + ).set_name("LLAMABalancedQuantumLevySwarmOptimization", register=True) +except Exception as e: + print("BalancedQuantumLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.BayesianAdaptiveMemeticSearch import BayesianAdaptiveMemeticSearch + + lama_register["BayesianAdaptiveMemeticSearch"] = BayesianAdaptiveMemeticSearch + LLAMABayesianAdaptiveMemeticSearch = NonObjectOptimizer( + method="LLAMABayesianAdaptiveMemeticSearch" + ).set_name("LLAMABayesianAdaptiveMemeticSearch", register=True) +except Exception as e: + print("BayesianAdaptiveMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CAMSQSOB import CAMSQSOB + + lama_register["CAMSQSOB"] = CAMSQSOB + LLAMACAMSQSOB = NonObjectOptimizer(method="LLAMACAMSQSOB").set_name("LLAMACAMSQSOB", register=True) +except Exception as e: + print("CAMSQSOB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CGES import CGES + + lama_register["CGES"] = CGES + LLAMACGES = NonObjectOptimizer(method="LLAMACGES").set_name("LLAMACGES", register=True) +except Exception as e: + print("CGES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CMADifferentialEvolutionPSO import CMADifferentialEvolutionPSO + + lama_register["CMADifferentialEvolutionPSO"] = CMADifferentialEvolutionPSO + LLAMACMADifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO").set_name( + "LLAMACMADifferentialEvolutionPSO", register=True + ) +except Exception as e: + print("CMADifferentialEvolutionPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CMDEALX import CMDEALX + + lama_register["CMDEALX"] = CMDEALX + LLAMACMDEALX = NonObjectOptimizer(method="LLAMACMDEALX").set_name("LLAMACMDEALX", register=True) +except Exception as e: + print("CMDEALX can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ClusterAdaptiveQuantumLevyOptimizer import ( + ClusterAdaptiveQuantumLevyOptimizer, + ) + + lama_register["ClusterAdaptiveQuantumLevyOptimizer"] = ClusterAdaptiveQuantumLevyOptimizer + LLAMAClusterAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAClusterAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAClusterAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: + print("ClusterAdaptiveQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ClusterBasedAdaptiveDifferentialEvolution import ( + ClusterBasedAdaptiveDifferentialEvolution, + ) + + lama_register["ClusterBasedAdaptiveDifferentialEvolution"] = ClusterBasedAdaptiveDifferentialEvolution + LLAMAClusterBasedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAClusterBasedAdaptiveDifferentialEvolution" + ).set_name("LLAMAClusterBasedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ClusterBasedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ClusteredAdaptiveHybridPSODESimulatedAnnealing import ( + ClusteredAdaptiveHybridPSODESimulatedAnnealing, + ) + + lama_register["ClusteredAdaptiveHybridPSODESimulatedAnnealing"] = ( + ClusteredAdaptiveHybridPSODESimulatedAnnealing + ) + LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing = NonObjectOptimizer( + method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing" + ).set_name("LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing", register=True) +except Exception as e: + print("ClusteredAdaptiveHybridPSODESimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ClusteredDifferentialEvolutionWithLocalSearch import ( + ClusteredDifferentialEvolutionWithLocalSearch, + ) + + lama_register["ClusteredDifferentialEvolutionWithLocalSearch"] = ( + ClusteredDifferentialEvolutionWithLocalSearch + ) + LLAMAClusteredDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAClusteredDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAClusteredDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: + print("ClusteredDifferentialEvolutionWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CoevolutionaryDualPopulationSearch import ( + CoevolutionaryDualPopulationSearch, + ) + + lama_register["CoevolutionaryDualPopulationSearch"] = CoevolutionaryDualPopulationSearch + LLAMACoevolutionaryDualPopulationSearch = NonObjectOptimizer( + method="LLAMACoevolutionaryDualPopulationSearch" + ).set_name("LLAMACoevolutionaryDualPopulationSearch", register=True) +except Exception as e: + print("CoevolutionaryDualPopulationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CohortDiversityDrivenOptimization import ( + CohortDiversityDrivenOptimization, + ) + + lama_register["CohortDiversityDrivenOptimization"] = CohortDiversityDrivenOptimization + LLAMACohortDiversityDrivenOptimization = NonObjectOptimizer( + method="LLAMACohortDiversityDrivenOptimization" + ).set_name("LLAMACohortDiversityDrivenOptimization", register=True) +except Exception as e: + print("CohortDiversityDrivenOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CohortEvolutionWithDynamicSelection import ( + CohortEvolutionWithDynamicSelection, + ) + + lama_register["CohortEvolutionWithDynamicSelection"] = CohortEvolutionWithDynamicSelection + LLAMACohortEvolutionWithDynamicSelection = NonObjectOptimizer( + method="LLAMACohortEvolutionWithDynamicSelection" + ).set_name("LLAMACohortEvolutionWithDynamicSelection", register=True) +except Exception as e: + print("CohortEvolutionWithDynamicSelection can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConcentricConvergenceOptimizer import ConcentricConvergenceOptimizer + + lama_register["ConcentricConvergenceOptimizer"] = ConcentricConvergenceOptimizer + LLAMAConcentricConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAConcentricConvergenceOptimizer" + ).set_name("LLAMAConcentricConvergenceOptimizer", register=True) +except Exception as e: + print("ConcentricConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConcentricDiversityStrategy import ConcentricDiversityStrategy + + lama_register["ConcentricDiversityStrategy"] = ConcentricDiversityStrategy + LLAMAConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy").set_name( + "LLAMAConcentricDiversityStrategy", register=True + ) +except Exception as e: + print("ConcentricDiversityStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConcentricGradientDescentEvolver import ConcentricGradientDescentEvolver + + lama_register["ConcentricGradientDescentEvolver"] = ConcentricGradientDescentEvolver + LLAMAConcentricGradientDescentEvolver = NonObjectOptimizer( + method="LLAMAConcentricGradientDescentEvolver" + ).set_name("LLAMAConcentricGradientDescentEvolver", register=True) +except Exception as e: + print("ConcentricGradientDescentEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConcentricGradientEnhancedEvolver import ( + ConcentricGradientEnhancedEvolver, + ) + + lama_register["ConcentricGradientEnhancedEvolver"] = ConcentricGradientEnhancedEvolver + LLAMAConcentricGradientEnhancedEvolver = NonObjectOptimizer( + method="LLAMAConcentricGradientEnhancedEvolver" + ).set_name("LLAMAConcentricGradientEnhancedEvolver", register=True) +except Exception as e: + print("ConcentricGradientEnhancedEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConcentricQuantumCrossoverStrategyV4 import ( + ConcentricQuantumCrossoverStrategyV4, + ) + + lama_register["ConcentricQuantumCrossoverStrategyV4"] = ConcentricQuantumCrossoverStrategyV4 + LLAMAConcentricQuantumCrossoverStrategyV4 = NonObjectOptimizer( + method="LLAMAConcentricQuantumCrossoverStrategyV4" + ).set_name("LLAMAConcentricQuantumCrossoverStrategyV4", register=True) +except Exception as e: + print("ConcentricQuantumCrossoverStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConvergenceAcceleratedSpiralSearch import ( + ConvergenceAcceleratedSpiralSearch, + ) + + lama_register["ConvergenceAcceleratedSpiralSearch"] = ConvergenceAcceleratedSpiralSearch + LLAMAConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( + method="LLAMAConvergenceAcceleratedSpiralSearch" + ).set_name("LLAMAConvergenceAcceleratedSpiralSearch", register=True) +except Exception as e: + print("ConvergenceAcceleratedSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutionStrategy import ( + ConvergentAdaptiveEvolutionStrategy, + ) + + lama_register["ConvergentAdaptiveEvolutionStrategy"] = ConvergentAdaptiveEvolutionStrategy + LLAMAConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMAConvergentAdaptiveEvolutionStrategy" + ).set_name("LLAMAConvergentAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("ConvergentAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutiveStrategy import ( + ConvergentAdaptiveEvolutiveStrategy, + ) + + lama_register["ConvergentAdaptiveEvolutiveStrategy"] = ConvergentAdaptiveEvolutiveStrategy + LLAMAConvergentAdaptiveEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAConvergentAdaptiveEvolutiveStrategy" + ).set_name("LLAMAConvergentAdaptiveEvolutiveStrategy", register=True) +except Exception as e: + print("ConvergentAdaptiveEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeAdaptiveCulturalSearch import ( + CooperativeAdaptiveCulturalSearch, + ) + + lama_register["CooperativeAdaptiveCulturalSearch"] = CooperativeAdaptiveCulturalSearch + LLAMACooperativeAdaptiveCulturalSearch = NonObjectOptimizer( + method="LLAMACooperativeAdaptiveCulturalSearch" + ).set_name("LLAMACooperativeAdaptiveCulturalSearch", register=True) +except Exception as e: + print("CooperativeAdaptiveCulturalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeAdaptiveEvolutionaryOptimizer import ( + CooperativeAdaptiveEvolutionaryOptimizer, + ) + + lama_register["CooperativeAdaptiveEvolutionaryOptimizer"] = CooperativeAdaptiveEvolutionaryOptimizer + LLAMACooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMACooperativeAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMACooperativeAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: + print("CooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeCulturalAdaptiveSearch import ( + CooperativeCulturalAdaptiveSearch, + ) + + lama_register["CooperativeCulturalAdaptiveSearch"] = CooperativeCulturalAdaptiveSearch + LLAMACooperativeCulturalAdaptiveSearch = NonObjectOptimizer( + method="LLAMACooperativeCulturalAdaptiveSearch" + ).set_name("LLAMACooperativeCulturalAdaptiveSearch", register=True) +except Exception as e: + print("CooperativeCulturalAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeCulturalDifferentialSearch import ( + CooperativeCulturalDifferentialSearch, + ) + + lama_register["CooperativeCulturalDifferentialSearch"] = CooperativeCulturalDifferentialSearch + LLAMACooperativeCulturalDifferentialSearch = NonObjectOptimizer( + method="LLAMACooperativeCulturalDifferentialSearch" + ).set_name("LLAMACooperativeCulturalDifferentialSearch", register=True) +except Exception as e: + print("CooperativeCulturalDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeCulturalEvolutionStrategy import ( + CooperativeCulturalEvolutionStrategy, + ) + + lama_register["CooperativeCulturalEvolutionStrategy"] = CooperativeCulturalEvolutionStrategy + LLAMACooperativeCulturalEvolutionStrategy = NonObjectOptimizer( + method="LLAMACooperativeCulturalEvolutionStrategy" + ).set_name("LLAMACooperativeCulturalEvolutionStrategy", register=True) +except Exception as e: + print("CooperativeCulturalEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeEvolutionaryGradientSearch import ( + CooperativeEvolutionaryGradientSearch, + ) + + lama_register["CooperativeEvolutionaryGradientSearch"] = CooperativeEvolutionaryGradientSearch + LLAMACooperativeEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMACooperativeEvolutionaryGradientSearch" + ).set_name("LLAMACooperativeEvolutionaryGradientSearch", register=True) +except Exception as e: + print("CooperativeEvolutionaryGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CooperativeParticleSwarmOptimization import ( + CooperativeParticleSwarmOptimization, + ) + + lama_register["CooperativeParticleSwarmOptimization"] = CooperativeParticleSwarmOptimization + LLAMACooperativeParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMACooperativeParticleSwarmOptimization" + ).set_name("LLAMACooperativeParticleSwarmOptimization", register=True) +except Exception as e: + print("CooperativeParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CoordinatedAdaptiveHybridOptimizer import ( + CoordinatedAdaptiveHybridOptimizer, + ) + + lama_register["CoordinatedAdaptiveHybridOptimizer"] = CoordinatedAdaptiveHybridOptimizer + LLAMACoordinatedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMACoordinatedAdaptiveHybridOptimizer" + ).set_name("LLAMACoordinatedAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("CoordinatedAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CovarianceMatrixAdaptationDifferentialEvolution import ( + CovarianceMatrixAdaptationDifferentialEvolution, + ) + + lama_register["CovarianceMatrixAdaptationDifferentialEvolution"] = ( + CovarianceMatrixAdaptationDifferentialEvolution + ) + LLAMACovarianceMatrixAdaptationDifferentialEvolution = NonObjectOptimizer( + method="LLAMACovarianceMatrixAdaptationDifferentialEvolution" + ).set_name("LLAMACovarianceMatrixAdaptationDifferentialEvolution", register=True) +except Exception as e: + print("CovarianceMatrixAdaptationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CulturalAdaptiveDifferentialEvolution import ( + CulturalAdaptiveDifferentialEvolution, + ) + + lama_register["CulturalAdaptiveDifferentialEvolution"] = CulturalAdaptiveDifferentialEvolution + LLAMACulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMACulturalAdaptiveDifferentialEvolution" + ).set_name("LLAMACulturalAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("CulturalAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.CulturalGuidedDifferentialEvolution import ( + CulturalGuidedDifferentialEvolution, + ) + + lama_register["CulturalGuidedDifferentialEvolution"] = CulturalGuidedDifferentialEvolution + LLAMACulturalGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMACulturalGuidedDifferentialEvolution" + ).set_name("LLAMACulturalGuidedDifferentialEvolution", register=True) +except Exception as e: + print("CulturalGuidedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DADERC import DADERC + + lama_register["DADERC"] = DADERC + LLAMADADERC = NonObjectOptimizer(method="LLAMADADERC").set_name("LLAMADADERC", register=True) +except Exception as e: + print("DADERC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DADESM import DADESM + + lama_register["DADESM"] = DADESM + LLAMADADESM = NonObjectOptimizer(method="LLAMADADESM").set_name("LLAMADADESM", register=True) +except Exception as e: + print("DADESM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DADe import DADe + + lama_register["DADe"] = DADe + LLAMADADe = NonObjectOptimizer(method="LLAMADADe").set_name("LLAMADADe", register=True) +except Exception as e: + print("DADe can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DAEA import DAEA + + lama_register["DAEA"] = DAEA + LLAMADAEA = NonObjectOptimizer(method="LLAMADAEA").set_name("LLAMADAEA", register=True) +except Exception as e: + print("DAEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DAES import DAES + + lama_register["DAES"] = DAES + LLAMADAES = NonObjectOptimizer(method="LLAMADAES").set_name("LLAMADAES", register=True) +except Exception as e: + print("DAES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DAESF import DAESF + + lama_register["DAESF"] = DAESF + LLAMADAESF = NonObjectOptimizer(method="LLAMADAESF").set_name("LLAMADAESF", register=True) +except Exception as e: + print("DAESF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DASES import DASES + + lama_register["DASES"] = DASES + LLAMADASES = NonObjectOptimizer(method="LLAMADASES").set_name("LLAMADASES", register=True) +except Exception as e: + print("DASES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DASOGG import DASOGG + + lama_register["DASOGG"] = DASOGG + LLAMADASOGG = NonObjectOptimizer(method="LLAMADASOGG").set_name("LLAMADASOGG", register=True) +except Exception as e: + print("DASOGG can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DDCEA import DDCEA + + lama_register["DDCEA"] = DDCEA + LLAMADDCEA = NonObjectOptimizer(method="LLAMADDCEA").set_name("LLAMADDCEA", register=True) +except Exception as e: + print("DDCEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DDPO import DDPO + + lama_register["DDPO"] = DDPO + LLAMADDPO = NonObjectOptimizer(method="LLAMADDPO").set_name("LLAMADDPO", register=True) +except Exception as e: + print("DDPO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DEAMC import DEAMC + + lama_register["DEAMC"] = DEAMC + LLAMADEAMC = NonObjectOptimizer(method="LLAMADEAMC").set_name("LLAMADEAMC", register=True) +except Exception as e: + print("DEAMC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DEAMC_DSR import DEAMC_DSR + + lama_register["DEAMC_DSR"] = DEAMC_DSR + LLAMADEAMC_DSR = NonObjectOptimizer(method="LLAMADEAMC_DSR").set_name("LLAMADEAMC_DSR", register=True) +except Exception as e: + print("DEAMC_DSR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DEAMC_LSI import DEAMC_LSI + + lama_register["DEAMC_LSI"] = DEAMC_LSI + LLAMADEAMC_LSI = NonObjectOptimizer(method="LLAMADEAMC_LSI").set_name("LLAMADEAMC_LSI", register=True) +except Exception as e: + print("DEAMC_LSI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DEWithNelderMead import DEWithNelderMead + + lama_register["DEWithNelderMead"] = DEWithNelderMead + LLAMADEWithNelderMead = NonObjectOptimizer(method="LLAMADEWithNelderMead").set_name( + "LLAMADEWithNelderMead", register=True + ) +except Exception as e: + print("DEWithNelderMead can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DHDGE import DHDGE + + lama_register["DHDGE"] = DHDGE + LLAMADHDGE = NonObjectOptimizer(method="LLAMADHDGE").set_name("LLAMADHDGE", register=True) +except Exception as e: + print("DHDGE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DLASS import DLASS + + lama_register["DLASS"] = DLASS + LLAMADLASS = NonObjectOptimizer(method="LLAMADLASS").set_name("LLAMADLASS", register=True) +except Exception as e: + print("DLASS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DMDE import DMDE + + lama_register["DMDE"] = DMDE + LLAMADMDE = NonObjectOptimizer(method="LLAMADMDE").set_name("LLAMADMDE", register=True) +except Exception as e: + print("DMDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DMDESM import DMDESM + + lama_register["DMDESM"] = DMDESM + LLAMADMDESM = NonObjectOptimizer(method="LLAMADMDESM").set_name("LLAMADMDESM", register=True) +except Exception as e: + print("DMDESM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DMES import DMES + + lama_register["DMES"] = DMES + LLAMADMES = NonObjectOptimizer(method="LLAMADMES").set_name("LLAMADMES", register=True) +except Exception as e: + print("DMES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DNAS import DNAS + + lama_register["DNAS"] = DNAS + LLAMADNAS = NonObjectOptimizer(method="LLAMADNAS").set_name("LLAMADNAS", register=True) +except Exception as e: + print("DNAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DPADE import DPADE + + lama_register["DPADE"] = DPADE + LLAMADPADE = NonObjectOptimizer(method="LLAMADPADE").set_name("LLAMADPADE", register=True) +except Exception as e: + print("DPADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DPES import DPES + + lama_register["DPES"] = DPES + LLAMADPES = NonObjectOptimizer(method="LLAMADPES").set_name("LLAMADPES", register=True) +except Exception as e: + print("DPES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DSDE import DSDE + + lama_register["DSDE"] = DSDE + LLAMADSDE = NonObjectOptimizer(method="LLAMADSDE").set_name("LLAMADSDE", register=True) +except Exception as e: + print("DSDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DSEDES import DSEDES + + lama_register["DSEDES"] = DSEDES + LLAMADSEDES = NonObjectOptimizer(method="LLAMADSEDES").set_name("LLAMADSEDES", register=True) +except Exception as e: + print("DSEDES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionAdaptiveCrossover import ( + DifferentialEvolutionAdaptiveCrossover, + ) + + lama_register["DifferentialEvolutionAdaptiveCrossover"] = DifferentialEvolutionAdaptiveCrossover + LLAMADifferentialEvolutionAdaptiveCrossover = NonObjectOptimizer( + method="LLAMADifferentialEvolutionAdaptiveCrossover" + ).set_name("LLAMADifferentialEvolutionAdaptiveCrossover", register=True) +except Exception as e: + print("DifferentialEvolutionAdaptiveCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionAdaptivePSO import DifferentialEvolutionAdaptivePSO + + lama_register["DifferentialEvolutionAdaptivePSO"] = DifferentialEvolutionAdaptivePSO + LLAMADifferentialEvolutionAdaptivePSO = NonObjectOptimizer( + method="LLAMADifferentialEvolutionAdaptivePSO" + ).set_name("LLAMADifferentialEvolutionAdaptivePSO", register=True) +except Exception as e: + print("DifferentialEvolutionAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionHybrid import DifferentialEvolutionHybrid + + lama_register["DifferentialEvolutionHybrid"] = DifferentialEvolutionHybrid + LLAMADifferentialEvolutionHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid").set_name( + "LLAMADifferentialEvolutionHybrid", register=True + ) +except Exception as e: + print("DifferentialEvolutionHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionOptimizer import DifferentialEvolutionOptimizer + + lama_register["DifferentialEvolutionOptimizer"] = DifferentialEvolutionOptimizer + LLAMADifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMADifferentialEvolutionOptimizer" + ).set_name("LLAMADifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("DifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionPSOHybrid import DifferentialEvolutionPSOHybrid + + lama_register["DifferentialEvolutionPSOHybrid"] = DifferentialEvolutionPSOHybrid + LLAMADifferentialEvolutionPSOHybrid = NonObjectOptimizer( + method="LLAMADifferentialEvolutionPSOHybrid" + ).set_name("LLAMADifferentialEvolutionPSOHybrid", register=True) +except Exception as e: + print("DifferentialEvolutionPSOHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialEvolutionSearch import DifferentialEvolutionSearch + + lama_register["DifferentialEvolutionSearch"] = DifferentialEvolutionSearch + LLAMADifferentialEvolutionSearch = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch").set_name( + "LLAMADifferentialEvolutionSearch", register=True + ) +except Exception as e: + print("DifferentialEvolutionSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialFireworkAlgorithm import DifferentialFireworkAlgorithm + + lama_register["DifferentialFireworkAlgorithm"] = DifferentialFireworkAlgorithm + LLAMADifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADifferentialFireworkAlgorithm" + ).set_name("LLAMADifferentialFireworkAlgorithm", register=True) +except Exception as e: + print("DifferentialFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialGradientEvolutionStrategy import ( + DifferentialGradientEvolutionStrategy, + ) + + lama_register["DifferentialGradientEvolutionStrategy"] = DifferentialGradientEvolutionStrategy + LLAMADifferentialGradientEvolutionStrategy = NonObjectOptimizer( + method="LLAMADifferentialGradientEvolutionStrategy" + ).set_name("LLAMADifferentialGradientEvolutionStrategy", register=True) +except Exception as e: + print("DifferentialGradientEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialHarmonySearch import DifferentialHarmonySearch + + lama_register["DifferentialHarmonySearch"] = DifferentialHarmonySearch + LLAMADifferentialHarmonySearch = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch").set_name( + "LLAMADifferentialHarmonySearch", register=True + ) +except Exception as e: + print("DifferentialHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialMemeticAlgorithm import DifferentialMemeticAlgorithm + + lama_register["DifferentialMemeticAlgorithm"] = DifferentialMemeticAlgorithm + LLAMADifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADifferentialMemeticAlgorithm" + ).set_name("LLAMADifferentialMemeticAlgorithm", register=True) +except Exception as e: + print("DifferentialMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialQuantumMetaheuristic import DifferentialQuantumMetaheuristic + + lama_register["DifferentialQuantumMetaheuristic"] = DifferentialQuantumMetaheuristic + LLAMADifferentialQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMADifferentialQuantumMetaheuristic" + ).set_name("LLAMADifferentialQuantumMetaheuristic", register=True) +except Exception as e: + print("DifferentialQuantumMetaheuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DifferentialSimulatedAnnealingOptimizer import ( + DifferentialSimulatedAnnealingOptimizer, + ) + + lama_register["DifferentialSimulatedAnnealingOptimizer"] = DifferentialSimulatedAnnealingOptimizer + LLAMADifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( + method="LLAMADifferentialSimulatedAnnealingOptimizer" + ).set_name("LLAMADifferentialSimulatedAnnealingOptimizer", register=True) +except Exception as e: + print("DifferentialSimulatedAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolution import ( + DiversityEnhancedAdaptiveGradientEvolution, + ) + + lama_register["DiversityEnhancedAdaptiveGradientEvolution"] = DiversityEnhancedAdaptiveGradientEvolution + LLAMADiversityEnhancedAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADiversityEnhancedAdaptiveGradientEvolution" + ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolution", register=True) +except Exception as e: + print("DiversityEnhancedAdaptiveGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolutionV2 import ( + DiversityEnhancedAdaptiveGradientEvolutionV2, + ) + + lama_register["DiversityEnhancedAdaptiveGradientEvolutionV2"] = ( + DiversityEnhancedAdaptiveGradientEvolutionV2 + ) + LLAMADiversityEnhancedAdaptiveGradientEvolutionV2 = NonObjectOptimizer( + method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2" + ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolutionV2", register=True) +except Exception as e: + print("DiversityEnhancedAdaptiveGradientEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DolphinPodOptimization import DolphinPodOptimization + + lama_register["DolphinPodOptimization"] = DolphinPodOptimization + LLAMADolphinPodOptimization = NonObjectOptimizer(method="LLAMADolphinPodOptimization").set_name( + "LLAMADolphinPodOptimization", register=True + ) +except Exception as e: + print("DolphinPodOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualAdaptiveRestartDE import DualAdaptiveRestartDE + + lama_register["DualAdaptiveRestartDE"] = DualAdaptiveRestartDE + LLAMADualAdaptiveRestartDE = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE").set_name( + "LLAMADualAdaptiveRestartDE", register=True + ) +except Exception as e: + print("DualAdaptiveRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualAdaptiveSearch import DualAdaptiveSearch + + lama_register["DualAdaptiveSearch"] = DualAdaptiveSearch + LLAMADualAdaptiveSearch = NonObjectOptimizer(method="LLAMADualAdaptiveSearch").set_name( + "LLAMADualAdaptiveSearch", register=True + ) +except Exception as e: + print("DualAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualConvergenceEvolutiveStrategy import DualConvergenceEvolutiveStrategy + + lama_register["DualConvergenceEvolutiveStrategy"] = DualConvergenceEvolutiveStrategy + LLAMADualConvergenceEvolutiveStrategy = NonObjectOptimizer( + method="LLAMADualConvergenceEvolutiveStrategy" + ).set_name("LLAMADualConvergenceEvolutiveStrategy", register=True) +except Exception as e: + print("DualConvergenceEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualModeOptimization import DualModeOptimization + + lama_register["DualModeOptimization"] = DualModeOptimization + LLAMADualModeOptimization = NonObjectOptimizer(method="LLAMADualModeOptimization").set_name( + "LLAMADualModeOptimization", register=True + ) +except Exception as e: + print("DualModeOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseAdaptiveGradientEvolution import ( + DualPhaseAdaptiveGradientEvolution, + ) + + lama_register["DualPhaseAdaptiveGradientEvolution"] = DualPhaseAdaptiveGradientEvolution + LLAMADualPhaseAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveGradientEvolution" + ).set_name("LLAMADualPhaseAdaptiveGradientEvolution", register=True) +except Exception as e: + print("DualPhaseAdaptiveGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseAdaptiveHybridOptimizerV3 import ( + DualPhaseAdaptiveHybridOptimizerV3, + ) + + lama_register["DualPhaseAdaptiveHybridOptimizerV3"] = DualPhaseAdaptiveHybridOptimizerV3 + LLAMADualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveHybridOptimizerV3" + ).set_name("LLAMADualPhaseAdaptiveHybridOptimizerV3", register=True) +except Exception as e: + print("DualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolution import ( + DualPhaseAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolution"] = ( + DualPhaseAdaptiveMemeticDifferentialEvolution + ) + LLAMADualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("DualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolutionV2 import ( + DualPhaseAdaptiveMemeticDifferentialEvolutionV2, + ) + + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolutionV2"] = ( + DualPhaseAdaptiveMemeticDifferentialEvolutionV2 + ) + LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2" + ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2", register=True) +except Exception as e: + print("DualPhaseAdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced import ( + DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced, + ) + + lama_register["DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced"] = ( + DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced + ) + LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced" + ).set_name("LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced", register=True) +except Exception as e: + print("DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseDifferentialEvolution import DualPhaseDifferentialEvolution + + lama_register["DualPhaseDifferentialEvolution"] = DualPhaseDifferentialEvolution + LLAMADualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualPhaseDifferentialEvolution" + ).set_name("LLAMADualPhaseDifferentialEvolution", register=True) +except Exception as e: + print("DualPhaseDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseOptimizationStrategy import DualPhaseOptimizationStrategy + + lama_register["DualPhaseOptimizationStrategy"] = DualPhaseOptimizationStrategy + LLAMADualPhaseOptimizationStrategy = NonObjectOptimizer( + method="LLAMADualPhaseOptimizationStrategy" + ).set_name("LLAMADualPhaseOptimizationStrategy", register=True) +except Exception as e: + print("DualPhaseOptimizationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseQuantumMemeticSearch import DualPhaseQuantumMemeticSearch + + lama_register["DualPhaseQuantumMemeticSearch"] = DualPhaseQuantumMemeticSearch + LLAMADualPhaseQuantumMemeticSearch = NonObjectOptimizer( + method="LLAMADualPhaseQuantumMemeticSearch" + ).set_name("LLAMADualPhaseQuantumMemeticSearch", register=True) +except Exception as e: + print("DualPhaseQuantumMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPhaseRefinedQuantumLocalSearchOptimizer import ( + DualPhaseRefinedQuantumLocalSearchOptimizer, + ) + + lama_register["DualPhaseRefinedQuantumLocalSearchOptimizer"] = DualPhaseRefinedQuantumLocalSearchOptimizer + LLAMADualPhaseRefinedQuantumLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer" + ).set_name("LLAMADualPhaseRefinedQuantumLocalSearchOptimizer", register=True) +except Exception as e: + print("DualPhaseRefinedQuantumLocalSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPopulationADE import DualPopulationADE + + lama_register["DualPopulationADE"] = DualPopulationADE + LLAMADualPopulationADE = NonObjectOptimizer(method="LLAMADualPopulationADE").set_name( + "LLAMADualPopulationADE", register=True + ) +except Exception as e: + print("DualPopulationADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPopulationAdaptiveSearch import DualPopulationAdaptiveSearch + + lama_register["DualPopulationAdaptiveSearch"] = DualPopulationAdaptiveSearch + LLAMADualPopulationAdaptiveSearch = NonObjectOptimizer( + method="LLAMADualPopulationAdaptiveSearch" + ).set_name("LLAMADualPopulationAdaptiveSearch", register=True) +except Exception as e: + print("DualPopulationAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPopulationCovarianceMatrixGradientSearch import ( + DualPopulationCovarianceMatrixGradientSearch, + ) + + lama_register["DualPopulationCovarianceMatrixGradientSearch"] = ( + DualPopulationCovarianceMatrixGradientSearch + ) + LLAMADualPopulationCovarianceMatrixGradientSearch = NonObjectOptimizer( + method="LLAMADualPopulationCovarianceMatrixGradientSearch" + ).set_name("LLAMADualPopulationCovarianceMatrixGradientSearch", register=True) +except Exception as e: + print("DualPopulationCovarianceMatrixGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualPopulationEnhancedSearch import DualPopulationEnhancedSearch + + lama_register["DualPopulationEnhancedSearch"] = DualPopulationEnhancedSearch + LLAMADualPopulationEnhancedSearch = NonObjectOptimizer( + method="LLAMADualPopulationEnhancedSearch" + ).set_name("LLAMADualPopulationEnhancedSearch", register=True) +except Exception as e: + print("DualPopulationEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualStrategyAdaptiveDE import DualStrategyAdaptiveDE + + lama_register["DualStrategyAdaptiveDE"] = DualStrategyAdaptiveDE + LLAMADualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE").set_name( + "LLAMADualStrategyAdaptiveDE", register=True + ) +except Exception as e: + print("DualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualStrategyDifferentialEvolution import ( + DualStrategyDifferentialEvolution, + ) + + lama_register["DualStrategyDifferentialEvolution"] = DualStrategyDifferentialEvolution + LLAMADualStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualStrategyDifferentialEvolution" + ).set_name("LLAMADualStrategyDifferentialEvolution", register=True) +except Exception as e: + print("DualStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualStrategyOptimizer import DualStrategyOptimizer + + lama_register["DualStrategyOptimizer"] = DualStrategyOptimizer + LLAMADualStrategyOptimizer = NonObjectOptimizer(method="LLAMADualStrategyOptimizer").set_name( + "LLAMADualStrategyOptimizer", register=True + ) +except Exception as e: + print("DualStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DualStrategyQuantumEvolutionOptimizer import ( + DualStrategyQuantumEvolutionOptimizer, + ) + + lama_register["DualStrategyQuantumEvolutionOptimizer"] = DualStrategyQuantumEvolutionOptimizer + LLAMADualStrategyQuantumEvolutionOptimizer = NonObjectOptimizer( + method="LLAMADualStrategyQuantumEvolutionOptimizer" + ).set_name("LLAMADualStrategyQuantumEvolutionOptimizer", register=True) +except Exception as e: + print("DualStrategyQuantumEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveClimbingStrategy import DynamicAdaptiveClimbingStrategy + + lama_register["DynamicAdaptiveClimbingStrategy"] = DynamicAdaptiveClimbingStrategy + LLAMADynamicAdaptiveClimbingStrategy = NonObjectOptimizer( + method="LLAMADynamicAdaptiveClimbingStrategy" + ).set_name("LLAMADynamicAdaptiveClimbingStrategy", register=True) +except Exception as e: + print("DynamicAdaptiveClimbingStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveCohortOptimization import ( + DynamicAdaptiveCohortOptimization, + ) + + lama_register["DynamicAdaptiveCohortOptimization"] = DynamicAdaptiveCohortOptimization + LLAMADynamicAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveCohortOptimization" + ).set_name("LLAMADynamicAdaptiveCohortOptimization", register=True) +except Exception as e: + print("DynamicAdaptiveCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveEliteHybridOptimizer import ( + DynamicAdaptiveEliteHybridOptimizer, + ) + + lama_register["DynamicAdaptiveEliteHybridOptimizer"] = DynamicAdaptiveEliteHybridOptimizer + LLAMADynamicAdaptiveEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveEliteHybridOptimizer" + ).set_name("LLAMADynamicAdaptiveEliteHybridOptimizer", register=True) +except Exception as e: + print("DynamicAdaptiveEliteHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveEnhancedDifferentialEvolution import ( + DynamicAdaptiveEnhancedDifferentialEvolution, + ) + + lama_register["DynamicAdaptiveEnhancedDifferentialEvolution"] = ( + DynamicAdaptiveEnhancedDifferentialEvolution + ) + LLAMADynamicAdaptiveEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("DynamicAdaptiveEnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimization import ( + DynamicAdaptiveExplorationOptimization, + ) + + lama_register["DynamicAdaptiveExplorationOptimization"] = DynamicAdaptiveExplorationOptimization + LLAMADynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveExplorationOptimization" + ).set_name("LLAMADynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("DynamicAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimizer import ( + DynamicAdaptiveExplorationOptimizer, + ) + + lama_register["DynamicAdaptiveExplorationOptimizer"] = DynamicAdaptiveExplorationOptimizer + LLAMADynamicAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveExplorationOptimizer" + ).set_name("LLAMADynamicAdaptiveExplorationOptimizer", register=True) +except Exception as e: + print("DynamicAdaptiveExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveFireworkAlgorithm import DynamicAdaptiveFireworkAlgorithm + + lama_register["DynamicAdaptiveFireworkAlgorithm"] = DynamicAdaptiveFireworkAlgorithm + LLAMADynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicAdaptiveFireworkAlgorithm" + ).set_name("LLAMADynamicAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("DynamicAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveGradientDifferentialEvolution import ( + DynamicAdaptiveGradientDifferentialEvolution, + ) + + lama_register["DynamicAdaptiveGradientDifferentialEvolution"] = ( + DynamicAdaptiveGradientDifferentialEvolution + ) + LLAMADynamicAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: + print("DynamicAdaptiveGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligence import ( + DynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["DynamicAdaptiveGravitationalSwarmIntelligence"] = ( + DynamicAdaptiveGravitationalSwarmIntelligence + ) + LLAMADynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("DynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( + DynamicAdaptiveGravitationalSwarmIntelligenceV2, + ) + + lama_register["DynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( + DynamicAdaptiveGravitationalSwarmIntelligenceV2 + ) + LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: + print("DynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveHybridAlgorithm import DynamicAdaptiveHybridAlgorithm + + lama_register["DynamicAdaptiveHybridAlgorithm"] = DynamicAdaptiveHybridAlgorithm + LLAMADynamicAdaptiveHybridAlgorithm = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridAlgorithm" + ).set_name("LLAMADynamicAdaptiveHybridAlgorithm", register=True) +except Exception as e: + print("DynamicAdaptiveHybridAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveHybridDE import DynamicAdaptiveHybridDE + + lama_register["DynamicAdaptiveHybridDE"] = DynamicAdaptiveHybridDE + LLAMADynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE").set_name( + "LLAMADynamicAdaptiveHybridDE", register=True + ) +except Exception as e: + print("DynamicAdaptiveHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveHybridDEPSOWithEliteMemory import ( + DynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["DynamicAdaptiveHybridDEPSOWithEliteMemory"] = DynamicAdaptiveHybridDEPSOWithEliteMemory + LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("DynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimization import ( + DynamicAdaptiveHybridOptimization, + ) + + lama_register["DynamicAdaptiveHybridOptimization"] = DynamicAdaptiveHybridOptimization + LLAMADynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridOptimization" + ).set_name("LLAMADynamicAdaptiveHybridOptimization", register=True) +except Exception as e: + print("DynamicAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimizer import DynamicAdaptiveHybridOptimizer + + lama_register["DynamicAdaptiveHybridOptimizer"] = DynamicAdaptiveHybridOptimizer + LLAMADynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridOptimizer" + ).set_name("LLAMADynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("DynamicAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch import ( + DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch, + ) + + lama_register["DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch"] = ( + DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch + ) + LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch = NonObjectOptimizer( + method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch" + ).set_name("LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch", register=True) +except Exception as e: + print("DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveMemeticOptimizer import DynamicAdaptiveMemeticOptimizer + + lama_register["DynamicAdaptiveMemeticOptimizer"] = DynamicAdaptiveMemeticOptimizer + LLAMADynamicAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveMemeticOptimizer" + ).set_name("LLAMADynamicAdaptiveMemeticOptimizer", register=True) +except Exception as e: + print("DynamicAdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptivePopulationDifferentialEvolution import ( + DynamicAdaptivePopulationDifferentialEvolution, + ) + + lama_register["DynamicAdaptivePopulationDifferentialEvolution"] = ( + DynamicAdaptivePopulationDifferentialEvolution + ) + LLAMADynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptivePopulationDifferentialEvolution" + ).set_name("LLAMADynamicAdaptivePopulationDifferentialEvolution", register=True) +except Exception as e: + print("DynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveQuantumDifferentialEvolution import ( + DynamicAdaptiveQuantumDifferentialEvolution, + ) + + lama_register["DynamicAdaptiveQuantumDifferentialEvolution"] = DynamicAdaptiveQuantumDifferentialEvolution + LLAMADynamicAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: + print("DynamicAdaptiveQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveQuantumLevyOptimizer import ( + DynamicAdaptiveQuantumLevyOptimizer, + ) + + lama_register["DynamicAdaptiveQuantumLevyOptimizer"] = DynamicAdaptiveQuantumLevyOptimizer + LLAMADynamicAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMADynamicAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: + print("DynamicAdaptiveQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveQuantumPSO import DynamicAdaptiveQuantumPSO + + lama_register["DynamicAdaptiveQuantumPSO"] = DynamicAdaptiveQuantumPSO + LLAMADynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO").set_name( + "LLAMADynamicAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("DynamicAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveQuasiRandomDEGradientAnnealing import ( + DynamicAdaptiveQuasiRandomDEGradientAnnealing, + ) + + lama_register["DynamicAdaptiveQuasiRandomDEGradientAnnealing"] = ( + DynamicAdaptiveQuasiRandomDEGradientAnnealing + ) + LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing" + ).set_name("LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: + print("DynamicAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicAdaptiveSwarmOptimization import DynamicAdaptiveSwarmOptimization + + lama_register["DynamicAdaptiveSwarmOptimization"] = DynamicAdaptiveSwarmOptimization + LLAMADynamicAdaptiveSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveSwarmOptimization" + ).set_name("LLAMADynamicAdaptiveSwarmOptimization", register=True) +except Exception as e: + print("DynamicAdaptiveSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicBalancingPSO import DynamicBalancingPSO + + lama_register["DynamicBalancingPSO"] = DynamicBalancingPSO + LLAMADynamicBalancingPSO = NonObjectOptimizer(method="LLAMADynamicBalancingPSO").set_name( + "LLAMADynamicBalancingPSO", register=True + ) +except Exception as e: + print("DynamicBalancingPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicClusterHybridOptimization import DynamicClusterHybridOptimization + + lama_register["DynamicClusterHybridOptimization"] = DynamicClusterHybridOptimization + LLAMADynamicClusterHybridOptimization = NonObjectOptimizer( + method="LLAMADynamicClusterHybridOptimization" + ).set_name("LLAMADynamicClusterHybridOptimization", register=True) +except Exception as e: + print("DynamicClusterHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicCohortAdaptiveEvolution import DynamicCohortAdaptiveEvolution + + lama_register["DynamicCohortAdaptiveEvolution"] = DynamicCohortAdaptiveEvolution + LLAMADynamicCohortAdaptiveEvolution = NonObjectOptimizer( + method="LLAMADynamicCohortAdaptiveEvolution" + ).set_name("LLAMADynamicCohortAdaptiveEvolution", register=True) +except Exception as e: + print("DynamicCohortAdaptiveEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicCohortMemeticAlgorithm import DynamicCohortMemeticAlgorithm + + lama_register["DynamicCohortMemeticAlgorithm"] = DynamicCohortMemeticAlgorithm + LLAMADynamicCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADynamicCohortMemeticAlgorithm" + ).set_name("LLAMADynamicCohortMemeticAlgorithm", register=True) +except Exception as e: + print("DynamicCohortMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicCohortOptimization import DynamicCohortOptimization + + lama_register["DynamicCohortOptimization"] = DynamicCohortOptimization + LLAMADynamicCohortOptimization = NonObjectOptimizer(method="LLAMADynamicCohortOptimization").set_name( + "LLAMADynamicCohortOptimization", register=True + ) +except Exception as e: + print("DynamicCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicCrowdedDE import DynamicCrowdedDE + + lama_register["DynamicCrowdedDE"] = DynamicCrowdedDE + LLAMADynamicCrowdedDE = NonObjectOptimizer(method="LLAMADynamicCrowdedDE").set_name( + "LLAMADynamicCrowdedDE", register=True + ) +except Exception as e: + print("DynamicCrowdedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicCulturalDifferentialEvolution import ( + DynamicCulturalDifferentialEvolution, + ) + + lama_register["DynamicCulturalDifferentialEvolution"] = DynamicCulturalDifferentialEvolution + LLAMADynamicCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicCulturalDifferentialEvolution" + ).set_name("LLAMADynamicCulturalDifferentialEvolution", register=True) +except Exception as e: + print("DynamicCulturalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEliteAdaptiveHybridOptimizerV2 import ( + DynamicEliteAdaptiveHybridOptimizerV2, + ) + + lama_register["DynamicEliteAdaptiveHybridOptimizerV2"] = DynamicEliteAdaptiveHybridOptimizerV2 + LLAMADynamicEliteAdaptiveHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMADynamicEliteAdaptiveHybridOptimizerV2" + ).set_name("LLAMADynamicEliteAdaptiveHybridOptimizerV2", register=True) +except Exception as e: + print("DynamicEliteAdaptiveHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEliteAnnealingDE import DynamicEliteAnnealingDE + + lama_register["DynamicEliteAnnealingDE"] = DynamicEliteAnnealingDE + LLAMADynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE").set_name( + "LLAMADynamicEliteAnnealingDE", register=True + ) +except Exception as e: + print("DynamicEliteAnnealingDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEliteCovarianceMemeticSearch import ( + DynamicEliteCovarianceMemeticSearch, + ) + + lama_register["DynamicEliteCovarianceMemeticSearch"] = DynamicEliteCovarianceMemeticSearch + LLAMADynamicEliteCovarianceMemeticSearch = NonObjectOptimizer( + method="LLAMADynamicEliteCovarianceMemeticSearch" + ).set_name("LLAMADynamicEliteCovarianceMemeticSearch", register=True) +except Exception as e: + print("DynamicEliteCovarianceMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEliteEnhancedDifferentialEvolution import ( + DynamicEliteEnhancedDifferentialEvolution, + ) + + lama_register["DynamicEliteEnhancedDifferentialEvolution"] = DynamicEliteEnhancedDifferentialEvolution + LLAMADynamicEliteEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicEliteEnhancedDifferentialEvolution" + ).set_name("LLAMADynamicEliteEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("DynamicEliteEnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicElitistHybridOptimizer import DynamicElitistHybridOptimizer + + lama_register["DynamicElitistHybridOptimizer"] = DynamicElitistHybridOptimizer + LLAMADynamicElitistHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicElitistHybridOptimizer" + ).set_name("LLAMADynamicElitistHybridOptimizer", register=True) +except Exception as e: + print("DynamicElitistHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEnhancedDifferentialFireworkAlgorithm import ( + DynamicEnhancedDifferentialFireworkAlgorithm, + ) + + lama_register["DynamicEnhancedDifferentialFireworkAlgorithm"] = ( + DynamicEnhancedDifferentialFireworkAlgorithm + ) + LLAMADynamicEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm" + ).set_name("LLAMADynamicEnhancedDifferentialFireworkAlgorithm", register=True) +except Exception as e: + print("DynamicEnhancedDifferentialFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicEnhancedHybridOptimizer import DynamicEnhancedHybridOptimizer + + lama_register["DynamicEnhancedHybridOptimizer"] = DynamicEnhancedHybridOptimizer + LLAMADynamicEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicEnhancedHybridOptimizer" + ).set_name("LLAMADynamicEnhancedHybridOptimizer", register=True) +except Exception as e: + print("DynamicEnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicExplorationExploitationAlgorithm import ( + DynamicExplorationExploitationAlgorithm, + ) + + lama_register["DynamicExplorationExploitationAlgorithm"] = DynamicExplorationExploitationAlgorithm + LLAMADynamicExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationAlgorithm" + ).set_name("LLAMADynamicExplorationExploitationAlgorithm", register=True) +except Exception as e: + print("DynamicExplorationExploitationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicExplorationExploitationDE import DynamicExplorationExploitationDE + + lama_register["DynamicExplorationExploitationDE"] = DynamicExplorationExploitationDE + LLAMADynamicExplorationExploitationDE = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationDE" + ).set_name("LLAMADynamicExplorationExploitationDE", register=True) +except Exception as e: + print("DynamicExplorationExploitationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicExplorationExploitationMemeticAlgorithm import ( + DynamicExplorationExploitationMemeticAlgorithm, + ) + + lama_register["DynamicExplorationExploitationMemeticAlgorithm"] = ( + DynamicExplorationExploitationMemeticAlgorithm + ) + LLAMADynamicExplorationExploitationMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationMemeticAlgorithm" + ).set_name("LLAMADynamicExplorationExploitationMemeticAlgorithm", register=True) +except Exception as e: + print("DynamicExplorationExploitationMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicExplorationOptimization import DynamicExplorationOptimization + + lama_register["DynamicExplorationOptimization"] = DynamicExplorationOptimization + LLAMADynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMADynamicExplorationOptimization" + ).set_name("LLAMADynamicExplorationOptimization", register=True) +except Exception as e: + print("DynamicExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicFireworkAlgorithm import DynamicFireworkAlgorithm + + lama_register["DynamicFireworkAlgorithm"] = DynamicFireworkAlgorithm + LLAMADynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm").set_name( + "LLAMADynamicFireworkAlgorithm", register=True + ) +except Exception as e: + print("DynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicFireworksSwarmOptimization import ( + DynamicFireworksSwarmOptimization, + ) + + lama_register["DynamicFireworksSwarmOptimization"] = DynamicFireworksSwarmOptimization + LLAMADynamicFireworksSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicFireworksSwarmOptimization" + ).set_name("LLAMADynamicFireworksSwarmOptimization", register=True) +except Exception as e: + print("DynamicFireworksSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicFractionalClusterOptimization import ( + DynamicFractionalClusterOptimization, + ) + + lama_register["DynamicFractionalClusterOptimization"] = DynamicFractionalClusterOptimization + LLAMADynamicFractionalClusterOptimization = NonObjectOptimizer( + method="LLAMADynamicFractionalClusterOptimization" + ).set_name("LLAMADynamicFractionalClusterOptimization", register=True) +except Exception as e: + print("DynamicFractionalClusterOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealing import ( + DynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["DynamicGradientBoostedMemorySimulatedAnnealing"] = ( + DynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMADynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("DynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealingV2 import ( + DynamicGradientBoostedMemorySimulatedAnnealingV2, + ) + + lama_register["DynamicGradientBoostedMemorySimulatedAnnealingV2"] = ( + DynamicGradientBoostedMemorySimulatedAnnealingV2 + ) + LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2" + ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2", register=True) +except Exception as e: + print("DynamicGradientBoostedMemorySimulatedAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicGradientBoostedRefinementAnnealing import ( + DynamicGradientBoostedRefinementAnnealing, + ) + + lama_register["DynamicGradientBoostedRefinementAnnealing"] = DynamicGradientBoostedRefinementAnnealing + LLAMADynamicGradientBoostedRefinementAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedRefinementAnnealing" + ).set_name("LLAMADynamicGradientBoostedRefinementAnnealing", register=True) +except Exception as e: + print("DynamicGradientBoostedRefinementAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicGradientEnhancedAnnealing import DynamicGradientEnhancedAnnealing + + lama_register["DynamicGradientEnhancedAnnealing"] = DynamicGradientEnhancedAnnealing + LLAMADynamicGradientEnhancedAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientEnhancedAnnealing" + ).set_name("LLAMADynamicGradientEnhancedAnnealing", register=True) +except Exception as e: + print("DynamicGradientEnhancedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicHybridAnnealing import DynamicHybridAnnealing + + lama_register["DynamicHybridAnnealing"] = DynamicHybridAnnealing + LLAMADynamicHybridAnnealing = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing").set_name( + "LLAMADynamicHybridAnnealing", register=True + ) +except Exception as e: + print("DynamicHybridAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicHybridOptimizer import DynamicHybridOptimizer + + lama_register["DynamicHybridOptimizer"] = DynamicHybridOptimizer + LLAMADynamicHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer").set_name( + "LLAMADynamicHybridOptimizer", register=True + ) +except Exception as e: + print("DynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicHybridQuantumDifferentialEvolution import ( + DynamicHybridQuantumDifferentialEvolution, + ) + + lama_register["DynamicHybridQuantumDifferentialEvolution"] = DynamicHybridQuantumDifferentialEvolution + LLAMADynamicHybridQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicHybridQuantumDifferentialEvolution" + ).set_name("LLAMADynamicHybridQuantumDifferentialEvolution", register=True) +except Exception as e: + print("DynamicHybridQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicHybridSelfAdaptiveDE import DynamicHybridSelfAdaptiveDE + + lama_register["DynamicHybridSelfAdaptiveDE"] = DynamicHybridSelfAdaptiveDE + LLAMADynamicHybridSelfAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE").set_name( + "LLAMADynamicHybridSelfAdaptiveDE", register=True + ) +except Exception as e: + print("DynamicHybridSelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicLevyHarmonySearch import DynamicLevyHarmonySearch + + lama_register["DynamicLevyHarmonySearch"] = DynamicLevyHarmonySearch + LLAMADynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch").set_name( + "LLAMADynamicLevyHarmonySearch", register=True + ) +except Exception as e: + print("DynamicLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicLocalSearchFireworkAlgorithm import ( + DynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["DynamicLocalSearchFireworkAlgorithm"] = DynamicLocalSearchFireworkAlgorithm + LLAMADynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMADynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: + print("DynamicLocalSearchFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMemeticDifferentialEvolutionWithAdaptiveElitism import ( + DynamicMemeticDifferentialEvolutionWithAdaptiveElitism, + ) + + lama_register["DynamicMemeticDifferentialEvolutionWithAdaptiveElitism"] = ( + DynamicMemeticDifferentialEvolutionWithAdaptiveElitism + ) + LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism = NonObjectOptimizer( + method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism" + ).set_name("LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism", register=True) +except Exception as e: + print("DynamicMemeticDifferentialEvolutionWithAdaptiveElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMemoryAdaptiveConvergenceStrategyV76 import ( + DynamicMemoryAdaptiveConvergenceStrategyV76, + ) + + lama_register["DynamicMemoryAdaptiveConvergenceStrategyV76"] = DynamicMemoryAdaptiveConvergenceStrategyV76 + LLAMADynamicMemoryAdaptiveConvergenceStrategyV76 = NonObjectOptimizer( + method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76" + ).set_name("LLAMADynamicMemoryAdaptiveConvergenceStrategyV76", register=True) +except Exception as e: + print("DynamicMemoryAdaptiveConvergenceStrategyV76 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMemoryEnhancedDualPhaseStrategyV66 import ( + DynamicMemoryEnhancedDualPhaseStrategyV66, + ) + + lama_register["DynamicMemoryEnhancedDualPhaseStrategyV66"] = DynamicMemoryEnhancedDualPhaseStrategyV66 + LLAMADynamicMemoryEnhancedDualPhaseStrategyV66 = NonObjectOptimizer( + method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66" + ).set_name("LLAMADynamicMemoryEnhancedDualPhaseStrategyV66", register=True) +except Exception as e: + print("DynamicMemoryEnhancedDualPhaseStrategyV66 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMemoryHybridSearch import DynamicMemoryHybridSearch + + lama_register["DynamicMemoryHybridSearch"] = DynamicMemoryHybridSearch + LLAMADynamicMemoryHybridSearch = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch").set_name( + "LLAMADynamicMemoryHybridSearch", register=True + ) +except Exception as e: + print("DynamicMemoryHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMultiPhaseAnnealingPlus import DynamicMultiPhaseAnnealingPlus + + lama_register["DynamicMultiPhaseAnnealingPlus"] = DynamicMultiPhaseAnnealingPlus + LLAMADynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( + method="LLAMADynamicMultiPhaseAnnealingPlus" + ).set_name("LLAMADynamicMultiPhaseAnnealingPlus", register=True) +except Exception as e: + print("DynamicMultiPhaseAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicMultiStrategyOptimizer import DynamicMultiStrategyOptimizer + + lama_register["DynamicMultiStrategyOptimizer"] = DynamicMultiStrategyOptimizer + LLAMADynamicMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMADynamicMultiStrategyOptimizer" + ).set_name("LLAMADynamicMultiStrategyOptimizer", register=True) +except Exception as e: + print("DynamicMultiStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicNichePSO_DE_LS import DynamicNichePSO_DE_LS + + lama_register["DynamicNichePSO_DE_LS"] = DynamicNichePSO_DE_LS + LLAMADynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS").set_name( + "LLAMADynamicNichePSO_DE_LS", register=True + ) +except Exception as e: + print("DynamicNichePSO_DE_LS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicNichingDEPSOWithRestart import DynamicNichingDEPSOWithRestart + + lama_register["DynamicNichingDEPSOWithRestart"] = DynamicNichingDEPSOWithRestart + LLAMADynamicNichingDEPSOWithRestart = NonObjectOptimizer( + method="LLAMADynamicNichingDEPSOWithRestart" + ).set_name("LLAMADynamicNichingDEPSOWithRestart", register=True) +except Exception as e: + print("DynamicNichingDEPSOWithRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPopulationAdaptiveGradientEvolution import ( + DynamicPopulationAdaptiveGradientEvolution, + ) + + lama_register["DynamicPopulationAdaptiveGradientEvolution"] = DynamicPopulationAdaptiveGradientEvolution + LLAMADynamicPopulationAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADynamicPopulationAdaptiveGradientEvolution" + ).set_name("LLAMADynamicPopulationAdaptiveGradientEvolution", register=True) +except Exception as e: + print("DynamicPopulationAdaptiveGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPopulationMemeticDifferentialEvolution import ( + DynamicPopulationMemeticDifferentialEvolution, + ) + + lama_register["DynamicPopulationMemeticDifferentialEvolution"] = ( + DynamicPopulationMemeticDifferentialEvolution + ) + LLAMADynamicPopulationMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicPopulationMemeticDifferentialEvolution" + ).set_name("LLAMADynamicPopulationMemeticDifferentialEvolution", register=True) +except Exception as e: + print("DynamicPopulationMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPrecisionBalancedEvolution import ( + DynamicPrecisionBalancedEvolution, + ) + + lama_register["DynamicPrecisionBalancedEvolution"] = DynamicPrecisionBalancedEvolution + LLAMADynamicPrecisionBalancedEvolution = NonObjectOptimizer( + method="LLAMADynamicPrecisionBalancedEvolution" + ).set_name("LLAMADynamicPrecisionBalancedEvolution", register=True) +except Exception as e: + print("DynamicPrecisionBalancedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPrecisionCosineDifferentialSwarm import ( + DynamicPrecisionCosineDifferentialSwarm, + ) + + lama_register["DynamicPrecisionCosineDifferentialSwarm"] = DynamicPrecisionCosineDifferentialSwarm + LLAMADynamicPrecisionCosineDifferentialSwarm = NonObjectOptimizer( + method="LLAMADynamicPrecisionCosineDifferentialSwarm" + ).set_name("LLAMADynamicPrecisionCosineDifferentialSwarm", register=True) +except Exception as e: + print("DynamicPrecisionCosineDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPrecisionExplorationOptimizer import ( + DynamicPrecisionExplorationOptimizer, + ) + + lama_register["DynamicPrecisionExplorationOptimizer"] = DynamicPrecisionExplorationOptimizer + LLAMADynamicPrecisionExplorationOptimizer = NonObjectOptimizer( + method="LLAMADynamicPrecisionExplorationOptimizer" + ).set_name("LLAMADynamicPrecisionExplorationOptimizer", register=True) +except Exception as e: + print("DynamicPrecisionExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicPrecisionOptimizer import DynamicPrecisionOptimizer + + lama_register["DynamicPrecisionOptimizer"] = DynamicPrecisionOptimizer + LLAMADynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer").set_name( + "LLAMADynamicPrecisionOptimizer", register=True + ) +except Exception as e: + print("DynamicPrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumAdaptiveEvolutionStrategy import ( + DynamicQuantumAdaptiveEvolutionStrategy, + ) + + lama_register["DynamicQuantumAdaptiveEvolutionStrategy"] = DynamicQuantumAdaptiveEvolutionStrategy + LLAMADynamicQuantumAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMADynamicQuantumAdaptiveEvolutionStrategy" + ).set_name("LLAMADynamicQuantumAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("DynamicQuantumAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolution import ( + DynamicQuantumDifferentialEvolution, + ) + + lama_register["DynamicQuantumDifferentialEvolution"] = DynamicQuantumDifferentialEvolution + LLAMADynamicQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolution" + ).set_name("LLAMADynamicQuantumDifferentialEvolution", register=True) +except Exception as e: + print("DynamicQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch import ( + DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch, + ) + + lama_register["DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch"] = ( + DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch + ) + LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch" + ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch", register=True) +except Exception as e: + print("DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart import ( + DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart, + ) + + lama_register["DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart"] = ( + DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart + ) + LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart" + ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart", register=True) +except Exception as e: + print("DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumEvolution import DynamicQuantumEvolution + + lama_register["DynamicQuantumEvolution"] = DynamicQuantumEvolution + LLAMADynamicQuantumEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution").set_name( + "LLAMADynamicQuantumEvolution", register=True + ) +except Exception as e: + print("DynamicQuantumEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumGuidedHybridSearchV7 import ( + DynamicQuantumGuidedHybridSearchV7, + ) + + lama_register["DynamicQuantumGuidedHybridSearchV7"] = DynamicQuantumGuidedHybridSearchV7 + LLAMADynamicQuantumGuidedHybridSearchV7 = NonObjectOptimizer( + method="LLAMADynamicQuantumGuidedHybridSearchV7" + ).set_name("LLAMADynamicQuantumGuidedHybridSearchV7", register=True) +except Exception as e: + print("DynamicQuantumGuidedHybridSearchV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialHybridSearch import ( + DynamicQuantumLevyDifferentialHybridSearch, + ) + + lama_register["DynamicQuantumLevyDifferentialHybridSearch"] = DynamicQuantumLevyDifferentialHybridSearch + LLAMADynamicQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( + method="LLAMADynamicQuantumLevyDifferentialHybridSearch" + ).set_name("LLAMADynamicQuantumLevyDifferentialHybridSearch", register=True) +except Exception as e: + print("DynamicQuantumLevyDifferentialHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialSwarmOptimization import ( + DynamicQuantumLevyDifferentialSwarmOptimization, + ) + + lama_register["DynamicQuantumLevyDifferentialSwarmOptimization"] = ( + DynamicQuantumLevyDifferentialSwarmOptimization + ) + LLAMADynamicQuantumLevyDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization" + ).set_name("LLAMADynamicQuantumLevyDifferentialSwarmOptimization", register=True) +except Exception as e: + print("DynamicQuantumLevyDifferentialSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumLevySwarmOptimization import ( + DynamicQuantumLevySwarmOptimization, + ) + + lama_register["DynamicQuantumLevySwarmOptimization"] = DynamicQuantumLevySwarmOptimization + LLAMADynamicQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumLevySwarmOptimization" + ).set_name("LLAMADynamicQuantumLevySwarmOptimization", register=True) +except Exception as e: + print("DynamicQuantumLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumMemeticOptimizer import DynamicQuantumMemeticOptimizer + + lama_register["DynamicQuantumMemeticOptimizer"] = DynamicQuantumMemeticOptimizer + LLAMADynamicQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMADynamicQuantumMemeticOptimizer" + ).set_name("LLAMADynamicQuantumMemeticOptimizer", register=True) +except Exception as e: + print("DynamicQuantumMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumSwarmOptimization import DynamicQuantumSwarmOptimization + + lama_register["DynamicQuantumSwarmOptimization"] = DynamicQuantumSwarmOptimization + LLAMADynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumSwarmOptimization" + ).set_name("LLAMADynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("DynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuantumSwarmOptimizationRefined import ( + DynamicQuantumSwarmOptimizationRefined, + ) + + lama_register["DynamicQuantumSwarmOptimizationRefined"] = DynamicQuantumSwarmOptimizationRefined + LLAMADynamicQuantumSwarmOptimizationRefined = NonObjectOptimizer( + method="LLAMADynamicQuantumSwarmOptimizationRefined" + ).set_name("LLAMADynamicQuantumSwarmOptimizationRefined", register=True) +except Exception as e: + print("DynamicQuantumSwarmOptimizationRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicQuasiRandomAdaptiveDifferentialEvolution import ( + DynamicQuasiRandomAdaptiveDifferentialEvolution, + ) + + lama_register["DynamicQuasiRandomAdaptiveDifferentialEvolution"] = ( + DynamicQuasiRandomAdaptiveDifferentialEvolution + ) + LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution" + ).set_name("LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("DynamicQuasiRandomAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( + DynamicRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["DynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + DynamicRefinedGradientBoostedMemorySimulatedAnnealing + ) + LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("DynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicRefinementGradientBoostedMemoryAnnealing import ( + DynamicRefinementGradientBoostedMemoryAnnealing, + ) + + lama_register["DynamicRefinementGradientBoostedMemoryAnnealing"] = ( + DynamicRefinementGradientBoostedMemoryAnnealing + ) + LLAMADynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing" + ).set_name("LLAMADynamicRefinementGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("DynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicScaleSearch import DynamicScaleSearch + + lama_register["DynamicScaleSearch"] = DynamicScaleSearch + LLAMADynamicScaleSearch = NonObjectOptimizer(method="LLAMADynamicScaleSearch").set_name( + "LLAMADynamicScaleSearch", register=True + ) +except Exception as e: + print("DynamicScaleSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicSelfAdaptiveOptimizer import DynamicSelfAdaptiveOptimizer + + lama_register["DynamicSelfAdaptiveOptimizer"] = DynamicSelfAdaptiveOptimizer + LLAMADynamicSelfAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMADynamicSelfAdaptiveOptimizer" + ).set_name("LLAMADynamicSelfAdaptiveOptimizer", register=True) +except Exception as e: + print("DynamicSelfAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicStrategyAdaptiveDE import DynamicStrategyAdaptiveDE + + lama_register["DynamicStrategyAdaptiveDE"] = DynamicStrategyAdaptiveDE + LLAMADynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE").set_name( + "LLAMADynamicStrategyAdaptiveDE", register=True + ) +except Exception as e: + print("DynamicStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.DynamicallyAdaptiveFireworkAlgorithm import ( + DynamicallyAdaptiveFireworkAlgorithm, + ) + + lama_register["DynamicallyAdaptiveFireworkAlgorithm"] = DynamicallyAdaptiveFireworkAlgorithm + LLAMADynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicallyAdaptiveFireworkAlgorithm" + ).set_name("LLAMADynamicallyAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("DynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EACDE import EACDE + + lama_register["EACDE"] = EACDE + LLAMAEACDE = NonObjectOptimizer(method="LLAMAEACDE").set_name("LLAMAEACDE", register=True) +except Exception as e: + print("EACDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADE import EADE + + lama_register["EADE"] = EADE + LLAMAEADE = NonObjectOptimizer(method="LLAMAEADE").set_name("LLAMAEADE", register=True) +except Exception as e: + print("EADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEA import EADEA + + lama_register["EADEA"] = EADEA + LLAMAEADEA = NonObjectOptimizer(method="LLAMAEADEA").set_name("LLAMAEADEA", register=True) +except Exception as e: + print("EADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEDM import EADEDM + + lama_register["EADEDM"] = EADEDM + LLAMAEADEDM = NonObjectOptimizer(method="LLAMAEADEDM").set_name("LLAMAEADEDM", register=True) +except Exception as e: + print("EADEDM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEDMGM import EADEDMGM + + lama_register["EADEDMGM"] = EADEDMGM + LLAMAEADEDMGM = NonObjectOptimizer(method="LLAMAEADEDMGM").set_name("LLAMAEADEDMGM", register=True) +except Exception as e: + print("EADEDMGM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEPC import EADEPC + + lama_register["EADEPC"] = EADEPC + LLAMAEADEPC = NonObjectOptimizer(method="LLAMAEADEPC").set_name("LLAMAEADEPC", register=True) +except Exception as e: + print("EADEPC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEPM import EADEPM + + lama_register["EADEPM"] = EADEPM + LLAMAEADEPM = NonObjectOptimizer(method="LLAMAEADEPM").set_name("LLAMAEADEPM", register=True) +except Exception as e: + print("EADEPM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEPMC import EADEPMC + + lama_register["EADEPMC"] = EADEPMC + LLAMAEADEPMC = NonObjectOptimizer(method="LLAMAEADEPMC").set_name("LLAMAEADEPMC", register=True) +except Exception as e: + print("EADEPMC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADES import EADES + + lama_register["EADES"] = EADES + LLAMAEADES = NonObjectOptimizer(method="LLAMAEADES").set_name("LLAMAEADES", register=True) +except Exception as e: + print("EADES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADESC import EADESC + + lama_register["EADESC"] = EADESC + LLAMAEADESC = NonObjectOptimizer(method="LLAMAEADESC").set_name("LLAMAEADESC", register=True) +except Exception as e: + print("EADESC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADEWM import EADEWM + + lama_register["EADEWM"] = EADEWM + LLAMAEADEWM = NonObjectOptimizer(method="LLAMAEADEWM").set_name("LLAMAEADEWM", register=True) +except Exception as e: + print("EADEWM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADE_FIDM import EADE_FIDM + + lama_register["EADE_FIDM"] = EADE_FIDM + LLAMAEADE_FIDM = NonObjectOptimizer(method="LLAMAEADE_FIDM").set_name("LLAMAEADE_FIDM", register=True) +except Exception as e: + print("EADE_FIDM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADGM import EADGM + + lama_register["EADGM"] = EADGM + LLAMAEADGM = NonObjectOptimizer(method="LLAMAEADGM").set_name("LLAMAEADGM", register=True) +except Exception as e: + print("EADGM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADMMMS import EADMMMS + + lama_register["EADMMMS"] = EADMMMS + LLAMAEADMMMS = NonObjectOptimizer(method="LLAMAEADMMMS").set_name("LLAMAEADMMMS", register=True) +except Exception as e: + print("EADMMMS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADSEA import EADSEA + + lama_register["EADSEA"] = EADSEA + LLAMAEADSEA = NonObjectOptimizer(method="LLAMAEADSEA").set_name("LLAMAEADSEA", register=True) +except Exception as e: + print("EADSEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EADSM import EADSM + + lama_register["EADSM"] = EADSM + LLAMAEADSM = NonObjectOptimizer(method="LLAMAEADSM").set_name("LLAMAEADSM", register=True) +except Exception as e: + print("EADSM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAMDE import EAMDE + + lama_register["EAMDE"] = EAMDE + LLAMAEAMDE = NonObjectOptimizer(method="LLAMAEAMDE").set_name("LLAMAEAMDE", register=True) +except Exception as e: + print("EAMDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAMES import EAMES + + lama_register["EAMES"] = EAMES + LLAMAEAMES = NonObjectOptimizer(method="LLAMAEAMES").set_name("LLAMAEAMES", register=True) +except Exception as e: + print("EAMES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAMSDiffEvo import EAMSDiffEvo + + lama_register["EAMSDiffEvo"] = EAMSDiffEvo + LLAMAEAMSDiffEvo = NonObjectOptimizer(method="LLAMAEAMSDiffEvo").set_name( + "LLAMAEAMSDiffEvo", register=True + ) +except Exception as e: + print("EAMSDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAMSEA import EAMSEA + + lama_register["EAMSEA"] = EAMSEA + LLAMAEAMSEA = NonObjectOptimizer(method="LLAMAEAMSEA").set_name("LLAMAEAMSEA", register=True) +except Exception as e: + print("EAMSEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAPBES import EAPBES + + lama_register["EAPBES"] = EAPBES + LLAMAEAPBES = NonObjectOptimizer(method="LLAMAEAPBES").set_name("LLAMAEAPBES", register=True) +except Exception as e: + print("EAPBES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EAPDELS import EAPDELS + + lama_register["EAPDELS"] = EAPDELS + LLAMAEAPDELS = NonObjectOptimizer(method="LLAMAEAPDELS").set_name("LLAMAEAPDELS", register=True) +except Exception as e: + print("EAPDELS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EARESDM import EARESDM + + lama_register["EARESDM"] = EARESDM + LLAMAEARESDM = NonObjectOptimizer(method="LLAMAEARESDM").set_name("LLAMAEARESDM", register=True) +except Exception as e: + print("EARESDM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EASO import EASO + + lama_register["EASO"] = EASO + LLAMAEASO = NonObjectOptimizer(method="LLAMAEASO").set_name("LLAMAEASO", register=True) +except Exception as e: + print("EASO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDAEA import EDAEA + + lama_register["EDAEA"] = EDAEA + LLAMAEDAEA = NonObjectOptimizer(method="LLAMAEDAEA").set_name("LLAMAEDAEA", register=True) +except Exception as e: + print("EDAEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDAG import EDAG + + lama_register["EDAG"] = EDAG + LLAMAEDAG = NonObjectOptimizer(method="LLAMAEDAG").set_name("LLAMAEDAG", register=True) +except Exception as e: + print("EDAG can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDASOGG import EDASOGG + + lama_register["EDASOGG"] = EDASOGG + LLAMAEDASOGG = NonObjectOptimizer(method="LLAMAEDASOGG").set_name("LLAMAEDASOGG", register=True) +except Exception as e: + print("EDASOGG can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDDCEA import EDDCEA + + lama_register["EDDCEA"] = EDDCEA + LLAMAEDDCEA = NonObjectOptimizer(method="LLAMAEDDCEA").set_name("LLAMAEDDCEA", register=True) +except Exception as e: + print("EDDCEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDEAS import EDEAS + + lama_register["EDEAS"] = EDEAS + LLAMAEDEAS = NonObjectOptimizer(method="LLAMAEDEAS").set_name("LLAMAEDEAS", register=True) +except Exception as e: + print("EDEAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDEPM import EDEPM + + lama_register["EDEPM"] = EDEPM + LLAMAEDEPM = NonObjectOptimizer(method="LLAMAEDEPM").set_name("LLAMAEDEPM", register=True) +except Exception as e: + print("EDEPM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDGB import EDGB + + lama_register["EDGB"] = EDGB + LLAMAEDGB = NonObjectOptimizer(method="LLAMAEDGB").set_name("LLAMAEDGB", register=True) +except Exception as e: + print("EDGB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDMDESM import EDMDESM + + lama_register["EDMDESM"] = EDMDESM + LLAMAEDMDESM = NonObjectOptimizer(method="LLAMAEDMDESM").set_name("LLAMAEDMDESM", register=True) +except Exception as e: + print("EDMDESM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDMRL import EDMRL + + lama_register["EDMRL"] = EDMRL + LLAMAEDMRL = NonObjectOptimizer(method="LLAMAEDMRL").set_name("LLAMAEDMRL", register=True) +except Exception as e: + print("EDMRL can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDMS import EDMS + + lama_register["EDMS"] = EDMS + LLAMAEDMS = NonObjectOptimizer(method="LLAMAEDMS").set_name("LLAMAEDMS", register=True) +except Exception as e: + print("EDMS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDNAS import EDNAS + + lama_register["EDNAS"] = EDNAS + LLAMAEDNAS = NonObjectOptimizer(method="LLAMAEDNAS").set_name("LLAMAEDNAS", register=True) +except Exception as e: + print("EDNAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDNAS_SAMRA import EDNAS_SAMRA + + lama_register["EDNAS_SAMRA"] = EDNAS_SAMRA + LLAMAEDNAS_SAMRA = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA").set_name( + "LLAMAEDNAS_SAMRA", register=True + ) +except Exception as e: + print("EDNAS_SAMRA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EDSDiffEvoM import EDSDiffEvoM + + lama_register["EDSDiffEvoM"] = EDSDiffEvoM + LLAMAEDSDiffEvoM = NonObjectOptimizer(method="LLAMAEDSDiffEvoM").set_name( + "LLAMAEDSDiffEvoM", register=True + ) +except Exception as e: + print("EDSDiffEvoM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EGBDE import EGBDE + + lama_register["EGBDE"] = EGBDE + LLAMAEGBDE = NonObjectOptimizer(method="LLAMAEGBDE").set_name("LLAMAEGBDE", register=True) +except Exception as e: + print("EGBDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EGGEO import EGGEO + + lama_register["EGGEO"] = EGGEO + LLAMAEGGEO = NonObjectOptimizer(method="LLAMAEGGEO").set_name("LLAMAEGGEO", register=True) +except Exception as e: + print("EGGEO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EHADEEM import EHADEEM + + lama_register["EHADEEM"] = EHADEEM + LLAMAEHADEEM = NonObjectOptimizer(method="LLAMAEHADEEM").set_name("LLAMAEHADEEM", register=True) +except Exception as e: + print("EHADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EHADEMI import EHADEMI + + lama_register["EHADEMI"] = EHADEMI + LLAMAEHADEMI = NonObjectOptimizer(method="LLAMAEHADEMI").set_name("LLAMAEHADEMI", register=True) +except Exception as e: + print("EHADEMI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EHDAM import EHDAM + + lama_register["EHDAM"] = EHDAM + LLAMAEHDAM = NonObjectOptimizer(method="LLAMAEHDAM").set_name("LLAMAEHDAM", register=True) +except Exception as e: + print("EHDAM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EHDE import EHDE + + lama_register["EHDE"] = EHDE + LLAMAEHDE = NonObjectOptimizer(method="LLAMAEHDE").set_name("LLAMAEHDE", register=True) +except Exception as e: + print("EHDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EIADEA import EIADEA + + lama_register["EIADEA"] = EIADEA + LLAMAEIADEA = NonObjectOptimizer(method="LLAMAEIADEA").set_name("LLAMAEIADEA", register=True) +except Exception as e: + print("EIADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EMIDE import EMIDE + + lama_register["EMIDE"] = EMIDE + LLAMAEMIDE = NonObjectOptimizer(method="LLAMAEMIDE").set_name("LLAMAEMIDE", register=True) +except Exception as e: + print("EMIDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EMSADE import EMSADE + + lama_register["EMSADE"] = EMSADE + LLAMAEMSADE = NonObjectOptimizer(method="LLAMAEMSADE").set_name("LLAMAEMSADE", register=True) +except Exception as e: + print("EMSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EMSEAS import EMSEAS + + lama_register["EMSEAS"] = EMSEAS + LLAMAEMSEAS = NonObjectOptimizer(method="LLAMAEMSEAS").set_name("LLAMAEMSEAS", register=True) +except Exception as e: + print("EMSEAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EORAMED import EORAMED + + lama_register["EORAMED"] = EORAMED + LLAMAEORAMED = NonObjectOptimizer(method="LLAMAEORAMED").set_name("LLAMAEORAMED", register=True) +except Exception as e: + print("EORAMED can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EPADE import EPADE + + lama_register["EPADE"] = EPADE + LLAMAEPADE = NonObjectOptimizer(method="LLAMAEPADE").set_name("LLAMAEPADE", register=True) +except Exception as e: + print("EPADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EPDE import EPDE + + lama_register["EPDE"] = EPDE + LLAMAEPDE = NonObjectOptimizer(method="LLAMAEPDE").set_name("LLAMAEPDE", register=True) +except Exception as e: + print("EPDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EPWDEM import EPWDEM + + lama_register["EPWDEM"] = EPWDEM + LLAMAEPWDEM = NonObjectOptimizer(method="LLAMAEPWDEM").set_name("LLAMAEPWDEM", register=True) +except Exception as e: + print("EPWDEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADE import ERADE + + lama_register["ERADE"] = ERADE + LLAMAERADE = NonObjectOptimizer(method="LLAMAERADE").set_name("LLAMAERADE", register=True) +except Exception as e: + print("ERADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS import ERADS + + lama_register["ERADS"] = ERADS + LLAMAERADS = NonObjectOptimizer(method="LLAMAERADS").set_name("LLAMAERADS", register=True) +except Exception as e: + print("ERADS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptiveDynamic import ERADS_AdaptiveDynamic + + lama_register["ERADS_AdaptiveDynamic"] = ERADS_AdaptiveDynamic + LLAMAERADS_AdaptiveDynamic = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic").set_name( + "LLAMAERADS_AdaptiveDynamic", register=True + ) +except Exception as e: + print("ERADS_AdaptiveDynamic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptiveDynamicPlus import ERADS_AdaptiveDynamicPlus + + lama_register["ERADS_AdaptiveDynamicPlus"] = ERADS_AdaptiveDynamicPlus + LLAMAERADS_AdaptiveDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus").set_name( + "LLAMAERADS_AdaptiveDynamicPlus", register=True + ) +except Exception as e: + print("ERADS_AdaptiveDynamicPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptiveHybrid import ERADS_AdaptiveHybrid + + lama_register["ERADS_AdaptiveHybrid"] = ERADS_AdaptiveHybrid + LLAMAERADS_AdaptiveHybrid = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid").set_name( + "LLAMAERADS_AdaptiveHybrid", register=True + ) +except Exception as e: + print("ERADS_AdaptiveHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptivePlus import ERADS_AdaptivePlus + + lama_register["ERADS_AdaptivePlus"] = ERADS_AdaptivePlus + LLAMAERADS_AdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus").set_name( + "LLAMAERADS_AdaptivePlus", register=True + ) +except Exception as e: + print("ERADS_AdaptivePlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptiveProgressive import ERADS_AdaptiveProgressive + + lama_register["ERADS_AdaptiveProgressive"] = ERADS_AdaptiveProgressive + LLAMAERADS_AdaptiveProgressive = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive").set_name( + "LLAMAERADS_AdaptiveProgressive", register=True + ) +except Exception as e: + print("ERADS_AdaptiveProgressive can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdaptiveRefinement import ERADS_AdaptiveRefinement + + lama_register["ERADS_AdaptiveRefinement"] = ERADS_AdaptiveRefinement + LLAMAERADS_AdaptiveRefinement = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement").set_name( + "LLAMAERADS_AdaptiveRefinement", register=True + ) +except Exception as e: + print("ERADS_AdaptiveRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Advanced import ERADS_Advanced + + lama_register["ERADS_Advanced"] = ERADS_Advanced + LLAMAERADS_Advanced = NonObjectOptimizer(method="LLAMAERADS_Advanced").set_name( + "LLAMAERADS_Advanced", register=True + ) +except Exception as e: + print("ERADS_Advanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdvancedDynamic import ERADS_AdvancedDynamic + + lama_register["ERADS_AdvancedDynamic"] = ERADS_AdvancedDynamic + LLAMAERADS_AdvancedDynamic = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic").set_name( + "LLAMAERADS_AdvancedDynamic", register=True + ) +except Exception as e: + print("ERADS_AdvancedDynamic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_AdvancedRefined import ERADS_AdvancedRefined + + lama_register["ERADS_AdvancedRefined"] = ERADS_AdvancedRefined + LLAMAERADS_AdvancedRefined = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined").set_name( + "LLAMAERADS_AdvancedRefined", register=True + ) +except Exception as e: + print("ERADS_AdvancedRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_DynamicPrecision import ERADS_DynamicPrecision + + lama_register["ERADS_DynamicPrecision"] = ERADS_DynamicPrecision + LLAMAERADS_DynamicPrecision = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision").set_name( + "LLAMAERADS_DynamicPrecision", register=True + ) +except Exception as e: + print("ERADS_DynamicPrecision can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Enhanced import ERADS_Enhanced + + lama_register["ERADS_Enhanced"] = ERADS_Enhanced + LLAMAERADS_Enhanced = NonObjectOptimizer(method="LLAMAERADS_Enhanced").set_name( + "LLAMAERADS_Enhanced", register=True + ) +except Exception as e: + print("ERADS_Enhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_EnhancedPrecision import ERADS_EnhancedPrecision + + lama_register["ERADS_EnhancedPrecision"] = ERADS_EnhancedPrecision + LLAMAERADS_EnhancedPrecision = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision").set_name( + "LLAMAERADS_EnhancedPrecision", register=True + ) +except Exception as e: + print("ERADS_EnhancedPrecision can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_HyperOptimized import ERADS_HyperOptimized + + lama_register["ERADS_HyperOptimized"] = ERADS_HyperOptimized + LLAMAERADS_HyperOptimized = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized").set_name( + "LLAMAERADS_HyperOptimized", register=True + ) +except Exception as e: + print("ERADS_HyperOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_NextGen import ERADS_NextGen + + lama_register["ERADS_NextGen"] = ERADS_NextGen + LLAMAERADS_NextGen = NonObjectOptimizer(method="LLAMAERADS_NextGen").set_name( + "LLAMAERADS_NextGen", register=True + ) +except Exception as e: + print("ERADS_NextGen can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Optimized import ERADS_Optimized + + lama_register["ERADS_Optimized"] = ERADS_Optimized + LLAMAERADS_Optimized = NonObjectOptimizer(method="LLAMAERADS_Optimized").set_name( + "LLAMAERADS_Optimized", register=True + ) +except Exception as e: + print("ERADS_Optimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Precision import ERADS_Precision + + lama_register["ERADS_Precision"] = ERADS_Precision + LLAMAERADS_Precision = NonObjectOptimizer(method="LLAMAERADS_Precision").set_name( + "LLAMAERADS_Precision", register=True + ) +except Exception as e: + print("ERADS_Precision can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressiveAdaptive import ERADS_ProgressiveAdaptive + + lama_register["ERADS_ProgressiveAdaptive"] = ERADS_ProgressiveAdaptive + LLAMAERADS_ProgressiveAdaptive = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive").set_name( + "LLAMAERADS_ProgressiveAdaptive", register=True + ) +except Exception as e: + print("ERADS_ProgressiveAdaptive can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressiveAdaptivePlus import ERADS_ProgressiveAdaptivePlus + + lama_register["ERADS_ProgressiveAdaptivePlus"] = ERADS_ProgressiveAdaptivePlus + LLAMAERADS_ProgressiveAdaptivePlus = NonObjectOptimizer( + method="LLAMAERADS_ProgressiveAdaptivePlus" + ).set_name("LLAMAERADS_ProgressiveAdaptivePlus", register=True) +except Exception as e: + print("ERADS_ProgressiveAdaptivePlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressiveDynamic import ERADS_ProgressiveDynamic + + lama_register["ERADS_ProgressiveDynamic"] = ERADS_ProgressiveDynamic + LLAMAERADS_ProgressiveDynamic = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic").set_name( + "LLAMAERADS_ProgressiveDynamic", register=True + ) +except Exception as e: + print("ERADS_ProgressiveDynamic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressiveOptimized import ERADS_ProgressiveOptimized + + lama_register["ERADS_ProgressiveOptimized"] = ERADS_ProgressiveOptimized + LLAMAERADS_ProgressiveOptimized = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized").set_name( + "LLAMAERADS_ProgressiveOptimized", register=True + ) +except Exception as e: + print("ERADS_ProgressiveOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressivePrecision import ERADS_ProgressivePrecision + + lama_register["ERADS_ProgressivePrecision"] = ERADS_ProgressivePrecision + LLAMAERADS_ProgressivePrecision = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision").set_name( + "LLAMAERADS_ProgressivePrecision", register=True + ) +except Exception as e: + print("ERADS_ProgressivePrecision can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_ProgressiveRefinement import ERADS_ProgressiveRefinement + + lama_register["ERADS_ProgressiveRefinement"] = ERADS_ProgressiveRefinement + LLAMAERADS_ProgressiveRefinement = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement").set_name( + "LLAMAERADS_ProgressiveRefinement", register=True + ) +except Exception as e: + print("ERADS_ProgressiveRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumFlux import ERADS_QuantumFlux + + lama_register["ERADS_QuantumFlux"] = ERADS_QuantumFlux + LLAMAERADS_QuantumFlux = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux").set_name( + "LLAMAERADS_QuantumFlux", register=True + ) +except Exception as e: + print("ERADS_QuantumFlux can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumFluxPro import ERADS_QuantumFluxPro + + lama_register["ERADS_QuantumFluxPro"] = ERADS_QuantumFluxPro + LLAMAERADS_QuantumFluxPro = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro").set_name( + "LLAMAERADS_QuantumFluxPro", register=True + ) +except Exception as e: + print("ERADS_QuantumFluxPro can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumFluxUltra import ERADS_QuantumFluxUltra + + lama_register["ERADS_QuantumFluxUltra"] = ERADS_QuantumFluxUltra + LLAMAERADS_QuantumFluxUltra = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra").set_name( + "LLAMAERADS_QuantumFluxUltra", register=True + ) +except Exception as e: + print("ERADS_QuantumFluxUltra can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefined import ERADS_QuantumFluxUltraRefined + + lama_register["ERADS_QuantumFluxUltraRefined"] = ERADS_QuantumFluxUltraRefined + LLAMAERADS_QuantumFluxUltraRefined = NonObjectOptimizer( + method="LLAMAERADS_QuantumFluxUltraRefined" + ).set_name("LLAMAERADS_QuantumFluxUltraRefined", register=True) +except Exception as e: + print("ERADS_QuantumFluxUltraRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefinedPlus import ( + ERADS_QuantumFluxUltraRefinedPlus, + ) + + lama_register["ERADS_QuantumFluxUltraRefinedPlus"] = ERADS_QuantumFluxUltraRefinedPlus + LLAMAERADS_QuantumFluxUltraRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_QuantumFluxUltraRefinedPlus" + ).set_name("LLAMAERADS_QuantumFluxUltraRefinedPlus", register=True) +except Exception as e: + print("ERADS_QuantumFluxUltraRefinedPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_QuantumLeap import ERADS_QuantumLeap + + lama_register["ERADS_QuantumLeap"] = ERADS_QuantumLeap + LLAMAERADS_QuantumLeap = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap").set_name( + "LLAMAERADS_QuantumLeap", register=True + ) +except Exception as e: + print("ERADS_QuantumLeap can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Refined import ERADS_Refined + + lama_register["ERADS_Refined"] = ERADS_Refined + LLAMAERADS_Refined = NonObjectOptimizer(method="LLAMAERADS_Refined").set_name( + "LLAMAERADS_Refined", register=True + ) +except Exception as e: + print("ERADS_Refined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Superior import ERADS_Superior + + lama_register["ERADS_Superior"] = ERADS_Superior + LLAMAERADS_Superior = NonObjectOptimizer(method="LLAMAERADS_Superior").set_name( + "LLAMAERADS_Superior", register=True + ) +except Exception as e: + print("ERADS_Superior can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_Ultra import ERADS_Ultra + + lama_register["ERADS_Ultra"] = ERADS_Ultra + LLAMAERADS_Ultra = NonObjectOptimizer(method="LLAMAERADS_Ultra").set_name( + "LLAMAERADS_Ultra", register=True + ) +except Exception as e: + print("ERADS_Ultra can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamic import ERADS_UltraDynamic + + lama_register["ERADS_UltraDynamic"] = ERADS_UltraDynamic + LLAMAERADS_UltraDynamic = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic").set_name( + "LLAMAERADS_UltraDynamic", register=True + ) +except Exception as e: + print("ERADS_UltraDynamic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMax import ERADS_UltraDynamicMax + + lama_register["ERADS_UltraDynamicMax"] = ERADS_UltraDynamicMax + LLAMAERADS_UltraDynamicMax = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax").set_name( + "LLAMAERADS_UltraDynamicMax", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicMax can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxEnhanced import ERADS_UltraDynamicMaxEnhanced + + lama_register["ERADS_UltraDynamicMaxEnhanced"] = ERADS_UltraDynamicMaxEnhanced + LLAMAERADS_UltraDynamicMaxEnhanced = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxEnhanced" + ).set_name("LLAMAERADS_UltraDynamicMaxEnhanced", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHybrid import ERADS_UltraDynamicMaxHybrid + + lama_register["ERADS_UltraDynamicMaxHybrid"] = ERADS_UltraDynamicMaxHybrid + LLAMAERADS_UltraDynamicMaxHybrid = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid").set_name( + "LLAMAERADS_UltraDynamicMaxHybrid", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicMaxHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyper import ERADS_UltraDynamicMaxHyper + + lama_register["ERADS_UltraDynamicMaxHyper"] = ERADS_UltraDynamicMaxHyper + LLAMAERADS_UltraDynamicMaxHyper = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper").set_name( + "LLAMAERADS_UltraDynamicMaxHyper", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicMaxHyper can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimized import ( + ERADS_UltraDynamicMaxHyperOptimized, + ) + + lama_register["ERADS_UltraDynamicMaxHyperOptimized"] = ERADS_UltraDynamicMaxHyperOptimized + LLAMAERADS_UltraDynamicMaxHyperOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimized", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimizedV4 import ( + ERADS_UltraDynamicMaxHyperOptimizedV4, + ) + + lama_register["ERADS_UltraDynamicMaxHyperOptimizedV4"] = ERADS_UltraDynamicMaxHyperOptimizedV4 + LLAMAERADS_UltraDynamicMaxHyperOptimizedV4 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimizedV4", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperOptimizedV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperPlus import ERADS_UltraDynamicMaxHyperPlus + + lama_register["ERADS_UltraDynamicMaxHyperPlus"] = ERADS_UltraDynamicMaxHyperPlus + LLAMAERADS_UltraDynamicMaxHyperPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperPlus", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefined import ( + ERADS_UltraDynamicMaxHyperRefined, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefined"] = ERADS_UltraDynamicMaxHyperRefined + LLAMAERADS_UltraDynamicMaxHyperRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefined", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimized import ( + ERADS_UltraDynamicMaxHyperRefinedOptimized, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimized"] = ERADS_UltraDynamicMaxHyperRefinedOptimized + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperRefinedOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 import ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV2, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV2"] = ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 + ) + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 import ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV3, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV3"] = ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 + ) + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedPlus import ( + ERADS_UltraDynamicMaxHyperRefinedPlus, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedPlus"] = ERADS_UltraDynamicMaxHyperRefinedPlus + LLAMAERADS_UltraDynamicMaxHyperRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedPlus", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxHyperRefinedPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimal import ERADS_UltraDynamicMaxOptimal + + lama_register["ERADS_UltraDynamicMaxOptimal"] = ERADS_UltraDynamicMaxOptimal + LLAMAERADS_UltraDynamicMaxOptimal = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimal" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimal", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxOptimal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimized import ERADS_UltraDynamicMaxOptimized + + lama_register["ERADS_UltraDynamicMaxOptimized"] = ERADS_UltraDynamicMaxOptimized + LLAMAERADS_UltraDynamicMaxOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimized", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimizedPlus import ( + ERADS_UltraDynamicMaxOptimizedPlus, + ) + + lama_register["ERADS_UltraDynamicMaxOptimizedPlus"] = ERADS_UltraDynamicMaxOptimizedPlus + LLAMAERADS_UltraDynamicMaxOptimizedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimizedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimizedPlus", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxOptimizedPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPlus import ERADS_UltraDynamicMaxPlus + + lama_register["ERADS_UltraDynamicMaxPlus"] = ERADS_UltraDynamicMaxPlus + LLAMAERADS_UltraDynamicMaxPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus").set_name( + "LLAMAERADS_UltraDynamicMaxPlus", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicMaxPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPrecision import ERADS_UltraDynamicMaxPrecision + + lama_register["ERADS_UltraDynamicMaxPrecision"] = ERADS_UltraDynamicMaxPrecision + LLAMAERADS_UltraDynamicMaxPrecision = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxPrecision" + ).set_name("LLAMAERADS_UltraDynamicMaxPrecision", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxPrecision can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefined import ERADS_UltraDynamicMaxRefined + + lama_register["ERADS_UltraDynamicMaxRefined"] = ERADS_UltraDynamicMaxRefined + LLAMAERADS_UltraDynamicMaxRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxRefined", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefinedPlus import ERADS_UltraDynamicMaxRefinedPlus + + lama_register["ERADS_UltraDynamicMaxRefinedPlus"] = ERADS_UltraDynamicMaxRefinedPlus + LLAMAERADS_UltraDynamicMaxRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxRefinedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxRefinedPlus", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxRefinedPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxSupreme import ERADS_UltraDynamicMaxSupreme + + lama_register["ERADS_UltraDynamicMaxSupreme"] = ERADS_UltraDynamicMaxSupreme + LLAMAERADS_UltraDynamicMaxSupreme = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxSupreme" + ).set_name("LLAMAERADS_UltraDynamicMaxSupreme", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxSupreme can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltra import ERADS_UltraDynamicMaxUltra + + lama_register["ERADS_UltraDynamicMaxUltra"] = ERADS_UltraDynamicMaxUltra + LLAMAERADS_UltraDynamicMaxUltra = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra").set_name( + "LLAMAERADS_UltraDynamicMaxUltra", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicMaxUltra can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraPlus import ERADS_UltraDynamicMaxUltraPlus + + lama_register["ERADS_UltraDynamicMaxUltraPlus"] = ERADS_UltraDynamicMaxUltraPlus + LLAMAERADS_UltraDynamicMaxUltraPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraPlus", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefined import ( + ERADS_UltraDynamicMaxUltraRefined, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefined"] = ERADS_UltraDynamicMaxUltraRefined + LLAMAERADS_UltraDynamicMaxUltraRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefined", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV2 import ( + ERADS_UltraDynamicMaxUltraRefinedV2, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV2"] = ERADS_UltraDynamicMaxUltraRefinedV2 + LLAMAERADS_UltraDynamicMaxUltraRefinedV2 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV2", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV3 import ( + ERADS_UltraDynamicMaxUltraRefinedV3, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV3"] = ERADS_UltraDynamicMaxUltraRefinedV3 + LLAMAERADS_UltraDynamicMaxUltraRefinedV3 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV3", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV4 import ( + ERADS_UltraDynamicMaxUltraRefinedV4, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV4"] = ERADS_UltraDynamicMaxUltraRefinedV4 + LLAMAERADS_UltraDynamicMaxUltraRefinedV4 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV4", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV5 import ( + ERADS_UltraDynamicMaxUltraRefinedV5, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV5"] = ERADS_UltraDynamicMaxUltraRefinedV5 + LLAMAERADS_UltraDynamicMaxUltraRefinedV5 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV5", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV6 import ( + ERADS_UltraDynamicMaxUltraRefinedV6, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV6"] = ERADS_UltraDynamicMaxUltraRefinedV6 + LLAMAERADS_UltraDynamicMaxUltraRefinedV6 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV6", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV7 import ( + ERADS_UltraDynamicMaxUltraRefinedV7, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV7"] = ERADS_UltraDynamicMaxUltraRefinedV7 + LLAMAERADS_UltraDynamicMaxUltraRefinedV7 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV7", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV8 import ( + ERADS_UltraDynamicMaxUltraRefinedV8, + ) + + lama_register["ERADS_UltraDynamicMaxUltraRefinedV8"] = ERADS_UltraDynamicMaxUltraRefinedV8 + LLAMAERADS_UltraDynamicMaxUltraRefinedV8 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV8", register=True) +except Exception as e: + print("ERADS_UltraDynamicMaxUltraRefinedV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicPlus import ERADS_UltraDynamicPlus + + lama_register["ERADS_UltraDynamicPlus"] = ERADS_UltraDynamicPlus + LLAMAERADS_UltraDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus").set_name( + "LLAMAERADS_UltraDynamicPlus", register=True + ) +except Exception as e: + print("ERADS_UltraDynamicPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionEnhanced import ( + ERADS_UltraDynamicPrecisionEnhanced, + ) + + lama_register["ERADS_UltraDynamicPrecisionEnhanced"] = ERADS_UltraDynamicPrecisionEnhanced + LLAMAERADS_UltraDynamicPrecisionEnhanced = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicPrecisionEnhanced" + ).set_name("LLAMAERADS_UltraDynamicPrecisionEnhanced", register=True) +except Exception as e: + print("ERADS_UltraDynamicPrecisionEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionOptimized import ( + ERADS_UltraDynamicPrecisionOptimized, + ) + + lama_register["ERADS_UltraDynamicPrecisionOptimized"] = ERADS_UltraDynamicPrecisionOptimized + LLAMAERADS_UltraDynamicPrecisionOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicPrecisionOptimized" + ).set_name("LLAMAERADS_UltraDynamicPrecisionOptimized", register=True) +except Exception as e: + print("ERADS_UltraDynamicPrecisionOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraEnhanced import ERADS_UltraEnhanced + + lama_register["ERADS_UltraEnhanced"] = ERADS_UltraEnhanced + LLAMAERADS_UltraEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced").set_name( + "LLAMAERADS_UltraEnhanced", register=True + ) +except Exception as e: + print("ERADS_UltraEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraMax import ERADS_UltraMax + + lama_register["ERADS_UltraMax"] = ERADS_UltraMax + LLAMAERADS_UltraMax = NonObjectOptimizer(method="LLAMAERADS_UltraMax").set_name( + "LLAMAERADS_UltraMax", register=True + ) +except Exception as e: + print("ERADS_UltraMax can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraOptimized import ERADS_UltraOptimized + + lama_register["ERADS_UltraOptimized"] = ERADS_UltraOptimized + LLAMAERADS_UltraOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized").set_name( + "LLAMAERADS_UltraOptimized", register=True + ) +except Exception as e: + print("ERADS_UltraOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraPrecise import ERADS_UltraPrecise + + lama_register["ERADS_UltraPrecise"] = ERADS_UltraPrecise + LLAMAERADS_UltraPrecise = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise").set_name( + "LLAMAERADS_UltraPrecise", register=True + ) +except Exception as e: + print("ERADS_UltraPrecise can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERADS_UltraRefined import ERADS_UltraRefined + + lama_register["ERADS_UltraRefined"] = ERADS_UltraRefined + LLAMAERADS_UltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraRefined").set_name( + "LLAMAERADS_UltraRefined", register=True + ) +except Exception as e: + print("ERADS_UltraRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ERAMEDS import ERAMEDS + + lama_register["ERAMEDS"] = ERAMEDS + LLAMAERAMEDS = NonObjectOptimizer(method="LLAMAERAMEDS").set_name("LLAMAERAMEDS", register=True) +except Exception as e: + print("ERAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ESADE import ESADE + + lama_register["ESADE"] = ESADE + LLAMAESADE = NonObjectOptimizer(method="LLAMAESADE").set_name("LLAMAESADE", register=True) +except Exception as e: + print("ESADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ESADEPFLLP import ESADEPFLLP + + lama_register["ESADEPFLLP"] = ESADEPFLLP + LLAMAESADEPFLLP = NonObjectOptimizer(method="LLAMAESADEPFLLP").set_name("LLAMAESADEPFLLP", register=True) +except Exception as e: + print("ESADEPFLLP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ESBASM import ESBASM + + lama_register["ESBASM"] = ESBASM + LLAMAESBASM = NonObjectOptimizer(method="LLAMAESBASM").set_name("LLAMAESBASM", register=True) +except Exception as e: + print("ESBASM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveCrowdingHybridOptimizer import ( + EliteAdaptiveCrowdingHybridOptimizer, + ) + + lama_register["EliteAdaptiveCrowdingHybridOptimizer"] = EliteAdaptiveCrowdingHybridOptimizer + LLAMAEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteAdaptiveCrowdingHybridOptimizer" + ).set_name("LLAMAEliteAdaptiveCrowdingHybridOptimizer", register=True) +except Exception as e: + print("EliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveHybridDEPSO import EliteAdaptiveHybridDEPSO + + lama_register["EliteAdaptiveHybridDEPSO"] = EliteAdaptiveHybridDEPSO + LLAMAEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO").set_name( + "LLAMAEliteAdaptiveHybridDEPSO", register=True + ) +except Exception as e: + print("EliteAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveMemeticDifferentialEvolution import ( + EliteAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["EliteAdaptiveMemeticDifferentialEvolution"] = EliteAdaptiveMemeticDifferentialEvolution + LLAMAEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEliteAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("EliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 import ( + EliteAdaptiveMemoryDynamicCrowdingOptimizerV2, + ) + + lama_register["EliteAdaptiveMemoryDynamicCrowdingOptimizerV2"] = ( + EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 + ) + LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2 = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2" + ).set_name("LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2", register=True) +except Exception as e: + print("EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveMemoryHybridOptimizer import ( + EliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["EliteAdaptiveMemoryHybridOptimizer"] = EliteAdaptiveMemoryHybridOptimizer + LLAMAEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("EliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch import ( + EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch, + ) + + lama_register["EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch"] = ( + EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch + ) + LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch" + ).set_name("LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch", register=True) +except Exception as e: + print("EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteCovarianceMatrixAdaptationMemeticSearch import ( + EliteCovarianceMatrixAdaptationMemeticSearch, + ) + + lama_register["EliteCovarianceMatrixAdaptationMemeticSearch"] = ( + EliteCovarianceMatrixAdaptationMemeticSearch + ) + LLAMAEliteCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( + method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch" + ).set_name("LLAMAEliteCovarianceMatrixAdaptationMemeticSearch", register=True) +except Exception as e: + print("EliteCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteDynamicHybridOptimizer import EliteDynamicHybridOptimizer + + lama_register["EliteDynamicHybridOptimizer"] = EliteDynamicHybridOptimizer + LLAMAEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer").set_name( + "LLAMAEliteDynamicHybridOptimizer", register=True + ) +except Exception as e: + print("EliteDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteDynamicMemoryHybridOptimizer import ( + EliteDynamicMemoryHybridOptimizer, + ) + + lama_register["EliteDynamicMemoryHybridOptimizer"] = EliteDynamicMemoryHybridOptimizer + LLAMAEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMAEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: + print("EliteDynamicMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteDynamicMultiStrategyHybridDEPSO import ( + EliteDynamicMultiStrategyHybridDEPSO, + ) + + lama_register["EliteDynamicMultiStrategyHybridDEPSO"] = EliteDynamicMultiStrategyHybridDEPSO + LLAMAEliteDynamicMultiStrategyHybridDEPSO = NonObjectOptimizer( + method="LLAMAEliteDynamicMultiStrategyHybridDEPSO" + ).set_name("LLAMAEliteDynamicMultiStrategyHybridDEPSO", register=True) +except Exception as e: + print("EliteDynamicMultiStrategyHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedAdaptiveRestartDE import EliteGuidedAdaptiveRestartDE + + lama_register["EliteGuidedAdaptiveRestartDE"] = EliteGuidedAdaptiveRestartDE + LLAMAEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMAEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMAEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: + print("EliteGuidedAdaptiveRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedDualStrategyDE import EliteGuidedDualStrategyDE + + lama_register["EliteGuidedDualStrategyDE"] = EliteGuidedDualStrategyDE + LLAMAEliteGuidedDualStrategyDE = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE").set_name( + "LLAMAEliteGuidedDualStrategyDE", register=True + ) +except Exception as e: + print("EliteGuidedDualStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedHybridAdaptiveDE import EliteGuidedHybridAdaptiveDE + + lama_register["EliteGuidedHybridAdaptiveDE"] = EliteGuidedHybridAdaptiveDE + LLAMAEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE").set_name( + "LLAMAEliteGuidedHybridAdaptiveDE", register=True + ) +except Exception as e: + print("EliteGuidedHybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedHybridDE import EliteGuidedHybridDE + + lama_register["EliteGuidedHybridDE"] = EliteGuidedHybridDE + LLAMAEliteGuidedHybridDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE").set_name( + "LLAMAEliteGuidedHybridDE", register=True + ) +except Exception as e: + print("EliteGuidedHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedMutationDE import EliteGuidedMutationDE + + lama_register["EliteGuidedMutationDE"] = EliteGuidedMutationDE + LLAMAEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE").set_name( + "LLAMAEliteGuidedMutationDE", register=True + ) +except Exception as e: + print("EliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedMutationDE_v2 import EliteGuidedMutationDE_v2 + + lama_register["EliteGuidedMutationDE_v2"] = EliteGuidedMutationDE_v2 + LLAMAEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2").set_name( + "LLAMAEliteGuidedMutationDE_v2", register=True + ) +except Exception as e: + print("EliteGuidedMutationDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteGuidedQuantumAdaptiveDE import EliteGuidedQuantumAdaptiveDE + + lama_register["EliteGuidedQuantumAdaptiveDE"] = EliteGuidedQuantumAdaptiveDE + LLAMAEliteGuidedQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMAEliteGuidedQuantumAdaptiveDE" + ).set_name("LLAMAEliteGuidedQuantumAdaptiveDE", register=True) +except Exception as e: + print("EliteGuidedQuantumAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteHybridAdaptiveOptimizer import EliteHybridAdaptiveOptimizer + + lama_register["EliteHybridAdaptiveOptimizer"] = EliteHybridAdaptiveOptimizer + LLAMAEliteHybridAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEliteHybridAdaptiveOptimizer" + ).set_name("LLAMAEliteHybridAdaptiveOptimizer", register=True) +except Exception as e: + print("EliteHybridAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteMemoryEnhancedDynamicHybridOptimizer import ( + EliteMemoryEnhancedDynamicHybridOptimizer, + ) + + lama_register["EliteMemoryEnhancedDynamicHybridOptimizer"] = EliteMemoryEnhancedDynamicHybridOptimizer + LLAMAEliteMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAEliteMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: + print("EliteMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteMultiStrategySelfAdaptiveDE import EliteMultiStrategySelfAdaptiveDE + + lama_register["EliteMultiStrategySelfAdaptiveDE"] = EliteMultiStrategySelfAdaptiveDE + LLAMAEliteMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAEliteMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAEliteMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: + print("EliteMultiStrategySelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ElitePreservingDifferentialEvolution import ( + ElitePreservingDifferentialEvolution, + ) + + lama_register["ElitePreservingDifferentialEvolution"] = ElitePreservingDifferentialEvolution + LLAMAElitePreservingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAElitePreservingDifferentialEvolution" + ).set_name("LLAMAElitePreservingDifferentialEvolution", register=True) +except Exception as e: + print("ElitePreservingDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteQuantumAdaptiveExplorationOptimization import ( + EliteQuantumAdaptiveExplorationOptimization, + ) + + lama_register["EliteQuantumAdaptiveExplorationOptimization"] = EliteQuantumAdaptiveExplorationOptimization + LLAMAEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEliteQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMAEliteQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("EliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteQuantumDifferentialMemeticOptimizer import ( + EliteQuantumDifferentialMemeticOptimizer, + ) + + lama_register["EliteQuantumDifferentialMemeticOptimizer"] = EliteQuantumDifferentialMemeticOptimizer + LLAMAEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEliteQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMAEliteQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: + print("EliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteRefinedAdaptivePrecisionOptimizer import ( + EliteRefinedAdaptivePrecisionOptimizer, + ) + + lama_register["EliteRefinedAdaptivePrecisionOptimizer"] = EliteRefinedAdaptivePrecisionOptimizer + LLAMAEliteRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAEliteRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAEliteRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: + print("EliteRefinedAdaptivePrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EliteTranscendentalEvolutionaryOptimizer import ( + EliteTranscendentalEvolutionaryOptimizer, + ) + + lama_register["EliteTranscendentalEvolutionaryOptimizer"] = EliteTranscendentalEvolutionaryOptimizer + LLAMAEliteTranscendentalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEliteTranscendentalEvolutionaryOptimizer" + ).set_name("LLAMAEliteTranscendentalEvolutionaryOptimizer", register=True) +except Exception as e: + print("EliteTranscendentalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ElitistAdaptiveDE import ElitistAdaptiveDE + + lama_register["ElitistAdaptiveDE"] = ElitistAdaptiveDE + LLAMAElitistAdaptiveDE = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE").set_name( + "LLAMAElitistAdaptiveDE", register=True + ) +except Exception as e: + print("ElitistAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW import EnhancedAQAPSOHR_LSDIW + + lama_register["EnhancedAQAPSOHR_LSDIW"] = EnhancedAQAPSOHR_LSDIW + LLAMAEnhancedAQAPSOHR_LSDIW = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW").set_name( + "LLAMAEnhancedAQAPSOHR_LSDIW", register=True + ) +except Exception as e: + print("EnhancedAQAPSOHR_LSDIW can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW_AP import EnhancedAQAPSOHR_LSDIW_AP + + lama_register["EnhancedAQAPSOHR_LSDIW_AP"] = EnhancedAQAPSOHR_LSDIW_AP + LLAMAEnhancedAQAPSOHR_LSDIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP").set_name( + "LLAMAEnhancedAQAPSOHR_LSDIW_AP", register=True + ) +except Exception as e: + print("EnhancedAQAPSOHR_LSDIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP import EnhancedAQAPSO_LS_DIW_AP + + lama_register["EnhancedAQAPSO_LS_DIW_AP"] = EnhancedAQAPSO_LS_DIW_AP + LLAMAEnhancedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP").set_name( + "LLAMAEnhancedAQAPSO_LS_DIW_AP", register=True + ) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Final import EnhancedAQAPSO_LS_DIW_AP_Final + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Final"] = EnhancedAQAPSO_LS_DIW_AP_Final + LLAMAEnhancedAQAPSO_LS_DIW_AP_Final = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Final", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Final can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined import EnhancedAQAPSO_LS_DIW_AP_Refined + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Refined + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined_Final import ( + EnhancedAQAPSO_LS_DIW_AP_Refined_Final, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined_Final"] = EnhancedAQAPSO_LS_DIW_AP_Refined_Final + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Refined_Final can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Ultimate can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined"] = ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined + ) + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined"] = ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined + ) + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined", register=True) +except Exception as e: + print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v2 import ( + EnhancedAdaptiveChaoticFireworksOptimization_v2, + ) + + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v2"] = ( + EnhancedAdaptiveChaoticFireworksOptimization_v2 + ) + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2" + ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2", register=True) +except Exception as e: + print("EnhancedAdaptiveChaoticFireworksOptimization_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v3 import ( + EnhancedAdaptiveChaoticFireworksOptimization_v3, + ) + + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v3"] = ( + EnhancedAdaptiveChaoticFireworksOptimization_v3 + ) + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3" + ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3", register=True) +except Exception as e: + print("EnhancedAdaptiveChaoticFireworksOptimization_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveCohortMemeticAlgorithm import ( + EnhancedAdaptiveCohortMemeticAlgorithm, + ) + + lama_register["EnhancedAdaptiveCohortMemeticAlgorithm"] = EnhancedAdaptiveCohortMemeticAlgorithm + LLAMAEnhancedAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveCohortMemeticAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveCohortMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveControlledMemoryAnnealing import ( + EnhancedAdaptiveControlledMemoryAnnealing, + ) + + lama_register["EnhancedAdaptiveControlledMemoryAnnealing"] = EnhancedAdaptiveControlledMemoryAnnealing + LLAMAEnhancedAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing" + ).set_name("LLAMAEnhancedAdaptiveControlledMemoryAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveControlledMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 import ( + EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4"] = ( + EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 + ) + LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: + print("EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixEvolution import ( + EnhancedAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["EnhancedAdaptiveCovarianceMatrixEvolution"] = EnhancedAdaptiveCovarianceMatrixEvolution + LLAMAEnhancedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDEPSOOptimizer import EnhancedAdaptiveDEPSOOptimizer + + lama_register["EnhancedAdaptiveDEPSOOptimizer"] = EnhancedAdaptiveDEPSOOptimizer + LLAMAEnhancedAdaptiveDEPSOOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDEPSOOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDEPSOOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveDEPSOOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiffEvolutionGradientDescent import ( + EnhancedAdaptiveDiffEvolutionGradientDescent, + ) + + lama_register["EnhancedAdaptiveDiffEvolutionGradientDescent"] = ( + EnhancedAdaptiveDiffEvolutionGradientDescent + ) + LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent" + ).set_name("LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent", register=True) +except Exception as e: + print("EnhancedAdaptiveDiffEvolutionGradientDescent can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolution import ( + EnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolution"] = EnhancedAdaptiveDifferentialEvolution + LLAMAEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamic import ( + EnhancedAdaptiveDifferentialEvolutionDynamic, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamic"] = ( + EnhancedAdaptiveDifferentialEvolutionDynamic + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionDynamic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamicImproved import ( + EnhancedAdaptiveDifferentialEvolutionDynamicImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamicImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionDynamicImproved + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionDynamicImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionEnhanced import ( + EnhancedAdaptiveDifferentialEvolutionEnhanced, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionEnhanced"] = ( + EnhancedAdaptiveDifferentialEvolutionEnhanced + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefined import ( + EnhancedAdaptiveDifferentialEvolutionRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionRefined + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedImproved import ( + EnhancedAdaptiveDifferentialEvolutionRefinedImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedImproved + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionRefinedImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV2 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV2"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV2 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionRefinedV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV3 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV3, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV3"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV3 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionRefinedV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV4 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV4, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV4"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV4 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionRefinedV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV10 import ( + EnhancedAdaptiveDifferentialEvolutionV10, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV10"] = EnhancedAdaptiveDifferentialEvolutionV10 + LLAMAEnhancedAdaptiveDifferentialEvolutionV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV10", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV11 import ( + EnhancedAdaptiveDifferentialEvolutionV11, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV11"] = EnhancedAdaptiveDifferentialEvolutionV11 + LLAMAEnhancedAdaptiveDifferentialEvolutionV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV11", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV12 import ( + EnhancedAdaptiveDifferentialEvolutionV12, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV12"] = EnhancedAdaptiveDifferentialEvolutionV12 + LLAMAEnhancedAdaptiveDifferentialEvolutionV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV12", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV13 import ( + EnhancedAdaptiveDifferentialEvolutionV13, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV13"] = EnhancedAdaptiveDifferentialEvolutionV13 + LLAMAEnhancedAdaptiveDifferentialEvolutionV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV13", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV14 import ( + EnhancedAdaptiveDifferentialEvolutionV14, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV14"] = EnhancedAdaptiveDifferentialEvolutionV14 + LLAMAEnhancedAdaptiveDifferentialEvolutionV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV14", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV15 import ( + EnhancedAdaptiveDifferentialEvolutionV15, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV15"] = EnhancedAdaptiveDifferentialEvolutionV15 + LLAMAEnhancedAdaptiveDifferentialEvolutionV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV15", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV16 import ( + EnhancedAdaptiveDifferentialEvolutionV16, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV16"] = EnhancedAdaptiveDifferentialEvolutionV16 + LLAMAEnhancedAdaptiveDifferentialEvolutionV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV16", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV17 import ( + EnhancedAdaptiveDifferentialEvolutionV17, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV17"] = EnhancedAdaptiveDifferentialEvolutionV17 + LLAMAEnhancedAdaptiveDifferentialEvolutionV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV17", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV18 import ( + EnhancedAdaptiveDifferentialEvolutionV18, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV18"] = EnhancedAdaptiveDifferentialEvolutionV18 + LLAMAEnhancedAdaptiveDifferentialEvolutionV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV18", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV19 import ( + EnhancedAdaptiveDifferentialEvolutionV19, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV19"] = EnhancedAdaptiveDifferentialEvolutionV19 + LLAMAEnhancedAdaptiveDifferentialEvolutionV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV19", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV20 import ( + EnhancedAdaptiveDifferentialEvolutionV20, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV20"] = EnhancedAdaptiveDifferentialEvolutionV20 + LLAMAEnhancedAdaptiveDifferentialEvolutionV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV20", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV21 import ( + EnhancedAdaptiveDifferentialEvolutionV21, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV21"] = EnhancedAdaptiveDifferentialEvolutionV21 + LLAMAEnhancedAdaptiveDifferentialEvolutionV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV21", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV22 import ( + EnhancedAdaptiveDifferentialEvolutionV22, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV22"] = EnhancedAdaptiveDifferentialEvolutionV22 + LLAMAEnhancedAdaptiveDifferentialEvolutionV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV22", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV23 import ( + EnhancedAdaptiveDifferentialEvolutionV23, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV23"] = EnhancedAdaptiveDifferentialEvolutionV23 + LLAMAEnhancedAdaptiveDifferentialEvolutionV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV23", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV24 import ( + EnhancedAdaptiveDifferentialEvolutionV24, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV24"] = EnhancedAdaptiveDifferentialEvolutionV24 + LLAMAEnhancedAdaptiveDifferentialEvolutionV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV24", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV25 import ( + EnhancedAdaptiveDifferentialEvolutionV25, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV25"] = EnhancedAdaptiveDifferentialEvolutionV25 + LLAMAEnhancedAdaptiveDifferentialEvolutionV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV25", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV26 import ( + EnhancedAdaptiveDifferentialEvolutionV26, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV26"] = EnhancedAdaptiveDifferentialEvolutionV26 + LLAMAEnhancedAdaptiveDifferentialEvolutionV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV26", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV27 import ( + EnhancedAdaptiveDifferentialEvolutionV27, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV27"] = EnhancedAdaptiveDifferentialEvolutionV27 + LLAMAEnhancedAdaptiveDifferentialEvolutionV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV27", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV28 import ( + EnhancedAdaptiveDifferentialEvolutionV28, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV28"] = EnhancedAdaptiveDifferentialEvolutionV28 + LLAMAEnhancedAdaptiveDifferentialEvolutionV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV28", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV4 import ( + EnhancedAdaptiveDifferentialEvolutionV4, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV4"] = EnhancedAdaptiveDifferentialEvolutionV4 + LLAMAEnhancedAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV5 import ( + EnhancedAdaptiveDifferentialEvolutionV5, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV5"] = EnhancedAdaptiveDifferentialEvolutionV5 + LLAMAEnhancedAdaptiveDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV5", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV6 import ( + EnhancedAdaptiveDifferentialEvolutionV6, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV6"] = EnhancedAdaptiveDifferentialEvolutionV6 + LLAMAEnhancedAdaptiveDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV6", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV7 import ( + EnhancedAdaptiveDifferentialEvolutionV7, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV7"] = EnhancedAdaptiveDifferentialEvolutionV7 + LLAMAEnhancedAdaptiveDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV7", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV8 import ( + EnhancedAdaptiveDifferentialEvolutionV8, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV8"] = EnhancedAdaptiveDifferentialEvolutionV8 + LLAMAEnhancedAdaptiveDifferentialEvolutionV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV8", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV9 import ( + EnhancedAdaptiveDifferentialEvolutionV9, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionV9"] = EnhancedAdaptiveDifferentialEvolutionV9 + LLAMAEnhancedAdaptiveDifferentialEvolutionV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV9", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( + EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( + EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved" + ).set_name( + "LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved", register=True + ) +except Exception as e: + print( + "EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters import ( + EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters"] = ( + EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters + ) + LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialMemeticAlgorithm import ( + EnhancedAdaptiveDifferentialMemeticAlgorithm, + ) + + lama_register["EnhancedAdaptiveDifferentialMemeticAlgorithm"] = ( + EnhancedAdaptiveDifferentialMemeticAlgorithm + ) + LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDirectionalBiasQuorumOptimization import ( + EnhancedAdaptiveDirectionalBiasQuorumOptimization, + ) + + lama_register["EnhancedAdaptiveDirectionalBiasQuorumOptimization"] = ( + EnhancedAdaptiveDirectionalBiasQuorumOptimization + ) + LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedEvolutionStrategy import ( + EnhancedAdaptiveDiversifiedEvolutionStrategy, + ) + + lama_register["EnhancedAdaptiveDiversifiedEvolutionStrategy"] = ( + EnhancedAdaptiveDiversifiedEvolutionStrategy + ) + LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization + ) + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 + ) + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 + ) + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 + ) + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearch import ( + EnhancedAdaptiveDiversifiedHarmonySearch, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearch"] = EnhancedAdaptiveDiversifiedHarmonySearch + LLAMAEnhancedAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizer import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizer, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizer"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizer + ) + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 + ) + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 + ) + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 + ) + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 + ) + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV2 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV2"] = EnhancedAdaptiveDiversifiedHarmonySearchV2 + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV3 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV3, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV3"] = EnhancedAdaptiveDiversifiedHarmonySearchV3 + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV4 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV4, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV4"] = EnhancedAdaptiveDiversifiedHarmonySearchV4 + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedHarmonySearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm import ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm"] = ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm + ) + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 import ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2"] = ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 + ) + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedSearch import ( + EnhancedAdaptiveDiversifiedSearch, + ) + + lama_register["EnhancedAdaptiveDiversifiedSearch"] = EnhancedAdaptiveDiversifiedSearch + LLAMAEnhancedAdaptiveDiversifiedSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedSearch" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveDiversifiedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDolphinPodOptimization import ( + EnhancedAdaptiveDolphinPodOptimization, + ) + + lama_register["EnhancedAdaptiveDolphinPodOptimization"] = EnhancedAdaptiveDolphinPodOptimization + LLAMAEnhancedAdaptiveDolphinPodOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDolphinPodOptimization" + ).set_name("LLAMAEnhancedAdaptiveDolphinPodOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveDolphinPodOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization import ( + EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( + EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization + ) + LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( + EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl, + ) + + lama_register["EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( + EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl + ) + LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) +except Exception as e: + print("EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV2 import ( + EnhancedAdaptiveDualPhaseStrategyV2, + ) + + lama_register["EnhancedAdaptiveDualPhaseStrategyV2"] = EnhancedAdaptiveDualPhaseStrategyV2 + LLAMAEnhancedAdaptiveDualPhaseStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDualPhaseStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV5 import ( + EnhancedAdaptiveDualPhaseStrategyV5, + ) + + lama_register["EnhancedAdaptiveDualPhaseStrategyV5"] = EnhancedAdaptiveDualPhaseStrategyV5 + LLAMAEnhancedAdaptiveDualPhaseStrategyV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV5", register=True) +except Exception as e: + print("EnhancedAdaptiveDualPhaseStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDualStrategyOptimizer import ( + EnhancedAdaptiveDualStrategyOptimizer, + ) + + lama_register["EnhancedAdaptiveDualStrategyOptimizer"] = EnhancedAdaptiveDualStrategyOptimizer + LLAMAEnhancedAdaptiveDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualStrategyOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDualStrategyOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveDualStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDE import EnhancedAdaptiveDynamicDE + + lama_register["EnhancedAdaptiveDynamicDE"] = EnhancedAdaptiveDynamicDE + LLAMAEnhancedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE").set_name( + "LLAMAEnhancedAdaptiveDynamicDE", register=True + ) +except Exception as e: + print("EnhancedAdaptiveDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDifferentialEvolution import ( + EnhancedAdaptiveDynamicDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveDynamicDifferentialEvolution"] = ( + EnhancedAdaptiveDynamicDifferentialEvolution + ) + LLAMAEnhancedAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV19 import ( + EnhancedAdaptiveDynamicDualPhaseStrategyV19, + ) + + lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV19"] = EnhancedAdaptiveDynamicDualPhaseStrategyV19 + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19" + ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicDualPhaseStrategyV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV22 import ( + EnhancedAdaptiveDynamicDualPhaseStrategyV22, + ) + + lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV22"] = EnhancedAdaptiveDynamicDualPhaseStrategyV22 + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22" + ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicDualPhaseStrategyV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithm import ( + EnhancedAdaptiveDynamicFireworkAlgorithm, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithm"] = EnhancedAdaptiveDynamicFireworkAlgorithm + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced import ( + EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced + ) + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmImproved import ( + EnhancedAdaptiveDynamicFireworkAlgorithmImproved, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmImproved"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmImproved + ) + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmRefined import ( + EnhancedAdaptiveDynamicFireworkAlgorithmRefined, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmRefined"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmRefined + ) + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkAlgorithmRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 + ) + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 + ) + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 + ) + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearch import ( + EnhancedAdaptiveDynamicHarmonySearch, + ) + + lama_register["EnhancedAdaptiveDynamicHarmonySearch"] = EnhancedAdaptiveDynamicHarmonySearch + LLAMAEnhancedAdaptiveDynamicHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearch", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV2 import ( + EnhancedAdaptiveDynamicHarmonySearchV2, + ) + + lama_register["EnhancedAdaptiveDynamicHarmonySearchV2"] = EnhancedAdaptiveDynamicHarmonySearchV2 + LLAMAEnhancedAdaptiveDynamicHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicHarmonySearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV3 import ( + EnhancedAdaptiveDynamicHarmonySearchV3, + ) + + lama_register["EnhancedAdaptiveDynamicHarmonySearchV3"] = EnhancedAdaptiveDynamicHarmonySearchV3 + LLAMAEnhancedAdaptiveDynamicHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicHarmonySearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) + + lama_register["EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( + EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm + ) + LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicQuantumSwarmOptimization import ( + EnhancedAdaptiveDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDynamicQuantumSwarmOptimization"] = ( + EnhancedAdaptiveDynamicQuantumSwarmOptimization + ) + LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveEliteDifferentialEvolution import ( + EnhancedAdaptiveEliteDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveEliteDifferentialEvolution"] = EnhancedAdaptiveEliteDifferentialEvolution + LLAMAEnhancedAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveEliteDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveEliteDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveEliteGuidedMutationDE_v2 import ( + EnhancedAdaptiveEliteGuidedMutationDE_v2, + ) + + lama_register["EnhancedAdaptiveEliteGuidedMutationDE_v2"] = EnhancedAdaptiveEliteGuidedMutationDE_v2 + LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2" + ).set_name("LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2", register=True) +except Exception as e: + print("EnhancedAdaptiveEliteGuidedMutationDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution + ) + LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveEnvironmentalStrategyV24 import ( + EnhancedAdaptiveEnvironmentalStrategyV24, + ) + + lama_register["EnhancedAdaptiveEnvironmentalStrategyV24"] = EnhancedAdaptiveEnvironmentalStrategyV24 + LLAMAEnhancedAdaptiveEnvironmentalStrategyV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24" + ).set_name("LLAMAEnhancedAdaptiveEnvironmentalStrategyV24", register=True) +except Exception as e: + print("EnhancedAdaptiveEnvironmentalStrategyV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy import ( + EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy, + ) + + lama_register["EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( + EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy + ) + LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy" + ).set_name("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) +except Exception as e: + print("EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationExploitationAlgorithm import ( + EnhancedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["EnhancedAdaptiveExplorationExploitationAlgorithm"] = ( + EnhancedAdaptiveExplorationExploitationAlgorithm + ) + LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationOptimizer import ( + EnhancedAdaptiveExplorationOptimizer, + ) + + lama_register["EnhancedAdaptiveExplorationOptimizer"] = EnhancedAdaptiveExplorationOptimizer + LLAMAEnhancedAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveExplorationOptimizer" + ).set_name("LLAMAEnhancedAdaptiveExplorationOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveFireworkAlgorithm import ( + EnhancedAdaptiveFireworkAlgorithm, + ) + + lama_register["EnhancedAdaptiveFireworkAlgorithm"] = EnhancedAdaptiveFireworkAlgorithm + LLAMAEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveFireworksAlgorithm import ( + EnhancedAdaptiveFireworksAlgorithm, + ) + + lama_register["EnhancedAdaptiveFireworksAlgorithm"] = EnhancedAdaptiveFireworksAlgorithm + LLAMAEnhancedAdaptiveFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveFireworksAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveFireworksAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGaussianSearch import EnhancedAdaptiveGaussianSearch + + lama_register["EnhancedAdaptiveGaussianSearch"] = EnhancedAdaptiveGaussianSearch + LLAMAEnhancedAdaptiveGaussianSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGaussianSearch" + ).set_name("LLAMAEnhancedAdaptiveGaussianSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveGaussianSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBalancedCrossoverPSO import ( + EnhancedAdaptiveGradientBalancedCrossoverPSO, + ) + + lama_register["EnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( + EnhancedAdaptiveGradientBalancedCrossoverPSO + ) + LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: + print("EnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing import ( + EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing + ) + LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGranularStrategyV26 import ( + EnhancedAdaptiveGranularStrategyV26, + ) + + lama_register["EnhancedAdaptiveGranularStrategyV26"] = EnhancedAdaptiveGranularStrategyV26 + LLAMAEnhancedAdaptiveGranularStrategyV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGranularStrategyV26" + ).set_name("LLAMAEnhancedAdaptiveGranularStrategyV26", register=True) +except Exception as e: + print("EnhancedAdaptiveGranularStrategyV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV10 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV10, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV10"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV10 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV11 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV11, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV11"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV11 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV12 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV12, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV12"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV12 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV19 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV19, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV19"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV19 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV20 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV20, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV20"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV20 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV21 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV21, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV21"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV21 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV27 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV27, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV27"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV27 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV28 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV28, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV28"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV28 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV3 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV3, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV3"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV3 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV4 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV4, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV4"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV4 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV5 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV5, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV5"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV5 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV6 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV6, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV6"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV6 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV7 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV7, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV7"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV7 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV8 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV8, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV8"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV8 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV9 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV9, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV9"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV9 + ) + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9", register=True) +except Exception as e: + print("EnhancedAdaptiveGravitationalSwarmIntelligenceV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( + EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( + EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + ) + LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" + ).set_name( + "LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True + ) +except Exception as e: + print( + "EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedDifferentialEvolution import ( + EnhancedAdaptiveGuidedDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveGuidedDifferentialEvolution"] = EnhancedAdaptiveGuidedDifferentialEvolution + LLAMAEnhancedAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveGuidedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveGuidedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedMutationOptimizer import ( + EnhancedAdaptiveGuidedMutationOptimizer, + ) + + lama_register["EnhancedAdaptiveGuidedMutationOptimizer"] = EnhancedAdaptiveGuidedMutationOptimizer + LLAMAEnhancedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAEnhancedAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveGuidedMutationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearch import ( + EnhancedAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearch"] = EnhancedAdaptiveHarmonicFireworksTabuSearch + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearchV2 import ( + EnhancedAdaptiveHarmonicFireworksTabuSearchV2, + ) + + lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearchV2"] = ( + EnhancedAdaptiveHarmonicFireworksTabuSearchV2 + ) + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicFireworksTabuSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicOptimizationV2 import ( + EnhancedAdaptiveHarmonicOptimizationV2, + ) + + lama_register["EnhancedAdaptiveHarmonicOptimizationV2"] = EnhancedAdaptiveHarmonicOptimizationV2 + LLAMAEnhancedAdaptiveHarmonicOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonicOptimizationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV10 import ( + EnhancedAdaptiveHarmonicTabuSearchV10, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV10"] = EnhancedAdaptiveHarmonicTabuSearchV10 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV18 import ( + EnhancedAdaptiveHarmonicTabuSearchV18, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV18"] = EnhancedAdaptiveHarmonicTabuSearchV18 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV21 import ( + EnhancedAdaptiveHarmonicTabuSearchV21, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV21"] = EnhancedAdaptiveHarmonicTabuSearchV21 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV22 import ( + EnhancedAdaptiveHarmonicTabuSearchV22, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV22"] = EnhancedAdaptiveHarmonicTabuSearchV22 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV23 import ( + EnhancedAdaptiveHarmonicTabuSearchV23, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV23"] = EnhancedAdaptiveHarmonicTabuSearchV23 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV25 import ( + EnhancedAdaptiveHarmonicTabuSearchV25, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV25"] = EnhancedAdaptiveHarmonicTabuSearchV25 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV26 import ( + EnhancedAdaptiveHarmonicTabuSearchV26, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV26"] = EnhancedAdaptiveHarmonicTabuSearchV26 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV27 import ( + EnhancedAdaptiveHarmonicTabuSearchV27, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV27"] = EnhancedAdaptiveHarmonicTabuSearchV27 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV29 import ( + EnhancedAdaptiveHarmonicTabuSearchV29, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV29"] = EnhancedAdaptiveHarmonicTabuSearchV29 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV30 import ( + EnhancedAdaptiveHarmonicTabuSearchV30, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV30"] = EnhancedAdaptiveHarmonicTabuSearchV30 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV31 import ( + EnhancedAdaptiveHarmonicTabuSearchV31, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV31"] = EnhancedAdaptiveHarmonicTabuSearchV31 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV9 import ( + EnhancedAdaptiveHarmonicTabuSearchV9, + ) + + lama_register["EnhancedAdaptiveHarmonicTabuSearchV9"] = EnhancedAdaptiveHarmonicTabuSearchV9 + LLAMAEnhancedAdaptiveHarmonicTabuSearchV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonicTabuSearchV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyFireworksAlgorithm import ( + EnhancedAdaptiveHarmonyFireworksAlgorithm, + ) + + lama_register["EnhancedAdaptiveHarmonyFireworksAlgorithm"] = EnhancedAdaptiveHarmonyFireworksAlgorithm + LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithm import ( + EnhancedAdaptiveHarmonyMemeticAlgorithm, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithm"] = EnhancedAdaptiveHarmonyMemeticAlgorithm + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV10 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV10, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV10"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV10 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV11 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV11, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV11"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV11 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV12 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV12, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV12"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV12 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV13 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV13, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV13"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV13 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV14 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV14, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV14"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV14 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV16 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV16, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV16"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV16 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV18 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV18, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV18"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV18 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV19 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV19, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV19"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV19 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV2 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV2"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV2 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV20 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV20, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV20"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV20 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV21 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV21, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV21"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV21 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV22 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV22, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV22"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV22 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV23 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV23, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV23"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV23 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV24 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV24, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV24"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV24 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV25 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV25, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV25"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV25 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV3 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV3, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV3"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV3 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV4 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV4, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV4"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV4 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV5 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV5, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV5"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV5 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV6 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV6, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV6"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV6 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV7 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV7, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV7"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV7 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV8 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV8, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV8"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV8 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV9 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV9, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV9"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV9 + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticAlgorithmV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV28 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV28, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV28"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV28 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV29 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV29, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV29"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV29 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV3 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV3"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV3 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV30 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV30, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV30"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV30 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV31 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV31, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV31"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV31 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV32 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV32, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV32"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV32 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV33 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV33, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV33"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV33 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV4 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV4"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV4 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV5 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV5"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV5 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV6 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV6"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV6 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV7 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV7"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV7 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV8 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV8"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV8 + ) + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearch import ( + EnhancedAdaptiveHarmonyMemeticSearch, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticSearch"] = EnhancedAdaptiveHarmonyMemeticSearch + LLAMAEnhancedAdaptiveHarmonyMemeticSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearchV2 import ( + EnhancedAdaptiveHarmonyMemeticSearchV2, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticSearchV2"] = EnhancedAdaptiveHarmonyMemeticSearchV2 + LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization + ) + LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization + ) + LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizer import ( + EnhancedAdaptiveHarmonySearchOptimizer, + ) + + lama_register["EnhancedAdaptiveHarmonySearchOptimizer"] = EnhancedAdaptiveHarmonySearchOptimizer + LLAMAEnhancedAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizerV2 import ( + EnhancedAdaptiveHarmonySearchOptimizerV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchOptimizerV2"] = EnhancedAdaptiveHarmonySearchOptimizerV2 + LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV10 import EnhancedAdaptiveHarmonySearchV10 + + lama_register["EnhancedAdaptiveHarmonySearchV10"] = EnhancedAdaptiveHarmonySearchV10 + LLAMAEnhancedAdaptiveHarmonySearchV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV11 import EnhancedAdaptiveHarmonySearchV11 + + lama_register["EnhancedAdaptiveHarmonySearchV11"] = EnhancedAdaptiveHarmonySearchV11 + LLAMAEnhancedAdaptiveHarmonySearchV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV11", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV12 import EnhancedAdaptiveHarmonySearchV12 + + lama_register["EnhancedAdaptiveHarmonySearchV12"] = EnhancedAdaptiveHarmonySearchV12 + LLAMAEnhancedAdaptiveHarmonySearchV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV12", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV13 import EnhancedAdaptiveHarmonySearchV13 + + lama_register["EnhancedAdaptiveHarmonySearchV13"] = EnhancedAdaptiveHarmonySearchV13 + LLAMAEnhancedAdaptiveHarmonySearchV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV13", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV14 import EnhancedAdaptiveHarmonySearchV14 + + lama_register["EnhancedAdaptiveHarmonySearchV14"] = EnhancedAdaptiveHarmonySearchV14 + LLAMAEnhancedAdaptiveHarmonySearchV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV14", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV15 import EnhancedAdaptiveHarmonySearchV15 + + lama_register["EnhancedAdaptiveHarmonySearchV15"] = EnhancedAdaptiveHarmonySearchV15 + LLAMAEnhancedAdaptiveHarmonySearchV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV15" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV15", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV16 import EnhancedAdaptiveHarmonySearchV16 + + lama_register["EnhancedAdaptiveHarmonySearchV16"] = EnhancedAdaptiveHarmonySearchV16 + LLAMAEnhancedAdaptiveHarmonySearchV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV16", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV17 import EnhancedAdaptiveHarmonySearchV17 + + lama_register["EnhancedAdaptiveHarmonySearchV17"] = EnhancedAdaptiveHarmonySearchV17 + LLAMAEnhancedAdaptiveHarmonySearchV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV17" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV17", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV18 import EnhancedAdaptiveHarmonySearchV18 + + lama_register["EnhancedAdaptiveHarmonySearchV18"] = EnhancedAdaptiveHarmonySearchV18 + LLAMAEnhancedAdaptiveHarmonySearchV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV18", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV19 import EnhancedAdaptiveHarmonySearchV19 + + lama_register["EnhancedAdaptiveHarmonySearchV19"] = EnhancedAdaptiveHarmonySearchV19 + LLAMAEnhancedAdaptiveHarmonySearchV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV19" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV19", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV20 import EnhancedAdaptiveHarmonySearchV20 + + lama_register["EnhancedAdaptiveHarmonySearchV20"] = EnhancedAdaptiveHarmonySearchV20 + LLAMAEnhancedAdaptiveHarmonySearchV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV20" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV20", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV21 import EnhancedAdaptiveHarmonySearchV21 + + lama_register["EnhancedAdaptiveHarmonySearchV21"] = EnhancedAdaptiveHarmonySearchV21 + LLAMAEnhancedAdaptiveHarmonySearchV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV21", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV22 import EnhancedAdaptiveHarmonySearchV22 + + lama_register["EnhancedAdaptiveHarmonySearchV22"] = EnhancedAdaptiveHarmonySearchV22 + LLAMAEnhancedAdaptiveHarmonySearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV22", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV23 import EnhancedAdaptiveHarmonySearchV23 + + lama_register["EnhancedAdaptiveHarmonySearchV23"] = EnhancedAdaptiveHarmonySearchV23 + LLAMAEnhancedAdaptiveHarmonySearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV23", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV24 import EnhancedAdaptiveHarmonySearchV24 + + lama_register["EnhancedAdaptiveHarmonySearchV24"] = EnhancedAdaptiveHarmonySearchV24 + LLAMAEnhancedAdaptiveHarmonySearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV24" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV24", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV25 import EnhancedAdaptiveHarmonySearchV25 + + lama_register["EnhancedAdaptiveHarmonySearchV25"] = EnhancedAdaptiveHarmonySearchV25 + LLAMAEnhancedAdaptiveHarmonySearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV25", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV3 import EnhancedAdaptiveHarmonySearchV3 + + lama_register["EnhancedAdaptiveHarmonySearchV3"] = EnhancedAdaptiveHarmonySearchV3 + LLAMAEnhancedAdaptiveHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV4 import EnhancedAdaptiveHarmonySearchV4 + + lama_register["EnhancedAdaptiveHarmonySearchV4"] = EnhancedAdaptiveHarmonySearchV4 + LLAMAEnhancedAdaptiveHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV5 import EnhancedAdaptiveHarmonySearchV5 + + lama_register["EnhancedAdaptiveHarmonySearchV5"] = EnhancedAdaptiveHarmonySearchV5 + LLAMAEnhancedAdaptiveHarmonySearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV6 import EnhancedAdaptiveHarmonySearchV6 + + lama_register["EnhancedAdaptiveHarmonySearchV6"] = EnhancedAdaptiveHarmonySearchV6 + LLAMAEnhancedAdaptiveHarmonySearchV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV7 import EnhancedAdaptiveHarmonySearchV7 + + lama_register["EnhancedAdaptiveHarmonySearchV7"] = EnhancedAdaptiveHarmonySearchV7 + LLAMAEnhancedAdaptiveHarmonySearchV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV8 import EnhancedAdaptiveHarmonySearchV8 + + lama_register["EnhancedAdaptiveHarmonySearchV8"] = EnhancedAdaptiveHarmonySearchV8 + LLAMAEnhancedAdaptiveHarmonySearchV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV9 import EnhancedAdaptiveHarmonySearchV9 + + lama_register["EnhancedAdaptiveHarmonySearchV9"] = EnhancedAdaptiveHarmonySearchV9 + LLAMAEnhancedAdaptiveHarmonySearchV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration import ( + EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration + ) + LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2" + ).set_name( + "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2", register=True + ) +except Exception as e: + print( + "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3" + ).set_name( + "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3", register=True + ) +except Exception as e: + print( + "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 import ( + EnhancedAdaptiveHarmonySearchWithHybridInspirationV16, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithHybridInspirationV16"] = ( + EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithLevyFlight, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlight"] = EnhancedAdaptiveHarmonySearchWithLevyFlight + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLevyFlight can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 import ( + EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2"] = ( + EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimization + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight"] = ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight + ) + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration import ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration"] = ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration + ) + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 + ) + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuOptimization import ( + EnhancedAdaptiveHarmonyTabuOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonyTabuOptimization"] = EnhancedAdaptiveHarmonyTabuOptimization + LLAMAEnhancedAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyTabuOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV2 import ( + EnhancedAdaptiveHarmonyTabuSearchV2, + ) + + lama_register["EnhancedAdaptiveHarmonyTabuSearchV2"] = EnhancedAdaptiveHarmonyTabuSearchV2 + LLAMAEnhancedAdaptiveHarmonyTabuSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyTabuSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV3 import ( + EnhancedAdaptiveHarmonyTabuSearchV3, + ) + + lama_register["EnhancedAdaptiveHarmonyTabuSearchV3"] = EnhancedAdaptiveHarmonyTabuSearchV3 + LLAMAEnhancedAdaptiveHarmonyTabuSearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyTabuSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV4 import ( + EnhancedAdaptiveHarmonyTabuSearchV4, + ) + + lama_register["EnhancedAdaptiveHarmonyTabuSearchV4"] = EnhancedAdaptiveHarmonyTabuSearchV4 + LLAMAEnhancedAdaptiveHarmonyTabuSearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyTabuSearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV5 import ( + EnhancedAdaptiveHarmonyTabuSearchV5, + ) + + lama_register["EnhancedAdaptiveHarmonyTabuSearchV5"] = EnhancedAdaptiveHarmonyTabuSearchV5 + LLAMAEnhancedAdaptiveHarmonyTabuSearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5", register=True) +except Exception as e: + print("EnhancedAdaptiveHarmonyTabuSearchV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory import ( + EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory, + ) + + lama_register["EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory"] = ( + EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory + ) + LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory" + ).set_name("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV22 import ( + EnhancedAdaptiveHybridHarmonySearchV22, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV22"] = EnhancedAdaptiveHybridHarmonySearchV22 + LLAMAEnhancedAdaptiveHybridHarmonySearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV22", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV23 import ( + EnhancedAdaptiveHybridHarmonySearchV23, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV23"] = EnhancedAdaptiveHybridHarmonySearchV23 + LLAMAEnhancedAdaptiveHybridHarmonySearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV23", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV24 import ( + EnhancedAdaptiveHybridHarmonySearchV24, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV24"] = EnhancedAdaptiveHybridHarmonySearchV24 + LLAMAEnhancedAdaptiveHybridHarmonySearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV24", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV25 import ( + EnhancedAdaptiveHybridHarmonySearchV25, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV25"] = EnhancedAdaptiveHybridHarmonySearchV25 + LLAMAEnhancedAdaptiveHybridHarmonySearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV25", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV26 import ( + EnhancedAdaptiveHybridHarmonySearchV26, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV26"] = EnhancedAdaptiveHybridHarmonySearchV26 + LLAMAEnhancedAdaptiveHybridHarmonySearchV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV26", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV27 import ( + EnhancedAdaptiveHybridHarmonySearchV27, + ) + + lama_register["EnhancedAdaptiveHybridHarmonySearchV27"] = EnhancedAdaptiveHybridHarmonySearchV27 + LLAMAEnhancedAdaptiveHybridHarmonySearchV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV27", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridHarmonySearchV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridMetaOptimizer import ( + EnhancedAdaptiveHybridMetaOptimizer, + ) + + lama_register["EnhancedAdaptiveHybridMetaOptimizer"] = EnhancedAdaptiveHybridMetaOptimizer + LLAMAEnhancedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridMetaOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHybridMetaOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridMetaOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridOptimizer import EnhancedAdaptiveHybridOptimizer + + lama_register["EnhancedAdaptiveHybridOptimizer"] = EnhancedAdaptiveHybridOptimizer + LLAMAEnhancedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution import ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution + ) + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus, + ) + + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus + ) + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" + ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) +except Exception as e: + print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveInertiaHybridOptimizer import ( + EnhancedAdaptiveInertiaHybridOptimizer, + ) + + lama_register["EnhancedAdaptiveInertiaHybridOptimizer"] = EnhancedAdaptiveInertiaHybridOptimizer + LLAMAEnhancedAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveInertiaHybridOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveInertiaHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm + ) + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 + ) + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 + ) + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 + ) + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearch import ( + EnhancedAdaptiveLevyHarmonySearch, + ) + + lama_register["EnhancedAdaptiveLevyHarmonySearch"] = EnhancedAdaptiveLevyHarmonySearch + LLAMAEnhancedAdaptiveLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearch", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV2 import ( + EnhancedAdaptiveLevyHarmonySearchV2, + ) + + lama_register["EnhancedAdaptiveLevyHarmonySearchV2"] = EnhancedAdaptiveLevyHarmonySearchV2 + LLAMAEnhancedAdaptiveLevyHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyHarmonySearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV3 import ( + EnhancedAdaptiveLevyHarmonySearchV3, + ) + + lama_register["EnhancedAdaptiveLevyHarmonySearchV3"] = EnhancedAdaptiveLevyHarmonySearchV3 + LLAMAEnhancedAdaptiveLevyHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV3", register=True) +except Exception as e: + print("EnhancedAdaptiveLevyHarmonySearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing + ) + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 + ) + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2", register=True) +except Exception as e: + print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 + ) + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3", register=True) +except Exception as e: + print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 + ) + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4", register=True) +except Exception as e: + print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 + ) + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5", register=True) +except Exception as e: + print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDifferentialEvolution import ( + EnhancedAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMemeticDifferentialEvolution"] = ( + EnhancedAdaptiveMemeticDifferentialEvolution + ) + LLAMAEnhancedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizer import ( + EnhancedAdaptiveMemeticDiverseOptimizer, + ) + + lama_register["EnhancedAdaptiveMemeticDiverseOptimizer"] = EnhancedAdaptiveMemeticDiverseOptimizer + LLAMAEnhancedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV2 import ( + EnhancedAdaptiveMemeticDiverseOptimizerV2, + ) + + lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV2"] = EnhancedAdaptiveMemeticDiverseOptimizerV2 + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticDiverseOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV3 import ( + EnhancedAdaptiveMemeticDiverseOptimizerV3, + ) + + lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV3"] = EnhancedAdaptiveMemeticDiverseOptimizerV3 + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticDiverseOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 import ( + EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2"] = ( + EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 + ) + LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimization import ( + EnhancedAdaptiveMemeticHarmonyOptimization, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimization"] = EnhancedAdaptiveMemeticHarmonyOptimization + LLAMAEnhancedAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHarmonyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV2 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV2, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV2"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV2 + ) + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHarmonyOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV3 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV3, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV3"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV3 + ) + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHarmonyOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV4 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV4, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV4"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV4 + ) + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHarmonyOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV6 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV6, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV6"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV6 + ) + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHarmonyOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHybridOptimizer import ( + EnhancedAdaptiveMemeticHybridOptimizer, + ) + + lama_register["EnhancedAdaptiveMemeticHybridOptimizer"] = EnhancedAdaptiveMemeticHybridOptimizer + LLAMAEnhancedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticOptimizerV7 import ( + EnhancedAdaptiveMemeticOptimizerV7, + ) + + lama_register["EnhancedAdaptiveMemeticOptimizerV7"] = EnhancedAdaptiveMemeticOptimizerV7 + LLAMAEnhancedAdaptiveMemeticOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticOptimizerV7" + ).set_name("LLAMAEnhancedAdaptiveMemeticOptimizerV7", register=True) +except Exception as e: + print("EnhancedAdaptiveMemeticOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryControlStrategyV49 import ( + EnhancedAdaptiveMemoryControlStrategyV49, + ) + + lama_register["EnhancedAdaptiveMemoryControlStrategyV49"] = EnhancedAdaptiveMemoryControlStrategyV49 + LLAMAEnhancedAdaptiveMemoryControlStrategyV49 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49" + ).set_name("LLAMAEnhancedAdaptiveMemoryControlStrategyV49", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryControlStrategyV49 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryDualPhaseStrategyV46 import ( + EnhancedAdaptiveMemoryDualPhaseStrategyV46, + ) + + lama_register["EnhancedAdaptiveMemoryDualPhaseStrategyV46"] = EnhancedAdaptiveMemoryDualPhaseStrategyV46 + LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46" + ).set_name("LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryDualPhaseStrategyV46 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost import ( + EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost, + ) + + lama_register["EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( + EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost + ) + LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost" + ).set_name("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridAnnealing import ( + EnhancedAdaptiveMemoryHybridAnnealing, + ) + + lama_register["EnhancedAdaptiveMemoryHybridAnnealing"] = EnhancedAdaptiveMemoryHybridAnnealing + LLAMAEnhancedAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMemoryHybridAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryHybridAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridDEPSO import ( + EnhancedAdaptiveMemoryHybridDEPSO, + ) + + lama_register["EnhancedAdaptiveMemoryHybridDEPSO"] = EnhancedAdaptiveMemoryHybridDEPSO + LLAMAEnhancedAdaptiveMemoryHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO" + ).set_name("LLAMAEnhancedAdaptiveMemoryHybridDEPSO", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV54 import ( + EnhancedAdaptiveMemoryStrategyV54, + ) + + lama_register["EnhancedAdaptiveMemoryStrategyV54"] = EnhancedAdaptiveMemoryStrategyV54 + LLAMAEnhancedAdaptiveMemoryStrategyV54 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryStrategyV54" + ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV54", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryStrategyV54 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV79 import ( + EnhancedAdaptiveMemoryStrategyV79, + ) + + lama_register["EnhancedAdaptiveMemoryStrategyV79"] = EnhancedAdaptiveMemoryStrategyV79 + LLAMAEnhancedAdaptiveMemoryStrategyV79 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryStrategyV79" + ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV79", register=True) +except Exception as e: + print("EnhancedAdaptiveMemoryStrategyV79 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSO import EnhancedAdaptiveMetaNetAQAPSO + + lama_register["EnhancedAdaptiveMetaNetAQAPSO"] = EnhancedAdaptiveMetaNetAQAPSO + LLAMAEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv12 import EnhancedAdaptiveMetaNetAQAPSOv12 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv12"] = EnhancedAdaptiveMetaNetAQAPSOv12 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv12", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv14 import EnhancedAdaptiveMetaNetAQAPSOv14 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv14"] = EnhancedAdaptiveMetaNetAQAPSOv14 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv14", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv15 import EnhancedAdaptiveMetaNetAQAPSOv15 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv15"] = EnhancedAdaptiveMetaNetAQAPSOv15 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv15", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv16 import EnhancedAdaptiveMetaNetAQAPSOv16 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv16"] = EnhancedAdaptiveMetaNetAQAPSOv16 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv16", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv2 import EnhancedAdaptiveMetaNetAQAPSOv2 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv2"] = EnhancedAdaptiveMetaNetAQAPSOv2 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv2", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv3 import EnhancedAdaptiveMetaNetAQAPSOv3 + + lama_register["EnhancedAdaptiveMetaNetAQAPSOv3"] = EnhancedAdaptiveMetaNetAQAPSOv3 + LLAMAEnhancedAdaptiveMetaNetAQAPSOv3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv3", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetAQAPSOv3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO import EnhancedAdaptiveMetaNetPSO + + lama_register["EnhancedAdaptiveMetaNetPSO"] = EnhancedAdaptiveMetaNetPSO + LLAMAEnhancedAdaptiveMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO").set_name( + "LLAMAEnhancedAdaptiveMetaNetPSO", register=True + ) +except Exception as e: + print("EnhancedAdaptiveMetaNetPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v2 import EnhancedAdaptiveMetaNetPSO_v2 + + lama_register["EnhancedAdaptiveMetaNetPSO_v2"] = EnhancedAdaptiveMetaNetPSO_v2 + LLAMAEnhancedAdaptiveMetaNetPSO_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetPSO_v2" + ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v2", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetPSO_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v3 import EnhancedAdaptiveMetaNetPSO_v3 + + lama_register["EnhancedAdaptiveMetaNetPSO_v3"] = EnhancedAdaptiveMetaNetPSO_v3 + LLAMAEnhancedAdaptiveMetaNetPSO_v3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetPSO_v3" + ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v3", register=True) +except Exception as e: + print("EnhancedAdaptiveMetaNetPSO_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiMemorySimulatedAnnealing import ( + EnhancedAdaptiveMultiMemorySimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveMultiMemorySimulatedAnnealing"] = ( + EnhancedAdaptiveMultiMemorySimulatedAnnealing + ) + LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiOperatorSearch import ( + EnhancedAdaptiveMultiOperatorSearch, + ) + + lama_register["EnhancedAdaptiveMultiOperatorSearch"] = EnhancedAdaptiveMultiOperatorSearch + LLAMAEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAEnhancedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealing import ( + EnhancedAdaptiveMultiPhaseAnnealing, + ) + + lama_register["EnhancedAdaptiveMultiPhaseAnnealing"] = EnhancedAdaptiveMultiPhaseAnnealing + LLAMAEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( + EnhancedAdaptiveMultiPhaseAnnealingWithGradient, + ) + + lama_register["EnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( + EnhancedAdaptiveMultiPhaseAnnealingWithGradient + ) + LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient" + ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPopulationDifferentialEvolution import ( + EnhancedAdaptiveMultiPopulationDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMultiPopulationDifferentialEvolution"] = ( + EnhancedAdaptiveMultiPopulationDifferentialEvolution + ) + LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategicOptimizer import ( + EnhancedAdaptiveMultiStrategicOptimizer, + ) + + lama_register["EnhancedAdaptiveMultiStrategicOptimizer"] = EnhancedAdaptiveMultiStrategicOptimizer + LLAMAEnhancedAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategicOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDE import EnhancedAdaptiveMultiStrategyDE + + lama_register["EnhancedAdaptiveMultiStrategyDE"] = EnhancedAdaptiveMultiStrategyDE + LLAMAEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyDE" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDE", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveMultiStrategyDifferentialEvolution + ) + LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyOptimizer import ( + EnhancedAdaptiveMultiStrategyOptimizer, + ) + + lama_register["EnhancedAdaptiveMultiStrategyOptimizer"] = EnhancedAdaptiveMultiStrategyOptimizer + LLAMAEnhancedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveMultiStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer import ( + EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( + EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer + ) + LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution import ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution + ) + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 import ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2, + ) + + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2"] = ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 + ) + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2", register=True) +except Exception as e: + print("EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: + print("EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveOrthogonalDifferentialEvolution import ( + EnhancedAdaptiveOrthogonalDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveOrthogonalDifferentialEvolution"] = ( + EnhancedAdaptiveOrthogonalDifferentialEvolution + ) + LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( + EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( + EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + ) + LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: + print("EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionCohortOptimizationV5 import ( + EnhancedAdaptivePrecisionCohortOptimizationV5, + ) + + lama_register["EnhancedAdaptivePrecisionCohortOptimizationV5"] = ( + EnhancedAdaptivePrecisionCohortOptimizationV5 + ) + LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5" + ).set_name("LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdaptivePrecisionCohortOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionFocalStrategy import ( + EnhancedAdaptivePrecisionFocalStrategy, + ) + + lama_register["EnhancedAdaptivePrecisionFocalStrategy"] = EnhancedAdaptivePrecisionFocalStrategy + LLAMAEnhancedAdaptivePrecisionFocalStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePrecisionFocalStrategy" + ).set_name("LLAMAEnhancedAdaptivePrecisionFocalStrategy", register=True) +except Exception as e: + print("EnhancedAdaptivePrecisionFocalStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA import EnhancedAdaptiveQGSA + + lama_register["EnhancedAdaptiveQGSA"] = EnhancedAdaptiveQGSA + LLAMAEnhancedAdaptiveQGSA = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA").set_name( + "LLAMAEnhancedAdaptiveQGSA", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v10 import EnhancedAdaptiveQGSA_v10 + + lama_register["EnhancedAdaptiveQGSA_v10"] = EnhancedAdaptiveQGSA_v10 + LLAMAEnhancedAdaptiveQGSA_v10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10").set_name( + "LLAMAEnhancedAdaptiveQGSA_v10", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v11 import EnhancedAdaptiveQGSA_v11 + + lama_register["EnhancedAdaptiveQGSA_v11"] = EnhancedAdaptiveQGSA_v11 + LLAMAEnhancedAdaptiveQGSA_v11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11").set_name( + "LLAMAEnhancedAdaptiveQGSA_v11", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v12 import EnhancedAdaptiveQGSA_v12 + + lama_register["EnhancedAdaptiveQGSA_v12"] = EnhancedAdaptiveQGSA_v12 + LLAMAEnhancedAdaptiveQGSA_v12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12").set_name( + "LLAMAEnhancedAdaptiveQGSA_v12", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v13 import EnhancedAdaptiveQGSA_v13 + + lama_register["EnhancedAdaptiveQGSA_v13"] = EnhancedAdaptiveQGSA_v13 + LLAMAEnhancedAdaptiveQGSA_v13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13").set_name( + "LLAMAEnhancedAdaptiveQGSA_v13", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v14 import EnhancedAdaptiveQGSA_v14 + + lama_register["EnhancedAdaptiveQGSA_v14"] = EnhancedAdaptiveQGSA_v14 + LLAMAEnhancedAdaptiveQGSA_v14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14").set_name( + "LLAMAEnhancedAdaptiveQGSA_v14", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v15 import EnhancedAdaptiveQGSA_v15 + + lama_register["EnhancedAdaptiveQGSA_v15"] = EnhancedAdaptiveQGSA_v15 + LLAMAEnhancedAdaptiveQGSA_v15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15").set_name( + "LLAMAEnhancedAdaptiveQGSA_v15", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v16 import EnhancedAdaptiveQGSA_v16 + + lama_register["EnhancedAdaptiveQGSA_v16"] = EnhancedAdaptiveQGSA_v16 + LLAMAEnhancedAdaptiveQGSA_v16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16").set_name( + "LLAMAEnhancedAdaptiveQGSA_v16", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v17 import EnhancedAdaptiveQGSA_v17 + + lama_register["EnhancedAdaptiveQGSA_v17"] = EnhancedAdaptiveQGSA_v17 + LLAMAEnhancedAdaptiveQGSA_v17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17").set_name( + "LLAMAEnhancedAdaptiveQGSA_v17", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v18 import EnhancedAdaptiveQGSA_v18 + + lama_register["EnhancedAdaptiveQGSA_v18"] = EnhancedAdaptiveQGSA_v18 + LLAMAEnhancedAdaptiveQGSA_v18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18").set_name( + "LLAMAEnhancedAdaptiveQGSA_v18", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v19 import EnhancedAdaptiveQGSA_v19 + + lama_register["EnhancedAdaptiveQGSA_v19"] = EnhancedAdaptiveQGSA_v19 + LLAMAEnhancedAdaptiveQGSA_v19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19").set_name( + "LLAMAEnhancedAdaptiveQGSA_v19", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v2 import EnhancedAdaptiveQGSA_v2 + + lama_register["EnhancedAdaptiveQGSA_v2"] = EnhancedAdaptiveQGSA_v2 + LLAMAEnhancedAdaptiveQGSA_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2").set_name( + "LLAMAEnhancedAdaptiveQGSA_v2", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v20 import EnhancedAdaptiveQGSA_v20 + + lama_register["EnhancedAdaptiveQGSA_v20"] = EnhancedAdaptiveQGSA_v20 + LLAMAEnhancedAdaptiveQGSA_v20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20").set_name( + "LLAMAEnhancedAdaptiveQGSA_v20", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v21 import EnhancedAdaptiveQGSA_v21 + + lama_register["EnhancedAdaptiveQGSA_v21"] = EnhancedAdaptiveQGSA_v21 + LLAMAEnhancedAdaptiveQGSA_v21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21").set_name( + "LLAMAEnhancedAdaptiveQGSA_v21", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v22 import EnhancedAdaptiveQGSA_v22 + + lama_register["EnhancedAdaptiveQGSA_v22"] = EnhancedAdaptiveQGSA_v22 + LLAMAEnhancedAdaptiveQGSA_v22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22").set_name( + "LLAMAEnhancedAdaptiveQGSA_v22", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v23 import EnhancedAdaptiveQGSA_v23 + + lama_register["EnhancedAdaptiveQGSA_v23"] = EnhancedAdaptiveQGSA_v23 + LLAMAEnhancedAdaptiveQGSA_v23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23").set_name( + "LLAMAEnhancedAdaptiveQGSA_v23", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v24 import EnhancedAdaptiveQGSA_v24 + + lama_register["EnhancedAdaptiveQGSA_v24"] = EnhancedAdaptiveQGSA_v24 + LLAMAEnhancedAdaptiveQGSA_v24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24").set_name( + "LLAMAEnhancedAdaptiveQGSA_v24", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v25 import EnhancedAdaptiveQGSA_v25 + + lama_register["EnhancedAdaptiveQGSA_v25"] = EnhancedAdaptiveQGSA_v25 + LLAMAEnhancedAdaptiveQGSA_v25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25").set_name( + "LLAMAEnhancedAdaptiveQGSA_v25", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v26 import EnhancedAdaptiveQGSA_v26 + + lama_register["EnhancedAdaptiveQGSA_v26"] = EnhancedAdaptiveQGSA_v26 + LLAMAEnhancedAdaptiveQGSA_v26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26").set_name( + "LLAMAEnhancedAdaptiveQGSA_v26", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v27 import EnhancedAdaptiveQGSA_v27 + + lama_register["EnhancedAdaptiveQGSA_v27"] = EnhancedAdaptiveQGSA_v27 + LLAMAEnhancedAdaptiveQGSA_v27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27").set_name( + "LLAMAEnhancedAdaptiveQGSA_v27", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v28 import EnhancedAdaptiveQGSA_v28 + + lama_register["EnhancedAdaptiveQGSA_v28"] = EnhancedAdaptiveQGSA_v28 + LLAMAEnhancedAdaptiveQGSA_v28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28").set_name( + "LLAMAEnhancedAdaptiveQGSA_v28", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v29 import EnhancedAdaptiveQGSA_v29 + + lama_register["EnhancedAdaptiveQGSA_v29"] = EnhancedAdaptiveQGSA_v29 + LLAMAEnhancedAdaptiveQGSA_v29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29").set_name( + "LLAMAEnhancedAdaptiveQGSA_v29", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v3 import EnhancedAdaptiveQGSA_v3 + + lama_register["EnhancedAdaptiveQGSA_v3"] = EnhancedAdaptiveQGSA_v3 + LLAMAEnhancedAdaptiveQGSA_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3").set_name( + "LLAMAEnhancedAdaptiveQGSA_v3", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v30 import EnhancedAdaptiveQGSA_v30 + + lama_register["EnhancedAdaptiveQGSA_v30"] = EnhancedAdaptiveQGSA_v30 + LLAMAEnhancedAdaptiveQGSA_v30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30").set_name( + "LLAMAEnhancedAdaptiveQGSA_v30", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v31 import EnhancedAdaptiveQGSA_v31 + + lama_register["EnhancedAdaptiveQGSA_v31"] = EnhancedAdaptiveQGSA_v31 + LLAMAEnhancedAdaptiveQGSA_v31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31").set_name( + "LLAMAEnhancedAdaptiveQGSA_v31", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v32 import EnhancedAdaptiveQGSA_v32 + + lama_register["EnhancedAdaptiveQGSA_v32"] = EnhancedAdaptiveQGSA_v32 + LLAMAEnhancedAdaptiveQGSA_v32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32").set_name( + "LLAMAEnhancedAdaptiveQGSA_v32", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v33 import EnhancedAdaptiveQGSA_v33 + + lama_register["EnhancedAdaptiveQGSA_v33"] = EnhancedAdaptiveQGSA_v33 + LLAMAEnhancedAdaptiveQGSA_v33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33").set_name( + "LLAMAEnhancedAdaptiveQGSA_v33", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v34 import EnhancedAdaptiveQGSA_v34 + + lama_register["EnhancedAdaptiveQGSA_v34"] = EnhancedAdaptiveQGSA_v34 + LLAMAEnhancedAdaptiveQGSA_v34 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34").set_name( + "LLAMAEnhancedAdaptiveQGSA_v34", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v35 import EnhancedAdaptiveQGSA_v35 + + lama_register["EnhancedAdaptiveQGSA_v35"] = EnhancedAdaptiveQGSA_v35 + LLAMAEnhancedAdaptiveQGSA_v35 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35").set_name( + "LLAMAEnhancedAdaptiveQGSA_v35", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v35 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v36 import EnhancedAdaptiveQGSA_v36 + + lama_register["EnhancedAdaptiveQGSA_v36"] = EnhancedAdaptiveQGSA_v36 + LLAMAEnhancedAdaptiveQGSA_v36 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36").set_name( + "LLAMAEnhancedAdaptiveQGSA_v36", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v36 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v38 import EnhancedAdaptiveQGSA_v38 + + lama_register["EnhancedAdaptiveQGSA_v38"] = EnhancedAdaptiveQGSA_v38 + LLAMAEnhancedAdaptiveQGSA_v38 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38").set_name( + "LLAMAEnhancedAdaptiveQGSA_v38", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v38 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v39 import EnhancedAdaptiveQGSA_v39 + + lama_register["EnhancedAdaptiveQGSA_v39"] = EnhancedAdaptiveQGSA_v39 + LLAMAEnhancedAdaptiveQGSA_v39 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39").set_name( + "LLAMAEnhancedAdaptiveQGSA_v39", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v4 import EnhancedAdaptiveQGSA_v4 + + lama_register["EnhancedAdaptiveQGSA_v4"] = EnhancedAdaptiveQGSA_v4 + LLAMAEnhancedAdaptiveQGSA_v4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4").set_name( + "LLAMAEnhancedAdaptiveQGSA_v4", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v40 import EnhancedAdaptiveQGSA_v40 + + lama_register["EnhancedAdaptiveQGSA_v40"] = EnhancedAdaptiveQGSA_v40 + LLAMAEnhancedAdaptiveQGSA_v40 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40").set_name( + "LLAMAEnhancedAdaptiveQGSA_v40", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v40 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v41 import EnhancedAdaptiveQGSA_v41 + + lama_register["EnhancedAdaptiveQGSA_v41"] = EnhancedAdaptiveQGSA_v41 + LLAMAEnhancedAdaptiveQGSA_v41 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41").set_name( + "LLAMAEnhancedAdaptiveQGSA_v41", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v42 import EnhancedAdaptiveQGSA_v42 + + lama_register["EnhancedAdaptiveQGSA_v42"] = EnhancedAdaptiveQGSA_v42 + LLAMAEnhancedAdaptiveQGSA_v42 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42").set_name( + "LLAMAEnhancedAdaptiveQGSA_v42", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v43 import EnhancedAdaptiveQGSA_v43 + + lama_register["EnhancedAdaptiveQGSA_v43"] = EnhancedAdaptiveQGSA_v43 + LLAMAEnhancedAdaptiveQGSA_v43 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43").set_name( + "LLAMAEnhancedAdaptiveQGSA_v43", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v44 import EnhancedAdaptiveQGSA_v44 + + lama_register["EnhancedAdaptiveQGSA_v44"] = EnhancedAdaptiveQGSA_v44 + LLAMAEnhancedAdaptiveQGSA_v44 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44").set_name( + "LLAMAEnhancedAdaptiveQGSA_v44", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v44 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v47 import EnhancedAdaptiveQGSA_v47 + + lama_register["EnhancedAdaptiveQGSA_v47"] = EnhancedAdaptiveQGSA_v47 + LLAMAEnhancedAdaptiveQGSA_v47 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47").set_name( + "LLAMAEnhancedAdaptiveQGSA_v47", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v47 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v5 import EnhancedAdaptiveQGSA_v5 + + lama_register["EnhancedAdaptiveQGSA_v5"] = EnhancedAdaptiveQGSA_v5 + LLAMAEnhancedAdaptiveQGSA_v5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5").set_name( + "LLAMAEnhancedAdaptiveQGSA_v5", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v6 import EnhancedAdaptiveQGSA_v6 + + lama_register["EnhancedAdaptiveQGSA_v6"] = EnhancedAdaptiveQGSA_v6 + LLAMAEnhancedAdaptiveQGSA_v6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6").set_name( + "LLAMAEnhancedAdaptiveQGSA_v6", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v8 import EnhancedAdaptiveQGSA_v8 + + lama_register["EnhancedAdaptiveQGSA_v8"] = EnhancedAdaptiveQGSA_v8 + LLAMAEnhancedAdaptiveQGSA_v8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8").set_name( + "LLAMAEnhancedAdaptiveQGSA_v8", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v9 import EnhancedAdaptiveQGSA_v9 + + lama_register["EnhancedAdaptiveQGSA_v9"] = EnhancedAdaptiveQGSA_v9 + LLAMAEnhancedAdaptiveQGSA_v9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9").set_name( + "LLAMAEnhancedAdaptiveQGSA_v9", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQGSA_v9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDEWithDynamicElitistLearning import ( + EnhancedAdaptiveQuantumDEWithDynamicElitistLearning, + ) + + lama_register["EnhancedAdaptiveQuantumDEWithDynamicElitistLearning"] = ( + EnhancedAdaptiveQuantumDEWithDynamicElitistLearning + ) + LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning" + ).set_name("LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumDEWithDynamicElitistLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolution import ( + EnhancedAdaptiveQuantumDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveQuantumDifferentialEvolution"] = ( + EnhancedAdaptiveQuantumDifferentialEvolution + ) + LLAMAEnhancedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch import ( + EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch, + ) + + lama_register["EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch"] = ( + EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch + ) + LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDynamicLevyOptimization import ( + EnhancedAdaptiveQuantumDynamicLevyOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumDynamicLevyOptimization"] = ( + EnhancedAdaptiveQuantumDynamicLevyOptimization + ) + LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumDynamicLevyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumGradientMemeticOptimizer import ( + EnhancedAdaptiveQuantumGradientMemeticOptimizer, + ) + + lama_register["EnhancedAdaptiveQuantumGradientMemeticOptimizer"] = ( + EnhancedAdaptiveQuantumGradientMemeticOptimizer + ) + LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer" + ).set_name("LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumGradientMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGB import ( + EnhancedAdaptiveQuantumHarmonySearchDBGB, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGB"] = EnhancedAdaptiveQuantumHarmonySearchDBGB + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchDBGB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinal import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinal, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinal"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinal + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBImproved import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBImproved, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBImproved"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBImproved + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchDBGBImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchFinal import ( + EnhancedAdaptiveQuantumHarmonySearchFinal, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchFinal"] = EnhancedAdaptiveQuantumHarmonySearchFinal + LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchFinal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImproved import ( + EnhancedAdaptiveQuantumHarmonySearchImproved, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchImproved"] = ( + EnhancedAdaptiveQuantumHarmonySearchImproved + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImprovedRefined import ( + EnhancedAdaptiveQuantumHarmonySearchImprovedRefined, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchImprovedRefined"] = ( + EnhancedAdaptiveQuantumHarmonySearchImprovedRefined + ) + LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumHarmonySearchImprovedRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevyMemeticOptimizer import ( + EnhancedAdaptiveQuantumLevyMemeticOptimizer, + ) + + lama_register["EnhancedAdaptiveQuantumLevyMemeticOptimizer"] = EnhancedAdaptiveQuantumLevyMemeticOptimizer + LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer" + ).set_name("LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevySwarmOptimization import ( + EnhancedAdaptiveQuantumLevySwarmOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumLevySwarmOptimization"] = ( + EnhancedAdaptiveQuantumLevySwarmOptimization + ) + LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLocalSearch import ( + EnhancedAdaptiveQuantumLocalSearch, + ) + + lama_register["EnhancedAdaptiveQuantumLocalSearch"] = EnhancedAdaptiveQuantumLocalSearch + LLAMAEnhancedAdaptiveQuantumLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveQuantumLocalSearch", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumMemeticOptimizerV4 import ( + EnhancedAdaptiveQuantumMemeticOptimizerV4, + ) + + lama_register["EnhancedAdaptiveQuantumMemeticOptimizerV4"] = EnhancedAdaptiveQuantumMemeticOptimizerV4 + LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4" + ).set_name("LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumMemeticOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSO import EnhancedAdaptiveQuantumPSO + + lama_register["EnhancedAdaptiveQuantumPSO"] = EnhancedAdaptiveQuantumPSO + LLAMAEnhancedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO").set_name( + "LLAMAEnhancedAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("EnhancedAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSOv2 import EnhancedAdaptiveQuantumPSOv2 + + lama_register["EnhancedAdaptiveQuantumPSOv2"] = EnhancedAdaptiveQuantumPSOv2 + LLAMAEnhancedAdaptiveQuantumPSOv2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumPSOv2" + ).set_name("LLAMAEnhancedAdaptiveQuantumPSOv2", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumPSOv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumParticleSwarmOptimization import ( + EnhancedAdaptiveQuantumParticleSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumParticleSwarmOptimization"] = ( + EnhancedAdaptiveQuantumParticleSwarmOptimization + ) + LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealing import ( + EnhancedAdaptiveQuantumSimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveQuantumSimulatedAnnealing"] = EnhancedAdaptiveQuantumSimulatedAnnealing + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealingOptimized import ( + EnhancedAdaptiveQuantumSimulatedAnnealingOptimized, + ) + + lama_register["EnhancedAdaptiveQuantumSimulatedAnnealingOptimized"] = ( + EnhancedAdaptiveQuantumSimulatedAnnealingOptimized + ) + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized" + ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSimulatedAnnealingOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimization import ( + EnhancedAdaptiveQuantumSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimization"] = EnhancedAdaptiveQuantumSwarmOptimization + LLAMAEnhancedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV10 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV10, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV10"] = EnhancedAdaptiveQuantumSwarmOptimizationV10 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV11 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV11, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV11"] = EnhancedAdaptiveQuantumSwarmOptimizationV11 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV12 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV12, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV12"] = EnhancedAdaptiveQuantumSwarmOptimizationV12 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV13 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV13, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV13"] = EnhancedAdaptiveQuantumSwarmOptimizationV13 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV14 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV14, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV14"] = EnhancedAdaptiveQuantumSwarmOptimizationV14 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV15 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV15, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV15"] = EnhancedAdaptiveQuantumSwarmOptimizationV15 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV16 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV16, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV16"] = EnhancedAdaptiveQuantumSwarmOptimizationV16 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV17 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV17, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV17"] = EnhancedAdaptiveQuantumSwarmOptimizationV17 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV18 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV18, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV18"] = EnhancedAdaptiveQuantumSwarmOptimizationV18 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV19 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV19, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV19"] = EnhancedAdaptiveQuantumSwarmOptimizationV19 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV2 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV2"] = EnhancedAdaptiveQuantumSwarmOptimizationV2 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV20 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV20, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV20"] = EnhancedAdaptiveQuantumSwarmOptimizationV20 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV21 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV21, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV21"] = EnhancedAdaptiveQuantumSwarmOptimizationV21 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV22 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV22, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV22"] = EnhancedAdaptiveQuantumSwarmOptimizationV22 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV23 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV23, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV23"] = EnhancedAdaptiveQuantumSwarmOptimizationV23 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV24 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV24, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV24"] = EnhancedAdaptiveQuantumSwarmOptimizationV24 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV25 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV25, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV25"] = EnhancedAdaptiveQuantumSwarmOptimizationV25 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV26 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV26, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV26"] = EnhancedAdaptiveQuantumSwarmOptimizationV26 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV27 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV27, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV27"] = EnhancedAdaptiveQuantumSwarmOptimizationV27 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV28 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV28, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV28"] = EnhancedAdaptiveQuantumSwarmOptimizationV28 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV29 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV29, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV29"] = EnhancedAdaptiveQuantumSwarmOptimizationV29 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV3 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV3"] = EnhancedAdaptiveQuantumSwarmOptimizationV3 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV30 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV30, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV30"] = EnhancedAdaptiveQuantumSwarmOptimizationV30 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV31 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV31, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV31"] = EnhancedAdaptiveQuantumSwarmOptimizationV31 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV4 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV4"] = EnhancedAdaptiveQuantumSwarmOptimizationV4 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV5 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV5"] = EnhancedAdaptiveQuantumSwarmOptimizationV5 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV6 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV6"] = EnhancedAdaptiveQuantumSwarmOptimizationV6 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV7 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV7"] = EnhancedAdaptiveQuantumSwarmOptimizationV7 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV8 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV8, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV8"] = EnhancedAdaptiveQuantumSwarmOptimizationV8 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV9 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV9, + ) + + lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV9"] = EnhancedAdaptiveQuantumSwarmOptimizationV9 + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9", register=True) +except Exception as e: + print("EnhancedAdaptiveQuantumSwarmOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSinusoidalDifferentialSwarm import ( + EnhancedAdaptiveSinusoidalDifferentialSwarm, + ) + + lama_register["EnhancedAdaptiveSinusoidalDifferentialSwarm"] = EnhancedAdaptiveSinusoidalDifferentialSwarm + LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: + print("EnhancedAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 + ) + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26", register=True) +except Exception as e: + print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveSwarmHarmonicOptimization import ( + EnhancedAdaptiveSwarmHarmonicOptimization, + ) + + lama_register["EnhancedAdaptiveSwarmHarmonicOptimization"] = EnhancedAdaptiveSwarmHarmonicOptimization + LLAMAEnhancedAdaptiveSwarmHarmonicOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization" + ).set_name("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization", register=True) +except Exception as e: + print("EnhancedAdaptiveSwarmHarmonicOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearch import ( + EnhancedAdaptiveTabuHarmonySearch, + ) + + lama_register["EnhancedAdaptiveTabuHarmonySearch"] = EnhancedAdaptiveTabuHarmonySearch + LLAMAEnhancedAdaptiveTabuHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveTabuHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearch", register=True) +except Exception as e: + print("EnhancedAdaptiveTabuHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearchV2 import ( + EnhancedAdaptiveTabuHarmonySearchV2, + ) + + lama_register["EnhancedAdaptiveTabuHarmonySearchV2"] = EnhancedAdaptiveTabuHarmonySearchV2 + LLAMAEnhancedAdaptiveTabuHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearchV2", register=True) +except Exception as e: + print("EnhancedAdaptiveTabuHarmonySearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedAdaptiveFireworkAlgorithm import ( + EnhancedAdvancedAdaptiveFireworkAlgorithm, + ) + + lama_register["EnhancedAdvancedAdaptiveFireworkAlgorithm"] = EnhancedAdvancedAdaptiveFireworkAlgorithm + LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedAdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 import ( + EnhancedAdvancedDifferentialEvolutionLocalSearch_v56, + ) + + lama_register["EnhancedAdvancedDifferentialEvolutionLocalSearch_v56"] = ( + EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 + ) + LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56" + ).set_name("LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56", register=True) +except Exception as e: + print("EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedHybridDifferentialEvolutionV4 import ( + EnhancedAdvancedHybridDifferentialEvolutionV4, + ) + + lama_register["EnhancedAdvancedHybridDifferentialEvolutionV4"] = ( + EnhancedAdvancedHybridDifferentialEvolutionV4 + ) + LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4", register=True) +except Exception as e: + print("EnhancedAdvancedHybridDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV17 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV17, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV17"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV17 + ) + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17", register=True) +except Exception as e: + print("EnhancedAdvancedHybridMetaHeuristicOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV18 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV18, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV18"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV18 + ) + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18", register=True) +except Exception as e: + print("EnhancedAdvancedHybridMetaHeuristicOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV19 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV19, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV19"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV19 + ) + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19", register=True) +except Exception as e: + print("EnhancedAdvancedHybridMetaHeuristicOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer import ( + EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer, + ) + + lama_register["EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer"] = ( + EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer + ) + LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer", register=True) +except Exception as e: + print("EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV1 import ( + EnhancedAdvancedQuantumSwarmOptimizationV1, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV1"] = EnhancedAdvancedQuantumSwarmOptimizationV1 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV10 import ( + EnhancedAdvancedQuantumSwarmOptimizationV10, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV10"] = EnhancedAdvancedQuantumSwarmOptimizationV10 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV11 import ( + EnhancedAdvancedQuantumSwarmOptimizationV11, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV11"] = EnhancedAdvancedQuantumSwarmOptimizationV11 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV12 import ( + EnhancedAdvancedQuantumSwarmOptimizationV12, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV12"] = EnhancedAdvancedQuantumSwarmOptimizationV12 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV13 import ( + EnhancedAdvancedQuantumSwarmOptimizationV13, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV13"] = EnhancedAdvancedQuantumSwarmOptimizationV13 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV14 import ( + EnhancedAdvancedQuantumSwarmOptimizationV14, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV14"] = EnhancedAdvancedQuantumSwarmOptimizationV14 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV2 import ( + EnhancedAdvancedQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV2"] = EnhancedAdvancedQuantumSwarmOptimizationV2 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV3 import ( + EnhancedAdvancedQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV3"] = EnhancedAdvancedQuantumSwarmOptimizationV3 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV4 import ( + EnhancedAdvancedQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV4"] = EnhancedAdvancedQuantumSwarmOptimizationV4 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV5 import ( + EnhancedAdvancedQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV5"] = EnhancedAdvancedQuantumSwarmOptimizationV5 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV6 import ( + EnhancedAdvancedQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV6"] = EnhancedAdvancedQuantumSwarmOptimizationV6 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV7 import ( + EnhancedAdvancedQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV7"] = EnhancedAdvancedQuantumSwarmOptimizationV7 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV8 import ( + EnhancedAdvancedQuantumSwarmOptimizationV8, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV8"] = EnhancedAdvancedQuantumSwarmOptimizationV8 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV9 import ( + EnhancedAdvancedQuantumSwarmOptimizationV9, + ) + + lama_register["EnhancedAdvancedQuantumSwarmOptimizationV9"] = EnhancedAdvancedQuantumSwarmOptimizationV9 + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9", register=True) +except Exception as e: + print("EnhancedAdvancedQuantumSwarmOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 import ( + EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78, + ) + + lama_register["EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78"] = ( + EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 + ) + LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78" + ).set_name("LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78", register=True) +except Exception as e: + print("EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedAdvancedUltimateGuidedMassQGSA_v79 import ( + EnhancedAdvancedUltimateGuidedMassQGSA_v79, + ) + + lama_register["EnhancedAdvancedUltimateGuidedMassQGSA_v79"] = EnhancedAdvancedUltimateGuidedMassQGSA_v79 + LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79" + ).set_name("LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79", register=True) +except Exception as e: + print("EnhancedAdvancedUltimateGuidedMassQGSA_v79 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedArchiveDE import EnhancedArchiveDE + + lama_register["EnhancedArchiveDE"] = EnhancedArchiveDE + LLAMAEnhancedArchiveDE = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE").set_name( + "LLAMAEnhancedArchiveDE", register=True + ) +except Exception as e: + print("EnhancedArchiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedBalancedDualStrategyAdaptiveDE import ( + EnhancedBalancedDualStrategyAdaptiveDE, + ) + + lama_register["EnhancedBalancedDualStrategyAdaptiveDE"] = EnhancedBalancedDualStrategyAdaptiveDE + LLAMAEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("EnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCMAES import EnhancedCMAES + + lama_register["EnhancedCMAES"] = EnhancedCMAES + LLAMAEnhancedCMAES = NonObjectOptimizer(method="LLAMAEnhancedCMAES").set_name( + "LLAMAEnhancedCMAES", register=True + ) +except Exception as e: + print("EnhancedCMAES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCMAESv2 import EnhancedCMAESv2 + + lama_register["EnhancedCMAESv2"] = EnhancedCMAESv2 + LLAMAEnhancedCMAESv2 = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2").set_name( + "LLAMAEnhancedCMAESv2", register=True + ) +except Exception as e: + print("EnhancedCMAESv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedChaoticFireworksOptimization import ( + EnhancedChaoticFireworksOptimization, + ) + + lama_register["EnhancedChaoticFireworksOptimization"] = EnhancedChaoticFireworksOptimization + LLAMAEnhancedChaoticFireworksOptimization = NonObjectOptimizer( + method="LLAMAEnhancedChaoticFireworksOptimization" + ).set_name("LLAMAEnhancedChaoticFireworksOptimization", register=True) +except Exception as e: + print("EnhancedChaoticFireworksOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedClusterDifferentialCrossover import ( + EnhancedClusterDifferentialCrossover, + ) + + lama_register["EnhancedClusterDifferentialCrossover"] = EnhancedClusterDifferentialCrossover + LLAMAEnhancedClusterDifferentialCrossover = NonObjectOptimizer( + method="LLAMAEnhancedClusterDifferentialCrossover" + ).set_name("LLAMAEnhancedClusterDifferentialCrossover", register=True) +except Exception as e: + print("EnhancedClusterDifferentialCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedClusteredDifferentialEvolution import ( + EnhancedClusteredDifferentialEvolution, + ) + + lama_register["EnhancedClusteredDifferentialEvolution"] = EnhancedClusteredDifferentialEvolution + LLAMAEnhancedClusteredDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedClusteredDifferentialEvolution" + ).set_name("LLAMAEnhancedClusteredDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedClusteredDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedConvergenceAcceleratedSpiralSearch import ( + EnhancedConvergenceAcceleratedSpiralSearch, + ) + + lama_register["EnhancedConvergenceAcceleratedSpiralSearch"] = EnhancedConvergenceAcceleratedSpiralSearch + LLAMAEnhancedConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( + method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch" + ).set_name("LLAMAEnhancedConvergenceAcceleratedSpiralSearch", register=True) +except Exception as e: + print("EnhancedConvergenceAcceleratedSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolution import ( + EnhancedConvergentDifferentialEvolution, + ) + + lama_register["EnhancedConvergentDifferentialEvolution"] = EnhancedConvergentDifferentialEvolution + LLAMAEnhancedConvergentDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolution" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedConvergentDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV2 import ( + EnhancedConvergentDifferentialEvolutionV2, + ) + + lama_register["EnhancedConvergentDifferentialEvolutionV2"] = EnhancedConvergentDifferentialEvolutionV2 + LLAMAEnhancedConvergentDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedConvergentDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV3 import ( + EnhancedConvergentDifferentialEvolutionV3, + ) + + lama_register["EnhancedConvergentDifferentialEvolutionV3"] = EnhancedConvergentDifferentialEvolutionV3 + LLAMAEnhancedConvergentDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV3", register=True) +except Exception as e: + print("EnhancedConvergentDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV4 import ( + EnhancedConvergentDifferentialEvolutionV4, + ) + + lama_register["EnhancedConvergentDifferentialEvolutionV4"] = EnhancedConvergentDifferentialEvolutionV4 + LLAMAEnhancedConvergentDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV4", register=True) +except Exception as e: + print("EnhancedConvergentDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCooperativeCulturalDifferentialSearch import ( + EnhancedCooperativeCulturalDifferentialSearch, + ) + + lama_register["EnhancedCooperativeCulturalDifferentialSearch"] = ( + EnhancedCooperativeCulturalDifferentialSearch + ) + LLAMAEnhancedCooperativeCulturalDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedCooperativeCulturalDifferentialSearch" + ).set_name("LLAMAEnhancedCooperativeCulturalDifferentialSearch", register=True) +except Exception as e: + print("EnhancedCooperativeCulturalDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarm import ( + EnhancedCosineAdaptiveDifferentialSwarm, + ) + + lama_register["EnhancedCosineAdaptiveDifferentialSwarm"] = EnhancedCosineAdaptiveDifferentialSwarm + LLAMAEnhancedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: + print("EnhancedCosineAdaptiveDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarmV2 import ( + EnhancedCosineAdaptiveDifferentialSwarmV2, + ) + + lama_register["EnhancedCosineAdaptiveDifferentialSwarmV2"] = EnhancedCosineAdaptiveDifferentialSwarmV2 + LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2 = NonObjectOptimizer( + method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2" + ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2", register=True) +except Exception as e: + print("EnhancedCosineAdaptiveDifferentialSwarmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCovarianceGradientSearchV2 import ( + EnhancedCovarianceGradientSearchV2, + ) + + lama_register["EnhancedCovarianceGradientSearchV2"] = EnhancedCovarianceGradientSearchV2 + LLAMAEnhancedCovarianceGradientSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceGradientSearchV2" + ).set_name("LLAMAEnhancedCovarianceGradientSearchV2", register=True) +except Exception as e: + print("EnhancedCovarianceGradientSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCovarianceMatrixAdaptation import ( + EnhancedCovarianceMatrixAdaptation, + ) + + lama_register["EnhancedCovarianceMatrixAdaptation"] = EnhancedCovarianceMatrixAdaptation + LLAMAEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixAdaptation" + ).set_name("LLAMAEnhancedCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("EnhancedCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolution import ( + EnhancedCovarianceMatrixEvolution, + ) + + lama_register["EnhancedCovarianceMatrixEvolution"] = EnhancedCovarianceMatrixEvolution + LLAMAEnhancedCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedCovarianceMatrixEvolution", register=True) +except Exception as e: + print("EnhancedCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolutionV2 import ( + EnhancedCovarianceMatrixEvolutionV2, + ) + + lama_register["EnhancedCovarianceMatrixEvolutionV2"] = EnhancedCovarianceMatrixEvolutionV2 + LLAMAEnhancedCovarianceMatrixEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixEvolutionV2" + ).set_name("LLAMAEnhancedCovarianceMatrixEvolutionV2", register=True) +except Exception as e: + print("EnhancedCovarianceMatrixEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCrossoverElitistStrategyV9 import ( + EnhancedCrossoverElitistStrategyV9, + ) + + lama_register["EnhancedCrossoverElitistStrategyV9"] = EnhancedCrossoverElitistStrategyV9 + LLAMAEnhancedCrossoverElitistStrategyV9 = NonObjectOptimizer( + method="LLAMAEnhancedCrossoverElitistStrategyV9" + ).set_name("LLAMAEnhancedCrossoverElitistStrategyV9", register=True) +except Exception as e: + print("EnhancedCrossoverElitistStrategyV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCrowdingMemoryHybridOptimizer import ( + EnhancedCrowdingMemoryHybridOptimizer, + ) + + lama_register["EnhancedCrowdingMemoryHybridOptimizer"] = EnhancedCrowdingMemoryHybridOptimizer + LLAMAEnhancedCrowdingMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedCrowdingMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedCrowdingMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedCrowdingMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCulturalAdaptiveDifferentialEvolution import ( + EnhancedCulturalAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedCulturalAdaptiveDifferentialEvolution"] = ( + EnhancedCulturalAdaptiveDifferentialEvolution + ) + LLAMAEnhancedCulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedCulturalAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedCulturalAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCulturalEvolutionaryOptimizer import ( + EnhancedCulturalEvolutionaryOptimizer, + ) + + lama_register["EnhancedCulturalEvolutionaryOptimizer"] = EnhancedCulturalEvolutionaryOptimizer + LLAMAEnhancedCulturalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedCulturalEvolutionaryOptimizer" + ).set_name("LLAMAEnhancedCulturalEvolutionaryOptimizer", register=True) +except Exception as e: + print("EnhancedCulturalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedCulturalMemeticDifferentialEvolution import ( + EnhancedCulturalMemeticDifferentialEvolution, + ) + + lama_register["EnhancedCulturalMemeticDifferentialEvolution"] = ( + EnhancedCulturalMemeticDifferentialEvolution + ) + LLAMAEnhancedCulturalMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCulturalMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedCulturalMemeticDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedCulturalMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolution import EnhancedDifferentialEvolution + + lama_register["EnhancedDifferentialEvolution"] = EnhancedDifferentialEvolution + LLAMAEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolution" + ).set_name("LLAMAEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptivePSO import ( + EnhancedDifferentialEvolutionAdaptivePSO, + ) + + lama_register["EnhancedDifferentialEvolutionAdaptivePSO"] = EnhancedDifferentialEvolutionAdaptivePSO + LLAMAEnhancedDifferentialEvolutionAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO" + ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptivePSO", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptiveStrategy import ( + EnhancedDifferentialEvolutionAdaptiveStrategy, + ) + + lama_register["EnhancedDifferentialEvolutionAdaptiveStrategy"] = ( + EnhancedDifferentialEvolutionAdaptiveStrategy + ) + LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy" + ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionFireworkAlgorithm import ( + EnhancedDifferentialEvolutionFireworkAlgorithm, + ) + + lama_register["EnhancedDifferentialEvolutionFireworkAlgorithm"] = ( + EnhancedDifferentialEvolutionFireworkAlgorithm + ) + LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm" + ).set_name("LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v15 import ( + EnhancedDifferentialEvolutionLSRefinement_v15, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v15"] = ( + EnhancedDifferentialEvolutionLSRefinement_v15 + ) + LLAMAEnhancedDifferentialEvolutionLSRefinement_v15 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v15", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLSRefinement_v15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v16 import ( + EnhancedDifferentialEvolutionLSRefinement_v16, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v16"] = ( + EnhancedDifferentialEvolutionLSRefinement_v16 + ) + LLAMAEnhancedDifferentialEvolutionLSRefinement_v16 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v16", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLSRefinement_v16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v17 import ( + EnhancedDifferentialEvolutionLSRefinement_v17, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v17"] = ( + EnhancedDifferentialEvolutionLSRefinement_v17 + ) + LLAMAEnhancedDifferentialEvolutionLSRefinement_v17 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v17", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLSRefinement_v17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v18 import ( + EnhancedDifferentialEvolutionLSRefinement_v18, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v18"] = ( + EnhancedDifferentialEvolutionLSRefinement_v18 + ) + LLAMAEnhancedDifferentialEvolutionLSRefinement_v18 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v18", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLSRefinement_v18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v19 import ( + EnhancedDifferentialEvolutionLSRefinement_v19, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v19"] = ( + EnhancedDifferentialEvolutionLSRefinement_v19 + ) + LLAMAEnhancedDifferentialEvolutionLSRefinement_v19 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v19", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLSRefinement_v19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v21 import ( + EnhancedDifferentialEvolutionLocalSearch_v21, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v21"] = ( + EnhancedDifferentialEvolutionLocalSearch_v21 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v21 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v21", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v22 import ( + EnhancedDifferentialEvolutionLocalSearch_v22, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v22"] = ( + EnhancedDifferentialEvolutionLocalSearch_v22 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v22 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v22", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v23 import ( + EnhancedDifferentialEvolutionLocalSearch_v23, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v23"] = ( + EnhancedDifferentialEvolutionLocalSearch_v23 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v23 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v23", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v24 import ( + EnhancedDifferentialEvolutionLocalSearch_v24, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v24"] = ( + EnhancedDifferentialEvolutionLocalSearch_v24 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v24 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v24", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v25 import ( + EnhancedDifferentialEvolutionLocalSearch_v25, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v25"] = ( + EnhancedDifferentialEvolutionLocalSearch_v25 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v25 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v25", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v26 import ( + EnhancedDifferentialEvolutionLocalSearch_v26, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v26"] = ( + EnhancedDifferentialEvolutionLocalSearch_v26 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v26 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v26", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v27 import ( + EnhancedDifferentialEvolutionLocalSearch_v27, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v27"] = ( + EnhancedDifferentialEvolutionLocalSearch_v27 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v27 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v27", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v28 import ( + EnhancedDifferentialEvolutionLocalSearch_v28, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v28"] = ( + EnhancedDifferentialEvolutionLocalSearch_v28 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v28 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v28", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v29 import ( + EnhancedDifferentialEvolutionLocalSearch_v29, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v29"] = ( + EnhancedDifferentialEvolutionLocalSearch_v29 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v29 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v29", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v30 import ( + EnhancedDifferentialEvolutionLocalSearch_v30, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v30"] = ( + EnhancedDifferentialEvolutionLocalSearch_v30 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v30 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v30", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v31 import ( + EnhancedDifferentialEvolutionLocalSearch_v31, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v31"] = ( + EnhancedDifferentialEvolutionLocalSearch_v31 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v31 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v31", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v32 import ( + EnhancedDifferentialEvolutionLocalSearch_v32, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v32"] = ( + EnhancedDifferentialEvolutionLocalSearch_v32 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v32 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v32", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v33 import ( + EnhancedDifferentialEvolutionLocalSearch_v33, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v33"] = ( + EnhancedDifferentialEvolutionLocalSearch_v33 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v33 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v33", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v34 import ( + EnhancedDifferentialEvolutionLocalSearch_v34, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v34"] = ( + EnhancedDifferentialEvolutionLocalSearch_v34 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v34 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v34", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v35 import ( + EnhancedDifferentialEvolutionLocalSearch_v35, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v35"] = ( + EnhancedDifferentialEvolutionLocalSearch_v35 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v35 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v35", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v35 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v36 import ( + EnhancedDifferentialEvolutionLocalSearch_v36, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v36"] = ( + EnhancedDifferentialEvolutionLocalSearch_v36 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v36 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v36", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v36 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v37 import ( + EnhancedDifferentialEvolutionLocalSearch_v37, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v37"] = ( + EnhancedDifferentialEvolutionLocalSearch_v37 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v37 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v37", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v37 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v38 import ( + EnhancedDifferentialEvolutionLocalSearch_v38, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v38"] = ( + EnhancedDifferentialEvolutionLocalSearch_v38 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v38 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v38", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v38 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v39 import ( + EnhancedDifferentialEvolutionLocalSearch_v39, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v39"] = ( + EnhancedDifferentialEvolutionLocalSearch_v39 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v39 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v39", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v40 import ( + EnhancedDifferentialEvolutionLocalSearch_v40, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v40"] = ( + EnhancedDifferentialEvolutionLocalSearch_v40 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v40 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v40", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v40 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v41 import ( + EnhancedDifferentialEvolutionLocalSearch_v41, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v41"] = ( + EnhancedDifferentialEvolutionLocalSearch_v41 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v41 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v41", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v43 import ( + EnhancedDifferentialEvolutionLocalSearch_v43, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v43"] = ( + EnhancedDifferentialEvolutionLocalSearch_v43 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v43 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v43", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v44 import ( + EnhancedDifferentialEvolutionLocalSearch_v44, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v44"] = ( + EnhancedDifferentialEvolutionLocalSearch_v44 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v44 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v44", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v44 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v45 import ( + EnhancedDifferentialEvolutionLocalSearch_v45, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v45"] = ( + EnhancedDifferentialEvolutionLocalSearch_v45 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v45 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v45", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v45 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v46 import ( + EnhancedDifferentialEvolutionLocalSearch_v46, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v46"] = ( + EnhancedDifferentialEvolutionLocalSearch_v46 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v46 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v46", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v46 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v47 import ( + EnhancedDifferentialEvolutionLocalSearch_v47, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v47"] = ( + EnhancedDifferentialEvolutionLocalSearch_v47 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v47 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v47", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v47 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v48 import ( + EnhancedDifferentialEvolutionLocalSearch_v48, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v48"] = ( + EnhancedDifferentialEvolutionLocalSearch_v48 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v48 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v48", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v48 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v49 import ( + EnhancedDifferentialEvolutionLocalSearch_v49, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v49"] = ( + EnhancedDifferentialEvolutionLocalSearch_v49 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v49 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v49", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v49 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v50 import ( + EnhancedDifferentialEvolutionLocalSearch_v50, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v50"] = ( + EnhancedDifferentialEvolutionLocalSearch_v50 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v50 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v50", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v50 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v51 import ( + EnhancedDifferentialEvolutionLocalSearch_v51, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v51"] = ( + EnhancedDifferentialEvolutionLocalSearch_v51 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v51 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v51", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v51 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v52 import ( + EnhancedDifferentialEvolutionLocalSearch_v52, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v52"] = ( + EnhancedDifferentialEvolutionLocalSearch_v52 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v52 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v52", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v53 import ( + EnhancedDifferentialEvolutionLocalSearch_v53, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v53"] = ( + EnhancedDifferentialEvolutionLocalSearch_v53 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v53 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v53", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v53 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v59 import ( + EnhancedDifferentialEvolutionLocalSearch_v59, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v59"] = ( + EnhancedDifferentialEvolutionLocalSearch_v59 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v59 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v59", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v59 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v60 import ( + EnhancedDifferentialEvolutionLocalSearch_v60, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v60"] = ( + EnhancedDifferentialEvolutionLocalSearch_v60 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v60 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v60", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v60 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v62 import ( + EnhancedDifferentialEvolutionLocalSearch_v62, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v62"] = ( + EnhancedDifferentialEvolutionLocalSearch_v62 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v62 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v62", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v62 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v63 import ( + EnhancedDifferentialEvolutionLocalSearch_v63, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v63"] = ( + EnhancedDifferentialEvolutionLocalSearch_v63 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v63 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v63", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v63 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v64 import ( + EnhancedDifferentialEvolutionLocalSearch_v64, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v64"] = ( + EnhancedDifferentialEvolutionLocalSearch_v64 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v64 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v64", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v64 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v66 import ( + EnhancedDifferentialEvolutionLocalSearch_v66, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v66"] = ( + EnhancedDifferentialEvolutionLocalSearch_v66 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v66 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v66", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v66 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v67 import ( + EnhancedDifferentialEvolutionLocalSearch_v67, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v67"] = ( + EnhancedDifferentialEvolutionLocalSearch_v67 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v67 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v67", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v67 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v68 import ( + EnhancedDifferentialEvolutionLocalSearch_v68, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v68"] = ( + EnhancedDifferentialEvolutionLocalSearch_v68 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v68 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v68", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v68 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v69 import ( + EnhancedDifferentialEvolutionLocalSearch_v69, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v69"] = ( + EnhancedDifferentialEvolutionLocalSearch_v69 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v69 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v69", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v69 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v70 import ( + EnhancedDifferentialEvolutionLocalSearch_v70, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v70"] = ( + EnhancedDifferentialEvolutionLocalSearch_v70 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v70 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v70", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v70 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v71 import ( + EnhancedDifferentialEvolutionLocalSearch_v71, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v71"] = ( + EnhancedDifferentialEvolutionLocalSearch_v71 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v71 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v71", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v71 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v72 import ( + EnhancedDifferentialEvolutionLocalSearch_v72, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v72"] = ( + EnhancedDifferentialEvolutionLocalSearch_v72 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v72 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v72", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v72 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v73 import ( + EnhancedDifferentialEvolutionLocalSearch_v73, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v73"] = ( + EnhancedDifferentialEvolutionLocalSearch_v73 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v73 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v73", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v73 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v74 import ( + EnhancedDifferentialEvolutionLocalSearch_v74, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v74"] = ( + EnhancedDifferentialEvolutionLocalSearch_v74 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v74 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v74", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v74 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v75 import ( + EnhancedDifferentialEvolutionLocalSearch_v75, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v75"] = ( + EnhancedDifferentialEvolutionLocalSearch_v75 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v75 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v75", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v75 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v76 import ( + EnhancedDifferentialEvolutionLocalSearch_v76, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v76"] = ( + EnhancedDifferentialEvolutionLocalSearch_v76 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v76 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v76", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v76 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v77 import ( + EnhancedDifferentialEvolutionLocalSearch_v77, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v77"] = ( + EnhancedDifferentialEvolutionLocalSearch_v77 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v77 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v77", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v77 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v78 import ( + EnhancedDifferentialEvolutionLocalSearch_v78, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v78"] = ( + EnhancedDifferentialEvolutionLocalSearch_v78 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v78 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v78", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v78 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v79 import ( + EnhancedDifferentialEvolutionLocalSearch_v79, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v79"] = ( + EnhancedDifferentialEvolutionLocalSearch_v79 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v79 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v79", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v79 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v80 import ( + EnhancedDifferentialEvolutionLocalSearch_v80, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v80"] = ( + EnhancedDifferentialEvolutionLocalSearch_v80 + ) + LLAMAEnhancedDifferentialEvolutionLocalSearch_v80 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v80", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionLocalSearch_v80 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionOptimizer import ( + EnhancedDifferentialEvolutionOptimizer, + ) + + lama_register["EnhancedDifferentialEvolutionOptimizer"] = EnhancedDifferentialEvolutionOptimizer + LLAMAEnhancedDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionOptimizer" + ).set_name("LLAMAEnhancedDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizer import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizer, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizer"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizer + ) + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV2, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV2"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 + ) + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV3, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV3"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 + ) + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV4, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV4"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 + ) + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionWithAdaptiveMutationControl import ( + EnhancedDifferentialEvolutionWithAdaptiveMutationControl, + ) + + lama_register["EnhancedDifferentialEvolutionWithAdaptiveMutationControl"] = ( + EnhancedDifferentialEvolutionWithAdaptiveMutationControl + ) + LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl" + ).set_name("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl", register=True) +except Exception as e: + print("EnhancedDifferentialEvolutionWithAdaptiveMutationControl can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm import ( + EnhancedDifferentialFireworkAlgorithm, + ) + + lama_register["EnhancedDifferentialFireworkAlgorithm"] = EnhancedDifferentialFireworkAlgorithm + LLAMAEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialFireworkAlgorithm" + ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDifferentialFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm_v2 import ( + EnhancedDifferentialFireworkAlgorithm_v2, + ) + + lama_register["EnhancedDifferentialFireworkAlgorithm_v2"] = EnhancedDifferentialFireworkAlgorithm_v2 + LLAMAEnhancedDifferentialFireworkAlgorithm_v2 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2" + ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm_v2", register=True) +except Exception as e: + print("EnhancedDifferentialFireworkAlgorithm_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentialSimulatedAnnealingOptimizer import ( + EnhancedDifferentialSimulatedAnnealingOptimizer, + ) + + lama_register["EnhancedDifferentialSimulatedAnnealingOptimizer"] = ( + EnhancedDifferentialSimulatedAnnealingOptimizer + ) + LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer" + ).set_name("LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer", register=True) +except Exception as e: + print("EnhancedDifferentialSimulatedAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDifferentiatedAdaptiveEvolution import ( + EnhancedDifferentiatedAdaptiveEvolution, + ) + + lama_register["EnhancedDifferentiatedAdaptiveEvolution"] = EnhancedDifferentiatedAdaptiveEvolution + LLAMAEnhancedDifferentiatedAdaptiveEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDifferentiatedAdaptiveEvolution" + ).set_name("LLAMAEnhancedDifferentiatedAdaptiveEvolution", register=True) +except Exception as e: + print("EnhancedDifferentiatedAdaptiveEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDimensionalFeedbackEvolverV3 import ( + EnhancedDimensionalFeedbackEvolverV3, + ) + + lama_register["EnhancedDimensionalFeedbackEvolverV3"] = EnhancedDimensionalFeedbackEvolverV3 + LLAMAEnhancedDimensionalFeedbackEvolverV3 = NonObjectOptimizer( + method="LLAMAEnhancedDimensionalFeedbackEvolverV3" + ).set_name("LLAMAEnhancedDimensionalFeedbackEvolverV3", register=True) +except Exception as e: + print("EnhancedDimensionalFeedbackEvolverV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiverseMemoryHybridOptimizer import ( + EnhancedDiverseMemoryHybridOptimizer, + ) + + lama_register["EnhancedDiverseMemoryHybridOptimizer"] = EnhancedDiverseMemoryHybridOptimizer + LLAMAEnhancedDiverseMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiverseMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedDiverseMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedDiverseMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedAdaptiveHarmonySearch import ( + EnhancedDiversifiedAdaptiveHarmonySearch, + ) + + lama_register["EnhancedDiversifiedAdaptiveHarmonySearch"] = EnhancedDiversifiedAdaptiveHarmonySearch + LLAMAEnhancedDiversifiedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch" + ).set_name("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch", register=True) +except Exception as e: + print("EnhancedDiversifiedAdaptiveHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithm import ( + EnhancedDiversifiedCuckooFireworksAlgorithm, + ) + + lama_register["EnhancedDiversifiedCuckooFireworksAlgorithm"] = EnhancedDiversifiedCuckooFireworksAlgorithm + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm", register=True) +except Exception as e: + print("EnhancedDiversifiedCuckooFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithmV2 import ( + EnhancedDiversifiedCuckooFireworksAlgorithmV2, + ) + + lama_register["EnhancedDiversifiedCuckooFireworksAlgorithmV2"] = ( + EnhancedDiversifiedCuckooFireworksAlgorithmV2 + ) + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2", register=True) +except Exception as e: + print("EnhancedDiversifiedCuckooFireworksAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimization import ( + EnhancedDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimization"] = ( + EnhancedDiversifiedGravitationalSwarmOptimization + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV2 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV2"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV2 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV3 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV3"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV3 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV4 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV4"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV4 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV5 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV5, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV5"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV5 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV6 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV6, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV6"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV6 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV7 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV7, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV7"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV7 + ) + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedDiversifiedGravitationalSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer, + ) + + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer"] = EnhancedDiversifiedHarmonicHarmonyOptimizer + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonicHarmonyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V2, + ) + + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V2"] = ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 + ) + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V3, + ) + + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V3"] = ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 + ) + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyAlgorithm import ( + EnhancedDiversifiedHarmonyAlgorithm, + ) + + lama_register["EnhancedDiversifiedHarmonyAlgorithm"] = EnhancedDiversifiedHarmonyAlgorithm + LLAMAEnhancedDiversifiedHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedHarmonyAlgorithm", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonyAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithm import ( + EnhancedDiversifiedHarmonyFireworksAlgorithm, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithm"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithm + ) + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonyFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV2 import ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV2, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV2"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV2 + ) + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonyFireworksAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV3 import ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV3, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV3"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV3 + ) + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonyFireworksAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonySearchOptimizer import ( + EnhancedDiversifiedHarmonySearchOptimizer, + ) + + lama_register["EnhancedDiversifiedHarmonySearchOptimizer"] = EnhancedDiversifiedHarmonySearchOptimizer + LLAMAEnhancedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedDiversifiedHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV3 import ( + EnhancedDiversifiedMetaHeuristicAlgorithmV3, + ) + + lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV3"] = EnhancedDiversifiedMetaHeuristicAlgorithmV3 + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3" + ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3", register=True) +except Exception as e: + print("EnhancedDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV4 import ( + EnhancedDiversifiedMetaHeuristicAlgorithmV4, + ) + + lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV4"] = EnhancedDiversifiedMetaHeuristicAlgorithmV4 + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4" + ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4", register=True) +except Exception as e: + print("EnhancedDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization import ( + EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization, + ) + + lama_register["EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization"] = ( + EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization + ) + LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization", register=True) +except Exception as e: + print("EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( + EnhancedDualPhaseAdaptiveHybridOptimizationV3, + ) + + lama_register["EnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( + EnhancedDualPhaseAdaptiveHybridOptimizationV3 + ) + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) +except Exception as e: + print("EnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizerV3 import ( + EnhancedDualPhaseAdaptiveHybridOptimizerV3, + ) + + lama_register["EnhancedDualPhaseAdaptiveHybridOptimizerV3"] = EnhancedDualPhaseAdaptiveHybridOptimizerV3 + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3", register=True) +except Exception as e: + print("EnhancedDualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution import ( + EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution"] = ( + EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution + ) + LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseDifferentialEvolution import ( + EnhancedDualPhaseDifferentialEvolution, + ) + + lama_register["EnhancedDualPhaseDifferentialEvolution"] = EnhancedDualPhaseDifferentialEvolution + LLAMAEnhancedDualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseDifferentialEvolution" + ).set_name("LLAMAEnhancedDualPhaseDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDualPhaseDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimization import ( + EnhancedDualPhaseHybridOptimization, + ) + + lama_register["EnhancedDualPhaseHybridOptimization"] = EnhancedDualPhaseHybridOptimization + LLAMAEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseHybridOptimization" + ).set_name("LLAMAEnhancedDualPhaseHybridOptimization", register=True) +except Exception as e: + print("EnhancedDualPhaseHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimizationV2 import ( + EnhancedDualPhaseHybridOptimizationV2, + ) + + lama_register["EnhancedDualPhaseHybridOptimizationV2"] = EnhancedDualPhaseHybridOptimizationV2 + LLAMAEnhancedDualPhaseHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseHybridOptimizationV2" + ).set_name("LLAMAEnhancedDualPhaseHybridOptimizationV2", register=True) +except Exception as e: + print("EnhancedDualPhaseHybridOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualStrategyAdaptiveDE_v2 import ( + EnhancedDualStrategyAdaptiveDE_v2, + ) + + lama_register["EnhancedDualStrategyAdaptiveDE_v2"] = EnhancedDualStrategyAdaptiveDE_v2 + LLAMAEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedDualStrategyAdaptiveDE_v2" + ).set_name("LLAMAEnhancedDualStrategyAdaptiveDE_v2", register=True) +except Exception as e: + print("EnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDualStrategyHybridOptimizer import ( + EnhancedDualStrategyHybridOptimizer, + ) + + lama_register["EnhancedDualStrategyHybridOptimizer"] = EnhancedDualStrategyHybridOptimizer + LLAMAEnhancedDualStrategyHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDualStrategyHybridOptimizer" + ).set_name("LLAMAEnhancedDualStrategyHybridOptimizer", register=True) +except Exception as e: + print("EnhancedDualStrategyHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveClimbingStrategy import ( + EnhancedDynamicAdaptiveClimbingStrategy, + ) + + lama_register["EnhancedDynamicAdaptiveClimbingStrategy"] = EnhancedDynamicAdaptiveClimbingStrategy + LLAMAEnhancedDynamicAdaptiveClimbingStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy" + ).set_name("LLAMAEnhancedDynamicAdaptiveClimbingStrategy", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveClimbingStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDE import EnhancedDynamicAdaptiveDE + + lama_register["EnhancedDynamicAdaptiveDE"] = EnhancedDynamicAdaptiveDE + LLAMAEnhancedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE").set_name( + "LLAMAEnhancedDynamicAdaptiveDE", register=True + ) +except Exception as e: + print("EnhancedDynamicAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolution import ( + EnhancedDynamicAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolution"] = ( + EnhancedDynamicAdaptiveDifferentialEvolution + ) + LLAMAEnhancedDynamicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation import ( + EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation + ) + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionRefined import ( + EnhancedDynamicAdaptiveDifferentialEvolutionRefined, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionRefined"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionRefined + ) + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveDifferentialEvolutionRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionV2 import ( + EnhancedDynamicAdaptiveDifferentialEvolutionV2, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionV2"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionV2 + ) + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveExplorationOptimization import ( + EnhancedDynamicAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedDynamicAdaptiveExplorationOptimization"] = ( + EnhancedDynamicAdaptiveExplorationOptimization + ) + LLAMAEnhancedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveFireworkAlgorithm import ( + EnhancedDynamicAdaptiveFireworkAlgorithm, + ) + + lama_register["EnhancedDynamicAdaptiveFireworkAlgorithm"] = EnhancedDynamicAdaptiveFireworkAlgorithm + LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligence + ) + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2, + ) + + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 + ) + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizer import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizer, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizer"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizer + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV2, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV2"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV3, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV3"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV4, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV4"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV5, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV5"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV6, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV6"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 + ) + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridDEPSO import ( + EnhancedDynamicAdaptiveHybridDEPSO, + ) + + lama_register["EnhancedDynamicAdaptiveHybridDEPSO"] = EnhancedDynamicAdaptiveHybridDEPSO + LLAMAEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimization import ( + EnhancedDynamicAdaptiveHybridOptimization, + ) + + lama_register["EnhancedDynamicAdaptiveHybridOptimization"] = EnhancedDynamicAdaptiveHybridOptimization + LLAMAEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimizer import ( + EnhancedDynamicAdaptiveHybridOptimizer, + ) + + lama_register["EnhancedDynamicAdaptiveHybridOptimizer"] = EnhancedDynamicAdaptiveHybridOptimizer + LLAMAEnhancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryAnnealing import ( + EnhancedDynamicAdaptiveMemoryAnnealing, + ) + + lama_register["EnhancedDynamicAdaptiveMemoryAnnealing"] = EnhancedDynamicAdaptiveMemoryAnnealing + LLAMAEnhancedDynamicAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing" + ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryStrategyV59 import ( + EnhancedDynamicAdaptiveMemoryStrategyV59, + ) + + lama_register["EnhancedDynamicAdaptiveMemoryStrategyV59"] = EnhancedDynamicAdaptiveMemoryStrategyV59 + LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59" + ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveMemoryStrategyV59 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveOptimizerV8 import ( + EnhancedDynamicAdaptiveOptimizerV8, + ) + + lama_register["EnhancedDynamicAdaptiveOptimizerV8"] = EnhancedDynamicAdaptiveOptimizerV8 + LLAMAEnhancedDynamicAdaptiveOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveOptimizerV8" + ).set_name("LLAMAEnhancedDynamicAdaptiveOptimizerV8", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptivePopulationDifferentialEvolution import ( + EnhancedDynamicAdaptivePopulationDifferentialEvolution, + ) + + lama_register["EnhancedDynamicAdaptivePopulationDifferentialEvolution"] = ( + EnhancedDynamicAdaptivePopulationDifferentialEvolution + ) + LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveQuantumPSO import ( + EnhancedDynamicAdaptiveQuantumPSO, + ) + + lama_register["EnhancedDynamicAdaptiveQuantumPSO"] = EnhancedDynamicAdaptiveQuantumPSO + LLAMAEnhancedDynamicAdaptiveQuantumPSO = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveQuantumPSO" + ).set_name("LLAMAEnhancedDynamicAdaptiveQuantumPSO", register=True) +except Exception as e: + print("EnhancedDynamicAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicBalancingPSO import EnhancedDynamicBalancingPSO + + lama_register["EnhancedDynamicBalancingPSO"] = EnhancedDynamicBalancingPSO + LLAMAEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO").set_name( + "LLAMAEnhancedDynamicBalancingPSO", register=True + ) +except Exception as e: + print("EnhancedDynamicBalancingPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicClusterOptimization import ( + EnhancedDynamicClusterOptimization, + ) + + lama_register["EnhancedDynamicClusterOptimization"] = EnhancedDynamicClusterOptimization + LLAMAEnhancedDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicClusterOptimization" + ).set_name("LLAMAEnhancedDynamicClusterOptimization", register=True) +except Exception as e: + print("EnhancedDynamicClusterOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicClusterSearch import EnhancedDynamicClusterSearch + + lama_register["EnhancedDynamicClusterSearch"] = EnhancedDynamicClusterSearch + LLAMAEnhancedDynamicClusterSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicClusterSearch" + ).set_name("LLAMAEnhancedDynamicClusterSearch", register=True) +except Exception as e: + print("EnhancedDynamicClusterSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicCohortOptimization import ( + EnhancedDynamicCohortOptimization, + ) + + lama_register["EnhancedDynamicCohortOptimization"] = EnhancedDynamicCohortOptimization + LLAMAEnhancedDynamicCohortOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCohortOptimization" + ).set_name("LLAMAEnhancedDynamicCohortOptimization", register=True) +except Exception as e: + print("EnhancedDynamicCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicCrossoverRAMEDS import EnhancedDynamicCrossoverRAMEDS + + lama_register["EnhancedDynamicCrossoverRAMEDS"] = EnhancedDynamicCrossoverRAMEDS + LLAMAEnhancedDynamicCrossoverRAMEDS = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCrossoverRAMEDS" + ).set_name("LLAMAEnhancedDynamicCrossoverRAMEDS", register=True) +except Exception as e: + print("EnhancedDynamicCrossoverRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicCuckooHarmonyAlgorithm import ( + EnhancedDynamicCuckooHarmonyAlgorithm, + ) + + lama_register["EnhancedDynamicCuckooHarmonyAlgorithm"] = EnhancedDynamicCuckooHarmonyAlgorithm + LLAMAEnhancedDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDynamicCuckooHarmonyAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicCuckooHarmonyAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolution import ( + EnhancedDynamicDifferentialEvolution, + ) + + lama_register["EnhancedDynamicDifferentialEvolution"] = EnhancedDynamicDifferentialEvolution + LLAMAEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionImproved import ( + EnhancedDynamicDifferentialEvolutionImproved, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionImproved"] = ( + EnhancedDynamicDifferentialEvolutionImproved + ) + LLAMAEnhancedDynamicDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionImproved" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionImproved", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionRefined import ( + EnhancedDynamicDifferentialEvolutionRefined, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionRefined"] = EnhancedDynamicDifferentialEvolutionRefined + LLAMAEnhancedDynamicDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionRefined", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV2 import ( + EnhancedDynamicDifferentialEvolutionV2, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionV2"] = EnhancedDynamicDifferentialEvolutionV2 + LLAMAEnhancedDynamicDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV3 import ( + EnhancedDynamicDifferentialEvolutionV3, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionV3"] = EnhancedDynamicDifferentialEvolutionV3 + LLAMAEnhancedDynamicDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV3", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover + ) + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation + ) + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined + ) + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined", register=True) +except Exception as e: + print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover import ( + EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover"] = ( + EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover + ) + LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover" + ).set_name( + "LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover", register=True + ) +except Exception as e: + print( + "EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDiversifiedHarmonySearchOptimizer import ( + EnhancedDynamicDiversifiedHarmonySearchOptimizer, + ) + + lama_register["EnhancedDynamicDiversifiedHarmonySearchOptimizer"] = ( + EnhancedDynamicDiversifiedHarmonySearchOptimizer + ) + LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicDiversifiedHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicDualPhaseStrategyV12 import ( + EnhancedDynamicDualPhaseStrategyV12, + ) + + lama_register["EnhancedDynamicDualPhaseStrategyV12"] = EnhancedDynamicDualPhaseStrategyV12 + LLAMAEnhancedDynamicDualPhaseStrategyV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDualPhaseStrategyV12" + ).set_name("LLAMAEnhancedDynamicDualPhaseStrategyV12", register=True) +except Exception as e: + print("EnhancedDynamicDualPhaseStrategyV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicEliteAnnealingDE import EnhancedDynamicEliteAnnealingDE + + lama_register["EnhancedDynamicEliteAnnealingDE"] = EnhancedDynamicEliteAnnealingDE + LLAMAEnhancedDynamicEliteAnnealingDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEliteAnnealingDE" + ).set_name("LLAMAEnhancedDynamicEliteAnnealingDE", register=True) +except Exception as e: + print("EnhancedDynamicEliteAnnealingDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicEscapeStrategyV32 import EnhancedDynamicEscapeStrategyV32 + + lama_register["EnhancedDynamicEscapeStrategyV32"] = EnhancedDynamicEscapeStrategyV32 + LLAMAEnhancedDynamicEscapeStrategyV32 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEscapeStrategyV32" + ).set_name("LLAMAEnhancedDynamicEscapeStrategyV32", register=True) +except Exception as e: + print("EnhancedDynamicEscapeStrategyV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicEvolutionStrategy import EnhancedDynamicEvolutionStrategy + + lama_register["EnhancedDynamicEvolutionStrategy"] = EnhancedDynamicEvolutionStrategy + LLAMAEnhancedDynamicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEvolutionStrategy" + ).set_name("LLAMAEnhancedDynamicEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedDynamicEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicExplorationOptimizer import ( + EnhancedDynamicExplorationOptimizer, + ) + + lama_register["EnhancedDynamicExplorationOptimizer"] = EnhancedDynamicExplorationOptimizer + LLAMAEnhancedDynamicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicExplorationOptimizer" + ).set_name("LLAMAEnhancedDynamicExplorationOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithm import EnhancedDynamicFireworkAlgorithm + + lama_register["EnhancedDynamicFireworkAlgorithm"] = EnhancedDynamicFireworkAlgorithm + LLAMAEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmFinal import ( + EnhancedDynamicFireworkAlgorithmFinal, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmFinal"] = EnhancedDynamicFireworkAlgorithmFinal + LLAMAEnhancedDynamicFireworkAlgorithmFinal = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmFinal" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmFinal", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmFinal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmImproved import ( + EnhancedDynamicFireworkAlgorithmImproved, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmImproved"] = EnhancedDynamicFireworkAlgorithmImproved + LLAMAEnhancedDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRedesigned import ( + EnhancedDynamicFireworkAlgorithmRedesigned, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmRedesigned"] = EnhancedDynamicFireworkAlgorithmRedesigned + LLAMAEnhancedDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmRedesigned can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRefined import ( + EnhancedDynamicFireworkAlgorithmRefined, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmRefined"] = EnhancedDynamicFireworkAlgorithmRefined + LLAMAEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmV2 import ( + EnhancedDynamicFireworkAlgorithmV2, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmV2"] = EnhancedDynamicFireworkAlgorithmV2 + LLAMAEnhancedDynamicFireworkAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmV2", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization import ( + EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization", register=True + ) +except Exception as e: + print( + "EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithHybridSearch import ( + EnhancedDynamicFireworkAlgorithmWithHybridSearch, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( + EnhancedDynamicFireworkAlgorithmWithHybridSearch + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization + ) + LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolution import ( + EnhancedDynamicFireworkDifferentialEvolution, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolution"] = ( + EnhancedDynamicFireworkDifferentialEvolution + ) + LLAMAEnhancedDynamicFireworkDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicFireworkDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV2 import ( + EnhancedDynamicFireworkDifferentialEvolutionV2, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV2"] = ( + EnhancedDynamicFireworkDifferentialEvolutionV2 + ) + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedDynamicFireworkDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV3 import ( + EnhancedDynamicFireworkDifferentialEvolutionV3, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV3"] = ( + EnhancedDynamicFireworkDifferentialEvolutionV3 + ) + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3", register=True) +except Exception as e: + print("EnhancedDynamicFireworkDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealing import ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: + print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithm import EnhancedDynamicHarmonyAlgorithm + + lama_register["EnhancedDynamicHarmonyAlgorithm"] = EnhancedDynamicHarmonyAlgorithm + LLAMAEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicHarmonyAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithmV2 import ( + EnhancedDynamicHarmonyAlgorithmV2, + ) + + lama_register["EnhancedDynamicHarmonyAlgorithmV2"] = EnhancedDynamicHarmonyAlgorithmV2 + LLAMAEnhancedDynamicHarmonyAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithmV2", register=True) +except Exception as e: + print("EnhancedDynamicHarmonyAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonyFireworksSearch import ( + EnhancedDynamicHarmonyFireworksSearch, + ) + + lama_register["EnhancedDynamicHarmonyFireworksSearch"] = EnhancedDynamicHarmonyFireworksSearch + LLAMAEnhancedDynamicHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyFireworksSearch" + ).set_name("LLAMAEnhancedDynamicHarmonyFireworksSearch", register=True) +except Exception as e: + print("EnhancedDynamicHarmonyFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizer import ( + EnhancedDynamicHarmonySearchOptimizer, + ) + + lama_register["EnhancedDynamicHarmonySearchOptimizer"] = EnhancedDynamicHarmonySearchOptimizer + LLAMAEnhancedDynamicHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizerV7 import ( + EnhancedDynamicHarmonySearchOptimizerV7, + ) + + lama_register["EnhancedDynamicHarmonySearchOptimizerV7"] = EnhancedDynamicHarmonySearchOptimizerV7 + LLAMAEnhancedDynamicHarmonySearchOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7" + ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizerV7", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV5 import EnhancedDynamicHarmonySearchV5 + + lama_register["EnhancedDynamicHarmonySearchV5"] = EnhancedDynamicHarmonySearchV5 + LLAMAEnhancedDynamicHarmonySearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV5" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV5", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV6 import EnhancedDynamicHarmonySearchV6 + + lama_register["EnhancedDynamicHarmonySearchV6"] = EnhancedDynamicHarmonySearchV6 + LLAMAEnhancedDynamicHarmonySearchV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV6" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV6", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV7 import EnhancedDynamicHarmonySearchV7 + + lama_register["EnhancedDynamicHarmonySearchV7"] = EnhancedDynamicHarmonySearchV7 + LLAMAEnhancedDynamicHarmonySearchV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV7" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV7", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV8 import EnhancedDynamicHarmonySearchV8 + + lama_register["EnhancedDynamicHarmonySearchV8"] = EnhancedDynamicHarmonySearchV8 + LLAMAEnhancedDynamicHarmonySearchV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV8" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV8", register=True) +except Exception as e: + print("EnhancedDynamicHarmonySearchV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHarmonyTabuSearch import EnhancedDynamicHarmonyTabuSearch + + lama_register["EnhancedDynamicHarmonyTabuSearch"] = EnhancedDynamicHarmonyTabuSearch + LLAMAEnhancedDynamicHarmonyTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyTabuSearch" + ).set_name("LLAMAEnhancedDynamicHarmonyTabuSearch", register=True) +except Exception as e: + print("EnhancedDynamicHarmonyTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHybridDEPSOWithEliteMemory import ( + EnhancedDynamicHybridDEPSOWithEliteMemory, + ) + + lama_register["EnhancedDynamicHybridDEPSOWithEliteMemory"] = EnhancedDynamicHybridDEPSOWithEliteMemory + LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory" + ).set_name("LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("EnhancedDynamicHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 import ( + EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21, + ) + + lama_register["EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21"] = ( + EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 + ) + LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21" + ).set_name("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21", register=True) +except Exception as e: + print("EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHybridOptimization import ( + EnhancedDynamicHybridOptimization, + ) + + lama_register["EnhancedDynamicHybridOptimization"] = EnhancedDynamicHybridOptimization + LLAMAEnhancedDynamicHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridOptimization" + ).set_name("LLAMAEnhancedDynamicHybridOptimization", register=True) +except Exception as e: + print("EnhancedDynamicHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicHybridOptimizer import EnhancedDynamicHybridOptimizer + + lama_register["EnhancedDynamicHybridOptimizer"] = EnhancedDynamicHybridOptimizer + LLAMAEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearch import EnhancedDynamicLevyHarmonySearch + + lama_register["EnhancedDynamicLevyHarmonySearch"] = EnhancedDynamicLevyHarmonySearch + LLAMAEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearch" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearch", register=True) +except Exception as e: + print("EnhancedDynamicLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV2 import ( + EnhancedDynamicLevyHarmonySearchV2, + ) + + lama_register["EnhancedDynamicLevyHarmonySearchV2"] = EnhancedDynamicLevyHarmonySearchV2 + LLAMAEnhancedDynamicLevyHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearchV2" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV2", register=True) +except Exception as e: + print("EnhancedDynamicLevyHarmonySearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV3 import ( + EnhancedDynamicLevyHarmonySearchV3, + ) + + lama_register["EnhancedDynamicLevyHarmonySearchV3"] = EnhancedDynamicLevyHarmonySearchV3 + LLAMAEnhancedDynamicLevyHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearchV3" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV3", register=True) +except Exception as e: + print("EnhancedDynamicLevyHarmonySearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithm import ( + EnhancedDynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithm"] = EnhancedDynamicLocalSearchFireworkAlgorithm + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV2 import ( + EnhancedDynamicLocalSearchFireworkAlgorithmV2, + ) + + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV2"] = ( + EnhancedDynamicLocalSearchFireworkAlgorithmV2 + ) + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2", register=True) +except Exception as e: + print("EnhancedDynamicLocalSearchFireworkAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV3 import ( + EnhancedDynamicLocalSearchFireworkAlgorithmV3, + ) + + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV3"] = ( + EnhancedDynamicLocalSearchFireworkAlgorithmV3 + ) + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3", register=True) +except Exception as e: + print("EnhancedDynamicLocalSearchFireworkAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicMemoryStrategyV51 import EnhancedDynamicMemoryStrategyV51 + + lama_register["EnhancedDynamicMemoryStrategyV51"] = EnhancedDynamicMemoryStrategyV51 + LLAMAEnhancedDynamicMemoryStrategyV51 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMemoryStrategyV51" + ).set_name("LLAMAEnhancedDynamicMemoryStrategyV51", register=True) +except Exception as e: + print("EnhancedDynamicMemoryStrategyV51 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicMultiPhaseAnnealingPlus import ( + EnhancedDynamicMultiPhaseAnnealingPlus, + ) + + lama_register["EnhancedDynamicMultiPhaseAnnealingPlus"] = EnhancedDynamicMultiPhaseAnnealingPlus + LLAMAEnhancedDynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus" + ).set_name("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus", register=True) +except Exception as e: + print("EnhancedDynamicMultiPhaseAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicMutationSearch import EnhancedDynamicMutationSearch + + lama_register["EnhancedDynamicMutationSearch"] = EnhancedDynamicMutationSearch + LLAMAEnhancedDynamicMutationSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMutationSearch" + ).set_name("LLAMAEnhancedDynamicMutationSearch", register=True) +except Exception as e: + print("EnhancedDynamicMutationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicNichePSO_DE_LS import EnhancedDynamicNichePSO_DE_LS + + lama_register["EnhancedDynamicNichePSO_DE_LS"] = EnhancedDynamicNichePSO_DE_LS + LLAMAEnhancedDynamicNichePSO_DE_LS = NonObjectOptimizer( + method="LLAMAEnhancedDynamicNichePSO_DE_LS" + ).set_name("LLAMAEnhancedDynamicNichePSO_DE_LS", register=True) +except Exception as e: + print("EnhancedDynamicNichePSO_DE_LS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicNichingDEPSO import EnhancedDynamicNichingDEPSO + + lama_register["EnhancedDynamicNichingDEPSO"] = EnhancedDynamicNichingDEPSO + LLAMAEnhancedDynamicNichingDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO").set_name( + "LLAMAEnhancedDynamicNichingDEPSO", register=True + ) +except Exception as e: + print("EnhancedDynamicNichingDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicPrecisionBalancedEvolution import ( + EnhancedDynamicPrecisionBalancedEvolution, + ) + + lama_register["EnhancedDynamicPrecisionBalancedEvolution"] = EnhancedDynamicPrecisionBalancedEvolution + LLAMAEnhancedDynamicPrecisionBalancedEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicPrecisionBalancedEvolution" + ).set_name("LLAMAEnhancedDynamicPrecisionBalancedEvolution", register=True) +except Exception as e: + print("EnhancedDynamicPrecisionBalancedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicPrecisionOptimizer import ( + EnhancedDynamicPrecisionOptimizer, + ) + + lama_register["EnhancedDynamicPrecisionOptimizer"] = EnhancedDynamicPrecisionOptimizer + LLAMAEnhancedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicPrecisionOptimizer" + ).set_name("LLAMAEnhancedDynamicPrecisionOptimizer", register=True) +except Exception as e: + print("EnhancedDynamicPrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolution import ( + EnhancedDynamicQuantumDifferentialEvolution, + ) + + lama_register["EnhancedDynamicQuantumDifferentialEvolution"] = EnhancedDynamicQuantumDifferentialEvolution + LLAMAEnhancedDynamicQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory import ( + EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory, + ) + + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory"] = ( + EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory + ) + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory" + ).set_name( + "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory", register=True + ) +except Exception as e: + print( + "EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart import ( + EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart, + ) + + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart"] = ( + EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart + ) + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart" + ).set_name( + "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart", register=True + ) +except Exception as e: + print( + "EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimization import ( + EnhancedDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimization"] = EnhancedDynamicQuantumSwarmOptimization + LLAMAEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationFinal import ( + EnhancedDynamicQuantumSwarmOptimizationFinal, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationFinal"] = ( + EnhancedDynamicQuantumSwarmOptimizationFinal + ) + LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationFinal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationImproved import ( + EnhancedDynamicQuantumSwarmOptimizationImproved, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationImproved"] = ( + EnhancedDynamicQuantumSwarmOptimizationImproved + ) + LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV10 import ( + EnhancedDynamicQuantumSwarmOptimizationV10, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV10"] = EnhancedDynamicQuantumSwarmOptimizationV10 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV10", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV11 import ( + EnhancedDynamicQuantumSwarmOptimizationV11, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV11"] = EnhancedDynamicQuantumSwarmOptimizationV11 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV11", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV12 import ( + EnhancedDynamicQuantumSwarmOptimizationV12, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV12"] = EnhancedDynamicQuantumSwarmOptimizationV12 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV12", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV13 import ( + EnhancedDynamicQuantumSwarmOptimizationV13, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV13"] = EnhancedDynamicQuantumSwarmOptimizationV13 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV13", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV14 import ( + EnhancedDynamicQuantumSwarmOptimizationV14, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV14"] = EnhancedDynamicQuantumSwarmOptimizationV14 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV14", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV15 import ( + EnhancedDynamicQuantumSwarmOptimizationV15, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV15"] = EnhancedDynamicQuantumSwarmOptimizationV15 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV15", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV16 import ( + EnhancedDynamicQuantumSwarmOptimizationV16, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV16"] = EnhancedDynamicQuantumSwarmOptimizationV16 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV16", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV17 import ( + EnhancedDynamicQuantumSwarmOptimizationV17, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV17"] = EnhancedDynamicQuantumSwarmOptimizationV17 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV17", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV18 import ( + EnhancedDynamicQuantumSwarmOptimizationV18, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV18"] = EnhancedDynamicQuantumSwarmOptimizationV18 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV18 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV18", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV19 import ( + EnhancedDynamicQuantumSwarmOptimizationV19, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV19"] = EnhancedDynamicQuantumSwarmOptimizationV19 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV19 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV19", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV2 import ( + EnhancedDynamicQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV2"] = EnhancedDynamicQuantumSwarmOptimizationV2 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV20 import ( + EnhancedDynamicQuantumSwarmOptimizationV20, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV20"] = EnhancedDynamicQuantumSwarmOptimizationV20 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV20 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV20", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV21 import ( + EnhancedDynamicQuantumSwarmOptimizationV21, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV21"] = EnhancedDynamicQuantumSwarmOptimizationV21 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV21 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV21", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV22 import ( + EnhancedDynamicQuantumSwarmOptimizationV22, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV22"] = EnhancedDynamicQuantumSwarmOptimizationV22 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV22 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV22", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV23 import ( + EnhancedDynamicQuantumSwarmOptimizationV23, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV23"] = EnhancedDynamicQuantumSwarmOptimizationV23 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV23 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV23", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV24 import ( + EnhancedDynamicQuantumSwarmOptimizationV24, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV24"] = EnhancedDynamicQuantumSwarmOptimizationV24 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV24 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV24", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV25 import ( + EnhancedDynamicQuantumSwarmOptimizationV25, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV25"] = EnhancedDynamicQuantumSwarmOptimizationV25 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV25 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV25", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV26 import ( + EnhancedDynamicQuantumSwarmOptimizationV26, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV26"] = EnhancedDynamicQuantumSwarmOptimizationV26 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV26 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV26", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV27 import ( + EnhancedDynamicQuantumSwarmOptimizationV27, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV27"] = EnhancedDynamicQuantumSwarmOptimizationV27 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV27 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV27", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV28 import ( + EnhancedDynamicQuantumSwarmOptimizationV28, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV28"] = EnhancedDynamicQuantumSwarmOptimizationV28 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV28", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV3 import ( + EnhancedDynamicQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV3"] = EnhancedDynamicQuantumSwarmOptimizationV3 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV4 import ( + EnhancedDynamicQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV4"] = EnhancedDynamicQuantumSwarmOptimizationV4 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV5 import ( + EnhancedDynamicQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV5"] = EnhancedDynamicQuantumSwarmOptimizationV5 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV6 import ( + EnhancedDynamicQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV6"] = EnhancedDynamicQuantumSwarmOptimizationV6 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV7 import ( + EnhancedDynamicQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV7"] = EnhancedDynamicQuantumSwarmOptimizationV7 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV8 import ( + EnhancedDynamicQuantumSwarmOptimizationV8, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV8"] = EnhancedDynamicQuantumSwarmOptimizationV8 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV8", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV9 import ( + EnhancedDynamicQuantumSwarmOptimizationV9, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationV9"] = EnhancedDynamicQuantumSwarmOptimizationV9 + LLAMAEnhancedDynamicQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV9", register=True) +except Exception as e: + print("EnhancedDynamicQuantumSwarmOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( + EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing + ) + LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicRefinementGradientBoostedMemoryAnnealing import ( + EnhancedDynamicRefinementGradientBoostedMemoryAnnealing, + ) + + lama_register["EnhancedDynamicRefinementGradientBoostedMemoryAnnealing"] = ( + EnhancedDynamicRefinementGradientBoostedMemoryAnnealing + ) + LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing" + ).set_name("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("EnhancedDynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicRestartAdaptiveDE import EnhancedDynamicRestartAdaptiveDE + + lama_register["EnhancedDynamicRestartAdaptiveDE"] = EnhancedDynamicRestartAdaptiveDE + LLAMAEnhancedDynamicRestartAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRestartAdaptiveDE" + ).set_name("LLAMAEnhancedDynamicRestartAdaptiveDE", register=True) +except Exception as e: + print("EnhancedDynamicRestartAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicStrategyAdaptiveDE import ( + EnhancedDynamicStrategyAdaptiveDE, + ) + + lama_register["EnhancedDynamicStrategyAdaptiveDE"] = EnhancedDynamicStrategyAdaptiveDE + LLAMAEnhancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedDynamicStrategyAdaptiveDE", register=True) +except Exception as e: + print("EnhancedDynamicStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithm import ( + EnhancedDynamicallyAdaptiveFireworkAlgorithm, + ) + + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithm"] = ( + EnhancedDynamicallyAdaptiveFireworkAlgorithm + ) + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedDynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved import ( + EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved, + ) + + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved"] = ( + EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved + ) + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved", register=True) +except Exception as e: + print("EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteAdaptiveHybridDEPSO import EnhancedEliteAdaptiveHybridDEPSO + + lama_register["EnhancedEliteAdaptiveHybridDEPSO"] = EnhancedEliteAdaptiveHybridDEPSO + LLAMAEnhancedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedEliteAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("EnhancedEliteAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizer import ( + EnhancedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizer"] = EnhancedEliteAdaptiveMemoryHybridOptimizer + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV2 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV2, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV2"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV2 + ) + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2", register=True) +except Exception as e: + print("EnhancedEliteAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV6 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV6, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV6"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV6 + ) + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6", register=True) +except Exception as e: + print("EnhancedEliteAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV7 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV7, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV7"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV7 + ) + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7", register=True) +except Exception as e: + print("EnhancedEliteAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteCrowdingMemoryHybridOptimizerV3 import ( + EnhancedEliteCrowdingMemoryHybridOptimizerV3, + ) + + lama_register["EnhancedEliteCrowdingMemoryHybridOptimizerV3"] = ( + EnhancedEliteCrowdingMemoryHybridOptimizerV3 + ) + LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3" + ).set_name("LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3", register=True) +except Exception as e: + print("EnhancedEliteCrowdingMemoryHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveDE import EnhancedEliteGuidedAdaptiveDE + + lama_register["EnhancedEliteGuidedAdaptiveDE"] = EnhancedEliteGuidedAdaptiveDE + LLAMAEnhancedEliteGuidedAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedAdaptiveDE" + ).set_name("LLAMAEnhancedEliteGuidedAdaptiveDE", register=True) +except Exception as e: + print("EnhancedEliteGuidedAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveRestartDE import ( + EnhancedEliteGuidedAdaptiveRestartDE, + ) + + lama_register["EnhancedEliteGuidedAdaptiveRestartDE"] = EnhancedEliteGuidedAdaptiveRestartDE + LLAMAEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMAEnhancedEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: + print("EnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedDualMutationDE import ( + EnhancedEliteGuidedDualMutationDE, + ) + + lama_register["EnhancedEliteGuidedDualMutationDE"] = EnhancedEliteGuidedDualMutationDE + LLAMAEnhancedEliteGuidedDualMutationDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedDualMutationDE" + ).set_name("LLAMAEnhancedEliteGuidedDualMutationDE", register=True) +except Exception as e: + print("EnhancedEliteGuidedDualMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v81 import EnhancedEliteGuidedMassQGSA_v81 + + lama_register["EnhancedEliteGuidedMassQGSA_v81"] = EnhancedEliteGuidedMassQGSA_v81 + LLAMAEnhancedEliteGuidedMassQGSA_v81 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v81" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v81", register=True) +except Exception as e: + print("EnhancedEliteGuidedMassQGSA_v81 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v82 import EnhancedEliteGuidedMassQGSA_v82 + + lama_register["EnhancedEliteGuidedMassQGSA_v82"] = EnhancedEliteGuidedMassQGSA_v82 + LLAMAEnhancedEliteGuidedMassQGSA_v82 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v82" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v82", register=True) +except Exception as e: + print("EnhancedEliteGuidedMassQGSA_v82 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v83 import EnhancedEliteGuidedMassQGSA_v83 + + lama_register["EnhancedEliteGuidedMassQGSA_v83"] = EnhancedEliteGuidedMassQGSA_v83 + LLAMAEnhancedEliteGuidedMassQGSA_v83 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v83" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v83", register=True) +except Exception as e: + print("EnhancedEliteGuidedMassQGSA_v83 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v85 import EnhancedEliteGuidedMassQGSA_v85 + + lama_register["EnhancedEliteGuidedMassQGSA_v85"] = EnhancedEliteGuidedMassQGSA_v85 + LLAMAEnhancedEliteGuidedMassQGSA_v85 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v85" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v85", register=True) +except Exception as e: + print("EnhancedEliteGuidedMassQGSA_v85 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v86 import EnhancedEliteGuidedMassQGSA_v86 + + lama_register["EnhancedEliteGuidedMassQGSA_v86"] = EnhancedEliteGuidedMassQGSA_v86 + LLAMAEnhancedEliteGuidedMassQGSA_v86 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v86" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v86", register=True) +except Exception as e: + print("EnhancedEliteGuidedMassQGSA_v86 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteGuidedMutationDE_v2 import EnhancedEliteGuidedMutationDE_v2 + + lama_register["EnhancedEliteGuidedMutationDE_v2"] = EnhancedEliteGuidedMutationDE_v2 + LLAMAEnhancedEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMutationDE_v2" + ).set_name("LLAMAEnhancedEliteGuidedMutationDE_v2", register=True) +except Exception as e: + print("EnhancedEliteGuidedMutationDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteHybridOptimizer import EnhancedEliteHybridOptimizer + + lama_register["EnhancedEliteHybridOptimizer"] = EnhancedEliteHybridOptimizer + LLAMAEnhancedEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEliteHybridOptimizer" + ).set_name("LLAMAEnhancedEliteHybridOptimizer", register=True) +except Exception as e: + print("EnhancedEliteHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEliteQuantumAdaptiveExplorationOptimization import ( + EnhancedEliteQuantumAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedEliteQuantumAdaptiveExplorationOptimization"] = ( + EnhancedEliteQuantumAdaptiveExplorationOptimization + ) + LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("EnhancedEliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 import ( + EnhancedEnhancedAdaptiveHarmonicTabuSearchV24, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonicTabuSearchV24"] = ( + EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 + ) + LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24" + ).set_name("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24", register=True) +except Exception as e: + print("EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 import ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7"] = ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 + ) + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7" + ).set_name( + "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7", register=True + ) +except Exception as e: + print( + "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 import ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8"] = ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 + ) + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8" + ).set_name( + "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8", register=True + ) +except Exception as e: + print( + "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution import ( + EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( + EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution + ) + LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 import ( + EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57, + ) + + lama_register["EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57"] = ( + EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 + ) + LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57" + ).set_name("LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57", register=True) +except Exception as e: + print("EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( + EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( + EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence + ) + LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedDynamicQuantumSwarmOptimization import ( + EnhancedEnhancedDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedEnhancedDynamicQuantumSwarmOptimization"] = ( + EnhancedEnhancedDynamicQuantumSwarmOptimization + ) + LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 + ) + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10", register=True) +except Exception as e: + print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 + ) + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6", register=True) +except Exception as e: + print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 + ) + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7", register=True) +except Exception as e: + print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 + ) + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8", register=True) +except Exception as e: + print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 + ) + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9", register=True) +except Exception as e: + print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization import ( + EnhancedEnhancedFireworkSwarmOptimization, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization"] = EnhancedEnhancedFireworkSwarmOptimization + LLAMAEnhancedEnhancedFireworkSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization", register=True) +except Exception as e: + print("EnhancedEnhancedFireworkSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v2 import ( + EnhancedEnhancedFireworkSwarmOptimization_v2, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v2"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v2 + ) + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2", register=True) +except Exception as e: + print("EnhancedEnhancedFireworkSwarmOptimization_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v3 import ( + EnhancedEnhancedFireworkSwarmOptimization_v3, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v3"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v3 + ) + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3", register=True) +except Exception as e: + print("EnhancedEnhancedFireworkSwarmOptimization_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v4 import ( + EnhancedEnhancedFireworkSwarmOptimization_v4, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v4"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v4 + ) + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4", register=True) +except Exception as e: + print("EnhancedEnhancedFireworkSwarmOptimization_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v63 import ( + EnhancedEnhancedGuidedMassQGSA_v63, + ) + + lama_register["EnhancedEnhancedGuidedMassQGSA_v63"] = EnhancedEnhancedGuidedMassQGSA_v63 + LLAMAEnhancedEnhancedGuidedMassQGSA_v63 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v63", register=True) +except Exception as e: + print("EnhancedEnhancedGuidedMassQGSA_v63 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v64 import ( + EnhancedEnhancedGuidedMassQGSA_v64, + ) + + lama_register["EnhancedEnhancedGuidedMassQGSA_v64"] = EnhancedEnhancedGuidedMassQGSA_v64 + LLAMAEnhancedEnhancedGuidedMassQGSA_v64 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v64", register=True) +except Exception as e: + print("EnhancedEnhancedGuidedMassQGSA_v64 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v68 import ( + EnhancedEnhancedGuidedMassQGSA_v68, + ) + + lama_register["EnhancedEnhancedGuidedMassQGSA_v68"] = EnhancedEnhancedGuidedMassQGSA_v68 + LLAMAEnhancedEnhancedGuidedMassQGSA_v68 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v68", register=True) +except Exception as e: + print("EnhancedEnhancedGuidedMassQGSA_v68 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration import ( + EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration"] = ( + EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration + ) + LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration", register=True) +except Exception as e: + print("EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizer import ( + EnhancedEnhancedHybridMetaHeuristicOptimizer, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizer"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizer + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV10 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV10, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV10"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV10 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV11 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV11, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV11"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV11 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV12 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV12, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV12"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV12 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV13 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV13, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV13"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV13 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV14 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV14, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV14"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV14 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV2 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV2, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV2"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV2 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV3 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV3, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV3"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV3 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV4 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV4, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV4"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV4 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV5 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV5, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV5"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV5 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV6 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV6, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV6"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV6 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV7 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV7, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV7"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV7 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV8 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV8, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV8"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV8 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV9 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV9, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV9"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV9 + ) + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9", register=True) +except Exception as e: + print("EnhancedEnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedMetaHeuristicOptimizerV3 import ( + EnhancedEnhancedMetaHeuristicOptimizerV3, + ) + + lama_register["EnhancedEnhancedMetaHeuristicOptimizerV3"] = EnhancedEnhancedMetaHeuristicOptimizerV3 + LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3", register=True) +except Exception as e: + print("EnhancedEnhancedMetaHeuristicOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP, + ) + + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP + ) + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: + print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 import ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4, + ) + + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4"] = ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 + ) + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4" + ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4", register=True) +except Exception as e: + print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV1 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV1, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV1"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV1 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV12 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV12, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV12"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV12 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV13 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV13, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV13"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV13 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV14 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV14, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV14"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV14 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV15 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV15, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV15"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV15 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV16 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV16, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV16"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV16 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV17 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV17, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV17"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV17 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV18 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV18, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV18"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV18 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV19 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV19, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV19"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV19 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV2 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV2, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV2"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV2 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV20 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV20, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV20"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV20 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV21 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV21, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV21"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV21 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV22 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV22, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV22"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV22 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV23 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV23, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV23"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV23 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV24 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV24, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV24"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV24 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV25 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV25, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV25"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV25 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV26 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV26, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV26"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV26 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV27 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV27, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV27"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV27 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV28 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV28, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV28"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV28 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV29 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV29, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV29"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV29 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV3 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV3, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV3"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV3 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV30 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV30, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV30"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV30 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV4 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV4, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV4"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV4 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV5 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV5, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV5"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV5 + ) + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5", register=True) +except Exception as e: + print("EnhancedEvolutionaryDifferentialSwarmOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch import ( + EnhancedEvolutionaryFireworksSearch, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch"] = EnhancedEvolutionaryFireworksSearch + LLAMAEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v2 import ( + EnhancedEvolutionaryFireworksSearch_v2, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch_v2"] = EnhancedEvolutionaryFireworksSearch_v2 + LLAMAEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v2" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v2", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v3 import ( + EnhancedEvolutionaryFireworksSearch_v3, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch_v3"] = EnhancedEvolutionaryFireworksSearch_v3 + LLAMAEnhancedEvolutionaryFireworksSearch_v3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v3" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v3", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v4 import ( + EnhancedEvolutionaryFireworksSearch_v4, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch_v4"] = EnhancedEvolutionaryFireworksSearch_v4 + LLAMAEnhancedEvolutionaryFireworksSearch_v4 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v4" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v4", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v5 import ( + EnhancedEvolutionaryFireworksSearch_v5, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch_v5"] = EnhancedEvolutionaryFireworksSearch_v5 + LLAMAEnhancedEvolutionaryFireworksSearch_v5 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v5" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v5", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v6 import ( + EnhancedEvolutionaryFireworksSearch_v6, + ) + + lama_register["EnhancedEvolutionaryFireworksSearch_v6"] = EnhancedEvolutionaryFireworksSearch_v6 + LLAMAEnhancedEvolutionaryFireworksSearch_v6 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v6" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v6", register=True) +except Exception as e: + print("EnhancedEvolutionaryFireworksSearch_v6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryGradientSearch import ( + EnhancedEvolutionaryGradientSearch, + ) + + lama_register["EnhancedEvolutionaryGradientSearch"] = EnhancedEvolutionaryGradientSearch + LLAMAEnhancedEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryGradientSearch" + ).set_name("LLAMAEnhancedEvolutionaryGradientSearch", register=True) +except Exception as e: + print("EnhancedEvolutionaryGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizer import ( + EnhancedEvolutionaryParticleSwarmOptimizer, + ) + + lama_register["EnhancedEvolutionaryParticleSwarmOptimizer"] = EnhancedEvolutionaryParticleSwarmOptimizer + LLAMAEnhancedEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedEvolutionaryParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV2 import ( + EnhancedEvolutionaryParticleSwarmOptimizerV2, + ) + + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV2"] = ( + EnhancedEvolutionaryParticleSwarmOptimizerV2 + ) + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2", register=True) +except Exception as e: + print("EnhancedEvolutionaryParticleSwarmOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV3 import ( + EnhancedEvolutionaryParticleSwarmOptimizerV3, + ) + + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV3"] = ( + EnhancedEvolutionaryParticleSwarmOptimizerV3 + ) + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3", register=True) +except Exception as e: + print("EnhancedEvolutionaryParticleSwarmOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedEvolutionaryStrategy import EnhancedEvolutionaryStrategy + + lama_register["EnhancedEvolutionaryStrategy"] = EnhancedEvolutionaryStrategy + LLAMAEnhancedEvolutionaryStrategy = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryStrategy" + ).set_name("LLAMAEnhancedEvolutionaryStrategy", register=True) +except Exception as e: + print("EnhancedEvolutionaryStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimization import ( + EnhancedExplorationGravitationalSwarmOptimization, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimization"] = ( + EnhancedExplorationGravitationalSwarmOptimization + ) + LLAMAEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimization", register=True) +except Exception as e: + print("EnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV2 import ( + EnhancedExplorationGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV2"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV2 + ) + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedExplorationGravitationalSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV3 import ( + EnhancedExplorationGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV3"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV3 + ) + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedExplorationGravitationalSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV4 import ( + EnhancedExplorationGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV4"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV4 + ) + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedExplorationGravitationalSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV5 import ( + EnhancedExplorationGravitationalSwarmOptimizationV5, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV5"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV5 + ) + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedExplorationGravitationalSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedExplorativeHarmonicSwarmOptimizer import ( + EnhancedExplorativeHarmonicSwarmOptimizer, + ) + + lama_register["EnhancedExplorativeHarmonicSwarmOptimizer"] = EnhancedExplorativeHarmonicSwarmOptimizer + LLAMAEnhancedExplorativeHarmonicSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer" + ).set_name("LLAMAEnhancedExplorativeHarmonicSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedExplorativeHarmonicSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithm import EnhancedFireworkAlgorithm + + lama_register["EnhancedFireworkAlgorithm"] = EnhancedFireworkAlgorithm + LLAMAEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm").set_name( + "LLAMAEnhancedFireworkAlgorithm", register=True + ) +except Exception as e: + print("EnhancedFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization import ( + EnhancedFireworkAlgorithmOptimization, + ) + + lama_register["EnhancedFireworkAlgorithmOptimization"] = EnhancedFireworkAlgorithmOptimization + LLAMAEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmOptimization" + ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization_v2 import ( + EnhancedFireworkAlgorithmOptimization_v2, + ) + + lama_register["EnhancedFireworkAlgorithmOptimization_v2"] = EnhancedFireworkAlgorithmOptimization_v2 + LLAMAEnhancedFireworkAlgorithmOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmOptimization_v2" + ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization_v2", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmOptimization_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined import ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined"] = ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined + ) + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveMutation import ( + EnhancedFireworkAlgorithmWithAdaptiveMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveMutation"] = ( + EnhancedFireworkAlgorithmWithAdaptiveMutation + ) + LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithDynamicMutation import ( + EnhancedFireworkAlgorithmWithDynamicMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithDynamicMutation"] = ( + EnhancedFireworkAlgorithmWithDynamicMutation + ) + LLAMAEnhancedFireworkAlgorithmWithDynamicMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithDynamicMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithHybridLocalSearch import ( + EnhancedFireworkAlgorithmWithHybridLocalSearch, + ) + + lama_register["EnhancedFireworkAlgorithmWithHybridLocalSearch"] = ( + EnhancedFireworkAlgorithmWithHybridLocalSearch + ) + LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithHybridLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithImprovedMutation import ( + EnhancedFireworkAlgorithmWithImprovedMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithImprovedMutation"] = ( + EnhancedFireworkAlgorithmWithImprovedMutation + ) + LLAMAEnhancedFireworkAlgorithmWithImprovedMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithImprovedMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearch import ( + EnhancedFireworkAlgorithmWithLocalSearch, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearch"] = EnhancedFireworkAlgorithmWithLocalSearch + LLAMAEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinal import ( + EnhancedFireworkAlgorithmWithLocalSearchFinal, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinal"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinal + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchFinal can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized import ( + EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalRefined import ( + EnhancedFireworkAlgorithmWithLocalSearchFinalRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalRefined"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinalRefined + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchFinalRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchImproved import ( + EnhancedFireworkAlgorithmWithLocalSearchImproved, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchImproved"] = ( + EnhancedFireworkAlgorithmWithLocalSearchImproved + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchOptimized import ( + EnhancedFireworkAlgorithmWithLocalSearchOptimized, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchOptimized"] = ( + EnhancedFireworkAlgorithmWithLocalSearchOptimized + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchRefined import ( + EnhancedFireworkAlgorithmWithLocalSearchRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchRefined"] = ( + EnhancedFireworkAlgorithmWithLocalSearchRefined + ) + LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined", register=True) +except Exception as e: + print("EnhancedFireworkAlgorithmWithLocalSearchRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworkSwarmOptimization import ( + EnhancedFireworkSwarmOptimization, + ) + + lama_register["EnhancedFireworkSwarmOptimization"] = EnhancedFireworkSwarmOptimization + LLAMAEnhancedFireworkSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedFireworkSwarmOptimization" + ).set_name("LLAMAEnhancedFireworkSwarmOptimization", register=True) +except Exception as e: + print("EnhancedFireworkSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworksAlgorithm import EnhancedFireworksAlgorithm + + lama_register["EnhancedFireworksAlgorithm"] = EnhancedFireworksAlgorithm + LLAMAEnhancedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm").set_name( + "LLAMAEnhancedFireworksAlgorithm", register=True + ) +except Exception as e: + print("EnhancedFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFireworksSwarmOptimization_v4 import ( + EnhancedFireworksSwarmOptimization_v4, + ) + + lama_register["EnhancedFireworksSwarmOptimization_v4"] = EnhancedFireworksSwarmOptimization_v4 + LLAMAEnhancedFireworksSwarmOptimization_v4 = NonObjectOptimizer( + method="LLAMAEnhancedFireworksSwarmOptimization_v4" + ).set_name("LLAMAEnhancedFireworksSwarmOptimization_v4", register=True) +except Exception as e: + print("EnhancedFireworksSwarmOptimization_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedFocusedBalancedAdaptivePSO import ( + EnhancedFocusedBalancedAdaptivePSO, + ) + + lama_register["EnhancedFocusedBalancedAdaptivePSO"] = EnhancedFocusedBalancedAdaptivePSO + LLAMAEnhancedFocusedBalancedAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedFocusedBalancedAdaptivePSO" + ).set_name("LLAMAEnhancedFocusedBalancedAdaptivePSO", register=True) +except Exception as e: + print("EnhancedFocusedBalancedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizer import EnhancedGlobalClimbingOptimizer + + lama_register["EnhancedGlobalClimbingOptimizer"] = EnhancedGlobalClimbingOptimizer + LLAMAEnhancedGlobalClimbingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalClimbingOptimizer" + ).set_name("LLAMAEnhancedGlobalClimbingOptimizer", register=True) +except Exception as e: + print("EnhancedGlobalClimbingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizerV3 import ( + EnhancedGlobalClimbingOptimizerV3, + ) + + lama_register["EnhancedGlobalClimbingOptimizerV3"] = EnhancedGlobalClimbingOptimizerV3 + LLAMAEnhancedGlobalClimbingOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedGlobalClimbingOptimizerV3" + ).set_name("LLAMAEnhancedGlobalClimbingOptimizerV3", register=True) +except Exception as e: + print("EnhancedGlobalClimbingOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGlobalStructureAdaptiveEvolver import ( + EnhancedGlobalStructureAdaptiveEvolver, + ) + + lama_register["EnhancedGlobalStructureAdaptiveEvolver"] = EnhancedGlobalStructureAdaptiveEvolver + LLAMAEnhancedGlobalStructureAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureAdaptiveEvolver" + ).set_name("LLAMAEnhancedGlobalStructureAdaptiveEvolver", register=True) +except Exception as e: + print("EnhancedGlobalStructureAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGlobalStructureAwareOptimizer import ( + EnhancedGlobalStructureAwareOptimizer, + ) + + lama_register["EnhancedGlobalStructureAwareOptimizer"] = EnhancedGlobalStructureAwareOptimizer + LLAMAEnhancedGlobalStructureAwareOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureAwareOptimizer" + ).set_name("LLAMAEnhancedGlobalStructureAwareOptimizer", register=True) +except Exception as e: + print("EnhancedGlobalStructureAwareOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGlobalStructureOptimizer import EnhancedGlobalStructureOptimizer + + lama_register["EnhancedGlobalStructureOptimizer"] = EnhancedGlobalStructureOptimizer + LLAMAEnhancedGlobalStructureOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureOptimizer" + ).set_name("LLAMAEnhancedGlobalStructureOptimizer", register=True) +except Exception as e: + print("EnhancedGlobalStructureOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGradientBoostedAnnealingWithAdaptiveMemory import ( + EnhancedGradientBoostedAnnealingWithAdaptiveMemory, + ) + + lama_register["EnhancedGradientBoostedAnnealingWithAdaptiveMemory"] = ( + EnhancedGradientBoostedAnnealingWithAdaptiveMemory + ) + LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory = NonObjectOptimizer( + method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory" + ).set_name("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory", register=True) +except Exception as e: + print("EnhancedGradientBoostedAnnealingWithAdaptiveMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGradientGuidedClusterSearch import ( + EnhancedGradientGuidedClusterSearch, + ) + + lama_register["EnhancedGradientGuidedClusterSearch"] = EnhancedGradientGuidedClusterSearch + LLAMAEnhancedGradientGuidedClusterSearch = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedClusterSearch" + ).set_name("LLAMAEnhancedGradientGuidedClusterSearch", register=True) +except Exception as e: + print("EnhancedGradientGuidedClusterSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGradientGuidedEvolution import EnhancedGradientGuidedEvolution + + lama_register["EnhancedGradientGuidedEvolution"] = EnhancedGradientGuidedEvolution + LLAMAEnhancedGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedEvolution" + ).set_name("LLAMAEnhancedGradientGuidedEvolution", register=True) +except Exception as e: + print("EnhancedGradientGuidedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGradientGuidedHybridPSO import EnhancedGradientGuidedHybridPSO + + lama_register["EnhancedGradientGuidedHybridPSO"] = EnhancedGradientGuidedHybridPSO + LLAMAEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: + print("EnhancedGradientGuidedHybridPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGradualAdaptiveRAMEDS import EnhancedGradualAdaptiveRAMEDS + + lama_register["EnhancedGradualAdaptiveRAMEDS"] = EnhancedGradualAdaptiveRAMEDS + LLAMAEnhancedGradualAdaptiveRAMEDS = NonObjectOptimizer( + method="LLAMAEnhancedGradualAdaptiveRAMEDS" + ).set_name("LLAMAEnhancedGradualAdaptiveRAMEDS", register=True) +except Exception as e: + print("EnhancedGradualAdaptiveRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimization import ( + EnhancedGravitationSwarmOptimization, + ) + + lama_register["EnhancedGravitationSwarmOptimization"] = EnhancedGravitationSwarmOptimization + LLAMAEnhancedGravitationSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedGravitationSwarmOptimization" + ).set_name("LLAMAEnhancedGravitationSwarmOptimization", register=True) +except Exception as e: + print("EnhancedGravitationSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimizationV2 import ( + EnhancedGravitationSwarmOptimizationV2, + ) + + lama_register["EnhancedGravitationSwarmOptimizationV2"] = EnhancedGravitationSwarmOptimizationV2 + LLAMAEnhancedGravitationSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationSwarmOptimizationV2" + ).set_name("LLAMAEnhancedGravitationSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedGravitationSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV10 import ( + EnhancedGravitationalSwarmIntelligenceV10, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV10"] = EnhancedGravitationalSwarmIntelligenceV10 + LLAMAEnhancedGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV10" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV10", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV11 import ( + EnhancedGravitationalSwarmIntelligenceV11, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV11"] = EnhancedGravitationalSwarmIntelligenceV11 + LLAMAEnhancedGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV11" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV11", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV12 import ( + EnhancedGravitationalSwarmIntelligenceV12, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV12"] = EnhancedGravitationalSwarmIntelligenceV12 + LLAMAEnhancedGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV12" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV12", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV13 import ( + EnhancedGravitationalSwarmIntelligenceV13, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV13"] = EnhancedGravitationalSwarmIntelligenceV13 + LLAMAEnhancedGravitationalSwarmIntelligenceV13 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV13" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV13", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV14 import ( + EnhancedGravitationalSwarmIntelligenceV14, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV14"] = EnhancedGravitationalSwarmIntelligenceV14 + LLAMAEnhancedGravitationalSwarmIntelligenceV14 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV14" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV14", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV15 import ( + EnhancedGravitationalSwarmIntelligenceV15, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV15"] = EnhancedGravitationalSwarmIntelligenceV15 + LLAMAEnhancedGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV15" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV15", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV16 import ( + EnhancedGravitationalSwarmIntelligenceV16, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV16"] = EnhancedGravitationalSwarmIntelligenceV16 + LLAMAEnhancedGravitationalSwarmIntelligenceV16 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV16" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV16", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV17 import ( + EnhancedGravitationalSwarmIntelligenceV17, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV17"] = EnhancedGravitationalSwarmIntelligenceV17 + LLAMAEnhancedGravitationalSwarmIntelligenceV17 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV17" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV17", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV18 import ( + EnhancedGravitationalSwarmIntelligenceV18, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV18"] = EnhancedGravitationalSwarmIntelligenceV18 + LLAMAEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV18" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV18", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV19 import ( + EnhancedGravitationalSwarmIntelligenceV19, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV19"] = EnhancedGravitationalSwarmIntelligenceV19 + LLAMAEnhancedGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV19" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV19", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV2 import ( + EnhancedGravitationalSwarmIntelligenceV2, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV2"] = EnhancedGravitationalSwarmIntelligenceV2 + LLAMAEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV20 import ( + EnhancedGravitationalSwarmIntelligenceV20, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV20"] = EnhancedGravitationalSwarmIntelligenceV20 + LLAMAEnhancedGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV20" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV20", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV21 import ( + EnhancedGravitationalSwarmIntelligenceV21, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV21"] = EnhancedGravitationalSwarmIntelligenceV21 + LLAMAEnhancedGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV21" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV21", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV22 import ( + EnhancedGravitationalSwarmIntelligenceV22, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV22"] = EnhancedGravitationalSwarmIntelligenceV22 + LLAMAEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV22" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV22", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV23 import ( + EnhancedGravitationalSwarmIntelligenceV23, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV23"] = EnhancedGravitationalSwarmIntelligenceV23 + LLAMAEnhancedGravitationalSwarmIntelligenceV23 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV23" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV23", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV24 import ( + EnhancedGravitationalSwarmIntelligenceV24, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV24"] = EnhancedGravitationalSwarmIntelligenceV24 + LLAMAEnhancedGravitationalSwarmIntelligenceV24 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV24" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV24", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV25 import ( + EnhancedGravitationalSwarmIntelligenceV25, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV25"] = EnhancedGravitationalSwarmIntelligenceV25 + LLAMAEnhancedGravitationalSwarmIntelligenceV25 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV25" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV25", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV3 import ( + EnhancedGravitationalSwarmIntelligenceV3, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV3"] = EnhancedGravitationalSwarmIntelligenceV3 + LLAMAEnhancedGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV30 import ( + EnhancedGravitationalSwarmIntelligenceV30, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV30"] = EnhancedGravitationalSwarmIntelligenceV30 + LLAMAEnhancedGravitationalSwarmIntelligenceV30 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV30" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV30", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV31 import ( + EnhancedGravitationalSwarmIntelligenceV31, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV31"] = EnhancedGravitationalSwarmIntelligenceV31 + LLAMAEnhancedGravitationalSwarmIntelligenceV31 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV31" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV31", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV32 import ( + EnhancedGravitationalSwarmIntelligenceV32, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV32"] = EnhancedGravitationalSwarmIntelligenceV32 + LLAMAEnhancedGravitationalSwarmIntelligenceV32 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV32" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV32", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV4 import ( + EnhancedGravitationalSwarmIntelligenceV4, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV4"] = EnhancedGravitationalSwarmIntelligenceV4 + LLAMAEnhancedGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV6 import ( + EnhancedGravitationalSwarmIntelligenceV6, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV6"] = EnhancedGravitationalSwarmIntelligenceV6 + LLAMAEnhancedGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV6" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV6", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV7 import ( + EnhancedGravitationalSwarmIntelligenceV7, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV7"] = EnhancedGravitationalSwarmIntelligenceV7 + LLAMAEnhancedGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV7" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV7", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV8 import ( + EnhancedGravitationalSwarmIntelligenceV8, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV8"] = EnhancedGravitationalSwarmIntelligenceV8 + LLAMAEnhancedGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV8" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV8", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV9 import ( + EnhancedGravitationalSwarmIntelligenceV9, + ) + + lama_register["EnhancedGravitationalSwarmIntelligenceV9"] = EnhancedGravitationalSwarmIntelligenceV9 + LLAMAEnhancedGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV9" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV9", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmIntelligenceV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDiversityPreservation import ( + EnhancedGravitationalSwarmOptimizationWithDiversityPreservation, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDiversityPreservation"] = ( + EnhancedGravitationalSwarmOptimizationWithDiversityPreservation + ) + LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmOptimizationWithDiversityPreservation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 import ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2"] = ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 + ) + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 import ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3"] = ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 + ) + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3", register=True) +except Exception as e: + print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v62 import EnhancedGuidedMassQGSA_v62 + + lama_register["EnhancedGuidedMassQGSA_v62"] = EnhancedGuidedMassQGSA_v62 + LLAMAEnhancedGuidedMassQGSA_v62 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62").set_name( + "LLAMAEnhancedGuidedMassQGSA_v62", register=True + ) +except Exception as e: + print("EnhancedGuidedMassQGSA_v62 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v94 import EnhancedGuidedMassQGSA_v94 + + lama_register["EnhancedGuidedMassQGSA_v94"] = EnhancedGuidedMassQGSA_v94 + LLAMAEnhancedGuidedMassQGSA_v94 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94").set_name( + "LLAMAEnhancedGuidedMassQGSA_v94", register=True + ) +except Exception as e: + print("EnhancedGuidedMassQGSA_v94 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicFireworkAlgorithm import ( + EnhancedHarmonicFireworkAlgorithm, + ) + + lama_register["EnhancedHarmonicFireworkAlgorithm"] = EnhancedHarmonicFireworkAlgorithm + LLAMAEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicFireworkAlgorithm" + ).set_name("LLAMAEnhancedHarmonicFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedHarmonicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicLevyDolphinOptimization import ( + EnhancedHarmonicLevyDolphinOptimization, + ) + + lama_register["EnhancedHarmonicLevyDolphinOptimization"] = EnhancedHarmonicLevyDolphinOptimization + LLAMAEnhancedHarmonicLevyDolphinOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicLevyDolphinOptimization" + ).set_name("LLAMAEnhancedHarmonicLevyDolphinOptimization", register=True) +except Exception as e: + print("EnhancedHarmonicLevyDolphinOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizer import EnhancedHarmonicSearchOptimizer + + lama_register["EnhancedHarmonicSearchOptimizer"] = EnhancedHarmonicSearchOptimizer + LLAMAEnhancedHarmonicSearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizer" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizer", register=True) +except Exception as e: + print("EnhancedHarmonicSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV2 import ( + EnhancedHarmonicSearchOptimizerV2, + ) + + lama_register["EnhancedHarmonicSearchOptimizerV2"] = EnhancedHarmonicSearchOptimizerV2 + LLAMAEnhancedHarmonicSearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV2" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV2", register=True) +except Exception as e: + print("EnhancedHarmonicSearchOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV3 import ( + EnhancedHarmonicSearchOptimizerV3, + ) + + lama_register["EnhancedHarmonicSearchOptimizerV3"] = EnhancedHarmonicSearchOptimizerV3 + LLAMAEnhancedHarmonicSearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV3" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV3", register=True) +except Exception as e: + print("EnhancedHarmonicSearchOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV4 import ( + EnhancedHarmonicSearchOptimizerV4, + ) + + lama_register["EnhancedHarmonicSearchOptimizerV4"] = EnhancedHarmonicSearchOptimizerV4 + LLAMAEnhancedHarmonicSearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV4" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV4", register=True) +except Exception as e: + print("EnhancedHarmonicSearchOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV5 import ( + EnhancedHarmonicSearchOptimizerV5, + ) + + lama_register["EnhancedHarmonicSearchOptimizerV5"] = EnhancedHarmonicSearchOptimizerV5 + LLAMAEnhancedHarmonicSearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV5" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV5", register=True) +except Exception as e: + print("EnhancedHarmonicSearchOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimization import ( + EnhancedHarmonicSwarmOptimization, + ) + + lama_register["EnhancedHarmonicSwarmOptimization"] = EnhancedHarmonicSwarmOptimization + LLAMAEnhancedHarmonicSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimization" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimization", register=True) +except Exception as e: + print("EnhancedHarmonicSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV2 import ( + EnhancedHarmonicSwarmOptimizationV2, + ) + + lama_register["EnhancedHarmonicSwarmOptimizationV2"] = EnhancedHarmonicSwarmOptimizationV2 + LLAMAEnhancedHarmonicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV2" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedHarmonicSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV3 import ( + EnhancedHarmonicSwarmOptimizationV3, + ) + + lama_register["EnhancedHarmonicSwarmOptimizationV3"] = EnhancedHarmonicSwarmOptimizationV3 + LLAMAEnhancedHarmonicSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV3" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedHarmonicSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV4 import ( + EnhancedHarmonicSwarmOptimizationV4, + ) + + lama_register["EnhancedHarmonicSwarmOptimizationV4"] = EnhancedHarmonicSwarmOptimizationV4 + LLAMAEnhancedHarmonicSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV4" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedHarmonicSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV11 import EnhancedHarmonicTabuSearchV11 + + lama_register["EnhancedHarmonicTabuSearchV11"] = EnhancedHarmonicTabuSearchV11 + LLAMAEnhancedHarmonicTabuSearchV11 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV11" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV11", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV13 import EnhancedHarmonicTabuSearchV13 + + lama_register["EnhancedHarmonicTabuSearchV13"] = EnhancedHarmonicTabuSearchV13 + LLAMAEnhancedHarmonicTabuSearchV13 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV13" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV13", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV14 import EnhancedHarmonicTabuSearchV14 + + lama_register["EnhancedHarmonicTabuSearchV14"] = EnhancedHarmonicTabuSearchV14 + LLAMAEnhancedHarmonicTabuSearchV14 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV14" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV14", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV15 import EnhancedHarmonicTabuSearchV15 + + lama_register["EnhancedHarmonicTabuSearchV15"] = EnhancedHarmonicTabuSearchV15 + LLAMAEnhancedHarmonicTabuSearchV15 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV15" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV15", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV16 import EnhancedHarmonicTabuSearchV16 + + lama_register["EnhancedHarmonicTabuSearchV16"] = EnhancedHarmonicTabuSearchV16 + LLAMAEnhancedHarmonicTabuSearchV16 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV16" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV16", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV19 import EnhancedHarmonicTabuSearchV19 + + lama_register["EnhancedHarmonicTabuSearchV19"] = EnhancedHarmonicTabuSearchV19 + LLAMAEnhancedHarmonicTabuSearchV19 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV19" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV19", register=True) +except Exception as e: + print("EnhancedHarmonicTabuSearchV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyDiversifiedCuckooAlgorithm import ( + EnhancedHarmonyDiversifiedCuckooAlgorithm, + ) + + lama_register["EnhancedHarmonyDiversifiedCuckooAlgorithm"] = EnhancedHarmonyDiversifiedCuckooAlgorithm + LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm" + ).set_name("LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm", register=True) +except Exception as e: + print("EnhancedHarmonyDiversifiedCuckooAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyFireworkOptimizer import EnhancedHarmonyFireworkOptimizer + + lama_register["EnhancedHarmonyFireworkOptimizer"] = EnhancedHarmonyFireworkOptimizer + LLAMAEnhancedHarmonyFireworkOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyFireworkOptimizer" + ).set_name("LLAMAEnhancedHarmonyFireworkOptimizer", register=True) +except Exception as e: + print("EnhancedHarmonyFireworkOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV2 import ( + EnhancedHarmonyMemeticAlgorithmV2, + ) + + lama_register["EnhancedHarmonyMemeticAlgorithmV2"] = EnhancedHarmonyMemeticAlgorithmV2 + LLAMAEnhancedHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV2" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV2", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV3 import ( + EnhancedHarmonyMemeticAlgorithmV3, + ) + + lama_register["EnhancedHarmonyMemeticAlgorithmV3"] = EnhancedHarmonyMemeticAlgorithmV3 + LLAMAEnhancedHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV3" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV3", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticAlgorithmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV4 import ( + EnhancedHarmonyMemeticAlgorithmV4, + ) + + lama_register["EnhancedHarmonyMemeticAlgorithmV4"] = EnhancedHarmonyMemeticAlgorithmV4 + LLAMAEnhancedHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV4" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV4", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticAlgorithmV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV10 import ( + EnhancedHarmonyMemeticOptimizationV10, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV10"] = EnhancedHarmonyMemeticOptimizationV10 + LLAMAEnhancedHarmonyMemeticOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV10" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV10", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV11 import ( + EnhancedHarmonyMemeticOptimizationV11, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV11"] = EnhancedHarmonyMemeticOptimizationV11 + LLAMAEnhancedHarmonyMemeticOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV11" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV11", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV12 import ( + EnhancedHarmonyMemeticOptimizationV12, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV12"] = EnhancedHarmonyMemeticOptimizationV12 + LLAMAEnhancedHarmonyMemeticOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV12" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV12", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV13 import ( + EnhancedHarmonyMemeticOptimizationV13, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV13"] = EnhancedHarmonyMemeticOptimizationV13 + LLAMAEnhancedHarmonyMemeticOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV13" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV13", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV14 import ( + EnhancedHarmonyMemeticOptimizationV14, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV14"] = EnhancedHarmonyMemeticOptimizationV14 + LLAMAEnhancedHarmonyMemeticOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV14" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV14", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV15 import ( + EnhancedHarmonyMemeticOptimizationV15, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV15"] = EnhancedHarmonyMemeticOptimizationV15 + LLAMAEnhancedHarmonyMemeticOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV15" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV15", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV16 import ( + EnhancedHarmonyMemeticOptimizationV16, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV16"] = EnhancedHarmonyMemeticOptimizationV16 + LLAMAEnhancedHarmonyMemeticOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV16" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV16", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV17 import ( + EnhancedHarmonyMemeticOptimizationV17, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV17"] = EnhancedHarmonyMemeticOptimizationV17 + LLAMAEnhancedHarmonyMemeticOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV17" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV17", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV34 import ( + EnhancedHarmonyMemeticOptimizationV34, + ) + + lama_register["EnhancedHarmonyMemeticOptimizationV34"] = EnhancedHarmonyMemeticOptimizationV34 + LLAMAEnhancedHarmonyMemeticOptimizationV34 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV34" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV34", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticOptimizationV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearch import EnhancedHarmonyMemeticSearch + + lama_register["EnhancedHarmonyMemeticSearch"] = EnhancedHarmonyMemeticSearch + LLAMAEnhancedHarmonyMemeticSearch = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearch" + ).set_name("LLAMAEnhancedHarmonyMemeticSearch", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV2 import EnhancedHarmonyMemeticSearchV2 + + lama_register["EnhancedHarmonyMemeticSearchV2"] = EnhancedHarmonyMemeticSearchV2 + LLAMAEnhancedHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearchV2" + ).set_name("LLAMAEnhancedHarmonyMemeticSearchV2", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV3 import EnhancedHarmonyMemeticSearchV3 + + lama_register["EnhancedHarmonyMemeticSearchV3"] = EnhancedHarmonyMemeticSearchV3 + LLAMAEnhancedHarmonyMemeticSearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearchV3" + ).set_name("LLAMAEnhancedHarmonyMemeticSearchV3", register=True) +except Exception as e: + print("EnhancedHarmonyMemeticSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonySearchOB import EnhancedHarmonySearchOB + + lama_register["EnhancedHarmonySearchOB"] = EnhancedHarmonySearchOB + LLAMAEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB").set_name( + "LLAMAEnhancedHarmonySearchOB", register=True + ) +except Exception as e: + print("EnhancedHarmonySearchOB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( + EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + ) + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: + print("EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightV2 import ( + EnhancedHarmonySearchWithAdaptiveLevyFlightV2, + ) + + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightV2"] = ( + EnhancedHarmonySearchWithAdaptiveLevyFlightV2 + ) + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2" + ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2", register=True) +except Exception as e: + print("EnhancedHarmonySearchWithAdaptiveLevyFlightV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimization import EnhancedHarmonyTabuOptimization + + lama_register["EnhancedHarmonyTabuOptimization"] = EnhancedHarmonyTabuOptimization + LLAMAEnhancedHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimization" + ).set_name("LLAMAEnhancedHarmonyTabuOptimization", register=True) +except Exception as e: + print("EnhancedHarmonyTabuOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV2 import ( + EnhancedHarmonyTabuOptimizationV2, + ) + + lama_register["EnhancedHarmonyTabuOptimizationV2"] = EnhancedHarmonyTabuOptimizationV2 + LLAMAEnhancedHarmonyTabuOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimizationV2" + ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV2", register=True) +except Exception as e: + print("EnhancedHarmonyTabuOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV3 import ( + EnhancedHarmonyTabuOptimizationV3, + ) + + lama_register["EnhancedHarmonyTabuOptimizationV3"] = EnhancedHarmonyTabuOptimizationV3 + LLAMAEnhancedHarmonyTabuOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimizationV3" + ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV3", register=True) +except Exception as e: + print("EnhancedHarmonyTabuOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearch import EnhancedHarmonyTabuSearch + + lama_register["EnhancedHarmonyTabuSearch"] = EnhancedHarmonyTabuSearch + LLAMAEnhancedHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch").set_name( + "LLAMAEnhancedHarmonyTabuSearch", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV2 import EnhancedHarmonyTabuSearchV2 + + lama_register["EnhancedHarmonyTabuSearchV2"] = EnhancedHarmonyTabuSearchV2 + LLAMAEnhancedHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2").set_name( + "LLAMAEnhancedHarmonyTabuSearchV2", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV3 import EnhancedHarmonyTabuSearchV3 + + lama_register["EnhancedHarmonyTabuSearchV3"] = EnhancedHarmonyTabuSearchV3 + LLAMAEnhancedHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3").set_name( + "LLAMAEnhancedHarmonyTabuSearchV3", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV4 import EnhancedHarmonyTabuSearchV4 + + lama_register["EnhancedHarmonyTabuSearchV4"] = EnhancedHarmonyTabuSearchV4 + LLAMAEnhancedHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4").set_name( + "LLAMAEnhancedHarmonyTabuSearchV4", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV6 import EnhancedHarmonyTabuSearchV6 + + lama_register["EnhancedHarmonyTabuSearchV6"] = EnhancedHarmonyTabuSearchV6 + LLAMAEnhancedHarmonyTabuSearchV6 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6").set_name( + "LLAMAEnhancedHarmonyTabuSearchV6", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearchV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV7 import EnhancedHarmonyTabuSearchV7 + + lama_register["EnhancedHarmonyTabuSearchV7"] = EnhancedHarmonyTabuSearchV7 + LLAMAEnhancedHarmonyTabuSearchV7 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7").set_name( + "LLAMAEnhancedHarmonyTabuSearchV7", register=True + ) +except Exception as e: + print("EnhancedHarmonyTabuSearchV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHierarchicalCovarianceMatrixAdaptation import ( + EnhancedHierarchicalCovarianceMatrixAdaptation, + ) + + lama_register["EnhancedHierarchicalCovarianceMatrixAdaptation"] = ( + EnhancedHierarchicalCovarianceMatrixAdaptation + ) + LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation" + ).set_name("LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("EnhancedHierarchicalCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveDifferentialEvolution import ( + EnhancedHybridAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedHybridAdaptiveDifferentialEvolution"] = EnhancedHybridAdaptiveDifferentialEvolution + LLAMAEnhancedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveExplorationOptimizer import ( + EnhancedHybridAdaptiveExplorationOptimizer, + ) + + lama_register["EnhancedHybridAdaptiveExplorationOptimizer"] = EnhancedHybridAdaptiveExplorationOptimizer + LLAMAEnhancedHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveExplorationOptimizer", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveGeneticSwarmOptimizer import ( + EnhancedHybridAdaptiveGeneticSwarmOptimizer, + ) + + lama_register["EnhancedHybridAdaptiveGeneticSwarmOptimizer"] = EnhancedHybridAdaptiveGeneticSwarmOptimizer + LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveHarmonicFireworksTabuSearch import ( + EnhancedHybridAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["EnhancedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( + EnhancedHybridAdaptiveHarmonicFireworksTabuSearch + ) + LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMemoryAnnealing import ( + EnhancedHybridAdaptiveMemoryAnnealing, + ) + + lama_register["EnhancedHybridAdaptiveMemoryAnnealing"] = EnhancedHybridAdaptiveMemoryAnnealing + LLAMAEnhancedHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing" + ).set_name("LLAMAEnhancedHybridAdaptiveMemoryAnnealing", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiPhaseEvolution import ( + EnhancedHybridAdaptiveMultiPhaseEvolution, + ) + + lama_register["EnhancedHybridAdaptiveMultiPhaseEvolution"] = EnhancedHybridAdaptiveMultiPhaseEvolution + LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveMultiPhaseEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiStageOptimization import ( + EnhancedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["EnhancedHybridAdaptiveMultiStageOptimization"] = ( + EnhancedHybridAdaptiveMultiStageOptimization + ) + LLAMAEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAEnhancedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveQuantumOptimizer import ( + EnhancedHybridAdaptiveQuantumOptimizer, + ) + + lama_register["EnhancedHybridAdaptiveQuantumOptimizer"] = EnhancedHybridAdaptiveQuantumOptimizer + LLAMAEnhancedHybridAdaptiveQuantumOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveQuantumOptimizer", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveQuantumOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveSearch import EnhancedHybridAdaptiveSearch + + lama_register["EnhancedHybridAdaptiveSearch"] = EnhancedHybridAdaptiveSearch + LLAMAEnhancedHybridAdaptiveSearch = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveSearch" + ).set_name("LLAMAEnhancedHybridAdaptiveSearch", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution import ( + EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( + EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution + ) + LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridCMAESDE import EnhancedHybridCMAESDE + + lama_register["EnhancedHybridCMAESDE"] = EnhancedHybridCMAESDE + LLAMAEnhancedHybridCMAESDE = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE").set_name( + "LLAMAEnhancedHybridCMAESDE", register=True + ) +except Exception as e: + print("EnhancedHybridCMAESDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridCovarianceMatrixDifferentialEvolution import ( + EnhancedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedHybridCovarianceMatrixDifferentialEvolution"] = ( + EnhancedHybridCovarianceMatrixDifferentialEvolution + ) + LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithDynamicAdaptationV4 import ( + EnhancedHybridDEPSOWithDynamicAdaptationV4, + ) + + lama_register["EnhancedHybridDEPSOWithDynamicAdaptationV4"] = EnhancedHybridDEPSOWithDynamicAdaptationV4 + LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4 = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4" + ).set_name("LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4", register=True) +except Exception as e: + print("EnhancedHybridDEPSOWithDynamicAdaptationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithQuantumLevyFlight import ( + EnhancedHybridDEPSOWithQuantumLevyFlight, + ) + + lama_register["EnhancedHybridDEPSOWithQuantumLevyFlight"] = EnhancedHybridDEPSOWithQuantumLevyFlight + LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight" + ).set_name("LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight", register=True) +except Exception as e: + print("EnhancedHybridDEPSOWithQuantumLevyFlight can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridDEPSOwithAdaptiveRestart import ( + EnhancedHybridDEPSOwithAdaptiveRestart, + ) + + lama_register["EnhancedHybridDEPSOwithAdaptiveRestart"] = EnhancedHybridDEPSOwithAdaptiveRestart + LLAMAEnhancedHybridDEPSOwithAdaptiveRestart = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart" + ).set_name("LLAMAEnhancedHybridDEPSOwithAdaptiveRestart", register=True) +except Exception as e: + print("EnhancedHybridDEPSOwithAdaptiveRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridDifferentialEvolutionMemeticOptimizer import ( + EnhancedHybridDifferentialEvolutionMemeticOptimizer, + ) + + lama_register["EnhancedHybridDifferentialEvolutionMemeticOptimizer"] = ( + EnhancedHybridDifferentialEvolutionMemeticOptimizer + ) + LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer" + ).set_name("LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedHybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridDynamicAdaptiveExplorationOptimization import ( + EnhancedHybridDynamicAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedHybridDynamicAdaptiveExplorationOptimization"] = ( + EnhancedHybridDynamicAdaptiveExplorationOptimization + ) + LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("EnhancedHybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridExplorationOptimization import ( + EnhancedHybridExplorationOptimization, + ) + + lama_register["EnhancedHybridExplorationOptimization"] = EnhancedHybridExplorationOptimization + LLAMAEnhancedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridExplorationOptimization" + ).set_name("LLAMAEnhancedHybridExplorationOptimization", register=True) +except Exception as e: + print("EnhancedHybridExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridGradientAnnealingWithMemory import ( + EnhancedHybridGradientAnnealingWithMemory, + ) + + lama_register["EnhancedHybridGradientAnnealingWithMemory"] = EnhancedHybridGradientAnnealingWithMemory + LLAMAEnhancedHybridGradientAnnealingWithMemory = NonObjectOptimizer( + method="LLAMAEnhancedHybridGradientAnnealingWithMemory" + ).set_name("LLAMAEnhancedHybridGradientAnnealingWithMemory", register=True) +except Exception as e: + print("EnhancedHybridGradientAnnealingWithMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridGradientBasedStrategyV8 import ( + EnhancedHybridGradientBasedStrategyV8, + ) + + lama_register["EnhancedHybridGradientBasedStrategyV8"] = EnhancedHybridGradientBasedStrategyV8 + LLAMAEnhancedHybridGradientBasedStrategyV8 = NonObjectOptimizer( + method="LLAMAEnhancedHybridGradientBasedStrategyV8" + ).set_name("LLAMAEnhancedHybridGradientBasedStrategyV8", register=True) +except Exception as e: + print("EnhancedHybridGradientBasedStrategyV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridGradientPSO import EnhancedHybridGradientPSO + + lama_register["EnhancedHybridGradientPSO"] = EnhancedHybridGradientPSO + LLAMAEnhancedHybridGradientPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO").set_name( + "LLAMAEnhancedHybridGradientPSO", register=True + ) +except Exception as e: + print("EnhancedHybridGradientPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridHarmonySearchWithAdaptiveMutationV20 import ( + EnhancedHybridHarmonySearchWithAdaptiveMutationV20, + ) + + lama_register["EnhancedHybridHarmonySearchWithAdaptiveMutationV20"] = ( + EnhancedHybridHarmonySearchWithAdaptiveMutationV20 + ) + LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20 = NonObjectOptimizer( + method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20" + ).set_name("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20", register=True) +except Exception as e: + print("EnhancedHybridHarmonySearchWithAdaptiveMutationV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMemoryAdaptiveDE import EnhancedHybridMemoryAdaptiveDE + + lama_register["EnhancedHybridMemoryAdaptiveDE"] = EnhancedHybridMemoryAdaptiveDE + LLAMAEnhancedHybridMemoryAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedHybridMemoryAdaptiveDE" + ).set_name("LLAMAEnhancedHybridMemoryAdaptiveDE", register=True) +except Exception as e: + print("EnhancedHybridMemoryAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMemoryPSO import EnhancedHybridMemoryPSO + + lama_register["EnhancedHybridMemoryPSO"] = EnhancedHybridMemoryPSO + LLAMAEnhancedHybridMemoryPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO").set_name( + "LLAMAEnhancedHybridMemoryPSO", register=True + ) +except Exception as e: + print("EnhancedHybridMemoryPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizer import ( + EnhancedHybridMetaHeuristicOptimizer, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizer"] = EnhancedHybridMetaHeuristicOptimizer + LLAMAEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV10 import ( + EnhancedHybridMetaHeuristicOptimizerV10, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV10"] = EnhancedHybridMetaHeuristicOptimizerV10 + LLAMAEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV10", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV11 import ( + EnhancedHybridMetaHeuristicOptimizerV11, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV11"] = EnhancedHybridMetaHeuristicOptimizerV11 + LLAMAEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV11", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV12 import ( + EnhancedHybridMetaHeuristicOptimizerV12, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV12"] = EnhancedHybridMetaHeuristicOptimizerV12 + LLAMAEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV12", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV15 import ( + EnhancedHybridMetaHeuristicOptimizerV15, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV15"] = EnhancedHybridMetaHeuristicOptimizerV15 + LLAMAEnhancedHybridMetaHeuristicOptimizerV15 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV15", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV2 import ( + EnhancedHybridMetaHeuristicOptimizerV2, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV2"] = EnhancedHybridMetaHeuristicOptimizerV2 + LLAMAEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV2", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV3 import ( + EnhancedHybridMetaHeuristicOptimizerV3, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV3"] = EnhancedHybridMetaHeuristicOptimizerV3 + LLAMAEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV3", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV4 import ( + EnhancedHybridMetaHeuristicOptimizerV4, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV4"] = EnhancedHybridMetaHeuristicOptimizerV4 + LLAMAEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV4", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV5 import ( + EnhancedHybridMetaHeuristicOptimizerV5, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV5"] = EnhancedHybridMetaHeuristicOptimizerV5 + LLAMAEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV5", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV6 import ( + EnhancedHybridMetaHeuristicOptimizerV6, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV6"] = EnhancedHybridMetaHeuristicOptimizerV6 + LLAMAEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV6", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV7 import ( + EnhancedHybridMetaHeuristicOptimizerV7, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV7"] = EnhancedHybridMetaHeuristicOptimizerV7 + LLAMAEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV7", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV8 import ( + EnhancedHybridMetaHeuristicOptimizerV8, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV8"] = EnhancedHybridMetaHeuristicOptimizerV8 + LLAMAEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV8", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV9 import ( + EnhancedHybridMetaHeuristicOptimizerV9, + ) + + lama_register["EnhancedHybridMetaHeuristicOptimizerV9"] = EnhancedHybridMetaHeuristicOptimizerV9 + LLAMAEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV9", register=True) +except Exception as e: + print("EnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithm import ( + EnhancedHybridMetaOptimizationAlgorithm, + ) + + lama_register["EnhancedHybridMetaOptimizationAlgorithm"] = EnhancedHybridMetaOptimizationAlgorithm + LLAMAEnhancedHybridMetaOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaOptimizationAlgorithm" + ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithm", register=True) +except Exception as e: + print("EnhancedHybridMetaOptimizationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithmV2 import ( + EnhancedHybridMetaOptimizationAlgorithmV2, + ) + + lama_register["EnhancedHybridMetaOptimizationAlgorithmV2"] = EnhancedHybridMetaOptimizationAlgorithmV2 + LLAMAEnhancedHybridMetaOptimizationAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2" + ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithmV2", register=True) +except Exception as e: + print("EnhancedHybridMetaOptimizationAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridOptimization import EnhancedHybridOptimization + + lama_register["EnhancedHybridOptimization"] = EnhancedHybridOptimization + LLAMAEnhancedHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization").set_name( + "LLAMAEnhancedHybridOptimization", register=True + ) +except Exception as e: + print("EnhancedHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridOptimizer import EnhancedHybridOptimizer + + lama_register["EnhancedHybridOptimizer"] = EnhancedHybridOptimizer + LLAMAEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer").set_name( + "LLAMAEnhancedHybridOptimizer", register=True + ) +except Exception as e: + print("EnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridQuantumDifferentialPSO import ( + EnhancedHybridQuantumDifferentialPSO, + ) + + lama_register["EnhancedHybridQuantumDifferentialPSO"] = EnhancedHybridQuantumDifferentialPSO + LLAMAEnhancedHybridQuantumDifferentialPSO = NonObjectOptimizer( + method="LLAMAEnhancedHybridQuantumDifferentialPSO" + ).set_name("LLAMAEnhancedHybridQuantumDifferentialPSO", register=True) +except Exception as e: + print("EnhancedHybridQuantumDifferentialPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridQuasiRandomGradientDifferentialEvolution import ( + EnhancedHybridQuasiRandomGradientDifferentialEvolution, + ) + + lama_register["EnhancedHybridQuasiRandomGradientDifferentialEvolution"] = ( + EnhancedHybridQuasiRandomGradientDifferentialEvolution + ) + LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedHybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridSearch import EnhancedHybridSearch + + lama_register["EnhancedHybridSearch"] = EnhancedHybridSearch + LLAMAEnhancedHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch").set_name( + "LLAMAEnhancedHybridSearch", register=True + ) +except Exception as e: + print("EnhancedHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHybridSimulatedAnnealingOptimization import ( + EnhancedHybridSimulatedAnnealingOptimization, + ) + + lama_register["EnhancedHybridSimulatedAnnealingOptimization"] = ( + EnhancedHybridSimulatedAnnealingOptimization + ) + LLAMAEnhancedHybridSimulatedAnnealingOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridSimulatedAnnealingOptimization" + ).set_name("LLAMAEnhancedHybridSimulatedAnnealingOptimization", register=True) +except Exception as e: + print("EnhancedHybridSimulatedAnnealingOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperAdaptiveHybridDEPSO import EnhancedHyperAdaptiveHybridDEPSO + + lama_register["EnhancedHyperAdaptiveHybridDEPSO"] = EnhancedHyperAdaptiveHybridDEPSO + LLAMAEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedHyperAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedHyperAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("EnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 import ( + EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59, + ) + + lama_register["EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59"] = ( + EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 + ) + LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59" + ).set_name("LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59", register=True) +except Exception as e: + print("EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 import ( + EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62, + ) + + lama_register["EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62"] = ( + EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 + ) + LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62" + ).set_name("LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62", register=True) +except Exception as e: + print("EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperOptimizedMultiStrategicOptimizerV49 import ( + EnhancedHyperOptimizedMultiStrategicOptimizerV49, + ) + + lama_register["EnhancedHyperOptimizedMultiStrategicOptimizerV49"] = ( + EnhancedHyperOptimizedMultiStrategicOptimizerV49 + ) + LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49" + ).set_name("LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49", register=True) +except Exception as e: + print("EnhancedHyperOptimizedMultiStrategicOptimizerV49 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 import ( + EnhancedHyperParameterTunedMetaHeuristicOptimizerV4, + ) + + lama_register["EnhancedHyperParameterTunedMetaHeuristicOptimizerV4"] = ( + EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 + ) + LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4", register=True) +except Exception as e: + print("EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedHyperStrategicOptimizerV56 import ( + EnhancedHyperStrategicOptimizerV56, + ) + + lama_register["EnhancedHyperStrategicOptimizerV56"] = EnhancedHyperStrategicOptimizerV56 + LLAMAEnhancedHyperStrategicOptimizerV56 = NonObjectOptimizer( + method="LLAMAEnhancedHyperStrategicOptimizerV56" + ).set_name("LLAMAEnhancedHyperStrategicOptimizerV56", register=True) +except Exception as e: + print("EnhancedHyperStrategicOptimizerV56 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedImprovedDifferentialEvolutionLocalSearch_v58 import ( + EnhancedImprovedDifferentialEvolutionLocalSearch_v58, + ) + + lama_register["EnhancedImprovedDifferentialEvolutionLocalSearch_v58"] = ( + EnhancedImprovedDifferentialEvolutionLocalSearch_v58 + ) + LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58" + ).set_name("LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58", register=True) +except Exception as e: + print("EnhancedImprovedDifferentialEvolutionLocalSearch_v58 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer import ( + EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer, + ) + + lama_register["EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer"] = ( + EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer + ) + LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer", register=True) +except Exception as e: + print("EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 import ( + EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77, + ) + + lama_register["EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77"] = ( + EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 + ) + LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77" + ).set_name("LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77", register=True) +except Exception as e: + print("EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 import ( + EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7"] = ( + EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 + ) + LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategy import EnhancedIslandEvolutionStrategy + + lama_register["EnhancedIslandEvolutionStrategy"] = EnhancedIslandEvolutionStrategy + LLAMAEnhancedIslandEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategy" + ).set_name("LLAMAEnhancedIslandEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedIslandEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV10 import ( + EnhancedIslandEvolutionStrategyV10, + ) + + lama_register["EnhancedIslandEvolutionStrategyV10"] = EnhancedIslandEvolutionStrategyV10 + LLAMAEnhancedIslandEvolutionStrategyV10 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV10" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV10", register=True) +except Exception as e: + print("EnhancedIslandEvolutionStrategyV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV3 import ( + EnhancedIslandEvolutionStrategyV3, + ) + + lama_register["EnhancedIslandEvolutionStrategyV3"] = EnhancedIslandEvolutionStrategyV3 + LLAMAEnhancedIslandEvolutionStrategyV3 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV3" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV3", register=True) +except Exception as e: + print("EnhancedIslandEvolutionStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV7 import ( + EnhancedIslandEvolutionStrategyV7, + ) + + lama_register["EnhancedIslandEvolutionStrategyV7"] = EnhancedIslandEvolutionStrategyV7 + LLAMAEnhancedIslandEvolutionStrategyV7 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV7" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV7", register=True) +except Exception as e: + print("EnhancedIslandEvolutionStrategyV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV8 import ( + EnhancedIslandEvolutionStrategyV8, + ) + + lama_register["EnhancedIslandEvolutionStrategyV8"] = EnhancedIslandEvolutionStrategyV8 + LLAMAEnhancedIslandEvolutionStrategyV8 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV8" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV8", register=True) +except Exception as e: + print("EnhancedIslandEvolutionStrategyV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedLocalSearchAdaptiveStrategyV29 import ( + EnhancedLocalSearchAdaptiveStrategyV29, + ) + + lama_register["EnhancedLocalSearchAdaptiveStrategyV29"] = EnhancedLocalSearchAdaptiveStrategyV29 + LLAMAEnhancedLocalSearchAdaptiveStrategyV29 = NonObjectOptimizer( + method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29" + ).set_name("LLAMAEnhancedLocalSearchAdaptiveStrategyV29", register=True) +except Exception as e: + print("EnhancedLocalSearchAdaptiveStrategyV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedLocalSearchQuantumSimulatedAnnealingV6 import ( + EnhancedLocalSearchQuantumSimulatedAnnealingV6, + ) + + lama_register["EnhancedLocalSearchQuantumSimulatedAnnealingV6"] = ( + EnhancedLocalSearchQuantumSimulatedAnnealingV6 + ) + LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6 = NonObjectOptimizer( + method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6" + ).set_name("LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6", register=True) +except Exception as e: + print("EnhancedLocalSearchQuantumSimulatedAnnealingV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemeticDifferentialEvolution import ( + EnhancedMemeticDifferentialEvolution, + ) + + lama_register["EnhancedMemeticDifferentialEvolution"] = EnhancedMemeticDifferentialEvolution + LLAMAEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedMemeticDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemeticEvolutionarySearch import ( + EnhancedMemeticEvolutionarySearch, + ) + + lama_register["EnhancedMemeticEvolutionarySearch"] = EnhancedMemeticEvolutionarySearch + LLAMAEnhancedMemeticEvolutionarySearch = NonObjectOptimizer( + method="LLAMAEnhancedMemeticEvolutionarySearch" + ).set_name("LLAMAEnhancedMemeticEvolutionarySearch", register=True) +except Exception as e: + print("EnhancedMemeticEvolutionarySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemeticHarmonyOptimization import ( + EnhancedMemeticHarmonyOptimization, + ) + + lama_register["EnhancedMemeticHarmonyOptimization"] = EnhancedMemeticHarmonyOptimization + LLAMAEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedMemeticHarmonyOptimization" + ).set_name("LLAMAEnhancedMemeticHarmonyOptimization", register=True) +except Exception as e: + print("EnhancedMemeticHarmonyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemoryAdaptiveDynamicHybridOptimizer import ( + EnhancedMemoryAdaptiveDynamicHybridOptimizer, + ) + + lama_register["EnhancedMemoryAdaptiveDynamicHybridOptimizer"] = ( + EnhancedMemoryAdaptiveDynamicHybridOptimizer + ) + LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: + print("EnhancedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 import ( + EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77, + ) + + lama_register["EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77"] = ( + EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 + ) + LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77", register=True) +except Exception as e: + print("EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV41 import ( + EnhancedMemoryGuidedAdaptiveStrategyV41, + ) + + lama_register["EnhancedMemoryGuidedAdaptiveStrategyV41"] = EnhancedMemoryGuidedAdaptiveStrategyV41 + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41", register=True) +except Exception as e: + print("EnhancedMemoryGuidedAdaptiveStrategyV41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV69 import ( + EnhancedMemoryGuidedAdaptiveStrategyV69, + ) + + lama_register["EnhancedMemoryGuidedAdaptiveStrategyV69"] = EnhancedMemoryGuidedAdaptiveStrategyV69 + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69", register=True) +except Exception as e: + print("EnhancedMemoryGuidedAdaptiveStrategyV69 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaDynamicPrecisionOptimizerV1 import ( + EnhancedMetaDynamicPrecisionOptimizerV1, + ) + + lama_register["EnhancedMetaDynamicPrecisionOptimizerV1"] = EnhancedMetaDynamicPrecisionOptimizerV1 + LLAMAEnhancedMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1" + ).set_name("LLAMAEnhancedMetaDynamicPrecisionOptimizerV1", register=True) +except Exception as e: + print("EnhancedMetaDynamicPrecisionOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaHeuristicOptimizerV2 import EnhancedMetaHeuristicOptimizerV2 + + lama_register["EnhancedMetaHeuristicOptimizerV2"] = EnhancedMetaHeuristicOptimizerV2 + LLAMAEnhancedMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedMetaHeuristicOptimizerV2", register=True) +except Exception as e: + print("EnhancedMetaHeuristicOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V1, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V1"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V2, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V2"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V3, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V3"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V4, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V4"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V5, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V5"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V6, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V6"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V7, + ) + + lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V7"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7", register=True) +except Exception as e: + print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv2 import EnhancedMetaNetAQAPSOv2 + + lama_register["EnhancedMetaNetAQAPSOv2"] = EnhancedMetaNetAQAPSOv2 + LLAMAEnhancedMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2").set_name( + "LLAMAEnhancedMetaNetAQAPSOv2", register=True + ) +except Exception as e: + print("EnhancedMetaNetAQAPSOv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv3 import EnhancedMetaNetAQAPSOv3 + + lama_register["EnhancedMetaNetAQAPSOv3"] = EnhancedMetaNetAQAPSOv3 + LLAMAEnhancedMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3").set_name( + "LLAMAEnhancedMetaNetAQAPSOv3", register=True + ) +except Exception as e: + print("EnhancedMetaNetAQAPSOv3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv4 import EnhancedMetaNetAQAPSOv4 + + lama_register["EnhancedMetaNetAQAPSOv4"] = EnhancedMetaNetAQAPSOv4 + LLAMAEnhancedMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4").set_name( + "LLAMAEnhancedMetaNetAQAPSOv4", register=True + ) +except Exception as e: + print("EnhancedMetaNetAQAPSOv4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv5 import EnhancedMetaNetAQAPSOv5 + + lama_register["EnhancedMetaNetAQAPSOv5"] = EnhancedMetaNetAQAPSOv5 + LLAMAEnhancedMetaNetAQAPSOv5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5").set_name( + "LLAMAEnhancedMetaNetAQAPSOv5", register=True + ) +except Exception as e: + print("EnhancedMetaNetAQAPSOv5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv6 import EnhancedMetaNetAQAPSOv6 + + lama_register["EnhancedMetaNetAQAPSOv6"] = EnhancedMetaNetAQAPSOv6 + LLAMAEnhancedMetaNetAQAPSOv6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6").set_name( + "LLAMAEnhancedMetaNetAQAPSOv6", register=True + ) +except Exception as e: + print("EnhancedMetaNetAQAPSOv6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetPSO import EnhancedMetaNetPSO + + lama_register["EnhancedMetaNetPSO"] = EnhancedMetaNetPSO + LLAMAEnhancedMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO").set_name( + "LLAMAEnhancedMetaNetPSO", register=True + ) +except Exception as e: + print("EnhancedMetaNetPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaNetPSOv2 import EnhancedMetaNetPSOv2 + + lama_register["EnhancedMetaNetPSOv2"] = EnhancedMetaNetPSOv2 + LLAMAEnhancedMetaNetPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2").set_name( + "LLAMAEnhancedMetaNetPSOv2", register=True + ) +except Exception as e: + print("EnhancedMetaNetPSOv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMetaPopulationAdaptiveGradientSearch import ( + EnhancedMetaPopulationAdaptiveGradientSearch, + ) + + lama_register["EnhancedMetaPopulationAdaptiveGradientSearch"] = ( + EnhancedMetaPopulationAdaptiveGradientSearch + ) + LLAMAEnhancedMetaPopulationAdaptiveGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch" + ).set_name("LLAMAEnhancedMetaPopulationAdaptiveGradientSearch", register=True) +except Exception as e: + print("EnhancedMetaPopulationAdaptiveGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiFocalAdaptiveOptimizer import ( + EnhancedMultiFocalAdaptiveOptimizer, + ) + + lama_register["EnhancedMultiFocalAdaptiveOptimizer"] = EnhancedMultiFocalAdaptiveOptimizer + LLAMAEnhancedMultiFocalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiFocalAdaptiveOptimizer" + ).set_name("LLAMAEnhancedMultiFocalAdaptiveOptimizer", register=True) +except Exception as e: + print("EnhancedMultiFocalAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiModalAdaptiveOptimizer import ( + EnhancedMultiModalAdaptiveOptimizer, + ) + + lama_register["EnhancedMultiModalAdaptiveOptimizer"] = EnhancedMultiModalAdaptiveOptimizer + LLAMAEnhancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalAdaptiveOptimizer" + ).set_name("LLAMAEnhancedMultiModalAdaptiveOptimizer", register=True) +except Exception as e: + print("EnhancedMultiModalAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiModalConvergenceOptimizer import ( + EnhancedMultiModalConvergenceOptimizer, + ) + + lama_register["EnhancedMultiModalConvergenceOptimizer"] = EnhancedMultiModalConvergenceOptimizer + LLAMAEnhancedMultiModalConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalConvergenceOptimizer" + ).set_name("LLAMAEnhancedMultiModalConvergenceOptimizer", register=True) +except Exception as e: + print("EnhancedMultiModalConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiModalExplorationStrategy import ( + EnhancedMultiModalExplorationStrategy, + ) + + lama_register["EnhancedMultiModalExplorationStrategy"] = EnhancedMultiModalExplorationStrategy + LLAMAEnhancedMultiModalExplorationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalExplorationStrategy" + ).set_name("LLAMAEnhancedMultiModalExplorationStrategy", register=True) +except Exception as e: + print("EnhancedMultiModalExplorationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiModalMemoryHybridOptimizer import ( + EnhancedMultiModalMemoryHybridOptimizer, + ) + + lama_register["EnhancedMultiModalMemoryHybridOptimizer"] = EnhancedMultiModalMemoryHybridOptimizer + LLAMAEnhancedMultiModalMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedMultiModalMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedMultiModalMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiOperatorSearch import EnhancedMultiOperatorSearch + + lama_register["EnhancedMultiOperatorSearch"] = EnhancedMultiOperatorSearch + LLAMAEnhancedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch").set_name( + "LLAMAEnhancedMultiOperatorSearch", register=True + ) +except Exception as e: + print("EnhancedMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiOperatorSearch2 import EnhancedMultiOperatorSearch2 + + lama_register["EnhancedMultiOperatorSearch2"] = EnhancedMultiOperatorSearch2 + LLAMAEnhancedMultiOperatorSearch2 = NonObjectOptimizer( + method="LLAMAEnhancedMultiOperatorSearch2" + ).set_name("LLAMAEnhancedMultiOperatorSearch2", register=True) +except Exception as e: + print("EnhancedMultiOperatorSearch2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiPhaseAdaptiveDE import EnhancedMultiPhaseAdaptiveDE + + lama_register["EnhancedMultiPhaseAdaptiveDE"] = EnhancedMultiPhaseAdaptiveDE + LLAMAEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedMultiPhaseAdaptiveDE" + ).set_name("LLAMAEnhancedMultiPhaseAdaptiveDE", register=True) +except Exception as e: + print("EnhancedMultiPhaseAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiPhaseOptimizationAlgorithm import ( + EnhancedMultiPhaseOptimizationAlgorithm, + ) + + lama_register["EnhancedMultiPhaseOptimizationAlgorithm"] = EnhancedMultiPhaseOptimizationAlgorithm + LLAMAEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm" + ).set_name("LLAMAEnhancedMultiPhaseOptimizationAlgorithm", register=True) +except Exception as e: + print("EnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiStageGradientBoostedAnnealing import ( + EnhancedMultiStageGradientBoostedAnnealing, + ) + + lama_register["EnhancedMultiStageGradientBoostedAnnealing"] = EnhancedMultiStageGradientBoostedAnnealing + LLAMAEnhancedMultiStageGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedMultiStageGradientBoostedAnnealing" + ).set_name("LLAMAEnhancedMultiStageGradientBoostedAnnealing", register=True) +except Exception as e: + print("EnhancedMultiStageGradientBoostedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiStrategyDifferentialEvolution import ( + EnhancedMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedMultiStrategyDifferentialEvolution"] = EnhancedMultiStrategyDifferentialEvolution + LLAMAEnhancedMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedMultiStrategyQuantumLevyOptimizer import ( + EnhancedMultiStrategyQuantumLevyOptimizer, + ) + + lama_register["EnhancedMultiStrategyQuantumLevyOptimizer"] = EnhancedMultiStrategyQuantumLevyOptimizer + LLAMAEnhancedMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer" + ).set_name("LLAMAEnhancedMultiStrategyQuantumLevyOptimizer", register=True) +except Exception as e: + print("EnhancedMultiStrategyQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedNicheDifferentialParticleSwarmOptimizer import ( + EnhancedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedNicheDifferentialParticleSwarmOptimizer"] = ( + EnhancedNicheDifferentialParticleSwarmOptimizer + ) + LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOppositionBasedDifferentialEvolution import ( + EnhancedOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedOppositionBasedDifferentialEvolution"] = ( + EnhancedOppositionBasedDifferentialEvolution + ) + LLAMAEnhancedOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearch import ( + EnhancedOppositionBasedHarmonySearch, + ) + + lama_register["EnhancedOppositionBasedHarmonySearch"] = EnhancedOppositionBasedHarmonySearch + LLAMAEnhancedOppositionBasedHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearch" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearch", register=True) +except Exception as e: + print("EnhancedOppositionBasedHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidth import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidth, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidth"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidth + ) + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth", register=True) +except Exception as e: + print("EnhancedOppositionBasedHarmonySearchDynamicBandwidth can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC + ) + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC", register=True) +except Exception as e: + print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: + print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOptimalEvolutionaryGradientOptimizerV9 import ( + EnhancedOptimalEvolutionaryGradientOptimizerV9, + ) + + lama_register["EnhancedOptimalEvolutionaryGradientOptimizerV9"] = ( + EnhancedOptimalEvolutionaryGradientOptimizerV9 + ) + LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9" + ).set_name("LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9", register=True) +except Exception as e: + print("EnhancedOptimalEvolutionaryGradientOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOptimalPrecisionEvolutionaryThermalOptimizer import ( + EnhancedOptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["EnhancedOptimalPrecisionEvolutionaryThermalOptimizer"] = ( + EnhancedOptimalPrecisionEvolutionaryThermalOptimizer + ) + LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: + print("EnhancedOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOptimizedEvolutiveStrategy import ( + EnhancedOptimizedEvolutiveStrategy, + ) + + lama_register["EnhancedOptimizedEvolutiveStrategy"] = EnhancedOptimizedEvolutiveStrategy + LLAMAEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedOptimizedEvolutiveStrategy" + ).set_name("LLAMAEnhancedOptimizedEvolutiveStrategy", register=True) +except Exception as e: + print("EnhancedOptimizedEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 import ( + EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46, + ) + + lama_register["EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46"] = ( + EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 + ) + LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 = NonObjectOptimizer( + method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46" + ).set_name("LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46", register=True) +except Exception as e: + print("EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDE import EnhancedOrthogonalDE + + lama_register["EnhancedOrthogonalDE"] = EnhancedOrthogonalDE + LLAMAEnhancedOrthogonalDE = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE").set_name( + "LLAMAEnhancedOrthogonalDE", register=True + ) +except Exception as e: + print("EnhancedOrthogonalDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolution import ( + EnhancedOrthogonalDifferentialEvolution, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolution"] = EnhancedOrthogonalDifferentialEvolution + LLAMAEnhancedOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolution" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedOrthogonalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionImproved import ( + EnhancedOrthogonalDifferentialEvolutionImproved, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolutionImproved"] = ( + EnhancedOrthogonalDifferentialEvolutionImproved + ) + LLAMAEnhancedOrthogonalDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved", register=True) +except Exception as e: + print("EnhancedOrthogonalDifferentialEvolutionImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV2 import ( + EnhancedOrthogonalDifferentialEvolutionV2, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolutionV2"] = EnhancedOrthogonalDifferentialEvolutionV2 + LLAMAEnhancedOrthogonalDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedOrthogonalDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV3 import ( + EnhancedOrthogonalDifferentialEvolutionV3, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolutionV3"] = EnhancedOrthogonalDifferentialEvolutionV3 + LLAMAEnhancedOrthogonalDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV3", register=True) +except Exception as e: + print("EnhancedOrthogonalDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV4 import ( + EnhancedOrthogonalDifferentialEvolutionV4, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolutionV4"] = EnhancedOrthogonalDifferentialEvolutionV4 + LLAMAEnhancedOrthogonalDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV4", register=True) +except Exception as e: + print("EnhancedOrthogonalDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParallelDifferentialEvolution import ( + EnhancedParallelDifferentialEvolution, + ) + + lama_register["EnhancedParallelDifferentialEvolution"] = EnhancedParallelDifferentialEvolution + LLAMAEnhancedParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedParallelDifferentialEvolution" + ).set_name("LLAMAEnhancedParallelDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedParallelDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimization import ( + EnhancedParticleSwarmOptimization, + ) + + lama_register["EnhancedParticleSwarmOptimization"] = EnhancedParticleSwarmOptimization + LLAMAEnhancedParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimization" + ).set_name("LLAMAEnhancedParticleSwarmOptimization", register=True) +except Exception as e: + print("EnhancedParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizer import EnhancedParticleSwarmOptimizer + + lama_register["EnhancedParticleSwarmOptimizer"] = EnhancedParticleSwarmOptimizer + LLAMAEnhancedParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV4 import EnhancedParticleSwarmOptimizerV4 + + lama_register["EnhancedParticleSwarmOptimizerV4"] = EnhancedParticleSwarmOptimizerV4 + LLAMAEnhancedParticleSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV4" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV4", register=True) +except Exception as e: + print("EnhancedParticleSwarmOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV5 import EnhancedParticleSwarmOptimizerV5 + + lama_register["EnhancedParticleSwarmOptimizerV5"] = EnhancedParticleSwarmOptimizerV5 + LLAMAEnhancedParticleSwarmOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV5" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV5", register=True) +except Exception as e: + print("EnhancedParticleSwarmOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV6 import EnhancedParticleSwarmOptimizerV6 + + lama_register["EnhancedParticleSwarmOptimizerV6"] = EnhancedParticleSwarmOptimizerV6 + LLAMAEnhancedParticleSwarmOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV6" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV6", register=True) +except Exception as e: + print("EnhancedParticleSwarmOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPhaseAdaptiveMemoryStrategyV75 import ( + EnhancedPhaseAdaptiveMemoryStrategyV75, + ) + + lama_register["EnhancedPhaseAdaptiveMemoryStrategyV75"] = EnhancedPhaseAdaptiveMemoryStrategyV75 + LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75 = NonObjectOptimizer( + method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75" + ).set_name("LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75", register=True) +except Exception as e: + print("EnhancedPhaseAdaptiveMemoryStrategyV75 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPhaseTransitionMemoryStrategyV82 import ( + EnhancedPhaseTransitionMemoryStrategyV82, + ) + + lama_register["EnhancedPhaseTransitionMemoryStrategyV82"] = EnhancedPhaseTransitionMemoryStrategyV82 + LLAMAEnhancedPhaseTransitionMemoryStrategyV82 = NonObjectOptimizer( + method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82" + ).set_name("LLAMAEnhancedPhaseTransitionMemoryStrategyV82", register=True) +except Exception as e: + print("EnhancedPhaseTransitionMemoryStrategyV82 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveCohortOptimization import ( + EnhancedPrecisionAdaptiveCohortOptimization, + ) + + lama_register["EnhancedPrecisionAdaptiveCohortOptimization"] = EnhancedPrecisionAdaptiveCohortOptimization + LLAMAEnhancedPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization" + ).set_name("LLAMAEnhancedPrecisionAdaptiveCohortOptimization", register=True) +except Exception as e: + print("EnhancedPrecisionAdaptiveCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveGradientClusteringPSO import ( + EnhancedPrecisionAdaptiveGradientClusteringPSO, + ) + + lama_register["EnhancedPrecisionAdaptiveGradientClusteringPSO"] = ( + EnhancedPrecisionAdaptiveGradientClusteringPSO + ) + LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO" + ).set_name("LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO", register=True) +except Exception as e: + print("EnhancedPrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionBoostedDifferentialEvolution import ( + EnhancedPrecisionBoostedDifferentialEvolution, + ) + + lama_register["EnhancedPrecisionBoostedDifferentialEvolution"] = ( + EnhancedPrecisionBoostedDifferentialEvolution + ) + LLAMAEnhancedPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution" + ).set_name("LLAMAEnhancedPrecisionBoostedDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedPrecisionBoostedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionConvergenceOptimizer import ( + EnhancedPrecisionConvergenceOptimizer, + ) + + lama_register["EnhancedPrecisionConvergenceOptimizer"] = EnhancedPrecisionConvergenceOptimizer + LLAMAEnhancedPrecisionConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionConvergenceOptimizer" + ).set_name("LLAMAEnhancedPrecisionConvergenceOptimizer", register=True) +except Exception as e: + print("EnhancedPrecisionConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV38 import ( + EnhancedPrecisionEvolutionaryOptimizerV38, + ) + + lama_register["EnhancedPrecisionEvolutionaryOptimizerV38"] = EnhancedPrecisionEvolutionaryOptimizerV38 + LLAMAEnhancedPrecisionEvolutionaryOptimizerV38 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38" + ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV38", register=True) +except Exception as e: + print("EnhancedPrecisionEvolutionaryOptimizerV38 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV39 import ( + EnhancedPrecisionEvolutionaryOptimizerV39, + ) + + lama_register["EnhancedPrecisionEvolutionaryOptimizerV39"] = EnhancedPrecisionEvolutionaryOptimizerV39 + LLAMAEnhancedPrecisionEvolutionaryOptimizerV39 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39" + ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV39", register=True) +except Exception as e: + print("EnhancedPrecisionEvolutionaryOptimizerV39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionGuidedQuantumStrategy import ( + EnhancedPrecisionGuidedQuantumStrategy, + ) + + lama_register["EnhancedPrecisionGuidedQuantumStrategy"] = EnhancedPrecisionGuidedQuantumStrategy + LLAMAEnhancedPrecisionGuidedQuantumStrategy = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionGuidedQuantumStrategy" + ).set_name("LLAMAEnhancedPrecisionGuidedQuantumStrategy", register=True) +except Exception as e: + print("EnhancedPrecisionGuidedQuantumStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionHybridSearchV2 import EnhancedPrecisionHybridSearchV2 + + lama_register["EnhancedPrecisionHybridSearchV2"] = EnhancedPrecisionHybridSearchV2 + LLAMAEnhancedPrecisionHybridSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionHybridSearchV2" + ).set_name("LLAMAEnhancedPrecisionHybridSearchV2", register=True) +except Exception as e: + print("EnhancedPrecisionHybridSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedPrecisionTunedCrossoverElitistStrategyV14 import ( + EnhancedPrecisionTunedCrossoverElitistStrategyV14, + ) + + lama_register["EnhancedPrecisionTunedCrossoverElitistStrategyV14"] = ( + EnhancedPrecisionTunedCrossoverElitistStrategyV14 + ) + LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14" + ).set_name("LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14", register=True) +except Exception as e: + print("EnhancedPrecisionTunedCrossoverElitistStrategyV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedProgressiveAdaptiveDifferentialEvolution import ( + EnhancedProgressiveAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedProgressiveAdaptiveDifferentialEvolution"] = ( + EnhancedProgressiveAdaptiveDifferentialEvolution + ) + LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHR import EnhancedQAPSOAIRVCHR + + lama_register["EnhancedQAPSOAIRVCHR"] = EnhancedQAPSOAIRVCHR + LLAMAEnhancedQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR").set_name( + "LLAMAEnhancedQAPSOAIRVCHR", register=True + ) +except Exception as e: + print("EnhancedQAPSOAIRVCHR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLS import EnhancedQAPSOAIRVCHRLS + + lama_register["EnhancedQAPSOAIRVCHRLS"] = EnhancedQAPSOAIRVCHRLS + LLAMAEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS").set_name( + "LLAMAEnhancedQAPSOAIRVCHRLS", register=True + ) +except Exception as e: + print("EnhancedQAPSOAIRVCHRLS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLSDP import EnhancedQAPSOAIRVCHRLSDP + + lama_register["EnhancedQAPSOAIRVCHRLSDP"] = EnhancedQAPSOAIRVCHRLSDP + LLAMAEnhancedQAPSOAIRVCHRLSDP = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP").set_name( + "LLAMAEnhancedQAPSOAIRVCHRLSDP", register=True + ) +except Exception as e: + print("EnhancedQAPSOAIRVCHRLSDP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveCrossover import EnhancedQuantumAdaptiveCrossover + + lama_register["EnhancedQuantumAdaptiveCrossover"] = EnhancedQuantumAdaptiveCrossover + LLAMAEnhancedQuantumAdaptiveCrossover = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveCrossover" + ).set_name("LLAMAEnhancedQuantumAdaptiveCrossover", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDE import EnhancedQuantumAdaptiveDE + + lama_register["EnhancedQuantumAdaptiveDE"] = EnhancedQuantumAdaptiveDE + LLAMAEnhancedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE").set_name( + "LLAMAEnhancedQuantumAdaptiveDE", register=True + ) +except Exception as e: + print("EnhancedQuantumAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( + EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, + ) + + lama_register["EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( + EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + ) + LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" + ).set_name("LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveEliteGuidedSearch import ( + EnhancedQuantumAdaptiveEliteGuidedSearch, + ) + + lama_register["EnhancedQuantumAdaptiveEliteGuidedSearch"] = EnhancedQuantumAdaptiveEliteGuidedSearch + LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch" + ).set_name("LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveEliteGuidedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveFireworksOptimizer import ( + EnhancedQuantumAdaptiveFireworksOptimizer, + ) + + lama_register["EnhancedQuantumAdaptiveFireworksOptimizer"] = EnhancedQuantumAdaptiveFireworksOptimizer + LLAMAEnhancedQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer" + ).set_name("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveFireworksOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveGradientDiversityExplorer import ( + EnhancedQuantumAdaptiveGradientDiversityExplorer, + ) + + lama_register["EnhancedQuantumAdaptiveGradientDiversityExplorer"] = ( + EnhancedQuantumAdaptiveGradientDiversityExplorer + ) + LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer" + ).set_name("LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridDEPSO_V4 import ( + EnhancedQuantumAdaptiveHybridDEPSO_V4, + ) + + lama_register["EnhancedQuantumAdaptiveHybridDEPSO_V4"] = EnhancedQuantumAdaptiveHybridDEPSO_V4 + LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4" + ).set_name("LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveHybridDEPSO_V4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridSearchV2 import ( + EnhancedQuantumAdaptiveHybridSearchV2, + ) + + lama_register["EnhancedQuantumAdaptiveHybridSearchV2"] = EnhancedQuantumAdaptiveHybridSearchV2 + LLAMAEnhancedQuantumAdaptiveHybridSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2" + ).set_name("LLAMAEnhancedQuantumAdaptiveHybridSearchV2", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveHybridSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveLevySwarmOptimization import ( + EnhancedQuantumAdaptiveLevySwarmOptimization, + ) + + lama_register["EnhancedQuantumAdaptiveLevySwarmOptimization"] = ( + EnhancedQuantumAdaptiveLevySwarmOptimization + ) + LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization" + ).set_name("LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiPhaseDE_v3 import ( + EnhancedQuantumAdaptiveMultiPhaseDE_v3, + ) + + lama_register["EnhancedQuantumAdaptiveMultiPhaseDE_v3"] = EnhancedQuantumAdaptiveMultiPhaseDE_v3 + LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3" + ).set_name("LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveMultiPhaseDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiStrategyEvolution import ( + EnhancedQuantumAdaptiveMultiStrategyEvolution, + ) + + lama_register["EnhancedQuantumAdaptiveMultiStrategyEvolution"] = ( + EnhancedQuantumAdaptiveMultiStrategyEvolution + ) + LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution" + ).set_name("LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveNesterovStrategy import ( + EnhancedQuantumAdaptiveNesterovStrategy, + ) + + lama_register["EnhancedQuantumAdaptiveNesterovStrategy"] = EnhancedQuantumAdaptiveNesterovStrategy + LLAMAEnhancedQuantumAdaptiveNesterovStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy" + ).set_name("LLAMAEnhancedQuantumAdaptiveNesterovStrategy", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveNesterovStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveOptimizer import EnhancedQuantumAdaptiveOptimizer + + lama_register["EnhancedQuantumAdaptiveOptimizer"] = EnhancedQuantumAdaptiveOptimizer + LLAMAEnhancedQuantumAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveOptimizer" + ).set_name("LLAMAEnhancedQuantumAdaptiveOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumAnnealingOptimizer import ( + EnhancedQuantumAnnealingOptimizer, + ) + + lama_register["EnhancedQuantumAnnealingOptimizer"] = EnhancedQuantumAnnealingOptimizer + LLAMAEnhancedQuantumAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAnnealingOptimizer" + ).set_name("LLAMAEnhancedQuantumAnnealingOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCognitionFocusedOptimizerV18 import ( + EnhancedQuantumCognitionFocusedOptimizerV18, + ) + + lama_register["EnhancedQuantumCognitionFocusedOptimizerV18"] = EnhancedQuantumCognitionFocusedOptimizerV18 + LLAMAEnhancedQuantumCognitionFocusedOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18" + ).set_name("LLAMAEnhancedQuantumCognitionFocusedOptimizerV18", register=True) +except Exception as e: + print("EnhancedQuantumCognitionFocusedOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCognitionOptimizerV12 import ( + EnhancedQuantumCognitionOptimizerV12, + ) + + lama_register["EnhancedQuantumCognitionOptimizerV12"] = EnhancedQuantumCognitionOptimizerV12 + LLAMAEnhancedQuantumCognitionOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCognitionOptimizerV12" + ).set_name("LLAMAEnhancedQuantumCognitionOptimizerV12", register=True) +except Exception as e: + print("EnhancedQuantumCognitionOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCooperativeStrategy import ( + EnhancedQuantumCooperativeStrategy, + ) + + lama_register["EnhancedQuantumCooperativeStrategy"] = EnhancedQuantumCooperativeStrategy + LLAMAEnhancedQuantumCooperativeStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCooperativeStrategy" + ).set_name("LLAMAEnhancedQuantumCooperativeStrategy", register=True) +except Exception as e: + print("EnhancedQuantumCooperativeStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolution import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolution + ) + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus + ) + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus", register=True) +except Exception as e: + print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + ) + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) +except Exception as e: + print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts import ( + EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts, + ) + + lama_register["EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts"] = ( + EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts + ) + LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts" + ).set_name("LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolution import ( + EnhancedQuantumDifferentialEvolution, + ) + + lama_register["EnhancedQuantumDifferentialEvolution"] = EnhancedQuantumDifferentialEvolution + LLAMAEnhancedQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart + ) + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts + ) + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory + ) + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism import ( + EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism"] = ( + EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism + ) + LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism import ( + EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism, + ) + + lama_register["EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism"] = ( + EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism + ) + LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism" + ).set_name("LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleSwarmOptimizer import ( + EnhancedQuantumDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedQuantumDifferentialParticleSwarmOptimizer"] = ( + EnhancedQuantumDifferentialParticleSwarmOptimizer + ) + LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDiversityDE import EnhancedQuantumDiversityDE + + lama_register["EnhancedQuantumDiversityDE"] = EnhancedQuantumDiversityDE + LLAMAEnhancedQuantumDiversityDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE").set_name( + "LLAMAEnhancedQuantumDiversityDE", register=True + ) +except Exception as e: + print("EnhancedQuantumDiversityDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDynamicAdaptiveHybridDEPSO import ( + EnhancedQuantumDynamicAdaptiveHybridDEPSO, + ) + + lama_register["EnhancedQuantumDynamicAdaptiveHybridDEPSO"] = EnhancedQuantumDynamicAdaptiveHybridDEPSO + LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("EnhancedQuantumDynamicAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDynamicBalanceOptimizer import ( + EnhancedQuantumDynamicBalanceOptimizer, + ) + + lama_register["EnhancedQuantumDynamicBalanceOptimizer"] = EnhancedQuantumDynamicBalanceOptimizer + LLAMAEnhancedQuantumDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicBalanceOptimizer" + ).set_name("LLAMAEnhancedQuantumDynamicBalanceOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumDynamicBalanceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumDynamicOptimizer import EnhancedQuantumDynamicOptimizer + + lama_register["EnhancedQuantumDynamicOptimizer"] = EnhancedQuantumDynamicOptimizer + LLAMAEnhancedQuantumDynamicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicOptimizer" + ).set_name("LLAMAEnhancedQuantumDynamicOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumDynamicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumEvolutionStrategy import EnhancedQuantumEvolutionStrategy + + lama_register["EnhancedQuantumEvolutionStrategy"] = EnhancedQuantumEvolutionStrategy + LLAMAEnhancedQuantumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumEvolutionStrategy" + ).set_name("LLAMAEnhancedQuantumEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedQuantumEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithm import ( + EnhancedQuantumFireworksAlgorithm, + ) + + lama_register["EnhancedQuantumFireworksAlgorithm"] = EnhancedQuantumFireworksAlgorithm + LLAMAEnhancedQuantumFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedQuantumFireworksAlgorithm" + ).set_name("LLAMAEnhancedQuantumFireworksAlgorithm", register=True) +except Exception as e: + print("EnhancedQuantumFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithmV2 import ( + EnhancedQuantumFireworksAlgorithmV2, + ) + + lama_register["EnhancedQuantumFireworksAlgorithmV2"] = EnhancedQuantumFireworksAlgorithmV2 + LLAMAEnhancedQuantumFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedQuantumFireworksAlgorithmV2", register=True) +except Exception as e: + print("EnhancedQuantumFireworksAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimization import ( + EnhancedQuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimization"] = ( + EnhancedQuantumGradientAdaptiveExplorationOptimization + ) + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("EnhancedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 import ( + EnhancedQuantumGradientAdaptiveExplorationOptimizationV5, + ) + + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimizationV5"] = ( + EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 + ) + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5" + ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5", register=True) +except Exception as e: + print("EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimization import ( + EnhancedQuantumGradientExplorationOptimization, + ) + + lama_register["EnhancedQuantumGradientExplorationOptimization"] = ( + EnhancedQuantumGradientExplorationOptimization + ) + LLAMAEnhancedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientExplorationOptimization" + ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimization", register=True) +except Exception as e: + print("EnhancedQuantumGradientExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimizationV2 import ( + EnhancedQuantumGradientExplorationOptimizationV2, + ) + + lama_register["EnhancedQuantumGradientExplorationOptimizationV2"] = ( + EnhancedQuantumGradientExplorationOptimizationV2 + ) + LLAMAEnhancedQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2" + ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimizationV2", register=True) +except Exception as e: + print("EnhancedQuantumGradientExplorationOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientMemeticOptimizer import ( + EnhancedQuantumGradientMemeticOptimizer, + ) + + lama_register["EnhancedQuantumGradientMemeticOptimizer"] = EnhancedQuantumGradientMemeticOptimizer + LLAMAEnhancedQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumGradientMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumGradientMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumGradientOptimizerV5 import ( + EnhancedQuantumGradientOptimizerV5, + ) + + lama_register["EnhancedQuantumGradientOptimizerV5"] = EnhancedQuantumGradientOptimizerV5 + LLAMAEnhancedQuantumGradientOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientOptimizerV5" + ).set_name("LLAMAEnhancedQuantumGradientOptimizerV5", register=True) +except Exception as e: + print("EnhancedQuantumGradientOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonicAdaptationStrategy import ( + EnhancedQuantumHarmonicAdaptationStrategy, + ) + + lama_register["EnhancedQuantumHarmonicAdaptationStrategy"] = EnhancedQuantumHarmonicAdaptationStrategy + LLAMAEnhancedQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy" + ).set_name("LLAMAEnhancedQuantumHarmonicAdaptationStrategy", register=True) +except Exception as e: + print("EnhancedQuantumHarmonicAdaptationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonyMemeticAlgorithm import ( + EnhancedQuantumHarmonyMemeticAlgorithm, + ) + + lama_register["EnhancedQuantumHarmonyMemeticAlgorithm"] = EnhancedQuantumHarmonyMemeticAlgorithm + LLAMAEnhancedQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm" + ).set_name("LLAMAEnhancedQuantumHarmonyMemeticAlgorithm", register=True) +except Exception as e: + print("EnhancedQuantumHarmonyMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearch import EnhancedQuantumHarmonySearch + + lama_register["EnhancedQuantumHarmonySearch"] = EnhancedQuantumHarmonySearch + LLAMAEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearch" + ).set_name("LLAMAEnhancedQuantumHarmonySearch", register=True) +except Exception as e: + print("EnhancedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchAB import EnhancedQuantumHarmonySearchAB + + lama_register["EnhancedQuantumHarmonySearchAB"] = EnhancedQuantumHarmonySearchAB + LLAMAEnhancedQuantumHarmonySearchAB = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchAB" + ).set_name("LLAMAEnhancedQuantumHarmonySearchAB", register=True) +except Exception as e: + print("EnhancedQuantumHarmonySearchAB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGB import EnhancedQuantumHarmonySearchABGB + + lama_register["EnhancedQuantumHarmonySearchABGB"] = EnhancedQuantumHarmonySearchABGB + LLAMAEnhancedQuantumHarmonySearchABGB = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchABGB" + ).set_name("LLAMAEnhancedQuantumHarmonySearchABGB", register=True) +except Exception as e: + print("EnhancedQuantumHarmonySearchABGB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGBRefined import ( + EnhancedQuantumHarmonySearchABGBRefined, + ) + + lama_register["EnhancedQuantumHarmonySearchABGBRefined"] = EnhancedQuantumHarmonySearchABGBRefined + LLAMAEnhancedQuantumHarmonySearchABGBRefined = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchABGBRefined" + ).set_name("LLAMAEnhancedQuantumHarmonySearchABGBRefined", register=True) +except Exception as e: + print("EnhancedQuantumHarmonySearchABGBRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE import EnhancedQuantumHybridAdaptiveDE + + lama_register["EnhancedQuantumHybridAdaptiveDE"] = EnhancedQuantumHybridAdaptiveDE + LLAMAEnhancedQuantumHybridAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHybridAdaptiveDE" + ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE", register=True) +except Exception as e: + print("EnhancedQuantumHybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE_v2 import ( + EnhancedQuantumHybridAdaptiveDE_v2, + ) + + lama_register["EnhancedQuantumHybridAdaptiveDE_v2"] = EnhancedQuantumHybridAdaptiveDE_v2 + LLAMAEnhancedQuantumHybridAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2" + ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE_v2", register=True) +except Exception as e: + print("EnhancedQuantumHybridAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumInformedGradientOptimizer import ( + EnhancedQuantumInformedGradientOptimizer, + ) + + lama_register["EnhancedQuantumInformedGradientOptimizer"] = EnhancedQuantumInformedGradientOptimizer + LLAMAEnhancedQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInformedGradientOptimizer" + ).set_name("LLAMAEnhancedQuantumInformedGradientOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumInformedGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumInfusedAdaptiveStrategy import ( + EnhancedQuantumInfusedAdaptiveStrategy, + ) + + lama_register["EnhancedQuantumInfusedAdaptiveStrategy"] = EnhancedQuantumInfusedAdaptiveStrategy + LLAMAEnhancedQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy" + ).set_name("LLAMAEnhancedQuantumInfusedAdaptiveStrategy", register=True) +except Exception as e: + print("EnhancedQuantumInfusedAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumInspiredHybridOptimizer import ( + EnhancedQuantumInspiredHybridOptimizer, + ) + + lama_register["EnhancedQuantumInspiredHybridOptimizer"] = EnhancedQuantumInspiredHybridOptimizer + LLAMAEnhancedQuantumInspiredHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInspiredHybridOptimizer" + ).set_name("LLAMAEnhancedQuantumInspiredHybridOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumInspiredHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumIterativeRefinement import ( + EnhancedQuantumIterativeRefinement, + ) + + lama_register["EnhancedQuantumIterativeRefinement"] = EnhancedQuantumIterativeRefinement + LLAMAEnhancedQuantumIterativeRefinement = NonObjectOptimizer( + method="LLAMAEnhancedQuantumIterativeRefinement" + ).set_name("LLAMAEnhancedQuantumIterativeRefinement", register=True) +except Exception as e: + print("EnhancedQuantumIterativeRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLeapGradientBoostPSO import ( + EnhancedQuantumLeapGradientBoostPSO, + ) + + lama_register["EnhancedQuantumLeapGradientBoostPSO"] = EnhancedQuantumLeapGradientBoostPSO + LLAMAEnhancedQuantumLeapGradientBoostPSO = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLeapGradientBoostPSO" + ).set_name("LLAMAEnhancedQuantumLeapGradientBoostPSO", register=True) +except Exception as e: + print("EnhancedQuantumLeapGradientBoostPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLeapPSO import EnhancedQuantumLeapPSO + + lama_register["EnhancedQuantumLeapPSO"] = EnhancedQuantumLeapPSO + LLAMAEnhancedQuantumLeapPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO").set_name( + "LLAMAEnhancedQuantumLeapPSO", register=True + ) +except Exception as e: + print("EnhancedQuantumLeapPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialDynamicOptimizer import ( + EnhancedQuantumLevyDifferentialDynamicOptimizer, + ) + + lama_register["EnhancedQuantumLevyDifferentialDynamicOptimizer"] = ( + EnhancedQuantumLevyDifferentialDynamicOptimizer + ) + LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialOptimizer import ( + EnhancedQuantumLevyDifferentialOptimizer, + ) + + lama_register["EnhancedQuantumLevyDifferentialOptimizer"] = EnhancedQuantumLevyDifferentialOptimizer + LLAMAEnhancedQuantumLevyDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumLevyDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialSearch import ( + EnhancedQuantumLevyDifferentialSearch, + ) + + lama_register["EnhancedQuantumLevyDifferentialSearch"] = EnhancedQuantumLevyDifferentialSearch + LLAMAEnhancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialSearch" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: + print("EnhancedQuantumLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLevyMemeticOptimizer import ( + EnhancedQuantumLevyMemeticOptimizer, + ) + + lama_register["EnhancedQuantumLevyMemeticOptimizer"] = EnhancedQuantumLevyMemeticOptimizer + LLAMAEnhancedQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumLevyMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLevyParticleOptimization import ( + EnhancedQuantumLevyParticleOptimization, + ) + + lama_register["EnhancedQuantumLevyParticleOptimization"] = EnhancedQuantumLevyParticleOptimization + LLAMAEnhancedQuantumLevyParticleOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyParticleOptimization" + ).set_name("LLAMAEnhancedQuantumLevyParticleOptimization", register=True) +except Exception as e: + print("EnhancedQuantumLevyParticleOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLocalSearch import EnhancedQuantumLocalSearch + + lama_register["EnhancedQuantumLocalSearch"] = EnhancedQuantumLocalSearch + LLAMAEnhancedQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch").set_name( + "LLAMAEnhancedQuantumLocalSearch", register=True + ) +except Exception as e: + print("EnhancedQuantumLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumLocalSearchImproved import ( + EnhancedQuantumLocalSearchImproved, + ) + + lama_register["EnhancedQuantumLocalSearchImproved"] = EnhancedQuantumLocalSearchImproved + LLAMAEnhancedQuantumLocalSearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLocalSearchImproved" + ).set_name("LLAMAEnhancedQuantumLocalSearchImproved", register=True) +except Exception as e: + print("EnhancedQuantumLocalSearchImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizer import EnhancedQuantumMemeticOptimizer + + lama_register["EnhancedQuantumMemeticOptimizer"] = EnhancedQuantumMemeticOptimizer + LLAMAEnhancedQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumMemeticOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizerV5 import ( + EnhancedQuantumMemeticOptimizerV5, + ) + + lama_register["EnhancedQuantumMemeticOptimizerV5"] = EnhancedQuantumMemeticOptimizerV5 + LLAMAEnhancedQuantumMemeticOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMemeticOptimizerV5" + ).set_name("LLAMAEnhancedQuantumMemeticOptimizerV5", register=True) +except Exception as e: + print("EnhancedQuantumMemeticOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumMultiPhaseAdaptiveDE_v10 import ( + EnhancedQuantumMultiPhaseAdaptiveDE_v10, + ) + + lama_register["EnhancedQuantumMultiPhaseAdaptiveDE_v10"] = EnhancedQuantumMultiPhaseAdaptiveDE_v10 + LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10" + ).set_name("LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10", register=True) +except Exception as e: + print("EnhancedQuantumMultiPhaseAdaptiveDE_v10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumMultiStrategyOptimization_v2 import ( + EnhancedQuantumMultiStrategyOptimization_v2, + ) + + lama_register["EnhancedQuantumMultiStrategyOptimization_v2"] = EnhancedQuantumMultiStrategyOptimization_v2 + LLAMAEnhancedQuantumMultiStrategyOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2" + ).set_name("LLAMAEnhancedQuantumMultiStrategyOptimization_v2", register=True) +except Exception as e: + print("EnhancedQuantumMultiStrategyOptimization_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumPSO import EnhancedQuantumPSO + + lama_register["EnhancedQuantumPSO"] = EnhancedQuantumPSO + LLAMAEnhancedQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO").set_name( + "LLAMAEnhancedQuantumPSO", register=True + ) +except Exception as e: + print("EnhancedQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumReactiveCooperativeStrategy import ( + EnhancedQuantumReactiveCooperativeStrategy, + ) + + lama_register["EnhancedQuantumReactiveCooperativeStrategy"] = EnhancedQuantumReactiveCooperativeStrategy + LLAMAEnhancedQuantumReactiveCooperativeStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumReactiveCooperativeStrategy" + ).set_name("LLAMAEnhancedQuantumReactiveCooperativeStrategy", register=True) +except Exception as e: + print("EnhancedQuantumReactiveCooperativeStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumReinforcedNesterovAcceleratorV2 import ( + EnhancedQuantumReinforcedNesterovAcceleratorV2, + ) + + lama_register["EnhancedQuantumReinforcedNesterovAcceleratorV2"] = ( + EnhancedQuantumReinforcedNesterovAcceleratorV2 + ) + LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2" + ).set_name("LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2", register=True) +except Exception as e: + print("EnhancedQuantumReinforcedNesterovAcceleratorV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumResilientCrossoverStrategyV2 import ( + EnhancedQuantumResilientCrossoverStrategyV2, + ) + + lama_register["EnhancedQuantumResilientCrossoverStrategyV2"] = EnhancedQuantumResilientCrossoverStrategyV2 + LLAMAEnhancedQuantumResilientCrossoverStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2" + ).set_name("LLAMAEnhancedQuantumResilientCrossoverStrategyV2", register=True) +except Exception as e: + print("EnhancedQuantumResilientCrossoverStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealing import ( + EnhancedQuantumSimulatedAnnealing, + ) + + lama_register["EnhancedQuantumSimulatedAnnealing"] = EnhancedQuantumSimulatedAnnealing + LLAMAEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingImproved import ( + EnhancedQuantumSimulatedAnnealingImproved, + ) + + lama_register["EnhancedQuantumSimulatedAnnealingImproved"] = EnhancedQuantumSimulatedAnnealingImproved + LLAMAEnhancedQuantumSimulatedAnnealingImproved = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingImproved" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingImproved", register=True) +except Exception as e: + print("EnhancedQuantumSimulatedAnnealingImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingOptimized import ( + EnhancedQuantumSimulatedAnnealingOptimized, + ) + + lama_register["EnhancedQuantumSimulatedAnnealingOptimized"] = EnhancedQuantumSimulatedAnnealingOptimized + LLAMAEnhancedQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingOptimized", register=True) +except Exception as e: + print("EnhancedQuantumSimulatedAnnealingOptimized can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingV2 import ( + EnhancedQuantumSimulatedAnnealingV2, + ) + + lama_register["EnhancedQuantumSimulatedAnnealingV2"] = EnhancedQuantumSimulatedAnnealingV2 + LLAMAEnhancedQuantumSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingV2", register=True) +except Exception as e: + print("EnhancedQuantumSimulatedAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumStateConvergenceOptimizer import ( + EnhancedQuantumStateConvergenceOptimizer, + ) + + lama_register["EnhancedQuantumStateConvergenceOptimizer"] = EnhancedQuantumStateConvergenceOptimizer + LLAMAEnhancedQuantumStateConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumStateConvergenceOptimizer" + ).set_name("LLAMAEnhancedQuantumStateConvergenceOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumStateConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimization import EnhancedQuantumSwarmOptimization + + lama_register["EnhancedQuantumSwarmOptimization"] = EnhancedQuantumSwarmOptimization + LLAMAEnhancedQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationRefined import ( + EnhancedQuantumSwarmOptimizationRefined, + ) + + lama_register["EnhancedQuantumSwarmOptimizationRefined"] = EnhancedQuantumSwarmOptimizationRefined + LLAMAEnhancedQuantumSwarmOptimizationRefined = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationRefined" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationRefined", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV10 import ( + EnhancedQuantumSwarmOptimizationV10, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV10"] = EnhancedQuantumSwarmOptimizationV10 + LLAMAEnhancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV10", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV11 import ( + EnhancedQuantumSwarmOptimizationV11, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV11"] = EnhancedQuantumSwarmOptimizationV11 + LLAMAEnhancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV11", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV12 import ( + EnhancedQuantumSwarmOptimizationV12, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV12"] = EnhancedQuantumSwarmOptimizationV12 + LLAMAEnhancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV12", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV13 import ( + EnhancedQuantumSwarmOptimizationV13, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV13"] = EnhancedQuantumSwarmOptimizationV13 + LLAMAEnhancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV13", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV2 import ( + EnhancedQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV2"] = EnhancedQuantumSwarmOptimizationV2 + LLAMAEnhancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV3 import ( + EnhancedQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV3"] = EnhancedQuantumSwarmOptimizationV3 + LLAMAEnhancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV4 import ( + EnhancedQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV4"] = EnhancedQuantumSwarmOptimizationV4 + LLAMAEnhancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV5 import ( + EnhancedQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV5"] = EnhancedQuantumSwarmOptimizationV5 + LLAMAEnhancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV6 import ( + EnhancedQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV6"] = EnhancedQuantumSwarmOptimizationV6 + LLAMAEnhancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV7 import ( + EnhancedQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV7"] = EnhancedQuantumSwarmOptimizationV7 + LLAMAEnhancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV7", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV8 import ( + EnhancedQuantumSwarmOptimizationV8, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV8"] = EnhancedQuantumSwarmOptimizationV8 + LLAMAEnhancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV8", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV9 import ( + EnhancedQuantumSwarmOptimizationV9, + ) + + lama_register["EnhancedQuantumSwarmOptimizationV9"] = EnhancedQuantumSwarmOptimizationV9 + LLAMAEnhancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV9", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizerV4 import EnhancedQuantumSwarmOptimizerV4 + + lama_register["EnhancedQuantumSwarmOptimizerV4"] = EnhancedQuantumSwarmOptimizerV4 + LLAMAEnhancedQuantumSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizerV4" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizerV4", register=True) +except Exception as e: + print("EnhancedQuantumSwarmOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSymbioticStrategyV5 import ( + EnhancedQuantumSymbioticStrategyV5, + ) + + lama_register["EnhancedQuantumSymbioticStrategyV5"] = EnhancedQuantumSymbioticStrategyV5 + LLAMAEnhancedQuantumSymbioticStrategyV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSymbioticStrategyV5" + ).set_name("LLAMAEnhancedQuantumSymbioticStrategyV5", register=True) +except Exception as e: + print("EnhancedQuantumSymbioticStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumSynergyStrategyV2 import EnhancedQuantumSynergyStrategyV2 + + lama_register["EnhancedQuantumSynergyStrategyV2"] = EnhancedQuantumSynergyStrategyV2 + LLAMAEnhancedQuantumSynergyStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSynergyStrategyV2" + ).set_name("LLAMAEnhancedQuantumSynergyStrategyV2", register=True) +except Exception as e: + print("EnhancedQuantumSynergyStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedQuantumTunnelingOptimizer import ( + EnhancedQuantumTunnelingOptimizer, + ) + + lama_register["EnhancedQuantumTunnelingOptimizer"] = EnhancedQuantumTunnelingOptimizer + LLAMAEnhancedQuantumTunnelingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumTunnelingOptimizer" + ).set_name("LLAMAEnhancedQuantumTunnelingOptimizer", register=True) +except Exception as e: + print("EnhancedQuantumTunnelingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRAMEDS import EnhancedRAMEDS + + lama_register["EnhancedRAMEDS"] = EnhancedRAMEDS + LLAMAEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS").set_name( + "LLAMAEnhancedRAMEDS", register=True + ) +except Exception as e: + print("EnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRAMEDSPro import EnhancedRAMEDSPro + + lama_register["EnhancedRAMEDSPro"] = EnhancedRAMEDSPro + LLAMAEnhancedRAMEDSPro = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro").set_name( + "LLAMAEnhancedRAMEDSPro", register=True + ) +except Exception as e: + print("EnhancedRAMEDSPro can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRAMEDSProV2 import EnhancedRAMEDSProV2 + + lama_register["EnhancedRAMEDSProV2"] = EnhancedRAMEDSProV2 + LLAMAEnhancedRAMEDSProV2 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2").set_name( + "LLAMAEnhancedRAMEDSProV2", register=True + ) +except Exception as e: + print("EnhancedRAMEDSProV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRAMEDSv3 import EnhancedRAMEDSv3 + + lama_register["EnhancedRAMEDSv3"] = EnhancedRAMEDSv3 + LLAMAEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3").set_name( + "LLAMAEnhancedRAMEDSv3", register=True + ) +except Exception as e: + print("EnhancedRAMEDSv3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRAMEDSv4 import EnhancedRAMEDSv4 + + lama_register["EnhancedRAMEDSv4"] = EnhancedRAMEDSv4 + LLAMAEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4").set_name( + "LLAMAEnhancedRAMEDSv4", register=True + ) +except Exception as e: + print("EnhancedRAMEDSv4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolution import ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolution"] = ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolution + ) + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus import ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus, + ) + + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus"] = ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus + ) + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus" + ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( + EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost + ) + LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSearch import ( + EnhancedRefinedAdaptiveDifferentialSearch, + ) + + lama_register["EnhancedRefinedAdaptiveDifferentialSearch"] = EnhancedRefinedAdaptiveDifferentialSearch + LLAMAEnhancedRefinedAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSearch", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSpiralSearch import ( + EnhancedRefinedAdaptiveDifferentialSpiralSearch, + ) + + lama_register["EnhancedRefinedAdaptiveDifferentialSpiralSearch"] = ( + EnhancedRefinedAdaptiveDifferentialSpiralSearch + ) + LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDE import EnhancedRefinedAdaptiveDynamicDE + + lama_register["EnhancedRefinedAdaptiveDynamicDE"] = EnhancedRefinedAdaptiveDynamicDE + LLAMAEnhancedRefinedAdaptiveDynamicDE = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicDE" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDE", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 import ( + EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15"] = ( + EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 + ) + LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicExplorationOptimization import ( + EnhancedRefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicExplorationOptimization"] = ( + EnhancedRefinedAdaptiveDynamicExplorationOptimization + ) + LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveFocusedEvolutionStrategy import ( + EnhancedRefinedAdaptiveFocusedEvolutionStrategy, + ) + + lama_register["EnhancedRefinedAdaptiveFocusedEvolutionStrategy"] = ( + EnhancedRefinedAdaptiveFocusedEvolutionStrategy + ) + LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy" + ).set_name("LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveFocusedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveHarmonySearch import ( + EnhancedRefinedAdaptiveHarmonySearch, + ) + + lama_register["EnhancedRefinedAdaptiveHarmonySearch"] = EnhancedRefinedAdaptiveHarmonySearch + LLAMAEnhancedRefinedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveHarmonySearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveHarmonySearch", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMemeticDiverseOptimizer import ( + EnhancedRefinedAdaptiveMemeticDiverseOptimizer, + ) + + lama_register["EnhancedRefinedAdaptiveMemeticDiverseOptimizer"] = ( + EnhancedRefinedAdaptiveMemeticDiverseOptimizer + ) + LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v4 import ( + EnhancedRefinedAdaptiveMetaNetPSO_v4, + ) + + lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v4"] = EnhancedRefinedAdaptiveMetaNetPSO_v4 + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4" + ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveMetaNetPSO_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v5 import ( + EnhancedRefinedAdaptiveMetaNetPSO_v5, + ) + + lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v5"] = EnhancedRefinedAdaptiveMetaNetPSO_v5 + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5" + ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveMetaNetPSO_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v49 import EnhancedRefinedAdaptiveQGSA_v49 + + lama_register["EnhancedRefinedAdaptiveQGSA_v49"] = EnhancedRefinedAdaptiveQGSA_v49 + LLAMAEnhancedRefinedAdaptiveQGSA_v49 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v49" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v49", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v49 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v52 import EnhancedRefinedAdaptiveQGSA_v52 + + lama_register["EnhancedRefinedAdaptiveQGSA_v52"] = EnhancedRefinedAdaptiveQGSA_v52 + LLAMAEnhancedRefinedAdaptiveQGSA_v52 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v52" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v52", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v53 import EnhancedRefinedAdaptiveQGSA_v53 + + lama_register["EnhancedRefinedAdaptiveQGSA_v53"] = EnhancedRefinedAdaptiveQGSA_v53 + LLAMAEnhancedRefinedAdaptiveQGSA_v53 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v53" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v53", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v53 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v54 import EnhancedRefinedAdaptiveQGSA_v54 + + lama_register["EnhancedRefinedAdaptiveQGSA_v54"] = EnhancedRefinedAdaptiveQGSA_v54 + LLAMAEnhancedRefinedAdaptiveQGSA_v54 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v54" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v54", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v54 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v55 import EnhancedRefinedAdaptiveQGSA_v55 + + lama_register["EnhancedRefinedAdaptiveQGSA_v55"] = EnhancedRefinedAdaptiveQGSA_v55 + LLAMAEnhancedRefinedAdaptiveQGSA_v55 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v55" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v55", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v55 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v56 import EnhancedRefinedAdaptiveQGSA_v56 + + lama_register["EnhancedRefinedAdaptiveQGSA_v56"] = EnhancedRefinedAdaptiveQGSA_v56 + LLAMAEnhancedRefinedAdaptiveQGSA_v56 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v56" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v56", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v56 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v57 import EnhancedRefinedAdaptiveQGSA_v57 + + lama_register["EnhancedRefinedAdaptiveQGSA_v57"] = EnhancedRefinedAdaptiveQGSA_v57 + LLAMAEnhancedRefinedAdaptiveQGSA_v57 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v57" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v57", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v57 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v58 import EnhancedRefinedAdaptiveQGSA_v58 + + lama_register["EnhancedRefinedAdaptiveQGSA_v58"] = EnhancedRefinedAdaptiveQGSA_v58 + LLAMAEnhancedRefinedAdaptiveQGSA_v58 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v58" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v58", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v58 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v59 import EnhancedRefinedAdaptiveQGSA_v59 + + lama_register["EnhancedRefinedAdaptiveQGSA_v59"] = EnhancedRefinedAdaptiveQGSA_v59 + LLAMAEnhancedRefinedAdaptiveQGSA_v59 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v59" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v59", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v59 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v60 import EnhancedRefinedAdaptiveQGSA_v60 + + lama_register["EnhancedRefinedAdaptiveQGSA_v60"] = EnhancedRefinedAdaptiveQGSA_v60 + LLAMAEnhancedRefinedAdaptiveQGSA_v60 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v60" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v60", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveQGSA_v60 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSpiralGradientSearch import ( + EnhancedRefinedAdaptiveSpiralGradientSearch, + ) + + lama_register["EnhancedRefinedAdaptiveSpiralGradientSearch"] = EnhancedRefinedAdaptiveSpiralGradientSearch + LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveSpiralGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 import ( + EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3, + ) + + lama_register["EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3"] = ( + EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 + ) + LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3" + ).set_name("LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3", register=True) +except Exception as e: + print("EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedDualStrategyAdaptiveDE import ( + EnhancedRefinedDualStrategyAdaptiveDE, + ) + + lama_register["EnhancedRefinedDualStrategyAdaptiveDE"] = EnhancedRefinedDualStrategyAdaptiveDE + LLAMAEnhancedRefinedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedRefinedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("EnhancedRefinedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedDynamicFireworkAlgorithm import ( + EnhancedRefinedDynamicFireworkAlgorithm, + ) + + lama_register["EnhancedRefinedDynamicFireworkAlgorithm"] = EnhancedRefinedDynamicFireworkAlgorithm + LLAMAEnhancedRefinedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedRefinedDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedRefinedDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( + EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( + EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( + EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer + ) + LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedEliteDynamicMemoryHybridOptimizer import ( + EnhancedRefinedEliteDynamicMemoryHybridOptimizer, + ) + + lama_register["EnhancedRefinedEliteDynamicMemoryHybridOptimizer"] = ( + EnhancedRefinedEliteDynamicMemoryHybridOptimizer + ) + LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 import ( + EnhancedRefinedEvolutionaryGradientHybridOptimizerV4, + ) + + lama_register["EnhancedRefinedEvolutionaryGradientHybridOptimizerV4"] = ( + EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 + ) + LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4" + ).set_name("LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4", register=True) +except Exception as e: + print("EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGradientBoostedMemoryAnnealing import ( + EnhancedRefinedGradientBoostedMemoryAnnealing, + ) + + lama_register["EnhancedRefinedGradientBoostedMemoryAnnealing"] = ( + EnhancedRefinedGradientBoostedMemoryAnnealing + ) + LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("EnhancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v88 import ( + EnhancedRefinedGuidedMassQGSA_v88, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v88"] = EnhancedRefinedGuidedMassQGSA_v88 + LLAMAEnhancedRefinedGuidedMassQGSA_v88 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v88" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v88", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v88 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v89 import ( + EnhancedRefinedGuidedMassQGSA_v89, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v89"] = EnhancedRefinedGuidedMassQGSA_v89 + LLAMAEnhancedRefinedGuidedMassQGSA_v89 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v89" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v89", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v89 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v90 import ( + EnhancedRefinedGuidedMassQGSA_v90, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v90"] = EnhancedRefinedGuidedMassQGSA_v90 + LLAMAEnhancedRefinedGuidedMassQGSA_v90 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v90" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v90", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v90 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v91 import ( + EnhancedRefinedGuidedMassQGSA_v91, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v91"] = EnhancedRefinedGuidedMassQGSA_v91 + LLAMAEnhancedRefinedGuidedMassQGSA_v91 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v91" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v91", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v91 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v92 import ( + EnhancedRefinedGuidedMassQGSA_v92, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v92"] = EnhancedRefinedGuidedMassQGSA_v92 + LLAMAEnhancedRefinedGuidedMassQGSA_v92 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v92" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v92", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v92 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v93 import ( + EnhancedRefinedGuidedMassQGSA_v93, + ) + + lama_register["EnhancedRefinedGuidedMassQGSA_v93"] = EnhancedRefinedGuidedMassQGSA_v93 + LLAMAEnhancedRefinedGuidedMassQGSA_v93 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v93" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v93", register=True) +except Exception as e: + print("EnhancedRefinedGuidedMassQGSA_v93 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution import ( + EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution"] = ( + EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution + ) + LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHybridDEPSOWithDynamicAdaptation import ( + EnhancedRefinedHybridDEPSOWithDynamicAdaptation, + ) + + lama_register["EnhancedRefinedHybridDEPSOWithDynamicAdaptation"] = ( + EnhancedRefinedHybridDEPSOWithDynamicAdaptation + ) + LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation" + ).set_name("LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", register=True) +except Exception as e: + print("EnhancedRefinedHybridDEPSOWithDynamicAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( + EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution + ) + LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHybridOptimizer import EnhancedRefinedHybridOptimizer + + lama_register["EnhancedRefinedHybridOptimizer"] = EnhancedRefinedHybridOptimizer + LLAMAEnhancedRefinedHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedHybridOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 import ( + EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3, + ) + + lama_register["EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3"] = ( + EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 + ) + LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3" + ).set_name("LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3", register=True) +except Exception as e: + print("EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer import ( + EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer, + ) + + lama_register["EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( + EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer + ) + LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSO import EnhancedRefinedMetaNetAQAPSO + + lama_register["EnhancedRefinedMetaNetAQAPSO"] = EnhancedRefinedMetaNetAQAPSO + LLAMAEnhancedRefinedMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSO" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSO", register=True) +except Exception as e: + print("EnhancedRefinedMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv8 import EnhancedRefinedMetaNetAQAPSOv8 + + lama_register["EnhancedRefinedMetaNetAQAPSOv8"] = EnhancedRefinedMetaNetAQAPSOv8 + LLAMAEnhancedRefinedMetaNetAQAPSOv8 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSOv8" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv8", register=True) +except Exception as e: + print("EnhancedRefinedMetaNetAQAPSOv8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv9 import EnhancedRefinedMetaNetAQAPSOv9 + + lama_register["EnhancedRefinedMetaNetAQAPSOv9"] = EnhancedRefinedMetaNetAQAPSOv9 + LLAMAEnhancedRefinedMetaNetAQAPSOv9 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSOv9" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv9", register=True) +except Exception as e: + print("EnhancedRefinedMetaNetAQAPSOv9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 import ( + EnhancedRefinedOptimalDynamicPrecisionOptimizerV16, + ) + + lama_register["EnhancedRefinedOptimalDynamicPrecisionOptimizerV16"] = ( + EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 + ) + LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16" + ).set_name("LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16", register=True) +except Exception as e: + print("EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization import ( + EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( + EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization + ) + LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedSpatialOptimizer import EnhancedRefinedSpatialOptimizer + + lama_register["EnhancedRefinedSpatialOptimizer"] = EnhancedRefinedSpatialOptimizer + LLAMAEnhancedRefinedSpatialOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedSpatialOptimizer" + ).set_name("LLAMAEnhancedRefinedSpatialOptimizer", register=True) +except Exception as e: + print("EnhancedRefinedSpatialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 import ( + EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35, + ) + + lama_register["EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35"] = ( + EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 + ) + LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35" + ).set_name("LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35", register=True) +except Exception as e: + print("EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v72 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v72, + ) + + lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v72"] = EnhancedRefinedUltimateGuidedMassQGSA_v72 + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72", register=True) +except Exception as e: + print("EnhancedRefinedUltimateGuidedMassQGSA_v72 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v73 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v73, + ) + + lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v73"] = EnhancedRefinedUltimateGuidedMassQGSA_v73 + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73", register=True) +except Exception as e: + print("EnhancedRefinedUltimateGuidedMassQGSA_v73 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v74 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v74, + ) + + lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v74"] = EnhancedRefinedUltimateGuidedMassQGSA_v74 + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74", register=True) +except Exception as e: + print("EnhancedRefinedUltimateGuidedMassQGSA_v74 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v76 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v76, + ) + + lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v76"] = EnhancedRefinedUltimateGuidedMassQGSA_v76 + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76", register=True) +except Exception as e: + print("EnhancedRefinedUltimateGuidedMassQGSA_v76 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 import ( + EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43, + ) + + lama_register["EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43"] = ( + EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 + ) + LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43" + ).set_name("LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43", register=True) +except Exception as e: + print("EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedResilientAdaptivePSO import EnhancedResilientAdaptivePSO + + lama_register["EnhancedResilientAdaptivePSO"] = EnhancedResilientAdaptivePSO + LLAMAEnhancedResilientAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedResilientAdaptivePSO" + ).set_name("LLAMAEnhancedResilientAdaptivePSO", register=True) +except Exception as e: + print("EnhancedResilientAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch import ( + EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch, + ) + + lama_register["EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch"] = ( + EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch + ) + LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch" + ).set_name("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch", register=True) +except Exception as e: + print("EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedRotationalClimbOptimizer import EnhancedRotationalClimbOptimizer + + lama_register["EnhancedRotationalClimbOptimizer"] = EnhancedRotationalClimbOptimizer + LLAMAEnhancedRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRotationalClimbOptimizer" + ).set_name("LLAMAEnhancedRotationalClimbOptimizer", register=True) +except Exception as e: + print("EnhancedRotationalClimbOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSelectiveEvolutionaryOptimizerV21 import ( + EnhancedSelectiveEvolutionaryOptimizerV21, + ) + + lama_register["EnhancedSelectiveEvolutionaryOptimizerV21"] = EnhancedSelectiveEvolutionaryOptimizerV21 + LLAMAEnhancedSelectiveEvolutionaryOptimizerV21 = NonObjectOptimizer( + method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21" + ).set_name("LLAMAEnhancedSelectiveEvolutionaryOptimizerV21", register=True) +except Exception as e: + print("EnhancedSelectiveEvolutionaryOptimizerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution import ( + EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution + ) + LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE import EnhancedSelfAdaptiveDE + + lama_register["EnhancedSelfAdaptiveDE"] = EnhancedSelfAdaptiveDE + LLAMAEnhancedSelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE").set_name( + "LLAMAEnhancedSelfAdaptiveDE", register=True + ) +except Exception as e: + print("EnhancedSelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE2 import EnhancedSelfAdaptiveDE2 + + lama_register["EnhancedSelfAdaptiveDE2"] = EnhancedSelfAdaptiveDE2 + LLAMAEnhancedSelfAdaptiveDE2 = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2").set_name( + "LLAMAEnhancedSelfAdaptiveDE2", register=True + ) +except Exception as e: + print("EnhancedSelfAdaptiveDE2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSelfAdaptiveMemeticAlgorithm import ( + EnhancedSelfAdaptiveMemeticAlgorithm, + ) + + lama_register["EnhancedSelfAdaptiveMemeticAlgorithm"] = EnhancedSelfAdaptiveMemeticAlgorithm + LLAMAEnhancedSelfAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm" + ).set_name("LLAMAEnhancedSelfAdaptiveMemeticAlgorithm", register=True) +except Exception as e: + print("EnhancedSelfAdaptiveMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSequentialQuadraticAdaptiveEvolutionStrategy import ( + EnhancedSequentialQuadraticAdaptiveEvolutionStrategy, + ) + + lama_register["EnhancedSequentialQuadraticAdaptiveEvolutionStrategy"] = ( + EnhancedSequentialQuadraticAdaptiveEvolutionStrategy + ) + LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("EnhancedSequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSpatialAdaptiveEvolver import EnhancedSpatialAdaptiveEvolver + + lama_register["EnhancedSpatialAdaptiveEvolver"] = EnhancedSpatialAdaptiveEvolver + LLAMAEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMAEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: + print("EnhancedSpatialAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSpatialAdaptiveOptimizer import EnhancedSpatialAdaptiveOptimizer + + lama_register["EnhancedSpatialAdaptiveOptimizer"] = EnhancedSpatialAdaptiveOptimizer + LLAMAEnhancedSpatialAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedSpatialAdaptiveOptimizer" + ).set_name("LLAMAEnhancedSpatialAdaptiveOptimizer", register=True) +except Exception as e: + print("EnhancedSpatialAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSpectralHybridOptimization import ( + EnhancedSpectralHybridOptimization, + ) + + lama_register["EnhancedSpectralHybridOptimization"] = EnhancedSpectralHybridOptimization + LLAMAEnhancedSpectralHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSpectralHybridOptimization" + ).set_name("LLAMAEnhancedSpectralHybridOptimization", register=True) +except Exception as e: + print("EnhancedSpectralHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover import ( + EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover, + ) + + lama_register["EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover"] = ( + EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover + ) + LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover = NonObjectOptimizer( + method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover" + ).set_name( + "LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover", register=True + ) +except Exception as e: + print( + "EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.EnhancedStochasticGradientDifferentialEvolution import ( + EnhancedStochasticGradientDifferentialEvolution, + ) + + lama_register["EnhancedStochasticGradientDifferentialEvolution"] = ( + EnhancedStochasticGradientDifferentialEvolution + ) + LLAMAEnhancedStochasticGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedStochasticGradientDifferentialEvolution" + ).set_name("LLAMAEnhancedStochasticGradientDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedStochasticGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStochasticMetaHeuristicOptimizer import ( + EnhancedStochasticMetaHeuristicOptimizer, + ) + + lama_register["EnhancedStochasticMetaHeuristicOptimizer"] = EnhancedStochasticMetaHeuristicOptimizer + LLAMAEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedStochasticMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedStochasticMetaHeuristicOptimizer", register=True) +except Exception as e: + print("EnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStrategicAdaptiveOptimizer import ( + EnhancedStrategicAdaptiveOptimizer, + ) + + lama_register["EnhancedStrategicAdaptiveOptimizer"] = EnhancedStrategicAdaptiveOptimizer + LLAMAEnhancedStrategicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedStrategicAdaptiveOptimizer" + ).set_name("LLAMAEnhancedStrategicAdaptiveOptimizer", register=True) +except Exception as e: + print("EnhancedStrategicAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStrategicMemoryAdaptiveStrategyV44 import ( + EnhancedStrategicMemoryAdaptiveStrategyV44, + ) + + lama_register["EnhancedStrategicMemoryAdaptiveStrategyV44"] = EnhancedStrategicMemoryAdaptiveStrategyV44 + LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44 = NonObjectOptimizer( + method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44" + ).set_name("LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44", register=True) +except Exception as e: + print("EnhancedStrategicMemoryAdaptiveStrategyV44 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStrategicPSO import EnhancedStrategicPSO + + lama_register["EnhancedStrategicPSO"] = EnhancedStrategicPSO + LLAMAEnhancedStrategicPSO = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO").set_name( + "LLAMAEnhancedStrategicPSO", register=True + ) +except Exception as e: + print("EnhancedStrategicPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedStrategyDE import EnhancedStrategyDE + + lama_register["EnhancedStrategyDE"] = EnhancedStrategyDE + LLAMAEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE").set_name( + "LLAMAEnhancedStrategyDE", register=True + ) +except Exception as e: + print("EnhancedStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimization import ( + EnhancedSuperDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimization"] = ( + EnhancedSuperDynamicQuantumSwarmOptimization + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV2 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV2"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV2 + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV3 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV3"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV3 + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV4 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV4"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV4 + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV5 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV5"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV5 + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV6 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV6"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV6 + ) + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6", register=True) +except Exception as e: + print("EnhancedSuperDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperRefinedRAMEDS import EnhancedSuperRefinedRAMEDS + + lama_register["EnhancedSuperRefinedRAMEDS"] = EnhancedSuperRefinedRAMEDS + LLAMAEnhancedSuperRefinedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS").set_name( + "LLAMAEnhancedSuperRefinedRAMEDS", register=True + ) +except Exception as e: + print("EnhancedSuperRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 + ) + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9", register=True) +except Exception as e: + print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSuperiorUltimateGuidedMassQGSA_v80 import ( + EnhancedSuperiorUltimateGuidedMassQGSA_v80, + ) + + lama_register["EnhancedSuperiorUltimateGuidedMassQGSA_v80"] = EnhancedSuperiorUltimateGuidedMassQGSA_v80 + LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80 = NonObjectOptimizer( + method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80" + ).set_name("LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80", register=True) +except Exception as e: + print("EnhancedSuperiorUltimateGuidedMassQGSA_v80 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSupremeDynamicPrecisionOptimizerV1 import ( + EnhancedSupremeDynamicPrecisionOptimizerV1, + ) + + lama_register["EnhancedSupremeDynamicPrecisionOptimizerV1"] = EnhancedSupremeDynamicPrecisionOptimizerV1 + LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1" + ).set_name("LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1", register=True) +except Exception as e: + print("EnhancedSupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedSwarmHybridOptimization import EnhancedSwarmHybridOptimization + + lama_register["EnhancedSwarmHybridOptimization"] = EnhancedSwarmHybridOptimization + LLAMAEnhancedSwarmHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSwarmHybridOptimization" + ).set_name("LLAMAEnhancedSwarmHybridOptimization", register=True) +except Exception as e: + print("EnhancedSwarmHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedTwoPhaseDynamicStrategyV39 import ( + EnhancedTwoPhaseDynamicStrategyV39, + ) + + lama_register["EnhancedTwoPhaseDynamicStrategyV39"] = EnhancedTwoPhaseDynamicStrategyV39 + LLAMAEnhancedTwoPhaseDynamicStrategyV39 = NonObjectOptimizer( + method="LLAMAEnhancedTwoPhaseDynamicStrategyV39" + ).set_name("LLAMAEnhancedTwoPhaseDynamicStrategyV39", register=True) +except Exception as e: + print("EnhancedTwoPhaseDynamicStrategyV39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithm import ( + EnhancedUltimateDynamicFireworkAlgorithm, + ) + + lama_register["EnhancedUltimateDynamicFireworkAlgorithm"] = EnhancedUltimateDynamicFireworkAlgorithm + LLAMAEnhancedUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("EnhancedUltimateDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithmImproved import ( + EnhancedUltimateDynamicFireworkAlgorithmImproved, + ) + + lama_register["EnhancedUltimateDynamicFireworkAlgorithmImproved"] = ( + EnhancedUltimateDynamicFireworkAlgorithmImproved + ) + LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: + print("EnhancedUltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateEvolutionaryGradientOptimizerV36 import ( + EnhancedUltimateEvolutionaryGradientOptimizerV36, + ) + + lama_register["EnhancedUltimateEvolutionaryGradientOptimizerV36"] = ( + EnhancedUltimateEvolutionaryGradientOptimizerV36 + ) + LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36" + ).set_name("LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36", register=True) +except Exception as e: + print("EnhancedUltimateEvolutionaryGradientOptimizerV36 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: + print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined + ) + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined", register=True) +except Exception as e: + print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 + ) + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2", register=True) +except Exception as e: + print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 + ) + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3", register=True) +except Exception as e: + print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 import ( + EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44, + ) + + lama_register["EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44"] = ( + EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 + ) + LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 = NonObjectOptimizer( + method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44" + ).set_name("LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44", register=True) +except Exception as e: + print("EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleAdaptiveEvolutionaryAlgorithm import ( + EnsembleAdaptiveEvolutionaryAlgorithm, + ) + + lama_register["EnsembleAdaptiveEvolutionaryAlgorithm"] = EnsembleAdaptiveEvolutionaryAlgorithm + LLAMAEnsembleAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm" + ).set_name("LLAMAEnsembleAdaptiveEvolutionaryAlgorithm", register=True) +except Exception as e: + print("EnsembleAdaptiveEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleAdaptiveMemeticOptimizer import EnsembleAdaptiveMemeticOptimizer + + lama_register["EnsembleAdaptiveMemeticOptimizer"] = EnsembleAdaptiveMemeticOptimizer + LLAMAEnsembleAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnsembleAdaptiveMemeticOptimizer" + ).set_name("LLAMAEnsembleAdaptiveMemeticOptimizer", register=True) +except Exception as e: + print("EnsembleAdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleAdaptiveQuantumDE import EnsembleAdaptiveQuantumDE + + lama_register["EnsembleAdaptiveQuantumDE"] = EnsembleAdaptiveQuantumDE + LLAMAEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE").set_name( + "LLAMAEnsembleAdaptiveQuantumDE", register=True + ) +except Exception as e: + print("EnsembleAdaptiveQuantumDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleDE import EnsembleDE + + lama_register["EnsembleDE"] = EnsembleDE + LLAMAEnsembleDE = NonObjectOptimizer(method="LLAMAEnsembleDE").set_name("LLAMAEnsembleDE", register=True) +except Exception as e: + print("EnsembleDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleEvolutionaryCulturalSearch import ( + EnsembleEvolutionaryCulturalSearch, + ) + + lama_register["EnsembleEvolutionaryCulturalSearch"] = EnsembleEvolutionaryCulturalSearch + LLAMAEnsembleEvolutionaryCulturalSearch = NonObjectOptimizer( + method="LLAMAEnsembleEvolutionaryCulturalSearch" + ).set_name("LLAMAEnsembleEvolutionaryCulturalSearch", register=True) +except Exception as e: + print("EnsembleEvolutionaryCulturalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleHybridSearch import EnsembleHybridSearch + + lama_register["EnsembleHybridSearch"] = EnsembleHybridSearch + LLAMAEnsembleHybridSearch = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch").set_name( + "LLAMAEnsembleHybridSearch", register=True + ) +except Exception as e: + print("EnsembleHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleMemeticAlgorithm import EnsembleMemeticAlgorithm + + lama_register["EnsembleMemeticAlgorithm"] = EnsembleMemeticAlgorithm + LLAMAEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm").set_name( + "LLAMAEnsembleMemeticAlgorithm", register=True + ) +except Exception as e: + print("EnsembleMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EnsembleMutationAdaptiveDE import EnsembleMutationAdaptiveDE + + lama_register["EnsembleMutationAdaptiveDE"] = EnsembleMutationAdaptiveDE + LLAMAEnsembleMutationAdaptiveDE = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE").set_name( + "LLAMAEnsembleMutationAdaptiveDE", register=True + ) +except Exception as e: + print("EnsembleMutationAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EntropyEnhancedAdaptiveStrategyV61 import ( + EntropyEnhancedAdaptiveStrategyV61, + ) + + lama_register["EntropyEnhancedAdaptiveStrategyV61"] = EntropyEnhancedAdaptiveStrategyV61 + LLAMAEntropyEnhancedAdaptiveStrategyV61 = NonObjectOptimizer( + method="LLAMAEntropyEnhancedAdaptiveStrategyV61" + ).set_name("LLAMAEntropyEnhancedAdaptiveStrategyV61", register=True) +except Exception as e: + print("EntropyEnhancedAdaptiveStrategyV61 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryConvergenceSpiralSearch import ( + EvolutionaryConvergenceSpiralSearch, + ) + + lama_register["EvolutionaryConvergenceSpiralSearch"] = EvolutionaryConvergenceSpiralSearch + LLAMAEvolutionaryConvergenceSpiralSearch = NonObjectOptimizer( + method="LLAMAEvolutionaryConvergenceSpiralSearch" + ).set_name("LLAMAEvolutionaryConvergenceSpiralSearch", register=True) +except Exception as e: + print("EvolutionaryConvergenceSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryDynamicGradientSearch import ( + EvolutionaryDynamicGradientSearch, + ) + + lama_register["EvolutionaryDynamicGradientSearch"] = EvolutionaryDynamicGradientSearch + LLAMAEvolutionaryDynamicGradientSearch = NonObjectOptimizer( + method="LLAMAEvolutionaryDynamicGradientSearch" + ).set_name("LLAMAEvolutionaryDynamicGradientSearch", register=True) +except Exception as e: + print("EvolutionaryDynamicGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizer import ( + EvolutionaryGradientHybridOptimizer, + ) + + lama_register["EvolutionaryGradientHybridOptimizer"] = EvolutionaryGradientHybridOptimizer + LLAMAEvolutionaryGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAEvolutionaryGradientHybridOptimizer" + ).set_name("LLAMAEvolutionaryGradientHybridOptimizer", register=True) +except Exception as e: + print("EvolutionaryGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizerV2 import ( + EvolutionaryGradientHybridOptimizerV2, + ) + + lama_register["EvolutionaryGradientHybridOptimizerV2"] = EvolutionaryGradientHybridOptimizerV2 + LLAMAEvolutionaryGradientHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAEvolutionaryGradientHybridOptimizerV2" + ).set_name("LLAMAEvolutionaryGradientHybridOptimizerV2", register=True) +except Exception as e: + print("EvolutionaryGradientHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryGradientSearch import EvolutionaryGradientSearch + + lama_register["EvolutionaryGradientSearch"] = EvolutionaryGradientSearch + LLAMAEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch").set_name( + "LLAMAEvolutionaryGradientSearch", register=True + ) +except Exception as e: + print("EvolutionaryGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryHarmonicFireworkAlgorithm import ( + EvolutionaryHarmonicFireworkAlgorithm, + ) + + lama_register["EvolutionaryHarmonicFireworkAlgorithm"] = EvolutionaryHarmonicFireworkAlgorithm + LLAMAEvolutionaryHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEvolutionaryHarmonicFireworkAlgorithm" + ).set_name("LLAMAEvolutionaryHarmonicFireworkAlgorithm", register=True) +except Exception as e: + print("EvolutionaryHarmonicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.EvolutionaryParticleSwarmOptimizer import ( + EvolutionaryParticleSwarmOptimizer, + ) + + lama_register["EvolutionaryParticleSwarmOptimizer"] = EvolutionaryParticleSwarmOptimizer + LLAMAEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEvolutionaryParticleSwarmOptimizer" + ).set_name("LLAMAEvolutionaryParticleSwarmOptimizer", register=True) +except Exception as e: + print("EvolutionaryParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ExDADe import ExDADe + + lama_register["ExDADe"] = ExDADe + LLAMAExDADe = NonObjectOptimizer(method="LLAMAExDADe").set_name("LLAMAExDADe", register=True) +except Exception as e: + print("ExDADe can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FEDE import FEDE + + lama_register["FEDE"] = FEDE + LLAMAFEDE = NonObjectOptimizer(method="LLAMAFEDE").set_name("LLAMAFEDE", register=True) +except Exception as e: + print("FEDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FTADEEM import FTADEEM + + lama_register["FTADEEM"] = FTADEEM + LLAMAFTADEEM = NonObjectOptimizer(method="LLAMAFTADEEM").set_name("LLAMAFTADEEM", register=True) +except Exception as e: + print("FTADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( + FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + ) + LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: + print("FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalEnhancedDynamicLocalSearchFireworkAlgorithm import ( + FinalEnhancedDynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["FinalEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( + FinalEnhancedDynamicLocalSearchFireworkAlgorithm + ) + LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: + print("FinalEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: + print("FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 import ( + FinalEnhancedRefinedUltimateGuidedMassQGSA_v75, + ) + + lama_register["FinalEnhancedRefinedUltimateGuidedMassQGSA_v75"] = ( + FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 + ) + LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75 = NonObjectOptimizer( + method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75" + ).set_name("LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75", register=True) +except Exception as e: + print("FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithm import ( + FinalOptimizedEnhancedDynamicFireworkAlgorithm, + ) + + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithm"] = ( + FinalOptimizedEnhancedDynamicFireworkAlgorithm + ) + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("FinalOptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined import ( + FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined, + ) + + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined"] = ( + FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined + ) + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: + print("FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FineTunedCohortDiversityOptimizer import ( + FineTunedCohortDiversityOptimizer, + ) + + lama_register["FineTunedCohortDiversityOptimizer"] = FineTunedCohortDiversityOptimizer + LLAMAFineTunedCohortDiversityOptimizer = NonObjectOptimizer( + method="LLAMAFineTunedCohortDiversityOptimizer" + ).set_name("LLAMAFineTunedCohortDiversityOptimizer", register=True) +except Exception as e: + print("FineTunedCohortDiversityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FineTunedFocusedAdaptiveOptimizer import ( + FineTunedFocusedAdaptiveOptimizer, + ) + + lama_register["FineTunedFocusedAdaptiveOptimizer"] = FineTunedFocusedAdaptiveOptimizer + LLAMAFineTunedFocusedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAFineTunedFocusedAdaptiveOptimizer" + ).set_name("LLAMAFineTunedFocusedAdaptiveOptimizer", register=True) +except Exception as e: + print("FineTunedFocusedAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FineTunedProgressiveAdaptiveSearch import ( + FineTunedProgressiveAdaptiveSearch, + ) + + lama_register["FineTunedProgressiveAdaptiveSearch"] = FineTunedProgressiveAdaptiveSearch + LLAMAFineTunedProgressiveAdaptiveSearch = NonObjectOptimizer( + method="LLAMAFineTunedProgressiveAdaptiveSearch" + ).set_name("LLAMAFineTunedProgressiveAdaptiveSearch", register=True) +except Exception as e: + print("FineTunedProgressiveAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FocusedBalancedAdaptivePSO import FocusedBalancedAdaptivePSO + + lama_register["FocusedBalancedAdaptivePSO"] = FocusedBalancedAdaptivePSO + LLAMAFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO").set_name( + "LLAMAFocusedBalancedAdaptivePSO", register=True + ) +except Exception as e: + print("FocusedBalancedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FocusedEvolutionStrategy import FocusedEvolutionStrategy + + lama_register["FocusedEvolutionStrategy"] = FocusedEvolutionStrategy + LLAMAFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy").set_name( + "LLAMAFocusedEvolutionStrategy", register=True + ) +except Exception as e: + print("FocusedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FractionalOrderClusterHybridOptimization import ( + FractionalOrderClusterHybridOptimization, + ) + + lama_register["FractionalOrderClusterHybridOptimization"] = FractionalOrderClusterHybridOptimization + LLAMAFractionalOrderClusterHybridOptimization = NonObjectOptimizer( + method="LLAMAFractionalOrderClusterHybridOptimization" + ).set_name("LLAMAFractionalOrderClusterHybridOptimization", register=True) +except Exception as e: + print("FractionalOrderClusterHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.FurtherEnhancedHybridMetaHeuristicOptimizerV13 import ( + FurtherEnhancedHybridMetaHeuristicOptimizerV13, + ) + + lama_register["FurtherEnhancedHybridMetaHeuristicOptimizerV13"] = ( + FurtherEnhancedHybridMetaHeuristicOptimizerV13 + ) + LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( + method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13" + ).set_name("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13", register=True) +except Exception as e: + print("FurtherEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GEEA import GEEA + + lama_register["GEEA"] = GEEA + LLAMAGEEA = NonObjectOptimizer(method="LLAMAGEEA").set_name("LLAMAGEEA", register=True) +except Exception as e: + print("GEEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GESA import GESA + + lama_register["GESA"] = GESA + LLAMAGESA = NonObjectOptimizer(method="LLAMAGESA").set_name("LLAMAGESA", register=True) +except Exception as e: + print("GESA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GGAES import GGAES + + lama_register["GGAES"] = GGAES + LLAMAGGAES = NonObjectOptimizer(method="LLAMAGGAES").set_name("LLAMAGGAES", register=True) +except Exception as e: + print("GGAES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GIDE import GIDE + + lama_register["GIDE"] = GIDE + LLAMAGIDE = NonObjectOptimizer(method="LLAMAGIDE").set_name("LLAMAGIDE", register=True) +except Exception as e: + print("GIDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GaussianAdaptivePSO import GaussianAdaptivePSO + + lama_register["GaussianAdaptivePSO"] = GaussianAdaptivePSO + LLAMAGaussianAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO").set_name( + "LLAMAGaussianAdaptivePSO", register=True + ) +except Exception as e: + print("GaussianAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GaussianEnhancedAdaptivePSO import GaussianEnhancedAdaptivePSO + + lama_register["GaussianEnhancedAdaptivePSO"] = GaussianEnhancedAdaptivePSO + LLAMAGaussianEnhancedAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO").set_name( + "LLAMAGaussianEnhancedAdaptivePSO", register=True + ) +except Exception as e: + print("GaussianEnhancedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientAssistedDifferentialCrossover import ( + GradientAssistedDifferentialCrossover, + ) + + lama_register["GradientAssistedDifferentialCrossover"] = GradientAssistedDifferentialCrossover + LLAMAGradientAssistedDifferentialCrossover = NonObjectOptimizer( + method="LLAMAGradientAssistedDifferentialCrossover" + ).set_name("LLAMAGradientAssistedDifferentialCrossover", register=True) +except Exception as e: + print("GradientAssistedDifferentialCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientBalancedEvolutionStrategy import ( + GradientBalancedEvolutionStrategy, + ) + + lama_register["GradientBalancedEvolutionStrategy"] = GradientBalancedEvolutionStrategy + LLAMAGradientBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAGradientBalancedEvolutionStrategy" + ).set_name("LLAMAGradientBalancedEvolutionStrategy", register=True) +except Exception as e: + print("GradientBalancedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientBasedAdaptiveCovarianceMatrixAdaptation import ( + GradientBasedAdaptiveCovarianceMatrixAdaptation, + ) + + lama_register["GradientBasedAdaptiveCovarianceMatrixAdaptation"] = ( + GradientBasedAdaptiveCovarianceMatrixAdaptation + ) + LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("GradientBasedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientBoostedMemoryAnnealing import GradientBoostedMemoryAnnealing + + lama_register["GradientBoostedMemoryAnnealing"] = GradientBoostedMemoryAnnealing + LLAMAGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAGradientBoostedMemoryAnnealing" + ).set_name("LLAMAGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("GradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientEnhancedAdaptiveAnnealing import ( + GradientEnhancedAdaptiveAnnealing, + ) + + lama_register["GradientEnhancedAdaptiveAnnealing"] = GradientEnhancedAdaptiveAnnealing + LLAMAGradientEnhancedAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAGradientEnhancedAdaptiveAnnealing" + ).set_name("LLAMAGradientEnhancedAdaptiveAnnealing", register=True) +except Exception as e: + print("GradientEnhancedAdaptiveAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientEnhancedAdaptiveDifferentialEvolution import ( + GradientEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["GradientEnhancedAdaptiveDifferentialEvolution"] = ( + GradientEnhancedAdaptiveDifferentialEvolution + ) + LLAMAGradientEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAGradientEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("GradientEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientEstimationSearch import GradientEstimationSearch + + lama_register["GradientEstimationSearch"] = GradientEstimationSearch + LLAMAGradientEstimationSearch = NonObjectOptimizer(method="LLAMAGradientEstimationSearch").set_name( + "LLAMAGradientEstimationSearch", register=True + ) +except Exception as e: + print("GradientEstimationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientGuidedClusterSearch import GradientGuidedClusterSearch + + lama_register["GradientGuidedClusterSearch"] = GradientGuidedClusterSearch + LLAMAGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch").set_name( + "LLAMAGradientGuidedClusterSearch", register=True + ) +except Exception as e: + print("GradientGuidedClusterSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientGuidedDifferentialEvolution import ( + GradientGuidedDifferentialEvolution, + ) + + lama_register["GradientGuidedDifferentialEvolution"] = GradientGuidedDifferentialEvolution + LLAMAGradientGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAGradientGuidedDifferentialEvolution" + ).set_name("LLAMAGradientGuidedDifferentialEvolution", register=True) +except Exception as e: + print("GradientGuidedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientGuidedEvolutionStrategy import GradientGuidedEvolutionStrategy + + lama_register["GradientGuidedEvolutionStrategy"] = GradientGuidedEvolutionStrategy + LLAMAGradientGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAGradientGuidedEvolutionStrategy" + ).set_name("LLAMAGradientGuidedEvolutionStrategy", register=True) +except Exception as e: + print("GradientGuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientGuidedHybridPSO import GradientGuidedHybridPSO + + lama_register["GradientGuidedHybridPSO"] = GradientGuidedHybridPSO + LLAMAGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO").set_name( + "LLAMAGradientGuidedHybridPSO", register=True + ) +except Exception as e: + print("GradientGuidedHybridPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientInformedAdaptiveDirectionSearch import ( + GradientInformedAdaptiveDirectionSearch, + ) + + lama_register["GradientInformedAdaptiveDirectionSearch"] = GradientInformedAdaptiveDirectionSearch + LLAMAGradientInformedAdaptiveDirectionSearch = NonObjectOptimizer( + method="LLAMAGradientInformedAdaptiveDirectionSearch" + ).set_name("LLAMAGradientInformedAdaptiveDirectionSearch", register=True) +except Exception as e: + print("GradientInformedAdaptiveDirectionSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientInformedAdaptiveSearch import GradientInformedAdaptiveSearch + + lama_register["GradientInformedAdaptiveSearch"] = GradientInformedAdaptiveSearch + LLAMAGradientInformedAdaptiveSearch = NonObjectOptimizer( + method="LLAMAGradientInformedAdaptiveSearch" + ).set_name("LLAMAGradientInformedAdaptiveSearch", register=True) +except Exception as e: + print("GradientInformedAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientInformedParticleOptimizer import ( + GradientInformedParticleOptimizer, + ) + + lama_register["GradientInformedParticleOptimizer"] = GradientInformedParticleOptimizer + LLAMAGradientInformedParticleOptimizer = NonObjectOptimizer( + method="LLAMAGradientInformedParticleOptimizer" + ).set_name("LLAMAGradientInformedParticleOptimizer", register=True) +except Exception as e: + print("GradientInformedParticleOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GradientSpiralDifferentialEnhancerV5 import ( + GradientSpiralDifferentialEnhancerV5, + ) + + lama_register["GradientSpiralDifferentialEnhancerV5"] = GradientSpiralDifferentialEnhancerV5 + LLAMAGradientSpiralDifferentialEnhancerV5 = NonObjectOptimizer( + method="LLAMAGradientSpiralDifferentialEnhancerV5" + ).set_name("LLAMAGradientSpiralDifferentialEnhancerV5", register=True) +except Exception as e: + print("GradientSpiralDifferentialEnhancerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GravitationalSwarmIntelligence import GravitationalSwarmIntelligence + + lama_register["GravitationalSwarmIntelligence"] = GravitationalSwarmIntelligence + LLAMAGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAGravitationalSwarmIntelligence" + ).set_name("LLAMAGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("GravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GreedyDiversityMultiStrategySADE import GreedyDiversityMultiStrategySADE + + lama_register["GreedyDiversityMultiStrategySADE"] = GreedyDiversityMultiStrategySADE + LLAMAGreedyDiversityMultiStrategySADE = NonObjectOptimizer( + method="LLAMAGreedyDiversityMultiStrategySADE" + ).set_name("LLAMAGreedyDiversityMultiStrategySADE", register=True) +except Exception as e: + print("GreedyDiversityMultiStrategySADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GreedyDynamicMultiStrategyDE import GreedyDynamicMultiStrategyDE + + lama_register["GreedyDynamicMultiStrategyDE"] = GreedyDynamicMultiStrategyDE + LLAMAGreedyDynamicMultiStrategyDE = NonObjectOptimizer( + method="LLAMAGreedyDynamicMultiStrategyDE" + ).set_name("LLAMAGreedyDynamicMultiStrategyDE", register=True) +except Exception as e: + print("GreedyDynamicMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GuidedEvolutionStrategy import GuidedEvolutionStrategy + + lama_register["GuidedEvolutionStrategy"] = GuidedEvolutionStrategy + LLAMAGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy").set_name( + "LLAMAGuidedEvolutionStrategy", register=True + ) +except Exception as e: + print("GuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.GuidedMutationOptimizer import GuidedMutationOptimizer + + lama_register["GuidedMutationOptimizer"] = GuidedMutationOptimizer + LLAMAGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer").set_name( + "LLAMAGuidedMutationOptimizer", register=True + ) +except Exception as e: + print("GuidedMutationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HADE import HADE + + lama_register["HADE"] = HADE + LLAMAHADE = NonObjectOptimizer(method="LLAMAHADE").set_name("LLAMAHADE", register=True) +except Exception as e: + print("HADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HADEEM import HADEEM + + lama_register["HADEEM"] = HADEEM + LLAMAHADEEM = NonObjectOptimizer(method="LLAMAHADEEM").set_name("LLAMAHADEEM", register=True) +except Exception as e: + print("HADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HADEMI import HADEMI + + lama_register["HADEMI"] = HADEMI + LLAMAHADEMI = NonObjectOptimizer(method="LLAMAHADEMI").set_name("LLAMAHADEMI", register=True) +except Exception as e: + print("HADEMI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HAVCDE import HAVCDE + + lama_register["HAVCDE"] = HAVCDE + LLAMAHAVCDE = NonObjectOptimizer(method="LLAMAHAVCDE").set_name("LLAMAHAVCDE", register=True) +except Exception as e: + print("HAVCDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HEAS import HEAS + + lama_register["HEAS"] = HEAS + LLAMAHEAS = NonObjectOptimizer(method="LLAMAHEAS").set_name("LLAMAHEAS", register=True) +except Exception as e: + print("HEAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HarmonyFireworkOptimizer import HarmonyFireworkOptimizer + + lama_register["HarmonyFireworkOptimizer"] = HarmonyFireworkOptimizer + LLAMAHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer").set_name( + "LLAMAHarmonyFireworkOptimizer", register=True + ) +except Exception as e: + print("HarmonyFireworkOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HarmonyTabuOptimization import HarmonyTabuOptimization + + lama_register["HarmonyTabuOptimization"] = HarmonyTabuOptimization + LLAMAHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization").set_name( + "LLAMAHarmonyTabuOptimization", register=True + ) +except Exception as e: + print("HarmonyTabuOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HierarchicalAdaptiveAnnealing import HierarchicalAdaptiveAnnealing + + lama_register["HierarchicalAdaptiveAnnealing"] = HierarchicalAdaptiveAnnealing + LLAMAHierarchicalAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAHierarchicalAdaptiveAnnealing" + ).set_name("LLAMAHierarchicalAdaptiveAnnealing", register=True) +except Exception as e: + print("HierarchicalAdaptiveAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HierarchicalAdaptiveCovarianceMatrixAdaptation import ( + HierarchicalAdaptiveCovarianceMatrixAdaptation, + ) + + lama_register["HierarchicalAdaptiveCovarianceMatrixAdaptation"] = ( + HierarchicalAdaptiveCovarianceMatrixAdaptation + ) + LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("HierarchicalAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HierarchicalAdaptiveSearch import HierarchicalAdaptiveSearch + + lama_register["HierarchicalAdaptiveSearch"] = HierarchicalAdaptiveSearch + LLAMAHierarchicalAdaptiveSearch = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch").set_name( + "LLAMAHierarchicalAdaptiveSearch", register=True + ) +except Exception as e: + print("HierarchicalAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HierarchicalDiversityEnhancedCovarianceMatrixAdaptation import ( + HierarchicalDiversityEnhancedCovarianceMatrixAdaptation, + ) + + lama_register["HierarchicalDiversityEnhancedCovarianceMatrixAdaptation"] = ( + HierarchicalDiversityEnhancedCovarianceMatrixAdaptation + ) + LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation" + ).set_name("LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("HierarchicalDiversityEnhancedCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HighPerformanceAdaptiveDifferentialSearch import ( + HighPerformanceAdaptiveDifferentialSearch, + ) + + lama_register["HighPerformanceAdaptiveDifferentialSearch"] = HighPerformanceAdaptiveDifferentialSearch + LLAMAHighPerformanceAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAHighPerformanceAdaptiveDifferentialSearch" + ).set_name("LLAMAHighPerformanceAdaptiveDifferentialSearch", register=True) +except Exception as e: + print("HighPerformanceAdaptiveDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyGDAE import HyGDAE + + lama_register["HyGDAE"] = HyGDAE + LLAMAHyGDAE = NonObjectOptimizer(method="LLAMAHyGDAE").set_name("LLAMAHyGDAE", register=True) +except Exception as e: + print("HyGDAE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveCovarianceMatrixDifferentialEvolution import ( + HybridAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["HybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + HybridAdaptiveCovarianceMatrixDifferentialEvolution + ) + LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveCrossoverElitistStrategyV10 import ( + HybridAdaptiveCrossoverElitistStrategyV10, + ) + + lama_register["HybridAdaptiveCrossoverElitistStrategyV10"] = HybridAdaptiveCrossoverElitistStrategyV10 + LLAMAHybridAdaptiveCrossoverElitistStrategyV10 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10" + ).set_name("LLAMAHybridAdaptiveCrossoverElitistStrategyV10", register=True) +except Exception as e: + print("HybridAdaptiveCrossoverElitistStrategyV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDE import HybridAdaptiveDE + + lama_register["HybridAdaptiveDE"] = HybridAdaptiveDE + LLAMAHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE").set_name( + "LLAMAHybridAdaptiveDE", register=True + ) +except Exception as e: + print("HybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolution import ( + HybridAdaptiveDifferentialEvolution, + ) + + lama_register["HybridAdaptiveDifferentialEvolution"] = HybridAdaptiveDifferentialEvolution + LLAMAHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning import ( + HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning, + ) + + lama_register["HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning"] = ( + HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning + ) + LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning", register=True) +except Exception as e: + print("HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch import ( + HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch, + ) + + lama_register["HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch"] = ( + HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch + ) + LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch", register=True) +except Exception as e: + print("HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDifferentialQuantumSearch import ( + HybridAdaptiveDifferentialQuantumSearch, + ) + + lama_register["HybridAdaptiveDifferentialQuantumSearch"] = HybridAdaptiveDifferentialQuantumSearch + LLAMAHybridAdaptiveDifferentialQuantumSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialQuantumSearch" + ).set_name("LLAMAHybridAdaptiveDifferentialQuantumSearch", register=True) +except Exception as e: + print("HybridAdaptiveDifferentialQuantumSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDifferentialSwarm import HybridAdaptiveDifferentialSwarm + + lama_register["HybridAdaptiveDifferentialSwarm"] = HybridAdaptiveDifferentialSwarm + LLAMAHybridAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialSwarm" + ).set_name("LLAMAHybridAdaptiveDifferentialSwarm", register=True) +except Exception as e: + print("HybridAdaptiveDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDiversityMaintainingGradientEvolution import ( + HybridAdaptiveDiversityMaintainingGradientEvolution, + ) + + lama_register["HybridAdaptiveDiversityMaintainingGradientEvolution"] = ( + HybridAdaptiveDiversityMaintainingGradientEvolution + ) + LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution" + ).set_name("LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution", register=True) +except Exception as e: + print("HybridAdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveDualPhaseStrategyV6 import ( + HybridAdaptiveDualPhaseStrategyV6, + ) + + lama_register["HybridAdaptiveDualPhaseStrategyV6"] = HybridAdaptiveDualPhaseStrategyV6 + LLAMAHybridAdaptiveDualPhaseStrategyV6 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDualPhaseStrategyV6" + ).set_name("LLAMAHybridAdaptiveDualPhaseStrategyV6", register=True) +except Exception as e: + print("HybridAdaptiveDualPhaseStrategyV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveEvolutionaryOptimizer import ( + HybridAdaptiveEvolutionaryOptimizer, + ) + + lama_register["HybridAdaptiveEvolutionaryOptimizer"] = HybridAdaptiveEvolutionaryOptimizer + LLAMAHybridAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAHybridAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: + print("HybridAdaptiveEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveExplorationOptimizer import ( + HybridAdaptiveExplorationOptimizer, + ) + + lama_register["HybridAdaptiveExplorationOptimizer"] = HybridAdaptiveExplorationOptimizer + LLAMAHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveExplorationOptimizer" + ).set_name("LLAMAHybridAdaptiveExplorationOptimizer", register=True) +except Exception as e: + print("HybridAdaptiveExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizer import ( + HybridAdaptiveGeneticSwarmOptimizer, + ) + + lama_register["HybridAdaptiveGeneticSwarmOptimizer"] = HybridAdaptiveGeneticSwarmOptimizer + LLAMAHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: + print("HybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizerV2 import ( + HybridAdaptiveGeneticSwarmOptimizerV2, + ) + + lama_register["HybridAdaptiveGeneticSwarmOptimizerV2"] = HybridAdaptiveGeneticSwarmOptimizerV2 + LLAMAHybridAdaptiveGeneticSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2" + ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizerV2", register=True) +except Exception as e: + print("HybridAdaptiveGeneticSwarmOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveGradientPSO import HybridAdaptiveGradientPSO + + lama_register["HybridAdaptiveGradientPSO"] = HybridAdaptiveGradientPSO + LLAMAHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO").set_name( + "LLAMAHybridAdaptiveGradientPSO", register=True + ) +except Exception as e: + print("HybridAdaptiveGradientPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveHarmonicFireworksTabuSearch import ( + HybridAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["HybridAdaptiveHarmonicFireworksTabuSearch"] = HybridAdaptiveHarmonicFireworksTabuSearch + LLAMAHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: + print("HybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMemeticAlgorithm import HybridAdaptiveMemeticAlgorithm + + lama_register["HybridAdaptiveMemeticAlgorithm"] = HybridAdaptiveMemeticAlgorithm + LLAMAHybridAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticAlgorithm" + ).set_name("LLAMAHybridAdaptiveMemeticAlgorithm", register=True) +except Exception as e: + print("HybridAdaptiveMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism import ( + HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism, + ) + + lama_register["HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism"] = ( + HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism + ) + LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism" + ).set_name("LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism", register=True) +except Exception as e: + print("HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMemeticOptimizerV4 import HybridAdaptiveMemeticOptimizerV4 + + lama_register["HybridAdaptiveMemeticOptimizerV4"] = HybridAdaptiveMemeticOptimizerV4 + LLAMAHybridAdaptiveMemeticOptimizerV4 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticOptimizerV4" + ).set_name("LLAMAHybridAdaptiveMemeticOptimizerV4", register=True) +except Exception as e: + print("HybridAdaptiveMemeticOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMemoryAnnealing import HybridAdaptiveMemoryAnnealing + + lama_register["HybridAdaptiveMemoryAnnealing"] = HybridAdaptiveMemoryAnnealing + LLAMAHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemoryAnnealing" + ).set_name("LLAMAHybridAdaptiveMemoryAnnealing", register=True) +except Exception as e: + print("HybridAdaptiveMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolution import ( + HybridAdaptiveMultiPhaseEvolution, + ) + + lama_register["HybridAdaptiveMultiPhaseEvolution"] = HybridAdaptiveMultiPhaseEvolution + LLAMAHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMultiPhaseEvolution" + ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolution", register=True) +except Exception as e: + print("HybridAdaptiveMultiPhaseEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolutionV2 import ( + HybridAdaptiveMultiPhaseEvolutionV2, + ) + + lama_register["HybridAdaptiveMultiPhaseEvolutionV2"] = HybridAdaptiveMultiPhaseEvolutionV2 + LLAMAHybridAdaptiveMultiPhaseEvolutionV2 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2" + ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolutionV2", register=True) +except Exception as e: + print("HybridAdaptiveMultiPhaseEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveNesterovSynergy import HybridAdaptiveNesterovSynergy + + lama_register["HybridAdaptiveNesterovSynergy"] = HybridAdaptiveNesterovSynergy + LLAMAHybridAdaptiveNesterovSynergy = NonObjectOptimizer( + method="LLAMAHybridAdaptiveNesterovSynergy" + ).set_name("LLAMAHybridAdaptiveNesterovSynergy", register=True) +except Exception as e: + print("HybridAdaptiveNesterovSynergy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveOptimization import HybridAdaptiveOptimization + + lama_register["HybridAdaptiveOptimization"] = HybridAdaptiveOptimization + LLAMAHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization").set_name( + "LLAMAHybridAdaptiveOptimization", register=True + ) +except Exception as e: + print("HybridAdaptiveOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveOrthogonalDifferentialEvolution import ( + HybridAdaptiveOrthogonalDifferentialEvolution, + ) + + lama_register["HybridAdaptiveOrthogonalDifferentialEvolution"] = ( + HybridAdaptiveOrthogonalDifferentialEvolution + ) + LLAMAHybridAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveParallelDifferentialEvolution import ( + HybridAdaptiveParallelDifferentialEvolution, + ) + + lama_register["HybridAdaptiveParallelDifferentialEvolution"] = HybridAdaptiveParallelDifferentialEvolution + LLAMAHybridAdaptiveParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveParallelDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveParallelDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveParallelDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveParameterTuningOptimization import ( + HybridAdaptiveParameterTuningOptimization, + ) + + lama_register["HybridAdaptiveParameterTuningOptimization"] = HybridAdaptiveParameterTuningOptimization + LLAMAHybridAdaptiveParameterTuningOptimization = NonObjectOptimizer( + method="LLAMAHybridAdaptiveParameterTuningOptimization" + ).set_name("LLAMAHybridAdaptiveParameterTuningOptimization", register=True) +except Exception as e: + print("HybridAdaptiveParameterTuningOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptivePopulationDE import HybridAdaptivePopulationDE + + lama_register["HybridAdaptivePopulationDE"] = HybridAdaptivePopulationDE + LLAMAHybridAdaptivePopulationDE = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE").set_name( + "LLAMAHybridAdaptivePopulationDE", register=True + ) +except Exception as e: + print("HybridAdaptivePopulationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveQuantumLevySearch import HybridAdaptiveQuantumLevySearch + + lama_register["HybridAdaptiveQuantumLevySearch"] = HybridAdaptiveQuantumLevySearch + LLAMAHybridAdaptiveQuantumLevySearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumLevySearch" + ).set_name("LLAMAHybridAdaptiveQuantumLevySearch", register=True) +except Exception as e: + print("HybridAdaptiveQuantumLevySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticDifferentialEvolution import ( + HybridAdaptiveQuantumMemeticDifferentialEvolution, + ) + + lama_register["HybridAdaptiveQuantumMemeticDifferentialEvolution"] = ( + HybridAdaptiveQuantumMemeticDifferentialEvolution + ) + LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveQuantumMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticOptimizer import ( + HybridAdaptiveQuantumMemeticOptimizer, + ) + + lama_register["HybridAdaptiveQuantumMemeticOptimizer"] = HybridAdaptiveQuantumMemeticOptimizer + LLAMAHybridAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumMemeticOptimizer" + ).set_name("LLAMAHybridAdaptiveQuantumMemeticOptimizer", register=True) +except Exception as e: + print("HybridAdaptiveQuantumMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveQuantumPSO import HybridAdaptiveQuantumPSO + + lama_register["HybridAdaptiveQuantumPSO"] = HybridAdaptiveQuantumPSO + LLAMAHybridAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO").set_name( + "LLAMAHybridAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("HybridAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveSearch import HybridAdaptiveSearch + + lama_register["HybridAdaptiveSearch"] = HybridAdaptiveSearch + LLAMAHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch").set_name( + "LLAMAHybridAdaptiveSearch", register=True + ) +except Exception as e: + print("HybridAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveSearchStrategy import HybridAdaptiveSearchStrategy + + lama_register["HybridAdaptiveSearchStrategy"] = HybridAdaptiveSearchStrategy + LLAMAHybridAdaptiveSearchStrategy = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSearchStrategy" + ).set_name("LLAMAHybridAdaptiveSearchStrategy", register=True) +except Exception as e: + print("HybridAdaptiveSearchStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveSelfAdaptiveDifferentialEvolution import ( + HybridAdaptiveSelfAdaptiveDifferentialEvolution, + ) + + lama_register["HybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( + HybridAdaptiveSelfAdaptiveDifferentialEvolution + ) + LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("HybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridAdaptiveSimulatedAnnealingDE import ( + HybridAdaptiveSimulatedAnnealingDE, + ) + + lama_register["HybridAdaptiveSimulatedAnnealingDE"] = HybridAdaptiveSimulatedAnnealingDE + LLAMAHybridAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSimulatedAnnealingDE" + ).set_name("LLAMAHybridAdaptiveSimulatedAnnealingDE", register=True) +except Exception as e: + print("HybridAdaptiveSimulatedAnnealingDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridCosineSineDualPhaseStrategyV10 import ( + HybridCosineSineDualPhaseStrategyV10, + ) + + lama_register["HybridCosineSineDualPhaseStrategyV10"] = HybridCosineSineDualPhaseStrategyV10 + LLAMAHybridCosineSineDualPhaseStrategyV10 = NonObjectOptimizer( + method="LLAMAHybridCosineSineDualPhaseStrategyV10" + ).set_name("LLAMAHybridCosineSineDualPhaseStrategyV10", register=True) +except Exception as e: + print("HybridCosineSineDualPhaseStrategyV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptionDifferentialEvolution import ( + HybridCovarianceMatrixAdaptionDifferentialEvolution, + ) + + lama_register["HybridCovarianceMatrixAdaptionDifferentialEvolution"] = ( + HybridCovarianceMatrixAdaptionDifferentialEvolution + ) + LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution" + ).set_name("LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution", register=True) +except Exception as e: + print("HybridCovarianceMatrixAdaptionDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 import ( + HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2, + ) + + lama_register["HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2"] = ( + HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 + ) + LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2" + ).set_name("LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2", register=True) +except Exception as e: + print("HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights import ( + HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights, + ) + + lama_register["HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights"] = ( + HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights + ) + LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights" + ).set_name("LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights", register=True) +except Exception as e: + print("HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridCulturalDifferentialEvolution import ( + HybridCulturalDifferentialEvolution, + ) + + lama_register["HybridCulturalDifferentialEvolution"] = HybridCulturalDifferentialEvolution + LLAMAHybridCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridCulturalDifferentialEvolution" + ).set_name("LLAMAHybridCulturalDifferentialEvolution", register=True) +except Exception as e: + print("HybridCulturalDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDEPSO import HybridDEPSO + + lama_register["HybridDEPSO"] = HybridDEPSO + LLAMAHybridDEPSO = NonObjectOptimizer(method="LLAMAHybridDEPSO").set_name( + "LLAMAHybridDEPSO", register=True + ) +except Exception as e: + print("HybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDEPSOWithDynamicAdaptation import HybridDEPSOWithDynamicAdaptation + + lama_register["HybridDEPSOWithDynamicAdaptation"] = HybridDEPSOWithDynamicAdaptation + LLAMAHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( + method="LLAMAHybridDEPSOWithDynamicAdaptation" + ).set_name("LLAMAHybridDEPSOWithDynamicAdaptation", register=True) +except Exception as e: + print("HybridDEPSOWithDynamicAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDifferentialEvolution import HybridDifferentialEvolution + + lama_register["HybridDifferentialEvolution"] = HybridDifferentialEvolution + LLAMAHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution").set_name( + "LLAMAHybridDifferentialEvolution", register=True + ) +except Exception as e: + print("HybridDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDifferentialEvolutionMemeticOptimizer import ( + HybridDifferentialEvolutionMemeticOptimizer, + ) + + lama_register["HybridDifferentialEvolutionMemeticOptimizer"] = HybridDifferentialEvolutionMemeticOptimizer + LLAMAHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionMemeticOptimizer" + ).set_name("LLAMAHybridDifferentialEvolutionMemeticOptimizer", register=True) +except Exception as e: + print("HybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDifferentialEvolutionParticleSwarmOptimizer import ( + HybridDifferentialEvolutionParticleSwarmOptimizer, + ) + + lama_register["HybridDifferentialEvolutionParticleSwarmOptimizer"] = ( + HybridDifferentialEvolutionParticleSwarmOptimizer + ) + LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer" + ).set_name("LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer", register=True) +except Exception as e: + print("HybridDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDifferentialEvolutionWithLocalSearch import ( + HybridDifferentialEvolutionWithLocalSearch, + ) + + lama_register["HybridDifferentialEvolutionWithLocalSearch"] = HybridDifferentialEvolutionWithLocalSearch + LLAMAHybridDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAHybridDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: + print("HybridDifferentialEvolutionWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDifferentialLocalSearch import HybridDifferentialLocalSearch + + lama_register["HybridDifferentialLocalSearch"] = HybridDifferentialLocalSearch + LLAMAHybridDifferentialLocalSearch = NonObjectOptimizer( + method="LLAMAHybridDifferentialLocalSearch" + ).set_name("LLAMAHybridDifferentialLocalSearch", register=True) +except Exception as e: + print("HybridDifferentialLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDualLocalOptimizationDE import HybridDualLocalOptimizationDE + + lama_register["HybridDualLocalOptimizationDE"] = HybridDualLocalOptimizationDE + LLAMAHybridDualLocalOptimizationDE = NonObjectOptimizer( + method="LLAMAHybridDualLocalOptimizationDE" + ).set_name("LLAMAHybridDualLocalOptimizationDE", register=True) +except Exception as e: + print("HybridDualLocalOptimizationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDualPhaseParticleSwarmDifferentialEvolution import ( + HybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["HybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + HybridDualPhaseParticleSwarmDifferentialEvolution + ) + LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("HybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicAdaptiveDE import HybridDynamicAdaptiveDE + + lama_register["HybridDynamicAdaptiveDE"] = HybridDynamicAdaptiveDE + LLAMAHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE").set_name( + "LLAMAHybridDynamicAdaptiveDE", register=True + ) +except Exception as e: + print("HybridDynamicAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicAdaptiveExplorationOptimization import ( + HybridDynamicAdaptiveExplorationOptimization, + ) + + lama_register["HybridDynamicAdaptiveExplorationOptimization"] = ( + HybridDynamicAdaptiveExplorationOptimization + ) + LLAMAHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAHybridDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAHybridDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("HybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicClusterOptimization import HybridDynamicClusterOptimization + + lama_register["HybridDynamicClusterOptimization"] = HybridDynamicClusterOptimization + LLAMAHybridDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMAHybridDynamicClusterOptimization" + ).set_name("LLAMAHybridDynamicClusterOptimization", register=True) +except Exception as e: + print("HybridDynamicClusterOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicCuckooHarmonyAlgorithm import ( + HybridDynamicCuckooHarmonyAlgorithm, + ) + + lama_register["HybridDynamicCuckooHarmonyAlgorithm"] = HybridDynamicCuckooHarmonyAlgorithm + LLAMAHybridDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAHybridDynamicCuckooHarmonyAlgorithm" + ).set_name("LLAMAHybridDynamicCuckooHarmonyAlgorithm", register=True) +except Exception as e: + print("HybridDynamicCuckooHarmonyAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolution import ( + HybridDynamicDifferentialEvolution, + ) + + lama_register["HybridDynamicDifferentialEvolution"] = HybridDynamicDifferentialEvolution + LLAMAHybridDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridDynamicDifferentialEvolution" + ).set_name("LLAMAHybridDynamicDifferentialEvolution", register=True) +except Exception as e: + print("HybridDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolutionGradient import ( + HybridDynamicDifferentialEvolutionGradient, + ) + + lama_register["HybridDynamicDifferentialEvolutionGradient"] = HybridDynamicDifferentialEvolutionGradient + LLAMAHybridDynamicDifferentialEvolutionGradient = NonObjectOptimizer( + method="LLAMAHybridDynamicDifferentialEvolutionGradient" + ).set_name("LLAMAHybridDynamicDifferentialEvolutionGradient", register=True) +except Exception as e: + print("HybridDynamicDifferentialEvolutionGradient can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicElitistDE import HybridDynamicElitistDE + + lama_register["HybridDynamicElitistDE"] = HybridDynamicElitistDE + LLAMAHybridDynamicElitistDE = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE").set_name( + "LLAMAHybridDynamicElitistDE", register=True + ) +except Exception as e: + print("HybridDynamicElitistDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicQuantumLevyDifferentialSearch import ( + HybridDynamicQuantumLevyDifferentialSearch, + ) + + lama_register["HybridDynamicQuantumLevyDifferentialSearch"] = HybridDynamicQuantumLevyDifferentialSearch + LLAMAHybridDynamicQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAHybridDynamicQuantumLevyDifferentialSearch" + ).set_name("LLAMAHybridDynamicQuantumLevyDifferentialSearch", register=True) +except Exception as e: + print("HybridDynamicQuantumLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridDynamicSearch import HybridDynamicSearch + + lama_register["HybridDynamicSearch"] = HybridDynamicSearch + LLAMAHybridDynamicSearch = NonObjectOptimizer(method="LLAMAHybridDynamicSearch").set_name( + "LLAMAHybridDynamicSearch", register=True + ) +except Exception as e: + print("HybridDynamicSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEnhancedAdaptiveDifferentialEvolution import ( + HybridEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["HybridEnhancedAdaptiveDifferentialEvolution"] = HybridEnhancedAdaptiveDifferentialEvolution + LLAMAHybridEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("HybridEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEnhancedDualPhaseAdaptiveOptimizationV6 import ( + HybridEnhancedDualPhaseAdaptiveOptimizationV6, + ) + + lama_register["HybridEnhancedDualPhaseAdaptiveOptimizationV6"] = ( + HybridEnhancedDualPhaseAdaptiveOptimizationV6 + ) + LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6 = NonObjectOptimizer( + method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6" + ).set_name("LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6", register=True) +except Exception as e: + print("HybridEnhancedDualPhaseAdaptiveOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEnhancedGravitationalSwarmIntelligence import ( + HybridEnhancedGravitationalSwarmIntelligence, + ) + + lama_register["HybridEnhancedGravitationalSwarmIntelligence"] = ( + HybridEnhancedGravitationalSwarmIntelligence + ) + LLAMAHybridEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAHybridEnhancedGravitationalSwarmIntelligence" + ).set_name("LLAMAHybridEnhancedGravitationalSwarmIntelligence", register=True) +except Exception as e: + print("HybridEnhancedGravitationalSwarmIntelligence can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEvolutionaryAnnealingOptimizer import ( + HybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["HybridEvolutionaryAnnealingOptimizer"] = HybridEvolutionaryAnnealingOptimizer + LLAMAHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: + print("HybridEvolutionaryAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEvolutionaryOptimization import HybridEvolutionaryOptimization + + lama_register["HybridEvolutionaryOptimization"] = HybridEvolutionaryOptimization + LLAMAHybridEvolutionaryOptimization = NonObjectOptimizer( + method="LLAMAHybridEvolutionaryOptimization" + ).set_name("LLAMAHybridEvolutionaryOptimization", register=True) +except Exception as e: + print("HybridEvolutionaryOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridEvolvingAdaptiveStrategyV28 import ( + HybridEvolvingAdaptiveStrategyV28, + ) + + lama_register["HybridEvolvingAdaptiveStrategyV28"] = HybridEvolvingAdaptiveStrategyV28 + LLAMAHybridEvolvingAdaptiveStrategyV28 = NonObjectOptimizer( + method="LLAMAHybridEvolvingAdaptiveStrategyV28" + ).set_name("LLAMAHybridEvolvingAdaptiveStrategyV28", register=True) +except Exception as e: + print("HybridEvolvingAdaptiveStrategyV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridExploitationExplorationGradientSearch import ( + HybridExploitationExplorationGradientSearch, + ) + + lama_register["HybridExploitationExplorationGradientSearch"] = HybridExploitationExplorationGradientSearch + LLAMAHybridExploitationExplorationGradientSearch = NonObjectOptimizer( + method="LLAMAHybridExploitationExplorationGradientSearch" + ).set_name("LLAMAHybridExploitationExplorationGradientSearch", register=True) +except Exception as e: + print("HybridExploitationExplorationGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientAnnealingWithMemory import ( + HybridGradientAnnealingWithMemory, + ) + + lama_register["HybridGradientAnnealingWithMemory"] = HybridGradientAnnealingWithMemory + LLAMAHybridGradientAnnealingWithMemory = NonObjectOptimizer( + method="LLAMAHybridGradientAnnealingWithMemory" + ).set_name("LLAMAHybridGradientAnnealingWithMemory", register=True) +except Exception as e: + print("HybridGradientAnnealingWithMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientBoostedMemoryAnnealingPlus import ( + HybridGradientBoostedMemoryAnnealingPlus, + ) + + lama_register["HybridGradientBoostedMemoryAnnealingPlus"] = HybridGradientBoostedMemoryAnnealingPlus + LLAMAHybridGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( + method="LLAMAHybridGradientBoostedMemoryAnnealingPlus" + ).set_name("LLAMAHybridGradientBoostedMemoryAnnealingPlus", register=True) +except Exception as e: + print("HybridGradientBoostedMemoryAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientCrossoverOptimization import ( + HybridGradientCrossoverOptimization, + ) + + lama_register["HybridGradientCrossoverOptimization"] = HybridGradientCrossoverOptimization + LLAMAHybridGradientCrossoverOptimization = NonObjectOptimizer( + method="LLAMAHybridGradientCrossoverOptimization" + ).set_name("LLAMAHybridGradientCrossoverOptimization", register=True) +except Exception as e: + print("HybridGradientCrossoverOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientDifferentialEvolution import ( + HybridGradientDifferentialEvolution, + ) + + lama_register["HybridGradientDifferentialEvolution"] = HybridGradientDifferentialEvolution + LLAMAHybridGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridGradientDifferentialEvolution" + ).set_name("LLAMAHybridGradientDifferentialEvolution", register=True) +except Exception as e: + print("HybridGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientEvolution import HybridGradientEvolution + + lama_register["HybridGradientEvolution"] = HybridGradientEvolution + LLAMAHybridGradientEvolution = NonObjectOptimizer(method="LLAMAHybridGradientEvolution").set_name( + "LLAMAHybridGradientEvolution", register=True + ) +except Exception as e: + print("HybridGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientMemoryAnnealing import HybridGradientMemoryAnnealing + + lama_register["HybridGradientMemoryAnnealing"] = HybridGradientMemoryAnnealing + LLAMAHybridGradientMemoryAnnealing = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealing" + ).set_name("LLAMAHybridGradientMemoryAnnealing", register=True) +except Exception as e: + print("HybridGradientMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV2 import HybridGradientMemoryAnnealingV2 + + lama_register["HybridGradientMemoryAnnealingV2"] = HybridGradientMemoryAnnealingV2 + LLAMAHybridGradientMemoryAnnealingV2 = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealingV2" + ).set_name("LLAMAHybridGradientMemoryAnnealingV2", register=True) +except Exception as e: + print("HybridGradientMemoryAnnealingV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV3 import HybridGradientMemoryAnnealingV3 + + lama_register["HybridGradientMemoryAnnealingV3"] = HybridGradientMemoryAnnealingV3 + LLAMAHybridGradientMemoryAnnealingV3 = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealingV3" + ).set_name("LLAMAHybridGradientMemoryAnnealingV3", register=True) +except Exception as e: + print("HybridGradientMemoryAnnealingV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientMemorySimulatedAnnealing import ( + HybridGradientMemorySimulatedAnnealing, + ) + + lama_register["HybridGradientMemorySimulatedAnnealing"] = HybridGradientMemorySimulatedAnnealing + LLAMAHybridGradientMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAHybridGradientMemorySimulatedAnnealing" + ).set_name("LLAMAHybridGradientMemorySimulatedAnnealing", register=True) +except Exception as e: + print("HybridGradientMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGradientPSO import HybridGradientPSO + + lama_register["HybridGradientPSO"] = HybridGradientPSO + LLAMAHybridGradientPSO = NonObjectOptimizer(method="LLAMAHybridGradientPSO").set_name( + "LLAMAHybridGradientPSO", register=True + ) +except Exception as e: + print("HybridGradientPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridGuidedEvolutionaryOptimizer import ( + HybridGuidedEvolutionaryOptimizer, + ) + + lama_register["HybridGuidedEvolutionaryOptimizer"] = HybridGuidedEvolutionaryOptimizer + LLAMAHybridGuidedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHybridGuidedEvolutionaryOptimizer" + ).set_name("LLAMAHybridGuidedEvolutionaryOptimizer", register=True) +except Exception as e: + print("HybridGuidedEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridMemoryAdaptiveDE import HybridMemoryAdaptiveDE + + lama_register["HybridMemoryAdaptiveDE"] = HybridMemoryAdaptiveDE + LLAMAHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE").set_name( + "LLAMAHybridMemoryAdaptiveDE", register=True + ) +except Exception as e: + print("HybridMemoryAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridMultiDimensionalAnnealing import HybridMultiDimensionalAnnealing + + lama_register["HybridMultiDimensionalAnnealing"] = HybridMultiDimensionalAnnealing + LLAMAHybridMultiDimensionalAnnealing = NonObjectOptimizer( + method="LLAMAHybridMultiDimensionalAnnealing" + ).set_name("LLAMAHybridMultiDimensionalAnnealing", register=True) +except Exception as e: + print("HybridMultiDimensionalAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridPSO_DE import HybridPSO_DE + + lama_register["HybridPSO_DE"] = HybridPSO_DE + LLAMAHybridPSO_DE = NonObjectOptimizer(method="LLAMAHybridPSO_DE").set_name( + "LLAMAHybridPSO_DE", register=True + ) +except Exception as e: + print("HybridPSO_DE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridPSO_DE_GradientOptimizer import HybridPSO_DE_GradientOptimizer + + lama_register["HybridPSO_DE_GradientOptimizer"] = HybridPSO_DE_GradientOptimizer + LLAMAHybridPSO_DE_GradientOptimizer = NonObjectOptimizer( + method="LLAMAHybridPSO_DE_GradientOptimizer" + ).set_name("LLAMAHybridPSO_DE_GradientOptimizer", register=True) +except Exception as e: + print("HybridPSO_DE_GradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridParticleDE import HybridParticleDE + + lama_register["HybridParticleDE"] = HybridParticleDE + LLAMAHybridParticleDE = NonObjectOptimizer(method="LLAMAHybridParticleDE").set_name( + "LLAMAHybridParticleDE", register=True + ) +except Exception as e: + print("HybridParticleDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridParticleDE_v2 import HybridParticleDE_v2 + + lama_register["HybridParticleDE_v2"] = HybridParticleDE_v2 + LLAMAHybridParticleDE_v2 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2").set_name( + "LLAMAHybridParticleDE_v2", register=True + ) +except Exception as e: + print("HybridParticleDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridParticleDE_v3 import HybridParticleDE_v3 + + lama_register["HybridParticleDE_v3"] = HybridParticleDE_v3 + LLAMAHybridParticleDE_v3 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3").set_name( + "LLAMAHybridParticleDE_v3", register=True + ) +except Exception as e: + print("HybridParticleDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridParticleSwarmDifferentialEvolutionOptimizer import ( + HybridParticleSwarmDifferentialEvolutionOptimizer, + ) + + lama_register["HybridParticleSwarmDifferentialEvolutionOptimizer"] = ( + HybridParticleSwarmDifferentialEvolutionOptimizer + ) + LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer" + ).set_name("LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("HybridParticleSwarmDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumAdaptiveMemeticSearch import ( + HybridQuantumAdaptiveMemeticSearch, + ) + + lama_register["HybridQuantumAdaptiveMemeticSearch"] = HybridQuantumAdaptiveMemeticSearch + LLAMAHybridQuantumAdaptiveMemeticSearch = NonObjectOptimizer( + method="LLAMAHybridQuantumAdaptiveMemeticSearch" + ).set_name("LLAMAHybridQuantumAdaptiveMemeticSearch", register=True) +except Exception as e: + print("HybridQuantumAdaptiveMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolution import ( + HybridQuantumDifferentialEvolution, + ) + + lama_register["HybridQuantumDifferentialEvolution"] = HybridQuantumDifferentialEvolution + LLAMAHybridQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolution" + ).set_name("LLAMAHybridQuantumDifferentialEvolution", register=True) +except Exception as e: + print("HybridQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart import ( + HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart"] = ( + HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart + ) + LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart" + ).set_name( + "LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart", register=True + ) +except Exception as e: + print( + "HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch import ( + HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch"] = ( + HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch + ) + LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch" + ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch", register=True) +except Exception as e: + print("HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory import ( + HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory"] = ( + HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory + ) + LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory" + ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory", register=True) +except Exception as e: + print("HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumDifferentialParticleSwarmOptimization import ( + HybridQuantumDifferentialParticleSwarmOptimization, + ) + + lama_register["HybridQuantumDifferentialParticleSwarmOptimization"] = ( + HybridQuantumDifferentialParticleSwarmOptimization + ) + LLAMAHybridQuantumDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization" + ).set_name("LLAMAHybridQuantumDifferentialParticleSwarmOptimization", register=True) +except Exception as e: + print("HybridQuantumDifferentialParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumEnhancedMultiPhaseAdaptiveDE import ( + HybridQuantumEnhancedMultiPhaseAdaptiveDE, + ) + + lama_register["HybridQuantumEnhancedMultiPhaseAdaptiveDE"] = HybridQuantumEnhancedMultiPhaseAdaptiveDE + LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( + method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE" + ).set_name("LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE", register=True) +except Exception as e: + print("HybridQuantumEnhancedMultiPhaseAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumEvolution import HybridQuantumEvolution + + lama_register["HybridQuantumEvolution"] = HybridQuantumEvolution + LLAMAHybridQuantumEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution").set_name( + "LLAMAHybridQuantumEvolution", register=True + ) +except Exception as e: + print("HybridQuantumEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumGradientEvolution import HybridQuantumGradientEvolution + + lama_register["HybridQuantumGradientEvolution"] = HybridQuantumGradientEvolution + LLAMAHybridQuantumGradientEvolution = NonObjectOptimizer( + method="LLAMAHybridQuantumGradientEvolution" + ).set_name("LLAMAHybridQuantumGradientEvolution", register=True) +except Exception as e: + print("HybridQuantumGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumLevyAdaptiveSwarmV2 import HybridQuantumLevyAdaptiveSwarmV2 + + lama_register["HybridQuantumLevyAdaptiveSwarmV2"] = HybridQuantumLevyAdaptiveSwarmV2 + LLAMAHybridQuantumLevyAdaptiveSwarmV2 = NonObjectOptimizer( + method="LLAMAHybridQuantumLevyAdaptiveSwarmV2" + ).set_name("LLAMAHybridQuantumLevyAdaptiveSwarmV2", register=True) +except Exception as e: + print("HybridQuantumLevyAdaptiveSwarmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuantumMemeticOptimization import HybridQuantumMemeticOptimization + + lama_register["HybridQuantumMemeticOptimization"] = HybridQuantumMemeticOptimization + LLAMAHybridQuantumMemeticOptimization = NonObjectOptimizer( + method="LLAMAHybridQuantumMemeticOptimization" + ).set_name("LLAMAHybridQuantumMemeticOptimization", register=True) +except Exception as e: + print("HybridQuantumMemeticOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuasiRandomDEGradientAnnealing import ( + HybridQuasiRandomDEGradientAnnealing, + ) + + lama_register["HybridQuasiRandomDEGradientAnnealing"] = HybridQuasiRandomDEGradientAnnealing + LLAMAHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMAHybridQuasiRandomDEGradientAnnealing" + ).set_name("LLAMAHybridQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: + print("HybridQuasiRandomDEGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridQuasiRandomGradientDifferentialEvolution import ( + HybridQuasiRandomGradientDifferentialEvolution, + ) + + lama_register["HybridQuasiRandomGradientDifferentialEvolution"] = ( + HybridQuasiRandomGradientDifferentialEvolution + ) + LLAMAHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridQuasiRandomGradientDifferentialEvolution" + ).set_name("LLAMAHybridQuasiRandomGradientDifferentialEvolution", register=True) +except Exception as e: + print("HybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost import ( + HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost, + ) + + lama_register["HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost"] = ( + HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost + ) + LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost = NonObjectOptimizer( + method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost" + ).set_name("LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost", register=True) +except Exception as e: + print("HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HybridSelfAdaptiveDifferentialEvolution import ( + HybridSelfAdaptiveDifferentialEvolution, + ) + + lama_register["HybridSelfAdaptiveDifferentialEvolution"] = HybridSelfAdaptiveDifferentialEvolution + LLAMAHybridSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("HybridSelfAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveConvergenceStrategy import HyperAdaptiveConvergenceStrategy + + lama_register["HyperAdaptiveConvergenceStrategy"] = HyperAdaptiveConvergenceStrategy + LLAMAHyperAdaptiveConvergenceStrategy = NonObjectOptimizer( + method="LLAMAHyperAdaptiveConvergenceStrategy" + ).set_name("LLAMAHyperAdaptiveConvergenceStrategy", register=True) +except Exception as e: + print("HyperAdaptiveConvergenceStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveGradientRAMEDS import HyperAdaptiveGradientRAMEDS + + lama_register["HyperAdaptiveGradientRAMEDS"] = HyperAdaptiveGradientRAMEDS + LLAMAHyperAdaptiveGradientRAMEDS = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS").set_name( + "LLAMAHyperAdaptiveGradientRAMEDS", register=True + ) +except Exception as e: + print("HyperAdaptiveGradientRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveHybridDEPSOwithDynamicRestart import ( + HyperAdaptiveHybridDEPSOwithDynamicRestart, + ) + + lama_register["HyperAdaptiveHybridDEPSOwithDynamicRestart"] = HyperAdaptiveHybridDEPSOwithDynamicRestart + LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart = NonObjectOptimizer( + method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart" + ).set_name("LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart", register=True) +except Exception as e: + print("HyperAdaptiveHybridDEPSOwithDynamicRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveMemoryGuidedStrategyV74 import ( + HyperAdaptiveMemoryGuidedStrategyV74, + ) + + lama_register["HyperAdaptiveMemoryGuidedStrategyV74"] = HyperAdaptiveMemoryGuidedStrategyV74 + LLAMAHyperAdaptiveMemoryGuidedStrategyV74 = NonObjectOptimizer( + method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74" + ).set_name("LLAMAHyperAdaptiveMemoryGuidedStrategyV74", register=True) +except Exception as e: + print("HyperAdaptiveMemoryGuidedStrategyV74 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptivePrecisionOptimizer import HyperAdaptivePrecisionOptimizer + + lama_register["HyperAdaptivePrecisionOptimizer"] = HyperAdaptivePrecisionOptimizer + LLAMAHyperAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperAdaptivePrecisionOptimizer" + ).set_name("LLAMAHyperAdaptivePrecisionOptimizer", register=True) +except Exception as e: + print("HyperAdaptivePrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveSinusoidalDifferentialSwarm import ( + HyperAdaptiveSinusoidalDifferentialSwarm, + ) + + lama_register["HyperAdaptiveSinusoidalDifferentialSwarm"] = HyperAdaptiveSinusoidalDifferentialSwarm + LLAMAHyperAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAHyperAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: + print("HyperAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdaptiveStrategyDE import HyperAdaptiveStrategyDE + + lama_register["HyperAdaptiveStrategyDE"] = HyperAdaptiveStrategyDE + LLAMAHyperAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE").set_name( + "LLAMAHyperAdaptiveStrategyDE", register=True + ) +except Exception as e: + print("HyperAdaptiveStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperAdvancedDynamicPrecisionOptimizerV41 import ( + HyperAdvancedDynamicPrecisionOptimizerV41, + ) + + lama_register["HyperAdvancedDynamicPrecisionOptimizerV41"] = HyperAdvancedDynamicPrecisionOptimizerV41 + LLAMAHyperAdvancedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( + method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41" + ).set_name("LLAMAHyperAdvancedDynamicPrecisionOptimizerV41", register=True) +except Exception as e: + print("HyperAdvancedDynamicPrecisionOptimizerV41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperEvolvedDynamicPrecisionOptimizerV48 import ( + HyperEvolvedDynamicPrecisionOptimizerV48, + ) + + lama_register["HyperEvolvedDynamicPrecisionOptimizerV48"] = HyperEvolvedDynamicPrecisionOptimizerV48 + LLAMAHyperEvolvedDynamicPrecisionOptimizerV48 = NonObjectOptimizer( + method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48" + ).set_name("LLAMAHyperEvolvedDynamicPrecisionOptimizerV48", register=True) +except Exception as e: + print("HyperEvolvedDynamicPrecisionOptimizerV48 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperEvolvedDynamicRAMEDS import HyperEvolvedDynamicRAMEDS + + lama_register["HyperEvolvedDynamicRAMEDS"] = HyperEvolvedDynamicRAMEDS + LLAMAHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS").set_name( + "LLAMAHyperEvolvedDynamicRAMEDS", register=True + ) +except Exception as e: + print("HyperEvolvedDynamicRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperEvolvedRAMEDS import HyperEvolvedRAMEDS + + lama_register["HyperEvolvedRAMEDS"] = HyperEvolvedRAMEDS + LLAMAHyperEvolvedRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS").set_name( + "LLAMAHyperEvolvedRAMEDS", register=True + ) +except Exception as e: + print("HyperEvolvedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperFocusedAdaptiveElitistStrategyV5 import ( + HyperFocusedAdaptiveElitistStrategyV5, + ) + + lama_register["HyperFocusedAdaptiveElitistStrategyV5"] = HyperFocusedAdaptiveElitistStrategyV5 + LLAMAHyperFocusedAdaptiveElitistStrategyV5 = NonObjectOptimizer( + method="LLAMAHyperFocusedAdaptiveElitistStrategyV5" + ).set_name("LLAMAHyperFocusedAdaptiveElitistStrategyV5", register=True) +except Exception as e: + print("HyperFocusedAdaptiveElitistStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimalRAMEDS import HyperOptimalRAMEDS + + lama_register["HyperOptimalRAMEDS"] = HyperOptimalRAMEDS + LLAMAHyperOptimalRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS").set_name( + "LLAMAHyperOptimalRAMEDS", register=True + ) +except Exception as e: + print("HyperOptimalRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimalStrategicEvolutionaryOptimizerV58 import ( + HyperOptimalStrategicEvolutionaryOptimizerV58, + ) + + lama_register["HyperOptimalStrategicEvolutionaryOptimizerV58"] = ( + HyperOptimalStrategicEvolutionaryOptimizerV58 + ) + LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58 = NonObjectOptimizer( + method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58" + ).set_name("LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58", register=True) +except Exception as e: + print("HyperOptimalStrategicEvolutionaryOptimizerV58 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizer import ( + HyperOptimizedDynamicPrecisionOptimizer, + ) + + lama_register["HyperOptimizedDynamicPrecisionOptimizer"] = HyperOptimizedDynamicPrecisionOptimizer + LLAMAHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizer" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizer", register=True) +except Exception as e: + print("HyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV12 import ( + HyperOptimizedDynamicPrecisionOptimizerV12, + ) + + lama_register["HyperOptimizedDynamicPrecisionOptimizerV12"] = HyperOptimizedDynamicPrecisionOptimizerV12 + LLAMAHyperOptimizedDynamicPrecisionOptimizerV12 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV12", register=True) +except Exception as e: + print("HyperOptimizedDynamicPrecisionOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV42 import ( + HyperOptimizedDynamicPrecisionOptimizerV42, + ) + + lama_register["HyperOptimizedDynamicPrecisionOptimizerV42"] = HyperOptimizedDynamicPrecisionOptimizerV42 + LLAMAHyperOptimizedDynamicPrecisionOptimizerV42 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV42", register=True) +except Exception as e: + print("HyperOptimizedDynamicPrecisionOptimizerV42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV43 import ( + HyperOptimizedDynamicPrecisionOptimizerV43, + ) + + lama_register["HyperOptimizedDynamicPrecisionOptimizerV43"] = HyperOptimizedDynamicPrecisionOptimizerV43 + LLAMAHyperOptimizedDynamicPrecisionOptimizerV43 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV43", register=True) +except Exception as e: + print("HyperOptimizedDynamicPrecisionOptimizerV43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV57 import ( + HyperOptimizedDynamicPrecisionOptimizerV57, + ) + + lama_register["HyperOptimizedDynamicPrecisionOptimizerV57"] = HyperOptimizedDynamicPrecisionOptimizerV57 + LLAMAHyperOptimizedDynamicPrecisionOptimizerV57 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV57", register=True) +except Exception as e: + print("HyperOptimizedDynamicPrecisionOptimizerV57 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedEvolutionaryGradientOptimizerV61 import ( + HyperOptimizedEvolutionaryGradientOptimizerV61, + ) + + lama_register["HyperOptimizedEvolutionaryGradientOptimizerV61"] = ( + HyperOptimizedEvolutionaryGradientOptimizerV61 + ) + LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61 = NonObjectOptimizer( + method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61" + ).set_name("LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61", register=True) +except Exception as e: + print("HyperOptimizedEvolutionaryGradientOptimizerV61 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedGradientEnhancedRAMEDS import ( + HyperOptimizedGradientEnhancedRAMEDS, + ) + + lama_register["HyperOptimizedGradientEnhancedRAMEDS"] = HyperOptimizedGradientEnhancedRAMEDS + LLAMAHyperOptimizedGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMAHyperOptimizedGradientEnhancedRAMEDS" + ).set_name("LLAMAHyperOptimizedGradientEnhancedRAMEDS", register=True) +except Exception as e: + print("HyperOptimizedGradientEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 import ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV47, + ) + + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV47"] = ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 + ) + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47 = NonObjectOptimizer( + method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47" + ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47", register=True) +except Exception as e: + print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 import ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV48, + ) + + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV48"] = ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 + ) + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48 = NonObjectOptimizer( + method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48" + ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48", register=True) +except Exception as e: + print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedRAMEDS import HyperOptimizedRAMEDS + + lama_register["HyperOptimizedRAMEDS"] = HyperOptimizedRAMEDS + LLAMAHyperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS").set_name( + "LLAMAHyperOptimizedRAMEDS", register=True + ) +except Exception as e: + print("HyperOptimizedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedSpiralDifferentialOptimizerV8 import ( + HyperOptimizedSpiralDifferentialOptimizerV8, + ) + + lama_register["HyperOptimizedSpiralDifferentialOptimizerV8"] = HyperOptimizedSpiralDifferentialOptimizerV8 + LLAMAHyperOptimizedSpiralDifferentialOptimizerV8 = NonObjectOptimizer( + method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8" + ).set_name("LLAMAHyperOptimizedSpiralDifferentialOptimizerV8", register=True) +except Exception as e: + print("HyperOptimizedSpiralDifferentialOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedThermalEvolutionaryOptimizer import ( + HyperOptimizedThermalEvolutionaryOptimizer, + ) + + lama_register["HyperOptimizedThermalEvolutionaryOptimizer"] = HyperOptimizedThermalEvolutionaryOptimizer + LLAMAHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMAHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: + print("HyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperOptimizedUltraRefinedRAMEDS import HyperOptimizedUltraRefinedRAMEDS + + lama_register["HyperOptimizedUltraRefinedRAMEDS"] = HyperOptimizedUltraRefinedRAMEDS + LLAMAHyperOptimizedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMAHyperOptimizedUltraRefinedRAMEDS" + ).set_name("LLAMAHyperOptimizedUltraRefinedRAMEDS", register=True) +except Exception as e: + print("HyperOptimizedUltraRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperPreciseEvolutionaryOptimizer import ( + HyperPreciseEvolutionaryOptimizer, + ) + + lama_register["HyperPreciseEvolutionaryOptimizer"] = HyperPreciseEvolutionaryOptimizer + LLAMAHyperPreciseEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHyperPreciseEvolutionaryOptimizer" + ).set_name("LLAMAHyperPreciseEvolutionaryOptimizer", register=True) +except Exception as e: + print("HyperPreciseEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperPrecisionEvolutionaryOptimizerV23 import ( + HyperPrecisionEvolutionaryOptimizerV23, + ) + + lama_register["HyperPrecisionEvolutionaryOptimizerV23"] = HyperPrecisionEvolutionaryOptimizerV23 + LLAMAHyperPrecisionEvolutionaryOptimizerV23 = NonObjectOptimizer( + method="LLAMAHyperPrecisionEvolutionaryOptimizerV23" + ).set_name("LLAMAHyperPrecisionEvolutionaryOptimizerV23", register=True) +except Exception as e: + print("HyperPrecisionEvolutionaryOptimizerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperQuantumConvergenceOptimizer import HyperQuantumConvergenceOptimizer + + lama_register["HyperQuantumConvergenceOptimizer"] = HyperQuantumConvergenceOptimizer + LLAMAHyperQuantumConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAHyperQuantumConvergenceOptimizer" + ).set_name("LLAMAHyperQuantumConvergenceOptimizer", register=True) +except Exception as e: + print("HyperQuantumConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperQuantumStateCrossoverOptimization import ( + HyperQuantumStateCrossoverOptimization, + ) + + lama_register["HyperQuantumStateCrossoverOptimization"] = HyperQuantumStateCrossoverOptimization + LLAMAHyperQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAHyperQuantumStateCrossoverOptimization" + ).set_name("LLAMAHyperQuantumStateCrossoverOptimization", register=True) +except Exception as e: + print("HyperQuantumStateCrossoverOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRAMEDS import HyperRAMEDS + + lama_register["HyperRAMEDS"] = HyperRAMEDS + LLAMAHyperRAMEDS = NonObjectOptimizer(method="LLAMAHyperRAMEDS").set_name( + "LLAMAHyperRAMEDS", register=True + ) +except Exception as e: + print("HyperRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 import ( + HyperRefinedAdaptiveDynamicPrecisionOptimizerV52, + ) + + lama_register["HyperRefinedAdaptiveDynamicPrecisionOptimizerV52"] = ( + HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 + ) + LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52 = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52" + ).set_name("LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52", register=True) +except Exception as e: + print("HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedAdaptiveGuidedMutationOptimizer import ( + HyperRefinedAdaptiveGuidedMutationOptimizer, + ) + + lama_register["HyperRefinedAdaptiveGuidedMutationOptimizer"] = HyperRefinedAdaptiveGuidedMutationOptimizer + LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: + print("HyperRefinedAdaptiveGuidedMutationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionOptimizer import ( + HyperRefinedAdaptivePrecisionOptimizer, + ) + + lama_register["HyperRefinedAdaptivePrecisionOptimizer"] = HyperRefinedAdaptivePrecisionOptimizer + LLAMAHyperRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAHyperRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: + print("HyperRefinedAdaptivePrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionSearch import ( + HyperRefinedAdaptivePrecisionSearch, + ) + + lama_register["HyperRefinedAdaptivePrecisionSearch"] = HyperRefinedAdaptivePrecisionSearch + LLAMAHyperRefinedAdaptivePrecisionSearch = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptivePrecisionSearch" + ).set_name("LLAMAHyperRefinedAdaptivePrecisionSearch", register=True) +except Exception as e: + print("HyperRefinedAdaptivePrecisionSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV3 import ( + HyperRefinedDynamicPrecisionOptimizerV3, + ) + + lama_register["HyperRefinedDynamicPrecisionOptimizerV3"] = HyperRefinedDynamicPrecisionOptimizerV3 + LLAMAHyperRefinedDynamicPrecisionOptimizerV3 = NonObjectOptimizer( + method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3" + ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV3", register=True) +except Exception as e: + print("HyperRefinedDynamicPrecisionOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV49 import ( + HyperRefinedDynamicPrecisionOptimizerV49, + ) + + lama_register["HyperRefinedDynamicPrecisionOptimizerV49"] = HyperRefinedDynamicPrecisionOptimizerV49 + LLAMAHyperRefinedDynamicPrecisionOptimizerV49 = NonObjectOptimizer( + method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49" + ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV49", register=True) +except Exception as e: + print("HyperRefinedDynamicPrecisionOptimizerV49 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedEnhancedRAMEDS import HyperRefinedEnhancedRAMEDS + + lama_register["HyperRefinedEnhancedRAMEDS"] = HyperRefinedEnhancedRAMEDS + LLAMAHyperRefinedEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS").set_name( + "LLAMAHyperRefinedEnhancedRAMEDS", register=True + ) +except Exception as e: + print("HyperRefinedEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperRefinedQuantumVelocityOptimizer import ( + HyperRefinedQuantumVelocityOptimizer, + ) + + lama_register["HyperRefinedQuantumVelocityOptimizer"] = HyperRefinedQuantumVelocityOptimizer + LLAMAHyperRefinedQuantumVelocityOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedQuantumVelocityOptimizer" + ).set_name("LLAMAHyperRefinedQuantumVelocityOptimizer", register=True) +except Exception as e: + print("HyperRefinedQuantumVelocityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperSpiralDifferentialClimber import HyperSpiralDifferentialClimber + + lama_register["HyperSpiralDifferentialClimber"] = HyperSpiralDifferentialClimber + LLAMAHyperSpiralDifferentialClimber = NonObjectOptimizer( + method="LLAMAHyperSpiralDifferentialClimber" + ).set_name("LLAMAHyperSpiralDifferentialClimber", register=True) +except Exception as e: + print("HyperSpiralDifferentialClimber can not be imported: ", e) + +try: + from nevergrad.optimization.lama.HyperSpiralDifferentialClimberV2 import HyperSpiralDifferentialClimberV2 + + lama_register["HyperSpiralDifferentialClimberV2"] = HyperSpiralDifferentialClimberV2 + LLAMAHyperSpiralDifferentialClimberV2 = NonObjectOptimizer( + method="LLAMAHyperSpiralDifferentialClimberV2" + ).set_name("LLAMAHyperSpiralDifferentialClimberV2", register=True) +except Exception as e: + print("HyperSpiralDifferentialClimberV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IADEA import IADEA + + lama_register["IADEA"] = IADEA + LLAMAIADEA = NonObjectOptimizer(method="LLAMAIADEA").set_name("LLAMAIADEA", register=True) +except Exception as e: + print("IADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IAGEA import IAGEA + + lama_register["IAGEA"] = IAGEA + LLAMAIAGEA = NonObjectOptimizer(method="LLAMAIAGEA").set_name("LLAMAIAGEA", register=True) +except Exception as e: + print("IAGEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IALNF import IALNF + + lama_register["IALNF"] = IALNF + LLAMAIALNF = NonObjectOptimizer(method="LLAMAIALNF").set_name("LLAMAIALNF", register=True) +except Exception as e: + print("IALNF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IASDD import IASDD + + lama_register["IASDD"] = IASDD + LLAMAIASDD = NonObjectOptimizer(method="LLAMAIASDD").set_name("LLAMAIASDD", register=True) +except Exception as e: + print("IASDD can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveCovarianceGradientSearch import ( + ImprovedAdaptiveCovarianceGradientSearch, + ) + + lama_register["ImprovedAdaptiveCovarianceGradientSearch"] = ImprovedAdaptiveCovarianceGradientSearch + LLAMAImprovedAdaptiveCovarianceGradientSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveCovarianceGradientSearch" + ).set_name("LLAMAImprovedAdaptiveCovarianceGradientSearch", register=True) +except Exception as e: + print("ImprovedAdaptiveCovarianceGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveDifferentialEvolution import ( + ImprovedAdaptiveDifferentialEvolution, + ) + + lama_register["ImprovedAdaptiveDifferentialEvolution"] = ImprovedAdaptiveDifferentialEvolution + LLAMAImprovedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveEliteGuidedRestartDE import ( + ImprovedAdaptiveEliteGuidedRestartDE, + ) + + lama_register["ImprovedAdaptiveEliteGuidedRestartDE"] = ImprovedAdaptiveEliteGuidedRestartDE + LLAMAImprovedAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEliteGuidedRestartDE" + ).set_name("LLAMAImprovedAdaptiveEliteGuidedRestartDE", register=True) +except Exception as e: + print("ImprovedAdaptiveEliteGuidedRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveEnhancedQuantumHarmonySearch import ( + ImprovedAdaptiveEnhancedQuantumHarmonySearch, + ) + + lama_register["ImprovedAdaptiveEnhancedQuantumHarmonySearch"] = ( + ImprovedAdaptiveEnhancedQuantumHarmonySearch + ) + LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch" + ).set_name("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch", register=True) +except Exception as e: + print("ImprovedAdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveEvolutionaryHyperHeuristic import ( + ImprovedAdaptiveEvolutionaryHyperHeuristic, + ) + + lama_register["ImprovedAdaptiveEvolutionaryHyperHeuristic"] = ImprovedAdaptiveEvolutionaryHyperHeuristic + LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic" + ).set_name("LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic", register=True) +except Exception as e: + print("ImprovedAdaptiveEvolutionaryHyperHeuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveExplorationExploitationAlgorithm import ( + ImprovedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["ImprovedAdaptiveExplorationExploitationAlgorithm"] = ( + ImprovedAdaptiveExplorationExploitationAlgorithm + ) + LLAMAImprovedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAImprovedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: + print("ImprovedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonyMemeticAlgorithmV17 import ( + ImprovedAdaptiveHarmonyMemeticAlgorithmV17, + ) + + lama_register["ImprovedAdaptiveHarmonyMemeticAlgorithmV17"] = ImprovedAdaptiveHarmonyMemeticAlgorithmV17 + LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17 = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17" + ).set_name("LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17", register=True) +except Exception as e: + print("ImprovedAdaptiveHarmonyMemeticAlgorithmV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonySearchWithCuckooInspiration import ( + ImprovedAdaptiveHarmonySearchWithCuckooInspiration, + ) + + lama_register["ImprovedAdaptiveHarmonySearchWithCuckooInspiration"] = ( + ImprovedAdaptiveHarmonySearchWithCuckooInspiration + ) + LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration" + ).set_name("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration", register=True) +except Exception as e: + print("ImprovedAdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHybridMetaOptimizer import ( + ImprovedAdaptiveHybridMetaOptimizer, + ) + + lama_register["ImprovedAdaptiveHybridMetaOptimizer"] = ImprovedAdaptiveHybridMetaOptimizer + LLAMAImprovedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridMetaOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridMetaOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveHybridMetaOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimization import ( + ImprovedAdaptiveHybridOptimization, + ) + + lama_register["ImprovedAdaptiveHybridOptimization"] = ImprovedAdaptiveHybridOptimization + LLAMAImprovedAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridOptimization" + ).set_name("LLAMAImprovedAdaptiveHybridOptimization", register=True) +except Exception as e: + print("ImprovedAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimizer import ImprovedAdaptiveHybridOptimizer + + lama_register["ImprovedAdaptiveHybridOptimizer"] = ImprovedAdaptiveHybridOptimizer + LLAMAImprovedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveHybridSearchOptimizer import ( + ImprovedAdaptiveHybridSearchOptimizer, + ) + + lama_register["ImprovedAdaptiveHybridSearchOptimizer"] = ImprovedAdaptiveHybridSearchOptimizer + LLAMAImprovedAdaptiveHybridSearchOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridSearchOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridSearchOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveHybridSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveLevyHarmonySearch import ( + ImprovedAdaptiveLevyHarmonySearch, + ) + + lama_register["ImprovedAdaptiveLevyHarmonySearch"] = ImprovedAdaptiveLevyHarmonySearch + LLAMAImprovedAdaptiveLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveLevyHarmonySearch" + ).set_name("LLAMAImprovedAdaptiveLevyHarmonySearch", register=True) +except Exception as e: + print("ImprovedAdaptiveLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveMemeticHybridOptimizer import ( + ImprovedAdaptiveMemeticHybridOptimizer, + ) + + lama_register["ImprovedAdaptiveMemeticHybridOptimizer"] = ImprovedAdaptiveMemeticHybridOptimizer + LLAMAImprovedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAImprovedAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveMemeticHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveMultiOperatorSearch import ( + ImprovedAdaptiveMultiOperatorSearch, + ) + + lama_register["ImprovedAdaptiveMultiOperatorSearch"] = ImprovedAdaptiveMultiOperatorSearch + LLAMAImprovedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAImprovedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: + print("ImprovedAdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyDifferentialEvolution import ( + ImprovedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["ImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( + ImprovedAdaptiveMultiStrategyDifferentialEvolution + ) + LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyOptimizer import ( + ImprovedAdaptiveMultiStrategyOptimizer, + ) + + lama_register["ImprovedAdaptiveMultiStrategyOptimizer"] = ImprovedAdaptiveMultiStrategyOptimizer + LLAMAImprovedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAImprovedAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveMultiStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveParticleSwarmOptimization import ( + ImprovedAdaptiveParticleSwarmOptimization, + ) + + lama_register["ImprovedAdaptiveParticleSwarmOptimization"] = ImprovedAdaptiveParticleSwarmOptimization + LLAMAImprovedAdaptiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveParticleSwarmOptimization" + ).set_name("LLAMAImprovedAdaptiveParticleSwarmOptimization", register=True) +except Exception as e: + print("ImprovedAdaptiveParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptivePopulationMemeticOptimizer import ( + ImprovedAdaptivePopulationMemeticOptimizer, + ) + + lama_register["ImprovedAdaptivePopulationMemeticOptimizer"] = ImprovedAdaptivePopulationMemeticOptimizer + LLAMAImprovedAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptivePopulationMemeticOptimizer" + ).set_name("LLAMAImprovedAdaptivePopulationMemeticOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptivePopulationMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( + ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, + ) + + lama_register["ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( + ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + ) + LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" + ).set_name("LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) +except Exception as e: + print("ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumEntropyDE import ImprovedAdaptiveQuantumEntropyDE + + lama_register["ImprovedAdaptiveQuantumEntropyDE"] = ImprovedAdaptiveQuantumEntropyDE + LLAMAImprovedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumEntropyDE" + ).set_name("LLAMAImprovedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: + print("ImprovedAdaptiveQuantumEntropyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumLevyOptimizer import ( + ImprovedAdaptiveQuantumLevyOptimizer, + ) + + lama_register["ImprovedAdaptiveQuantumLevyOptimizer"] = ImprovedAdaptiveQuantumLevyOptimizer + LLAMAImprovedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAImprovedAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: + print("ImprovedAdaptiveQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumPSO import ImprovedAdaptiveQuantumPSO + + lama_register["ImprovedAdaptiveQuantumPSO"] = ImprovedAdaptiveQuantumPSO + LLAMAImprovedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO").set_name( + "LLAMAImprovedAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("ImprovedAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumSwarmOptimization import ( + ImprovedAdaptiveQuantumSwarmOptimization, + ) + + lama_register["ImprovedAdaptiveQuantumSwarmOptimization"] = ImprovedAdaptiveQuantumSwarmOptimization + LLAMAImprovedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumSwarmOptimization" + ).set_name("LLAMAImprovedAdaptiveQuantumSwarmOptimization", register=True) +except Exception as e: + print("ImprovedAdaptiveQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedAdvancedHybridAdaptiveOptimization import ( + ImprovedAdvancedHybridAdaptiveOptimization, + ) + + lama_register["ImprovedAdvancedHybridAdaptiveOptimization"] = ImprovedAdvancedHybridAdaptiveOptimization + LLAMAImprovedAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdvancedHybridAdaptiveOptimization" + ).set_name("LLAMAImprovedAdvancedHybridAdaptiveOptimization", register=True) +except Exception as e: + print("ImprovedAdvancedHybridAdaptiveOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedBalancedQuantumLevyDifferentialSearch import ( + ImprovedBalancedQuantumLevyDifferentialSearch, + ) + + lama_register["ImprovedBalancedQuantumLevyDifferentialSearch"] = ( + ImprovedBalancedQuantumLevyDifferentialSearch + ) + LLAMAImprovedBalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch" + ).set_name("LLAMAImprovedBalancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: + print("ImprovedBalancedQuantumLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedCooperativeAdaptiveEvolutionaryOptimizer import ( + ImprovedCooperativeAdaptiveEvolutionaryOptimizer, + ) + + lama_register["ImprovedCooperativeAdaptiveEvolutionaryOptimizer"] = ( + ImprovedCooperativeAdaptiveEvolutionaryOptimizer + ) + LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: + print("ImprovedCooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedCulturalDifferentialMemeticEvolution import ( + ImprovedCulturalDifferentialMemeticEvolution, + ) + + lama_register["ImprovedCulturalDifferentialMemeticEvolution"] = ( + ImprovedCulturalDifferentialMemeticEvolution + ) + LLAMAImprovedCulturalDifferentialMemeticEvolution = NonObjectOptimizer( + method="LLAMAImprovedCulturalDifferentialMemeticEvolution" + ).set_name("LLAMAImprovedCulturalDifferentialMemeticEvolution", register=True) +except Exception as e: + print("ImprovedCulturalDifferentialMemeticEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedCulturalEvolutionaryOptimizer import ( + ImprovedCulturalEvolutionaryOptimizer, + ) + + lama_register["ImprovedCulturalEvolutionaryOptimizer"] = ImprovedCulturalEvolutionaryOptimizer + LLAMAImprovedCulturalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAImprovedCulturalEvolutionaryOptimizer" + ).set_name("LLAMAImprovedCulturalEvolutionaryOptimizer", register=True) +except Exception as e: + print("ImprovedCulturalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDiversifiedHarmonySearchOptimizer import ( + ImprovedDiversifiedHarmonySearchOptimizer, + ) + + lama_register["ImprovedDiversifiedHarmonySearchOptimizer"] = ImprovedDiversifiedHarmonySearchOptimizer + LLAMAImprovedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAImprovedDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAImprovedDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: + print("ImprovedDiversifiedHarmonySearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveMemoryStrategyV58 import ( + ImprovedDualPhaseAdaptiveMemoryStrategyV58, + ) + + lama_register["ImprovedDualPhaseAdaptiveMemoryStrategyV58"] = ImprovedDualPhaseAdaptiveMemoryStrategyV58 + LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58 = NonObjectOptimizer( + method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58" + ).set_name("LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58", register=True) +except Exception as e: + print("ImprovedDualPhaseAdaptiveMemoryStrategyV58 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 import ( + ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1, + ) + + lama_register["ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1"] = ( + ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 + ) + LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 = NonObjectOptimizer( + method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1" + ).set_name("LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1", register=True) +except Exception as e: + print("ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 import ( + ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2, + ) + + lama_register["ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2"] = ( + ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 + ) + LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2" + ).set_name("LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2", register=True) +except Exception as e: + print("ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveExplorationOptimization import ( + ImprovedDynamicAdaptiveExplorationOptimization, + ) + + lama_register["ImprovedDynamicAdaptiveExplorationOptimization"] = ( + ImprovedDynamicAdaptiveExplorationOptimization + ) + LLAMAImprovedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAImprovedDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("ImprovedDynamicAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSO import ( + ImprovedDynamicAdaptiveHybridDEPSO, + ) + + lama_register["ImprovedDynamicAdaptiveHybridDEPSO"] = ImprovedDynamicAdaptiveHybridDEPSO + LLAMAImprovedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("ImprovedDynamicAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicHarmonyFireworksSearch import ( + ImprovedDynamicHarmonyFireworksSearch, + ) + + lama_register["ImprovedDynamicHarmonyFireworksSearch"] = ImprovedDynamicHarmonyFireworksSearch + LLAMAImprovedDynamicHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAImprovedDynamicHarmonyFireworksSearch" + ).set_name("LLAMAImprovedDynamicHarmonyFireworksSearch", register=True) +except Exception as e: + print("ImprovedDynamicHarmonyFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicHybridDEPSOWithEliteMemoryV3 import ( + ImprovedDynamicHybridDEPSOWithEliteMemoryV3, + ) + + lama_register["ImprovedDynamicHybridDEPSOWithEliteMemoryV3"] = ImprovedDynamicHybridDEPSOWithEliteMemoryV3 + LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3 = NonObjectOptimizer( + method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3" + ).set_name("LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3", register=True) +except Exception as e: + print("ImprovedDynamicHybridDEPSOWithEliteMemoryV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedDynamicQuantumSwarmOptimization import ( + ImprovedDynamicQuantumSwarmOptimization, + ) + + lama_register["ImprovedDynamicQuantumSwarmOptimization"] = ImprovedDynamicQuantumSwarmOptimization + LLAMAImprovedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("ImprovedDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 import ( + ImprovedEliteAdaptiveCrowdingHybridOptimizerV2, + ) + + lama_register["ImprovedEliteAdaptiveCrowdingHybridOptimizerV2"] = ( + ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 + ) + LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2" + ).set_name("LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2", register=True) +except Exception as e: + print("ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemeticDifferentialEvolution import ( + ImprovedEliteAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["ImprovedEliteAdaptiveMemeticDifferentialEvolution"] = ( + ImprovedEliteAdaptiveMemeticDifferentialEvolution + ) + LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedEliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemoryHybridOptimizer import ( + ImprovedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["ImprovedEliteAdaptiveMemoryHybridOptimizer"] = ImprovedEliteAdaptiveMemoryHybridOptimizer + LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("ImprovedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteGuidedHybridAdaptiveDE import ( + ImprovedEliteGuidedHybridAdaptiveDE, + ) + + lama_register["ImprovedEliteGuidedHybridAdaptiveDE"] = ImprovedEliteGuidedHybridAdaptiveDE + LLAMAImprovedEliteGuidedHybridAdaptiveDE = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedHybridAdaptiveDE" + ).set_name("LLAMAImprovedEliteGuidedHybridAdaptiveDE", register=True) +except Exception as e: + print("ImprovedEliteGuidedHybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE import ImprovedEliteGuidedMutationDE + + lama_register["ImprovedEliteGuidedMutationDE"] = ImprovedEliteGuidedMutationDE + LLAMAImprovedEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedMutationDE" + ).set_name("LLAMAImprovedEliteGuidedMutationDE", register=True) +except Exception as e: + print("ImprovedEliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE_v2 import ImprovedEliteGuidedMutationDE_v2 + + lama_register["ImprovedEliteGuidedMutationDE_v2"] = ImprovedEliteGuidedMutationDE_v2 + LLAMAImprovedEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedMutationDE_v2" + ).set_name("LLAMAImprovedEliteGuidedMutationDE_v2", register=True) +except Exception as e: + print("ImprovedEliteGuidedMutationDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEliteQuantumDifferentialMemeticOptimizer import ( + ImprovedEliteQuantumDifferentialMemeticOptimizer, + ) + + lama_register["ImprovedEliteQuantumDifferentialMemeticOptimizer"] = ( + ImprovedEliteQuantumDifferentialMemeticOptimizer + ) + LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: + print("ImprovedEliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 import ( + ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6, + ) + + lama_register["ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6"] = ( + ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 + ) + LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6" + ).set_name("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6", register=True) +except Exception as e: + print("ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 import ( + ImprovedEnhancedAdaptiveDynamicHarmonySearchV4, + ) + + lama_register["ImprovedEnhancedAdaptiveDynamicHarmonySearchV4"] = ( + ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 + ) + LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4" + ).set_name("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4", register=True) +except Exception as e: + print("ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 import ( + ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19, + ) + + lama_register["ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19"] = ( + ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 + ) + LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19" + ).set_name("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19", register=True) +except Exception as e: + print("ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveLevyHarmonySearchV4 import ( + ImprovedEnhancedAdaptiveLevyHarmonySearchV4, + ) + + lama_register["ImprovedEnhancedAdaptiveLevyHarmonySearchV4"] = ImprovedEnhancedAdaptiveLevyHarmonySearchV4 + LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4" + ).set_name("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4", register=True) +except Exception as e: + print("ImprovedEnhancedAdaptiveLevyHarmonySearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 import ( + ImprovedEnhancedAdaptiveMetaNetAQAPSOv4, + ) + + lama_register["ImprovedEnhancedAdaptiveMetaNetAQAPSOv4"] = ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 + LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4" + ).set_name("LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4", register=True) +except Exception as e: + print("ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 import ( + ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15, + ) + + lama_register["ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15"] = ( + ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 + ) + LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15" + ).set_name("LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15", register=True) +except Exception as e: + print("ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v54, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v54"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 + ) + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54", register=True) +except Exception as e: + print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v61, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v61"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 + ) + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61", register=True) +except Exception as e: + print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v65, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v65"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 + ) + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65", register=True) +except Exception as e: + print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDiversifiedGravitationalSwarmOptimization import ( + ImprovedEnhancedDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["ImprovedEnhancedDiversifiedGravitationalSwarmOptimization"] = ( + ImprovedEnhancedDiversifiedGravitationalSwarmOptimization + ) + LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: + print("ImprovedEnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDynamicDifferentialEvolution import ( + ImprovedEnhancedDynamicDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedDynamicDifferentialEvolution"] = ( + ImprovedEnhancedDynamicDifferentialEvolution + ) + LLAMAImprovedEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedEnhancedDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDynamicHarmonyAlgorithm import ( + ImprovedEnhancedDynamicHarmonyAlgorithm, + ) + + lama_register["ImprovedEnhancedDynamicHarmonyAlgorithm"] = ImprovedEnhancedDynamicHarmonyAlgorithm + LLAMAImprovedEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm" + ).set_name("LLAMAImprovedEnhancedDynamicHarmonyAlgorithm", register=True) +except Exception as e: + print("ImprovedEnhancedDynamicHarmonyAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLevyHarmonySearch import ( + ImprovedEnhancedDynamicLevyHarmonySearch, + ) + + lama_register["ImprovedEnhancedDynamicLevyHarmonySearch"] = ImprovedEnhancedDynamicLevyHarmonySearch + LLAMAImprovedEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch" + ).set_name("LLAMAImprovedEnhancedDynamicLevyHarmonySearch", register=True) +except Exception as e: + print("ImprovedEnhancedDynamicLevyHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm import ( + ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( + ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm + ) + LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: + print("ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedDynamicQuantumSwarmOptimization import ( + ImprovedEnhancedDynamicQuantumSwarmOptimization, + ) + + lama_register["ImprovedEnhancedDynamicQuantumSwarmOptimization"] = ( + ImprovedEnhancedDynamicQuantumSwarmOptimization + ) + LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("ImprovedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedEliteGuidedMassQGSA_v84 import ( + ImprovedEnhancedEliteGuidedMassQGSA_v84, + ) + + lama_register["ImprovedEnhancedEliteGuidedMassQGSA_v84"] = ImprovedEnhancedEliteGuidedMassQGSA_v84 + LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84" + ).set_name("LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84", register=True) +except Exception as e: + print("ImprovedEnhancedEliteGuidedMassQGSA_v84 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 import ( + ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11, + ) + + lama_register["ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11"] = ( + ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 + ) + LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11" + ).set_name("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11", register=True) +except Exception as e: + print("ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedEvolutionaryFireworksSearch import ( + ImprovedEnhancedEvolutionaryFireworksSearch, + ) + + lama_register["ImprovedEnhancedEvolutionaryFireworksSearch"] = ImprovedEnhancedEvolutionaryFireworksSearch + LLAMAImprovedEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAImprovedEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: + print("ImprovedEnhancedEvolutionaryFireworksSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmOptimization import ( + ImprovedEnhancedFireworkAlgorithmOptimization, + ) + + lama_register["ImprovedEnhancedFireworkAlgorithmOptimization"] = ( + ImprovedEnhancedFireworkAlgorithmOptimization + ) + LLAMAImprovedEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization" + ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmOptimization", register=True) +except Exception as e: + print("ImprovedEnhancedFireworkAlgorithmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: + print("ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedGradientDifferentialEvolution import ( + ImprovedEnhancedGradientDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedGradientDifferentialEvolution"] = ( + ImprovedEnhancedGradientDifferentialEvolution + ) + LLAMAImprovedEnhancedGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedGradientDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedGradientDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedEnhancedGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchOB import ImprovedEnhancedHarmonySearchOB + + lama_register["ImprovedEnhancedHarmonySearchOB"] = ImprovedEnhancedHarmonySearchOB + LLAMAImprovedEnhancedHarmonySearchOB = NonObjectOptimizer( + method="LLAMAImprovedEnhancedHarmonySearchOB" + ).set_name("LLAMAImprovedEnhancedHarmonySearchOB", register=True) +except Exception as e: + print("ImprovedEnhancedHarmonySearchOB can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( + ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + ) + LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: + print("ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedMemeticHarmonyOptimization import ( + ImprovedEnhancedMemeticHarmonyOptimization, + ) + + lama_register["ImprovedEnhancedMemeticHarmonyOptimization"] = ImprovedEnhancedMemeticHarmonyOptimization + LLAMAImprovedEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedMemeticHarmonyOptimization" + ).set_name("LLAMAImprovedEnhancedMemeticHarmonyOptimization", register=True) +except Exception as e: + print("ImprovedEnhancedMemeticHarmonyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution import ( + ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( + ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution + ) + LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedQuantumHarmonySearch import ( + ImprovedEnhancedQuantumHarmonySearch, + ) + + lama_register["ImprovedEnhancedQuantumHarmonySearch"] = ImprovedEnhancedQuantumHarmonySearch + LLAMAImprovedEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedQuantumHarmonySearch" + ).set_name("LLAMAImprovedEnhancedQuantumHarmonySearch", register=True) +except Exception as e: + print("ImprovedEnhancedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedRefinedAdaptiveQGSA_v61 import ( + ImprovedEnhancedRefinedAdaptiveQGSA_v61, + ) + + lama_register["ImprovedEnhancedRefinedAdaptiveQGSA_v61"] = ImprovedEnhancedRefinedAdaptiveQGSA_v61 + LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61" + ).set_name("LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61", register=True) +except Exception as e: + print("ImprovedEnhancedRefinedAdaptiveQGSA_v61 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedSADE import ImprovedEnhancedSADE + + lama_register["ImprovedEnhancedSADE"] = ImprovedEnhancedSADE + LLAMAImprovedEnhancedSADE = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE").set_name( + "LLAMAImprovedEnhancedSADE", register=True + ) +except Exception as e: + print("ImprovedEnhancedSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnhancedStochasticMetaHeuristicOptimizer import ( + ImprovedEnhancedStochasticMetaHeuristicOptimizer, + ) + + lama_register["ImprovedEnhancedStochasticMetaHeuristicOptimizer"] = ( + ImprovedEnhancedStochasticMetaHeuristicOptimizer + ) + LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer" + ).set_name("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer", register=True) +except Exception as e: + print("ImprovedEnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedEnsembleMemeticOptimizer import ImprovedEnsembleMemeticOptimizer + + lama_register["ImprovedEnsembleMemeticOptimizer"] = ImprovedEnsembleMemeticOptimizer + LLAMAImprovedEnsembleMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEnsembleMemeticOptimizer" + ).set_name("LLAMAImprovedEnsembleMemeticOptimizer", register=True) +except Exception as e: + print("ImprovedEnsembleMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedFireworkAlgorithm import ImprovedFireworkAlgorithm + + lama_register["ImprovedFireworkAlgorithm"] = ImprovedFireworkAlgorithm + LLAMAImprovedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm").set_name( + "LLAMAImprovedFireworkAlgorithm", register=True + ) +except Exception as e: + print("ImprovedFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridAdaptiveDifferentialEvolution import ( + ImprovedHybridAdaptiveDifferentialEvolution, + ) + + lama_register["ImprovedHybridAdaptiveDifferentialEvolution"] = ImprovedHybridAdaptiveDifferentialEvolution + LLAMAImprovedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedHybridAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridAdaptiveGeneticSwarmOptimizer import ( + ImprovedHybridAdaptiveGeneticSwarmOptimizer, + ) + + lama_register["ImprovedHybridAdaptiveGeneticSwarmOptimizer"] = ImprovedHybridAdaptiveGeneticSwarmOptimizer + LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: + print("ImprovedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridAdaptiveHarmonicFireworksTabuSearch import ( + ImprovedHybridAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["ImprovedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( + ImprovedHybridAdaptiveHarmonicFireworksTabuSearch + ) + LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: + print("ImprovedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridCMAESDE import ImprovedHybridCMAESDE + + lama_register["ImprovedHybridCMAESDE"] = ImprovedHybridCMAESDE + LLAMAImprovedHybridCMAESDE = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE").set_name( + "LLAMAImprovedHybridCMAESDE", register=True + ) +except Exception as e: + print("ImprovedHybridCMAESDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridGeneticPSO import ImprovedHybridGeneticPSO + + lama_register["ImprovedHybridGeneticPSO"] = ImprovedHybridGeneticPSO + LLAMAImprovedHybridGeneticPSO = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO").set_name( + "LLAMAImprovedHybridGeneticPSO", register=True + ) +except Exception as e: + print("ImprovedHybridGeneticPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedHybridPSODEOptimizer import ImprovedHybridPSODEOptimizer + + lama_register["ImprovedHybridPSODEOptimizer"] = ImprovedHybridPSODEOptimizer + LLAMAImprovedHybridPSODEOptimizer = NonObjectOptimizer( + method="LLAMAImprovedHybridPSODEOptimizer" + ).set_name("LLAMAImprovedHybridPSODEOptimizer", register=True) +except Exception as e: + print("ImprovedHybridPSODEOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedIterativeAdaptiveGradientEvolver import ( + ImprovedIterativeAdaptiveGradientEvolver, + ) + + lama_register["ImprovedIterativeAdaptiveGradientEvolver"] = ImprovedIterativeAdaptiveGradientEvolver + LLAMAImprovedIterativeAdaptiveGradientEvolver = NonObjectOptimizer( + method="LLAMAImprovedIterativeAdaptiveGradientEvolver" + ).set_name("LLAMAImprovedIterativeAdaptiveGradientEvolver", register=True) +except Exception as e: + print("ImprovedIterativeAdaptiveGradientEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedMetaDynamicQuantumSwarmOptimization import ( + ImprovedMetaDynamicQuantumSwarmOptimization, + ) + + lama_register["ImprovedMetaDynamicQuantumSwarmOptimization"] = ImprovedMetaDynamicQuantumSwarmOptimization + LLAMAImprovedMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedMetaDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("ImprovedMetaDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedMultiOperatorSearch import ImprovedMultiOperatorSearch + + lama_register["ImprovedMultiOperatorSearch"] = ImprovedMultiOperatorSearch + LLAMAImprovedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch").set_name( + "LLAMAImprovedMultiOperatorSearch", register=True + ) +except Exception as e: + print("ImprovedMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedMultiStrategySelfAdaptiveDE import ( + ImprovedMultiStrategySelfAdaptiveDE, + ) + + lama_register["ImprovedMultiStrategySelfAdaptiveDE"] = ImprovedMultiStrategySelfAdaptiveDE + LLAMAImprovedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAImprovedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAImprovedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: + print("ImprovedMultiStrategySelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedOppositionBasedDifferentialEvolution import ( + ImprovedOppositionBasedDifferentialEvolution, + ) + + lama_register["ImprovedOppositionBasedDifferentialEvolution"] = ( + ImprovedOppositionBasedDifferentialEvolution + ) + LLAMAImprovedOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedOppositionBasedDifferentialEvolution" + ).set_name("LLAMAImprovedOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedPrecisionAdaptiveEvolutiveStrategy import ( + ImprovedPrecisionAdaptiveEvolutiveStrategy, + ) + + lama_register["ImprovedPrecisionAdaptiveEvolutiveStrategy"] = ImprovedPrecisionAdaptiveEvolutiveStrategy + LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy" + ).set_name("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy", register=True) +except Exception as e: + print("ImprovedPrecisionAdaptiveEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning import ( + ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning, + ) + + lama_register["ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( + ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning + ) + LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( + method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning" + ).set_name("LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) +except Exception as e: + print("ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedQuantumEnhancedDynamicDifferentialEvolution import ( + ImprovedQuantumEnhancedDynamicDifferentialEvolution, + ) + + lama_register["ImprovedQuantumEnhancedDynamicDifferentialEvolution"] = ( + ImprovedQuantumEnhancedDynamicDifferentialEvolution + ) + LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedQuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedQuantumHarmonySearch import ImprovedQuantumHarmonySearch + + lama_register["ImprovedQuantumHarmonySearch"] = ImprovedQuantumHarmonySearch + LLAMAImprovedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedQuantumHarmonySearch" + ).set_name("LLAMAImprovedQuantumHarmonySearch", register=True) +except Exception as e: + print("ImprovedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedQuantumLevyAdaptiveHybridSearch import ( + ImprovedQuantumLevyAdaptiveHybridSearch, + ) + + lama_register["ImprovedQuantumLevyAdaptiveHybridSearch"] = ImprovedQuantumLevyAdaptiveHybridSearch + LLAMAImprovedQuantumLevyAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch" + ).set_name("LLAMAImprovedQuantumLevyAdaptiveHybridSearch", register=True) +except Exception as e: + print("ImprovedQuantumLevyAdaptiveHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedQuantumSimulatedAnnealing import ( + ImprovedQuantumSimulatedAnnealing, + ) + + lama_register["ImprovedQuantumSimulatedAnnealing"] = ImprovedQuantumSimulatedAnnealing + LLAMAImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAImprovedQuantumSimulatedAnnealing" + ).set_name("LLAMAImprovedQuantumSimulatedAnnealing", register=True) +except Exception as e: + print("ImprovedQuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveDynamicExplorationOptimization import ( + ImprovedRefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["ImprovedRefinedAdaptiveDynamicExplorationOptimization"] = ( + ImprovedRefinedAdaptiveDynamicExplorationOptimization + ) + LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: + print("ImprovedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveMultiOperatorSearch import ( + ImprovedRefinedAdaptiveMultiOperatorSearch, + ) + + lama_register["ImprovedRefinedAdaptiveMultiOperatorSearch"] = ImprovedRefinedAdaptiveMultiOperatorSearch + LLAMAImprovedRefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAImprovedRefinedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: + print("ImprovedRefinedAdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( + ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( + ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution + ) + LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization import ( + ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization, + ) + + lama_register["ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( + ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization + ) + LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: + print("ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 import ( + ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4"] = ( + ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 + ) + LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: + print("ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO import ( + ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO, + ) + + lama_register["ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO"] = ( + ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO + ) + LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedSelfAdaptiveDifferentialEvolution import ( + ImprovedSelfAdaptiveDifferentialEvolution, + ) + + lama_register["ImprovedSelfAdaptiveDifferentialEvolution"] = ImprovedSelfAdaptiveDifferentialEvolution + LLAMAImprovedSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedSelfAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedSelfAdaptiveHybridOptimizer import ( + ImprovedSelfAdaptiveHybridOptimizer, + ) + + lama_register["ImprovedSelfAdaptiveHybridOptimizer"] = ImprovedSelfAdaptiveHybridOptimizer + LLAMAImprovedSelfAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveHybridOptimizer" + ).set_name("LLAMAImprovedSelfAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("ImprovedSelfAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution import ( + ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution"] = ( + ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution + ) + LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: + print("ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ImprovedUnifiedAdaptiveMemeticOptimizer import ( + ImprovedUnifiedAdaptiveMemeticOptimizer, + ) + + lama_register["ImprovedUnifiedAdaptiveMemeticOptimizer"] = ImprovedUnifiedAdaptiveMemeticOptimizer + LLAMAImprovedUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer" + ).set_name("LLAMAImprovedUnifiedAdaptiveMemeticOptimizer", register=True) +except Exception as e: + print("ImprovedUnifiedAdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IncrementalCrossoverOptimization import IncrementalCrossoverOptimization + + lama_register["IncrementalCrossoverOptimization"] = IncrementalCrossoverOptimization + LLAMAIncrementalCrossoverOptimization = NonObjectOptimizer( + method="LLAMAIncrementalCrossoverOptimization" + ).set_name("LLAMAIncrementalCrossoverOptimization", register=True) +except Exception as e: + print("IncrementalCrossoverOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IntelligentDynamicDualPhaseStrategyV39 import ( + IntelligentDynamicDualPhaseStrategyV39, + ) + + lama_register["IntelligentDynamicDualPhaseStrategyV39"] = IntelligentDynamicDualPhaseStrategyV39 + LLAMAIntelligentDynamicDualPhaseStrategyV39 = NonObjectOptimizer( + method="LLAMAIntelligentDynamicDualPhaseStrategyV39" + ).set_name("LLAMAIntelligentDynamicDualPhaseStrategyV39", register=True) +except Exception as e: + print("IntelligentDynamicDualPhaseStrategyV39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IntelligentEvolvingAdaptiveStrategyV34 import ( + IntelligentEvolvingAdaptiveStrategyV34, + ) + + lama_register["IntelligentEvolvingAdaptiveStrategyV34"] = IntelligentEvolvingAdaptiveStrategyV34 + LLAMAIntelligentEvolvingAdaptiveStrategyV34 = NonObjectOptimizer( + method="LLAMAIntelligentEvolvingAdaptiveStrategyV34" + ).set_name("LLAMAIntelligentEvolvingAdaptiveStrategyV34", register=True) +except Exception as e: + print("IntelligentEvolvingAdaptiveStrategyV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IntelligentPerturbationSearch import IntelligentPerturbationSearch + + lama_register["IntelligentPerturbationSearch"] = IntelligentPerturbationSearch + LLAMAIntelligentPerturbationSearch = NonObjectOptimizer( + method="LLAMAIntelligentPerturbationSearch" + ).set_name("LLAMAIntelligentPerturbationSearch", register=True) +except Exception as e: + print("IntelligentPerturbationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IterativeAdaptiveDifferentialEvolution import ( + IterativeAdaptiveDifferentialEvolution, + ) + + lama_register["IterativeAdaptiveDifferentialEvolution"] = IterativeAdaptiveDifferentialEvolution + LLAMAIterativeAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAIterativeAdaptiveDifferentialEvolution" + ).set_name("LLAMAIterativeAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("IterativeAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.IterativeProgressiveDifferentialEvolution import ( + IterativeProgressiveDifferentialEvolution, + ) + + lama_register["IterativeProgressiveDifferentialEvolution"] = IterativeProgressiveDifferentialEvolution + LLAMAIterativeProgressiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAIterativeProgressiveDifferentialEvolution" + ).set_name("LLAMAIterativeProgressiveDifferentialEvolution", register=True) +except Exception as e: + print("IterativeProgressiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.LADESA import LADESA + + lama_register["LADESA"] = LADESA + LLAMALADESA = NonObjectOptimizer(method="LLAMALADESA").set_name("LLAMALADESA", register=True) +except Exception as e: + print("LADESA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.LAOS import LAOS + + lama_register["LAOS"] = LAOS + LLAMALAOS = NonObjectOptimizer(method="LLAMALAOS").set_name("LLAMALAOS", register=True) +except Exception as e: + print("LAOS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.LearningAdaptiveMemoryEnhancedStrategyV42 import ( + LearningAdaptiveMemoryEnhancedStrategyV42, + ) + + lama_register["LearningAdaptiveMemoryEnhancedStrategyV42"] = LearningAdaptiveMemoryEnhancedStrategyV42 + LLAMALearningAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( + method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42" + ).set_name("LLAMALearningAdaptiveMemoryEnhancedStrategyV42", register=True) +except Exception as e: + print("LearningAdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.LearningAdaptiveStrategyV24 import LearningAdaptiveStrategyV24 + + lama_register["LearningAdaptiveStrategyV24"] = LearningAdaptiveStrategyV24 + LLAMALearningAdaptiveStrategyV24 = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24").set_name( + "LLAMALearningAdaptiveStrategyV24", register=True + ) +except Exception as e: + print("LearningAdaptiveStrategyV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.LevyEnhancedAdaptiveSimulatedAnnealingDE import ( + LevyEnhancedAdaptiveSimulatedAnnealingDE, + ) + + lama_register["LevyEnhancedAdaptiveSimulatedAnnealingDE"] = LevyEnhancedAdaptiveSimulatedAnnealingDE + LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( + method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE" + ).set_name("LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE", register=True) +except Exception as e: + print("LevyEnhancedAdaptiveSimulatedAnnealingDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MADE import MADE + + lama_register["MADE"] = MADE + LLAMAMADE = NonObjectOptimizer(method="LLAMAMADE").set_name("LLAMAMADE", register=True) +except Exception as e: + print("MADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MIDEAT import MIDEAT + + lama_register["MIDEAT"] = MIDEAT + LLAMAMIDEAT = NonObjectOptimizer(method="LLAMAMIDEAT").set_name("LLAMAMIDEAT", register=True) +except Exception as e: + print("MIDEAT can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MSADE import MSADE + + lama_register["MSADE"] = MSADE + LLAMAMSADE = NonObjectOptimizer(method="LLAMAMSADE").set_name("LLAMAMSADE", register=True) +except Exception as e: + print("MSADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MSEAS import MSEAS + + lama_register["MSEAS"] = MSEAS + LLAMAMSEAS = NonObjectOptimizer(method="LLAMAMSEAS").set_name("LLAMAMSEAS", register=True) +except Exception as e: + print("MSEAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemeticAdaptiveDifferentialEvolution import ( + MemeticAdaptiveDifferentialEvolution, + ) + + lama_register["MemeticAdaptiveDifferentialEvolution"] = MemeticAdaptiveDifferentialEvolution + LLAMAMemeticAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMemeticAdaptiveDifferentialEvolution" + ).set_name("LLAMAMemeticAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("MemeticAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemeticDifferentialEvolutionOptimizer import ( + MemeticDifferentialEvolutionOptimizer, + ) + + lama_register["MemeticDifferentialEvolutionOptimizer"] = MemeticDifferentialEvolutionOptimizer + LLAMAMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAMemeticDifferentialEvolutionOptimizer" + ).set_name("LLAMAMemeticDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("MemeticDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemeticElitistDifferentialEvolutionWithDynamicFandCR import ( + MemeticElitistDifferentialEvolutionWithDynamicFandCR, + ) + + lama_register["MemeticElitistDifferentialEvolutionWithDynamicFandCR"] = ( + MemeticElitistDifferentialEvolutionWithDynamicFandCR + ) + LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR = NonObjectOptimizer( + method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR" + ).set_name("LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR", register=True) +except Exception as e: + print("MemeticElitistDifferentialEvolutionWithDynamicFandCR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemeticEnhancedParticleSwarmOptimization import ( + MemeticEnhancedParticleSwarmOptimization, + ) + + lama_register["MemeticEnhancedParticleSwarmOptimization"] = MemeticEnhancedParticleSwarmOptimization + LLAMAMemeticEnhancedParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAMemeticEnhancedParticleSwarmOptimization" + ).set_name("LLAMAMemeticEnhancedParticleSwarmOptimization", register=True) +except Exception as e: + print("MemeticEnhancedParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemeticSpatialDifferentialEvolution import ( + MemeticSpatialDifferentialEvolution, + ) + + lama_register["MemeticSpatialDifferentialEvolution"] = MemeticSpatialDifferentialEvolution + LLAMAMemeticSpatialDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMemeticSpatialDifferentialEvolution" + ).set_name("LLAMAMemeticSpatialDifferentialEvolution", register=True) +except Exception as e: + print("MemeticSpatialDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryBasedSimulatedAnnealing import MemoryBasedSimulatedAnnealing + + lama_register["MemoryBasedSimulatedAnnealing"] = MemoryBasedSimulatedAnnealing + LLAMAMemoryBasedSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAMemoryBasedSimulatedAnnealing" + ).set_name("LLAMAMemoryBasedSimulatedAnnealing", register=True) +except Exception as e: + print("MemoryBasedSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveAnnealing import MemoryEnhancedAdaptiveAnnealing + + lama_register["MemoryEnhancedAdaptiveAnnealing"] = MemoryEnhancedAdaptiveAnnealing + LLAMAMemoryEnhancedAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveAnnealing" + ).set_name("LLAMAMemoryEnhancedAdaptiveAnnealing", register=True) +except Exception as e: + print("MemoryEnhancedAdaptiveAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealing import ( + MemoryEnhancedAdaptiveMultiPhaseAnnealing, + ) + + lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealing"] = MemoryEnhancedAdaptiveMultiPhaseAnnealing + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing" + ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing", register=True) +except Exception as e: + print("MemoryEnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( + MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient, + ) + + lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( + MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient + ) + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient" + ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) +except Exception as e: + print("MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryEnhancedDynamicHybridOptimizer import ( + MemoryEnhancedDynamicHybridOptimizer, + ) + + lama_register["MemoryEnhancedDynamicHybridOptimizer"] = MemoryEnhancedDynamicHybridOptimizer + LLAMAMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: + print("MemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryGuidedAdaptiveDualPhaseStrategyV40 import ( + MemoryGuidedAdaptiveDualPhaseStrategyV40, + ) + + lama_register["MemoryGuidedAdaptiveDualPhaseStrategyV40"] = MemoryGuidedAdaptiveDualPhaseStrategyV40 + LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40 = NonObjectOptimizer( + method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40" + ).set_name("LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40", register=True) +except Exception as e: + print("MemoryGuidedAdaptiveDualPhaseStrategyV40 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MemoryHybridAdaptiveDE import MemoryHybridAdaptiveDE + + lama_register["MemoryHybridAdaptiveDE"] = MemoryHybridAdaptiveDE + LLAMAMemoryHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE").set_name( + "LLAMAMemoryHybridAdaptiveDE", register=True + ) +except Exception as e: + print("MemoryHybridAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MetaDynamicPrecisionOptimizerV1 import MetaDynamicPrecisionOptimizerV1 + + lama_register["MetaDynamicPrecisionOptimizerV1"] = MetaDynamicPrecisionOptimizerV1 + LLAMAMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAMetaDynamicPrecisionOptimizerV1" + ).set_name("LLAMAMetaDynamicPrecisionOptimizerV1", register=True) +except Exception as e: + print("MetaDynamicPrecisionOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MetaDynamicQuantumSwarmOptimization import ( + MetaDynamicQuantumSwarmOptimization, + ) + + lama_register["MetaDynamicQuantumSwarmOptimization"] = MetaDynamicQuantumSwarmOptimization + LLAMAMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAMetaDynamicQuantumSwarmOptimization" + ).set_name("LLAMAMetaDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("MetaDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MetaHarmonicSearch import MetaHarmonicSearch + + lama_register["MetaHarmonicSearch"] = MetaHarmonicSearch + LLAMAMetaHarmonicSearch = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch").set_name( + "LLAMAMetaHarmonicSearch", register=True + ) +except Exception as e: + print("MetaHarmonicSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MetaHarmonicSearch2 import MetaHarmonicSearch2 + + lama_register["MetaHarmonicSearch2"] = MetaHarmonicSearch2 + LLAMAMetaHarmonicSearch2 = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2").set_name( + "LLAMAMetaHarmonicSearch2", register=True + ) +except Exception as e: + print("MetaHarmonicSearch2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MetaNetAQAPSO import MetaNetAQAPSO + + lama_register["MetaNetAQAPSO"] = MetaNetAQAPSO + LLAMAMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO").set_name( + "LLAMAMetaNetAQAPSO", register=True + ) +except Exception as e: + print("MetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MomentumGradientExploration import MomentumGradientExploration + + lama_register["MomentumGradientExploration"] = MomentumGradientExploration + LLAMAMomentumGradientExploration = NonObjectOptimizer(method="LLAMAMomentumGradientExploration").set_name( + "LLAMAMomentumGradientExploration", register=True + ) +except Exception as e: + print("MomentumGradientExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiFacetAdaptiveSearch import MultiFacetAdaptiveSearch + + lama_register["MultiFacetAdaptiveSearch"] = MultiFacetAdaptiveSearch + LLAMAMultiFacetAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch").set_name( + "LLAMAMultiFacetAdaptiveSearch", register=True + ) +except Exception as e: + print("MultiFacetAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiFocalAdaptiveOptimizer import MultiFocalAdaptiveOptimizer + + lama_register["MultiFocalAdaptiveOptimizer"] = MultiFocalAdaptiveOptimizer + LLAMAMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer").set_name( + "LLAMAMultiFocalAdaptiveOptimizer", register=True + ) +except Exception as e: + print("MultiFocalAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiLayeredAdaptiveCovarianceMatrixEvolution import ( + MultiLayeredAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["MultiLayeredAdaptiveCovarianceMatrixEvolution"] = ( + MultiLayeredAdaptiveCovarianceMatrixEvolution + ) + LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: + print("MultiLayeredAdaptiveCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiModalMemoryEnhancedHybridOptimizer import ( + MultiModalMemoryEnhancedHybridOptimizer, + ) + + lama_register["MultiModalMemoryEnhancedHybridOptimizer"] = MultiModalMemoryEnhancedHybridOptimizer + LLAMAMultiModalMemoryEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMAMultiModalMemoryEnhancedHybridOptimizer" + ).set_name("LLAMAMultiModalMemoryEnhancedHybridOptimizer", register=True) +except Exception as e: + print("MultiModalMemoryEnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 import ( + MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66, + ) + + lama_register["MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66"] = ( + MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 + ) + LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 = NonObjectOptimizer( + method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66" + ).set_name("LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66", register=True) +except Exception as e: + print("MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 import ( + MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67, + ) + + lama_register["MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67"] = ( + MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 + ) + LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 = NonObjectOptimizer( + method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67" + ).set_name("LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67", register=True) +except Exception as e: + print("MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiOperatorSearch import MultiOperatorSearch + + lama_register["MultiOperatorSearch"] = MultiOperatorSearch + LLAMAMultiOperatorSearch = NonObjectOptimizer(method="LLAMAMultiOperatorSearch").set_name( + "LLAMAMultiOperatorSearch", register=True + ) +except Exception as e: + print("MultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPhaseAdaptiveDE import MultiPhaseAdaptiveDE + + lama_register["MultiPhaseAdaptiveDE"] = MultiPhaseAdaptiveDE + LLAMAMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE").set_name( + "LLAMAMultiPhaseAdaptiveDE", register=True + ) +except Exception as e: + print("MultiPhaseAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPhaseAdaptiveDifferentialEvolution import ( + MultiPhaseAdaptiveDifferentialEvolution, + ) + + lama_register["MultiPhaseAdaptiveDifferentialEvolution"] = MultiPhaseAdaptiveDifferentialEvolution + LLAMAMultiPhaseAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveDifferentialEvolution" + ).set_name("LLAMAMultiPhaseAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("MultiPhaseAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPhaseAdaptiveExplorationOptimization import ( + MultiPhaseAdaptiveExplorationOptimization, + ) + + lama_register["MultiPhaseAdaptiveExplorationOptimization"] = MultiPhaseAdaptiveExplorationOptimization + LLAMAMultiPhaseAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveExplorationOptimization" + ).set_name("LLAMAMultiPhaseAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("MultiPhaseAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPhaseAdaptiveHybridDEPSO import MultiPhaseAdaptiveHybridDEPSO + + lama_register["MultiPhaseAdaptiveHybridDEPSO"] = MultiPhaseAdaptiveHybridDEPSO + LLAMAMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMAMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("MultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPhaseDiversityAdaptiveDE import MultiPhaseDiversityAdaptiveDE + + lama_register["MultiPhaseDiversityAdaptiveDE"] = MultiPhaseDiversityAdaptiveDE + LLAMAMultiPhaseDiversityAdaptiveDE = NonObjectOptimizer( + method="LLAMAMultiPhaseDiversityAdaptiveDE" + ).set_name("LLAMAMultiPhaseDiversityAdaptiveDE", register=True) +except Exception as e: + print("MultiPhaseDiversityAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiPopulationAdaptiveMemorySearch import ( + MultiPopulationAdaptiveMemorySearch, + ) + + lama_register["MultiPopulationAdaptiveMemorySearch"] = MultiPopulationAdaptiveMemorySearch + LLAMAMultiPopulationAdaptiveMemorySearch = NonObjectOptimizer( + method="LLAMAMultiPopulationAdaptiveMemorySearch" + ).set_name("LLAMAMultiPopulationAdaptiveMemorySearch", register=True) +except Exception as e: + print("MultiPopulationAdaptiveMemorySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiScaleAdaptiveHybridOptimization import ( + MultiScaleAdaptiveHybridOptimization, + ) + + lama_register["MultiScaleAdaptiveHybridOptimization"] = MultiScaleAdaptiveHybridOptimization + LLAMAMultiScaleAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAMultiScaleAdaptiveHybridOptimization" + ).set_name("LLAMAMultiScaleAdaptiveHybridOptimization", register=True) +except Exception as e: + print("MultiScaleAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiScaleGradientExploration import MultiScaleGradientExploration + + lama_register["MultiScaleGradientExploration"] = MultiScaleGradientExploration + LLAMAMultiScaleGradientExploration = NonObjectOptimizer( + method="LLAMAMultiScaleGradientExploration" + ).set_name("LLAMAMultiScaleGradientExploration", register=True) +except Exception as e: + print("MultiScaleGradientExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiScaleGradientSearch import MultiScaleGradientSearch + + lama_register["MultiScaleGradientSearch"] = MultiScaleGradientSearch + LLAMAMultiScaleGradientSearch = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch").set_name( + "LLAMAMultiScaleGradientSearch", register=True + ) +except Exception as e: + print("MultiScaleGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiScaleQuadraticSearch import MultiScaleQuadraticSearch + + lama_register["MultiScaleQuadraticSearch"] = MultiScaleQuadraticSearch + LLAMAMultiScaleQuadraticSearch = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch").set_name( + "LLAMAMultiScaleQuadraticSearch", register=True + ) +except Exception as e: + print("MultiScaleQuadraticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStageAdaptiveSearch import MultiStageAdaptiveSearch + + lama_register["MultiStageAdaptiveSearch"] = MultiStageAdaptiveSearch + LLAMAMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch").set_name( + "LLAMAMultiStageAdaptiveSearch", register=True + ) +except Exception as e: + print("MultiStageAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStageHybridGradientBoostedAnnealing import ( + MultiStageHybridGradientBoostedAnnealing, + ) + + lama_register["MultiStageHybridGradientBoostedAnnealing"] = MultiStageHybridGradientBoostedAnnealing + LLAMAMultiStageHybridGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAMultiStageHybridGradientBoostedAnnealing" + ).set_name("LLAMAMultiStageHybridGradientBoostedAnnealing", register=True) +except Exception as e: + print("MultiStageHybridGradientBoostedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyAdaptiveGradientEvolution import ( + MultiStrategyAdaptiveGradientEvolution, + ) + + lama_register["MultiStrategyAdaptiveGradientEvolution"] = MultiStrategyAdaptiveGradientEvolution + LLAMAMultiStrategyAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyAdaptiveGradientEvolution" + ).set_name("LLAMAMultiStrategyAdaptiveGradientEvolution", register=True) +except Exception as e: + print("MultiStrategyAdaptiveGradientEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyAdaptiveSwarmDifferentialEvolution import ( + MultiStrategyAdaptiveSwarmDifferentialEvolution, + ) + + lama_register["MultiStrategyAdaptiveSwarmDifferentialEvolution"] = ( + MultiStrategyAdaptiveSwarmDifferentialEvolution + ) + LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: + print("MultiStrategyAdaptiveSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyDifferentialEvolution import ( + MultiStrategyDifferentialEvolution, + ) + + lama_register["MultiStrategyDifferentialEvolution"] = MultiStrategyDifferentialEvolution + LLAMAMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyDifferentialEvolution" + ).set_name("LLAMAMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("MultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyMemeticAlgorithm import MultiStrategyMemeticAlgorithm + + lama_register["MultiStrategyMemeticAlgorithm"] = MultiStrategyMemeticAlgorithm + LLAMAMultiStrategyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAMultiStrategyMemeticAlgorithm" + ).set_name("LLAMAMultiStrategyMemeticAlgorithm", register=True) +except Exception as e: + print("MultiStrategyMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyQuantumCognitionOptimizerV9 import ( + MultiStrategyQuantumCognitionOptimizerV9, + ) + + lama_register["MultiStrategyQuantumCognitionOptimizerV9"] = MultiStrategyQuantumCognitionOptimizerV9 + LLAMAMultiStrategyQuantumCognitionOptimizerV9 = NonObjectOptimizer( + method="LLAMAMultiStrategyQuantumCognitionOptimizerV9" + ).set_name("LLAMAMultiStrategyQuantumCognitionOptimizerV9", register=True) +except Exception as e: + print("MultiStrategyQuantumCognitionOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategyQuantumLevyOptimizer import ( + MultiStrategyQuantumLevyOptimizer, + ) + + lama_register["MultiStrategyQuantumLevyOptimizer"] = MultiStrategyQuantumLevyOptimizer + LLAMAMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAMultiStrategyQuantumLevyOptimizer" + ).set_name("LLAMAMultiStrategyQuantumLevyOptimizer", register=True) +except Exception as e: + print("MultiStrategyQuantumLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiStrategySelfAdaptiveDE import MultiStrategySelfAdaptiveDE + + lama_register["MultiStrategySelfAdaptiveDE"] = MultiStrategySelfAdaptiveDE + LLAMAMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE").set_name( + "LLAMAMultiStrategySelfAdaptiveDE", register=True + ) +except Exception as e: + print("MultiStrategySelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.MultiSwarmAdaptiveDE_PSO import MultiSwarmAdaptiveDE_PSO + + lama_register["MultiSwarmAdaptiveDE_PSO"] = MultiSwarmAdaptiveDE_PSO + LLAMAMultiSwarmAdaptiveDE_PSO = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO").set_name( + "LLAMAMultiSwarmAdaptiveDE_PSO", register=True + ) +except Exception as e: + print("MultiSwarmAdaptiveDE_PSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.NovelAdaptiveHarmonicFireworksTabuSearch import ( + NovelAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["NovelAdaptiveHarmonicFireworksTabuSearch"] = NovelAdaptiveHarmonicFireworksTabuSearch + LLAMANovelAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMANovelAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: + print("NovelAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.NovelDynamicFireworkAlgorithm import NovelDynamicFireworkAlgorithm + + lama_register["NovelDynamicFireworkAlgorithm"] = NovelDynamicFireworkAlgorithm + LLAMANovelDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMANovelDynamicFireworkAlgorithm" + ).set_name("LLAMANovelDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("NovelDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 import ( + NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2"] = ( + NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 + ) + LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: + print("NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.NovelHarmonyTabuSearch import NovelHarmonyTabuSearch + + lama_register["NovelHarmonyTabuSearch"] = NovelHarmonyTabuSearch + LLAMANovelHarmonyTabuSearch = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch").set_name( + "LLAMANovelHarmonyTabuSearch", register=True + ) +except Exception as e: + print("NovelHarmonyTabuSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ODEMF import ODEMF + + lama_register["ODEMF"] = ODEMF + LLAMAODEMF = NonObjectOptimizer(method="LLAMAODEMF").set_name("LLAMAODEMF", register=True) +except Exception as e: + print("ODEMF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ORAMED import ORAMED + + lama_register["ORAMED"] = ORAMED + LLAMAORAMED = NonObjectOptimizer(method="LLAMAORAMED").set_name("LLAMAORAMED", register=True) +except Exception as e: + print("ORAMED can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OctopusSwarmAlgorithm import OctopusSwarmAlgorithm + + lama_register["OctopusSwarmAlgorithm"] = OctopusSwarmAlgorithm + LLAMAOctopusSwarmAlgorithm = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm").set_name( + "LLAMAOctopusSwarmAlgorithm", register=True + ) +except Exception as e: + print("OctopusSwarmAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialEvolution import ( + OptimalAdaptiveDifferentialEvolution, + ) + + lama_register["OptimalAdaptiveDifferentialEvolution"] = OptimalAdaptiveDifferentialEvolution + LLAMAOptimalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveDifferentialEvolution" + ).set_name("LLAMAOptimalAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("OptimalAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialSearch import ( + OptimalAdaptiveDifferentialSearch, + ) + + lama_register["OptimalAdaptiveDifferentialSearch"] = OptimalAdaptiveDifferentialSearch + LLAMAOptimalAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveDifferentialSearch" + ).set_name("LLAMAOptimalAdaptiveDifferentialSearch", register=True) +except Exception as e: + print("OptimalAdaptiveDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalAdaptiveMutationEnhancedSearch import ( + OptimalAdaptiveMutationEnhancedSearch, + ) + + lama_register["OptimalAdaptiveMutationEnhancedSearch"] = OptimalAdaptiveMutationEnhancedSearch + LLAMAOptimalAdaptiveMutationEnhancedSearch = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveMutationEnhancedSearch" + ).set_name("LLAMAOptimalAdaptiveMutationEnhancedSearch", register=True) +except Exception as e: + print("OptimalAdaptiveMutationEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalAdaptiveSwarmDifferentialEvolution import ( + OptimalAdaptiveSwarmDifferentialEvolution, + ) + + lama_register["OptimalAdaptiveSwarmDifferentialEvolution"] = OptimalAdaptiveSwarmDifferentialEvolution + LLAMAOptimalAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAOptimalAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: + print("OptimalAdaptiveSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalBalanceSearch import OptimalBalanceSearch + + lama_register["OptimalBalanceSearch"] = OptimalBalanceSearch + LLAMAOptimalBalanceSearch = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch").set_name( + "LLAMAOptimalBalanceSearch", register=True + ) +except Exception as e: + print("OptimalBalanceSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalCohortDiversityOptimizer import OptimalCohortDiversityOptimizer + + lama_register["OptimalCohortDiversityOptimizer"] = OptimalCohortDiversityOptimizer + LLAMAOptimalCohortDiversityOptimizer = NonObjectOptimizer( + method="LLAMAOptimalCohortDiversityOptimizer" + ).set_name("LLAMAOptimalCohortDiversityOptimizer", register=True) +except Exception as e: + print("OptimalCohortDiversityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalConvergenceDE import OptimalConvergenceDE + + lama_register["OptimalConvergenceDE"] = OptimalConvergenceDE + LLAMAOptimalConvergenceDE = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE").set_name( + "LLAMAOptimalConvergenceDE", register=True + ) +except Exception as e: + print("OptimalConvergenceDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalDynamicAdaptiveEvolutionOptimizer import ( + OptimalDynamicAdaptiveEvolutionOptimizer, + ) + + lama_register["OptimalDynamicAdaptiveEvolutionOptimizer"] = OptimalDynamicAdaptiveEvolutionOptimizer + LLAMAOptimalDynamicAdaptiveEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer" + ).set_name("LLAMAOptimalDynamicAdaptiveEvolutionOptimizer", register=True) +except Exception as e: + print("OptimalDynamicAdaptiveEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalDynamicMutationSearch import OptimalDynamicMutationSearch + + lama_register["OptimalDynamicMutationSearch"] = OptimalDynamicMutationSearch + LLAMAOptimalDynamicMutationSearch = NonObjectOptimizer( + method="LLAMAOptimalDynamicMutationSearch" + ).set_name("LLAMAOptimalDynamicMutationSearch", register=True) +except Exception as e: + print("OptimalDynamicMutationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV14 import ( + OptimalDynamicPrecisionOptimizerV14, + ) + + lama_register["OptimalDynamicPrecisionOptimizerV14"] = OptimalDynamicPrecisionOptimizerV14 + LLAMAOptimalDynamicPrecisionOptimizerV14 = NonObjectOptimizer( + method="LLAMAOptimalDynamicPrecisionOptimizerV14" + ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV14", register=True) +except Exception as e: + print("OptimalDynamicPrecisionOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV21 import ( + OptimalDynamicPrecisionOptimizerV21, + ) + + lama_register["OptimalDynamicPrecisionOptimizerV21"] = OptimalDynamicPrecisionOptimizerV21 + LLAMAOptimalDynamicPrecisionOptimizerV21 = NonObjectOptimizer( + method="LLAMAOptimalDynamicPrecisionOptimizerV21" + ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV21", register=True) +except Exception as e: + print("OptimalDynamicPrecisionOptimizerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalEnhancedRAMEDS import OptimalEnhancedRAMEDS + + lama_register["OptimalEnhancedRAMEDS"] = OptimalEnhancedRAMEDS + LLAMAOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS").set_name( + "LLAMAOptimalEnhancedRAMEDS", register=True + ) +except Exception as e: + print("OptimalEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalEnhancedStrategyDE import OptimalEnhancedStrategyDE + + lama_register["OptimalEnhancedStrategyDE"] = OptimalEnhancedStrategyDE + LLAMAOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE").set_name( + "LLAMAOptimalEnhancedStrategyDE", register=True + ) +except Exception as e: + print("OptimalEnhancedStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalEvolutionaryGradientHybridOptimizerV8 import ( + OptimalEvolutionaryGradientHybridOptimizerV8, + ) + + lama_register["OptimalEvolutionaryGradientHybridOptimizerV8"] = ( + OptimalEvolutionaryGradientHybridOptimizerV8 + ) + LLAMAOptimalEvolutionaryGradientHybridOptimizerV8 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8" + ).set_name("LLAMAOptimalEvolutionaryGradientHybridOptimizerV8", register=True) +except Exception as e: + print("OptimalEvolutionaryGradientHybridOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV11 import ( + OptimalEvolutionaryGradientOptimizerV11, + ) + + lama_register["OptimalEvolutionaryGradientOptimizerV11"] = OptimalEvolutionaryGradientOptimizerV11 + LLAMAOptimalEvolutionaryGradientOptimizerV11 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientOptimizerV11" + ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV11", register=True) +except Exception as e: + print("OptimalEvolutionaryGradientOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV25 import ( + OptimalEvolutionaryGradientOptimizerV25, + ) + + lama_register["OptimalEvolutionaryGradientOptimizerV25"] = OptimalEvolutionaryGradientOptimizerV25 + LLAMAOptimalEvolutionaryGradientOptimizerV25 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientOptimizerV25" + ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV25", register=True) +except Exception as e: + print("OptimalEvolutionaryGradientOptimizerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalHybridDifferentialAnnealingOptimizer import ( + OptimalHybridDifferentialAnnealingOptimizer, + ) + + lama_register["OptimalHybridDifferentialAnnealingOptimizer"] = OptimalHybridDifferentialAnnealingOptimizer + LLAMAOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAOptimalHybridDifferentialAnnealingOptimizer" + ).set_name("LLAMAOptimalHybridDifferentialAnnealingOptimizer", register=True) +except Exception as e: + print("OptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalHyperStrategicOptimizerV51 import ( + OptimalHyperStrategicOptimizerV51, + ) + + lama_register["OptimalHyperStrategicOptimizerV51"] = OptimalHyperStrategicOptimizerV51 + LLAMAOptimalHyperStrategicOptimizerV51 = NonObjectOptimizer( + method="LLAMAOptimalHyperStrategicOptimizerV51" + ).set_name("LLAMAOptimalHyperStrategicOptimizerV51", register=True) +except Exception as e: + print("OptimalHyperStrategicOptimizerV51 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalPrecisionDynamicAdaptationOptimizer import ( + OptimalPrecisionDynamicAdaptationOptimizer, + ) + + lama_register["OptimalPrecisionDynamicAdaptationOptimizer"] = OptimalPrecisionDynamicAdaptationOptimizer + LLAMAOptimalPrecisionDynamicAdaptationOptimizer = NonObjectOptimizer( + method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer" + ).set_name("LLAMAOptimalPrecisionDynamicAdaptationOptimizer", register=True) +except Exception as e: + print("OptimalPrecisionDynamicAdaptationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryOptimizerV37 import ( + OptimalPrecisionEvolutionaryOptimizerV37, + ) + + lama_register["OptimalPrecisionEvolutionaryOptimizerV37"] = OptimalPrecisionEvolutionaryOptimizerV37 + LLAMAOptimalPrecisionEvolutionaryOptimizerV37 = NonObjectOptimizer( + method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37" + ).set_name("LLAMAOptimalPrecisionEvolutionaryOptimizerV37", register=True) +except Exception as e: + print("OptimalPrecisionEvolutionaryOptimizerV37 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryThermalOptimizer import ( + OptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["OptimalPrecisionEvolutionaryThermalOptimizer"] = ( + OptimalPrecisionEvolutionaryThermalOptimizer + ) + LLAMAOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: + print("OptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalPrecisionHybridSearchV3 import OptimalPrecisionHybridSearchV3 + + lama_register["OptimalPrecisionHybridSearchV3"] = OptimalPrecisionHybridSearchV3 + LLAMAOptimalPrecisionHybridSearchV3 = NonObjectOptimizer( + method="LLAMAOptimalPrecisionHybridSearchV3" + ).set_name("LLAMAOptimalPrecisionHybridSearchV3", register=True) +except Exception as e: + print("OptimalPrecisionHybridSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalQuantumSynergyStrategy import OptimalQuantumSynergyStrategy + + lama_register["OptimalQuantumSynergyStrategy"] = OptimalQuantumSynergyStrategy + LLAMAOptimalQuantumSynergyStrategy = NonObjectOptimizer( + method="LLAMAOptimalQuantumSynergyStrategy" + ).set_name("LLAMAOptimalQuantumSynergyStrategy", register=True) +except Exception as e: + print("OptimalQuantumSynergyStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalRefinedEnhancedUltraRefinedRAMEDS import ( + OptimalRefinedEnhancedUltraRefinedRAMEDS, + ) + + lama_register["OptimalRefinedEnhancedUltraRefinedRAMEDS"] = OptimalRefinedEnhancedUltraRefinedRAMEDS + LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS" + ).set_name("LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS", register=True) +except Exception as e: + print("OptimalRefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalSelectiveEvolutionaryOptimizerV20 import ( + OptimalSelectiveEvolutionaryOptimizerV20, + ) + + lama_register["OptimalSelectiveEvolutionaryOptimizerV20"] = OptimalSelectiveEvolutionaryOptimizerV20 + LLAMAOptimalSelectiveEvolutionaryOptimizerV20 = NonObjectOptimizer( + method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20" + ).set_name("LLAMAOptimalSelectiveEvolutionaryOptimizerV20", register=True) +except Exception as e: + print("OptimalSelectiveEvolutionaryOptimizerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalSmartRefinedRAMEDS import OptimalSmartRefinedRAMEDS + + lama_register["OptimalSmartRefinedRAMEDS"] = OptimalSmartRefinedRAMEDS + LLAMAOptimalSmartRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS").set_name( + "LLAMAOptimalSmartRefinedRAMEDS", register=True + ) +except Exception as e: + print("OptimalSmartRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalSpiralCentroidSearch import OptimalSpiralCentroidSearch + + lama_register["OptimalSpiralCentroidSearch"] = OptimalSpiralCentroidSearch + LLAMAOptimalSpiralCentroidSearch = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch").set_name( + "LLAMAOptimalSpiralCentroidSearch", register=True + ) +except Exception as e: + print("OptimalSpiralCentroidSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalStrategicAdaptiveOptimizer import ( + OptimalStrategicAdaptiveOptimizer, + ) + + lama_register["OptimalStrategicAdaptiveOptimizer"] = OptimalStrategicAdaptiveOptimizer + LLAMAOptimalStrategicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAOptimalStrategicAdaptiveOptimizer" + ).set_name("LLAMAOptimalStrategicAdaptiveOptimizer", register=True) +except Exception as e: + print("OptimalStrategicAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimalStrategicHybridDE import OptimalStrategicHybridDE + + lama_register["OptimalStrategicHybridDE"] = OptimalStrategicHybridDE + LLAMAOptimalStrategicHybridDE = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE").set_name( + "LLAMAOptimalStrategicHybridDE", register=True + ) +except Exception as e: + print("OptimalStrategicHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimallyBalancedQuantumStrategy import OptimallyBalancedQuantumStrategy + + lama_register["OptimallyBalancedQuantumStrategy"] = OptimallyBalancedQuantumStrategy + LLAMAOptimallyBalancedQuantumStrategy = NonObjectOptimizer( + method="LLAMAOptimallyBalancedQuantumStrategy" + ).set_name("LLAMAOptimallyBalancedQuantumStrategy", register=True) +except Exception as e: + print("OptimallyBalancedQuantumStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveDifferentialClimber import ( + OptimizedAdaptiveDifferentialClimber, + ) + + lama_register["OptimizedAdaptiveDifferentialClimber"] = OptimizedAdaptiveDifferentialClimber + LLAMAOptimizedAdaptiveDifferentialClimber = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDifferentialClimber" + ).set_name("LLAMAOptimizedAdaptiveDifferentialClimber", register=True) +except Exception as e: + print("OptimizedAdaptiveDifferentialClimber can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategy import ( + OptimizedAdaptiveDualPhaseStrategy, + ) + + lama_register["OptimizedAdaptiveDualPhaseStrategy"] = OptimizedAdaptiveDualPhaseStrategy + LLAMAOptimizedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDualPhaseStrategy" + ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: + print("OptimizedAdaptiveDualPhaseStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategyV4 import ( + OptimizedAdaptiveDualPhaseStrategyV4, + ) + + lama_register["OptimizedAdaptiveDualPhaseStrategyV4"] = OptimizedAdaptiveDualPhaseStrategyV4 + LLAMAOptimizedAdaptiveDualPhaseStrategyV4 = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4" + ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategyV4", register=True) +except Exception as e: + print("OptimizedAdaptiveDualPhaseStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveDynamicStrategyV34 import ( + OptimizedAdaptiveDynamicStrategyV34, + ) + + lama_register["OptimizedAdaptiveDynamicStrategyV34"] = OptimizedAdaptiveDynamicStrategyV34 + LLAMAOptimizedAdaptiveDynamicStrategyV34 = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDynamicStrategyV34" + ).set_name("LLAMAOptimizedAdaptiveDynamicStrategyV34", register=True) +except Exception as e: + print("OptimizedAdaptiveDynamicStrategyV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveGlobalLocalSearch import ( + OptimizedAdaptiveGlobalLocalSearch, + ) + + lama_register["OptimizedAdaptiveGlobalLocalSearch"] = OptimizedAdaptiveGlobalLocalSearch + LLAMAOptimizedAdaptiveGlobalLocalSearch = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveGlobalLocalSearch" + ).set_name("LLAMAOptimizedAdaptiveGlobalLocalSearch", register=True) +except Exception as e: + print("OptimizedAdaptiveGlobalLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveQuantumGradientHybridStrategy import ( + OptimizedAdaptiveQuantumGradientHybridStrategy, + ) + + lama_register["OptimizedAdaptiveQuantumGradientHybridStrategy"] = ( + OptimizedAdaptiveQuantumGradientHybridStrategy + ) + LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy" + ).set_name("LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy", register=True) +except Exception as e: + print("OptimizedAdaptiveQuantumGradientHybridStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedAdaptiveSimulatedAnnealingWithSmartMemory import ( + OptimizedAdaptiveSimulatedAnnealingWithSmartMemory, + ) + + lama_register["OptimizedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( + OptimizedAdaptiveSimulatedAnnealingWithSmartMemory + ) + LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: + print("OptimizedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedBalancedDualStrategyAdaptiveDE import ( + OptimizedBalancedDualStrategyAdaptiveDE, + ) + + lama_register["OptimizedBalancedDualStrategyAdaptiveDE"] = OptimizedBalancedDualStrategyAdaptiveDE + LLAMAOptimizedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("OptimizedBalancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedConvergenceIslandStrategy import ( + OptimizedConvergenceIslandStrategy, + ) + + lama_register["OptimizedConvergenceIslandStrategy"] = OptimizedConvergenceIslandStrategy + LLAMAOptimizedConvergenceIslandStrategy = NonObjectOptimizer( + method="LLAMAOptimizedConvergenceIslandStrategy" + ).set_name("LLAMAOptimizedConvergenceIslandStrategy", register=True) +except Exception as e: + print("OptimizedConvergenceIslandStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedConvergentAdaptiveEvolver import ( + OptimizedConvergentAdaptiveEvolver, + ) + + lama_register["OptimizedConvergentAdaptiveEvolver"] = OptimizedConvergentAdaptiveEvolver + LLAMAOptimizedConvergentAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAOptimizedConvergentAdaptiveEvolver" + ).set_name("LLAMAOptimizedConvergentAdaptiveEvolver", register=True) +except Exception as e: + print("OptimizedConvergentAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedCrossoverElitistStrategyV8 import ( + OptimizedCrossoverElitistStrategyV8, + ) + + lama_register["OptimizedCrossoverElitistStrategyV8"] = OptimizedCrossoverElitistStrategyV8 + LLAMAOptimizedCrossoverElitistStrategyV8 = NonObjectOptimizer( + method="LLAMAOptimizedCrossoverElitistStrategyV8" + ).set_name("LLAMAOptimizedCrossoverElitistStrategyV8", register=True) +except Exception as e: + print("OptimizedCrossoverElitistStrategyV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDifferentialEvolution import OptimizedDifferentialEvolution + + lama_register["OptimizedDifferentialEvolution"] = OptimizedDifferentialEvolution + LLAMAOptimizedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimizedDifferentialEvolution" + ).set_name("LLAMAOptimizedDifferentialEvolution", register=True) +except Exception as e: + print("OptimizedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDualPhaseAdaptiveHybridOptimizationV4 import ( + OptimizedDualPhaseAdaptiveHybridOptimizationV4, + ) + + lama_register["OptimizedDualPhaseAdaptiveHybridOptimizationV4"] = ( + OptimizedDualPhaseAdaptiveHybridOptimizationV4 + ) + LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4" + ).set_name("LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4", register=True) +except Exception as e: + print("OptimizedDualPhaseAdaptiveHybridOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDualStrategyAdaptiveDE import OptimizedDualStrategyAdaptiveDE + + lama_register["OptimizedDualStrategyAdaptiveDE"] = OptimizedDualStrategyAdaptiveDE + LLAMAOptimizedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("OptimizedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicAdaptiveHybridOptimizer import ( + OptimizedDynamicAdaptiveHybridOptimizer, + ) + + lama_register["OptimizedDynamicAdaptiveHybridOptimizer"] = OptimizedDynamicAdaptiveHybridOptimizer + LLAMAOptimizedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAOptimizedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("OptimizedDynamicAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicDualPhaseStrategyV13 import ( + OptimizedDynamicDualPhaseStrategyV13, + ) + + lama_register["OptimizedDynamicDualPhaseStrategyV13"] = OptimizedDynamicDualPhaseStrategyV13 + LLAMAOptimizedDynamicDualPhaseStrategyV13 = NonObjectOptimizer( + method="LLAMAOptimizedDynamicDualPhaseStrategyV13" + ).set_name("LLAMAOptimizedDynamicDualPhaseStrategyV13", register=True) +except Exception as e: + print("OptimizedDynamicDualPhaseStrategyV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: + print("OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedSimulatedAnnealing import ( + OptimizedDynamicGradientBoostedSimulatedAnnealing, + ) + + lama_register["OptimizedDynamicGradientBoostedSimulatedAnnealing"] = ( + OptimizedDynamicGradientBoostedSimulatedAnnealing + ) + LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing" + ).set_name("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing", register=True) +except Exception as e: + print("OptimizedDynamicGradientBoostedSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicQuantumSwarmOptimization import ( + OptimizedDynamicQuantumSwarmOptimization, + ) + + lama_register["OptimizedDynamicQuantumSwarmOptimization"] = OptimizedDynamicQuantumSwarmOptimization + LLAMAOptimizedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAOptimizedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAOptimizedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("OptimizedDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedDynamicRestartAdaptiveDE import ( + OptimizedDynamicRestartAdaptiveDE, + ) + + lama_register["OptimizedDynamicRestartAdaptiveDE"] = OptimizedDynamicRestartAdaptiveDE + LLAMAOptimizedDynamicRestartAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedDynamicRestartAdaptiveDE" + ).set_name("LLAMAOptimizedDynamicRestartAdaptiveDE", register=True) +except Exception as e: + print("OptimizedDynamicRestartAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedEliteAdaptiveMemoryHybridOptimizer import ( + OptimizedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["OptimizedEliteAdaptiveMemoryHybridOptimizer"] = OptimizedEliteAdaptiveMemoryHybridOptimizer + LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("OptimizedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedEnhancedAdaptiveMetaNetAQAPSO import ( + OptimizedEnhancedAdaptiveMetaNetAQAPSO, + ) + + lama_register["OptimizedEnhancedAdaptiveMetaNetAQAPSO"] = OptimizedEnhancedAdaptiveMetaNetAQAPSO + LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: + print("OptimizedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedEnhancedDualStrategyAdaptiveDE import ( + OptimizedEnhancedDualStrategyAdaptiveDE, + ) + + lama_register["OptimizedEnhancedDualStrategyAdaptiveDE"] = OptimizedEnhancedDualStrategyAdaptiveDE + LLAMAOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedEnhancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("OptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedEnhancedDynamicFireworkAlgorithm import ( + OptimizedEnhancedDynamicFireworkAlgorithm, + ) + + lama_register["OptimizedEnhancedDynamicFireworkAlgorithm"] = OptimizedEnhancedDynamicFireworkAlgorithm + LLAMAOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("OptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedEvolutiveStrategy import OptimizedEvolutiveStrategy + + lama_register["OptimizedEvolutiveStrategy"] = OptimizedEvolutiveStrategy + LLAMAOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy").set_name( + "LLAMAOptimizedEvolutiveStrategy", register=True + ) +except Exception as e: + print("OptimizedEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedExplorationConvergenceStrategy import ( + OptimizedExplorationConvergenceStrategy, + ) + + lama_register["OptimizedExplorationConvergenceStrategy"] = OptimizedExplorationConvergenceStrategy + LLAMAOptimizedExplorationConvergenceStrategy = NonObjectOptimizer( + method="LLAMAOptimizedExplorationConvergenceStrategy" + ).set_name("LLAMAOptimizedExplorationConvergenceStrategy", register=True) +except Exception as e: + print("OptimizedExplorationConvergenceStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedGlobalStructureAwareEvolver import ( + OptimizedGlobalStructureAwareEvolver, + ) + + lama_register["OptimizedGlobalStructureAwareEvolver"] = OptimizedGlobalStructureAwareEvolver + LLAMAOptimizedGlobalStructureAwareEvolver = NonObjectOptimizer( + method="LLAMAOptimizedGlobalStructureAwareEvolver" + ).set_name("LLAMAOptimizedGlobalStructureAwareEvolver", register=True) +except Exception as e: + print("OptimizedGlobalStructureAwareEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedGradientBalancedPSO import OptimizedGradientBalancedPSO + + lama_register["OptimizedGradientBalancedPSO"] = OptimizedGradientBalancedPSO + LLAMAOptimizedGradientBalancedPSO = NonObjectOptimizer( + method="LLAMAOptimizedGradientBalancedPSO" + ).set_name("LLAMAOptimizedGradientBalancedPSO", register=True) +except Exception as e: + print("OptimizedGradientBalancedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch import ( + OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch, + ) + + lama_register["OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch"] = ( + OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch + ) + LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch = NonObjectOptimizer( + method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch" + ).set_name("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch", register=True) +except Exception as e: + print("OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedGradientMemorySimulatedAnnealing import ( + OptimizedGradientMemorySimulatedAnnealing, + ) + + lama_register["OptimizedGradientMemorySimulatedAnnealing"] = OptimizedGradientMemorySimulatedAnnealing + LLAMAOptimizedGradientMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAOptimizedGradientMemorySimulatedAnnealing" + ).set_name("LLAMAOptimizedGradientMemorySimulatedAnnealing", register=True) +except Exception as e: + print("OptimizedGradientMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHybridAdaptiveDualPhaseStrategyV7 import ( + OptimizedHybridAdaptiveDualPhaseStrategyV7, + ) + + lama_register["OptimizedHybridAdaptiveDualPhaseStrategyV7"] = OptimizedHybridAdaptiveDualPhaseStrategyV7 + LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7 = NonObjectOptimizer( + method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7" + ).set_name("LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7", register=True) +except Exception as e: + print("OptimizedHybridAdaptiveDualPhaseStrategyV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHybridAdaptiveMultiStageOptimization import ( + OptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["OptimizedHybridAdaptiveMultiStageOptimization"] = ( + OptimizedHybridAdaptiveMultiStageOptimization + ) + LLAMAOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("OptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHybridExplorationOptimization import ( + OptimizedHybridExplorationOptimization, + ) + + lama_register["OptimizedHybridExplorationOptimization"] = OptimizedHybridExplorationOptimization + LLAMAOptimizedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMAOptimizedHybridExplorationOptimization" + ).set_name("LLAMAOptimizedHybridExplorationOptimization", register=True) +except Exception as e: + print("OptimizedHybridExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHybridSearch import OptimizedHybridSearch + + lama_register["OptimizedHybridSearch"] = OptimizedHybridSearch + LLAMAOptimizedHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch").set_name( + "LLAMAOptimizedHybridSearch", register=True + ) +except Exception as e: + print("OptimizedHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHybridStrategyDE import OptimizedHybridStrategyDE + + lama_register["OptimizedHybridStrategyDE"] = OptimizedHybridStrategyDE + LLAMAOptimizedHybridStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE").set_name( + "LLAMAOptimizedHybridStrategyDE", register=True + ) +except Exception as e: + print("OptimizedHybridStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedHyperStrategicOptimizerV53 import ( + OptimizedHyperStrategicOptimizerV53, + ) + + lama_register["OptimizedHyperStrategicOptimizerV53"] = OptimizedHyperStrategicOptimizerV53 + LLAMAOptimizedHyperStrategicOptimizerV53 = NonObjectOptimizer( + method="LLAMAOptimizedHyperStrategicOptimizerV53" + ).set_name("LLAMAOptimizedHyperStrategicOptimizerV53", register=True) +except Exception as e: + print("OptimizedHyperStrategicOptimizerV53 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedIslandEvolutionStrategyV4 import ( + OptimizedIslandEvolutionStrategyV4, + ) + + lama_register["OptimizedIslandEvolutionStrategyV4"] = OptimizedIslandEvolutionStrategyV4 + LLAMAOptimizedIslandEvolutionStrategyV4 = NonObjectOptimizer( + method="LLAMAOptimizedIslandEvolutionStrategyV4" + ).set_name("LLAMAOptimizedIslandEvolutionStrategyV4", register=True) +except Exception as e: + print("OptimizedIslandEvolutionStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedMemoryEnhancedAdaptiveStrategyV70 import ( + OptimizedMemoryEnhancedAdaptiveStrategyV70, + ) + + lama_register["OptimizedMemoryEnhancedAdaptiveStrategyV70"] = OptimizedMemoryEnhancedAdaptiveStrategyV70 + LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70" + ).set_name("LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70", register=True) +except Exception as e: + print("OptimizedMemoryEnhancedAdaptiveStrategyV70 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedMemoryGuidedAdaptiveStrategyV81 import ( + OptimizedMemoryGuidedAdaptiveStrategyV81, + ) + + lama_register["OptimizedMemoryGuidedAdaptiveStrategyV81"] = OptimizedMemoryGuidedAdaptiveStrategyV81 + LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81" + ).set_name("LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81", register=True) +except Exception as e: + print("OptimizedMemoryGuidedAdaptiveStrategyV81 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedMemoryResponsiveAdaptiveStrategyV78 import ( + OptimizedMemoryResponsiveAdaptiveStrategyV78, + ) + + lama_register["OptimizedMemoryResponsiveAdaptiveStrategyV78"] = ( + OptimizedMemoryResponsiveAdaptiveStrategyV78 + ) + LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78" + ).set_name("LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78", register=True) +except Exception as e: + print("OptimizedMemoryResponsiveAdaptiveStrategyV78 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedParallelStrategyDE import OptimizedParallelStrategyDE + + lama_register["OptimizedParallelStrategyDE"] = OptimizedParallelStrategyDE + LLAMAOptimizedParallelStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE").set_name( + "LLAMAOptimizedParallelStrategyDE", register=True + ) +except Exception as e: + print("OptimizedParallelStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedPrecisionAdaptiveStrategy import ( + OptimizedPrecisionAdaptiveStrategy, + ) + + lama_register["OptimizedPrecisionAdaptiveStrategy"] = OptimizedPrecisionAdaptiveStrategy + LLAMAOptimizedPrecisionAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAOptimizedPrecisionAdaptiveStrategy" + ).set_name("LLAMAOptimizedPrecisionAdaptiveStrategy", register=True) +except Exception as e: + print("OptimizedPrecisionAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedPrecisionTunedCrossoverElitistStrategyV13 import ( + OptimizedPrecisionTunedCrossoverElitistStrategyV13, + ) + + lama_register["OptimizedPrecisionTunedCrossoverElitistStrategyV13"] = ( + OptimizedPrecisionTunedCrossoverElitistStrategyV13 + ) + LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13 = NonObjectOptimizer( + method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13" + ).set_name("LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13", register=True) +except Exception as e: + print("OptimizedPrecisionTunedCrossoverElitistStrategyV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 import ( + OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3"] = ( + OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 + ) + LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: + print("OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumFluxDifferentialSwarm import ( + OptimizedQuantumFluxDifferentialSwarm, + ) + + lama_register["OptimizedQuantumFluxDifferentialSwarm"] = OptimizedQuantumFluxDifferentialSwarm + LLAMAOptimizedQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMAOptimizedQuantumFluxDifferentialSwarm" + ).set_name("LLAMAOptimizedQuantumFluxDifferentialSwarm", register=True) +except Exception as e: + print("OptimizedQuantumFluxDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumGradientExplorationOptimization import ( + OptimizedQuantumGradientExplorationOptimization, + ) + + lama_register["OptimizedQuantumGradientExplorationOptimization"] = ( + OptimizedQuantumGradientExplorationOptimization + ) + LLAMAOptimizedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAOptimizedQuantumGradientExplorationOptimization" + ).set_name("LLAMAOptimizedQuantumGradientExplorationOptimization", register=True) +except Exception as e: + print("OptimizedQuantumGradientExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumHarmonySearch import OptimizedQuantumHarmonySearch + + lama_register["OptimizedQuantumHarmonySearch"] = OptimizedQuantumHarmonySearch + LLAMAOptimizedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAOptimizedQuantumHarmonySearch" + ).set_name("LLAMAOptimizedQuantumHarmonySearch", register=True) +except Exception as e: + print("OptimizedQuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumHybridDEPSO import OptimizedQuantumHybridDEPSO + + lama_register["OptimizedQuantumHybridDEPSO"] = OptimizedQuantumHybridDEPSO + LLAMAOptimizedQuantumHybridDEPSO = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO").set_name( + "LLAMAOptimizedQuantumHybridDEPSO", register=True + ) +except Exception as e: + print("OptimizedQuantumHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedQuantumLevyDifferentialSearch import ( + OptimizedQuantumLevyDifferentialSearch, + ) + + lama_register["OptimizedQuantumLevyDifferentialSearch"] = OptimizedQuantumLevyDifferentialSearch + LLAMAOptimizedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAOptimizedQuantumLevyDifferentialSearch" + ).set_name("LLAMAOptimizedQuantumLevyDifferentialSearch", register=True) +except Exception as e: + print("OptimizedQuantumLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRAMEDS import OptimizedRAMEDS + + lama_register["OptimizedRAMEDS"] = OptimizedRAMEDS + LLAMAOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS").set_name( + "LLAMAOptimizedRAMEDS", register=True + ) +except Exception as e: + print("OptimizedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( + OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO, + ) + + lama_register["OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( + OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO + ) + LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: + print("OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveHybridSearch import ( + OptimizedRefinedAdaptiveHybridSearch, + ) + + lama_register["OptimizedRefinedAdaptiveHybridSearch"] = OptimizedRefinedAdaptiveHybridSearch + LLAMAOptimizedRefinedAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveHybridSearch" + ).set_name("LLAMAOptimizedRefinedAdaptiveHybridSearch", register=True) +except Exception as e: + print("OptimizedRefinedAdaptiveHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveMultiStrategyDE import ( + OptimizedRefinedAdaptiveMultiStrategyDE, + ) + + lama_register["OptimizedRefinedAdaptiveMultiStrategyDE"] = OptimizedRefinedAdaptiveMultiStrategyDE + LLAMAOptimizedRefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE" + ).set_name("LLAMAOptimizedRefinedAdaptiveMultiStrategyDE", register=True) +except Exception as e: + print("OptimizedRefinedAdaptiveMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveRefinementPSO import ( + OptimizedRefinedAdaptiveRefinementPSO, + ) + + lama_register["OptimizedRefinedAdaptiveRefinementPSO"] = OptimizedRefinedAdaptiveRefinementPSO + LLAMAOptimizedRefinedAdaptiveRefinementPSO = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveRefinementPSO" + ).set_name("LLAMAOptimizedRefinedAdaptiveRefinementPSO", register=True) +except Exception as e: + print("OptimizedRefinedAdaptiveRefinementPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedEnhancedRAMEDSv5 import OptimizedRefinedEnhancedRAMEDSv5 + + lama_register["OptimizedRefinedEnhancedRAMEDSv5"] = OptimizedRefinedEnhancedRAMEDSv5 + LLAMAOptimizedRefinedEnhancedRAMEDSv5 = NonObjectOptimizer( + method="LLAMAOptimizedRefinedEnhancedRAMEDSv5" + ).set_name("LLAMAOptimizedRefinedEnhancedRAMEDSv5", register=True) +except Exception as e: + print("OptimizedRefinedEnhancedRAMEDSv5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedRefinedMemoryDualPhaseStrategyV65 import ( + OptimizedRefinedMemoryDualPhaseStrategyV65, + ) + + lama_register["OptimizedRefinedMemoryDualPhaseStrategyV65"] = OptimizedRefinedMemoryDualPhaseStrategyV65 + LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65 = NonObjectOptimizer( + method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65" + ).set_name("LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65", register=True) +except Exception as e: + print("OptimizedRefinedMemoryDualPhaseStrategyV65 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 import ( + OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45, + ) + + lama_register["OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45"] = ( + OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 + ) + LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 = NonObjectOptimizer( + method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45" + ).set_name("LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45", register=True) +except Exception as e: + print("OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.OscillatoryCrossoverDifferentialEvolution import ( + OscillatoryCrossoverDifferentialEvolution, + ) + + lama_register["OscillatoryCrossoverDifferentialEvolution"] = OscillatoryCrossoverDifferentialEvolution + LLAMAOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOscillatoryCrossoverDifferentialEvolution" + ).set_name("LLAMAOscillatoryCrossoverDifferentialEvolution", register=True) +except Exception as e: + print("OscillatoryCrossoverDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PADE import PADE + + lama_register["PADE"] = PADE + LLAMAPADE = NonObjectOptimizer(method="LLAMAPADE").set_name("LLAMAPADE", register=True) +except Exception as e: + print("PADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PAMDMDESM import PAMDMDESM + + lama_register["PAMDMDESM"] = PAMDMDESM + LLAMAPAMDMDESM = NonObjectOptimizer(method="LLAMAPAMDMDESM").set_name("LLAMAPAMDMDESM", register=True) +except Exception as e: + print("PAMDMDESM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PDEAF import PDEAF + + lama_register["PDEAF"] = PDEAF + LLAMAPDEAF = NonObjectOptimizer(method="LLAMAPDEAF").set_name("LLAMAPDEAF", register=True) +except Exception as e: + print("PDEAF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PGDE import PGDE + + lama_register["PGDE"] = PGDE + LLAMAPGDE = NonObjectOptimizer(method="LLAMAPGDE").set_name("LLAMAPGDE", register=True) +except Exception as e: + print("PGDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PMFSA import PMFSA + + lama_register["PMFSA"] = PMFSA + LLAMAPMFSA = NonObjectOptimizer(method="LLAMAPMFSA").set_name("LLAMAPMFSA", register=True) +except Exception as e: + print("PMFSA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PPDE import PPDE + + lama_register["PPDE"] = PPDE + LLAMAPPDE = NonObjectOptimizer(method="LLAMAPPDE").set_name("LLAMAPPDE", register=True) +except Exception as e: + print("PPDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PWDE import PWDE + + lama_register["PWDE"] = PWDE + LLAMAPWDE = NonObjectOptimizer(method="LLAMAPWDE").set_name("LLAMAPWDE", register=True) +except Exception as e: + print("PWDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimization import ( + PrecisionAdaptiveCohortOptimization, + ) + + lama_register["PrecisionAdaptiveCohortOptimization"] = PrecisionAdaptiveCohortOptimization + LLAMAPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveCohortOptimization" + ).set_name("LLAMAPrecisionAdaptiveCohortOptimization", register=True) +except Exception as e: + print("PrecisionAdaptiveCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimizationV2 import ( + PrecisionAdaptiveCohortOptimizationV2, + ) + + lama_register["PrecisionAdaptiveCohortOptimizationV2"] = PrecisionAdaptiveCohortOptimizationV2 + LLAMAPrecisionAdaptiveCohortOptimizationV2 = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveCohortOptimizationV2" + ).set_name("LLAMAPrecisionAdaptiveCohortOptimizationV2", register=True) +except Exception as e: + print("PrecisionAdaptiveCohortOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveDecayOptimizer import PrecisionAdaptiveDecayOptimizer + + lama_register["PrecisionAdaptiveDecayOptimizer"] = PrecisionAdaptiveDecayOptimizer + LLAMAPrecisionAdaptiveDecayOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDecayOptimizer" + ).set_name("LLAMAPrecisionAdaptiveDecayOptimizer", register=True) +except Exception as e: + print("PrecisionAdaptiveDecayOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveDifferentialEvolutionPlus import ( + PrecisionAdaptiveDifferentialEvolutionPlus, + ) + + lama_register["PrecisionAdaptiveDifferentialEvolutionPlus"] = PrecisionAdaptiveDifferentialEvolutionPlus + LLAMAPrecisionAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus" + ).set_name("LLAMAPrecisionAdaptiveDifferentialEvolutionPlus", register=True) +except Exception as e: + print("PrecisionAdaptiveDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveDynamicStrategyV33 import ( + PrecisionAdaptiveDynamicStrategyV33, + ) + + lama_register["PrecisionAdaptiveDynamicStrategyV33"] = PrecisionAdaptiveDynamicStrategyV33 + LLAMAPrecisionAdaptiveDynamicStrategyV33 = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDynamicStrategyV33" + ).set_name("LLAMAPrecisionAdaptiveDynamicStrategyV33", register=True) +except Exception as e: + print("PrecisionAdaptiveDynamicStrategyV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveGlobalClimbingEnhancer import ( + PrecisionAdaptiveGlobalClimbingEnhancer, + ) + + lama_register["PrecisionAdaptiveGlobalClimbingEnhancer"] = PrecisionAdaptiveGlobalClimbingEnhancer + LLAMAPrecisionAdaptiveGlobalClimbingEnhancer = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer" + ).set_name("LLAMAPrecisionAdaptiveGlobalClimbingEnhancer", register=True) +except Exception as e: + print("PrecisionAdaptiveGlobalClimbingEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptiveGradientClusteringPSO import ( + PrecisionAdaptiveGradientClusteringPSO, + ) + + lama_register["PrecisionAdaptiveGradientClusteringPSO"] = PrecisionAdaptiveGradientClusteringPSO + LLAMAPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveGradientClusteringPSO" + ).set_name("LLAMAPrecisionAdaptiveGradientClusteringPSO", register=True) +except Exception as e: + print("PrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionAdaptivePSO import PrecisionAdaptivePSO + + lama_register["PrecisionAdaptivePSO"] = PrecisionAdaptivePSO + LLAMAPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO").set_name( + "LLAMAPrecisionAdaptivePSO", register=True + ) +except Exception as e: + print("PrecisionAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionBalancedAdaptivePSO import PrecisionBalancedAdaptivePSO + + lama_register["PrecisionBalancedAdaptivePSO"] = PrecisionBalancedAdaptivePSO + LLAMAPrecisionBalancedAdaptivePSO = NonObjectOptimizer( + method="LLAMAPrecisionBalancedAdaptivePSO" + ).set_name("LLAMAPrecisionBalancedAdaptivePSO", register=True) +except Exception as e: + print("PrecisionBalancedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionBalancedEvolutionStrategy import ( + PrecisionBalancedEvolutionStrategy, + ) + + lama_register["PrecisionBalancedEvolutionStrategy"] = PrecisionBalancedEvolutionStrategy + LLAMAPrecisionBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionBalancedEvolutionStrategy" + ).set_name("LLAMAPrecisionBalancedEvolutionStrategy", register=True) +except Exception as e: + print("PrecisionBalancedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionBalancedOptimizer import PrecisionBalancedOptimizer + + lama_register["PrecisionBalancedOptimizer"] = PrecisionBalancedOptimizer + LLAMAPrecisionBalancedOptimizer = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer").set_name( + "LLAMAPrecisionBalancedOptimizer", register=True + ) +except Exception as e: + print("PrecisionBalancedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionBoostedDifferentialEvolution import ( + PrecisionBoostedDifferentialEvolution, + ) + + lama_register["PrecisionBoostedDifferentialEvolution"] = PrecisionBoostedDifferentialEvolution + LLAMAPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAPrecisionBoostedDifferentialEvolution" + ).set_name("LLAMAPrecisionBoostedDifferentialEvolution", register=True) +except Exception as e: + print("PrecisionBoostedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionCosineAdaptiveDifferentialSwarm import ( + PrecisionCosineAdaptiveDifferentialSwarm, + ) + + lama_register["PrecisionCosineAdaptiveDifferentialSwarm"] = PrecisionCosineAdaptiveDifferentialSwarm + LLAMAPrecisionCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMAPrecisionCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: + print("PrecisionCosineAdaptiveDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionDifferentialEvolution import PrecisionDifferentialEvolution + + lama_register["PrecisionDifferentialEvolution"] = PrecisionDifferentialEvolution + LLAMAPrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAPrecisionDifferentialEvolution" + ).set_name("LLAMAPrecisionDifferentialEvolution", register=True) +except Exception as e: + print("PrecisionDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionDynamicAdaptiveOptimizerV6 import ( + PrecisionDynamicAdaptiveOptimizerV6, + ) + + lama_register["PrecisionDynamicAdaptiveOptimizerV6"] = PrecisionDynamicAdaptiveOptimizerV6 + LLAMAPrecisionDynamicAdaptiveOptimizerV6 = NonObjectOptimizer( + method="LLAMAPrecisionDynamicAdaptiveOptimizerV6" + ).set_name("LLAMAPrecisionDynamicAdaptiveOptimizerV6", register=True) +except Exception as e: + print("PrecisionDynamicAdaptiveOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedDualStrategyOptimizer import ( + PrecisionEnhancedDualStrategyOptimizer, + ) + + lama_register["PrecisionEnhancedDualStrategyOptimizer"] = PrecisionEnhancedDualStrategyOptimizer + LLAMAPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedDualStrategyOptimizer" + ).set_name("LLAMAPrecisionEnhancedDualStrategyOptimizer", register=True) +except Exception as e: + print("PrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedDynamicOptimizerV13 import ( + PrecisionEnhancedDynamicOptimizerV13, + ) + + lama_register["PrecisionEnhancedDynamicOptimizerV13"] = PrecisionEnhancedDynamicOptimizerV13 + LLAMAPrecisionEnhancedDynamicOptimizerV13 = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedDynamicOptimizerV13" + ).set_name("LLAMAPrecisionEnhancedDynamicOptimizerV13", register=True) +except Exception as e: + print("PrecisionEnhancedDynamicOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedSearch import PrecisionEnhancedSearch + + lama_register["PrecisionEnhancedSearch"] = PrecisionEnhancedSearch + LLAMAPrecisionEnhancedSearch = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch").set_name( + "LLAMAPrecisionEnhancedSearch", register=True + ) +except Exception as e: + print("PrecisionEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedSpatialAdaptiveEvolver import ( + PrecisionEnhancedSpatialAdaptiveEvolver, + ) + + lama_register["PrecisionEnhancedSpatialAdaptiveEvolver"] = PrecisionEnhancedSpatialAdaptiveEvolver + LLAMAPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMAPrecisionEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: + print("PrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedSpiralDifferentialClimberV4 import ( + PrecisionEnhancedSpiralDifferentialClimberV4, + ) + + lama_register["PrecisionEnhancedSpiralDifferentialClimberV4"] = ( + PrecisionEnhancedSpiralDifferentialClimberV4 + ) + LLAMAPrecisionEnhancedSpiralDifferentialClimberV4 = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4" + ).set_name("LLAMAPrecisionEnhancedSpiralDifferentialClimberV4", register=True) +except Exception as e: + print("PrecisionEnhancedSpiralDifferentialClimberV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEnhancedStrategicOptimizer import ( + PrecisionEnhancedStrategicOptimizer, + ) + + lama_register["PrecisionEnhancedStrategicOptimizer"] = PrecisionEnhancedStrategicOptimizer + LLAMAPrecisionEnhancedStrategicOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedStrategicOptimizer" + ).set_name("LLAMAPrecisionEnhancedStrategicOptimizer", register=True) +except Exception as e: + print("PrecisionEnhancedStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionEvolutionaryThermalOptimizer import ( + PrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["PrecisionEvolutionaryThermalOptimizer"] = PrecisionEvolutionaryThermalOptimizer + LLAMAPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: + print("PrecisionEvolutionaryThermalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionFocusedAdaptivePSO import PrecisionFocusedAdaptivePSO + + lama_register["PrecisionFocusedAdaptivePSO"] = PrecisionFocusedAdaptivePSO + LLAMAPrecisionFocusedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO").set_name( + "LLAMAPrecisionFocusedAdaptivePSO", register=True + ) +except Exception as e: + print("PrecisionFocusedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionGuidedEvolutionStrategy import PrecisionGuidedEvolutionStrategy + + lama_register["PrecisionGuidedEvolutionStrategy"] = PrecisionGuidedEvolutionStrategy + LLAMAPrecisionGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionGuidedEvolutionStrategy" + ).set_name("LLAMAPrecisionGuidedEvolutionStrategy", register=True) +except Exception as e: + print("PrecisionGuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionGuidedEvolutionaryAlgorithm import ( + PrecisionGuidedEvolutionaryAlgorithm, + ) + + lama_register["PrecisionGuidedEvolutionaryAlgorithm"] = PrecisionGuidedEvolutionaryAlgorithm + LLAMAPrecisionGuidedEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAPrecisionGuidedEvolutionaryAlgorithm" + ).set_name("LLAMAPrecisionGuidedEvolutionaryAlgorithm", register=True) +except Exception as e: + print("PrecisionGuidedEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionGuidedQuantumStrategy import PrecisionGuidedQuantumStrategy + + lama_register["PrecisionGuidedQuantumStrategy"] = PrecisionGuidedQuantumStrategy + LLAMAPrecisionGuidedQuantumStrategy = NonObjectOptimizer( + method="LLAMAPrecisionGuidedQuantumStrategy" + ).set_name("LLAMAPrecisionGuidedQuantumStrategy", register=True) +except Exception as e: + print("PrecisionGuidedQuantumStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionIncrementalEvolutionStrategy import ( + PrecisionIncrementalEvolutionStrategy, + ) + + lama_register["PrecisionIncrementalEvolutionStrategy"] = PrecisionIncrementalEvolutionStrategy + LLAMAPrecisionIncrementalEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionIncrementalEvolutionStrategy" + ).set_name("LLAMAPrecisionIncrementalEvolutionStrategy", register=True) +except Exception as e: + print("PrecisionIncrementalEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionOptimizedEvolutionaryOptimizerV22 import ( + PrecisionOptimizedEvolutionaryOptimizerV22, + ) + + lama_register["PrecisionOptimizedEvolutionaryOptimizerV22"] = PrecisionOptimizedEvolutionaryOptimizerV22 + LLAMAPrecisionOptimizedEvolutionaryOptimizerV22 = NonObjectOptimizer( + method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22" + ).set_name("LLAMAPrecisionOptimizedEvolutionaryOptimizerV22", register=True) +except Exception as e: + print("PrecisionOptimizedEvolutionaryOptimizerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionRotationalClimbOptimizer import ( + PrecisionRotationalClimbOptimizer, + ) + + lama_register["PrecisionRotationalClimbOptimizer"] = PrecisionRotationalClimbOptimizer + LLAMAPrecisionRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionRotationalClimbOptimizer" + ).set_name("LLAMAPrecisionRotationalClimbOptimizer", register=True) +except Exception as e: + print("PrecisionRotationalClimbOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionScaledEvolutionarySearch import ( + PrecisionScaledEvolutionarySearch, + ) + + lama_register["PrecisionScaledEvolutionarySearch"] = PrecisionScaledEvolutionarySearch + LLAMAPrecisionScaledEvolutionarySearch = NonObjectOptimizer( + method="LLAMAPrecisionScaledEvolutionarySearch" + ).set_name("LLAMAPrecisionScaledEvolutionarySearch", register=True) +except Exception as e: + print("PrecisionScaledEvolutionarySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionSpiralDifferentialOptimizerV6 import ( + PrecisionSpiralDifferentialOptimizerV6, + ) + + lama_register["PrecisionSpiralDifferentialOptimizerV6"] = PrecisionSpiralDifferentialOptimizerV6 + LLAMAPrecisionSpiralDifferentialOptimizerV6 = NonObjectOptimizer( + method="LLAMAPrecisionSpiralDifferentialOptimizerV6" + ).set_name("LLAMAPrecisionSpiralDifferentialOptimizerV6", register=True) +except Exception as e: + print("PrecisionSpiralDifferentialOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionTunedCrossoverElitistStrategyV11 import ( + PrecisionTunedCrossoverElitistStrategyV11, + ) + + lama_register["PrecisionTunedCrossoverElitistStrategyV11"] = PrecisionTunedCrossoverElitistStrategyV11 + LLAMAPrecisionTunedCrossoverElitistStrategyV11 = NonObjectOptimizer( + method="LLAMAPrecisionTunedCrossoverElitistStrategyV11" + ).set_name("LLAMAPrecisionTunedCrossoverElitistStrategyV11", register=True) +except Exception as e: + print("PrecisionTunedCrossoverElitistStrategyV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionTunedEvolver import PrecisionTunedEvolver + + lama_register["PrecisionTunedEvolver"] = PrecisionTunedEvolver + LLAMAPrecisionTunedEvolver = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver").set_name( + "LLAMAPrecisionTunedEvolver", register=True + ) +except Exception as e: + print("PrecisionTunedEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionTunedHybridSearch import PrecisionTunedHybridSearch + + lama_register["PrecisionTunedHybridSearch"] = PrecisionTunedHybridSearch + LLAMAPrecisionTunedHybridSearch = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch").set_name( + "LLAMAPrecisionTunedHybridSearch", register=True + ) +except Exception as e: + print("PrecisionTunedHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionTunedPSO import PrecisionTunedPSO + + lama_register["PrecisionTunedPSO"] = PrecisionTunedPSO + LLAMAPrecisionTunedPSO = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO").set_name( + "LLAMAPrecisionTunedPSO", register=True + ) +except Exception as e: + print("PrecisionTunedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.PrecisionTunedQuantumHarmonicFeedbackOptimizer import ( + PrecisionTunedQuantumHarmonicFeedbackOptimizer, + ) + + lama_register["PrecisionTunedQuantumHarmonicFeedbackOptimizer"] = ( + PrecisionTunedQuantumHarmonicFeedbackOptimizer + ) + LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: + print("PrecisionTunedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveAdaptiveDifferentialEvolution import ( + ProgressiveAdaptiveDifferentialEvolution, + ) + + lama_register["ProgressiveAdaptiveDifferentialEvolution"] = ProgressiveAdaptiveDifferentialEvolution + LLAMAProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAProgressiveAdaptiveDifferentialEvolution" + ).set_name("LLAMAProgressiveAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveAdaptiveGlobalLocalSearch import ( + ProgressiveAdaptiveGlobalLocalSearch, + ) + + lama_register["ProgressiveAdaptiveGlobalLocalSearch"] = ProgressiveAdaptiveGlobalLocalSearch + LLAMAProgressiveAdaptiveGlobalLocalSearch = NonObjectOptimizer( + method="LLAMAProgressiveAdaptiveGlobalLocalSearch" + ).set_name("LLAMAProgressiveAdaptiveGlobalLocalSearch", register=True) +except Exception as e: + print("ProgressiveAdaptiveGlobalLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveCohortDiversityOptimization import ( + ProgressiveCohortDiversityOptimization, + ) + + lama_register["ProgressiveCohortDiversityOptimization"] = ProgressiveCohortDiversityOptimization + LLAMAProgressiveCohortDiversityOptimization = NonObjectOptimizer( + method="LLAMAProgressiveCohortDiversityOptimization" + ).set_name("LLAMAProgressiveCohortDiversityOptimization", register=True) +except Exception as e: + print("ProgressiveCohortDiversityOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveDimensionalOptimizer import ProgressiveDimensionalOptimizer + + lama_register["ProgressiveDimensionalOptimizer"] = ProgressiveDimensionalOptimizer + LLAMAProgressiveDimensionalOptimizer = NonObjectOptimizer( + method="LLAMAProgressiveDimensionalOptimizer" + ).set_name("LLAMAProgressiveDimensionalOptimizer", register=True) +except Exception as e: + print("ProgressiveDimensionalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveEvolutionaryFireworkAlgorithm import ( + ProgressiveEvolutionaryFireworkAlgorithm, + ) + + lama_register["ProgressiveEvolutionaryFireworkAlgorithm"] = ProgressiveEvolutionaryFireworkAlgorithm + LLAMAProgressiveEvolutionaryFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAProgressiveEvolutionaryFireworkAlgorithm" + ).set_name("LLAMAProgressiveEvolutionaryFireworkAlgorithm", register=True) +except Exception as e: + print("ProgressiveEvolutionaryFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveHybridAdaptiveDifferentialEvolution import ( + ProgressiveHybridAdaptiveDifferentialEvolution, + ) + + lama_register["ProgressiveHybridAdaptiveDifferentialEvolution"] = ( + ProgressiveHybridAdaptiveDifferentialEvolution + ) + LLAMAProgressiveHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAProgressiveHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("ProgressiveHybridAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveParticleSwarmOptimization import ( + ProgressiveParticleSwarmOptimization, + ) + + lama_register["ProgressiveParticleSwarmOptimization"] = ProgressiveParticleSwarmOptimization + LLAMAProgressiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAProgressiveParticleSwarmOptimization" + ).set_name("LLAMAProgressiveParticleSwarmOptimization", register=True) +except Exception as e: + print("ProgressiveParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressivePopulationRefinementStrategy import ( + ProgressivePopulationRefinementStrategy, + ) + + lama_register["ProgressivePopulationRefinementStrategy"] = ProgressivePopulationRefinementStrategy + LLAMAProgressivePopulationRefinementStrategy = NonObjectOptimizer( + method="LLAMAProgressivePopulationRefinementStrategy" + ).set_name("LLAMAProgressivePopulationRefinementStrategy", register=True) +except Exception as e: + print("ProgressivePopulationRefinementStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveQuorumEvolutionStrategy import ( + ProgressiveQuorumEvolutionStrategy, + ) + + lama_register["ProgressiveQuorumEvolutionStrategy"] = ProgressiveQuorumEvolutionStrategy + LLAMAProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAProgressiveQuorumEvolutionStrategy" + ).set_name("LLAMAProgressiveQuorumEvolutionStrategy", register=True) +except Exception as e: + print("ProgressiveQuorumEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ProgressiveRefinementSearch import ProgressiveRefinementSearch + + lama_register["ProgressiveRefinementSearch"] = ProgressiveRefinementSearch + LLAMAProgressiveRefinementSearch = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch").set_name( + "LLAMAProgressiveRefinementSearch", register=True + ) +except Exception as e: + print("ProgressiveRefinementSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSO import QAPSO + + lama_register["QAPSO"] = QAPSO + LLAMAQAPSO = NonObjectOptimizer(method="LLAMAQAPSO").set_name("LLAMAQAPSO", register=True) +except Exception as e: + print("QAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSOAIR import QAPSOAIR + + lama_register["QAPSOAIR"] = QAPSOAIR + LLAMAQAPSOAIR = NonObjectOptimizer(method="LLAMAQAPSOAIR").set_name("LLAMAQAPSOAIR", register=True) +except Exception as e: + print("QAPSOAIR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSOAIRVC import QAPSOAIRVC + + lama_register["QAPSOAIRVC"] = QAPSOAIRVC + LLAMAQAPSOAIRVC = NonObjectOptimizer(method="LLAMAQAPSOAIRVC").set_name("LLAMAQAPSOAIRVC", register=True) +except Exception as e: + print("QAPSOAIRVC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSOAIRVCHR import QAPSOAIRVCHR + + lama_register["QAPSOAIRVCHR"] = QAPSOAIRVCHR + LLAMAQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR").set_name( + "LLAMAQAPSOAIRVCHR", register=True + ) +except Exception as e: + print("QAPSOAIRVCHR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSOAIW import QAPSOAIW + + lama_register["QAPSOAIW"] = QAPSOAIW + LLAMAQAPSOAIW = NonObjectOptimizer(method="LLAMAQAPSOAIW").set_name("LLAMAQAPSOAIW", register=True) +except Exception as e: + print("QAPSOAIW can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QAPSOAIWRR import QAPSOAIWRR + + lama_register["QAPSOAIWRR"] = QAPSOAIWRR + LLAMAQAPSOAIWRR = NonObjectOptimizer(method="LLAMAQAPSOAIWRR").set_name("LLAMAQAPSOAIWRR", register=True) +except Exception as e: + print("QAPSOAIWRR can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QPSO import QPSO + + lama_register["QPSO"] = QPSO + LLAMAQPSO = NonObjectOptimizer(method="LLAMAQPSO").set_name("LLAMAQPSO", register=True) +except Exception as e: + print("QPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAcceleratedEvolutionStrategy import ( + QuantumAcceleratedEvolutionStrategy, + ) + + lama_register["QuantumAcceleratedEvolutionStrategy"] = QuantumAcceleratedEvolutionStrategy + LLAMAQuantumAcceleratedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedEvolutionStrategy" + ).set_name("LLAMAQuantumAcceleratedEvolutionStrategy", register=True) +except Exception as e: + print("QuantumAcceleratedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAcceleratedNesterovOptimizer import ( + QuantumAcceleratedNesterovOptimizer, + ) + + lama_register["QuantumAcceleratedNesterovOptimizer"] = QuantumAcceleratedNesterovOptimizer + LLAMAQuantumAcceleratedNesterovOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedNesterovOptimizer" + ).set_name("LLAMAQuantumAcceleratedNesterovOptimizer", register=True) +except Exception as e: + print("QuantumAcceleratedNesterovOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAcceleratedNesterovPlusOptimizer import ( + QuantumAcceleratedNesterovPlusOptimizer, + ) + + lama_register["QuantumAcceleratedNesterovPlusOptimizer"] = QuantumAcceleratedNesterovPlusOptimizer + LLAMAQuantumAcceleratedNesterovPlusOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedNesterovPlusOptimizer" + ).set_name("LLAMAQuantumAcceleratedNesterovPlusOptimizer", register=True) +except Exception as e: + print("QuantumAcceleratedNesterovPlusOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV5 import ( + QuantumAdaptiveCognitionOptimizerV5, + ) + + lama_register["QuantumAdaptiveCognitionOptimizerV5"] = QuantumAdaptiveCognitionOptimizerV5 + LLAMAQuantumAdaptiveCognitionOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCognitionOptimizerV5" + ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV5", register=True) +except Exception as e: + print("QuantumAdaptiveCognitionOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV6 import ( + QuantumAdaptiveCognitionOptimizerV6, + ) + + lama_register["QuantumAdaptiveCognitionOptimizerV6"] = QuantumAdaptiveCognitionOptimizerV6 + LLAMAQuantumAdaptiveCognitionOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCognitionOptimizerV6" + ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV6", register=True) +except Exception as e: + print("QuantumAdaptiveCognitionOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveConvergenceOptimizer import ( + QuantumAdaptiveConvergenceOptimizer, + ) + + lama_register["QuantumAdaptiveConvergenceOptimizer"] = QuantumAdaptiveConvergenceOptimizer + LLAMAQuantumAdaptiveConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveConvergenceOptimizer" + ).set_name("LLAMAQuantumAdaptiveConvergenceOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveCrossoverRefinement import ( + QuantumAdaptiveCrossoverRefinement, + ) + + lama_register["QuantumAdaptiveCrossoverRefinement"] = QuantumAdaptiveCrossoverRefinement + LLAMAQuantumAdaptiveCrossoverRefinement = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCrossoverRefinement" + ).set_name("LLAMAQuantumAdaptiveCrossoverRefinement", register=True) +except Exception as e: + print("QuantumAdaptiveCrossoverRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( + QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, + ) + + lama_register["QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( + QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + ) + LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" + ).set_name("LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) +except Exception as e: + print("QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolution import ( + QuantumAdaptiveDifferentialEvolution, + ) + + lama_register["QuantumAdaptiveDifferentialEvolution"] = QuantumAdaptiveDifferentialEvolution + LLAMAQuantumAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolution" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV3 import ( + QuantumAdaptiveDifferentialEvolutionV3, + ) + + lama_register["QuantumAdaptiveDifferentialEvolutionV3"] = QuantumAdaptiveDifferentialEvolutionV3 + LLAMAQuantumAdaptiveDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolutionV3" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV3", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV4 import ( + QuantumAdaptiveDifferentialEvolutionV4, + ) + + lama_register["QuantumAdaptiveDifferentialEvolutionV4"] = QuantumAdaptiveDifferentialEvolutionV4 + LLAMAQuantumAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolutionV4" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV4", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV10 import ( + QuantumAdaptiveDifferentialStrategyV10, + ) + + lama_register["QuantumAdaptiveDifferentialStrategyV10"] = QuantumAdaptiveDifferentialStrategyV10 + LLAMAQuantumAdaptiveDifferentialStrategyV10 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV10" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV10", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialStrategyV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV11 import ( + QuantumAdaptiveDifferentialStrategyV11, + ) + + lama_register["QuantumAdaptiveDifferentialStrategyV11"] = QuantumAdaptiveDifferentialStrategyV11 + LLAMAQuantumAdaptiveDifferentialStrategyV11 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV11" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV11", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialStrategyV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV12 import ( + QuantumAdaptiveDifferentialStrategyV12, + ) + + lama_register["QuantumAdaptiveDifferentialStrategyV12"] = QuantumAdaptiveDifferentialStrategyV12 + LLAMAQuantumAdaptiveDifferentialStrategyV12 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV12" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV12", register=True) +except Exception as e: + print("QuantumAdaptiveDifferentialStrategyV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV11 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV11, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV11"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV11 + ) + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedDynamicHybridSearchV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV12 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV12, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV12"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV12 + ) + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedDynamicHybridSearchV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV13 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV13, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV13"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV13 + ) + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedDynamicHybridSearchV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV14 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV14, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV14"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV14 + ) + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedDynamicHybridSearchV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV15 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV15, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV15"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV15 + ) + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedDynamicHybridSearchV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedHybridSearchV10 import ( + QuantumAdaptiveDiversifiedHybridSearchV10, + ) + + lama_register["QuantumAdaptiveDiversifiedHybridSearchV10"] = QuantumAdaptiveDiversifiedHybridSearchV10 + LLAMAQuantumAdaptiveDiversifiedHybridSearchV10 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10" + ).set_name("LLAMAQuantumAdaptiveDiversifiedHybridSearchV10", register=True) +except Exception as e: + print("QuantumAdaptiveDiversifiedHybridSearchV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExploration import ( + QuantumAdaptiveDynamicExploration, + ) + + lama_register["QuantumAdaptiveDynamicExploration"] = QuantumAdaptiveDynamicExploration + LLAMAQuantumAdaptiveDynamicExploration = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExploration" + ).set_name("LLAMAQuantumAdaptiveDynamicExploration", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV2 import ( + QuantumAdaptiveDynamicExplorationV2, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV2"] = QuantumAdaptiveDynamicExplorationV2 + LLAMAQuantumAdaptiveDynamicExplorationV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV2" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV2", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV3 import ( + QuantumAdaptiveDynamicExplorationV3, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV3"] = QuantumAdaptiveDynamicExplorationV3 + LLAMAQuantumAdaptiveDynamicExplorationV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV3" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV3", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV4 import ( + QuantumAdaptiveDynamicExplorationV4, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV4"] = QuantumAdaptiveDynamicExplorationV4 + LLAMAQuantumAdaptiveDynamicExplorationV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV4" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV4", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV5 import ( + QuantumAdaptiveDynamicExplorationV5, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV5"] = QuantumAdaptiveDynamicExplorationV5 + LLAMAQuantumAdaptiveDynamicExplorationV5 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV5" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV5", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV6 import ( + QuantumAdaptiveDynamicExplorationV6, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV6"] = QuantumAdaptiveDynamicExplorationV6 + LLAMAQuantumAdaptiveDynamicExplorationV6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV6" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV6", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV7 import ( + QuantumAdaptiveDynamicExplorationV7, + ) + + lama_register["QuantumAdaptiveDynamicExplorationV7"] = QuantumAdaptiveDynamicExplorationV7 + LLAMAQuantumAdaptiveDynamicExplorationV7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV7" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV7", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicExplorationV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveDynamicStrategyV7 import QuantumAdaptiveDynamicStrategyV7 + + lama_register["QuantumAdaptiveDynamicStrategyV7"] = QuantumAdaptiveDynamicStrategyV7 + LLAMAQuantumAdaptiveDynamicStrategyV7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicStrategyV7" + ).set_name("LLAMAQuantumAdaptiveDynamicStrategyV7", register=True) +except Exception as e: + print("QuantumAdaptiveDynamicStrategyV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveEliteGuidedSearch import QuantumAdaptiveEliteGuidedSearch + + lama_register["QuantumAdaptiveEliteGuidedSearch"] = QuantumAdaptiveEliteGuidedSearch + LLAMAQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveEliteGuidedSearch" + ).set_name("LLAMAQuantumAdaptiveEliteGuidedSearch", register=True) +except Exception as e: + print("QuantumAdaptiveEliteGuidedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveFireworksOptimizer import ( + QuantumAdaptiveFireworksOptimizer, + ) + + lama_register["QuantumAdaptiveFireworksOptimizer"] = QuantumAdaptiveFireworksOptimizer + LLAMAQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveFireworksOptimizer" + ).set_name("LLAMAQuantumAdaptiveFireworksOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveFireworksOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveGradientDiversityExplorer import ( + QuantumAdaptiveGradientDiversityExplorer, + ) + + lama_register["QuantumAdaptiveGradientDiversityExplorer"] = QuantumAdaptiveGradientDiversityExplorer + LLAMAQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveGradientDiversityExplorer" + ).set_name("LLAMAQuantumAdaptiveGradientDiversityExplorer", register=True) +except Exception as e: + print("QuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveGradientSearch import QuantumAdaptiveGradientSearch + + lama_register["QuantumAdaptiveGradientSearch"] = QuantumAdaptiveGradientSearch + LLAMAQuantumAdaptiveGradientSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveGradientSearch" + ).set_name("LLAMAQuantumAdaptiveGradientSearch", register=True) +except Exception as e: + print("QuantumAdaptiveGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveHarmonicOptimizerV8 import ( + QuantumAdaptiveHarmonicOptimizerV8, + ) + + lama_register["QuantumAdaptiveHarmonicOptimizerV8"] = QuantumAdaptiveHarmonicOptimizerV8 + LLAMAQuantumAdaptiveHarmonicOptimizerV8 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHarmonicOptimizerV8" + ).set_name("LLAMAQuantumAdaptiveHarmonicOptimizerV8", register=True) +except Exception as e: + print("QuantumAdaptiveHarmonicOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveHybridDEPSO_V7 import QuantumAdaptiveHybridDEPSO_V7 + + lama_register["QuantumAdaptiveHybridDEPSO_V7"] = QuantumAdaptiveHybridDEPSO_V7 + LLAMAQuantumAdaptiveHybridDEPSO_V7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridDEPSO_V7" + ).set_name("LLAMAQuantumAdaptiveHybridDEPSO_V7", register=True) +except Exception as e: + print("QuantumAdaptiveHybridDEPSO_V7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizer import QuantumAdaptiveHybridOptimizer + + lama_register["QuantumAdaptiveHybridOptimizer"] = QuantumAdaptiveHybridOptimizer + LLAMAQuantumAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridOptimizer" + ).set_name("LLAMAQuantumAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizerV3 import QuantumAdaptiveHybridOptimizerV3 + + lama_register["QuantumAdaptiveHybridOptimizerV3"] = QuantumAdaptiveHybridOptimizerV3 + LLAMAQuantumAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridOptimizerV3" + ).set_name("LLAMAQuantumAdaptiveHybridOptimizerV3", register=True) +except Exception as e: + print("QuantumAdaptiveHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveHybridStrategyV4 import QuantumAdaptiveHybridStrategyV4 + + lama_register["QuantumAdaptiveHybridStrategyV4"] = QuantumAdaptiveHybridStrategyV4 + LLAMAQuantumAdaptiveHybridStrategyV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridStrategyV4" + ).set_name("LLAMAQuantumAdaptiveHybridStrategyV4", register=True) +except Exception as e: + print("QuantumAdaptiveHybridStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveLevyDifferentialSearch import ( + QuantumAdaptiveLevyDifferentialSearch, + ) + + lama_register["QuantumAdaptiveLevyDifferentialSearch"] = QuantumAdaptiveLevyDifferentialSearch + LLAMAQuantumAdaptiveLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyDifferentialSearch" + ).set_name("LLAMAQuantumAdaptiveLevyDifferentialSearch", register=True) +except Exception as e: + print("QuantumAdaptiveLevyDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveLevyDynamicDifferentialSwarmV4 import ( + QuantumAdaptiveLevyDynamicDifferentialSwarmV4, + ) + + lama_register["QuantumAdaptiveLevyDynamicDifferentialSwarmV4"] = ( + QuantumAdaptiveLevyDynamicDifferentialSwarmV4 + ) + LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4" + ).set_name("LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4", register=True) +except Exception as e: + print("QuantumAdaptiveLevyDynamicDifferentialSwarmV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveLevyMemeticSearch import QuantumAdaptiveLevyMemeticSearch + + lama_register["QuantumAdaptiveLevyMemeticSearch"] = QuantumAdaptiveLevyMemeticSearch + LLAMAQuantumAdaptiveLevyMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyMemeticSearch" + ).set_name("LLAMAQuantumAdaptiveLevyMemeticSearch", register=True) +except Exception as e: + print("QuantumAdaptiveLevyMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveLevyOptimizer import QuantumAdaptiveLevyOptimizer + + lama_register["QuantumAdaptiveLevyOptimizer"] = QuantumAdaptiveLevyOptimizer + LLAMAQuantumAdaptiveLevyOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyOptimizer" + ).set_name("LLAMAQuantumAdaptiveLevyOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveLevyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveLevySwarmOptimizationV2 import ( + QuantumAdaptiveLevySwarmOptimizationV2, + ) + + lama_register["QuantumAdaptiveLevySwarmOptimizationV2"] = QuantumAdaptiveLevySwarmOptimizationV2 + LLAMAQuantumAdaptiveLevySwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2" + ).set_name("LLAMAQuantumAdaptiveLevySwarmOptimizationV2", register=True) +except Exception as e: + print("QuantumAdaptiveLevySwarmOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithm import QuantumAdaptiveMemeticAlgorithm + + lama_register["QuantumAdaptiveMemeticAlgorithm"] = QuantumAdaptiveMemeticAlgorithm + LLAMAQuantumAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticAlgorithm" + ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithm", register=True) +except Exception as e: + print("QuantumAdaptiveMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithmV2 import ( + QuantumAdaptiveMemeticAlgorithmV2, + ) + + lama_register["QuantumAdaptiveMemeticAlgorithmV2"] = QuantumAdaptiveMemeticAlgorithmV2 + LLAMAQuantumAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticAlgorithmV2" + ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithmV2", register=True) +except Exception as e: + print("QuantumAdaptiveMemeticAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMemeticSearchV2 import QuantumAdaptiveMemeticSearchV2 + + lama_register["QuantumAdaptiveMemeticSearchV2"] = QuantumAdaptiveMemeticSearchV2 + LLAMAQuantumAdaptiveMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticSearchV2" + ).set_name("LLAMAQuantumAdaptiveMemeticSearchV2", register=True) +except Exception as e: + print("QuantumAdaptiveMemeticSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMultiPhaseDE_v6 import QuantumAdaptiveMultiPhaseDE_v6 + + lama_register["QuantumAdaptiveMultiPhaseDE_v6"] = QuantumAdaptiveMultiPhaseDE_v6 + LLAMAQuantumAdaptiveMultiPhaseDE_v6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiPhaseDE_v6" + ).set_name("LLAMAQuantumAdaptiveMultiPhaseDE_v6", register=True) +except Exception as e: + print("QuantumAdaptiveMultiPhaseDE_v6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMultiPopulationDE import QuantumAdaptiveMultiPopulationDE + + lama_register["QuantumAdaptiveMultiPopulationDE"] = QuantumAdaptiveMultiPopulationDE + LLAMAQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiPopulationDE" + ).set_name("LLAMAQuantumAdaptiveMultiPopulationDE", register=True) +except Exception as e: + print("QuantumAdaptiveMultiPopulationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveMultiStrategyEvolution import ( + QuantumAdaptiveMultiStrategyEvolution, + ) + + lama_register["QuantumAdaptiveMultiStrategyEvolution"] = QuantumAdaptiveMultiStrategyEvolution + LLAMAQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiStrategyEvolution" + ).set_name("LLAMAQuantumAdaptiveMultiStrategyEvolution", register=True) +except Exception as e: + print("QuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveNesterovGradientEnhancer import ( + QuantumAdaptiveNesterovGradientEnhancer, + ) + + lama_register["QuantumAdaptiveNesterovGradientEnhancer"] = QuantumAdaptiveNesterovGradientEnhancer + LLAMAQuantumAdaptiveNesterovGradientEnhancer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveNesterovGradientEnhancer" + ).set_name("LLAMAQuantumAdaptiveNesterovGradientEnhancer", register=True) +except Exception as e: + print("QuantumAdaptiveNesterovGradientEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveNesterovSynergy import QuantumAdaptiveNesterovSynergy + + lama_register["QuantumAdaptiveNesterovSynergy"] = QuantumAdaptiveNesterovSynergy + LLAMAQuantumAdaptiveNesterovSynergy = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveNesterovSynergy" + ).set_name("LLAMAQuantumAdaptiveNesterovSynergy", register=True) +except Exception as e: + print("QuantumAdaptiveNesterovSynergy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveRefinementOptimizer import ( + QuantumAdaptiveRefinementOptimizer, + ) + + lama_register["QuantumAdaptiveRefinementOptimizer"] = QuantumAdaptiveRefinementOptimizer + LLAMAQuantumAdaptiveRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementOptimizer" + ).set_name("LLAMAQuantumAdaptiveRefinementOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveRefinementOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategy import ( + QuantumAdaptiveRefinementStrategy, + ) + + lama_register["QuantumAdaptiveRefinementStrategy"] = QuantumAdaptiveRefinementStrategy + LLAMAQuantumAdaptiveRefinementStrategy = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementStrategy" + ).set_name("LLAMAQuantumAdaptiveRefinementStrategy", register=True) +except Exception as e: + print("QuantumAdaptiveRefinementStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategyV2 import ( + QuantumAdaptiveRefinementStrategyV2, + ) + + lama_register["QuantumAdaptiveRefinementStrategyV2"] = QuantumAdaptiveRefinementStrategyV2 + LLAMAQuantumAdaptiveRefinementStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementStrategyV2" + ).set_name("LLAMAQuantumAdaptiveRefinementStrategyV2", register=True) +except Exception as e: + print("QuantumAdaptiveRefinementStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveStrategicEnhancer import QuantumAdaptiveStrategicEnhancer + + lama_register["QuantumAdaptiveStrategicEnhancer"] = QuantumAdaptiveStrategicEnhancer + LLAMAQuantumAdaptiveStrategicEnhancer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveStrategicEnhancer" + ).set_name("LLAMAQuantumAdaptiveStrategicEnhancer", register=True) +except Exception as e: + print("QuantumAdaptiveStrategicEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAdaptiveVelocityOptimizer import QuantumAdaptiveVelocityOptimizer + + lama_register["QuantumAdaptiveVelocityOptimizer"] = QuantumAdaptiveVelocityOptimizer + LLAMAQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveVelocityOptimizer" + ).set_name("LLAMAQuantumAdaptiveVelocityOptimizer", register=True) +except Exception as e: + print("QuantumAdaptiveVelocityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAnnealingDifferentialEvolution import ( + QuantumAnnealingDifferentialEvolution, + ) + + lama_register["QuantumAnnealingDifferentialEvolution"] = QuantumAnnealingDifferentialEvolution + LLAMAQuantumAnnealingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumAnnealingDifferentialEvolution" + ).set_name("LLAMAQuantumAnnealingDifferentialEvolution", register=True) +except Exception as e: + print("QuantumAnnealingDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumAssistedHybridOptimizerV1 import QuantumAssistedHybridOptimizerV1 + + lama_register["QuantumAssistedHybridOptimizerV1"] = QuantumAssistedHybridOptimizerV1 + LLAMAQuantumAssistedHybridOptimizerV1 = NonObjectOptimizer( + method="LLAMAQuantumAssistedHybridOptimizerV1" + ).set_name("LLAMAQuantumAssistedHybridOptimizerV1", register=True) +except Exception as e: + print("QuantumAssistedHybridOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumBalancedAdaptiveNesterovStrategy import ( + QuantumBalancedAdaptiveNesterovStrategy, + ) + + lama_register["QuantumBalancedAdaptiveNesterovStrategy"] = QuantumBalancedAdaptiveNesterovStrategy + LLAMAQuantumBalancedAdaptiveNesterovStrategy = NonObjectOptimizer( + method="LLAMAQuantumBalancedAdaptiveNesterovStrategy" + ).set_name("LLAMAQuantumBalancedAdaptiveNesterovStrategy", register=True) +except Exception as e: + print("QuantumBalancedAdaptiveNesterovStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumBalancedEvolutionStrategy import QuantumBalancedEvolutionStrategy + + lama_register["QuantumBalancedEvolutionStrategy"] = QuantumBalancedEvolutionStrategy + LLAMAQuantumBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumBalancedEvolutionStrategy" + ).set_name("LLAMAQuantumBalancedEvolutionStrategy", register=True) +except Exception as e: + print("QuantumBalancedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancedOptimizerV16 import ( + QuantumCognitionAdaptiveEnhancedOptimizerV16, + ) + + lama_register["QuantumCognitionAdaptiveEnhancedOptimizerV16"] = ( + QuantumCognitionAdaptiveEnhancedOptimizerV16 + ) + LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16" + ).set_name("LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16", register=True) +except Exception as e: + print("QuantumCognitionAdaptiveEnhancedOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancerV8 import ( + QuantumCognitionAdaptiveEnhancerV8, + ) + + lama_register["QuantumCognitionAdaptiveEnhancerV8"] = QuantumCognitionAdaptiveEnhancerV8 + LLAMAQuantumCognitionAdaptiveEnhancerV8 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveEnhancerV8" + ).set_name("LLAMAQuantumCognitionAdaptiveEnhancerV8", register=True) +except Exception as e: + print("QuantumCognitionAdaptiveEnhancerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionAdaptiveTuningOptimizerV14 import ( + QuantumCognitionAdaptiveTuningOptimizerV14, + ) + + lama_register["QuantumCognitionAdaptiveTuningOptimizerV14"] = QuantumCognitionAdaptiveTuningOptimizerV14 + LLAMAQuantumCognitionAdaptiveTuningOptimizerV14 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14" + ).set_name("LLAMAQuantumCognitionAdaptiveTuningOptimizerV14", register=True) +except Exception as e: + print("QuantumCognitionAdaptiveTuningOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionDynamicAdaptationOptimizerV30 import ( + QuantumCognitionDynamicAdaptationOptimizerV30, + ) + + lama_register["QuantumCognitionDynamicAdaptationOptimizerV30"] = ( + QuantumCognitionDynamicAdaptationOptimizerV30 + ) + LLAMAQuantumCognitionDynamicAdaptationOptimizerV30 = NonObjectOptimizer( + method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30" + ).set_name("LLAMAQuantumCognitionDynamicAdaptationOptimizerV30", register=True) +except Exception as e: + print("QuantumCognitionDynamicAdaptationOptimizerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionEnhancedOptimizerV7 import ( + QuantumCognitionEnhancedOptimizerV7, + ) + + lama_register["QuantumCognitionEnhancedOptimizerV7"] = QuantumCognitionEnhancedOptimizerV7 + LLAMAQuantumCognitionEnhancedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumCognitionEnhancedOptimizerV7" + ).set_name("LLAMAQuantumCognitionEnhancedOptimizerV7", register=True) +except Exception as e: + print("QuantumCognitionEnhancedOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionFocusedHybridOptimizerV21 import ( + QuantumCognitionFocusedHybridOptimizerV21, + ) + + lama_register["QuantumCognitionFocusedHybridOptimizerV21"] = QuantumCognitionFocusedHybridOptimizerV21 + LLAMAQuantumCognitionFocusedHybridOptimizerV21 = NonObjectOptimizer( + method="LLAMAQuantumCognitionFocusedHybridOptimizerV21" + ).set_name("LLAMAQuantumCognitionFocusedHybridOptimizerV21", register=True) +except Exception as e: + print("QuantumCognitionFocusedHybridOptimizerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionFocusedOptimizerV17 import ( + QuantumCognitionFocusedOptimizerV17, + ) + + lama_register["QuantumCognitionFocusedOptimizerV17"] = QuantumCognitionFocusedOptimizerV17 + LLAMAQuantumCognitionFocusedOptimizerV17 = NonObjectOptimizer( + method="LLAMAQuantumCognitionFocusedOptimizerV17" + ).set_name("LLAMAQuantumCognitionFocusedOptimizerV17", register=True) +except Exception as e: + print("QuantumCognitionFocusedOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV19 import ( + QuantumCognitionHybridEvolutionaryOptimizerV19, + ) + + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV19"] = ( + QuantumCognitionHybridEvolutionaryOptimizerV19 + ) + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19" + ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19", register=True) +except Exception as e: + print("QuantumCognitionHybridEvolutionaryOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV20 import ( + QuantumCognitionHybridEvolutionaryOptimizerV20, + ) + + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV20"] = ( + QuantumCognitionHybridEvolutionaryOptimizerV20 + ) + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20" + ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20", register=True) +except Exception as e: + print("QuantumCognitionHybridEvolutionaryOptimizerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV23 import ( + QuantumCognitionHybridOptimizerV23, + ) + + lama_register["QuantumCognitionHybridOptimizerV23"] = QuantumCognitionHybridOptimizerV23 + LLAMAQuantumCognitionHybridOptimizerV23 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV23" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV23", register=True) +except Exception as e: + print("QuantumCognitionHybridOptimizerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV24 import ( + QuantumCognitionHybridOptimizerV24, + ) + + lama_register["QuantumCognitionHybridOptimizerV24"] = QuantumCognitionHybridOptimizerV24 + LLAMAQuantumCognitionHybridOptimizerV24 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV24" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV24", register=True) +except Exception as e: + print("QuantumCognitionHybridOptimizerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV25 import ( + QuantumCognitionHybridOptimizerV25, + ) + + lama_register["QuantumCognitionHybridOptimizerV25"] = QuantumCognitionHybridOptimizerV25 + LLAMAQuantumCognitionHybridOptimizerV25 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV25" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV25", register=True) +except Exception as e: + print("QuantumCognitionHybridOptimizerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV26 import ( + QuantumCognitionHybridOptimizerV26, + ) + + lama_register["QuantumCognitionHybridOptimizerV26"] = QuantumCognitionHybridOptimizerV26 + LLAMAQuantumCognitionHybridOptimizerV26 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV26" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV26", register=True) +except Exception as e: + print("QuantumCognitionHybridOptimizerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV27 import ( + QuantumCognitionHybridOptimizerV27, + ) + + lama_register["QuantumCognitionHybridOptimizerV27"] = QuantumCognitionHybridOptimizerV27 + LLAMAQuantumCognitionHybridOptimizerV27 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV27" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV27", register=True) +except Exception as e: + print("QuantumCognitionHybridOptimizerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionOptimizerV2 import QuantumCognitionOptimizerV2 + + lama_register["QuantumCognitionOptimizerV2"] = QuantumCognitionOptimizerV2 + LLAMAQuantumCognitionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2").set_name( + "LLAMAQuantumCognitionOptimizerV2", register=True + ) +except Exception as e: + print("QuantumCognitionOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitionTrajectoryOptimizerV28 import ( + QuantumCognitionTrajectoryOptimizerV28, + ) + + lama_register["QuantumCognitionTrajectoryOptimizerV28"] = QuantumCognitionTrajectoryOptimizerV28 + LLAMAQuantumCognitionTrajectoryOptimizerV28 = NonObjectOptimizer( + method="LLAMAQuantumCognitionTrajectoryOptimizerV28" + ).set_name("LLAMAQuantumCognitionTrajectoryOptimizerV28", register=True) +except Exception as e: + print("QuantumCognitionTrajectoryOptimizerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCognitiveAdaptiveOptimizer import ( + QuantumCognitiveAdaptiveOptimizer, + ) + + lama_register["QuantumCognitiveAdaptiveOptimizer"] = QuantumCognitiveAdaptiveOptimizer + LLAMAQuantumCognitiveAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumCognitiveAdaptiveOptimizer" + ).set_name("LLAMAQuantumCognitiveAdaptiveOptimizer", register=True) +except Exception as e: + print("QuantumCognitiveAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumControlledDiversityStrategy import ( + QuantumControlledDiversityStrategy, + ) + + lama_register["QuantumControlledDiversityStrategy"] = QuantumControlledDiversityStrategy + LLAMAQuantumControlledDiversityStrategy = NonObjectOptimizer( + method="LLAMAQuantumControlledDiversityStrategy" + ).set_name("LLAMAQuantumControlledDiversityStrategy", register=True) +except Exception as e: + print("QuantumControlledDiversityStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCooperativeCrossoverStrategy import ( + QuantumCooperativeCrossoverStrategy, + ) + + lama_register["QuantumCooperativeCrossoverStrategy"] = QuantumCooperativeCrossoverStrategy + LLAMAQuantumCooperativeCrossoverStrategy = NonObjectOptimizer( + method="LLAMAQuantumCooperativeCrossoverStrategy" + ).set_name("LLAMAQuantumCooperativeCrossoverStrategy", register=True) +except Exception as e: + print("QuantumCooperativeCrossoverStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolution import ( + QuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["QuantumCovarianceMatrixDifferentialEvolution"] = ( + QuantumCovarianceMatrixDifferentialEvolution + ) + LLAMAQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("QuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( + QuantumCovarianceMatrixDifferentialEvolutionRefinedV2, + ) + + lama_register["QuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( + QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + ) + LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" + ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) +except Exception as e: + print("QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart import ( + QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart"] = ( + QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning import ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement import ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning import ( + QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning + ) + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning import ( + QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning"] = ( + QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning + ) + LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning" + ).set_name( + "LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning", register=True + ) +except Exception as e: + print( + "QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts import ( + QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts"] = ( + QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts + ) + LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch import ( + QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch"] = ( + QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch + ) + LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicElitismAndRestarts import ( + QuantumDifferentialEvolutionWithDynamicElitismAndRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicElitismAndRestarts"] = ( + QuantumDifferentialEvolutionWithDynamicElitismAndRestarts + ) + LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithDynamicElitismAndRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart import ( + QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart"] = ( + QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart + ) + LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEliteGuidance import ( + QuantumDifferentialEvolutionWithEliteGuidance, + ) + + lama_register["QuantumDifferentialEvolutionWithEliteGuidance"] = ( + QuantumDifferentialEvolutionWithEliteGuidance + ) + LLAMAQuantumDifferentialEvolutionWithEliteGuidance = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEliteGuidance", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithEliteGuidance can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitism import ( + QuantumDifferentialEvolutionWithElitism, + ) + + lama_register["QuantumDifferentialEvolutionWithElitism"] = QuantumDifferentialEvolutionWithElitism + LLAMAQuantumDifferentialEvolutionWithElitism = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithElitism" + ).set_name("LLAMAQuantumDifferentialEvolutionWithElitism", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch + ) + LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch + ) + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch + ) + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch" + ).set_name( + "LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch", register=True + ) +except Exception as e: + print( + "QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch can not be imported: ", + e, + ) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch + ) + LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts import ( + QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts"] = ( + QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts + ) + LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch import ( + QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch"] = ( + QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch + ) + LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithMultiStrategyLearning import ( + QuantumDifferentialEvolutionWithMultiStrategyLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithMultiStrategyLearning"] = ( + QuantumDifferentialEvolutionWithMultiStrategyLearning + ) + LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning", register=True) +except Exception as e: + print("QuantumDifferentialEvolutionWithMultiStrategyLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithAdaptiveRestarts import ( + QuantumDifferentialParticleOptimizerWithAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithAdaptiveRestarts"] = ( + QuantumDifferentialParticleOptimizerWithAdaptiveRestarts + ) + LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts", register=True) +except Exception as e: + print("QuantumDifferentialParticleOptimizerWithAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteGuidedMutation import ( + QuantumDifferentialParticleOptimizerWithEliteGuidedMutation, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEliteGuidedMutation"] = ( + QuantumDifferentialParticleOptimizerWithEliteGuidedMutation + ) + LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation", register=True) +except Exception as e: + print("QuantumDifferentialParticleOptimizerWithEliteGuidedMutation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteRefinement import ( + QuantumDifferentialParticleOptimizerWithEliteRefinement, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEliteRefinement"] = ( + QuantumDifferentialParticleOptimizerWithEliteRefinement + ) + LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement", register=True) +except Exception as e: + print("QuantumDifferentialParticleOptimizerWithEliteRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithElitism import ( + QuantumDifferentialParticleOptimizerWithElitism, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithElitism"] = ( + QuantumDifferentialParticleOptimizerWithElitism + ) + LLAMAQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithElitism" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithElitism", register=True) +except Exception as e: + print("QuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts import ( + QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts"] = ( + QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts + ) + LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts", register=True) +except Exception as e: + print("QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDifferentialParticleSwarmRefinement import ( + QuantumDifferentialParticleSwarmRefinement, + ) + + lama_register["QuantumDifferentialParticleSwarmRefinement"] = QuantumDifferentialParticleSwarmRefinement + LLAMAQuantumDifferentialParticleSwarmRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleSwarmRefinement" + ).set_name("LLAMAQuantumDifferentialParticleSwarmRefinement", register=True) +except Exception as e: + print("QuantumDifferentialParticleSwarmRefinement can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalAcceleratorV19 import QuantumDirectionalAcceleratorV19 + + lama_register["QuantumDirectionalAcceleratorV19"] = QuantumDirectionalAcceleratorV19 + LLAMAQuantumDirectionalAcceleratorV19 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalAcceleratorV19" + ).set_name("LLAMAQuantumDirectionalAcceleratorV19", register=True) +except Exception as e: + print("QuantumDirectionalAcceleratorV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancer import QuantumDirectionalEnhancer + + lama_register["QuantumDirectionalEnhancer"] = QuantumDirectionalEnhancer + LLAMAQuantumDirectionalEnhancer = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer").set_name( + "LLAMAQuantumDirectionalEnhancer", register=True + ) +except Exception as e: + print("QuantumDirectionalEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV10 import QuantumDirectionalEnhancerV10 + + lama_register["QuantumDirectionalEnhancerV10"] = QuantumDirectionalEnhancerV10 + LLAMAQuantumDirectionalEnhancerV10 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV10" + ).set_name("LLAMAQuantumDirectionalEnhancerV10", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV11 import QuantumDirectionalEnhancerV11 + + lama_register["QuantumDirectionalEnhancerV11"] = QuantumDirectionalEnhancerV11 + LLAMAQuantumDirectionalEnhancerV11 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV11" + ).set_name("LLAMAQuantumDirectionalEnhancerV11", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV12 import QuantumDirectionalEnhancerV12 + + lama_register["QuantumDirectionalEnhancerV12"] = QuantumDirectionalEnhancerV12 + LLAMAQuantumDirectionalEnhancerV12 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV12" + ).set_name("LLAMAQuantumDirectionalEnhancerV12", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV13 import QuantumDirectionalEnhancerV13 + + lama_register["QuantumDirectionalEnhancerV13"] = QuantumDirectionalEnhancerV13 + LLAMAQuantumDirectionalEnhancerV13 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV13" + ).set_name("LLAMAQuantumDirectionalEnhancerV13", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV14 import QuantumDirectionalEnhancerV14 + + lama_register["QuantumDirectionalEnhancerV14"] = QuantumDirectionalEnhancerV14 + LLAMAQuantumDirectionalEnhancerV14 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV14" + ).set_name("LLAMAQuantumDirectionalEnhancerV14", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV15 import QuantumDirectionalEnhancerV15 + + lama_register["QuantumDirectionalEnhancerV15"] = QuantumDirectionalEnhancerV15 + LLAMAQuantumDirectionalEnhancerV15 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV15" + ).set_name("LLAMAQuantumDirectionalEnhancerV15", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV16 import QuantumDirectionalEnhancerV16 + + lama_register["QuantumDirectionalEnhancerV16"] = QuantumDirectionalEnhancerV16 + LLAMAQuantumDirectionalEnhancerV16 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV16" + ).set_name("LLAMAQuantumDirectionalEnhancerV16", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV17 import QuantumDirectionalEnhancerV17 + + lama_register["QuantumDirectionalEnhancerV17"] = QuantumDirectionalEnhancerV17 + LLAMAQuantumDirectionalEnhancerV17 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV17" + ).set_name("LLAMAQuantumDirectionalEnhancerV17", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV18 import QuantumDirectionalEnhancerV18 + + lama_register["QuantumDirectionalEnhancerV18"] = QuantumDirectionalEnhancerV18 + LLAMAQuantumDirectionalEnhancerV18 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV18" + ).set_name("LLAMAQuantumDirectionalEnhancerV18", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV2 import QuantumDirectionalEnhancerV2 + + lama_register["QuantumDirectionalEnhancerV2"] = QuantumDirectionalEnhancerV2 + LLAMAQuantumDirectionalEnhancerV2 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV2" + ).set_name("LLAMAQuantumDirectionalEnhancerV2", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV3 import QuantumDirectionalEnhancerV3 + + lama_register["QuantumDirectionalEnhancerV3"] = QuantumDirectionalEnhancerV3 + LLAMAQuantumDirectionalEnhancerV3 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV3" + ).set_name("LLAMAQuantumDirectionalEnhancerV3", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV4 import QuantumDirectionalEnhancerV4 + + lama_register["QuantumDirectionalEnhancerV4"] = QuantumDirectionalEnhancerV4 + LLAMAQuantumDirectionalEnhancerV4 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV4" + ).set_name("LLAMAQuantumDirectionalEnhancerV4", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV5 import QuantumDirectionalEnhancerV5 + + lama_register["QuantumDirectionalEnhancerV5"] = QuantumDirectionalEnhancerV5 + LLAMAQuantumDirectionalEnhancerV5 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV5" + ).set_name("LLAMAQuantumDirectionalEnhancerV5", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV6 import QuantumDirectionalEnhancerV6 + + lama_register["QuantumDirectionalEnhancerV6"] = QuantumDirectionalEnhancerV6 + LLAMAQuantumDirectionalEnhancerV6 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV6" + ).set_name("LLAMAQuantumDirectionalEnhancerV6", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV7 import QuantumDirectionalEnhancerV7 + + lama_register["QuantumDirectionalEnhancerV7"] = QuantumDirectionalEnhancerV7 + LLAMAQuantumDirectionalEnhancerV7 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV7" + ).set_name("LLAMAQuantumDirectionalEnhancerV7", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV8 import QuantumDirectionalEnhancerV8 + + lama_register["QuantumDirectionalEnhancerV8"] = QuantumDirectionalEnhancerV8 + LLAMAQuantumDirectionalEnhancerV8 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV8" + ).set_name("LLAMAQuantumDirectionalEnhancerV8", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalEnhancerV9 import QuantumDirectionalEnhancerV9 + + lama_register["QuantumDirectionalEnhancerV9"] = QuantumDirectionalEnhancerV9 + LLAMAQuantumDirectionalEnhancerV9 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV9" + ).set_name("LLAMAQuantumDirectionalEnhancerV9", register=True) +except Exception as e: + print("QuantumDirectionalEnhancerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizer import ( + QuantumDirectionalFusionOptimizer, + ) + + lama_register["QuantumDirectionalFusionOptimizer"] = QuantumDirectionalFusionOptimizer + LLAMAQuantumDirectionalFusionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumDirectionalFusionOptimizer" + ).set_name("LLAMAQuantumDirectionalFusionOptimizer", register=True) +except Exception as e: + print("QuantumDirectionalFusionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizerV2 import ( + QuantumDirectionalFusionOptimizerV2, + ) + + lama_register["QuantumDirectionalFusionOptimizerV2"] = QuantumDirectionalFusionOptimizerV2 + LLAMAQuantumDirectionalFusionOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalFusionOptimizerV2" + ).set_name("LLAMAQuantumDirectionalFusionOptimizerV2", register=True) +except Exception as e: + print("QuantumDirectionalFusionOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV20 import QuantumDirectionalRefinerV20 + + lama_register["QuantumDirectionalRefinerV20"] = QuantumDirectionalRefinerV20 + LLAMAQuantumDirectionalRefinerV20 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV20" + ).set_name("LLAMAQuantumDirectionalRefinerV20", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV21 import QuantumDirectionalRefinerV21 + + lama_register["QuantumDirectionalRefinerV21"] = QuantumDirectionalRefinerV21 + LLAMAQuantumDirectionalRefinerV21 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV21" + ).set_name("LLAMAQuantumDirectionalRefinerV21", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV22 import QuantumDirectionalRefinerV22 + + lama_register["QuantumDirectionalRefinerV22"] = QuantumDirectionalRefinerV22 + LLAMAQuantumDirectionalRefinerV22 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV22" + ).set_name("LLAMAQuantumDirectionalRefinerV22", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV23 import QuantumDirectionalRefinerV23 + + lama_register["QuantumDirectionalRefinerV23"] = QuantumDirectionalRefinerV23 + LLAMAQuantumDirectionalRefinerV23 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV23" + ).set_name("LLAMAQuantumDirectionalRefinerV23", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV24 import QuantumDirectionalRefinerV24 + + lama_register["QuantumDirectionalRefinerV24"] = QuantumDirectionalRefinerV24 + LLAMAQuantumDirectionalRefinerV24 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV24" + ).set_name("LLAMAQuantumDirectionalRefinerV24", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV25 import QuantumDirectionalRefinerV25 + + lama_register["QuantumDirectionalRefinerV25"] = QuantumDirectionalRefinerV25 + LLAMAQuantumDirectionalRefinerV25 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV25" + ).set_name("LLAMAQuantumDirectionalRefinerV25", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV26 import QuantumDirectionalRefinerV26 + + lama_register["QuantumDirectionalRefinerV26"] = QuantumDirectionalRefinerV26 + LLAMAQuantumDirectionalRefinerV26 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV26" + ).set_name("LLAMAQuantumDirectionalRefinerV26", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV27 import QuantumDirectionalRefinerV27 + + lama_register["QuantumDirectionalRefinerV27"] = QuantumDirectionalRefinerV27 + LLAMAQuantumDirectionalRefinerV27 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV27" + ).set_name("LLAMAQuantumDirectionalRefinerV27", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV28 import QuantumDirectionalRefinerV28 + + lama_register["QuantumDirectionalRefinerV28"] = QuantumDirectionalRefinerV28 + LLAMAQuantumDirectionalRefinerV28 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV28" + ).set_name("LLAMAQuantumDirectionalRefinerV28", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV29 import QuantumDirectionalRefinerV29 + + lama_register["QuantumDirectionalRefinerV29"] = QuantumDirectionalRefinerV29 + LLAMAQuantumDirectionalRefinerV29 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV29" + ).set_name("LLAMAQuantumDirectionalRefinerV29", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV30 import QuantumDirectionalRefinerV30 + + lama_register["QuantumDirectionalRefinerV30"] = QuantumDirectionalRefinerV30 + LLAMAQuantumDirectionalRefinerV30 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV30" + ).set_name("LLAMAQuantumDirectionalRefinerV30", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV31 import QuantumDirectionalRefinerV31 + + lama_register["QuantumDirectionalRefinerV31"] = QuantumDirectionalRefinerV31 + LLAMAQuantumDirectionalRefinerV31 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV31" + ).set_name("LLAMAQuantumDirectionalRefinerV31", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV32 import QuantumDirectionalRefinerV32 + + lama_register["QuantumDirectionalRefinerV32"] = QuantumDirectionalRefinerV32 + LLAMAQuantumDirectionalRefinerV32 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV32" + ).set_name("LLAMAQuantumDirectionalRefinerV32", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDirectionalRefinerV33 import QuantumDirectionalRefinerV33 + + lama_register["QuantumDirectionalRefinerV33"] = QuantumDirectionalRefinerV33 + LLAMAQuantumDirectionalRefinerV33 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV33" + ).set_name("LLAMAQuantumDirectionalRefinerV33", register=True) +except Exception as e: + print("QuantumDirectionalRefinerV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDualStrategyAdaptiveDE import QuantumDualStrategyAdaptiveDE + + lama_register["QuantumDualStrategyAdaptiveDE"] = QuantumDualStrategyAdaptiveDE + LLAMAQuantumDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumDualStrategyAdaptiveDE" + ).set_name("LLAMAQuantumDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("QuantumDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicAdaptationStrategy import QuantumDynamicAdaptationStrategy + + lama_register["QuantumDynamicAdaptationStrategy"] = QuantumDynamicAdaptationStrategy + LLAMAQuantumDynamicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAQuantumDynamicAdaptationStrategy" + ).set_name("LLAMAQuantumDynamicAdaptationStrategy", register=True) +except Exception as e: + print("QuantumDynamicAdaptationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicBalanceOptimizer import QuantumDynamicBalanceOptimizer + + lama_register["QuantumDynamicBalanceOptimizer"] = QuantumDynamicBalanceOptimizer + LLAMAQuantumDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumDynamicBalanceOptimizer" + ).set_name("LLAMAQuantumDynamicBalanceOptimizer", register=True) +except Exception as e: + print("QuantumDynamicBalanceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicBalancedOptimizerV7 import ( + QuantumDynamicBalancedOptimizerV7, + ) + + lama_register["QuantumDynamicBalancedOptimizerV7"] = QuantumDynamicBalancedOptimizerV7 + LLAMAQuantumDynamicBalancedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumDynamicBalancedOptimizerV7" + ).set_name("LLAMAQuantumDynamicBalancedOptimizerV7", register=True) +except Exception as e: + print("QuantumDynamicBalancedOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicExplorationOptimizerV6 import ( + QuantumDynamicExplorationOptimizerV6, + ) + + lama_register["QuantumDynamicExplorationOptimizerV6"] = QuantumDynamicExplorationOptimizerV6 + LLAMAQuantumDynamicExplorationOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumDynamicExplorationOptimizerV6" + ).set_name("LLAMAQuantumDynamicExplorationOptimizerV6", register=True) +except Exception as e: + print("QuantumDynamicExplorationOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicGradientClimberV2 import QuantumDynamicGradientClimberV2 + + lama_register["QuantumDynamicGradientClimberV2"] = QuantumDynamicGradientClimberV2 + LLAMAQuantumDynamicGradientClimberV2 = NonObjectOptimizer( + method="LLAMAQuantumDynamicGradientClimberV2" + ).set_name("LLAMAQuantumDynamicGradientClimberV2", register=True) +except Exception as e: + print("QuantumDynamicGradientClimberV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicGradientClimberV3 import QuantumDynamicGradientClimberV3 + + lama_register["QuantumDynamicGradientClimberV3"] = QuantumDynamicGradientClimberV3 + LLAMAQuantumDynamicGradientClimberV3 = NonObjectOptimizer( + method="LLAMAQuantumDynamicGradientClimberV3" + ).set_name("LLAMAQuantumDynamicGradientClimberV3", register=True) +except Exception as e: + print("QuantumDynamicGradientClimberV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumDynamicallyAdaptiveFireworksAlgorithm import ( + QuantumDynamicallyAdaptiveFireworksAlgorithm, + ) + + lama_register["QuantumDynamicallyAdaptiveFireworksAlgorithm"] = ( + QuantumDynamicallyAdaptiveFireworksAlgorithm + ) + LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm" + ).set_name("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm", register=True) +except Exception as e: + print("QuantumDynamicallyAdaptiveFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEliteMemeticAdaptiveSearch import ( + QuantumEliteMemeticAdaptiveSearch, + ) + + lama_register["QuantumEliteMemeticAdaptiveSearch"] = QuantumEliteMemeticAdaptiveSearch + LLAMAQuantumEliteMemeticAdaptiveSearch = NonObjectOptimizer( + method="LLAMAQuantumEliteMemeticAdaptiveSearch" + ).set_name("LLAMAQuantumEliteMemeticAdaptiveSearch", register=True) +except Exception as e: + print("QuantumEliteMemeticAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v4 import ( + QuantumEnhancedAdaptiveDifferentialEvolution_v4, + ) + + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v4"] = ( + QuantumEnhancedAdaptiveDifferentialEvolution_v4 + ) + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4" + ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveDifferentialEvolution_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v5 import ( + QuantumEnhancedAdaptiveDifferentialEvolution_v5, + ) + + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v5"] = ( + QuantumEnhancedAdaptiveDifferentialEvolution_v5 + ) + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5" + ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveDifferentialEvolution_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDiversityStrategyV6 import ( + QuantumEnhancedAdaptiveDiversityStrategyV6, + ) + + lama_register["QuantumEnhancedAdaptiveDiversityStrategyV6"] = QuantumEnhancedAdaptiveDiversityStrategyV6 + LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6" + ).set_name("LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveDiversityStrategyV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDualStrategyDE import ( + QuantumEnhancedAdaptiveDualStrategyDE, + ) + + lama_register["QuantumEnhancedAdaptiveDualStrategyDE"] = QuantumEnhancedAdaptiveDualStrategyDE + LLAMAQuantumEnhancedAdaptiveDualStrategyDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE" + ).set_name("LLAMAQuantumEnhancedAdaptiveDualStrategyDE", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveDualStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveExplorationOptimization import ( + QuantumEnhancedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumEnhancedAdaptiveExplorationOptimization"] = ( + QuantumEnhancedAdaptiveExplorationOptimization + ) + LLAMAQuantumEnhancedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumEnhancedAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE import ( + QuantumEnhancedAdaptiveMultiPhaseDE, + ) + + lama_register["QuantumEnhancedAdaptiveMultiPhaseDE"] = QuantumEnhancedAdaptiveMultiPhaseDE + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE" + ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE_v7 import ( + QuantumEnhancedAdaptiveMultiPhaseDE_v7, + ) + + lama_register["QuantumEnhancedAdaptiveMultiPhaseDE_v7"] = QuantumEnhancedAdaptiveMultiPhaseDE_v7 + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7" + ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveMultiPhaseDE_v7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveOptimizer import QuantumEnhancedAdaptiveOptimizer + + lama_register["QuantumEnhancedAdaptiveOptimizer"] = QuantumEnhancedAdaptiveOptimizer + LLAMAQuantumEnhancedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveOptimizer" + ).set_name("LLAMAQuantumEnhancedAdaptiveOptimizer", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveSwarmOptimization import ( + QuantumEnhancedAdaptiveSwarmOptimization, + ) + + lama_register["QuantumEnhancedAdaptiveSwarmOptimization"] = QuantumEnhancedAdaptiveSwarmOptimization + LLAMAQuantumEnhancedAdaptiveSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization" + ).set_name("LLAMAQuantumEnhancedAdaptiveSwarmOptimization", register=True) +except Exception as e: + print("QuantumEnhancedAdaptiveSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolution import ( + QuantumEnhancedDifferentialEvolution, + ) + + lama_register["QuantumEnhancedDifferentialEvolution"] = QuantumEnhancedDifferentialEvolution + LLAMAQuantumEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDifferentialEvolution" + ).set_name("LLAMAQuantumEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("QuantumEnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart import ( + QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart, + ) + + lama_register["QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart"] = ( + QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart + ) + LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart" + ).set_name("LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart", register=True) +except Exception as e: + print("QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDiversityExplorerV8 import ( + QuantumEnhancedDiversityExplorerV8, + ) + + lama_register["QuantumEnhancedDiversityExplorerV8"] = QuantumEnhancedDiversityExplorerV8 + LLAMAQuantumEnhancedDiversityExplorerV8 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDiversityExplorerV8" + ).set_name("LLAMAQuantumEnhancedDiversityExplorerV8", register=True) +except Exception as e: + print("QuantumEnhancedDiversityExplorerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("QuantumEnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 + ) + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2", register=True) +except Exception as e: + print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 + ) + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3", register=True) +except Exception as e: + print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 + ) + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4", register=True) +except Exception as e: + print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 + ) + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5", register=True) +except Exception as e: + print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution import ( + QuantumEnhancedDynamicDifferentialEvolution, + ) + + lama_register["QuantumEnhancedDynamicDifferentialEvolution"] = QuantumEnhancedDynamicDifferentialEvolution + LLAMAQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: + print("QuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v2 import ( + QuantumEnhancedDynamicDifferentialEvolution_v2, + ) + + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v2"] = ( + QuantumEnhancedDynamicDifferentialEvolution_v2 + ) + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2", register=True) +except Exception as e: + print("QuantumEnhancedDynamicDifferentialEvolution_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v3 import ( + QuantumEnhancedDynamicDifferentialEvolution_v3, + ) + + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v3"] = ( + QuantumEnhancedDynamicDifferentialEvolution_v3 + ) + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3", register=True) +except Exception as e: + print("QuantumEnhancedDynamicDifferentialEvolution_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicHybridSearchV9 import ( + QuantumEnhancedDynamicHybridSearchV9, + ) + + lama_register["QuantumEnhancedDynamicHybridSearchV9"] = QuantumEnhancedDynamicHybridSearchV9 + LLAMAQuantumEnhancedDynamicHybridSearchV9 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicHybridSearchV9" + ).set_name("LLAMAQuantumEnhancedDynamicHybridSearchV9", register=True) +except Exception as e: + print("QuantumEnhancedDynamicHybridSearchV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE import ( + QuantumEnhancedDynamicMultiStrategyDE, + ) + + lama_register["QuantumEnhancedDynamicMultiStrategyDE"] = QuantumEnhancedDynamicMultiStrategyDE + LLAMAQuantumEnhancedDynamicMultiStrategyDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicMultiStrategyDE" + ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE", register=True) +except Exception as e: + print("QuantumEnhancedDynamicMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE_v2 import ( + QuantumEnhancedDynamicMultiStrategyDE_v2, + ) + + lama_register["QuantumEnhancedDynamicMultiStrategyDE_v2"] = QuantumEnhancedDynamicMultiStrategyDE_v2 + LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2" + ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2", register=True) +except Exception as e: + print("QuantumEnhancedDynamicMultiStrategyDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedGlobalTacticalOptimizer import ( + QuantumEnhancedGlobalTacticalOptimizer, + ) + + lama_register["QuantumEnhancedGlobalTacticalOptimizer"] = QuantumEnhancedGlobalTacticalOptimizer + LLAMAQuantumEnhancedGlobalTacticalOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEnhancedGlobalTacticalOptimizer" + ).set_name("LLAMAQuantumEnhancedGlobalTacticalOptimizer", register=True) +except Exception as e: + print("QuantumEnhancedGlobalTacticalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedGradientClimber import QuantumEnhancedGradientClimber + + lama_register["QuantumEnhancedGradientClimber"] = QuantumEnhancedGradientClimber + LLAMAQuantumEnhancedGradientClimber = NonObjectOptimizer( + method="LLAMAQuantumEnhancedGradientClimber" + ).set_name("LLAMAQuantumEnhancedGradientClimber", register=True) +except Exception as e: + print("QuantumEnhancedGradientClimber can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedHybridDEPSO import QuantumEnhancedHybridDEPSO + + lama_register["QuantumEnhancedHybridDEPSO"] = QuantumEnhancedHybridDEPSO + LLAMAQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO").set_name( + "LLAMAQuantumEnhancedHybridDEPSO", register=True + ) +except Exception as e: + print("QuantumEnhancedHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMemeticAdaptiveSearch import ( + QuantumEnhancedMemeticAdaptiveSearch, + ) + + lama_register["QuantumEnhancedMemeticAdaptiveSearch"] = QuantumEnhancedMemeticAdaptiveSearch + LLAMAQuantumEnhancedMemeticAdaptiveSearch = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMemeticAdaptiveSearch" + ).set_name("LLAMAQuantumEnhancedMemeticAdaptiveSearch", register=True) +except Exception as e: + print("QuantumEnhancedMemeticAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMemeticSearch import QuantumEnhancedMemeticSearch + + lama_register["QuantumEnhancedMemeticSearch"] = QuantumEnhancedMemeticSearch + LLAMAQuantumEnhancedMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMemeticSearch" + ).set_name("LLAMAQuantumEnhancedMemeticSearch", register=True) +except Exception as e: + print("QuantumEnhancedMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v8 import ( + QuantumEnhancedMultiPhaseAdaptiveDE_v8, + ) + + lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v8"] = QuantumEnhancedMultiPhaseAdaptiveDE_v8 + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8" + ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseAdaptiveDE_v8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v9 import ( + QuantumEnhancedMultiPhaseAdaptiveDE_v9, + ) + + lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v9"] = QuantumEnhancedMultiPhaseAdaptiveDE_v9 + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9" + ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseAdaptiveDE_v9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE import QuantumEnhancedMultiPhaseDE + + lama_register["QuantumEnhancedMultiPhaseDE"] = QuantumEnhancedMultiPhaseDE + LLAMAQuantumEnhancedMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE").set_name( + "LLAMAQuantumEnhancedMultiPhaseDE", register=True + ) +except Exception as e: + print("QuantumEnhancedMultiPhaseDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v2 import QuantumEnhancedMultiPhaseDE_v2 + + lama_register["QuantumEnhancedMultiPhaseDE_v2"] = QuantumEnhancedMultiPhaseDE_v2 + LLAMAQuantumEnhancedMultiPhaseDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v2" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v2", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v3 import QuantumEnhancedMultiPhaseDE_v3 + + lama_register["QuantumEnhancedMultiPhaseDE_v3"] = QuantumEnhancedMultiPhaseDE_v3 + LLAMAQuantumEnhancedMultiPhaseDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v3" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v3", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v4 import QuantumEnhancedMultiPhaseDE_v4 + + lama_register["QuantumEnhancedMultiPhaseDE_v4"] = QuantumEnhancedMultiPhaseDE_v4 + LLAMAQuantumEnhancedMultiPhaseDE_v4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v4" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v4", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseDE_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v5 import QuantumEnhancedMultiPhaseDE_v5 + + lama_register["QuantumEnhancedMultiPhaseDE_v5"] = QuantumEnhancedMultiPhaseDE_v5 + LLAMAQuantumEnhancedMultiPhaseDE_v5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v5" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v5", register=True) +except Exception as e: + print("QuantumEnhancedMultiPhaseDE_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEnhancedRefinedAdaptiveExplorationOptimization import ( + QuantumEnhancedRefinedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumEnhancedRefinedAdaptiveExplorationOptimization"] = ( + QuantumEnhancedRefinedAdaptiveExplorationOptimization + ) + LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("QuantumEnhancedRefinedAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEntropyEnhancedDE import QuantumEntropyEnhancedDE + + lama_register["QuantumEntropyEnhancedDE"] = QuantumEntropyEnhancedDE + LLAMAQuantumEntropyEnhancedDE = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE").set_name( + "LLAMAQuantumEntropyEnhancedDE", register=True + ) +except Exception as e: + print("QuantumEntropyEnhancedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolutionaryAdaptiveOptimizer import ( + QuantumEvolutionaryAdaptiveOptimizer, + ) + + lama_register["QuantumEvolutionaryAdaptiveOptimizer"] = QuantumEvolutionaryAdaptiveOptimizer + LLAMAQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryAdaptiveOptimizer" + ).set_name("LLAMAQuantumEvolutionaryAdaptiveOptimizer", register=True) +except Exception as e: + print("QuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategy import ( + QuantumEvolutionaryConvergenceStrategy, + ) + + lama_register["QuantumEvolutionaryConvergenceStrategy"] = QuantumEvolutionaryConvergenceStrategy + LLAMAQuantumEvolutionaryConvergenceStrategy = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryConvergenceStrategy" + ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategy", register=True) +except Exception as e: + print("QuantumEvolutionaryConvergenceStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategyV2 import ( + QuantumEvolutionaryConvergenceStrategyV2, + ) + + lama_register["QuantumEvolutionaryConvergenceStrategyV2"] = QuantumEvolutionaryConvergenceStrategyV2 + LLAMAQuantumEvolutionaryConvergenceStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryConvergenceStrategyV2" + ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategyV2", register=True) +except Exception as e: + print("QuantumEvolutionaryConvergenceStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolutionaryOptimization import QuantumEvolutionaryOptimization + + lama_register["QuantumEvolutionaryOptimization"] = QuantumEvolutionaryOptimization + LLAMAQuantumEvolutionaryOptimization = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryOptimization" + ).set_name("LLAMAQuantumEvolutionaryOptimization", register=True) +except Exception as e: + print("QuantumEvolutionaryOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV10 import ( + QuantumEvolvedDiversityExplorerV10, + ) + + lama_register["QuantumEvolvedDiversityExplorerV10"] = QuantumEvolvedDiversityExplorerV10 + LLAMAQuantumEvolvedDiversityExplorerV10 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV10" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV10", register=True) +except Exception as e: + print("QuantumEvolvedDiversityExplorerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV11 import ( + QuantumEvolvedDiversityExplorerV11, + ) + + lama_register["QuantumEvolvedDiversityExplorerV11"] = QuantumEvolvedDiversityExplorerV11 + LLAMAQuantumEvolvedDiversityExplorerV11 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV11" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV11", register=True) +except Exception as e: + print("QuantumEvolvedDiversityExplorerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV12 import ( + QuantumEvolvedDiversityExplorerV12, + ) + + lama_register["QuantumEvolvedDiversityExplorerV12"] = QuantumEvolvedDiversityExplorerV12 + LLAMAQuantumEvolvedDiversityExplorerV12 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV12" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV12", register=True) +except Exception as e: + print("QuantumEvolvedDiversityExplorerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV9 import ( + QuantumEvolvedDiversityExplorerV9, + ) + + lama_register["QuantumEvolvedDiversityExplorerV9"] = QuantumEvolvedDiversityExplorerV9 + LLAMAQuantumEvolvedDiversityExplorerV9 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV9" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV9", register=True) +except Exception as e: + print("QuantumEvolvedDiversityExplorerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumFeedbackEvolutionStrategy import QuantumFeedbackEvolutionStrategy + + lama_register["QuantumFeedbackEvolutionStrategy"] = QuantumFeedbackEvolutionStrategy + LLAMAQuantumFeedbackEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumFeedbackEvolutionStrategy" + ).set_name("LLAMAQuantumFeedbackEvolutionStrategy", register=True) +except Exception as e: + print("QuantumFeedbackEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumFireworksAlgorithm import QuantumFireworksAlgorithm + + lama_register["QuantumFireworksAlgorithm"] = QuantumFireworksAlgorithm + LLAMAQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm").set_name( + "LLAMAQuantumFireworksAlgorithm", register=True + ) +except Exception as e: + print("QuantumFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumFluxDifferentialSwarm import QuantumFluxDifferentialSwarm + + lama_register["QuantumFluxDifferentialSwarm"] = QuantumFluxDifferentialSwarm + LLAMAQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumFluxDifferentialSwarm" + ).set_name("LLAMAQuantumFluxDifferentialSwarm", register=True) +except Exception as e: + print("QuantumFluxDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGeneticDifferentialEvolution import ( + QuantumGeneticDifferentialEvolution, + ) + + lama_register["QuantumGeneticDifferentialEvolution"] = QuantumGeneticDifferentialEvolution + LLAMAQuantumGeneticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumGeneticDifferentialEvolution" + ).set_name("LLAMAQuantumGeneticDifferentialEvolution", register=True) +except Exception as e: + print("QuantumGeneticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimization import ( + QuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimization"] = ( + QuantumGradientAdaptiveExplorationOptimization + ) + LLAMAQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV2 import ( + QuantumGradientAdaptiveExplorationOptimizationV2, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV2"] = ( + QuantumGradientAdaptiveExplorationOptimizationV2 + ) + LLAMAQuantumGradientAdaptiveExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV2", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV3 import ( + QuantumGradientAdaptiveExplorationOptimizationV3, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV3"] = ( + QuantumGradientAdaptiveExplorationOptimizationV3 + ) + LLAMAQuantumGradientAdaptiveExplorationOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV3", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV4 import ( + QuantumGradientAdaptiveExplorationOptimizationV4, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV4"] = ( + QuantumGradientAdaptiveExplorationOptimizationV4 + ) + LLAMAQuantumGradientAdaptiveExplorationOptimizationV4 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV4", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV5 import ( + QuantumGradientAdaptiveExplorationOptimizationV5, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV5"] = ( + QuantumGradientAdaptiveExplorationOptimizationV5 + ) + LLAMAQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV5", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationRefinedOptimization import ( + QuantumGradientAdaptiveExplorationRefinedOptimization, + ) + + lama_register["QuantumGradientAdaptiveExplorationRefinedOptimization"] = ( + QuantumGradientAdaptiveExplorationRefinedOptimization + ) + LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization", register=True) +except Exception as e: + print("QuantumGradientAdaptiveExplorationRefinedOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientBalancedOptimizerV6 import ( + QuantumGradientBalancedOptimizerV6, + ) + + lama_register["QuantumGradientBalancedOptimizerV6"] = QuantumGradientBalancedOptimizerV6 + LLAMAQuantumGradientBalancedOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumGradientBalancedOptimizerV6" + ).set_name("LLAMAQuantumGradientBalancedOptimizerV6", register=True) +except Exception as e: + print("QuantumGradientBalancedOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientBoostedMemeticSearch import ( + QuantumGradientBoostedMemeticSearch, + ) + + lama_register["QuantumGradientBoostedMemeticSearch"] = QuantumGradientBoostedMemeticSearch + LLAMAQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMAQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: + print("QuantumGradientBoostedMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientEnhancedExplorationOptimization import ( + QuantumGradientEnhancedExplorationOptimization, + ) + + lama_register["QuantumGradientEnhancedExplorationOptimization"] = ( + QuantumGradientEnhancedExplorationOptimization + ) + LLAMAQuantumGradientEnhancedExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientEnhancedExplorationOptimization" + ).set_name("LLAMAQuantumGradientEnhancedExplorationOptimization", register=True) +except Exception as e: + print("QuantumGradientEnhancedExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientFusionOptimizer import QuantumGradientFusionOptimizer + + lama_register["QuantumGradientFusionOptimizer"] = QuantumGradientFusionOptimizer + LLAMAQuantumGradientFusionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientFusionOptimizer" + ).set_name("LLAMAQuantumGradientFusionOptimizer", register=True) +except Exception as e: + print("QuantumGradientFusionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientGuidedFireworksAlgorithm import ( + QuantumGradientGuidedFireworksAlgorithm, + ) + + lama_register["QuantumGradientGuidedFireworksAlgorithm"] = QuantumGradientGuidedFireworksAlgorithm + LLAMAQuantumGradientGuidedFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumGradientGuidedFireworksAlgorithm" + ).set_name("LLAMAQuantumGradientGuidedFireworksAlgorithm", register=True) +except Exception as e: + print("QuantumGradientGuidedFireworksAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientHybridOptimization import ( + QuantumGradientHybridOptimization, + ) + + lama_register["QuantumGradientHybridOptimization"] = QuantumGradientHybridOptimization + LLAMAQuantumGradientHybridOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimization" + ).set_name("LLAMAQuantumGradientHybridOptimization", register=True) +except Exception as e: + print("QuantumGradientHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV2 import ( + QuantumGradientHybridOptimizationV2, + ) + + lama_register["QuantumGradientHybridOptimizationV2"] = QuantumGradientHybridOptimizationV2 + LLAMAQuantumGradientHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV2" + ).set_name("LLAMAQuantumGradientHybridOptimizationV2", register=True) +except Exception as e: + print("QuantumGradientHybridOptimizationV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV3 import ( + QuantumGradientHybridOptimizationV3, + ) + + lama_register["QuantumGradientHybridOptimizationV3"] = QuantumGradientHybridOptimizationV3 + LLAMAQuantumGradientHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV3" + ).set_name("LLAMAQuantumGradientHybridOptimizationV3", register=True) +except Exception as e: + print("QuantumGradientHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV4 import ( + QuantumGradientHybridOptimizationV4, + ) + + lama_register["QuantumGradientHybridOptimizationV4"] = QuantumGradientHybridOptimizationV4 + LLAMAQuantumGradientHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV4" + ).set_name("LLAMAQuantumGradientHybridOptimizationV4", register=True) +except Exception as e: + print("QuantumGradientHybridOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientHybridOptimizer import QuantumGradientHybridOptimizer + + lama_register["QuantumGradientHybridOptimizer"] = QuantumGradientHybridOptimizer + LLAMAQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizer" + ).set_name("LLAMAQuantumGradientHybridOptimizer", register=True) +except Exception as e: + print("QuantumGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientMemeticOptimizer import QuantumGradientMemeticOptimizer + + lama_register["QuantumGradientMemeticOptimizer"] = QuantumGradientMemeticOptimizer + LLAMAQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticOptimizer" + ).set_name("LLAMAQuantumGradientMemeticOptimizer", register=True) +except Exception as e: + print("QuantumGradientMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientMemeticSearch import QuantumGradientMemeticSearch + + lama_register["QuantumGradientMemeticSearch"] = QuantumGradientMemeticSearch + LLAMAQuantumGradientMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearch" + ).set_name("LLAMAQuantumGradientMemeticSearch", register=True) +except Exception as e: + print("QuantumGradientMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientMemeticSearchV2 import QuantumGradientMemeticSearchV2 + + lama_register["QuantumGradientMemeticSearchV2"] = QuantumGradientMemeticSearchV2 + LLAMAQuantumGradientMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearchV2" + ).set_name("LLAMAQuantumGradientMemeticSearchV2", register=True) +except Exception as e: + print("QuantumGradientMemeticSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGradientMemeticSearchV3 import QuantumGradientMemeticSearchV3 + + lama_register["QuantumGradientMemeticSearchV3"] = QuantumGradientMemeticSearchV3 + LLAMAQuantumGradientMemeticSearchV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearchV3" + ).set_name("LLAMAQuantumGradientMemeticSearchV3", register=True) +except Exception as e: + print("QuantumGradientMemeticSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGuidedAdaptiveStrategy import QuantumGuidedAdaptiveStrategy + + lama_register["QuantumGuidedAdaptiveStrategy"] = QuantumGuidedAdaptiveStrategy + LLAMAQuantumGuidedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumGuidedAdaptiveStrategy" + ).set_name("LLAMAQuantumGuidedAdaptiveStrategy", register=True) +except Exception as e: + print("QuantumGuidedAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGuidedCrossoverAdaptation import QuantumGuidedCrossoverAdaptation + + lama_register["QuantumGuidedCrossoverAdaptation"] = QuantumGuidedCrossoverAdaptation + LLAMAQuantumGuidedCrossoverAdaptation = NonObjectOptimizer( + method="LLAMAQuantumGuidedCrossoverAdaptation" + ).set_name("LLAMAQuantumGuidedCrossoverAdaptation", register=True) +except Exception as e: + print("QuantumGuidedCrossoverAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGuidedHybridDifferentialSwarm import ( + QuantumGuidedHybridDifferentialSwarm, + ) + + lama_register["QuantumGuidedHybridDifferentialSwarm"] = QuantumGuidedHybridDifferentialSwarm + LLAMAQuantumGuidedHybridDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumGuidedHybridDifferentialSwarm" + ).set_name("LLAMAQuantumGuidedHybridDifferentialSwarm", register=True) +except Exception as e: + print("QuantumGuidedHybridDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumGuidedLevyAdaptiveSwarm import QuantumGuidedLevyAdaptiveSwarm + + lama_register["QuantumGuidedLevyAdaptiveSwarm"] = QuantumGuidedLevyAdaptiveSwarm + LLAMAQuantumGuidedLevyAdaptiveSwarm = NonObjectOptimizer( + method="LLAMAQuantumGuidedLevyAdaptiveSwarm" + ).set_name("LLAMAQuantumGuidedLevyAdaptiveSwarm", register=True) +except Exception as e: + print("QuantumGuidedLevyAdaptiveSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicAdaptationStrategy import ( + QuantumHarmonicAdaptationStrategy, + ) + + lama_register["QuantumHarmonicAdaptationStrategy"] = QuantumHarmonicAdaptationStrategy + LLAMAQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptationStrategy" + ).set_name("LLAMAQuantumHarmonicAdaptationStrategy", register=True) +except Exception as e: + print("QuantumHarmonicAdaptationStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveFeedbackOptimizer import ( + QuantumHarmonicAdaptiveFeedbackOptimizer, + ) + + lama_register["QuantumHarmonicAdaptiveFeedbackOptimizer"] = QuantumHarmonicAdaptiveFeedbackOptimizer + LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicAdaptiveFeedbackOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveOptimizer import QuantumHarmonicAdaptiveOptimizer + + lama_register["QuantumHarmonicAdaptiveOptimizer"] = QuantumHarmonicAdaptiveOptimizer + LLAMAQuantumHarmonicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveRefinementOptimizer import ( + QuantumHarmonicAdaptiveRefinementOptimizer, + ) + + lama_register["QuantumHarmonicAdaptiveRefinementOptimizer"] = QuantumHarmonicAdaptiveRefinementOptimizer + LLAMAQuantumHarmonicAdaptiveRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveRefinementOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicAdaptiveRefinementOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicDynamicAdaptation import QuantumHarmonicDynamicAdaptation + + lama_register["QuantumHarmonicDynamicAdaptation"] = QuantumHarmonicDynamicAdaptation + LLAMAQuantumHarmonicDynamicAdaptation = NonObjectOptimizer( + method="LLAMAQuantumHarmonicDynamicAdaptation" + ).set_name("LLAMAQuantumHarmonicDynamicAdaptation", register=True) +except Exception as e: + print("QuantumHarmonicDynamicAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicDynamicOptimizer import QuantumHarmonicDynamicOptimizer + + lama_register["QuantumHarmonicDynamicOptimizer"] = QuantumHarmonicDynamicOptimizer + LLAMAQuantumHarmonicDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicDynamicOptimizer" + ).set_name("LLAMAQuantumHarmonicDynamicOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicDynamicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicEvolutionStrategy import QuantumHarmonicEvolutionStrategy + + lama_register["QuantumHarmonicEvolutionStrategy"] = QuantumHarmonicEvolutionStrategy + LLAMAQuantumHarmonicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicEvolutionStrategy" + ).set_name("LLAMAQuantumHarmonicEvolutionStrategy", register=True) +except Exception as e: + print("QuantumHarmonicEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFeedbackOptimizer import QuantumHarmonicFeedbackOptimizer + + lama_register["QuantumHarmonicFeedbackOptimizer"] = QuantumHarmonicFeedbackOptimizer + LLAMAQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicFeedbackOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizer import QuantumHarmonicFocusedOptimizer + + lama_register["QuantumHarmonicFocusedOptimizer"] = QuantumHarmonicFocusedOptimizer + LLAMAQuantumHarmonicFocusedOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizer" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV2 import ( + QuantumHarmonicFocusedOptimizerV2, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV2"] = QuantumHarmonicFocusedOptimizerV2 + LLAMAQuantumHarmonicFocusedOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV2" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV2", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV3 import ( + QuantumHarmonicFocusedOptimizerV3, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV3"] = QuantumHarmonicFocusedOptimizerV3 + LLAMAQuantumHarmonicFocusedOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV3" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV3", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV4 import ( + QuantumHarmonicFocusedOptimizerV4, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV4"] = QuantumHarmonicFocusedOptimizerV4 + LLAMAQuantumHarmonicFocusedOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV4" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV4", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV5 import ( + QuantumHarmonicFocusedOptimizerV5, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV5"] = QuantumHarmonicFocusedOptimizerV5 + LLAMAQuantumHarmonicFocusedOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV5" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV5", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV6 import ( + QuantumHarmonicFocusedOptimizerV6, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV6"] = QuantumHarmonicFocusedOptimizerV6 + LLAMAQuantumHarmonicFocusedOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV6" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV6", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV7 import ( + QuantumHarmonicFocusedOptimizerV7, + ) + + lama_register["QuantumHarmonicFocusedOptimizerV7"] = QuantumHarmonicFocusedOptimizerV7 + LLAMAQuantumHarmonicFocusedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV7" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV7", register=True) +except Exception as e: + print("QuantumHarmonicFocusedOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicImpulseOptimizerV9 import ( + QuantumHarmonicImpulseOptimizerV9, + ) + + lama_register["QuantumHarmonicImpulseOptimizerV9"] = QuantumHarmonicImpulseOptimizerV9 + LLAMAQuantumHarmonicImpulseOptimizerV9 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicImpulseOptimizerV9" + ).set_name("LLAMAQuantumHarmonicImpulseOptimizerV9", register=True) +except Exception as e: + print("QuantumHarmonicImpulseOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicPrecisionOptimizer import ( + QuantumHarmonicPrecisionOptimizer, + ) + + lama_register["QuantumHarmonicPrecisionOptimizer"] = QuantumHarmonicPrecisionOptimizer + LLAMAQuantumHarmonicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicPrecisionOptimizer" + ).set_name("LLAMAQuantumHarmonicPrecisionOptimizer", register=True) +except Exception as e: + print("QuantumHarmonicPrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonicResilientEvolutionStrategy import ( + QuantumHarmonicResilientEvolutionStrategy, + ) + + lama_register["QuantumHarmonicResilientEvolutionStrategy"] = QuantumHarmonicResilientEvolutionStrategy + LLAMAQuantumHarmonicResilientEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicResilientEvolutionStrategy" + ).set_name("LLAMAQuantumHarmonicResilientEvolutionStrategy", register=True) +except Exception as e: + print("QuantumHarmonicResilientEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonizedPSO import QuantumHarmonizedPSO + + lama_register["QuantumHarmonizedPSO"] = QuantumHarmonizedPSO + LLAMAQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO").set_name( + "LLAMAQuantumHarmonizedPSO", register=True + ) +except Exception as e: + print("QuantumHarmonizedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithm import QuantumHarmonyMemeticAlgorithm + + lama_register["QuantumHarmonyMemeticAlgorithm"] = QuantumHarmonyMemeticAlgorithm + LLAMAQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithm" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithm", register=True) +except Exception as e: + print("QuantumHarmonyMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmImproved import ( + QuantumHarmonyMemeticAlgorithmImproved, + ) + + lama_register["QuantumHarmonyMemeticAlgorithmImproved"] = QuantumHarmonyMemeticAlgorithmImproved + LLAMAQuantumHarmonyMemeticAlgorithmImproved = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithmImproved" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmImproved", register=True) +except Exception as e: + print("QuantumHarmonyMemeticAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmRefined import ( + QuantumHarmonyMemeticAlgorithmRefined, + ) + + lama_register["QuantumHarmonyMemeticAlgorithmRefined"] = QuantumHarmonyMemeticAlgorithmRefined + LLAMAQuantumHarmonyMemeticAlgorithmRefined = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithmRefined" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmRefined", register=True) +except Exception as e: + print("QuantumHarmonyMemeticAlgorithmRefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHarmonySearch import QuantumHarmonySearch + + lama_register["QuantumHarmonySearch"] = QuantumHarmonySearch + LLAMAQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch").set_name( + "LLAMAQuantumHarmonySearch", register=True + ) +except Exception as e: + print("QuantumHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategy import QuantumHybridAdaptiveStrategy + + lama_register["QuantumHybridAdaptiveStrategy"] = QuantumHybridAdaptiveStrategy + LLAMAQuantumHybridAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategy" + ).set_name("LLAMAQuantumHybridAdaptiveStrategy", register=True) +except Exception as e: + print("QuantumHybridAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV2 import QuantumHybridAdaptiveStrategyV2 + + lama_register["QuantumHybridAdaptiveStrategyV2"] = QuantumHybridAdaptiveStrategyV2 + LLAMAQuantumHybridAdaptiveStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV2" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV2", register=True) +except Exception as e: + print("QuantumHybridAdaptiveStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV8 import QuantumHybridAdaptiveStrategyV8 + + lama_register["QuantumHybridAdaptiveStrategyV8"] = QuantumHybridAdaptiveStrategyV8 + LLAMAQuantumHybridAdaptiveStrategyV8 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV8" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV8", register=True) +except Exception as e: + print("QuantumHybridAdaptiveStrategyV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV9 import QuantumHybridAdaptiveStrategyV9 + + lama_register["QuantumHybridAdaptiveStrategyV9"] = QuantumHybridAdaptiveStrategyV9 + LLAMAQuantumHybridAdaptiveStrategyV9 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV9" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV9", register=True) +except Exception as e: + print("QuantumHybridAdaptiveStrategyV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridDifferentialEvolution import ( + QuantumHybridDifferentialEvolution, + ) + + lama_register["QuantumHybridDifferentialEvolution"] = QuantumHybridDifferentialEvolution + LLAMAQuantumHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumHybridDifferentialEvolution" + ).set_name("LLAMAQuantumHybridDifferentialEvolution", register=True) +except Exception as e: + print("QuantumHybridDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE import QuantumHybridDynamicAdaptiveDE + + lama_register["QuantumHybridDynamicAdaptiveDE"] = QuantumHybridDynamicAdaptiveDE + LLAMAQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE", register=True) +except Exception as e: + print("QuantumHybridDynamicAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v2 import ( + QuantumHybridDynamicAdaptiveDE_v2, + ) + + lama_register["QuantumHybridDynamicAdaptiveDE_v2"] = QuantumHybridDynamicAdaptiveDE_v2 + LLAMAQuantumHybridDynamicAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE_v2" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v2", register=True) +except Exception as e: + print("QuantumHybridDynamicAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v3 import ( + QuantumHybridDynamicAdaptiveDE_v3, + ) + + lama_register["QuantumHybridDynamicAdaptiveDE_v3"] = QuantumHybridDynamicAdaptiveDE_v3 + LLAMAQuantumHybridDynamicAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE_v3" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v3", register=True) +except Exception as e: + print("QuantumHybridDynamicAdaptiveDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE import QuantumHybridEliteAdaptiveDE + + lama_register["QuantumHybridEliteAdaptiveDE"] = QuantumHybridEliteAdaptiveDE + LLAMAQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v2 import QuantumHybridEliteAdaptiveDE_v2 + + lama_register["QuantumHybridEliteAdaptiveDE_v2"] = QuantumHybridEliteAdaptiveDE_v2 + LLAMAQuantumHybridEliteAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v2" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v2", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v3 import QuantumHybridEliteAdaptiveDE_v3 + + lama_register["QuantumHybridEliteAdaptiveDE_v3"] = QuantumHybridEliteAdaptiveDE_v3 + LLAMAQuantumHybridEliteAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v3" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v3", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v4 import QuantumHybridEliteAdaptiveDE_v4 + + lama_register["QuantumHybridEliteAdaptiveDE_v4"] = QuantumHybridEliteAdaptiveDE_v4 + LLAMAQuantumHybridEliteAdaptiveDE_v4 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v4" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v4", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v5 import QuantumHybridEliteAdaptiveDE_v5 + + lama_register["QuantumHybridEliteAdaptiveDE_v5"] = QuantumHybridEliteAdaptiveDE_v5 + LLAMAQuantumHybridEliteAdaptiveDE_v5 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v5" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v5", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v6 import QuantumHybridEliteAdaptiveDE_v6 + + lama_register["QuantumHybridEliteAdaptiveDE_v6"] = QuantumHybridEliteAdaptiveDE_v6 + LLAMAQuantumHybridEliteAdaptiveDE_v6 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v6" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v6", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v7 import QuantumHybridEliteAdaptiveDE_v7 + + lama_register["QuantumHybridEliteAdaptiveDE_v7"] = QuantumHybridEliteAdaptiveDE_v7 + LLAMAQuantumHybridEliteAdaptiveDE_v7 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v7" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v7", register=True) +except Exception as e: + print("QuantumHybridEliteAdaptiveDE_v7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridImprovedDE import QuantumHybridImprovedDE + + lama_register["QuantumHybridImprovedDE"] = QuantumHybridImprovedDE + LLAMAQuantumHybridImprovedDE = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE").set_name( + "LLAMAQuantumHybridImprovedDE", register=True + ) +except Exception as e: + print("QuantumHybridImprovedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumHybridParticleDifferentialSearch import ( + QuantumHybridParticleDifferentialSearch, + ) + + lama_register["QuantumHybridParticleDifferentialSearch"] = QuantumHybridParticleDifferentialSearch + LLAMAQuantumHybridParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAQuantumHybridParticleDifferentialSearch" + ).set_name("LLAMAQuantumHybridParticleDifferentialSearch", register=True) +except Exception as e: + print("QuantumHybridParticleDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInfluenceCrossoverOptimizer import ( + QuantumInfluenceCrossoverOptimizer, + ) + + lama_register["QuantumInfluenceCrossoverOptimizer"] = QuantumInfluenceCrossoverOptimizer + LLAMAQuantumInfluenceCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInfluenceCrossoverOptimizer" + ).set_name("LLAMAQuantumInfluenceCrossoverOptimizer", register=True) +except Exception as e: + print("QuantumInfluenceCrossoverOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInfluencedAdaptiveDifferentialSwarm import ( + QuantumInfluencedAdaptiveDifferentialSwarm, + ) + + lama_register["QuantumInfluencedAdaptiveDifferentialSwarm"] = QuantumInfluencedAdaptiveDifferentialSwarm + LLAMAQuantumInfluencedAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm" + ).set_name("LLAMAQuantumInfluencedAdaptiveDifferentialSwarm", register=True) +except Exception as e: + print("QuantumInfluencedAdaptiveDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearch import ( + QuantumInformedAdaptiveHybridSearch, + ) + + lama_register["QuantumInformedAdaptiveHybridSearch"] = QuantumInformedAdaptiveHybridSearch + LLAMAQuantumInformedAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveHybridSearch" + ).set_name("LLAMAQuantumInformedAdaptiveHybridSearch", register=True) +except Exception as e: + print("QuantumInformedAdaptiveHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearchV4 import ( + QuantumInformedAdaptiveHybridSearchV4, + ) + + lama_register["QuantumInformedAdaptiveHybridSearchV4"] = QuantumInformedAdaptiveHybridSearchV4 + LLAMAQuantumInformedAdaptiveHybridSearchV4 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveHybridSearchV4" + ).set_name("LLAMAQuantumInformedAdaptiveHybridSearchV4", register=True) +except Exception as e: + print("QuantumInformedAdaptiveHybridSearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveInertiaOptimizer import ( + QuantumInformedAdaptiveInertiaOptimizer, + ) + + lama_register["QuantumInformedAdaptiveInertiaOptimizer"] = QuantumInformedAdaptiveInertiaOptimizer + LLAMAQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveInertiaOptimizer" + ).set_name("LLAMAQuantumInformedAdaptiveInertiaOptimizer", register=True) +except Exception as e: + print("QuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptivePSO import QuantumInformedAdaptivePSO + + lama_register["QuantumInformedAdaptivePSO"] = QuantumInformedAdaptivePSO + LLAMAQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO").set_name( + "LLAMAQuantumInformedAdaptivePSO", register=True + ) +except Exception as e: + print("QuantumInformedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV4 import QuantumInformedAdaptiveSearchV4 + + lama_register["QuantumInformedAdaptiveSearchV4"] = QuantumInformedAdaptiveSearchV4 + LLAMAQuantumInformedAdaptiveSearchV4 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV4" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV4", register=True) +except Exception as e: + print("QuantumInformedAdaptiveSearchV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV5 import QuantumInformedAdaptiveSearchV5 + + lama_register["QuantumInformedAdaptiveSearchV5"] = QuantumInformedAdaptiveSearchV5 + LLAMAQuantumInformedAdaptiveSearchV5 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV5" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV5", register=True) +except Exception as e: + print("QuantumInformedAdaptiveSearchV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV6 import QuantumInformedAdaptiveSearchV6 + + lama_register["QuantumInformedAdaptiveSearchV6"] = QuantumInformedAdaptiveSearchV6 + LLAMAQuantumInformedAdaptiveSearchV6 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV6" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV6", register=True) +except Exception as e: + print("QuantumInformedAdaptiveSearchV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedCooperativeSearchV1 import ( + QuantumInformedCooperativeSearchV1, + ) + + lama_register["QuantumInformedCooperativeSearchV1"] = QuantumInformedCooperativeSearchV1 + LLAMAQuantumInformedCooperativeSearchV1 = NonObjectOptimizer( + method="LLAMAQuantumInformedCooperativeSearchV1" + ).set_name("LLAMAQuantumInformedCooperativeSearchV1", register=True) +except Exception as e: + print("QuantumInformedCooperativeSearchV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedCrossoverEvolution import ( + QuantumInformedCrossoverEvolution, + ) + + lama_register["QuantumInformedCrossoverEvolution"] = QuantumInformedCrossoverEvolution + LLAMAQuantumInformedCrossoverEvolution = NonObjectOptimizer( + method="LLAMAQuantumInformedCrossoverEvolution" + ).set_name("LLAMAQuantumInformedCrossoverEvolution", register=True) +except Exception as e: + print("QuantumInformedCrossoverEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedDifferentialStrategy import ( + QuantumInformedDifferentialStrategy, + ) + + lama_register["QuantumInformedDifferentialStrategy"] = QuantumInformedDifferentialStrategy + LLAMAQuantumInformedDifferentialStrategy = NonObjectOptimizer( + method="LLAMAQuantumInformedDifferentialStrategy" + ).set_name("LLAMAQuantumInformedDifferentialStrategy", register=True) +except Exception as e: + print("QuantumInformedDifferentialStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedDynamicSwarmOptimizer import ( + QuantumInformedDynamicSwarmOptimizer, + ) + + lama_register["QuantumInformedDynamicSwarmOptimizer"] = QuantumInformedDynamicSwarmOptimizer + LLAMAQuantumInformedDynamicSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedDynamicSwarmOptimizer" + ).set_name("LLAMAQuantumInformedDynamicSwarmOptimizer", register=True) +except Exception as e: + print("QuantumInformedDynamicSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedEvolutionStrategy import QuantumInformedEvolutionStrategy + + lama_register["QuantumInformedEvolutionStrategy"] = QuantumInformedEvolutionStrategy + LLAMAQuantumInformedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumInformedEvolutionStrategy" + ).set_name("LLAMAQuantumInformedEvolutionStrategy", register=True) +except Exception as e: + print("QuantumInformedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedGradientOptimizer import QuantumInformedGradientOptimizer + + lama_register["QuantumInformedGradientOptimizer"] = QuantumInformedGradientOptimizer + LLAMAQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedGradientOptimizer" + ).set_name("LLAMAQuantumInformedGradientOptimizer", register=True) +except Exception as e: + print("QuantumInformedGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedHyperStrategicOptimizer import ( + QuantumInformedHyperStrategicOptimizer, + ) + + lama_register["QuantumInformedHyperStrategicOptimizer"] = QuantumInformedHyperStrategicOptimizer + LLAMAQuantumInformedHyperStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedHyperStrategicOptimizer" + ).set_name("LLAMAQuantumInformedHyperStrategicOptimizer", register=True) +except Exception as e: + print("QuantumInformedHyperStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedOptimizer import QuantumInformedOptimizer + + lama_register["QuantumInformedOptimizer"] = QuantumInformedOptimizer + LLAMAQuantumInformedOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer").set_name( + "LLAMAQuantumInformedOptimizer", register=True + ) +except Exception as e: + print("QuantumInformedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedPSO import QuantumInformedPSO + + lama_register["QuantumInformedPSO"] = QuantumInformedPSO + LLAMAQuantumInformedPSO = NonObjectOptimizer(method="LLAMAQuantumInformedPSO").set_name( + "LLAMAQuantumInformedPSO", register=True + ) +except Exception as e: + print("QuantumInformedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedParticleSwarmOptimizer import ( + QuantumInformedParticleSwarmOptimizer, + ) + + lama_register["QuantumInformedParticleSwarmOptimizer"] = QuantumInformedParticleSwarmOptimizer + LLAMAQuantumInformedParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedParticleSwarmOptimizer" + ).set_name("LLAMAQuantumInformedParticleSwarmOptimizer", register=True) +except Exception as e: + print("QuantumInformedParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInformedStrategicOptimizer import ( + QuantumInformedStrategicOptimizer, + ) + + lama_register["QuantumInformedStrategicOptimizer"] = QuantumInformedStrategicOptimizer + LLAMAQuantumInformedStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedStrategicOptimizer" + ).set_name("LLAMAQuantumInformedStrategicOptimizer", register=True) +except Exception as e: + print("QuantumInformedStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInfusedAdaptiveStrategy import QuantumInfusedAdaptiveStrategy + + lama_register["QuantumInfusedAdaptiveStrategy"] = QuantumInfusedAdaptiveStrategy + LLAMAQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumInfusedAdaptiveStrategy" + ).set_name("LLAMAQuantumInfusedAdaptiveStrategy", register=True) +except Exception as e: + print("QuantumInfusedAdaptiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEElitistLocalSearch import ( + QuantumInspiredAdaptiveDEElitistLocalSearch, + ) + + lama_register["QuantumInspiredAdaptiveDEElitistLocalSearch"] = QuantumInspiredAdaptiveDEElitistLocalSearch + LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveDEElitistLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEHybridLocalSearch import ( + QuantumInspiredAdaptiveDEHybridLocalSearch, + ) + + lama_register["QuantumInspiredAdaptiveDEHybridLocalSearch"] = QuantumInspiredAdaptiveDEHybridLocalSearch + LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveDEHybridLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning import ( + QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning, + ) + + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning"] = ( + QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning + ) + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning" + ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch import ( + QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch, + ) + + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch"] = ( + QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch + ) + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridDEPSO import ( + QuantumInspiredAdaptiveHybridDEPSO, + ) + + lama_register["QuantumInspiredAdaptiveHybridDEPSO"] = QuantumInspiredAdaptiveHybridDEPSO + LLAMAQuantumInspiredAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumInspiredAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridOptimizer import ( + QuantumInspiredAdaptiveHybridOptimizer, + ) + + lama_register["QuantumInspiredAdaptiveHybridOptimizer"] = QuantumInspiredAdaptiveHybridOptimizer + LLAMAQuantumInspiredAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveHybridOptimizer" + ).set_name("LLAMAQuantumInspiredAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredAdaptiveMemeticOptimizer import ( + QuantumInspiredAdaptiveMemeticOptimizer, + ) + + lama_register["QuantumInspiredAdaptiveMemeticOptimizer"] = QuantumInspiredAdaptiveMemeticOptimizer + LLAMAQuantumInspiredAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer" + ).set_name("LLAMAQuantumInspiredAdaptiveMemeticOptimizer", register=True) +except Exception as e: + print("QuantumInspiredAdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredDifferentialEvolution import ( + QuantumInspiredDifferentialEvolution, + ) + + lama_register["QuantumInspiredDifferentialEvolution"] = QuantumInspiredDifferentialEvolution + LLAMAQuantumInspiredDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumInspiredDifferentialEvolution" + ).set_name("LLAMAQuantumInspiredDifferentialEvolution", register=True) +except Exception as e: + print("QuantumInspiredDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredDifferentialParticleSwarmOptimizer import ( + QuantumInspiredDifferentialParticleSwarmOptimizer, + ) + + lama_register["QuantumInspiredDifferentialParticleSwarmOptimizer"] = ( + QuantumInspiredDifferentialParticleSwarmOptimizer + ) + LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("QuantumInspiredDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredHybridOptimizer import QuantumInspiredHybridOptimizer + + lama_register["QuantumInspiredHybridOptimizer"] = QuantumInspiredHybridOptimizer + LLAMAQuantumInspiredHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredHybridOptimizer" + ).set_name("LLAMAQuantumInspiredHybridOptimizer", register=True) +except Exception as e: + print("QuantumInspiredHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredMetaheuristic import QuantumInspiredMetaheuristic + + lama_register["QuantumInspiredMetaheuristic"] = QuantumInspiredMetaheuristic + LLAMAQuantumInspiredMetaheuristic = NonObjectOptimizer( + method="LLAMAQuantumInspiredMetaheuristic" + ).set_name("LLAMAQuantumInspiredMetaheuristic", register=True) +except Exception as e: + print("QuantumInspiredMetaheuristic can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredOptimization import QuantumInspiredOptimization + + lama_register["QuantumInspiredOptimization"] = QuantumInspiredOptimization + LLAMAQuantumInspiredOptimization = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization").set_name( + "LLAMAQuantumInspiredOptimization", register=True + ) +except Exception as e: + print("QuantumInspiredOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumInspiredSpiralOptimizer import QuantumInspiredSpiralOptimizer + + lama_register["QuantumInspiredSpiralOptimizer"] = QuantumInspiredSpiralOptimizer + LLAMAQuantumInspiredSpiralOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredSpiralOptimizer" + ).set_name("LLAMAQuantumInspiredSpiralOptimizer", register=True) +except Exception as e: + print("QuantumInspiredSpiralOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumIterativeDeepeningHybridSearch import ( + QuantumIterativeDeepeningHybridSearch, + ) + + lama_register["QuantumIterativeDeepeningHybridSearch"] = QuantumIterativeDeepeningHybridSearch + LLAMAQuantumIterativeDeepeningHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumIterativeDeepeningHybridSearch" + ).set_name("LLAMAQuantumIterativeDeepeningHybridSearch", register=True) +except Exception as e: + print("QuantumIterativeDeepeningHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumIterativeRefinementOptimizer import ( + QuantumIterativeRefinementOptimizer, + ) + + lama_register["QuantumIterativeRefinementOptimizer"] = QuantumIterativeRefinementOptimizer + LLAMAQuantumIterativeRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumIterativeRefinementOptimizer" + ).set_name("LLAMAQuantumIterativeRefinementOptimizer", register=True) +except Exception as e: + print("QuantumIterativeRefinementOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLeapOptimizer import QuantumLeapOptimizer + + lama_register["QuantumLeapOptimizer"] = QuantumLeapOptimizer + LLAMAQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer").set_name( + "LLAMAQuantumLeapOptimizer", register=True + ) +except Exception as e: + print("QuantumLeapOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLeapOptimizerV2 import QuantumLeapOptimizerV2 + + lama_register["QuantumLeapOptimizerV2"] = QuantumLeapOptimizerV2 + LLAMAQuantumLeapOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2").set_name( + "LLAMAQuantumLeapOptimizerV2", register=True + ) +except Exception as e: + print("QuantumLeapOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDEHybridLocalSearch import ( + QuantumLevyAdaptiveDEHybridLocalSearch, + ) + + lama_register["QuantumLevyAdaptiveDEHybridLocalSearch"] = QuantumLevyAdaptiveDEHybridLocalSearch + LLAMAQuantumLevyAdaptiveDEHybridLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch" + ).set_name("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDEHybridLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV2 import ( + QuantumLevyAdaptiveDifferentialOptimizerV2, + ) + + lama_register["QuantumLevyAdaptiveDifferentialOptimizerV2"] = QuantumLevyAdaptiveDifferentialOptimizerV2 + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDifferentialOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV3 import ( + QuantumLevyAdaptiveDifferentialOptimizerV3, + ) + + lama_register["QuantumLevyAdaptiveDifferentialOptimizerV3"] = QuantumLevyAdaptiveDifferentialOptimizerV3 + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDifferentialOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV4 import ( + QuantumLevyAdaptiveDifferentialOptimizerV4, + ) + + lama_register["QuantumLevyAdaptiveDifferentialOptimizerV4"] = QuantumLevyAdaptiveDifferentialOptimizerV4 + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDifferentialOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV5 import ( + QuantumLevyAdaptiveDifferentialOptimizerV5, + ) + + lama_register["QuantumLevyAdaptiveDifferentialOptimizerV5"] = QuantumLevyAdaptiveDifferentialOptimizerV5 + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDifferentialOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV6 import ( + QuantumLevyAdaptiveDifferentialOptimizerV6, + ) + + lama_register["QuantumLevyAdaptiveDifferentialOptimizerV6"] = QuantumLevyAdaptiveDifferentialOptimizerV6 + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6", register=True) +except Exception as e: + print("QuantumLevyAdaptiveDifferentialOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyAdaptiveMemeticOptimizerV3 import ( + QuantumLevyAdaptiveMemeticOptimizerV3, + ) + + lama_register["QuantumLevyAdaptiveMemeticOptimizerV3"] = QuantumLevyAdaptiveMemeticOptimizerV3 + LLAMAQuantumLevyAdaptiveMemeticOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3" + ).set_name("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3", register=True) +except Exception as e: + print("QuantumLevyAdaptiveMemeticOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizer import ( + QuantumLevyDifferentialDynamicOptimizer, + ) + + lama_register["QuantumLevyDifferentialDynamicOptimizer"] = QuantumLevyDifferentialDynamicOptimizer + LLAMAQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizer" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizer", register=True) +except Exception as e: + print("QuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV2 import ( + QuantumLevyDifferentialDynamicOptimizerV2, + ) + + lama_register["QuantumLevyDifferentialDynamicOptimizerV2"] = QuantumLevyDifferentialDynamicOptimizerV2 + LLAMAQuantumLevyDifferentialDynamicOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV2", register=True) +except Exception as e: + print("QuantumLevyDifferentialDynamicOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV3 import ( + QuantumLevyDifferentialDynamicOptimizerV3, + ) + + lama_register["QuantumLevyDifferentialDynamicOptimizerV3"] = QuantumLevyDifferentialDynamicOptimizerV3 + LLAMAQuantumLevyDifferentialDynamicOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV3", register=True) +except Exception as e: + print("QuantumLevyDifferentialDynamicOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizer import ( + QuantumLevyDifferentialHybridOptimizer, + ) + + lama_register["QuantumLevyDifferentialHybridOptimizer"] = QuantumLevyDifferentialHybridOptimizer + LLAMAQuantumLevyDifferentialHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridOptimizer" + ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizer", register=True) +except Exception as e: + print("QuantumLevyDifferentialHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizerV2 import ( + QuantumLevyDifferentialHybridOptimizerV2, + ) + + lama_register["QuantumLevyDifferentialHybridOptimizerV2"] = QuantumLevyDifferentialHybridOptimizerV2 + LLAMAQuantumLevyDifferentialHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridOptimizerV2" + ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizerV2", register=True) +except Exception as e: + print("QuantumLevyDifferentialHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridSearch import ( + QuantumLevyDifferentialHybridSearch, + ) + + lama_register["QuantumLevyDifferentialHybridSearch"] = QuantumLevyDifferentialHybridSearch + LLAMAQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridSearch" + ).set_name("LLAMAQuantumLevyDifferentialHybridSearch", register=True) +except Exception as e: + print("QuantumLevyDifferentialHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmOptimizerV3 import ( + QuantumLevyDynamicDifferentialSwarmOptimizerV3, + ) + + lama_register["QuantumLevyDynamicDifferentialSwarmOptimizerV3"] = ( + QuantumLevyDynamicDifferentialSwarmOptimizerV3 + ) + LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3" + ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3", register=True) +except Exception as e: + print("QuantumLevyDynamicDifferentialSwarmOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmV5 import ( + QuantumLevyDynamicDifferentialSwarmV5, + ) + + lama_register["QuantumLevyDynamicDifferentialSwarmV5"] = QuantumLevyDynamicDifferentialSwarmV5 + LLAMAQuantumLevyDynamicDifferentialSwarmV5 = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicDifferentialSwarmV5" + ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmV5", register=True) +except Exception as e: + print("QuantumLevyDynamicDifferentialSwarmV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDynamicParticleSwarm import QuantumLevyDynamicParticleSwarm + + lama_register["QuantumLevyDynamicParticleSwarm"] = QuantumLevyDynamicParticleSwarm + LLAMAQuantumLevyDynamicParticleSwarm = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicParticleSwarm" + ).set_name("LLAMAQuantumLevyDynamicParticleSwarm", register=True) +except Exception as e: + print("QuantumLevyDynamicParticleSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyDynamicSwarmOptimization import ( + QuantumLevyDynamicSwarmOptimization, + ) + + lama_register["QuantumLevyDynamicSwarmOptimization"] = QuantumLevyDynamicSwarmOptimization + LLAMAQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicSwarmOptimization" + ).set_name("LLAMAQuantumLevyDynamicSwarmOptimization", register=True) +except Exception as e: + print("QuantumLevyDynamicSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEliteMemeticDEHybridOptimizer import ( + QuantumLevyEliteMemeticDEHybridOptimizer, + ) + + lama_register["QuantumLevyEliteMemeticDEHybridOptimizer"] = QuantumLevyEliteMemeticDEHybridOptimizer + LLAMAQuantumLevyEliteMemeticDEHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer" + ).set_name("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer", register=True) +except Exception as e: + print("QuantumLevyEliteMemeticDEHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEliteMemeticOptimizer import QuantumLevyEliteMemeticOptimizer + + lama_register["QuantumLevyEliteMemeticOptimizer"] = QuantumLevyEliteMemeticOptimizer + LLAMAQuantumLevyEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEliteMemeticOptimizer" + ).set_name("LLAMAQuantumLevyEliteMemeticOptimizer", register=True) +except Exception as e: + print("QuantumLevyEliteMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveDifferentialOptimizer import ( + QuantumLevyEnhancedAdaptiveDifferentialOptimizer, + ) + + lama_register["QuantumLevyEnhancedAdaptiveDifferentialOptimizer"] = ( + QuantumLevyEnhancedAdaptiveDifferentialOptimizer + ) + LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer" + ).set_name("LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer", register=True) +except Exception as e: + print("QuantumLevyEnhancedAdaptiveDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveOptimizerV2 import ( + QuantumLevyEnhancedAdaptiveOptimizerV2, + ) + + lama_register["QuantumLevyEnhancedAdaptiveOptimizerV2"] = QuantumLevyEnhancedAdaptiveOptimizerV2 + LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2" + ).set_name("LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2", register=True) +except Exception as e: + print("QuantumLevyEnhancedAdaptiveOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEnhancedDifferentialOptimizer import ( + QuantumLevyEnhancedDifferentialOptimizer, + ) + + lama_register["QuantumLevyEnhancedDifferentialOptimizer"] = QuantumLevyEnhancedDifferentialOptimizer + LLAMAQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedDifferentialOptimizer" + ).set_name("LLAMAQuantumLevyEnhancedDifferentialOptimizer", register=True) +except Exception as e: + print("QuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyEnhancedMemeticOptimizerV2 import ( + QuantumLevyEnhancedMemeticOptimizerV2, + ) + + lama_register["QuantumLevyEnhancedMemeticOptimizerV2"] = QuantumLevyEnhancedMemeticOptimizerV2 + LLAMAQuantumLevyEnhancedMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2" + ).set_name("LLAMAQuantumLevyEnhancedMemeticOptimizerV2", register=True) +except Exception as e: + print("QuantumLevyEnhancedMemeticOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyImprovedDifferentialSwarmOptimization import ( + QuantumLevyImprovedDifferentialSwarmOptimization, + ) + + lama_register["QuantumLevyImprovedDifferentialSwarmOptimization"] = ( + QuantumLevyImprovedDifferentialSwarmOptimization + ) + LLAMAQuantumLevyImprovedDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization" + ).set_name("LLAMAQuantumLevyImprovedDifferentialSwarmOptimization", register=True) +except Exception as e: + print("QuantumLevyImprovedDifferentialSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevyParticleAdaptiveOptimization import ( + QuantumLevyParticleAdaptiveOptimization, + ) + + lama_register["QuantumLevyParticleAdaptiveOptimization"] = QuantumLevyParticleAdaptiveOptimization + LLAMAQuantumLevyParticleAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyParticleAdaptiveOptimization" + ).set_name("LLAMAQuantumLevyParticleAdaptiveOptimization", register=True) +except Exception as e: + print("QuantumLevyParticleAdaptiveOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLevySwarmOptimizationV3 import QuantumLevySwarmOptimizationV3 + + lama_register["QuantumLevySwarmOptimizationV3"] = QuantumLevySwarmOptimizationV3 + LLAMAQuantumLevySwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumLevySwarmOptimizationV3" + ).set_name("LLAMAQuantumLevySwarmOptimizationV3", register=True) +except Exception as e: + print("QuantumLevySwarmOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLocustSearch import QuantumLocustSearch + + lama_register["QuantumLocustSearch"] = QuantumLocustSearch + LLAMAQuantumLocustSearch = NonObjectOptimizer(method="LLAMAQuantumLocustSearch").set_name( + "LLAMAQuantumLocustSearch", register=True + ) +except Exception as e: + print("QuantumLocustSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumLocustSearchV2 import QuantumLocustSearchV2 + + lama_register["QuantumLocustSearchV2"] = QuantumLocustSearchV2 + LLAMAQuantumLocustSearchV2 = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2").set_name( + "LLAMAQuantumLocustSearchV2", register=True + ) +except Exception as e: + print("QuantumLocustSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalAdaptiveCrossoverOptimizerV20 import ( + QuantumOrbitalAdaptiveCrossoverOptimizerV20, + ) + + lama_register["QuantumOrbitalAdaptiveCrossoverOptimizerV20"] = QuantumOrbitalAdaptiveCrossoverOptimizerV20 + LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20" + ).set_name("LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20", register=True) +except Exception as e: + print("QuantumOrbitalAdaptiveCrossoverOptimizerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV12 import QuantumOrbitalDynamicEnhancerV12 + + lama_register["QuantumOrbitalDynamicEnhancerV12"] = QuantumOrbitalDynamicEnhancerV12 + LLAMAQuantumOrbitalDynamicEnhancerV12 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV12" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV12", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV13 import QuantumOrbitalDynamicEnhancerV13 + + lama_register["QuantumOrbitalDynamicEnhancerV13"] = QuantumOrbitalDynamicEnhancerV13 + LLAMAQuantumOrbitalDynamicEnhancerV13 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV13" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV13", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV14 import QuantumOrbitalDynamicEnhancerV14 + + lama_register["QuantumOrbitalDynamicEnhancerV14"] = QuantumOrbitalDynamicEnhancerV14 + LLAMAQuantumOrbitalDynamicEnhancerV14 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV14" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV14", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV15 import QuantumOrbitalDynamicEnhancerV15 + + lama_register["QuantumOrbitalDynamicEnhancerV15"] = QuantumOrbitalDynamicEnhancerV15 + LLAMAQuantumOrbitalDynamicEnhancerV15 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV15" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV15", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV16 import QuantumOrbitalDynamicEnhancerV16 + + lama_register["QuantumOrbitalDynamicEnhancerV16"] = QuantumOrbitalDynamicEnhancerV16 + LLAMAQuantumOrbitalDynamicEnhancerV16 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV16" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV16", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV17 import QuantumOrbitalDynamicEnhancerV17 + + lama_register["QuantumOrbitalDynamicEnhancerV17"] = QuantumOrbitalDynamicEnhancerV17 + LLAMAQuantumOrbitalDynamicEnhancerV17 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV17" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV17", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV18 import QuantumOrbitalDynamicEnhancerV18 + + lama_register["QuantumOrbitalDynamicEnhancerV18"] = QuantumOrbitalDynamicEnhancerV18 + LLAMAQuantumOrbitalDynamicEnhancerV18 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV18" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV18", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV24 import QuantumOrbitalDynamicEnhancerV24 + + lama_register["QuantumOrbitalDynamicEnhancerV24"] = QuantumOrbitalDynamicEnhancerV24 + LLAMAQuantumOrbitalDynamicEnhancerV24 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV24" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV24", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV25 import QuantumOrbitalDynamicEnhancerV25 + + lama_register["QuantumOrbitalDynamicEnhancerV25"] = QuantumOrbitalDynamicEnhancerV25 + LLAMAQuantumOrbitalDynamicEnhancerV25 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV25" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV25", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV26 import QuantumOrbitalDynamicEnhancerV26 + + lama_register["QuantumOrbitalDynamicEnhancerV26"] = QuantumOrbitalDynamicEnhancerV26 + LLAMAQuantumOrbitalDynamicEnhancerV26 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV26" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV26", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV27 import QuantumOrbitalDynamicEnhancerV27 + + lama_register["QuantumOrbitalDynamicEnhancerV27"] = QuantumOrbitalDynamicEnhancerV27 + LLAMAQuantumOrbitalDynamicEnhancerV27 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV27" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV27", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV28 import QuantumOrbitalDynamicEnhancerV28 + + lama_register["QuantumOrbitalDynamicEnhancerV28"] = QuantumOrbitalDynamicEnhancerV28 + LLAMAQuantumOrbitalDynamicEnhancerV28 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV28" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV28", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV29 import QuantumOrbitalDynamicEnhancerV29 + + lama_register["QuantumOrbitalDynamicEnhancerV29"] = QuantumOrbitalDynamicEnhancerV29 + LLAMAQuantumOrbitalDynamicEnhancerV29 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV29" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV29", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV30 import QuantumOrbitalDynamicEnhancerV30 + + lama_register["QuantumOrbitalDynamicEnhancerV30"] = QuantumOrbitalDynamicEnhancerV30 + LLAMAQuantumOrbitalDynamicEnhancerV30 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV30" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV30", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV31 import QuantumOrbitalDynamicEnhancerV31 + + lama_register["QuantumOrbitalDynamicEnhancerV31"] = QuantumOrbitalDynamicEnhancerV31 + LLAMAQuantumOrbitalDynamicEnhancerV31 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV31" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV31", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV32 import QuantumOrbitalDynamicEnhancerV32 + + lama_register["QuantumOrbitalDynamicEnhancerV32"] = QuantumOrbitalDynamicEnhancerV32 + LLAMAQuantumOrbitalDynamicEnhancerV32 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV32" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV32", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV33 import QuantumOrbitalDynamicEnhancerV33 + + lama_register["QuantumOrbitalDynamicEnhancerV33"] = QuantumOrbitalDynamicEnhancerV33 + LLAMAQuantumOrbitalDynamicEnhancerV33 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV33" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV33", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV34 import QuantumOrbitalDynamicEnhancerV34 + + lama_register["QuantumOrbitalDynamicEnhancerV34"] = QuantumOrbitalDynamicEnhancerV34 + LLAMAQuantumOrbitalDynamicEnhancerV34 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV34" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV34", register=True) +except Exception as e: + print("QuantumOrbitalDynamicEnhancerV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalDynamicOptimizerV11 import ( + QuantumOrbitalDynamicOptimizerV11, + ) + + lama_register["QuantumOrbitalDynamicOptimizerV11"] = QuantumOrbitalDynamicOptimizerV11 + LLAMAQuantumOrbitalDynamicOptimizerV11 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicOptimizerV11" + ).set_name("LLAMAQuantumOrbitalDynamicOptimizerV11", register=True) +except Exception as e: + print("QuantumOrbitalDynamicOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalEnhancedCrossoverOptimizerV22 import ( + QuantumOrbitalEnhancedCrossoverOptimizerV22, + ) + + lama_register["QuantumOrbitalEnhancedCrossoverOptimizerV22"] = QuantumOrbitalEnhancedCrossoverOptimizerV22 + LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22" + ).set_name("LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22", register=True) +except Exception as e: + print("QuantumOrbitalEnhancedCrossoverOptimizerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalEnhancedDynamicEnhancerV19 import ( + QuantumOrbitalEnhancedDynamicEnhancerV19, + ) + + lama_register["QuantumOrbitalEnhancedDynamicEnhancerV19"] = QuantumOrbitalEnhancedDynamicEnhancerV19 + LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19" + ).set_name("LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19", register=True) +except Exception as e: + print("QuantumOrbitalEnhancedDynamicEnhancerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalHarmonicOptimizerV10 import ( + QuantumOrbitalHarmonicOptimizerV10, + ) + + lama_register["QuantumOrbitalHarmonicOptimizerV10"] = QuantumOrbitalHarmonicOptimizerV10 + LLAMAQuantumOrbitalHarmonicOptimizerV10 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalHarmonicOptimizerV10" + ).set_name("LLAMAQuantumOrbitalHarmonicOptimizerV10", register=True) +except Exception as e: + print("QuantumOrbitalHarmonicOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalPrecisionOptimizerV34 import ( + QuantumOrbitalPrecisionOptimizerV34, + ) + + lama_register["QuantumOrbitalPrecisionOptimizerV34"] = QuantumOrbitalPrecisionOptimizerV34 + LLAMAQuantumOrbitalPrecisionOptimizerV34 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalPrecisionOptimizerV34" + ).set_name("LLAMAQuantumOrbitalPrecisionOptimizerV34", register=True) +except Exception as e: + print("QuantumOrbitalPrecisionOptimizerV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV21 import ( + QuantumOrbitalRefinedCrossoverOptimizerV21, + ) + + lama_register["QuantumOrbitalRefinedCrossoverOptimizerV21"] = QuantumOrbitalRefinedCrossoverOptimizerV21 + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21" + ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21", register=True) +except Exception as e: + print("QuantumOrbitalRefinedCrossoverOptimizerV21 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV23 import ( + QuantumOrbitalRefinedCrossoverOptimizerV23, + ) + + lama_register["QuantumOrbitalRefinedCrossoverOptimizerV23"] = QuantumOrbitalRefinedCrossoverOptimizerV23 + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23" + ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23", register=True) +except Exception as e: + print("QuantumOrbitalRefinedCrossoverOptimizerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumParticleSwarmDifferentialEvolution import ( + QuantumParticleSwarmDifferentialEvolution, + ) + + lama_register["QuantumParticleSwarmDifferentialEvolution"] = QuantumParticleSwarmDifferentialEvolution + LLAMAQuantumParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumParticleSwarmDifferentialEvolution" + ).set_name("LLAMAQuantumParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("QuantumParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumParticleSwarmOptimization import QuantumParticleSwarmOptimization + + lama_register["QuantumParticleSwarmOptimization"] = QuantumParticleSwarmOptimization + LLAMAQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumParticleSwarmOptimization" + ).set_name("LLAMAQuantumParticleSwarmOptimization", register=True) +except Exception as e: + print("QuantumParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumReactiveCooperativeStrategy import ( + QuantumReactiveCooperativeStrategy, + ) + + lama_register["QuantumReactiveCooperativeStrategy"] = QuantumReactiveCooperativeStrategy + LLAMAQuantumReactiveCooperativeStrategy = NonObjectOptimizer( + method="LLAMAQuantumReactiveCooperativeStrategy" + ).set_name("LLAMAQuantumReactiveCooperativeStrategy", register=True) +except Exception as e: + print("QuantumReactiveCooperativeStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumRefinedAdaptiveExplorationOptimization import ( + QuantumRefinedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumRefinedAdaptiveExplorationOptimization"] = ( + QuantumRefinedAdaptiveExplorationOptimization + ) + LLAMAQuantumRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumRefinedAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("QuantumRefinedAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumRefinedAdaptiveHybridStrategyV5 import ( + QuantumRefinedAdaptiveHybridStrategyV5, + ) + + lama_register["QuantumRefinedAdaptiveHybridStrategyV5"] = QuantumRefinedAdaptiveHybridStrategyV5 + LLAMAQuantumRefinedAdaptiveHybridStrategyV5 = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5" + ).set_name("LLAMAQuantumRefinedAdaptiveHybridStrategyV5", register=True) +except Exception as e: + print("QuantumRefinedAdaptiveHybridStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumRefinedAdaptiveStrategicOptimizer import ( + QuantumRefinedAdaptiveStrategicOptimizer, + ) + + lama_register["QuantumRefinedAdaptiveStrategicOptimizer"] = QuantumRefinedAdaptiveStrategicOptimizer + LLAMAQuantumRefinedAdaptiveStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer" + ).set_name("LLAMAQuantumRefinedAdaptiveStrategicOptimizer", register=True) +except Exception as e: + print("QuantumRefinedAdaptiveStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumRefinedDynamicAdaptiveHybridDEPSO import ( + QuantumRefinedDynamicAdaptiveHybridDEPSO, + ) + + lama_register["QuantumRefinedDynamicAdaptiveHybridDEPSO"] = QuantumRefinedDynamicAdaptiveHybridDEPSO + LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("QuantumRefinedDynamicAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumReinforcedNesterovAccelerator import ( + QuantumReinforcedNesterovAccelerator, + ) + + lama_register["QuantumReinforcedNesterovAccelerator"] = QuantumReinforcedNesterovAccelerator + LLAMAQuantumReinforcedNesterovAccelerator = NonObjectOptimizer( + method="LLAMAQuantumReinforcedNesterovAccelerator" + ).set_name("LLAMAQuantumReinforcedNesterovAccelerator", register=True) +except Exception as e: + print("QuantumReinforcedNesterovAccelerator can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumResonanceEvolutionaryStrategy import ( + QuantumResonanceEvolutionaryStrategy, + ) + + lama_register["QuantumResonanceEvolutionaryStrategy"] = QuantumResonanceEvolutionaryStrategy + LLAMAQuantumResonanceEvolutionaryStrategy = NonObjectOptimizer( + method="LLAMAQuantumResonanceEvolutionaryStrategy" + ).set_name("LLAMAQuantumResonanceEvolutionaryStrategy", register=True) +except Exception as e: + print("QuantumResonanceEvolutionaryStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSearch import QuantumSearch + + lama_register["QuantumSearch"] = QuantumSearch + LLAMAQuantumSearch = NonObjectOptimizer(method="LLAMAQuantumSearch").set_name( + "LLAMAQuantumSearch", register=True + ) +except Exception as e: + print("QuantumSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSimulatedAnnealing import QuantumSimulatedAnnealing + + lama_register["QuantumSimulatedAnnealing"] = QuantumSimulatedAnnealing + LLAMAQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing").set_name( + "LLAMAQuantumSimulatedAnnealing", register=True + ) +except Exception as e: + print("QuantumSimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSimulatedAnnealingHybridOptimizer import ( + QuantumSimulatedAnnealingHybridOptimizer, + ) + + lama_register["QuantumSimulatedAnnealingHybridOptimizer"] = QuantumSimulatedAnnealingHybridOptimizer + LLAMAQuantumSimulatedAnnealingHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumSimulatedAnnealingHybridOptimizer" + ).set_name("LLAMAQuantumSimulatedAnnealingHybridOptimizer", register=True) +except Exception as e: + print("QuantumSimulatedAnnealingHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSimulatedAnnealingImproved import ( + QuantumSimulatedAnnealingImproved, + ) + + lama_register["QuantumSimulatedAnnealingImproved"] = QuantumSimulatedAnnealingImproved + LLAMAQuantumSimulatedAnnealingImproved = NonObjectOptimizer( + method="LLAMAQuantumSimulatedAnnealingImproved" + ).set_name("LLAMAQuantumSimulatedAnnealingImproved", register=True) +except Exception as e: + print("QuantumSimulatedAnnealingImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralAdaptiveHybridStrategy import ( + QuantumSpectralAdaptiveHybridStrategy, + ) + + lama_register["QuantumSpectralAdaptiveHybridStrategy"] = QuantumSpectralAdaptiveHybridStrategy + LLAMAQuantumSpectralAdaptiveHybridStrategy = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveHybridStrategy" + ).set_name("LLAMAQuantumSpectralAdaptiveHybridStrategy", register=True) +except Exception as e: + print("QuantumSpectralAdaptiveHybridStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV2 import ( + QuantumSpectralAdaptiveOptimizerV2, + ) + + lama_register["QuantumSpectralAdaptiveOptimizerV2"] = QuantumSpectralAdaptiveOptimizerV2 + LLAMAQuantumSpectralAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveOptimizerV2" + ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV2", register=True) +except Exception as e: + print("QuantumSpectralAdaptiveOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV3 import ( + QuantumSpectralAdaptiveOptimizerV3, + ) + + lama_register["QuantumSpectralAdaptiveOptimizerV3"] = QuantumSpectralAdaptiveOptimizerV3 + LLAMAQuantumSpectralAdaptiveOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveOptimizerV3" + ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV3", register=True) +except Exception as e: + print("QuantumSpectralAdaptiveOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralDynamicOptimizer import QuantumSpectralDynamicOptimizer + + lama_register["QuantumSpectralDynamicOptimizer"] = QuantumSpectralDynamicOptimizer + LLAMAQuantumSpectralDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumSpectralDynamicOptimizer" + ).set_name("LLAMAQuantumSpectralDynamicOptimizer", register=True) +except Exception as e: + print("QuantumSpectralDynamicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralEnhancedOptimizerV5 import ( + QuantumSpectralEnhancedOptimizerV5, + ) + + lama_register["QuantumSpectralEnhancedOptimizerV5"] = QuantumSpectralEnhancedOptimizerV5 + LLAMAQuantumSpectralEnhancedOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumSpectralEnhancedOptimizerV5" + ).set_name("LLAMAQuantumSpectralEnhancedOptimizerV5", register=True) +except Exception as e: + print("QuantumSpectralEnhancedOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSpectralRefinedOptimizerV4 import ( + QuantumSpectralRefinedOptimizerV4, + ) + + lama_register["QuantumSpectralRefinedOptimizerV4"] = QuantumSpectralRefinedOptimizerV4 + LLAMAQuantumSpectralRefinedOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumSpectralRefinedOptimizerV4" + ).set_name("LLAMAQuantumSpectralRefinedOptimizerV4", register=True) +except Exception as e: + print("QuantumSpectralRefinedOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStabilizedDynamicBalanceOptimizer import ( + QuantumStabilizedDynamicBalanceOptimizer, + ) + + lama_register["QuantumStabilizedDynamicBalanceOptimizer"] = QuantumStabilizedDynamicBalanceOptimizer + LLAMAQuantumStabilizedDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStabilizedDynamicBalanceOptimizer" + ).set_name("LLAMAQuantumStabilizedDynamicBalanceOptimizer", register=True) +except Exception as e: + print("QuantumStabilizedDynamicBalanceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStateConvergenceOptimizer import QuantumStateConvergenceOptimizer + + lama_register["QuantumStateConvergenceOptimizer"] = QuantumStateConvergenceOptimizer + LLAMAQuantumStateConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStateConvergenceOptimizer" + ).set_name("LLAMAQuantumStateConvergenceOptimizer", register=True) +except Exception as e: + print("QuantumStateConvergenceOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStateCrossoverOptimization import ( + QuantumStateCrossoverOptimization, + ) + + lama_register["QuantumStateCrossoverOptimization"] = QuantumStateCrossoverOptimization + LLAMAQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAQuantumStateCrossoverOptimization" + ).set_name("LLAMAQuantumStateCrossoverOptimization", register=True) +except Exception as e: + print("QuantumStateCrossoverOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStateHybridStrategy import QuantumStateHybridStrategy + + lama_register["QuantumStateHybridStrategy"] = QuantumStateHybridStrategy + LLAMAQuantumStateHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy").set_name( + "LLAMAQuantumStateHybridStrategy", register=True + ) +except Exception as e: + print("QuantumStateHybridStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStateRefinedHybridStrategy import ( + QuantumStateRefinedHybridStrategy, + ) + + lama_register["QuantumStateRefinedHybridStrategy"] = QuantumStateRefinedHybridStrategy + LLAMAQuantumStateRefinedHybridStrategy = NonObjectOptimizer( + method="LLAMAQuantumStateRefinedHybridStrategy" + ).set_name("LLAMAQuantumStateRefinedHybridStrategy", register=True) +except Exception as e: + print("QuantumStateRefinedHybridStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStochasticGradientDescentFireworks import ( + QuantumStochasticGradientDescentFireworks, + ) + + lama_register["QuantumStochasticGradientDescentFireworks"] = QuantumStochasticGradientDescentFireworks + LLAMAQuantumStochasticGradientDescentFireworks = NonObjectOptimizer( + method="LLAMAQuantumStochasticGradientDescentFireworks" + ).set_name("LLAMAQuantumStochasticGradientDescentFireworks", register=True) +except Exception as e: + print("QuantumStochasticGradientDescentFireworks can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumStochasticGradientOptimizer import ( + QuantumStochasticGradientOptimizer, + ) + + lama_register["QuantumStochasticGradientOptimizer"] = QuantumStochasticGradientOptimizer + LLAMAQuantumStochasticGradientOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStochasticGradientOptimizer" + ).set_name("LLAMAQuantumStochasticGradientOptimizer", register=True) +except Exception as e: + print("QuantumStochasticGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSwarmOptimization import QuantumSwarmOptimization + + lama_register["QuantumSwarmOptimization"] = QuantumSwarmOptimization + LLAMAQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization").set_name( + "LLAMAQuantumSwarmOptimization", register=True + ) +except Exception as e: + print("QuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSwarmOptimizationImproved import QuantumSwarmOptimizationImproved + + lama_register["QuantumSwarmOptimizationImproved"] = QuantumSwarmOptimizationImproved + LLAMAQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMAQuantumSwarmOptimizationImproved" + ).set_name("LLAMAQuantumSwarmOptimizationImproved", register=True) +except Exception as e: + print("QuantumSwarmOptimizationImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumSymbioticEnhancedStrategyV3 import ( + QuantumSymbioticEnhancedStrategyV3, + ) + + lama_register["QuantumSymbioticEnhancedStrategyV3"] = QuantumSymbioticEnhancedStrategyV3 + LLAMAQuantumSymbioticEnhancedStrategyV3 = NonObjectOptimizer( + method="LLAMAQuantumSymbioticEnhancedStrategyV3" + ).set_name("LLAMAQuantumSymbioticEnhancedStrategyV3", register=True) +except Exception as e: + print("QuantumSymbioticEnhancedStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunedGradientSearchV2 import QuantumTunedGradientSearchV2 + + lama_register["QuantumTunedGradientSearchV2"] = QuantumTunedGradientSearchV2 + LLAMAQuantumTunedGradientSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumTunedGradientSearchV2" + ).set_name("LLAMAQuantumTunedGradientSearchV2", register=True) +except Exception as e: + print("QuantumTunedGradientSearchV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizer import QuantumTunnelingOptimizer + + lama_register["QuantumTunnelingOptimizer"] = QuantumTunnelingOptimizer + LLAMAQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer").set_name( + "LLAMAQuantumTunnelingOptimizer", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV10 import QuantumTunnelingOptimizerV10 + + lama_register["QuantumTunnelingOptimizerV10"] = QuantumTunnelingOptimizerV10 + LLAMAQuantumTunnelingOptimizerV10 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV10" + ).set_name("LLAMAQuantumTunnelingOptimizerV10", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV11 import QuantumTunnelingOptimizerV11 + + lama_register["QuantumTunnelingOptimizerV11"] = QuantumTunnelingOptimizerV11 + LLAMAQuantumTunnelingOptimizerV11 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV11" + ).set_name("LLAMAQuantumTunnelingOptimizerV11", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV12 import QuantumTunnelingOptimizerV12 + + lama_register["QuantumTunnelingOptimizerV12"] = QuantumTunnelingOptimizerV12 + LLAMAQuantumTunnelingOptimizerV12 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV12" + ).set_name("LLAMAQuantumTunnelingOptimizerV12", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV13 import QuantumTunnelingOptimizerV13 + + lama_register["QuantumTunnelingOptimizerV13"] = QuantumTunnelingOptimizerV13 + LLAMAQuantumTunnelingOptimizerV13 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV13" + ).set_name("LLAMAQuantumTunnelingOptimizerV13", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV14 import QuantumTunnelingOptimizerV14 + + lama_register["QuantumTunnelingOptimizerV14"] = QuantumTunnelingOptimizerV14 + LLAMAQuantumTunnelingOptimizerV14 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV14" + ).set_name("LLAMAQuantumTunnelingOptimizerV14", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV15 import QuantumTunnelingOptimizerV15 + + lama_register["QuantumTunnelingOptimizerV15"] = QuantumTunnelingOptimizerV15 + LLAMAQuantumTunnelingOptimizerV15 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV15" + ).set_name("LLAMAQuantumTunnelingOptimizerV15", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV16 import QuantumTunnelingOptimizerV16 + + lama_register["QuantumTunnelingOptimizerV16"] = QuantumTunnelingOptimizerV16 + LLAMAQuantumTunnelingOptimizerV16 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV16" + ).set_name("LLAMAQuantumTunnelingOptimizerV16", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV17 import QuantumTunnelingOptimizerV17 + + lama_register["QuantumTunnelingOptimizerV17"] = QuantumTunnelingOptimizerV17 + LLAMAQuantumTunnelingOptimizerV17 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV17" + ).set_name("LLAMAQuantumTunnelingOptimizerV17", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV18 import QuantumTunnelingOptimizerV18 + + lama_register["QuantumTunnelingOptimizerV18"] = QuantumTunnelingOptimizerV18 + LLAMAQuantumTunnelingOptimizerV18 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV18" + ).set_name("LLAMAQuantumTunnelingOptimizerV18", register=True) +except Exception as e: + print("QuantumTunnelingOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV2 import QuantumTunnelingOptimizerV2 + + lama_register["QuantumTunnelingOptimizerV2"] = QuantumTunnelingOptimizerV2 + LLAMAQuantumTunnelingOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2").set_name( + "LLAMAQuantumTunnelingOptimizerV2", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV3 import QuantumTunnelingOptimizerV3 + + lama_register["QuantumTunnelingOptimizerV3"] = QuantumTunnelingOptimizerV3 + LLAMAQuantumTunnelingOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3").set_name( + "LLAMAQuantumTunnelingOptimizerV3", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV4 import QuantumTunnelingOptimizerV4 + + lama_register["QuantumTunnelingOptimizerV4"] = QuantumTunnelingOptimizerV4 + LLAMAQuantumTunnelingOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4").set_name( + "LLAMAQuantumTunnelingOptimizerV4", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV5 import QuantumTunnelingOptimizerV5 + + lama_register["QuantumTunnelingOptimizerV5"] = QuantumTunnelingOptimizerV5 + LLAMAQuantumTunnelingOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5").set_name( + "LLAMAQuantumTunnelingOptimizerV5", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV6 import QuantumTunnelingOptimizerV6 + + lama_register["QuantumTunnelingOptimizerV6"] = QuantumTunnelingOptimizerV6 + LLAMAQuantumTunnelingOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6").set_name( + "LLAMAQuantumTunnelingOptimizerV6", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV7 import QuantumTunnelingOptimizerV7 + + lama_register["QuantumTunnelingOptimizerV7"] = QuantumTunnelingOptimizerV7 + LLAMAQuantumTunnelingOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7").set_name( + "LLAMAQuantumTunnelingOptimizerV7", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV8 import QuantumTunnelingOptimizerV8 + + lama_register["QuantumTunnelingOptimizerV8"] = QuantumTunnelingOptimizerV8 + LLAMAQuantumTunnelingOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8").set_name( + "LLAMAQuantumTunnelingOptimizerV8", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.QuantumTunnelingOptimizerV9 import QuantumTunnelingOptimizerV9 + + lama_register["QuantumTunnelingOptimizerV9"] = QuantumTunnelingOptimizerV9 + LLAMAQuantumTunnelingOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9").set_name( + "LLAMAQuantumTunnelingOptimizerV9", register=True + ) +except Exception as e: + print("QuantumTunnelingOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADE import RADE + + lama_register["RADE"] = RADE + LLAMARADE = NonObjectOptimizer(method="LLAMARADE").set_name("LLAMARADE", register=True) +except Exception as e: + print("RADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADEA import RADEA + + lama_register["RADEA"] = RADEA + LLAMARADEA = NonObjectOptimizer(method="LLAMARADEA").set_name("LLAMARADEA", register=True) +except Exception as e: + print("RADEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADECM import RADECM + + lama_register["RADECM"] = RADECM + LLAMARADECM = NonObjectOptimizer(method="LLAMARADECM").set_name("LLAMARADECM", register=True) +except Exception as e: + print("RADECM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADEDM import RADEDM + + lama_register["RADEDM"] = RADEDM + LLAMARADEDM = NonObjectOptimizer(method="LLAMARADEDM").set_name("LLAMARADEDM", register=True) +except Exception as e: + print("RADEDM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADEEM import RADEEM + + lama_register["RADEEM"] = RADEEM + LLAMARADEEM = NonObjectOptimizer(method="LLAMARADEEM").set_name("LLAMARADEEM", register=True) +except Exception as e: + print("RADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADEPM import RADEPM + + lama_register["RADEPM"] = RADEPM + LLAMARADEPM = NonObjectOptimizer(method="LLAMARADEPM").set_name("LLAMARADEPM", register=True) +except Exception as e: + print("RADEPM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RADSDiffEvo import RADSDiffEvo + + lama_register["RADSDiffEvo"] = RADSDiffEvo + LLAMARADSDiffEvo = NonObjectOptimizer(method="LLAMARADSDiffEvo").set_name( + "LLAMARADSDiffEvo", register=True + ) +except Exception as e: + print("RADSDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAGCES import RAGCES + + lama_register["RAGCES"] = RAGCES + LLAMARAGCES = NonObjectOptimizer(method="LLAMARAGCES").set_name("LLAMARAGCES", register=True) +except Exception as e: + print("RAGCES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAGEA import RAGEA + + lama_register["RAGEA"] = RAGEA + LLAMARAGEA = NonObjectOptimizer(method="LLAMARAGEA").set_name("LLAMARAGEA", register=True) +except Exception as e: + print("RAGEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAHDEMI import RAHDEMI + + lama_register["RAHDEMI"] = RAHDEMI + LLAMARAHDEMI = NonObjectOptimizer(method="LLAMARAHDEMI").set_name("LLAMARAHDEMI", register=True) +except Exception as e: + print("RAHDEMI can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RALES import RALES + + lama_register["RALES"] = RALES + LLAMARALES = NonObjectOptimizer(method="LLAMARALES").set_name("LLAMARALES", register=True) +except Exception as e: + print("RALES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAMDE import RAMDE + + lama_register["RAMDE"] = RAMDE + LLAMARAMDE = NonObjectOptimizer(method="LLAMARAMDE").set_name("LLAMARAMDE", register=True) +except Exception as e: + print("RAMDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAMEDS import RAMEDS + + lama_register["RAMEDS"] = RAMEDS + LLAMARAMEDS = NonObjectOptimizer(method="LLAMARAMEDS").set_name("LLAMARAMEDS", register=True) +except Exception as e: + print("RAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAMEDSPlus import RAMEDSPlus + + lama_register["RAMEDSPlus"] = RAMEDSPlus + LLAMARAMEDSPlus = NonObjectOptimizer(method="LLAMARAMEDSPlus").set_name("LLAMARAMEDSPlus", register=True) +except Exception as e: + print("RAMEDSPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAMEDSPro import RAMEDSPro + + lama_register["RAMEDSPro"] = RAMEDSPro + LLAMARAMEDSPro = NonObjectOptimizer(method="LLAMARAMEDSPro").set_name("LLAMARAMEDSPro", register=True) +except Exception as e: + print("RAMEDSPro can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAMSDiffEvo import RAMSDiffEvo + + lama_register["RAMSDiffEvo"] = RAMSDiffEvo + LLAMARAMSDiffEvo = NonObjectOptimizer(method="LLAMARAMSDiffEvo").set_name( + "LLAMARAMSDiffEvo", register=True + ) +except Exception as e: + print("RAMSDiffEvo can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAPDE import RAPDE + + lama_register["RAPDE"] = RAPDE + LLAMARAPDE = NonObjectOptimizer(method="LLAMARAPDE").set_name("LLAMARAPDE", register=True) +except Exception as e: + print("RAPDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RASES import RASES + + lama_register["RASES"] = RASES + LLAMARASES = NonObjectOptimizer(method="LLAMARASES").set_name("LLAMARASES", register=True) +except Exception as e: + print("RASES can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RAVDE import RAVDE + + lama_register["RAVDE"] = RAVDE + LLAMARAVDE = NonObjectOptimizer(method="LLAMARAVDE").set_name("LLAMARAVDE", register=True) +except Exception as e: + print("RAVDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RDACE import RDACE + + lama_register["RDACE"] = RDACE + LLAMARDACE = NonObjectOptimizer(method="LLAMARDACE").set_name("LLAMARDACE", register=True) +except Exception as e: + print("RDACE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RDSAS import RDSAS + + lama_register["RDSAS"] = RDSAS + LLAMARDSAS = NonObjectOptimizer(method="LLAMARDSAS").set_name("LLAMARDSAS", register=True) +except Exception as e: + print("RDSAS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.READEPMC import READEPMC + + lama_register["READEPMC"] = READEPMC + LLAMAREADEPMC = NonObjectOptimizer(method="LLAMAREADEPMC").set_name("LLAMAREADEPMC", register=True) +except Exception as e: + print("READEPMC can not be imported: ", e) + +try: + from nevergrad.optimization.lama.REAMSEA import REAMSEA + + lama_register["REAMSEA"] = REAMSEA + LLAMAREAMSEA = NonObjectOptimizer(method="LLAMAREAMSEA").set_name("LLAMAREAMSEA", register=True) +except Exception as e: + print("REAMSEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RE_ADMMMS import RE_ADMMMS + + lama_register["RE_ADMMMS"] = RE_ADMMMS + LLAMARE_ADMMMS = NonObjectOptimizer(method="LLAMARE_ADMMMS").set_name("LLAMARE_ADMMMS", register=True) +except Exception as e: + print("RE_ADMMMS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RPWDE import RPWDE + + lama_register["RPWDE"] = RPWDE + LLAMARPWDE = NonObjectOptimizer(method="LLAMARPWDE").set_name("LLAMARPWDE", register=True) +except Exception as e: + print("RPWDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RankingDifferentialEvolution import RankingDifferentialEvolution + + lama_register["RankingDifferentialEvolution"] = RankingDifferentialEvolution + LLAMARankingDifferentialEvolution = NonObjectOptimizer( + method="LLAMARankingDifferentialEvolution" + ).set_name("LLAMARankingDifferentialEvolution", register=True) +except Exception as e: + print("RankingDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveClusteredDifferentialEvolution import ( + RefinedAdaptiveClusteredDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveClusteredDifferentialEvolution"] = ( + RefinedAdaptiveClusteredDifferentialEvolution + ) + LLAMARefinedAdaptiveClusteredDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveClusteredDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveClusteredDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveClusteredDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixAdaptation import ( + RefinedAdaptiveCovarianceMatrixAdaptation, + ) + + lama_register["RefinedAdaptiveCovarianceMatrixAdaptation"] = RefinedAdaptiveCovarianceMatrixAdaptation + LLAMARefinedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMARefinedAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: + print("RefinedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixEvolution import ( + RefinedAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["RefinedAdaptiveCovarianceMatrixEvolution"] = RefinedAdaptiveCovarianceMatrixEvolution + LLAMARefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMARefinedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveCrossoverElitistStrategyV7 import ( + RefinedAdaptiveCrossoverElitistStrategyV7, + ) + + lama_register["RefinedAdaptiveCrossoverElitistStrategyV7"] = RefinedAdaptiveCrossoverElitistStrategyV7 + LLAMARefinedAdaptiveCrossoverElitistStrategyV7 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7" + ).set_name("LLAMARefinedAdaptiveCrossoverElitistStrategyV7", register=True) +except Exception as e: + print("RefinedAdaptiveCrossoverElitistStrategyV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolution import ( + RefinedAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveDifferentialEvolution"] = RefinedAdaptiveDifferentialEvolution + LLAMARefinedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionStrategy import ( + RefinedAdaptiveDifferentialEvolutionStrategy, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionStrategy"] = ( + RefinedAdaptiveDifferentialEvolutionStrategy + ) + LLAMARefinedAdaptiveDifferentialEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( + RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( + RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation + ) + LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( + RefinedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + RefinedAdaptiveDifferentialEvolutionWithGradientBoost + ) + LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSearch import ( + RefinedAdaptiveDifferentialSearch, + ) + + lama_register["RefinedAdaptiveDifferentialSearch"] = RefinedAdaptiveDifferentialSearch + LLAMARefinedAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialSearch" + ).set_name("LLAMARefinedAdaptiveDifferentialSearch", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSpiralSearch import ( + RefinedAdaptiveDifferentialSpiralSearch, + ) + + lama_register["RefinedAdaptiveDifferentialSpiralSearch"] = RefinedAdaptiveDifferentialSpiralSearch + LLAMARefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMARefinedAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: + print("RefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalClimbingStrategy import ( + RefinedAdaptiveDimensionalClimbingStrategy, + ) + + lama_register["RefinedAdaptiveDimensionalClimbingStrategy"] = RefinedAdaptiveDimensionalClimbingStrategy + LLAMARefinedAdaptiveDimensionalClimbingStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDimensionalClimbingStrategy" + ).set_name("LLAMARefinedAdaptiveDimensionalClimbingStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveDimensionalClimbingStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalCrossoverEvolver import ( + RefinedAdaptiveDimensionalCrossoverEvolver, + ) + + lama_register["RefinedAdaptiveDimensionalCrossoverEvolver"] = RefinedAdaptiveDimensionalCrossoverEvolver + LLAMARefinedAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver" + ).set_name("LLAMARefinedAdaptiveDimensionalCrossoverEvolver", register=True) +except Exception as e: + print("RefinedAdaptiveDimensionalCrossoverEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDirectionalBiasQuorumOptimization import ( + RefinedAdaptiveDirectionalBiasQuorumOptimization, + ) + + lama_register["RefinedAdaptiveDirectionalBiasQuorumOptimization"] = ( + RefinedAdaptiveDirectionalBiasQuorumOptimization + ) + LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDivergenceClusteringSearch import ( + RefinedAdaptiveDivergenceClusteringSearch, + ) + + lama_register["RefinedAdaptiveDivergenceClusteringSearch"] = RefinedAdaptiveDivergenceClusteringSearch + LLAMARefinedAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDivergenceClusteringSearch" + ).set_name("LLAMARefinedAdaptiveDivergenceClusteringSearch", register=True) +except Exception as e: + print("RefinedAdaptiveDivergenceClusteringSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDiversityPSO import RefinedAdaptiveDiversityPSO + + lama_register["RefinedAdaptiveDiversityPSO"] = RefinedAdaptiveDiversityPSO + LLAMARefinedAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO").set_name( + "LLAMARefinedAdaptiveDiversityPSO", register=True + ) +except Exception as e: + print("RefinedAdaptiveDiversityPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategy import RefinedAdaptiveDualPhaseStrategy + + lama_register["RefinedAdaptiveDualPhaseStrategy"] = RefinedAdaptiveDualPhaseStrategy + LLAMARefinedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDualPhaseStrategy" + ).set_name("LLAMARefinedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveDualPhaseStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategyV3 import ( + RefinedAdaptiveDualPhaseStrategyV3, + ) + + lama_register["RefinedAdaptiveDualPhaseStrategyV3"] = RefinedAdaptiveDualPhaseStrategyV3 + LLAMARefinedAdaptiveDualPhaseStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDualPhaseStrategyV3" + ).set_name("LLAMARefinedAdaptiveDualPhaseStrategyV3", register=True) +except Exception as e: + print("RefinedAdaptiveDualPhaseStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDE import RefinedAdaptiveDynamicDE + + lama_register["RefinedAdaptiveDynamicDE"] = RefinedAdaptiveDynamicDE + LLAMARefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE").set_name( + "LLAMARefinedAdaptiveDynamicDE", register=True + ) +except Exception as e: + print("RefinedAdaptiveDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV14 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV14, + ) + + lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV14"] = RefinedAdaptiveDynamicDualPhaseStrategyV14 + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicDualPhaseStrategyV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV17 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV17, + ) + + lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV17"] = RefinedAdaptiveDynamicDualPhaseStrategyV17 + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicDualPhaseStrategyV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV20 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV20, + ) + + lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV20"] = RefinedAdaptiveDynamicDualPhaseStrategyV20 + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicDualPhaseStrategyV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicExplorationOptimization import ( + RefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["RefinedAdaptiveDynamicExplorationOptimization"] = ( + RefinedAdaptiveDynamicExplorationOptimization + ) + LLAMARefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMARefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) + + lama_register["RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( + RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm + ) + LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveDynamicStrategyV25 import ( + RefinedAdaptiveDynamicStrategyV25, + ) + + lama_register["RefinedAdaptiveDynamicStrategyV25"] = RefinedAdaptiveDynamicStrategyV25 + LLAMARefinedAdaptiveDynamicStrategyV25 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicStrategyV25" + ).set_name("LLAMARefinedAdaptiveDynamicStrategyV25", register=True) +except Exception as e: + print("RefinedAdaptiveDynamicStrategyV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedDE import RefinedAdaptiveEliteGuidedDE + + lama_register["RefinedAdaptiveEliteGuidedDE"] = RefinedAdaptiveEliteGuidedDE + LLAMARefinedAdaptiveEliteGuidedDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedDE" + ).set_name("LLAMARefinedAdaptiveEliteGuidedDE", register=True) +except Exception as e: + print("RefinedAdaptiveEliteGuidedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE import ( + RefinedAdaptiveEliteGuidedMutationDE, + ) + + lama_register["RefinedAdaptiveEliteGuidedMutationDE"] = RefinedAdaptiveEliteGuidedMutationDE + LLAMARefinedAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedMutationDE" + ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE", register=True) +except Exception as e: + print("RefinedAdaptiveEliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE_v5 import ( + RefinedAdaptiveEliteGuidedMutationDE_v5, + ) + + lama_register["RefinedAdaptiveEliteGuidedMutationDE_v5"] = RefinedAdaptiveEliteGuidedMutationDE_v5 + LLAMARefinedAdaptiveEliteGuidedMutationDE_v5 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5" + ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE_v5", register=True) +except Exception as e: + print("RefinedAdaptiveEliteGuidedMutationDE_v5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveElitistDE_v4 import RefinedAdaptiveElitistDE_v4 + + lama_register["RefinedAdaptiveElitistDE_v4"] = RefinedAdaptiveElitistDE_v4 + LLAMARefinedAdaptiveElitistDE_v4 = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4").set_name( + "LLAMARefinedAdaptiveElitistDE_v4", register=True + ) +except Exception as e: + print("RefinedAdaptiveElitistDE_v4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( + RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch, + ) + + lama_register["RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( + RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch + ) + LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: + print("RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( + RefinedAdaptiveEnhancedGradientGuidedHybridPSO, + ) + + lama_register["RefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( + RefinedAdaptiveEnhancedGradientGuidedHybridPSO + ) + LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: + print("RefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 import ( + RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2, + ) + + lama_register["RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2"] = ( + RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 + ) + LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2" + ).set_name("LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2", register=True) +except Exception as e: + print("RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveEvolutionStrategy import RefinedAdaptiveEvolutionStrategy + + lama_register["RefinedAdaptiveEvolutionStrategy"] = RefinedAdaptiveEvolutionStrategy + LLAMARefinedAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveExplorationOptimizer import ( + RefinedAdaptiveExplorationOptimizer, + ) + + lama_register["RefinedAdaptiveExplorationOptimizer"] = RefinedAdaptiveExplorationOptimizer + LLAMARefinedAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveExplorationOptimizer" + ).set_name("LLAMARefinedAdaptiveExplorationOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingOptimizerV5 import ( + RefinedAdaptiveGlobalClimbingOptimizerV5, + ) + + lama_register["RefinedAdaptiveGlobalClimbingOptimizerV5"] = RefinedAdaptiveGlobalClimbingOptimizerV5 + LLAMARefinedAdaptiveGlobalClimbingOptimizerV5 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5" + ).set_name("LLAMARefinedAdaptiveGlobalClimbingOptimizerV5", register=True) +except Exception as e: + print("RefinedAdaptiveGlobalClimbingOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingStrategy import ( + RefinedAdaptiveGlobalClimbingStrategy, + ) + + lama_register["RefinedAdaptiveGlobalClimbingStrategy"] = RefinedAdaptiveGlobalClimbingStrategy + LLAMARefinedAdaptiveGlobalClimbingStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGlobalClimbingStrategy" + ).set_name("LLAMARefinedAdaptiveGlobalClimbingStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveGlobalClimbingStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientCrossover import RefinedAdaptiveGradientCrossover + + lama_register["RefinedAdaptiveGradientCrossover"] = RefinedAdaptiveGradientCrossover + LLAMARefinedAdaptiveGradientCrossover = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientCrossover" + ).set_name("LLAMARefinedAdaptiveGradientCrossover", register=True) +except Exception as e: + print("RefinedAdaptiveGradientCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientDifferentialEvolution import ( + RefinedAdaptiveGradientDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveGradientDifferentialEvolution"] = ( + RefinedAdaptiveGradientDifferentialEvolution + ) + LLAMARefinedAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveGradientDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientEnhancedRAMEDS import ( + RefinedAdaptiveGradientEnhancedRAMEDS, + ) + + lama_register["RefinedAdaptiveGradientEnhancedRAMEDS"] = RefinedAdaptiveGradientEnhancedRAMEDS + LLAMARefinedAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS" + ).set_name("LLAMARefinedAdaptiveGradientEnhancedRAMEDS", register=True) +except Exception as e: + print("RefinedAdaptiveGradientEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientEvolverV2 import RefinedAdaptiveGradientEvolverV2 + + lama_register["RefinedAdaptiveGradientEvolverV2"] = RefinedAdaptiveGradientEvolverV2 + LLAMARefinedAdaptiveGradientEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientEvolverV2" + ).set_name("LLAMARefinedAdaptiveGradientEvolverV2", register=True) +except Exception as e: + print("RefinedAdaptiveGradientEvolverV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientGuidedEvolution import ( + RefinedAdaptiveGradientGuidedEvolution, + ) + + lama_register["RefinedAdaptiveGradientGuidedEvolution"] = RefinedAdaptiveGradientGuidedEvolution + LLAMARefinedAdaptiveGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientGuidedEvolution" + ).set_name("LLAMARefinedAdaptiveGradientGuidedEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveGradientGuidedEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGradientHybridOptimizer import ( + RefinedAdaptiveGradientHybridOptimizer, + ) + + lama_register["RefinedAdaptiveGradientHybridOptimizer"] = RefinedAdaptiveGradientHybridOptimizer + LLAMARefinedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveGradientHybridOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveGuidedEvolutionStrategy import ( + RefinedAdaptiveGuidedEvolutionStrategy, + ) + + lama_register["RefinedAdaptiveGuidedEvolutionStrategy"] = RefinedAdaptiveGuidedEvolutionStrategy + LLAMARefinedAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGuidedEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveGuidedEvolutionStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveGuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridDE import RefinedAdaptiveHybridDE + + lama_register["RefinedAdaptiveHybridDE"] = RefinedAdaptiveHybridDE + LLAMARefinedAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE").set_name( + "LLAMARefinedAdaptiveHybridDE", register=True + ) +except Exception as e: + print("RefinedAdaptiveHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridEvolutionStrategyV6 import ( + RefinedAdaptiveHybridEvolutionStrategyV6, + ) + + lama_register["RefinedAdaptiveHybridEvolutionStrategyV6"] = RefinedAdaptiveHybridEvolutionStrategyV6 + LLAMARefinedAdaptiveHybridEvolutionStrategyV6 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6" + ).set_name("LLAMARefinedAdaptiveHybridEvolutionStrategyV6", register=True) +except Exception as e: + print("RefinedAdaptiveHybridEvolutionStrategyV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimization import ( + RefinedAdaptiveHybridOptimization, + ) + + lama_register["RefinedAdaptiveHybridOptimization"] = RefinedAdaptiveHybridOptimization + LLAMARefinedAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridOptimization" + ).set_name("LLAMARefinedAdaptiveHybridOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimizer import RefinedAdaptiveHybridOptimizer + + lama_register["RefinedAdaptiveHybridOptimizer"] = RefinedAdaptiveHybridOptimizer + LLAMARefinedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridParticleSwarmDifferentialEvolution import ( + RefinedAdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + RefinedAdaptiveHybridParticleSwarmDifferentialEvolution + ) + LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridQuasiRandomGradientDE import ( + RefinedAdaptiveHybridQuasiRandomGradientDE, + ) + + lama_register["RefinedAdaptiveHybridQuasiRandomGradientDE"] = RefinedAdaptiveHybridQuasiRandomGradientDE + LLAMARefinedAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE" + ).set_name("LLAMARefinedAdaptiveHybridQuasiRandomGradientDE", register=True) +except Exception as e: + print("RefinedAdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveHybridSwarmEvolutionOptimization import ( + RefinedAdaptiveHybridSwarmEvolutionOptimization, + ) + + lama_register["RefinedAdaptiveHybridSwarmEvolutionOptimization"] = ( + RefinedAdaptiveHybridSwarmEvolutionOptimization + ) + LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization" + ).set_name("LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveIncrementalCrossover import ( + RefinedAdaptiveIncrementalCrossover, + ) + + lama_register["RefinedAdaptiveIncrementalCrossover"] = RefinedAdaptiveIncrementalCrossover + LLAMARefinedAdaptiveIncrementalCrossover = NonObjectOptimizer( + method="LLAMARefinedAdaptiveIncrementalCrossover" + ).set_name("LLAMARefinedAdaptiveIncrementalCrossover", register=True) +except Exception as e: + print("RefinedAdaptiveIncrementalCrossover can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveIslandEvolutionStrategy import ( + RefinedAdaptiveIslandEvolutionStrategy, + ) + + lama_register["RefinedAdaptiveIslandEvolutionStrategy"] = RefinedAdaptiveIslandEvolutionStrategy + LLAMARefinedAdaptiveIslandEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveIslandEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveIslandEvolutionStrategy", register=True) +except Exception as e: + print("RefinedAdaptiveIslandEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDifferentialEvolution import ( + RefinedAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveMemeticDifferentialEvolution"] = RefinedAdaptiveMemeticDifferentialEvolution + LLAMARefinedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDiverseOptimizer import ( + RefinedAdaptiveMemeticDiverseOptimizer, + ) + + lama_register["RefinedAdaptiveMemeticDiverseOptimizer"] = RefinedAdaptiveMemeticDiverseOptimizer + LLAMARefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMARefinedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedSearch import ( + RefinedAdaptiveMemoryEnhancedSearch, + ) + + lama_register["RefinedAdaptiveMemoryEnhancedSearch"] = RefinedAdaptiveMemoryEnhancedSearch + LLAMARefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMARefinedAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: + print("RefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedStrategyV55 import ( + RefinedAdaptiveMemoryEnhancedStrategyV55, + ) + + lama_register["RefinedAdaptiveMemoryEnhancedStrategyV55"] = RefinedAdaptiveMemoryEnhancedStrategyV55 + LLAMARefinedAdaptiveMemoryEnhancedStrategyV55 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55" + ).set_name("LLAMARefinedAdaptiveMemoryEnhancedStrategyV55", register=True) +except Exception as e: + print("RefinedAdaptiveMemoryEnhancedStrategyV55 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMemoryStrategyV67 import RefinedAdaptiveMemoryStrategyV67 + + lama_register["RefinedAdaptiveMemoryStrategyV67"] = RefinedAdaptiveMemoryStrategyV67 + LLAMARefinedAdaptiveMemoryStrategyV67 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryStrategyV67" + ).set_name("LLAMARefinedAdaptiveMemoryStrategyV67", register=True) +except Exception as e: + print("RefinedAdaptiveMemoryStrategyV67 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMultiOperatorSearch import ( + RefinedAdaptiveMultiOperatorSearch, + ) + + lama_register["RefinedAdaptiveMultiOperatorSearch"] = RefinedAdaptiveMultiOperatorSearch + LLAMARefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiOperatorSearch" + ).set_name("LLAMARefinedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: + print("RefinedAdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE import RefinedAdaptiveMultiStrategyDE + + lama_register["RefinedAdaptiveMultiStrategyDE"] = RefinedAdaptiveMultiStrategyDE + LLAMARefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDE" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDE", register=True) +except Exception as e: + print("RefinedAdaptiveMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE_v2 import ( + RefinedAdaptiveMultiStrategyDE_v2, + ) + + lama_register["RefinedAdaptiveMultiStrategyDE_v2"] = RefinedAdaptiveMultiStrategyDE_v2 + LLAMARefinedAdaptiveMultiStrategyDE_v2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDE_v2" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDE_v2", register=True) +except Exception as e: + print("RefinedAdaptiveMultiStrategyDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolution import ( + RefinedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolution"] = ( + RefinedAdaptiveMultiStrategyDifferentialEvolution + ) + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 import ( + RefinedAdaptiveMultiStrategyDifferentialEvolutionV2, + ) + + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolutionV2"] = ( + RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 + ) + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2", register=True) +except Exception as e: + print("RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveParameterStrategyV38 import ( + RefinedAdaptiveParameterStrategyV38, + ) + + lama_register["RefinedAdaptiveParameterStrategyV38"] = RefinedAdaptiveParameterStrategyV38 + LLAMARefinedAdaptiveParameterStrategyV38 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveParameterStrategyV38" + ).set_name("LLAMARefinedAdaptiveParameterStrategyV38", register=True) +except Exception as e: + print("RefinedAdaptiveParameterStrategyV38 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( + RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( + RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + ) + LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" + ).set_name( + "LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True + ) +except Exception as e: + print( + "RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( + RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( + RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + ) + LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: + print("RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionBalanceStrategy import ( + RefinedAdaptivePrecisionBalanceStrategy, + ) + + lama_register["RefinedAdaptivePrecisionBalanceStrategy"] = RefinedAdaptivePrecisionBalanceStrategy + LLAMARefinedAdaptivePrecisionBalanceStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionBalanceStrategy" + ).set_name("LLAMARefinedAdaptivePrecisionBalanceStrategy", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionBalanceStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV4 import ( + RefinedAdaptivePrecisionCohortOptimizationV4, + ) + + lama_register["RefinedAdaptivePrecisionCohortOptimizationV4"] = ( + RefinedAdaptivePrecisionCohortOptimizationV4 + ) + LLAMARefinedAdaptivePrecisionCohortOptimizationV4 = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4" + ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV4", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionCohortOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV6 import ( + RefinedAdaptivePrecisionCohortOptimizationV6, + ) + + lama_register["RefinedAdaptivePrecisionCohortOptimizationV6"] = ( + RefinedAdaptivePrecisionCohortOptimizationV6 + ) + LLAMARefinedAdaptivePrecisionCohortOptimizationV6 = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6" + ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV6", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionCohortOptimizationV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDifferentialEvolution import ( + RefinedAdaptivePrecisionDifferentialEvolution, + ) + + lama_register["RefinedAdaptivePrecisionDifferentialEvolution"] = ( + RefinedAdaptivePrecisionDifferentialEvolution + ) + LLAMARefinedAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionDifferentialEvolution" + ).set_name("LLAMARefinedAdaptivePrecisionDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDivideSearch import ( + RefinedAdaptivePrecisionDivideSearch, + ) + + lama_register["RefinedAdaptivePrecisionDivideSearch"] = RefinedAdaptivePrecisionDivideSearch + LLAMARefinedAdaptivePrecisionDivideSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionDivideSearch" + ).set_name("LLAMARefinedAdaptivePrecisionDivideSearch", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionDivideSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionEvolutionStrategy import ( + RefinedAdaptivePrecisionEvolutionStrategy, + ) + + lama_register["RefinedAdaptivePrecisionEvolutionStrategy"] = RefinedAdaptivePrecisionEvolutionStrategy + LLAMARefinedAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionEvolutionStrategy" + ).set_name("LLAMARefinedAdaptivePrecisionEvolutionStrategy", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionFocalHybrid import ( + RefinedAdaptivePrecisionFocalHybrid, + ) + + lama_register["RefinedAdaptivePrecisionFocalHybrid"] = RefinedAdaptivePrecisionFocalHybrid + LLAMARefinedAdaptivePrecisionFocalHybrid = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionFocalHybrid" + ).set_name("LLAMARefinedAdaptivePrecisionFocalHybrid", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionFocalHybrid can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionHybridSearch import ( + RefinedAdaptivePrecisionHybridSearch, + ) + + lama_register["RefinedAdaptivePrecisionHybridSearch"] = RefinedAdaptivePrecisionHybridSearch + LLAMARefinedAdaptivePrecisionHybridSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionHybridSearch" + ).set_name("LLAMARefinedAdaptivePrecisionHybridSearch", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptivePrecisionStrategicOptimizer import ( + RefinedAdaptivePrecisionStrategicOptimizer, + ) + + lama_register["RefinedAdaptivePrecisionStrategicOptimizer"] = RefinedAdaptivePrecisionStrategicOptimizer + LLAMARefinedAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionStrategicOptimizer" + ).set_name("LLAMARefinedAdaptivePrecisionStrategicOptimizer", register=True) +except Exception as e: + print("RefinedAdaptivePrecisionStrategicOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumCrossoverStrategyV3 import ( + RefinedAdaptiveQuantumCrossoverStrategyV3, + ) + + lama_register["RefinedAdaptiveQuantumCrossoverStrategyV3"] = RefinedAdaptiveQuantumCrossoverStrategyV3 + LLAMARefinedAdaptiveQuantumCrossoverStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3" + ).set_name("LLAMARefinedAdaptiveQuantumCrossoverStrategyV3", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumCrossoverStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolution import ( + RefinedAdaptiveQuantumDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveQuantumDifferentialEvolution"] = RefinedAdaptiveQuantumDifferentialEvolution + LLAMARefinedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolutionPlus import ( + RefinedAdaptiveQuantumDifferentialEvolutionPlus, + ) + + lama_register["RefinedAdaptiveQuantumDifferentialEvolutionPlus"] = ( + RefinedAdaptiveQuantumDifferentialEvolutionPlus + ) + LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus" + ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumEliteDE import RefinedAdaptiveQuantumEliteDE + + lama_register["RefinedAdaptiveQuantumEliteDE"] = RefinedAdaptiveQuantumEliteDE + LLAMARefinedAdaptiveQuantumEliteDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumEliteDE" + ).set_name("LLAMARefinedAdaptiveQuantumEliteDE", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumEliteDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumEntropyDE import RefinedAdaptiveQuantumEntropyDE + + lama_register["RefinedAdaptiveQuantumEntropyDE"] = RefinedAdaptiveQuantumEntropyDE + LLAMARefinedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumEntropyDE" + ).set_name("LLAMARefinedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumEntropyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientBoostedMemeticSearch import ( + RefinedAdaptiveQuantumGradientBoostedMemeticSearch, + ) + + lama_register["RefinedAdaptiveQuantumGradientBoostedMemeticSearch"] = ( + RefinedAdaptiveQuantumGradientBoostedMemeticSearch + ) + LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientExplorationOptimization import ( + RefinedAdaptiveQuantumGradientExplorationOptimization, + ) + + lama_register["RefinedAdaptiveQuantumGradientExplorationOptimization"] = ( + RefinedAdaptiveQuantumGradientExplorationOptimization + ) + LLAMARefinedAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization" + ).set_name("LLAMARefinedAdaptiveQuantumGradientExplorationOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientHybridOptimizer import ( + RefinedAdaptiveQuantumGradientHybridOptimizer, + ) + + lama_register["RefinedAdaptiveQuantumGradientHybridOptimizer"] = ( + RefinedAdaptiveQuantumGradientHybridOptimizer + ) + LLAMARefinedAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveQuantumGradientHybridOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumPSO import RefinedAdaptiveQuantumPSO + + lama_register["RefinedAdaptiveQuantumPSO"] = RefinedAdaptiveQuantumPSO + LLAMARefinedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO").set_name( + "LLAMARefinedAdaptiveQuantumPSO", register=True + ) +except Exception as e: + print("RefinedAdaptiveQuantumPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuantumSwarmOptimizerV3 import ( + RefinedAdaptiveQuantumSwarmOptimizerV3, + ) + + lama_register["RefinedAdaptiveQuantumSwarmOptimizerV3"] = RefinedAdaptiveQuantumSwarmOptimizerV3 + LLAMARefinedAdaptiveQuantumSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3" + ).set_name("LLAMARefinedAdaptiveQuantumSwarmOptimizerV3", register=True) +except Exception as e: + print("RefinedAdaptiveQuantumSwarmOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomDEGradientAnnealing import ( + RefinedAdaptiveQuasiRandomDEGradientAnnealing, + ) + + lama_register["RefinedAdaptiveQuasiRandomDEGradientAnnealing"] = ( + RefinedAdaptiveQuasiRandomDEGradientAnnealing + ) + LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing" + ).set_name("LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: + print("RefinedAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution import ( + RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( + RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution + ) + LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveRefinementPSO import RefinedAdaptiveRefinementPSO + + lama_register["RefinedAdaptiveRefinementPSO"] = RefinedAdaptiveRefinementPSO + LLAMARefinedAdaptiveRefinementPSO = NonObjectOptimizer( + method="LLAMARefinedAdaptiveRefinementPSO" + ).set_name("LLAMARefinedAdaptiveRefinementPSO", register=True) +except Exception as e: + print("RefinedAdaptiveRefinementPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSimulatedAnnealingWithSmartMemory import ( + RefinedAdaptiveSimulatedAnnealingWithSmartMemory, + ) + + lama_register["RefinedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( + RefinedAdaptiveSimulatedAnnealingWithSmartMemory + ) + LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: + print("RefinedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSpatialExplorationOptimizer import ( + RefinedAdaptiveSpatialExplorationOptimizer, + ) + + lama_register["RefinedAdaptiveSpatialExplorationOptimizer"] = RefinedAdaptiveSpatialExplorationOptimizer + LLAMARefinedAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpatialExplorationOptimizer" + ).set_name("LLAMARefinedAdaptiveSpatialExplorationOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveSpatialExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSpatialOptimizer import RefinedAdaptiveSpatialOptimizer + + lama_register["RefinedAdaptiveSpatialOptimizer"] = RefinedAdaptiveSpatialOptimizer + LLAMARefinedAdaptiveSpatialOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpatialOptimizer" + ).set_name("LLAMARefinedAdaptiveSpatialOptimizer", register=True) +except Exception as e: + print("RefinedAdaptiveSpatialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSpectralEvolution import RefinedAdaptiveSpectralEvolution + + lama_register["RefinedAdaptiveSpectralEvolution"] = RefinedAdaptiveSpectralEvolution + LLAMARefinedAdaptiveSpectralEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpectralEvolution" + ).set_name("LLAMARefinedAdaptiveSpectralEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveSpectralEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSpiralGradientSearch import ( + RefinedAdaptiveSpiralGradientSearch, + ) + + lama_register["RefinedAdaptiveSpiralGradientSearch"] = RefinedAdaptiveSpiralGradientSearch + LLAMARefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpiralGradientSearch" + ).set_name("LLAMARefinedAdaptiveSpiralGradientSearch", register=True) +except Exception as e: + print("RefinedAdaptiveSpiralGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveStochasticGradientQuorumOptimization import ( + RefinedAdaptiveStochasticGradientQuorumOptimization, + ) + + lama_register["RefinedAdaptiveStochasticGradientQuorumOptimization"] = ( + RefinedAdaptiveStochasticGradientQuorumOptimization + ) + LLAMARefinedAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization" + ).set_name("LLAMARefinedAdaptiveStochasticGradientQuorumOptimization", register=True) +except Exception as e: + print("RefinedAdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveStochasticHybridEvolution import ( + RefinedAdaptiveStochasticHybridEvolution, + ) + + lama_register["RefinedAdaptiveStochasticHybridEvolution"] = RefinedAdaptiveStochasticHybridEvolution + LLAMARefinedAdaptiveStochasticHybridEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveStochasticHybridEvolution" + ).set_name("LLAMARefinedAdaptiveStochasticHybridEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveStochasticHybridEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdaptiveSwarmDifferentialEvolution import ( + RefinedAdaptiveSwarmDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveSwarmDifferentialEvolution"] = RefinedAdaptiveSwarmDifferentialEvolution + LLAMARefinedAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdaptiveSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( + RefinedArchiveEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( + RefinedArchiveEnhancedAdaptiveDifferentialEvolution + ) + LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedAttenuatedAdaptiveEvolver import RefinedAttenuatedAdaptiveEvolver + + lama_register["RefinedAttenuatedAdaptiveEvolver"] = RefinedAttenuatedAdaptiveEvolver + LLAMARefinedAttenuatedAdaptiveEvolver = NonObjectOptimizer( + method="LLAMARefinedAttenuatedAdaptiveEvolver" + ).set_name("LLAMARefinedAttenuatedAdaptiveEvolver", register=True) +except Exception as e: + print("RefinedAttenuatedAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedBalancedAdaptiveElitistStrategy import ( + RefinedBalancedAdaptiveElitistStrategy, + ) + + lama_register["RefinedBalancedAdaptiveElitistStrategy"] = RefinedBalancedAdaptiveElitistStrategy + LLAMARefinedBalancedAdaptiveElitistStrategy = NonObjectOptimizer( + method="LLAMARefinedBalancedAdaptiveElitistStrategy" + ).set_name("LLAMARefinedBalancedAdaptiveElitistStrategy", register=True) +except Exception as e: + print("RefinedBalancedAdaptiveElitistStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedBalancedExplorationOptimizer import ( + RefinedBalancedExplorationOptimizer, + ) + + lama_register["RefinedBalancedExplorationOptimizer"] = RefinedBalancedExplorationOptimizer + LLAMARefinedBalancedExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedBalancedExplorationOptimizer" + ).set_name("LLAMARefinedBalancedExplorationOptimizer", register=True) +except Exception as e: + print("RefinedBalancedExplorationOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedCMADiffEvoPSO import RefinedCMADiffEvoPSO + + lama_register["RefinedCMADiffEvoPSO"] = RefinedCMADiffEvoPSO + LLAMARefinedCMADiffEvoPSO = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO").set_name( + "LLAMARefinedCMADiffEvoPSO", register=True + ) +except Exception as e: + print("RefinedCMADiffEvoPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedConcentricDiversityStrategy import ( + RefinedConcentricDiversityStrategy, + ) + + lama_register["RefinedConcentricDiversityStrategy"] = RefinedConcentricDiversityStrategy + LLAMARefinedConcentricDiversityStrategy = NonObjectOptimizer( + method="LLAMARefinedConcentricDiversityStrategy" + ).set_name("LLAMARefinedConcentricDiversityStrategy", register=True) +except Exception as e: + print("RefinedConcentricDiversityStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedConcentricQuantumCrossoverStrategyV5 import ( + RefinedConcentricQuantumCrossoverStrategyV5, + ) + + lama_register["RefinedConcentricQuantumCrossoverStrategyV5"] = RefinedConcentricQuantumCrossoverStrategyV5 + LLAMARefinedConcentricQuantumCrossoverStrategyV5 = NonObjectOptimizer( + method="LLAMARefinedConcentricQuantumCrossoverStrategyV5" + ).set_name("LLAMARefinedConcentricQuantumCrossoverStrategyV5", register=True) +except Exception as e: + print("RefinedConcentricQuantumCrossoverStrategyV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedConvergenceAdaptiveOptimizer import ( + RefinedConvergenceAdaptiveOptimizer, + ) + + lama_register["RefinedConvergenceAdaptiveOptimizer"] = RefinedConvergenceAdaptiveOptimizer + LLAMARefinedConvergenceAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedConvergenceAdaptiveOptimizer" + ).set_name("LLAMARefinedConvergenceAdaptiveOptimizer", register=True) +except Exception as e: + print("RefinedConvergenceAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedConvergenceDE import RefinedConvergenceDE + + lama_register["RefinedConvergenceDE"] = RefinedConvergenceDE + LLAMARefinedConvergenceDE = NonObjectOptimizer(method="LLAMARefinedConvergenceDE").set_name( + "LLAMARefinedConvergenceDE", register=True + ) +except Exception as e: + print("RefinedConvergenceDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedConvergentAdaptiveEvolutionStrategy import ( + RefinedConvergentAdaptiveEvolutionStrategy, + ) + + lama_register["RefinedConvergentAdaptiveEvolutionStrategy"] = RefinedConvergentAdaptiveEvolutionStrategy + LLAMARefinedConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedConvergentAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedConvergentAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("RefinedConvergentAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedCooperativeDifferentialEvolution import ( + RefinedCooperativeDifferentialEvolution, + ) + + lama_register["RefinedCooperativeDifferentialEvolution"] = RefinedCooperativeDifferentialEvolution + LLAMARefinedCooperativeDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedCooperativeDifferentialEvolution" + ).set_name("LLAMARefinedCooperativeDifferentialEvolution", register=True) +except Exception as e: + print("RefinedCooperativeDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedCosineAdaptiveDifferentialSwarm import ( + RefinedCosineAdaptiveDifferentialSwarm, + ) + + lama_register["RefinedCosineAdaptiveDifferentialSwarm"] = RefinedCosineAdaptiveDifferentialSwarm + LLAMARefinedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMARefinedCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMARefinedCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: + print("RefinedCosineAdaptiveDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDifferentialEvolutionWithAdaptiveLearningRate import ( + RefinedDifferentialEvolutionWithAdaptiveLearningRate, + ) + + lama_register["RefinedDifferentialEvolutionWithAdaptiveLearningRate"] = ( + RefinedDifferentialEvolutionWithAdaptiveLearningRate + ) + LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( + method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate" + ).set_name("LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate", register=True) +except Exception as e: + print("RefinedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDifferentialParticleSwarmOptimization import ( + RefinedDifferentialParticleSwarmOptimization, + ) + + lama_register["RefinedDifferentialParticleSwarmOptimization"] = ( + RefinedDifferentialParticleSwarmOptimization + ) + LLAMARefinedDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedDifferentialParticleSwarmOptimization" + ).set_name("LLAMARefinedDifferentialParticleSwarmOptimization", register=True) +except Exception as e: + print("RefinedDifferentialParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDimensionalCyclicCrossoverEvolver import ( + RefinedDimensionalCyclicCrossoverEvolver, + ) + + lama_register["RefinedDimensionalCyclicCrossoverEvolver"] = RefinedDimensionalCyclicCrossoverEvolver + LLAMARefinedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( + method="LLAMARefinedDimensionalCyclicCrossoverEvolver" + ).set_name("LLAMARefinedDimensionalCyclicCrossoverEvolver", register=True) +except Exception as e: + print("RefinedDimensionalCyclicCrossoverEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV2 import ( + RefinedDimensionalFeedbackEvolverV2, + ) + + lama_register["RefinedDimensionalFeedbackEvolverV2"] = RefinedDimensionalFeedbackEvolverV2 + LLAMARefinedDimensionalFeedbackEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedDimensionalFeedbackEvolverV2" + ).set_name("LLAMARefinedDimensionalFeedbackEvolverV2", register=True) +except Exception as e: + print("RefinedDimensionalFeedbackEvolverV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV4 import ( + RefinedDimensionalFeedbackEvolverV4, + ) + + lama_register["RefinedDimensionalFeedbackEvolverV4"] = RefinedDimensionalFeedbackEvolverV4 + LLAMARefinedDimensionalFeedbackEvolverV4 = NonObjectOptimizer( + method="LLAMARefinedDimensionalFeedbackEvolverV4" + ).set_name("LLAMARefinedDimensionalFeedbackEvolverV4", register=True) +except Exception as e: + print("RefinedDimensionalFeedbackEvolverV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDualConvergenceEvolutiveStrategy import ( + RefinedDualConvergenceEvolutiveStrategy, + ) + + lama_register["RefinedDualConvergenceEvolutiveStrategy"] = RefinedDualConvergenceEvolutiveStrategy + LLAMARefinedDualConvergenceEvolutiveStrategy = NonObjectOptimizer( + method="LLAMARefinedDualConvergenceEvolutiveStrategy" + ).set_name("LLAMARefinedDualConvergenceEvolutiveStrategy", register=True) +except Exception as e: + print("RefinedDualConvergenceEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDualPhaseADPSO_DE_V3_Enhanced import ( + RefinedDualPhaseADPSO_DE_V3_Enhanced, + ) + + lama_register["RefinedDualPhaseADPSO_DE_V3_Enhanced"] = RefinedDualPhaseADPSO_DE_V3_Enhanced + LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced = NonObjectOptimizer( + method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced" + ).set_name("LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced", register=True) +except Exception as e: + print("RefinedDualPhaseADPSO_DE_V3_Enhanced can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDualPhaseOptimization import RefinedDualPhaseOptimization + + lama_register["RefinedDualPhaseOptimization"] = RefinedDualPhaseOptimization + LLAMARefinedDualPhaseOptimization = NonObjectOptimizer( + method="LLAMARefinedDualPhaseOptimization" + ).set_name("LLAMARefinedDualPhaseOptimization", register=True) +except Exception as e: + print("RefinedDualPhaseOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDualStrategyAdaptiveDE import RefinedDualStrategyAdaptiveDE + + lama_register["RefinedDualStrategyAdaptiveDE"] = RefinedDualStrategyAdaptiveDE + LLAMARefinedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("RefinedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveDE import RefinedDynamicAdaptiveDE + + lama_register["RefinedDynamicAdaptiveDE"] = RefinedDynamicAdaptiveDE + LLAMARefinedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE").set_name( + "LLAMARefinedDynamicAdaptiveDE", register=True + ) +except Exception as e: + print("RefinedDynamicAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDE import RefinedDynamicAdaptiveHybridDE + + lama_register["RefinedDynamicAdaptiveHybridDE"] = RefinedDynamicAdaptiveHybridDE + LLAMARefinedDynamicAdaptiveHybridDE = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridDE" + ).set_name("LLAMARefinedDynamicAdaptiveHybridDE", register=True) +except Exception as e: + print("RefinedDynamicAdaptiveHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: + print("RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizer import ( + RefinedDynamicAdaptiveHybridOptimizer, + ) + + lama_register["RefinedDynamicAdaptiveHybridOptimizer"] = RefinedDynamicAdaptiveHybridOptimizer + LLAMARefinedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("RefinedDynamicAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizerV2 import ( + RefinedDynamicAdaptiveHybridOptimizerV2, + ) + + lama_register["RefinedDynamicAdaptiveHybridOptimizerV2"] = RefinedDynamicAdaptiveHybridOptimizerV2 + LLAMARefinedDynamicAdaptiveHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2" + ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizerV2", register=True) +except Exception as e: + print("RefinedDynamicAdaptiveHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicAdaptiveStrategyV23 import ( + RefinedDynamicAdaptiveStrategyV23, + ) + + lama_register["RefinedDynamicAdaptiveStrategyV23"] = RefinedDynamicAdaptiveStrategyV23 + LLAMARefinedDynamicAdaptiveStrategyV23 = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveStrategyV23" + ).set_name("LLAMARefinedDynamicAdaptiveStrategyV23", register=True) +except Exception as e: + print("RefinedDynamicAdaptiveStrategyV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV3 import ( + RefinedDynamicClusterHybridOptimizationV3, + ) + + lama_register["RefinedDynamicClusterHybridOptimizationV3"] = RefinedDynamicClusterHybridOptimizationV3 + LLAMARefinedDynamicClusterHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedDynamicClusterHybridOptimizationV3" + ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV3", register=True) +except Exception as e: + print("RefinedDynamicClusterHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV4 import ( + RefinedDynamicClusterHybridOptimizationV4, + ) + + lama_register["RefinedDynamicClusterHybridOptimizationV4"] = RefinedDynamicClusterHybridOptimizationV4 + LLAMARefinedDynamicClusterHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMARefinedDynamicClusterHybridOptimizationV4" + ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV4", register=True) +except Exception as e: + print("RefinedDynamicClusterHybridOptimizationV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicClusteringPSO import RefinedDynamicClusteringPSO + + lama_register["RefinedDynamicClusteringPSO"] = RefinedDynamicClusteringPSO + LLAMARefinedDynamicClusteringPSO = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO").set_name( + "LLAMARefinedDynamicClusteringPSO", register=True + ) +except Exception as e: + print("RefinedDynamicClusteringPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicCrowdingHybridOptimizer import ( + RefinedDynamicCrowdingHybridOptimizer, + ) + + lama_register["RefinedDynamicCrowdingHybridOptimizer"] = RefinedDynamicCrowdingHybridOptimizer + LLAMARefinedDynamicCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicCrowdingHybridOptimizer" + ).set_name("LLAMARefinedDynamicCrowdingHybridOptimizer", register=True) +except Exception as e: + print("RefinedDynamicCrowdingHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicEliteAdaptiveHybridOptimizer import ( + RefinedDynamicEliteAdaptiveHybridOptimizer, + ) + + lama_register["RefinedDynamicEliteAdaptiveHybridOptimizer"] = RefinedDynamicEliteAdaptiveHybridOptimizer + LLAMARefinedDynamicEliteAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("RefinedDynamicEliteAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicEnhancedHybridOptimizer import ( + RefinedDynamicEnhancedHybridOptimizer, + ) + + lama_register["RefinedDynamicEnhancedHybridOptimizer"] = RefinedDynamicEnhancedHybridOptimizer + LLAMARefinedDynamicEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicEnhancedHybridOptimizer" + ).set_name("LLAMARefinedDynamicEnhancedHybridOptimizer", register=True) +except Exception as e: + print("RefinedDynamicEnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( + RefinedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedDynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("RefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicHybridDEPSOWithEliteMemoryV2 import ( + RefinedDynamicHybridDEPSOWithEliteMemoryV2, + ) + + lama_register["RefinedDynamicHybridDEPSOWithEliteMemoryV2"] = RefinedDynamicHybridDEPSOWithEliteMemoryV2 + LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2 = NonObjectOptimizer( + method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2" + ).set_name("LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2", register=True) +except Exception as e: + print("RefinedDynamicHybridDEPSOWithEliteMemoryV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicHybridOptimizer import RefinedDynamicHybridOptimizer + + lama_register["RefinedDynamicHybridOptimizer"] = RefinedDynamicHybridOptimizer + LLAMARefinedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicHybridOptimizer" + ).set_name("LLAMARefinedDynamicHybridOptimizer", register=True) +except Exception as e: + print("RefinedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedDynamicQuantumEvolution import RefinedDynamicQuantumEvolution + + lama_register["RefinedDynamicQuantumEvolution"] = RefinedDynamicQuantumEvolution + LLAMARefinedDynamicQuantumEvolution = NonObjectOptimizer( + method="LLAMARefinedDynamicQuantumEvolution" + ).set_name("LLAMARefinedDynamicQuantumEvolution", register=True) +except Exception as e: + print("RefinedDynamicQuantumEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveHybridDEPSO import RefinedEliteAdaptiveHybridDEPSO + + lama_register["RefinedEliteAdaptiveHybridDEPSO"] = RefinedEliteAdaptiveHybridDEPSO + LLAMARefinedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedEliteAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("RefinedEliteAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 import ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3, + ) + + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3"] = ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 + ) + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3" + ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizer import ( + RefinedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["RefinedEliteAdaptiveMemoryHybridOptimizer"] = RefinedEliteAdaptiveMemoryHybridOptimizer + LLAMARefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV3 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV3, + ) + + lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV3"] = RefinedEliteAdaptiveMemoryHybridOptimizerV3 + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV4 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV4, + ) + + lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV4"] = RefinedEliteAdaptiveMemoryHybridOptimizerV4 + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV5 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV5, + ) + + lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV5"] = RefinedEliteAdaptiveMemoryHybridOptimizerV5 + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5", register=True) +except Exception as e: + print("RefinedEliteAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch import ( + RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch, + ) + + lama_register["RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch"] = ( + RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch + ) + LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch" + ).set_name("LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch", register=True) +except Exception as e: + print("RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteDynamicHybridOptimizer import ( + RefinedEliteDynamicHybridOptimizer, + ) + + lama_register["RefinedEliteDynamicHybridOptimizer"] = RefinedEliteDynamicHybridOptimizer + LLAMARefinedEliteDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteDynamicHybridOptimizer" + ).set_name("LLAMARefinedEliteDynamicHybridOptimizer", register=True) +except Exception as e: + print("RefinedEliteDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteDynamicMemoryHybridOptimizer import ( + RefinedEliteDynamicMemoryHybridOptimizer, + ) + + lama_register["RefinedEliteDynamicMemoryHybridOptimizer"] = RefinedEliteDynamicMemoryHybridOptimizer + LLAMARefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMARefinedEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: + print("RefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteGuidedAdaptiveDE import RefinedEliteGuidedAdaptiveDE + + lama_register["RefinedEliteGuidedAdaptiveDE"] = RefinedEliteGuidedAdaptiveDE + LLAMARefinedEliteGuidedAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedAdaptiveDE" + ).set_name("LLAMARefinedEliteGuidedAdaptiveDE", register=True) +except Exception as e: + print("RefinedEliteGuidedAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE import RefinedEliteGuidedMutationDE + + lama_register["RefinedEliteGuidedMutationDE"] = RefinedEliteGuidedMutationDE + LLAMARefinedEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedMutationDE" + ).set_name("LLAMARefinedEliteGuidedMutationDE", register=True) +except Exception as e: + print("RefinedEliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE_v3 import RefinedEliteGuidedMutationDE_v3 + + lama_register["RefinedEliteGuidedMutationDE_v3"] = RefinedEliteGuidedMutationDE_v3 + LLAMARefinedEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedMutationDE_v3" + ).set_name("LLAMARefinedEliteGuidedMutationDE_v3", register=True) +except Exception as e: + print("RefinedEliteGuidedMutationDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( + RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, + ) + + lama_register["RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = ( + RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + ) + LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( + method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" + ).set_name("LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) +except Exception as e: + print("RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 import ( + RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5, + ) + + lama_register["RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5"] = ( + RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 + ) + LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5" + ).set_name("LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost import ( + RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost + ) + LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDualPhaseStrategyV9 import ( + RefinedEnhancedAdaptiveDualPhaseStrategyV9, + ) + + lama_register["RefinedEnhancedAdaptiveDualPhaseStrategyV9"] = RefinedEnhancedAdaptiveDualPhaseStrategyV9 + LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9" + ).set_name("LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveDualPhaseStrategyV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO import ( + RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO, + ) + + lama_register["RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( + RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO + ) + LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 import ( + RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9, + ) + + lama_register["RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9"] = ( + RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 + ) + LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9" + ).set_name("LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonySearch import ( + RefinedEnhancedAdaptiveHarmonySearch, + ) + + lama_register["RefinedEnhancedAdaptiveHarmonySearch"] = RefinedEnhancedAdaptiveHarmonySearch + LLAMARefinedEnhancedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHarmonySearch" + ).set_name("LLAMARefinedEnhancedAdaptiveHarmonySearch", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveHarmonySearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 import ( + RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2, + ) + + lama_register["RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2"] = ( + RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 + ) + LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2" + ).set_name("LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm import ( + RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm, + ) + + lama_register["RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm"] = ( + RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm + ) + LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiOperatorSearch import ( + RefinedEnhancedAdaptiveMultiOperatorSearch, + ) + + lama_register["RefinedEnhancedAdaptiveMultiOperatorSearch"] = RefinedEnhancedAdaptiveMultiOperatorSearch + LLAMARefinedEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch" + ).set_name("LLAMARefinedEnhancedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiStrategyDE import ( + RefinedEnhancedAdaptiveMultiStrategyDE, + ) + + lama_register["RefinedEnhancedAdaptiveMultiStrategyDE"] = RefinedEnhancedAdaptiveMultiStrategyDE + LLAMARefinedEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE" + ).set_name("LLAMARefinedEnhancedAdaptiveMultiStrategyDE", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveMultiStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v45 import RefinedEnhancedAdaptiveQGSA_v45 + + lama_register["RefinedEnhancedAdaptiveQGSA_v45"] = RefinedEnhancedAdaptiveQGSA_v45 + LLAMARefinedEnhancedAdaptiveQGSA_v45 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v45" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v45", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveQGSA_v45 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v46 import RefinedEnhancedAdaptiveQGSA_v46 + + lama_register["RefinedEnhancedAdaptiveQGSA_v46"] = RefinedEnhancedAdaptiveQGSA_v46 + LLAMARefinedEnhancedAdaptiveQGSA_v46 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v46" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v46", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveQGSA_v46 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v48 import RefinedEnhancedAdaptiveQGSA_v48 + + lama_register["RefinedEnhancedAdaptiveQGSA_v48"] = RefinedEnhancedAdaptiveQGSA_v48 + LLAMARefinedEnhancedAdaptiveQGSA_v48 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v48" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v48", register=True) +except Exception as e: + print("RefinedEnhancedAdaptiveQGSA_v48 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedBalancedDualStrategyAdaptiveDE import ( + RefinedEnhancedBalancedDualStrategyAdaptiveDE, + ) + + lama_register["RefinedEnhancedBalancedDualStrategyAdaptiveDE"] = ( + RefinedEnhancedBalancedDualStrategyAdaptiveDE + ) + LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("RefinedEnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedCovarianceMatrixDifferentialEvolution import ( + RefinedEnhancedCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedEnhancedCovarianceMatrixDifferentialEvolution"] = ( + RefinedEnhancedCovarianceMatrixDifferentialEvolution + ) + LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("RefinedEnhancedCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDifferentialEvolutionLocalSearch_v42 import ( + RefinedEnhancedDifferentialEvolutionLocalSearch_v42, + ) + + lama_register["RefinedEnhancedDifferentialEvolutionLocalSearch_v42"] = ( + RefinedEnhancedDifferentialEvolutionLocalSearch_v42 + ) + LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42" + ).set_name("LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42", register=True) +except Exception as e: + print("RefinedEnhancedDifferentialEvolutionLocalSearch_v42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( + RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3, + ) + + lama_register["RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( + RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 + ) + LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3" + ).set_name("LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) +except Exception as e: + print("RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimization import ( + RefinedEnhancedDualPhaseHybridOptimization, + ) + + lama_register["RefinedEnhancedDualPhaseHybridOptimization"] = RefinedEnhancedDualPhaseHybridOptimization + LLAMARefinedEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseHybridOptimization" + ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimization", register=True) +except Exception as e: + print("RefinedEnhancedDualPhaseHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimizationV3 import ( + RefinedEnhancedDualPhaseHybridOptimizationV3, + ) + + lama_register["RefinedEnhancedDualPhaseHybridOptimizationV3"] = ( + RefinedEnhancedDualPhaseHybridOptimizationV3 + ) + LLAMARefinedEnhancedDualPhaseHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3" + ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimizationV3", register=True) +except Exception as e: + print("RefinedEnhancedDualPhaseHybridOptimizationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v2 import ( + RefinedEnhancedDualStrategyAdaptiveDE_v2, + ) + + lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v2"] = RefinedEnhancedDualStrategyAdaptiveDE_v2 + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2" + ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2", register=True) +except Exception as e: + print("RefinedEnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v3 import ( + RefinedEnhancedDualStrategyAdaptiveDE_v3, + ) + + lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v3"] = RefinedEnhancedDualStrategyAdaptiveDE_v3 + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3" + ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3", register=True) +except Exception as e: + print("RefinedEnhancedDualStrategyAdaptiveDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyDynamicDE import ( + RefinedEnhancedDualStrategyDynamicDE, + ) + + lama_register["RefinedEnhancedDualStrategyDynamicDE"] = RefinedEnhancedDualStrategyDynamicDE + LLAMARefinedEnhancedDualStrategyDynamicDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyDynamicDE" + ).set_name("LLAMARefinedEnhancedDualStrategyDynamicDE", register=True) +except Exception as e: + print("RefinedEnhancedDualStrategyDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyElitistDE_v2 import ( + RefinedEnhancedDualStrategyElitistDE_v2, + ) + + lama_register["RefinedEnhancedDualStrategyElitistDE_v2"] = RefinedEnhancedDualStrategyElitistDE_v2 + LLAMARefinedEnhancedDualStrategyElitistDE_v2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyElitistDE_v2" + ).set_name("LLAMARefinedEnhancedDualStrategyElitistDE_v2", register=True) +except Exception as e: + print("RefinedEnhancedDualStrategyElitistDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDynamicAdaptiveHybridOptimization import ( + RefinedEnhancedDynamicAdaptiveHybridOptimization, + ) + + lama_register["RefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( + RefinedEnhancedDynamicAdaptiveHybridOptimization + ) + LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: + print("RefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedDynamicDualStrategyHybridDE import ( + RefinedEnhancedDynamicDualStrategyHybridDE, + ) + + lama_register["RefinedEnhancedDynamicDualStrategyHybridDE"] = RefinedEnhancedDynamicDualStrategyHybridDE + LLAMARefinedEnhancedDynamicDualStrategyHybridDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE" + ).set_name("LLAMARefinedEnhancedDynamicDualStrategyHybridDE", register=True) +except Exception as e: + print("RefinedEnhancedDynamicDualStrategyHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedAdaptiveRestartDE import ( + RefinedEnhancedEliteGuidedAdaptiveRestartDE, + ) + + lama_register["RefinedEnhancedEliteGuidedAdaptiveRestartDE"] = RefinedEnhancedEliteGuidedAdaptiveRestartDE + LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: + print("RefinedEnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedMassQGSA_v87 import ( + RefinedEnhancedEliteGuidedMassQGSA_v87, + ) + + lama_register["RefinedEnhancedEliteGuidedMassQGSA_v87"] = RefinedEnhancedEliteGuidedMassQGSA_v87 + LLAMARefinedEnhancedEliteGuidedMassQGSA_v87 = NonObjectOptimizer( + method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87" + ).set_name("LLAMARefinedEnhancedEliteGuidedMassQGSA_v87", register=True) +except Exception as e: + print("RefinedEnhancedEliteGuidedMassQGSA_v87 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHybridAdaptiveMultiStageOptimization import ( + RefinedEnhancedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["RefinedEnhancedHybridAdaptiveMultiStageOptimization"] = ( + RefinedEnhancedHybridAdaptiveMultiStageOptimization + ) + LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("RefinedEnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( + RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 + ) + LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: + print("RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 import ( + RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2, + ) + + lama_register["RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2"] = ( + RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 + ) + LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2" + ).set_name("LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2", register=True) +except Exception as e: + print("RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHybridExplorationOptimization import ( + RefinedEnhancedHybridExplorationOptimization, + ) + + lama_register["RefinedEnhancedHybridExplorationOptimization"] = ( + RefinedEnhancedHybridExplorationOptimization + ) + LLAMARefinedEnhancedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridExplorationOptimization" + ).set_name("LLAMARefinedEnhancedHybridExplorationOptimization", register=True) +except Exception as e: + print("RefinedEnhancedHybridExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHyperAdaptiveHybridDEPSO import ( + RefinedEnhancedHyperAdaptiveHybridDEPSO, + ) + + lama_register["RefinedEnhancedHyperAdaptiveHybridDEPSO"] = RefinedEnhancedHyperAdaptiveHybridDEPSO + LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("RefinedEnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 import ( + RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63, + ) + + lama_register["RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63"] = ( + RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 + ) + LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63" + ).set_name("LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63", register=True) +except Exception as e: + print("RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedHyperStrategicOptimizerV57 import ( + RefinedEnhancedHyperStrategicOptimizerV57, + ) + + lama_register["RefinedEnhancedHyperStrategicOptimizerV57"] = RefinedEnhancedHyperStrategicOptimizerV57 + LLAMARefinedEnhancedHyperStrategicOptimizerV57 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperStrategicOptimizerV57" + ).set_name("LLAMARefinedEnhancedHyperStrategicOptimizerV57", register=True) +except Exception as e: + print("RefinedEnhancedHyperStrategicOptimizerV57 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedMetaNetAQAPSOv7 import RefinedEnhancedMetaNetAQAPSOv7 + + lama_register["RefinedEnhancedMetaNetAQAPSOv7"] = RefinedEnhancedMetaNetAQAPSOv7 + LLAMARefinedEnhancedMetaNetAQAPSOv7 = NonObjectOptimizer( + method="LLAMARefinedEnhancedMetaNetAQAPSOv7" + ).set_name("LLAMARefinedEnhancedMetaNetAQAPSOv7", register=True) +except Exception as e: + print("RefinedEnhancedMetaNetAQAPSOv7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedOptimizedEvolutiveStrategy import ( + RefinedEnhancedOptimizedEvolutiveStrategy, + ) + + lama_register["RefinedEnhancedOptimizedEvolutiveStrategy"] = RefinedEnhancedOptimizedEvolutiveStrategy + LLAMARefinedEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( + method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy" + ).set_name("LLAMARefinedEnhancedOptimizedEvolutiveStrategy", register=True) +except Exception as e: + print("RefinedEnhancedOptimizedEvolutiveStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedPrecisionEvolutionaryOptimizerV40 import ( + RefinedEnhancedPrecisionEvolutionaryOptimizerV40, + ) + + lama_register["RefinedEnhancedPrecisionEvolutionaryOptimizerV40"] = ( + RefinedEnhancedPrecisionEvolutionaryOptimizerV40 + ) + LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40 = NonObjectOptimizer( + method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40" + ).set_name("LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40", register=True) +except Exception as e: + print("RefinedEnhancedPrecisionEvolutionaryOptimizerV40 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedQAPSOAIRVCHRLS import RefinedEnhancedQAPSOAIRVCHRLS + + lama_register["RefinedEnhancedQAPSOAIRVCHRLS"] = RefinedEnhancedQAPSOAIRVCHRLS + LLAMARefinedEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer( + method="LLAMARefinedEnhancedQAPSOAIRVCHRLS" + ).set_name("LLAMARefinedEnhancedQAPSOAIRVCHRLS", register=True) +except Exception as e: + print("RefinedEnhancedQAPSOAIRVCHRLS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 import ( + RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2, + ) + + lama_register["RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2"] = ( + RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 + ) + LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2" + ).set_name("LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2", register=True) +except Exception as e: + print("RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedRAMEDSProV3 import RefinedEnhancedRAMEDSProV3 + + lama_register["RefinedEnhancedRAMEDSProV3"] = RefinedEnhancedRAMEDSProV3 + LLAMARefinedEnhancedRAMEDSProV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3").set_name( + "LLAMARefinedEnhancedRAMEDSProV3", register=True + ) +except Exception as e: + print("RefinedEnhancedRAMEDSProV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv3 import RefinedEnhancedRAMEDSv3 + + lama_register["RefinedEnhancedRAMEDSv3"] = RefinedEnhancedRAMEDSv3 + LLAMARefinedEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3").set_name( + "LLAMARefinedEnhancedRAMEDSv3", register=True + ) +except Exception as e: + print("RefinedEnhancedRAMEDSv3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv4 import RefinedEnhancedRAMEDSv4 + + lama_register["RefinedEnhancedRAMEDSv4"] = RefinedEnhancedRAMEDSv4 + LLAMARefinedEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4").set_name( + "LLAMARefinedEnhancedRAMEDSv4", register=True + ) +except Exception as e: + print("RefinedEnhancedRAMEDSv4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedStrategyDE import RefinedEnhancedStrategyDE + + lama_register["RefinedEnhancedStrategyDE"] = RefinedEnhancedStrategyDE + LLAMARefinedEnhancedStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE").set_name( + "LLAMARefinedEnhancedStrategyDE", register=True + ) +except Exception as e: + print("RefinedEnhancedStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnhancedUltraRefinedRAMEDS import ( + RefinedEnhancedUltraRefinedRAMEDS, + ) + + lama_register["RefinedEnhancedUltraRefinedRAMEDS"] = RefinedEnhancedUltraRefinedRAMEDS + LLAMARefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedEnhancedUltraRefinedRAMEDS" + ).set_name("LLAMARefinedEnhancedUltraRefinedRAMEDS", register=True) +except Exception as e: + print("RefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEnsembleAdaptiveQuantumDE import RefinedEnsembleAdaptiveQuantumDE + + lama_register["RefinedEnsembleAdaptiveQuantumDE"] = RefinedEnsembleAdaptiveQuantumDE + LLAMARefinedEnsembleAdaptiveQuantumDE = NonObjectOptimizer( + method="LLAMARefinedEnsembleAdaptiveQuantumDE" + ).set_name("LLAMARefinedEnsembleAdaptiveQuantumDE", register=True) +except Exception as e: + print("RefinedEnsembleAdaptiveQuantumDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEvolutionaryGradientHybridOptimizerV3 import ( + RefinedEvolutionaryGradientHybridOptimizerV3, + ) + + lama_register["RefinedEvolutionaryGradientHybridOptimizerV3"] = ( + RefinedEvolutionaryGradientHybridOptimizerV3 + ) + LLAMARefinedEvolutionaryGradientHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3" + ).set_name("LLAMARefinedEvolutionaryGradientHybridOptimizerV3", register=True) +except Exception as e: + print("RefinedEvolutionaryGradientHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedEvolutionaryTuningStrategy import ( + RefinedEvolutionaryTuningStrategy, + ) + + lama_register["RefinedEvolutionaryTuningStrategy"] = RefinedEvolutionaryTuningStrategy + LLAMARefinedEvolutionaryTuningStrategy = NonObjectOptimizer( + method="LLAMARefinedEvolutionaryTuningStrategy" + ).set_name("LLAMARefinedEvolutionaryTuningStrategy", register=True) +except Exception as e: + print("RefinedEvolutionaryTuningStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGlobalClimbingOptimizerV2 import RefinedGlobalClimbingOptimizerV2 + + lama_register["RefinedGlobalClimbingOptimizerV2"] = RefinedGlobalClimbingOptimizerV2 + LLAMARefinedGlobalClimbingOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalClimbingOptimizerV2" + ).set_name("LLAMARefinedGlobalClimbingOptimizerV2", register=True) +except Exception as e: + print("RefinedGlobalClimbingOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGlobalLocalBalancingOptimizer import ( + RefinedGlobalLocalBalancingOptimizer, + ) + + lama_register["RefinedGlobalLocalBalancingOptimizer"] = RefinedGlobalLocalBalancingOptimizer + LLAMARefinedGlobalLocalBalancingOptimizer = NonObjectOptimizer( + method="LLAMARefinedGlobalLocalBalancingOptimizer" + ).set_name("LLAMARefinedGlobalLocalBalancingOptimizer", register=True) +except Exception as e: + print("RefinedGlobalLocalBalancingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGlobalStructureAdaptiveEvolverV2 import ( + RefinedGlobalStructureAdaptiveEvolverV2, + ) + + lama_register["RefinedGlobalStructureAdaptiveEvolverV2"] = RefinedGlobalStructureAdaptiveEvolverV2 + LLAMARefinedGlobalStructureAdaptiveEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAdaptiveEvolverV2" + ).set_name("LLAMARefinedGlobalStructureAdaptiveEvolverV2", register=True) +except Exception as e: + print("RefinedGlobalStructureAdaptiveEvolverV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV2 import ( + RefinedGlobalStructureAwareOptimizerV2, + ) + + lama_register["RefinedGlobalStructureAwareOptimizerV2"] = RefinedGlobalStructureAwareOptimizerV2 + LLAMARefinedGlobalStructureAwareOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAwareOptimizerV2" + ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV2", register=True) +except Exception as e: + print("RefinedGlobalStructureAwareOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV3 import ( + RefinedGlobalStructureAwareOptimizerV3, + ) + + lama_register["RefinedGlobalStructureAwareOptimizerV3"] = RefinedGlobalStructureAwareOptimizerV3 + LLAMARefinedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAwareOptimizerV3" + ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV3", register=True) +except Exception as e: + print("RefinedGlobalStructureAwareOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBalancedExplorationPSO import ( + RefinedGradientBalancedExplorationPSO, + ) + + lama_register["RefinedGradientBalancedExplorationPSO"] = RefinedGradientBalancedExplorationPSO + LLAMARefinedGradientBalancedExplorationPSO = NonObjectOptimizer( + method="LLAMARefinedGradientBalancedExplorationPSO" + ).set_name("LLAMARefinedGradientBalancedExplorationPSO", register=True) +except Exception as e: + print("RefinedGradientBalancedExplorationPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration import ( + RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration, + ) + + lama_register["RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration"] = ( + RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration + ) + LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration" + ).set_name("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration", register=True) +except Exception as e: + print("RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBoostedMemoryAnnealing import ( + RefinedGradientBoostedMemoryAnnealing, + ) + + lama_register["RefinedGradientBoostedMemoryAnnealing"] = RefinedGradientBoostedMemoryAnnealing + LLAMARefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMARefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: + print("RefinedGradientBoostedMemoryAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealing import ( + RefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedGradientBoostedMemorySimulatedAnnealing + ) + LLAMARefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("RefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealingPlus import ( + RefinedGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["RefinedGradientBoostedMemorySimulatedAnnealingPlus"] = ( + RefinedGradientBoostedMemorySimulatedAnnealingPlus + ) + LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: + print("RefinedGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientBoostedOptimizer import RefinedGradientBoostedOptimizer + + lama_register["RefinedGradientBoostedOptimizer"] = RefinedGradientBoostedOptimizer + LLAMARefinedGradientBoostedOptimizer = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedOptimizer" + ).set_name("LLAMARefinedGradientBoostedOptimizer", register=True) +except Exception as e: + print("RefinedGradientBoostedOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedGradientGuidedEvolutionStrategy import ( + RefinedGradientGuidedEvolutionStrategy, + ) + + lama_register["RefinedGradientGuidedEvolutionStrategy"] = RefinedGradientGuidedEvolutionStrategy + LLAMARefinedGradientGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedGradientGuidedEvolutionStrategy" + ).set_name("LLAMARefinedGradientGuidedEvolutionStrategy", register=True) +except Exception as e: + print("RefinedGradientGuidedEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution import ( + RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution + ) + LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridAdaptiveDifferentialEvolution import ( + RefinedHybridAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedHybridAdaptiveDifferentialEvolution"] = RefinedHybridAdaptiveDifferentialEvolution + LLAMARefinedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RefinedHybridAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridAdaptiveGradientPSO import RefinedHybridAdaptiveGradientPSO + + lama_register["RefinedHybridAdaptiveGradientPSO"] = RefinedHybridAdaptiveGradientPSO + LLAMARefinedHybridAdaptiveGradientPSO = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveGradientPSO" + ).set_name("LLAMARefinedHybridAdaptiveGradientPSO", register=True) +except Exception as e: + print("RefinedHybridAdaptiveGradientPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridAdaptiveMultiStageOptimization import ( + RefinedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["RefinedHybridAdaptiveMultiStageOptimization"] = RefinedHybridAdaptiveMultiStageOptimization + LLAMARefinedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("RefinedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridCovarianceMatrixDifferentialEvolution import ( + RefinedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedHybridCovarianceMatrixDifferentialEvolution"] = ( + RefinedHybridCovarianceMatrixDifferentialEvolution + ) + LLAMARefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("RefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridDEPSO import RefinedHybridDEPSO + + lama_register["RefinedHybridDEPSO"] = RefinedHybridDEPSO + LLAMARefinedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO").set_name( + "LLAMARefinedHybridDEPSO", register=True + ) +except Exception as e: + print("RefinedHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridDEPSOWithAdaptiveMemoryV4 import ( + RefinedHybridDEPSOWithAdaptiveMemoryV4, + ) + + lama_register["RefinedHybridDEPSOWithAdaptiveMemoryV4"] = RefinedHybridDEPSOWithAdaptiveMemoryV4 + LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4 = NonObjectOptimizer( + method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4" + ).set_name("LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4", register=True) +except Exception as e: + print("RefinedHybridDEPSOWithAdaptiveMemoryV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridDEPSOWithDynamicAdaptationV3 import ( + RefinedHybridDEPSOWithDynamicAdaptationV3, + ) + + lama_register["RefinedHybridDEPSOWithDynamicAdaptationV3"] = RefinedHybridDEPSOWithDynamicAdaptationV3 + LLAMARefinedHybridDEPSOWithDynamicAdaptationV3 = NonObjectOptimizer( + method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3" + ).set_name("LLAMARefinedHybridDEPSOWithDynamicAdaptationV3", register=True) +except Exception as e: + print("RefinedHybridDEPSOWithDynamicAdaptationV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( + RefinedHybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["RefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + RefinedHybridDualPhaseParticleSwarmDifferentialEvolution + ) + LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: + print("RefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridDynamicClusterOptimization import ( + RefinedHybridDynamicClusterOptimization, + ) + + lama_register["RefinedHybridDynamicClusterOptimization"] = RefinedHybridDynamicClusterOptimization + LLAMARefinedHybridDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMARefinedHybridDynamicClusterOptimization" + ).set_name("LLAMARefinedHybridDynamicClusterOptimization", register=True) +except Exception as e: + print("RefinedHybridDynamicClusterOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE import ( + RefinedHybridEliteGuidedMutationDE, + ) + + lama_register["RefinedHybridEliteGuidedMutationDE"] = RefinedHybridEliteGuidedMutationDE + LLAMARefinedHybridEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE", register=True) +except Exception as e: + print("RefinedHybridEliteGuidedMutationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v2 import ( + RefinedHybridEliteGuidedMutationDE_v2, + ) + + lama_register["RefinedHybridEliteGuidedMutationDE_v2"] = RefinedHybridEliteGuidedMutationDE_v2 + LLAMARefinedHybridEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE_v2" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v2", register=True) +except Exception as e: + print("RefinedHybridEliteGuidedMutationDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v3 import ( + RefinedHybridEliteGuidedMutationDE_v3, + ) + + lama_register["RefinedHybridEliteGuidedMutationDE_v3"] = RefinedHybridEliteGuidedMutationDE_v3 + LLAMARefinedHybridEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE_v3" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v3", register=True) +except Exception as e: + print("RefinedHybridEliteGuidedMutationDE_v3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridEvolutionStrategyV4 import RefinedHybridEvolutionStrategyV4 + + lama_register["RefinedHybridEvolutionStrategyV4"] = RefinedHybridEvolutionStrategyV4 + LLAMARefinedHybridEvolutionStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedHybridEvolutionStrategyV4" + ).set_name("LLAMARefinedHybridEvolutionStrategyV4", register=True) +except Exception as e: + print("RefinedHybridEvolutionStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridEvolutionaryAnnealingOptimizer import ( + RefinedHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["RefinedHybridEvolutionaryAnnealingOptimizer"] = RefinedHybridEvolutionaryAnnealingOptimizer + LLAMARefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMARefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: + print("RefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridOptimizer import RefinedHybridOptimizer + + lama_register["RefinedHybridOptimizer"] = RefinedHybridOptimizer + LLAMARefinedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer").set_name( + "LLAMARefinedHybridOptimizer", register=True + ) +except Exception as e: + print("RefinedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridPSODEOptimizer import RefinedHybridPSODEOptimizer + + lama_register["RefinedHybridPSODEOptimizer"] = RefinedHybridPSODEOptimizer + LLAMARefinedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer").set_name( + "LLAMARefinedHybridPSODEOptimizer", register=True + ) +except Exception as e: + print("RefinedHybridPSODEOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridPSODESimulatedAnnealing import ( + RefinedHybridPSODESimulatedAnnealing, + ) + + lama_register["RefinedHybridPSODESimulatedAnnealing"] = RefinedHybridPSODESimulatedAnnealing + LLAMARefinedHybridPSODESimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedHybridPSODESimulatedAnnealing" + ).set_name("LLAMARefinedHybridPSODESimulatedAnnealing", register=True) +except Exception as e: + print("RefinedHybridPSODESimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridPSO_DE import RefinedHybridPSO_DE + + lama_register["RefinedHybridPSO_DE"] = RefinedHybridPSO_DE + LLAMARefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE").set_name( + "LLAMARefinedHybridPSO_DE", register=True + ) +except Exception as e: + print("RefinedHybridPSO_DE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridPrecisionSearch import RefinedHybridPrecisionSearch + + lama_register["RefinedHybridPrecisionSearch"] = RefinedHybridPrecisionSearch + LLAMARefinedHybridPrecisionSearch = NonObjectOptimizer( + method="LLAMARefinedHybridPrecisionSearch" + ).set_name("LLAMARefinedHybridPrecisionSearch", register=True) +except Exception as e: + print("RefinedHybridPrecisionSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridQuantumAdaptiveDE import RefinedHybridQuantumAdaptiveDE + + lama_register["RefinedHybridQuantumAdaptiveDE"] = RefinedHybridQuantumAdaptiveDE + LLAMARefinedHybridQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedHybridQuantumAdaptiveDE" + ).set_name("LLAMARefinedHybridQuantumAdaptiveDE", register=True) +except Exception as e: + print("RefinedHybridQuantumAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridQuantumLevyAdaptiveSwarm import ( + RefinedHybridQuantumLevyAdaptiveSwarm, + ) + + lama_register["RefinedHybridQuantumLevyAdaptiveSwarm"] = RefinedHybridQuantumLevyAdaptiveSwarm + LLAMARefinedHybridQuantumLevyAdaptiveSwarm = NonObjectOptimizer( + method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm" + ).set_name("LLAMARefinedHybridQuantumLevyAdaptiveSwarm", register=True) +except Exception as e: + print("RefinedHybridQuantumLevyAdaptiveSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHybridQuasiRandomDEGradientAnnealing import ( + RefinedHybridQuasiRandomDEGradientAnnealing, + ) + + lama_register["RefinedHybridQuasiRandomDEGradientAnnealing"] = RefinedHybridQuasiRandomDEGradientAnnealing + LLAMARefinedHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing" + ).set_name("LLAMARefinedHybridQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: + print("RefinedHybridQuasiRandomDEGradientAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 import ( + RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2, + ) + + lama_register["RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2"] = ( + RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 + ) + LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 = NonObjectOptimizer( + method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2" + ).set_name("LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2", register=True) +except Exception as e: + print("RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperEvolvedDynamicRAMEDS import RefinedHyperEvolvedDynamicRAMEDS + + lama_register["RefinedHyperEvolvedDynamicRAMEDS"] = RefinedHyperEvolvedDynamicRAMEDS + LLAMARefinedHyperEvolvedDynamicRAMEDS = NonObjectOptimizer( + method="LLAMARefinedHyperEvolvedDynamicRAMEDS" + ).set_name("LLAMARefinedHyperEvolvedDynamicRAMEDS", register=True) +except Exception as e: + print("RefinedHyperEvolvedDynamicRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperOptimizedDynamicPrecisionOptimizer import ( + RefinedHyperOptimizedDynamicPrecisionOptimizer, + ) + + lama_register["RefinedHyperOptimizedDynamicPrecisionOptimizer"] = ( + RefinedHyperOptimizedDynamicPrecisionOptimizer + ) + LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer" + ).set_name("LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer", register=True) +except Exception as e: + print("RefinedHyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperOptimizedThermalEvolutionaryOptimizer import ( + RefinedHyperOptimizedThermalEvolutionaryOptimizer, + ) + + lama_register["RefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( + RefinedHyperOptimizedThermalEvolutionaryOptimizer + ) + LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: + print("RefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperRefinedDynamicPrecisionOptimizerV50 import ( + RefinedHyperRefinedDynamicPrecisionOptimizerV50, + ) + + lama_register["RefinedHyperRefinedDynamicPrecisionOptimizerV50"] = ( + RefinedHyperRefinedDynamicPrecisionOptimizerV50 + ) + LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50 = NonObjectOptimizer( + method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50" + ).set_name("LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50", register=True) +except Exception as e: + print("RefinedHyperRefinedDynamicPrecisionOptimizerV50 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV52 import ( + RefinedHyperStrategicOptimizerV52, + ) + + lama_register["RefinedHyperStrategicOptimizerV52"] = RefinedHyperStrategicOptimizerV52 + LLAMARefinedHyperStrategicOptimizerV52 = NonObjectOptimizer( + method="LLAMARefinedHyperStrategicOptimizerV52" + ).set_name("LLAMARefinedHyperStrategicOptimizerV52", register=True) +except Exception as e: + print("RefinedHyperStrategicOptimizerV52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV55 import ( + RefinedHyperStrategicOptimizerV55, + ) + + lama_register["RefinedHyperStrategicOptimizerV55"] = RefinedHyperStrategicOptimizerV55 + LLAMARefinedHyperStrategicOptimizerV55 = NonObjectOptimizer( + method="LLAMARefinedHyperStrategicOptimizerV55" + ).set_name("LLAMARefinedHyperStrategicOptimizerV55", register=True) +except Exception as e: + print("RefinedHyperStrategicOptimizerV55 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution import ( + RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( + RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution + ) + LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 import ( + RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2, + ) + + lama_register["RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2"] = ( + RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 + ) + LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2" + ).set_name("LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2", register=True) +except Exception as e: + print("RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 import ( + RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4, + ) + + lama_register["RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4"] = ( + RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 + ) + LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 = NonObjectOptimizer( + method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4" + ).set_name("LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4", register=True) +except Exception as e: + print("RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedInertiaFocalOptimizer import RefinedInertiaFocalOptimizer + + lama_register["RefinedInertiaFocalOptimizer"] = RefinedInertiaFocalOptimizer + LLAMARefinedInertiaFocalOptimizer = NonObjectOptimizer( + method="LLAMARefinedInertiaFocalOptimizer" + ).set_name("LLAMARefinedInertiaFocalOptimizer", register=True) +except Exception as e: + print("RefinedInertiaFocalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedIntelligentEvolvingAdaptiveStrategyV35 import ( + RefinedIntelligentEvolvingAdaptiveStrategyV35, + ) + + lama_register["RefinedIntelligentEvolvingAdaptiveStrategyV35"] = ( + RefinedIntelligentEvolvingAdaptiveStrategyV35 + ) + LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35 = NonObjectOptimizer( + method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35" + ).set_name("LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35", register=True) +except Exception as e: + print("RefinedIntelligentEvolvingAdaptiveStrategyV35 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV10Plus import ( + RefinedIslandEvolutionStrategyV10Plus, + ) + + lama_register["RefinedIslandEvolutionStrategyV10Plus"] = RefinedIslandEvolutionStrategyV10Plus + LLAMARefinedIslandEvolutionStrategyV10Plus = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV10Plus" + ).set_name("LLAMARefinedIslandEvolutionStrategyV10Plus", register=True) +except Exception as e: + print("RefinedIslandEvolutionStrategyV10Plus can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV2 import RefinedIslandEvolutionStrategyV2 + + lama_register["RefinedIslandEvolutionStrategyV2"] = RefinedIslandEvolutionStrategyV2 + LLAMARefinedIslandEvolutionStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV2" + ).set_name("LLAMARefinedIslandEvolutionStrategyV2", register=True) +except Exception as e: + print("RefinedIslandEvolutionStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV6 import RefinedIslandEvolutionStrategyV6 + + lama_register["RefinedIslandEvolutionStrategyV6"] = RefinedIslandEvolutionStrategyV6 + LLAMARefinedIslandEvolutionStrategyV6 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV6" + ).set_name("LLAMARefinedIslandEvolutionStrategyV6", register=True) +except Exception as e: + print("RefinedIslandEvolutionStrategyV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV9 import RefinedIslandEvolutionStrategyV9 + + lama_register["RefinedIslandEvolutionStrategyV9"] = RefinedIslandEvolutionStrategyV9 + LLAMARefinedIslandEvolutionStrategyV9 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV9" + ).set_name("LLAMARefinedIslandEvolutionStrategyV9", register=True) +except Exception as e: + print("RefinedIslandEvolutionStrategyV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemeticDifferentialEvolution import ( + RefinedMemeticDifferentialEvolution, + ) + + lama_register["RefinedMemeticDifferentialEvolution"] = RefinedMemeticDifferentialEvolution + LLAMARefinedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMemeticDifferentialEvolution" + ).set_name("LLAMARefinedMemeticDifferentialEvolution", register=True) +except Exception as e: + print("RefinedMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizer import RefinedMemeticDiverseOptimizer + + lama_register["RefinedMemeticDiverseOptimizer"] = RefinedMemeticDiverseOptimizer + LLAMARefinedMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemeticDiverseOptimizer" + ).set_name("LLAMARefinedMemeticDiverseOptimizer", register=True) +except Exception as e: + print("RefinedMemeticDiverseOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizerV4 import RefinedMemeticDiverseOptimizerV4 + + lama_register["RefinedMemeticDiverseOptimizerV4"] = RefinedMemeticDiverseOptimizerV4 + LLAMARefinedMemeticDiverseOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedMemeticDiverseOptimizerV4" + ).set_name("LLAMARefinedMemeticDiverseOptimizerV4", register=True) +except Exception as e: + print("RefinedMemeticDiverseOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemeticQuantumDifferentialOptimizer import ( + RefinedMemeticQuantumDifferentialOptimizer, + ) + + lama_register["RefinedMemeticQuantumDifferentialOptimizer"] = RefinedMemeticQuantumDifferentialOptimizer + LLAMARefinedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemeticQuantumDifferentialOptimizer" + ).set_name("LLAMARefinedMemeticQuantumDifferentialOptimizer", register=True) +except Exception as e: + print("RefinedMemeticQuantumDifferentialOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryAdaptiveDynamicHybridOptimizer import ( + RefinedMemoryAdaptiveDynamicHybridOptimizer, + ) + + lama_register["RefinedMemoryAdaptiveDynamicHybridOptimizer"] = RefinedMemoryAdaptiveDynamicHybridOptimizer + LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: + print("RefinedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryAdaptiveHybridOptimizer import ( + RefinedMemoryAdaptiveHybridOptimizer, + ) + + lama_register["RefinedMemoryAdaptiveHybridOptimizer"] = RefinedMemoryAdaptiveHybridOptimizer + LLAMARefinedMemoryAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedMemoryAdaptiveHybridOptimizer", register=True) +except Exception as e: + print("RefinedMemoryAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryEnhancedDynamicHybridOptimizer import ( + RefinedMemoryEnhancedDynamicHybridOptimizer, + ) + + lama_register["RefinedMemoryEnhancedDynamicHybridOptimizer"] = RefinedMemoryEnhancedDynamicHybridOptimizer + LLAMARefinedMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMARefinedMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: + print("RefinedMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryEnhancedHybridOptimizerV2 import ( + RefinedMemoryEnhancedHybridOptimizerV2, + ) + + lama_register["RefinedMemoryEnhancedHybridOptimizerV2"] = RefinedMemoryEnhancedHybridOptimizerV2 + LLAMARefinedMemoryEnhancedHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedMemoryEnhancedHybridOptimizerV2" + ).set_name("LLAMARefinedMemoryEnhancedHybridOptimizerV2", register=True) +except Exception as e: + print("RefinedMemoryEnhancedHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 import ( + RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72, + ) + + lama_register["RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72"] = ( + RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 + ) + LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 = NonObjectOptimizer( + method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72" + ).set_name("LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72", register=True) +except Exception as e: + print("RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMemoryGuidedHybridStrategyV63 import ( + RefinedMemoryGuidedHybridStrategyV63, + ) + + lama_register["RefinedMemoryGuidedHybridStrategyV63"] = RefinedMemoryGuidedHybridStrategyV63 + LLAMARefinedMemoryGuidedHybridStrategyV63 = NonObjectOptimizer( + method="LLAMARefinedMemoryGuidedHybridStrategyV63" + ).set_name("LLAMARefinedMemoryGuidedHybridStrategyV63", register=True) +except Exception as e: + print("RefinedMemoryGuidedHybridStrategyV63 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMetaNetAQAPSO import RefinedMetaNetAQAPSO + + lama_register["RefinedMetaNetAQAPSO"] = RefinedMetaNetAQAPSO + LLAMARefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO").set_name( + "LLAMARefinedMetaNetAQAPSO", register=True + ) +except Exception as e: + print("RefinedMetaNetAQAPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiFocalAdaptiveElitistStrategyV4 import ( + RefinedMultiFocalAdaptiveElitistStrategyV4, + ) + + lama_register["RefinedMultiFocalAdaptiveElitistStrategyV4"] = RefinedMultiFocalAdaptiveElitistStrategyV4 + LLAMARefinedMultiFocalAdaptiveElitistStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4" + ).set_name("LLAMARefinedMultiFocalAdaptiveElitistStrategyV4", register=True) +except Exception as e: + print("RefinedMultiFocalAdaptiveElitistStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiOperatorAdaptiveOptimization import ( + RefinedMultiOperatorAdaptiveOptimization, + ) + + lama_register["RefinedMultiOperatorAdaptiveOptimization"] = RefinedMultiOperatorAdaptiveOptimization + LLAMARefinedMultiOperatorAdaptiveOptimization = NonObjectOptimizer( + method="LLAMARefinedMultiOperatorAdaptiveOptimization" + ).set_name("LLAMARefinedMultiOperatorAdaptiveOptimization", register=True) +except Exception as e: + print("RefinedMultiOperatorAdaptiveOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiPhaseAdaptiveHybridDEPSO import ( + RefinedMultiPhaseAdaptiveHybridDEPSO, + ) + + lama_register["RefinedMultiPhaseAdaptiveHybridDEPSO"] = RefinedMultiPhaseAdaptiveHybridDEPSO + LLAMARefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: + print("RefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiStageAdaptiveSearch import RefinedMultiStageAdaptiveSearch + + lama_register["RefinedMultiStageAdaptiveSearch"] = RefinedMultiStageAdaptiveSearch + LLAMARefinedMultiStageAdaptiveSearch = NonObjectOptimizer( + method="LLAMARefinedMultiStageAdaptiveSearch" + ).set_name("LLAMARefinedMultiStageAdaptiveSearch", register=True) +except Exception as e: + print("RefinedMultiStageAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiStrategyDifferentialEvolution import ( + RefinedMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedMultiStrategyDifferentialEvolution"] = RefinedMultiStrategyDifferentialEvolution + LLAMARefinedMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedMultiStrategyDifferentialEvolution", register=True) +except Exception as e: + print("RefinedMultiStrategyDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiStrategySelfAdaptiveDE import ( + RefinedMultiStrategySelfAdaptiveDE, + ) + + lama_register["RefinedMultiStrategySelfAdaptiveDE"] = RefinedMultiStrategySelfAdaptiveDE + LLAMARefinedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMARefinedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: + print("RefinedMultiStrategySelfAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedMultiStrategySwarmDifferentialEvolution import ( + RefinedMultiStrategySwarmDifferentialEvolution, + ) + + lama_register["RefinedMultiStrategySwarmDifferentialEvolution"] = ( + RefinedMultiStrategySwarmDifferentialEvolution + ) + LLAMARefinedMultiStrategySwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMultiStrategySwarmDifferentialEvolution" + ).set_name("LLAMARefinedMultiStrategySwarmDifferentialEvolution", register=True) +except Exception as e: + print("RefinedMultiStrategySwarmDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedNicheDifferentialParticleSwarmOptimizer import ( + RefinedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["RefinedNicheDifferentialParticleSwarmOptimizer"] = ( + RefinedNicheDifferentialParticleSwarmOptimizer + ) + LLAMARefinedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMARefinedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: + print("RefinedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimalDynamicPrecisionOptimizerV15 import ( + RefinedOptimalDynamicPrecisionOptimizerV15, + ) + + lama_register["RefinedOptimalDynamicPrecisionOptimizerV15"] = RefinedOptimalDynamicPrecisionOptimizerV15 + LLAMARefinedOptimalDynamicPrecisionOptimizerV15 = NonObjectOptimizer( + method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15" + ).set_name("LLAMARefinedOptimalDynamicPrecisionOptimizerV15", register=True) +except Exception as e: + print("RefinedOptimalDynamicPrecisionOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimalEnhancedRAMEDS import RefinedOptimalEnhancedRAMEDS + + lama_register["RefinedOptimalEnhancedRAMEDS"] = RefinedOptimalEnhancedRAMEDS + LLAMARefinedOptimalEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedOptimalEnhancedRAMEDS" + ).set_name("LLAMARefinedOptimalEnhancedRAMEDS", register=True) +except Exception as e: + print("RefinedOptimalEnhancedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimalEvolutionaryGradientOptimizerV12 import ( + RefinedOptimalEvolutionaryGradientOptimizerV12, + ) + + lama_register["RefinedOptimalEvolutionaryGradientOptimizerV12"] = ( + RefinedOptimalEvolutionaryGradientOptimizerV12 + ) + LLAMARefinedOptimalEvolutionaryGradientOptimizerV12 = NonObjectOptimizer( + method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12" + ).set_name("LLAMARefinedOptimalEvolutionaryGradientOptimizerV12", register=True) +except Exception as e: + print("RefinedOptimalEvolutionaryGradientOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 import ( + RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5, + ) + + lama_register["RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5"] = ( + RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 + ) + LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 = NonObjectOptimizer( + method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5" + ).set_name("LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5", register=True) +except Exception as e: + print("RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing import ( + RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing + ) + LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimizedEnhancedDualStrategyAdaptiveDE import ( + RefinedOptimizedEnhancedDualStrategyAdaptiveDE, + ) + + lama_register["RefinedOptimizedEnhancedDualStrategyAdaptiveDE"] = ( + RefinedOptimizedEnhancedDualStrategyAdaptiveDE + ) + LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE", register=True) +except Exception as e: + print("RefinedOptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedOptimizedHybridAdaptiveMultiStageOptimization import ( + RefinedOptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["RefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( + RefinedOptimizedHybridAdaptiveMultiStageOptimization + ) + LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: + print("RefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedPrecisionAdaptivePSO import RefinedPrecisionAdaptivePSO + + lama_register["RefinedPrecisionAdaptivePSO"] = RefinedPrecisionAdaptivePSO + LLAMARefinedPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO").set_name( + "LLAMARefinedPrecisionAdaptivePSO", register=True + ) +except Exception as e: + print("RefinedPrecisionAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedPrecisionEnhancedDualStrategyOptimizer import ( + RefinedPrecisionEnhancedDualStrategyOptimizer, + ) + + lama_register["RefinedPrecisionEnhancedDualStrategyOptimizer"] = ( + RefinedPrecisionEnhancedDualStrategyOptimizer + ) + LLAMARefinedPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer" + ).set_name("LLAMARefinedPrecisionEnhancedDualStrategyOptimizer", register=True) +except Exception as e: + print("RefinedPrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedPrecisionEnhancedSpatialAdaptiveEvolver import ( + RefinedPrecisionEnhancedSpatialAdaptiveEvolver, + ) + + lama_register["RefinedPrecisionEnhancedSpatialAdaptiveEvolver"] = ( + RefinedPrecisionEnhancedSpatialAdaptiveEvolver + ) + LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: + print("RefinedPrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedPrecisionEvolutionaryThermalOptimizer import ( + RefinedPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["RefinedPrecisionEvolutionaryThermalOptimizer"] = ( + RefinedPrecisionEvolutionaryThermalOptimizer + ) + LLAMARefinedPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMARefinedPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: + print("RefinedPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedPrecisionTunedCrossoverElitistStrategyV12 import ( + RefinedPrecisionTunedCrossoverElitistStrategyV12, + ) + + lama_register["RefinedPrecisionTunedCrossoverElitistStrategyV12"] = ( + RefinedPrecisionTunedCrossoverElitistStrategyV12 + ) + LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12 = NonObjectOptimizer( + method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12" + ).set_name("LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12", register=True) +except Exception as e: + print("RefinedPrecisionTunedCrossoverElitistStrategyV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedProgressiveParticleSwarmOptimization import ( + RefinedProgressiveParticleSwarmOptimization, + ) + + lama_register["RefinedProgressiveParticleSwarmOptimization"] = RefinedProgressiveParticleSwarmOptimization + LLAMARefinedProgressiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedProgressiveParticleSwarmOptimization" + ).set_name("LLAMARefinedProgressiveParticleSwarmOptimization", register=True) +except Exception as e: + print("RefinedProgressiveParticleSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedProgressiveQuorumEvolutionStrategy import ( + RefinedProgressiveQuorumEvolutionStrategy, + ) + + lama_register["RefinedProgressiveQuorumEvolutionStrategy"] = RefinedProgressiveQuorumEvolutionStrategy + LLAMARefinedProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedProgressiveQuorumEvolutionStrategy" + ).set_name("LLAMARefinedProgressiveQuorumEvolutionStrategy", register=True) +except Exception as e: + print("RefinedProgressiveQuorumEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuadraticAdaptiveEvolutionStrategy import ( + RefinedQuadraticAdaptiveEvolutionStrategy, + ) + + lama_register["RefinedQuadraticAdaptiveEvolutionStrategy"] = RefinedQuadraticAdaptiveEvolutionStrategy + LLAMARefinedQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("RefinedQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveExplorationOptimization import ( + RefinedQuantumAdaptiveExplorationOptimization, + ) + + lama_register["RefinedQuantumAdaptiveExplorationOptimization"] = ( + RefinedQuantumAdaptiveExplorationOptimization + ) + LLAMARefinedQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMARefinedQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridOptimizerV4 import ( + RefinedQuantumAdaptiveHybridOptimizerV4, + ) + + lama_register["RefinedQuantumAdaptiveHybridOptimizerV4"] = RefinedQuantumAdaptiveHybridOptimizerV4 + LLAMARefinedQuantumAdaptiveHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4" + ).set_name("LLAMARefinedQuantumAdaptiveHybridOptimizerV4", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveHybridOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridSearchV3 import ( + RefinedQuantumAdaptiveHybridSearchV3, + ) + + lama_register["RefinedQuantumAdaptiveHybridSearchV3"] = RefinedQuantumAdaptiveHybridSearchV3 + LLAMARefinedQuantumAdaptiveHybridSearchV3 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveHybridSearchV3" + ).set_name("LLAMARefinedQuantumAdaptiveHybridSearchV3", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveHybridSearchV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveLevySwarmOptimization import ( + RefinedQuantumAdaptiveLevySwarmOptimization, + ) + + lama_register["RefinedQuantumAdaptiveLevySwarmOptimization"] = RefinedQuantumAdaptiveLevySwarmOptimization + LLAMARefinedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization" + ).set_name("LLAMARefinedQuantumAdaptiveLevySwarmOptimization", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveMultiPopulationDE import ( + RefinedQuantumAdaptiveMultiPopulationDE, + ) + + lama_register["RefinedQuantumAdaptiveMultiPopulationDE"] = RefinedQuantumAdaptiveMultiPopulationDE + LLAMARefinedQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveMultiPopulationDE" + ).set_name("LLAMARefinedQuantumAdaptiveMultiPopulationDE", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveMultiPopulationDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveOptimizerV2 import ( + RefinedQuantumAdaptiveOptimizerV2, + ) + + lama_register["RefinedQuantumAdaptiveOptimizerV2"] = RefinedQuantumAdaptiveOptimizerV2 + LLAMARefinedQuantumAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveOptimizerV2" + ).set_name("LLAMARefinedQuantumAdaptiveOptimizerV2", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumAdaptiveVelocityOptimizer import ( + RefinedQuantumAdaptiveVelocityOptimizer, + ) + + lama_register["RefinedQuantumAdaptiveVelocityOptimizer"] = RefinedQuantumAdaptiveVelocityOptimizer + LLAMARefinedQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveVelocityOptimizer" + ).set_name("LLAMARefinedQuantumAdaptiveVelocityOptimizer", register=True) +except Exception as e: + print("RefinedQuantumAdaptiveVelocityOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumCognitionAdaptiveTuningOptimizerV15 import ( + RefinedQuantumCognitionAdaptiveTuningOptimizerV15, + ) + + lama_register["RefinedQuantumCognitionAdaptiveTuningOptimizerV15"] = ( + RefinedQuantumCognitionAdaptiveTuningOptimizerV15 + ) + LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15" + ).set_name("LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15", register=True) +except Exception as e: + print("RefinedQuantumCognitionAdaptiveTuningOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumCognitionHybridOptimizerV22 import ( + RefinedQuantumCognitionHybridOptimizerV22, + ) + + lama_register["RefinedQuantumCognitionHybridOptimizerV22"] = RefinedQuantumCognitionHybridOptimizerV22 + LLAMARefinedQuantumCognitionHybridOptimizerV22 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionHybridOptimizerV22" + ).set_name("LLAMARefinedQuantumCognitionHybridOptimizerV22", register=True) +except Exception as e: + print("RefinedQuantumCognitionHybridOptimizerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV13 import ( + RefinedQuantumCognitionOptimizerV13, + ) + + lama_register["RefinedQuantumCognitionOptimizerV13"] = RefinedQuantumCognitionOptimizerV13 + LLAMARefinedQuantumCognitionOptimizerV13 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionOptimizerV13" + ).set_name("LLAMARefinedQuantumCognitionOptimizerV13", register=True) +except Exception as e: + print("RefinedQuantumCognitionOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV4 import ( + RefinedQuantumCognitionOptimizerV4, + ) + + lama_register["RefinedQuantumCognitionOptimizerV4"] = RefinedQuantumCognitionOptimizerV4 + LLAMARefinedQuantumCognitionOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionOptimizerV4" + ).set_name("LLAMARefinedQuantumCognitionOptimizerV4", register=True) +except Exception as e: + print("RefinedQuantumCognitionOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 import ( + RefinedQuantumCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["RefinedQuantumCovarianceMatrixDifferentialEvolutionV4"] = ( + RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 + ) + LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: + print("RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism + ) + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism", register=True) +except Exception as e: + print("RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveLearning import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveLearning, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveLearning + ) + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) +except Exception as e: + print("RefinedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + ) + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" + ).set_name( + "LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True + ) +except Exception as e: + print( + "RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e + ) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism + ) + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism", register=True) +except Exception as e: + print("RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialMemeticOptimizer import ( + RefinedQuantumDifferentialMemeticOptimizer, + ) + + lama_register["RefinedQuantumDifferentialMemeticOptimizer"] = RefinedQuantumDifferentialMemeticOptimizer + LLAMARefinedQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMARefinedQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: + print("RefinedQuantumDifferentialMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumDifferentialParticleOptimizerWithElitism import ( + RefinedQuantumDifferentialParticleOptimizerWithElitism, + ) + + lama_register["RefinedQuantumDifferentialParticleOptimizerWithElitism"] = ( + RefinedQuantumDifferentialParticleOptimizerWithElitism + ) + LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism" + ).set_name("LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism", register=True) +except Exception as e: + print("RefinedQuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE import ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE, + ) + + lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE"] = RefinedQuantumEnhancedAdaptiveMultiPhaseDE + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE" + ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE", register=True) +except Exception as e: + print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 import ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2, + ) + + lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2"] = ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 + ) + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2" + ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2", register=True) +except Exception as e: + print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 import ( + RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6, + ) + + lama_register["RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6"] = ( + RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 + ) + LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6" + ).set_name("LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6", register=True) +except Exception as e: + print("RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEnhancedHybridDEPSO import ( + RefinedQuantumEnhancedHybridDEPSO, + ) + + lama_register["RefinedQuantumEnhancedHybridDEPSO"] = RefinedQuantumEnhancedHybridDEPSO + LLAMARefinedQuantumEnhancedHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedHybridDEPSO" + ).set_name("LLAMARefinedQuantumEnhancedHybridDEPSO", register=True) +except Exception as e: + print("RefinedQuantumEnhancedHybridDEPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptation import ( + RefinedQuantumEvolutionaryAdaptation, + ) + + lama_register["RefinedQuantumEvolutionaryAdaptation"] = RefinedQuantumEvolutionaryAdaptation + LLAMARefinedQuantumEvolutionaryAdaptation = NonObjectOptimizer( + method="LLAMARefinedQuantumEvolutionaryAdaptation" + ).set_name("LLAMARefinedQuantumEvolutionaryAdaptation", register=True) +except Exception as e: + print("RefinedQuantumEvolutionaryAdaptation can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptiveOptimizer import ( + RefinedQuantumEvolutionaryAdaptiveOptimizer, + ) + + lama_register["RefinedQuantumEvolutionaryAdaptiveOptimizer"] = RefinedQuantumEvolutionaryAdaptiveOptimizer + LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer" + ).set_name("LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer", register=True) +except Exception as e: + print("RefinedQuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumFluxDifferentialSwarm import ( + RefinedQuantumFluxDifferentialSwarm, + ) + + lama_register["RefinedQuantumFluxDifferentialSwarm"] = RefinedQuantumFluxDifferentialSwarm + LLAMARefinedQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMARefinedQuantumFluxDifferentialSwarm" + ).set_name("LLAMARefinedQuantumFluxDifferentialSwarm", register=True) +except Exception as e: + print("RefinedQuantumFluxDifferentialSwarm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumGradientAdaptiveExplorationOptimization import ( + RefinedQuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["RefinedQuantumGradientAdaptiveExplorationOptimization"] = ( + RefinedQuantumGradientAdaptiveExplorationOptimization + ) + LLAMARefinedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMARefinedQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: + print("RefinedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumGradientSearch import RefinedQuantumGradientSearch + + lama_register["RefinedQuantumGradientSearch"] = RefinedQuantumGradientSearch + LLAMARefinedQuantumGradientSearch = NonObjectOptimizer( + method="LLAMARefinedQuantumGradientSearch" + ).set_name("LLAMARefinedQuantumGradientSearch", register=True) +except Exception as e: + print("RefinedQuantumGradientSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV6 import ( + RefinedQuantumGuidedHybridSearchV6, + ) + + lama_register["RefinedQuantumGuidedHybridSearchV6"] = RefinedQuantumGuidedHybridSearchV6 + LLAMARefinedQuantumGuidedHybridSearchV6 = NonObjectOptimizer( + method="LLAMARefinedQuantumGuidedHybridSearchV6" + ).set_name("LLAMARefinedQuantumGuidedHybridSearchV6", register=True) +except Exception as e: + print("RefinedQuantumGuidedHybridSearchV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV8 import ( + RefinedQuantumGuidedHybridSearchV8, + ) + + lama_register["RefinedQuantumGuidedHybridSearchV8"] = RefinedQuantumGuidedHybridSearchV8 + LLAMARefinedQuantumGuidedHybridSearchV8 = NonObjectOptimizer( + method="LLAMARefinedQuantumGuidedHybridSearchV8" + ).set_name("LLAMARefinedQuantumGuidedHybridSearchV8", register=True) +except Exception as e: + print("RefinedQuantumGuidedHybridSearchV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumHybridAdaptiveStrategyV3 import ( + RefinedQuantumHybridAdaptiveStrategyV3, + ) + + lama_register["RefinedQuantumHybridAdaptiveStrategyV3"] = RefinedQuantumHybridAdaptiveStrategyV3 + LLAMARefinedQuantumHybridAdaptiveStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridAdaptiveStrategyV3" + ).set_name("LLAMARefinedQuantumHybridAdaptiveStrategyV3", register=True) +except Exception as e: + print("RefinedQuantumHybridAdaptiveStrategyV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumHybridDynamicAdaptiveDE import ( + RefinedQuantumHybridDynamicAdaptiveDE, + ) + + lama_register["RefinedQuantumHybridDynamicAdaptiveDE"] = RefinedQuantumHybridDynamicAdaptiveDE + LLAMARefinedQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridDynamicAdaptiveDE" + ).set_name("LLAMARefinedQuantumHybridDynamicAdaptiveDE", register=True) +except Exception as e: + print("RefinedQuantumHybridDynamicAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumHybridEliteAdaptiveDE import ( + RefinedQuantumHybridEliteAdaptiveDE, + ) + + lama_register["RefinedQuantumHybridEliteAdaptiveDE"] = RefinedQuantumHybridEliteAdaptiveDE + LLAMARefinedQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridEliteAdaptiveDE" + ).set_name("LLAMARefinedQuantumHybridEliteAdaptiveDE", register=True) +except Exception as e: + print("RefinedQuantumHybridEliteAdaptiveDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInfluenceLocalSearchOptimizer import ( + RefinedQuantumInfluenceLocalSearchOptimizer, + ) + + lama_register["RefinedQuantumInfluenceLocalSearchOptimizer"] = RefinedQuantumInfluenceLocalSearchOptimizer + LLAMARefinedQuantumInfluenceLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer" + ).set_name("LLAMARefinedQuantumInfluenceLocalSearchOptimizer", register=True) +except Exception as e: + print("RefinedQuantumInfluenceLocalSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptiveInertiaOptimizer import ( + RefinedQuantumInformedAdaptiveInertiaOptimizer, + ) + + lama_register["RefinedQuantumInformedAdaptiveInertiaOptimizer"] = ( + RefinedQuantumInformedAdaptiveInertiaOptimizer + ) + LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer" + ).set_name("LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer", register=True) +except Exception as e: + print("RefinedQuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptivePSO import ( + RefinedQuantumInformedAdaptivePSO, + ) + + lama_register["RefinedQuantumInformedAdaptivePSO"] = RefinedQuantumInformedAdaptivePSO + LLAMARefinedQuantumInformedAdaptivePSO = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedAdaptivePSO" + ).set_name("LLAMARefinedQuantumInformedAdaptivePSO", register=True) +except Exception as e: + print("RefinedQuantumInformedAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInformedDifferentialStrategyV2 import ( + RefinedQuantumInformedDifferentialStrategyV2, + ) + + lama_register["RefinedQuantumInformedDifferentialStrategyV2"] = ( + RefinedQuantumInformedDifferentialStrategyV2 + ) + LLAMARefinedQuantumInformedDifferentialStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedDifferentialStrategyV2" + ).set_name("LLAMARefinedQuantumInformedDifferentialStrategyV2", register=True) +except Exception as e: + print("RefinedQuantumInformedDifferentialStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInformedGradientOptimizer import ( + RefinedQuantumInformedGradientOptimizer, + ) + + lama_register["RefinedQuantumInformedGradientOptimizer"] = RefinedQuantumInformedGradientOptimizer + LLAMARefinedQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedGradientOptimizer" + ).set_name("LLAMARefinedQuantumInformedGradientOptimizer", register=True) +except Exception as e: + print("RefinedQuantumInformedGradientOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInformedPSO import RefinedQuantumInformedPSO + + lama_register["RefinedQuantumInformedPSO"] = RefinedQuantumInformedPSO + LLAMARefinedQuantumInformedPSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO").set_name( + "LLAMARefinedQuantumInformedPSO", register=True + ) +except Exception as e: + print("RefinedQuantumInformedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumInfusedAdaptiveStrategyV2 import ( + RefinedQuantumInfusedAdaptiveStrategyV2, + ) + + lama_register["RefinedQuantumInfusedAdaptiveStrategyV2"] = RefinedQuantumInfusedAdaptiveStrategyV2 + LLAMARefinedQuantumInfusedAdaptiveStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2" + ).set_name("LLAMARefinedQuantumInfusedAdaptiveStrategyV2", register=True) +except Exception as e: + print("RefinedQuantumInfusedAdaptiveStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumLevyMemeticDifferentialEvolution import ( + RefinedQuantumLevyMemeticDifferentialEvolution, + ) + + lama_register["RefinedQuantumLevyMemeticDifferentialEvolution"] = ( + RefinedQuantumLevyMemeticDifferentialEvolution + ) + LLAMARefinedQuantumLevyMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution" + ).set_name("LLAMARefinedQuantumLevyMemeticDifferentialEvolution", register=True) +except Exception as e: + print("RefinedQuantumLevyMemeticDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumMultiStrategyOptimization import ( + RefinedQuantumMultiStrategyOptimization, + ) + + lama_register["RefinedQuantumMultiStrategyOptimization"] = RefinedQuantumMultiStrategyOptimization + LLAMARefinedQuantumMultiStrategyOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumMultiStrategyOptimization" + ).set_name("LLAMARefinedQuantumMultiStrategyOptimization", register=True) +except Exception as e: + print("RefinedQuantumMultiStrategyOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumNesterovSynergyV2 import RefinedQuantumNesterovSynergyV2 + + lama_register["RefinedQuantumNesterovSynergyV2"] = RefinedQuantumNesterovSynergyV2 + LLAMARefinedQuantumNesterovSynergyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumNesterovSynergyV2" + ).set_name("LLAMARefinedQuantumNesterovSynergyV2", register=True) +except Exception as e: + print("RefinedQuantumNesterovSynergyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumResilientCrossoverEnhancer import ( + RefinedQuantumResilientCrossoverEnhancer, + ) + + lama_register["RefinedQuantumResilientCrossoverEnhancer"] = RefinedQuantumResilientCrossoverEnhancer + LLAMARefinedQuantumResilientCrossoverEnhancer = NonObjectOptimizer( + method="LLAMARefinedQuantumResilientCrossoverEnhancer" + ).set_name("LLAMARefinedQuantumResilientCrossoverEnhancer", register=True) +except Exception as e: + print("RefinedQuantumResilientCrossoverEnhancer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumSwarmOptimizer import RefinedQuantumSwarmOptimizer + + lama_register["RefinedQuantumSwarmOptimizer"] = RefinedQuantumSwarmOptimizer + LLAMARefinedQuantumSwarmOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumSwarmOptimizer" + ).set_name("LLAMARefinedQuantumSwarmOptimizer", register=True) +except Exception as e: + print("RefinedQuantumSwarmOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV2 import ( + RefinedQuantumSymbioticStrategyV2, + ) + + lama_register["RefinedQuantumSymbioticStrategyV2"] = RefinedQuantumSymbioticStrategyV2 + LLAMARefinedQuantumSymbioticStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumSymbioticStrategyV2" + ).set_name("LLAMARefinedQuantumSymbioticStrategyV2", register=True) +except Exception as e: + print("RefinedQuantumSymbioticStrategyV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV4 import ( + RefinedQuantumSymbioticStrategyV4, + ) + + lama_register["RefinedQuantumSymbioticStrategyV4"] = RefinedQuantumSymbioticStrategyV4 + LLAMARefinedQuantumSymbioticStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumSymbioticStrategyV4" + ).set_name("LLAMARefinedQuantumSymbioticStrategyV4", register=True) +except Exception as e: + print("RefinedQuantumSymbioticStrategyV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedQuantumTunnelingOptimizerV19 import ( + RefinedQuantumTunnelingOptimizerV19, + ) + + lama_register["RefinedQuantumTunnelingOptimizerV19"] = RefinedQuantumTunnelingOptimizerV19 + LLAMARefinedQuantumTunnelingOptimizerV19 = NonObjectOptimizer( + method="LLAMARefinedQuantumTunnelingOptimizerV19" + ).set_name("LLAMARefinedQuantumTunnelingOptimizerV19", register=True) +except Exception as e: + print("RefinedQuantumTunnelingOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedRAMEDSPro import RefinedRAMEDSPro + + lama_register["RefinedRAMEDSPro"] = RefinedRAMEDSPro + LLAMARefinedRAMEDSPro = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro").set_name( + "LLAMARefinedRAMEDSPro", register=True + ) +except Exception as e: + print("RefinedRAMEDSPro can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedRAMEDSv2 import RefinedRAMEDSv2 + + lama_register["RefinedRAMEDSv2"] = RefinedRAMEDSv2 + LLAMARefinedRAMEDSv2 = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2").set_name( + "LLAMARefinedRAMEDSv2", register=True + ) +except Exception as e: + print("RefinedRAMEDSv2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedSpatialAdaptiveOptimizer import RefinedSpatialAdaptiveOptimizer + + lama_register["RefinedSpatialAdaptiveOptimizer"] = RefinedSpatialAdaptiveOptimizer + LLAMARefinedSpatialAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedSpatialAdaptiveOptimizer" + ).set_name("LLAMARefinedSpatialAdaptiveOptimizer", register=True) +except Exception as e: + print("RefinedSpatialAdaptiveOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedSpiralSearchOptimizer import RefinedSpiralSearchOptimizer + + lama_register["RefinedSpiralSearchOptimizer"] = RefinedSpiralSearchOptimizer + LLAMARefinedSpiralSearchOptimizer = NonObjectOptimizer( + method="LLAMARefinedSpiralSearchOptimizer" + ).set_name("LLAMARefinedSpiralSearchOptimizer", register=True) +except Exception as e: + print("RefinedSpiralSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedStochasticBalancingOptimizer import ( + RefinedStochasticBalancingOptimizer, + ) + + lama_register["RefinedStochasticBalancingOptimizer"] = RefinedStochasticBalancingOptimizer + LLAMARefinedStochasticBalancingOptimizer = NonObjectOptimizer( + method="LLAMARefinedStochasticBalancingOptimizer" + ).set_name("LLAMARefinedStochasticBalancingOptimizer", register=True) +except Exception as e: + print("RefinedStochasticBalancingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedStrategicAdaptiveDifferentialEvolution import ( + RefinedStrategicAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedStrategicAdaptiveDifferentialEvolution"] = ( + RefinedStrategicAdaptiveDifferentialEvolution + ) + LLAMARefinedStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedStrategicAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedStrategicAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RefinedStrategicAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedStrategicDiminishingEvolver import ( + RefinedStrategicDiminishingEvolver, + ) + + lama_register["RefinedStrategicDiminishingEvolver"] = RefinedStrategicDiminishingEvolver + LLAMARefinedStrategicDiminishingEvolver = NonObjectOptimizer( + method="LLAMARefinedStrategicDiminishingEvolver" + ).set_name("LLAMARefinedStrategicDiminishingEvolver", register=True) +except Exception as e: + print("RefinedStrategicDiminishingEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedStrategicQuorumWithDirectionalBias import ( + RefinedStrategicQuorumWithDirectionalBias, + ) + + lama_register["RefinedStrategicQuorumWithDirectionalBias"] = RefinedStrategicQuorumWithDirectionalBias + LLAMARefinedStrategicQuorumWithDirectionalBias = NonObjectOptimizer( + method="LLAMARefinedStrategicQuorumWithDirectionalBias" + ).set_name("LLAMARefinedStrategicQuorumWithDirectionalBias", register=True) +except Exception as e: + print("RefinedStrategicQuorumWithDirectionalBias can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedSuperiorAdaptiveStrategyDE import ( + RefinedSuperiorAdaptiveStrategyDE, + ) + + lama_register["RefinedSuperiorAdaptiveStrategyDE"] = RefinedSuperiorAdaptiveStrategyDE + LLAMARefinedSuperiorAdaptiveStrategyDE = NonObjectOptimizer( + method="LLAMARefinedSuperiorAdaptiveStrategyDE" + ).set_name("LLAMARefinedSuperiorAdaptiveStrategyDE", register=True) +except Exception as e: + print("RefinedSuperiorAdaptiveStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedTemporalAdaptiveDifferentialEvolution import ( + RefinedTemporalAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedTemporalAdaptiveDifferentialEvolution"] = ( + RefinedTemporalAdaptiveDifferentialEvolution + ) + LLAMARefinedTemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedTemporalAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedTemporalAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RefinedTemporalAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimateEnhancedGuidedMassQGSA_v71 import ( + RefinedUltimateEnhancedGuidedMassQGSA_v71, + ) + + lama_register["RefinedUltimateEnhancedGuidedMassQGSA_v71"] = RefinedUltimateEnhancedGuidedMassQGSA_v71 + LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71 = NonObjectOptimizer( + method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71" + ).set_name("LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71", register=True) +except Exception as e: + print("RefinedUltimateEnhancedGuidedMassQGSA_v71 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV16 import ( + RefinedUltimateEvolutionaryGradientOptimizerV16, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV16"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV16 + ) + LLAMARefinedUltimateEvolutionaryGradientOptimizerV16 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV16", register=True) +except Exception as e: + print("RefinedUltimateEvolutionaryGradientOptimizerV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV17 import ( + RefinedUltimateEvolutionaryGradientOptimizerV17, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV17"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV17 + ) + LLAMARefinedUltimateEvolutionaryGradientOptimizerV17 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV17", register=True) +except Exception as e: + print("RefinedUltimateEvolutionaryGradientOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV34 import ( + RefinedUltimateEvolutionaryGradientOptimizerV34, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV34"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV34 + ) + LLAMARefinedUltimateEvolutionaryGradientOptimizerV34 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV34", register=True) +except Exception as e: + print("RefinedUltimateEvolutionaryGradientOptimizerV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryOptimizer import ( + RefinedUltimateEvolutionaryOptimizer, + ) + + lama_register["RefinedUltimateEvolutionaryOptimizer"] = RefinedUltimateEvolutionaryOptimizer + LLAMARefinedUltimateEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryOptimizer" + ).set_name("LLAMARefinedUltimateEvolutionaryOptimizer", register=True) +except Exception as e: + print("RefinedUltimateEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltimatePrecisionEvolutionaryOptimizerV42 import ( + RefinedUltimatePrecisionEvolutionaryOptimizerV42, + ) + + lama_register["RefinedUltimatePrecisionEvolutionaryOptimizerV42"] = ( + RefinedUltimatePrecisionEvolutionaryOptimizerV42 + ) + LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42 = NonObjectOptimizer( + method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42" + ).set_name("LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42", register=True) +except Exception as e: + print("RefinedUltimatePrecisionEvolutionaryOptimizerV42 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( + RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( + RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer + ) + LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltraEvolutionaryGradientOptimizerV28 import ( + RefinedUltraEvolutionaryGradientOptimizerV28, + ) + + lama_register["RefinedUltraEvolutionaryGradientOptimizerV28"] = ( + RefinedUltraEvolutionaryGradientOptimizerV28 + ) + LLAMARefinedUltraEvolutionaryGradientOptimizerV28 = NonObjectOptimizer( + method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28" + ).set_name("LLAMARefinedUltraEvolutionaryGradientOptimizerV28", register=True) +except Exception as e: + print("RefinedUltraEvolutionaryGradientOptimizerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltraOptimizedDynamicPrecisionOptimizerV20 import ( + RefinedUltraOptimizedDynamicPrecisionOptimizerV20, + ) + + lama_register["RefinedUltraOptimizedDynamicPrecisionOptimizerV20"] = ( + RefinedUltraOptimizedDynamicPrecisionOptimizerV20 + ) + LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20 = NonObjectOptimizer( + method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20" + ).set_name("LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20", register=True) +except Exception as e: + print("RefinedUltraOptimizedDynamicPrecisionOptimizerV20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 import ( + RefinedUltraOptimizedEvolutionaryGradientOptimizerV31, + ) + + lama_register["RefinedUltraOptimizedEvolutionaryGradientOptimizerV31"] = ( + RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 + ) + LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31 = NonObjectOptimizer( + method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31" + ).set_name("LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31", register=True) +except Exception as e: + print("RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinedUltraRefinedRAMEDS import RefinedUltraRefinedRAMEDS + + lama_register["RefinedUltraRefinedRAMEDS"] = RefinedUltraRefinedRAMEDS + LLAMARefinedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS").set_name( + "LLAMARefinedUltraRefinedRAMEDS", register=True + ) +except Exception as e: + print("RefinedUltraRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinementEnhancedHybridOptimizer import ( + RefinementEnhancedHybridOptimizer, + ) + + lama_register["RefinementEnhancedHybridOptimizer"] = RefinementEnhancedHybridOptimizer + LLAMARefinementEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinementEnhancedHybridOptimizer" + ).set_name("LLAMARefinementEnhancedHybridOptimizer", register=True) +except Exception as e: + print("RefinementEnhancedHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinementSelectiveCohortOptimization import ( + RefinementSelectiveCohortOptimization, + ) + + lama_register["RefinementSelectiveCohortOptimization"] = RefinementSelectiveCohortOptimization + LLAMARefinementSelectiveCohortOptimization = NonObjectOptimizer( + method="LLAMARefinementSelectiveCohortOptimization" + ).set_name("LLAMARefinementSelectiveCohortOptimization", register=True) +except Exception as e: + print("RefinementSelectiveCohortOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RefinementTunedPSO import RefinementTunedPSO + + lama_register["RefinementTunedPSO"] = RefinementTunedPSO + LLAMARefinementTunedPSO = NonObjectOptimizer(method="LLAMARefinementTunedPSO").set_name( + "LLAMARefinementTunedPSO", register=True + ) +except Exception as e: + print("RefinementTunedPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ResilientAdaptivePSO import ResilientAdaptivePSO + + lama_register["ResilientAdaptivePSO"] = ResilientAdaptivePSO + LLAMAResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO").set_name( + "LLAMAResilientAdaptivePSO", register=True + ) +except Exception as e: + print("ResilientAdaptivePSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ResponsiveAdaptiveMemoryStrategyV52 import ( + ResponsiveAdaptiveMemoryStrategyV52, + ) + + lama_register["ResponsiveAdaptiveMemoryStrategyV52"] = ResponsiveAdaptiveMemoryStrategyV52 + LLAMAResponsiveAdaptiveMemoryStrategyV52 = NonObjectOptimizer( + method="LLAMAResponsiveAdaptiveMemoryStrategyV52" + ).set_name("LLAMAResponsiveAdaptiveMemoryStrategyV52", register=True) +except Exception as e: + print("ResponsiveAdaptiveMemoryStrategyV52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ResponsiveAdaptiveStrategyV27 import ResponsiveAdaptiveStrategyV27 + + lama_register["ResponsiveAdaptiveStrategyV27"] = ResponsiveAdaptiveStrategyV27 + LLAMAResponsiveAdaptiveStrategyV27 = NonObjectOptimizer( + method="LLAMAResponsiveAdaptiveStrategyV27" + ).set_name("LLAMAResponsiveAdaptiveStrategyV27", register=True) +except Exception as e: + print("ResponsiveAdaptiveStrategyV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RestartAdaptiveDifferentialEvolutionPSO import ( + RestartAdaptiveDifferentialEvolutionPSO, + ) + + lama_register["RestartAdaptiveDifferentialEvolutionPSO"] = RestartAdaptiveDifferentialEvolutionPSO + LLAMARestartAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( + method="LLAMARestartAdaptiveDifferentialEvolutionPSO" + ).set_name("LLAMARestartAdaptiveDifferentialEvolutionPSO", register=True) +except Exception as e: + print("RestartAdaptiveDifferentialEvolutionPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RevisedEnhancedDifferentialEvolutionLSRefinement_v20 import ( + RevisedEnhancedDifferentialEvolutionLSRefinement_v20, + ) + + lama_register["RevisedEnhancedDifferentialEvolutionLSRefinement_v20"] = ( + RevisedEnhancedDifferentialEvolutionLSRefinement_v20 + ) + LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20 = NonObjectOptimizer( + method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20" + ).set_name("LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20", register=True) +except Exception as e: + print("RevisedEnhancedDifferentialEvolutionLSRefinement_v20 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RevolutionaryFireworkAlgorithm import RevolutionaryFireworkAlgorithm + + lama_register["RevolutionaryFireworkAlgorithm"] = RevolutionaryFireworkAlgorithm + LLAMARevolutionaryFireworkAlgorithm = NonObjectOptimizer( + method="LLAMARevolutionaryFireworkAlgorithm" + ).set_name("LLAMARevolutionaryFireworkAlgorithm", register=True) +except Exception as e: + print("RevolutionaryFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RobustAdaptiveDifferentialEvolution import ( + RobustAdaptiveDifferentialEvolution, + ) + + lama_register["RobustAdaptiveDifferentialEvolution"] = RobustAdaptiveDifferentialEvolution + LLAMARobustAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARobustAdaptiveDifferentialEvolution" + ).set_name("LLAMARobustAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("RobustAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RobustAdaptiveMemoryLeveragedStrategyV43 import ( + RobustAdaptiveMemoryLeveragedStrategyV43, + ) + + lama_register["RobustAdaptiveMemoryLeveragedStrategyV43"] = RobustAdaptiveMemoryLeveragedStrategyV43 + LLAMARobustAdaptiveMemoryLeveragedStrategyV43 = NonObjectOptimizer( + method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43" + ).set_name("LLAMARobustAdaptiveMemoryLeveragedStrategyV43", register=True) +except Exception as e: + print("RobustAdaptiveMemoryLeveragedStrategyV43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.RobustCovarianceMatrixAdaptationMemeticSearch import ( + RobustCovarianceMatrixAdaptationMemeticSearch, + ) + + lama_register["RobustCovarianceMatrixAdaptationMemeticSearch"] = ( + RobustCovarianceMatrixAdaptationMemeticSearch + ) + LLAMARobustCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( + method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch" + ).set_name("LLAMARobustCovarianceMatrixAdaptationMemeticSearch", register=True) +except Exception as e: + print("RobustCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SADE import SADE + + lama_register["SADE"] = SADE + LLAMASADE = NonObjectOptimizer(method="LLAMASADE").set_name("LLAMASADE", register=True) +except Exception as e: + print("SADE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SADEEM import SADEEM + + lama_register["SADEEM"] = SADEEM + LLAMASADEEM = NonObjectOptimizer(method="LLAMASADEEM").set_name("LLAMASADEEM", register=True) +except Exception as e: + print("SADEEM can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SADEIOL import SADEIOL + + lama_register["SADEIOL"] = SADEIOL + LLAMASADEIOL = NonObjectOptimizer(method="LLAMASADEIOL").set_name("LLAMASADEIOL", register=True) +except Exception as e: + print("SADEIOL can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SADEPF import SADEPF + + lama_register["SADEPF"] = SADEPF + LLAMASADEPF = NonObjectOptimizer(method="LLAMASADEPF").set_name("LLAMASADEPF", register=True) +except Exception as e: + print("SADEPF can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SAGEA import SAGEA + + lama_register["SAGEA"] = SAGEA + LLAMASAGEA = NonObjectOptimizer(method="LLAMASAGEA").set_name("LLAMASAGEA", register=True) +except Exception as e: + print("SAGEA can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SGAE import SGAE + + lama_register["SGAE"] = SGAE + LLAMASGAE = NonObjectOptimizer(method="LLAMASGAE").set_name("LLAMASGAE", register=True) +except Exception as e: + print("SGAE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SGE import SGE + + lama_register["SGE"] = SGE + LLAMASGE = NonObjectOptimizer(method="LLAMASGE").set_name("LLAMASGE", register=True) +except Exception as e: + print("SGE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SORAMED import SORAMED + + lama_register["SORAMED"] = SORAMED + LLAMASORAMED = NonObjectOptimizer(method="LLAMASORAMED").set_name("LLAMASORAMED", register=True) +except Exception as e: + print("SORAMED can not be imported: ", e) + +try: + from nevergrad.optimization.lama.ScaledHybridDifferentialEvolution import ( + ScaledHybridDifferentialEvolution, + ) + + lama_register["ScaledHybridDifferentialEvolution"] = ScaledHybridDifferentialEvolution + LLAMAScaledHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAScaledHybridDifferentialEvolution" + ).set_name("LLAMAScaledHybridDifferentialEvolution", register=True) +except Exception as e: + print("ScaledHybridDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptingDifferentialEvolutionOptimizer import ( + SelfAdaptingDifferentialEvolutionOptimizer, + ) + + lama_register["SelfAdaptingDifferentialEvolutionOptimizer"] = SelfAdaptingDifferentialEvolutionOptimizer + LLAMASelfAdaptingDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMASelfAdaptingDifferentialEvolutionOptimizer" + ).set_name("LLAMASelfAdaptingDifferentialEvolutionOptimizer", register=True) +except Exception as e: + print("SelfAdaptingDifferentialEvolutionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveCovarianceMatrixDifferentialEvolution import ( + SelfAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["SelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + SelfAdaptiveCovarianceMatrixDifferentialEvolution + ) + LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: + print("SelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolution import ( + SelfAdaptiveDifferentialEvolution, + ) + + lama_register["SelfAdaptiveDifferentialEvolution"] = SelfAdaptiveDifferentialEvolution + LLAMASelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolution" + ).set_name("LLAMASelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("SelfAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithLocalRestart import ( + SelfAdaptiveDifferentialEvolutionWithLocalRestart, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithLocalRestart"] = ( + SelfAdaptiveDifferentialEvolutionWithLocalRestart + ) + LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart", register=True) +except Exception as e: + print("SelfAdaptiveDifferentialEvolutionWithLocalRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithMemeticSearch import ( + SelfAdaptiveDifferentialEvolutionWithMemeticSearch, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithMemeticSearch"] = ( + SelfAdaptiveDifferentialEvolutionWithMemeticSearch + ) + LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) +except Exception as e: + print("SelfAdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithRestart import ( + SelfAdaptiveDifferentialEvolutionWithRestart, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithRestart"] = ( + SelfAdaptiveDifferentialEvolutionWithRestart + ) + LLAMASelfAdaptiveDifferentialEvolutionWithRestart = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithRestart", register=True) +except Exception as e: + print("SelfAdaptiveDifferentialEvolutionWithRestart can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveDifferentialSwarmOptimization import ( + SelfAdaptiveDifferentialSwarmOptimization, + ) + + lama_register["SelfAdaptiveDifferentialSwarmOptimization"] = SelfAdaptiveDifferentialSwarmOptimization + LLAMASelfAdaptiveDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialSwarmOptimization" + ).set_name("LLAMASelfAdaptiveDifferentialSwarmOptimization", register=True) +except Exception as e: + print("SelfAdaptiveDifferentialSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveEvolutionaryAlgorithm import ( + SelfAdaptiveEvolutionaryAlgorithm, + ) + + lama_register["SelfAdaptiveEvolutionaryAlgorithm"] = SelfAdaptiveEvolutionaryAlgorithm + LLAMASelfAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveEvolutionaryAlgorithm" + ).set_name("LLAMASelfAdaptiveEvolutionaryAlgorithm", register=True) +except Exception as e: + print("SelfAdaptiveEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveHybridOptimizer import SelfAdaptiveHybridOptimizer + + lama_register["SelfAdaptiveHybridOptimizer"] = SelfAdaptiveHybridOptimizer + LLAMASelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer").set_name( + "LLAMASelfAdaptiveHybridOptimizer", register=True + ) +except Exception as e: + print("SelfAdaptiveHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveInterleavedOptimization import ( + SelfAdaptiveInterleavedOptimization, + ) + + lama_register["SelfAdaptiveInterleavedOptimization"] = SelfAdaptiveInterleavedOptimization + LLAMASelfAdaptiveInterleavedOptimization = NonObjectOptimizer( + method="LLAMASelfAdaptiveInterleavedOptimization" + ).set_name("LLAMASelfAdaptiveInterleavedOptimization", register=True) +except Exception as e: + print("SelfAdaptiveInterleavedOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveMemeticAlgorithmV2 import SelfAdaptiveMemeticAlgorithmV2 + + lama_register["SelfAdaptiveMemeticAlgorithmV2"] = SelfAdaptiveMemeticAlgorithmV2 + LLAMASelfAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMASelfAdaptiveMemeticAlgorithmV2" + ).set_name("LLAMASelfAdaptiveMemeticAlgorithmV2", register=True) +except Exception as e: + print("SelfAdaptiveMemeticAlgorithmV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveMemeticEvolutionaryAlgorithm import ( + SelfAdaptiveMemeticEvolutionaryAlgorithm, + ) + + lama_register["SelfAdaptiveMemeticEvolutionaryAlgorithm"] = SelfAdaptiveMemeticEvolutionaryAlgorithm + LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: + print("SelfAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveOppositionBasedHarmonySearchDE import ( + SelfAdaptiveOppositionBasedHarmonySearchDE, + ) + + lama_register["SelfAdaptiveOppositionBasedHarmonySearchDE"] = SelfAdaptiveOppositionBasedHarmonySearchDE + LLAMASelfAdaptiveOppositionBasedHarmonySearchDE = NonObjectOptimizer( + method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE" + ).set_name("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE", register=True) +except Exception as e: + print("SelfAdaptiveOppositionBasedHarmonySearchDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SelfAdaptiveQuantumMemeticAlgorithm import ( + SelfAdaptiveQuantumMemeticAlgorithm, + ) + + lama_register["SelfAdaptiveQuantumMemeticAlgorithm"] = SelfAdaptiveQuantumMemeticAlgorithm + LLAMASelfAdaptiveQuantumMemeticAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveQuantumMemeticAlgorithm" + ).set_name("LLAMASelfAdaptiveQuantumMemeticAlgorithm", register=True) +except Exception as e: + print("SelfAdaptiveQuantumMemeticAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SequentialAdaptiveDifferentialEvolution import ( + SequentialAdaptiveDifferentialEvolution, + ) + + lama_register["SequentialAdaptiveDifferentialEvolution"] = SequentialAdaptiveDifferentialEvolution + LLAMASequentialAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMASequentialAdaptiveDifferentialEvolution" + ).set_name("LLAMASequentialAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("SequentialAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SequentialQuadraticAdaptiveEvolutionStrategy import ( + SequentialQuadraticAdaptiveEvolutionStrategy, + ) + + lama_register["SequentialQuadraticAdaptiveEvolutionStrategy"] = ( + SequentialQuadraticAdaptiveEvolutionStrategy + ) + LLAMASequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMASequentialQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: + print("SequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SequentialQuadraticExploitationSearch import ( + SequentialQuadraticExploitationSearch, + ) + + lama_register["SequentialQuadraticExploitationSearch"] = SequentialQuadraticExploitationSearch + LLAMASequentialQuadraticExploitationSearch = NonObjectOptimizer( + method="LLAMASequentialQuadraticExploitationSearch" + ).set_name("LLAMASequentialQuadraticExploitationSearch", register=True) +except Exception as e: + print("SequentialQuadraticExploitationSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SimpleHybridDE import SimpleHybridDE + + lama_register["SimpleHybridDE"] = SimpleHybridDE + LLAMASimpleHybridDE = NonObjectOptimizer(method="LLAMASimpleHybridDE").set_name( + "LLAMASimpleHybridDE", register=True + ) +except Exception as e: + print("SimpleHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SimplifiedAdaptiveDynamicDualPhaseStrategyV18 import ( + SimplifiedAdaptiveDynamicDualPhaseStrategyV18, + ) + + lama_register["SimplifiedAdaptiveDynamicDualPhaseStrategyV18"] = ( + SimplifiedAdaptiveDynamicDualPhaseStrategyV18 + ) + LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18 = NonObjectOptimizer( + method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18" + ).set_name("LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18", register=True) +except Exception as e: + print("SimplifiedAdaptiveDynamicDualPhaseStrategyV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SimulatedAnnealingOptimizer import SimulatedAnnealingOptimizer + + lama_register["SimulatedAnnealingOptimizer"] = SimulatedAnnealingOptimizer + LLAMASimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer").set_name( + "LLAMASimulatedAnnealingOptimizer", register=True + ) +except Exception as e: + print("SimulatedAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SpiralSearchOptimizer import SpiralSearchOptimizer + + lama_register["SpiralSearchOptimizer"] = SpiralSearchOptimizer + LLAMASpiralSearchOptimizer = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer").set_name( + "LLAMASpiralSearchOptimizer", register=True + ) +except Exception as e: + print("SpiralSearchOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StabilizedQuantumCognitionOptimizerV11 import ( + StabilizedQuantumCognitionOptimizerV11, + ) + + lama_register["StabilizedQuantumCognitionOptimizerV11"] = StabilizedQuantumCognitionOptimizerV11 + LLAMAStabilizedQuantumCognitionOptimizerV11 = NonObjectOptimizer( + method="LLAMAStabilizedQuantumCognitionOptimizerV11" + ).set_name("LLAMAStabilizedQuantumCognitionOptimizerV11", register=True) +except Exception as e: + print("StabilizedQuantumCognitionOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StabilizedQuantumConcentricOptimizer import ( + StabilizedQuantumConcentricOptimizer, + ) + + lama_register["StabilizedQuantumConcentricOptimizer"] = StabilizedQuantumConcentricOptimizer + LLAMAStabilizedQuantumConcentricOptimizer = NonObjectOptimizer( + method="LLAMAStabilizedQuantumConcentricOptimizer" + ).set_name("LLAMAStabilizedQuantumConcentricOptimizer", register=True) +except Exception as e: + print("StabilizedQuantumConcentricOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StabilizedRefinedEnhancedDynamicBalancingPSO import ( + StabilizedRefinedEnhancedDynamicBalancingPSO, + ) + + lama_register["StabilizedRefinedEnhancedDynamicBalancingPSO"] = ( + StabilizedRefinedEnhancedDynamicBalancingPSO + ) + LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO = NonObjectOptimizer( + method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO" + ).set_name("LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO", register=True) +except Exception as e: + print("StabilizedRefinedEnhancedDynamicBalancingPSO can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticAdaptiveEvolutionaryOptimizer import ( + StochasticAdaptiveEvolutionaryOptimizer, + ) + + lama_register["StochasticAdaptiveEvolutionaryOptimizer"] = StochasticAdaptiveEvolutionaryOptimizer + LLAMAStochasticAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAStochasticAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAStochasticAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: + print("StochasticAdaptiveEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticBalancingOptimizer import StochasticBalancingOptimizer + + lama_register["StochasticBalancingOptimizer"] = StochasticBalancingOptimizer + LLAMAStochasticBalancingOptimizer = NonObjectOptimizer( + method="LLAMAStochasticBalancingOptimizer" + ).set_name("LLAMAStochasticBalancingOptimizer", register=True) +except Exception as e: + print("StochasticBalancingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticGradientEnhancedDE import StochasticGradientEnhancedDE + + lama_register["StochasticGradientEnhancedDE"] = StochasticGradientEnhancedDE + LLAMAStochasticGradientEnhancedDE = NonObjectOptimizer( + method="LLAMAStochasticGradientEnhancedDE" + ).set_name("LLAMAStochasticGradientEnhancedDE", register=True) +except Exception as e: + print("StochasticGradientEnhancedDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticGradientExploration import StochasticGradientExploration + + lama_register["StochasticGradientExploration"] = StochasticGradientExploration + LLAMAStochasticGradientExploration = NonObjectOptimizer( + method="LLAMAStochasticGradientExploration" + ).set_name("LLAMAStochasticGradientExploration", register=True) +except Exception as e: + print("StochasticGradientExploration can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticGradientHybridOptimization import ( + StochasticGradientHybridOptimization, + ) + + lama_register["StochasticGradientHybridOptimization"] = StochasticGradientHybridOptimization + LLAMAStochasticGradientHybridOptimization = NonObjectOptimizer( + method="LLAMAStochasticGradientHybridOptimization" + ).set_name("LLAMAStochasticGradientHybridOptimization", register=True) +except Exception as e: + print("StochasticGradientHybridOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StochasticGradientQuorumOptimization import ( + StochasticGradientQuorumOptimization, + ) + + lama_register["StochasticGradientQuorumOptimization"] = StochasticGradientQuorumOptimization + LLAMAStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMAStochasticGradientQuorumOptimization" + ).set_name("LLAMAStochasticGradientQuorumOptimization", register=True) +except Exception as e: + print("StochasticGradientQuorumOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicAdaptiveDifferentialEvolution import ( + StrategicAdaptiveDifferentialEvolution, + ) + + lama_register["StrategicAdaptiveDifferentialEvolution"] = StrategicAdaptiveDifferentialEvolution + LLAMAStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAStrategicAdaptiveDifferentialEvolution" + ).set_name("LLAMAStrategicAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("StrategicAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicDifferentialEvolution import StrategicDifferentialEvolution + + lama_register["StrategicDifferentialEvolution"] = StrategicDifferentialEvolution + LLAMAStrategicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAStrategicDifferentialEvolution" + ).set_name("LLAMAStrategicDifferentialEvolution", register=True) +except Exception as e: + print("StrategicDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicDiminishingAdaptiveEvolver import ( + StrategicDiminishingAdaptiveEvolver, + ) + + lama_register["StrategicDiminishingAdaptiveEvolver"] = StrategicDiminishingAdaptiveEvolver + LLAMAStrategicDiminishingAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAStrategicDiminishingAdaptiveEvolver" + ).set_name("LLAMAStrategicDiminishingAdaptiveEvolver", register=True) +except Exception as e: + print("StrategicDiminishingAdaptiveEvolver can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicHybridDE import StrategicHybridDE + + lama_register["StrategicHybridDE"] = StrategicHybridDE + LLAMAStrategicHybridDE = NonObjectOptimizer(method="LLAMAStrategicHybridDE").set_name( + "LLAMAStrategicHybridDE", register=True + ) +except Exception as e: + print("StrategicHybridDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicMultiPhaseEvolutionaryAlgorithm import ( + StrategicMultiPhaseEvolutionaryAlgorithm, + ) + + lama_register["StrategicMultiPhaseEvolutionaryAlgorithm"] = StrategicMultiPhaseEvolutionaryAlgorithm + LLAMAStrategicMultiPhaseEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm" + ).set_name("LLAMAStrategicMultiPhaseEvolutionaryAlgorithm", register=True) +except Exception as e: + print("StrategicMultiPhaseEvolutionaryAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicQuorumMutationWithAdaptiveElites import ( + StrategicQuorumMutationWithAdaptiveElites, + ) + + lama_register["StrategicQuorumMutationWithAdaptiveElites"] = StrategicQuorumMutationWithAdaptiveElites + LLAMAStrategicQuorumMutationWithAdaptiveElites = NonObjectOptimizer( + method="LLAMAStrategicQuorumMutationWithAdaptiveElites" + ).set_name("LLAMAStrategicQuorumMutationWithAdaptiveElites", register=True) +except Exception as e: + print("StrategicQuorumMutationWithAdaptiveElites can not be imported: ", e) + +try: + from nevergrad.optimization.lama.StrategicResilienceAdaptiveSearch import ( + StrategicResilienceAdaptiveSearch, + ) + + lama_register["StrategicResilienceAdaptiveSearch"] = StrategicResilienceAdaptiveSearch + LLAMAStrategicResilienceAdaptiveSearch = NonObjectOptimizer( + method="LLAMAStrategicResilienceAdaptiveSearch" + ).set_name("LLAMAStrategicResilienceAdaptiveSearch", register=True) +except Exception as e: + print("StrategicResilienceAdaptiveSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimization import ( + SuperDynamicQuantumSwarmOptimization, + ) + + lama_register["SuperDynamicQuantumSwarmOptimization"] = SuperDynamicQuantumSwarmOptimization + LLAMASuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMASuperDynamicQuantumSwarmOptimization" + ).set_name("LLAMASuperDynamicQuantumSwarmOptimization", register=True) +except Exception as e: + print("SuperDynamicQuantumSwarmOptimization can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimizationImproved import ( + SuperDynamicQuantumSwarmOptimizationImproved, + ) + + lama_register["SuperDynamicQuantumSwarmOptimizationImproved"] = ( + SuperDynamicQuantumSwarmOptimizationImproved + ) + LLAMASuperDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMASuperDynamicQuantumSwarmOptimizationImproved" + ).set_name("LLAMASuperDynamicQuantumSwarmOptimizationImproved", register=True) +except Exception as e: + print("SuperDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperOptimizedRAMEDS import SuperOptimizedRAMEDS + + lama_register["SuperOptimizedRAMEDS"] = SuperOptimizedRAMEDS + LLAMASuperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS").set_name( + "LLAMASuperOptimizedRAMEDS", register=True + ) +except Exception as e: + print("SuperOptimizedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperRefinedRAMEDSv5 import SuperRefinedRAMEDSv5 + + lama_register["SuperRefinedRAMEDSv5"] = SuperRefinedRAMEDSv5 + LLAMASuperRefinedRAMEDSv5 = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5").set_name( + "LLAMASuperRefinedRAMEDSv5", register=True + ) +except Exception as e: + print("SuperRefinedRAMEDSv5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 import ( + SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5, + ) + + lama_register["SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5"] = ( + SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 + ) + LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 = NonObjectOptimizer( + method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5" + ).set_name("LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5", register=True) +except Exception as e: + print("SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 import ( + SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16, + ) + + lama_register["SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16"] = ( + SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 + ) + LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16" + ).set_name("LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16", register=True) +except Exception as e: + print("SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperiorAdaptiveStrategyDE import SuperiorAdaptiveStrategyDE + + lama_register["SuperiorAdaptiveStrategyDE"] = SuperiorAdaptiveStrategyDE + LLAMASuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE").set_name( + "LLAMASuperiorAdaptiveStrategyDE", register=True + ) +except Exception as e: + print("SuperiorAdaptiveStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperiorEnhancedDynamicPrecisionOptimizerV1 import ( + SuperiorEnhancedDynamicPrecisionOptimizerV1, + ) + + lama_register["SuperiorEnhancedDynamicPrecisionOptimizerV1"] = SuperiorEnhancedDynamicPrecisionOptimizerV1 + LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1" + ).set_name("LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1", register=True) +except Exception as e: + print("SuperiorEnhancedDynamicPrecisionOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperiorHybridEvolutionaryAnnealingOptimizer import ( + SuperiorHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["SuperiorHybridEvolutionaryAnnealingOptimizer"] = ( + SuperiorHybridEvolutionaryAnnealingOptimizer + ) + LLAMASuperiorHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMASuperiorHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: + print("SuperiorHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperiorOptimalEnhancedStrategyDE import ( + SuperiorOptimalEnhancedStrategyDE, + ) + + lama_register["SuperiorOptimalEnhancedStrategyDE"] = SuperiorOptimalEnhancedStrategyDE + LLAMASuperiorOptimalEnhancedStrategyDE = NonObjectOptimizer( + method="LLAMASuperiorOptimalEnhancedStrategyDE" + ).set_name("LLAMASuperiorOptimalEnhancedStrategyDE", register=True) +except Exception as e: + print("SuperiorOptimalEnhancedStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SuperiorRefinedEvolutionaryGradientOptimizerV13 import ( + SuperiorRefinedEvolutionaryGradientOptimizerV13, + ) + + lama_register["SuperiorRefinedEvolutionaryGradientOptimizerV13"] = ( + SuperiorRefinedEvolutionaryGradientOptimizerV13 + ) + LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13 = NonObjectOptimizer( + method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13" + ).set_name("LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13", register=True) +except Exception as e: + print("SuperiorRefinedEvolutionaryGradientOptimizerV13 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeDynamicAdaptiveOptimizerV5 import ( + SupremeDynamicAdaptiveOptimizerV5, + ) + + lama_register["SupremeDynamicAdaptiveOptimizerV5"] = SupremeDynamicAdaptiveOptimizerV5 + LLAMASupremeDynamicAdaptiveOptimizerV5 = NonObjectOptimizer( + method="LLAMASupremeDynamicAdaptiveOptimizerV5" + ).set_name("LLAMASupremeDynamicAdaptiveOptimizerV5", register=True) +except Exception as e: + print("SupremeDynamicAdaptiveOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV1 import ( + SupremeDynamicPrecisionOptimizerV1, + ) + + lama_register["SupremeDynamicPrecisionOptimizerV1"] = SupremeDynamicPrecisionOptimizerV1 + LLAMASupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMASupremeDynamicPrecisionOptimizerV1" + ).set_name("LLAMASupremeDynamicPrecisionOptimizerV1", register=True) +except Exception as e: + print("SupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV2 import ( + SupremeDynamicPrecisionOptimizerV2, + ) + + lama_register["SupremeDynamicPrecisionOptimizerV2"] = SupremeDynamicPrecisionOptimizerV2 + LLAMASupremeDynamicPrecisionOptimizerV2 = NonObjectOptimizer( + method="LLAMASupremeDynamicPrecisionOptimizerV2" + ).set_name("LLAMASupremeDynamicPrecisionOptimizerV2", register=True) +except Exception as e: + print("SupremeDynamicPrecisionOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeEvolutionaryGradientHybridOptimizerV6 import ( + SupremeEvolutionaryGradientHybridOptimizerV6, + ) + + lama_register["SupremeEvolutionaryGradientHybridOptimizerV6"] = ( + SupremeEvolutionaryGradientHybridOptimizerV6 + ) + LLAMASupremeEvolutionaryGradientHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6" + ).set_name("LLAMASupremeEvolutionaryGradientHybridOptimizerV6", register=True) +except Exception as e: + print("SupremeEvolutionaryGradientHybridOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeOptimalPrecisionEvolutionaryThermalOptimizer import ( + SupremeOptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["SupremeOptimalPrecisionEvolutionaryThermalOptimizer"] = ( + SupremeOptimalPrecisionEvolutionaryThermalOptimizer + ) + LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: + print("SupremeOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.SupremeUltraEnhancedEvolutionaryOptimizer import ( + SupremeUltraEnhancedEvolutionaryOptimizer, + ) + + lama_register["SupremeUltraEnhancedEvolutionaryOptimizer"] = SupremeUltraEnhancedEvolutionaryOptimizer + LLAMASupremeUltraEnhancedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer" + ).set_name("LLAMASupremeUltraEnhancedEvolutionaryOptimizer", register=True) +except Exception as e: + print("SupremeUltraEnhancedEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.TemporalAdaptiveDifferentialEvolution import ( + TemporalAdaptiveDifferentialEvolution, + ) + + lama_register["TemporalAdaptiveDifferentialEvolution"] = TemporalAdaptiveDifferentialEvolution + LLAMATemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMATemporalAdaptiveDifferentialEvolution" + ).set_name("LLAMATemporalAdaptiveDifferentialEvolution", register=True) +except Exception as e: + print("TemporalAdaptiveDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.TurbochargedDifferentialEvolution import ( + TurbochargedDifferentialEvolution, + ) + + lama_register["TurbochargedDifferentialEvolution"] = TurbochargedDifferentialEvolution + LLAMATurbochargedDifferentialEvolution = NonObjectOptimizer( + method="LLAMATurbochargedDifferentialEvolution" + ).set_name("LLAMATurbochargedDifferentialEvolution", register=True) +except Exception as e: + print("TurbochargedDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithm import UltimateDynamicFireworkAlgorithm + + lama_register["UltimateDynamicFireworkAlgorithm"] = UltimateDynamicFireworkAlgorithm + LLAMAUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAUltimateDynamicFireworkAlgorithm" + ).set_name("LLAMAUltimateDynamicFireworkAlgorithm", register=True) +except Exception as e: + print("UltimateDynamicFireworkAlgorithm can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithmImproved import ( + UltimateDynamicFireworkAlgorithmImproved, + ) + + lama_register["UltimateDynamicFireworkAlgorithmImproved"] = UltimateDynamicFireworkAlgorithmImproved + LLAMAUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAUltimateDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAUltimateDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: + print("UltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 import ( + UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19, + ) + + lama_register["UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19"] = ( + UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 + ) + LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 = NonObjectOptimizer( + method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19" + ).set_name("LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19", register=True) +except Exception as e: + print("UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV15 import ( + UltimateEvolutionaryGradientOptimizerV15, + ) + + lama_register["UltimateEvolutionaryGradientOptimizerV15"] = UltimateEvolutionaryGradientOptimizerV15 + LLAMAUltimateEvolutionaryGradientOptimizerV15 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV15" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV15", register=True) +except Exception as e: + print("UltimateEvolutionaryGradientOptimizerV15 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV26 import ( + UltimateEvolutionaryGradientOptimizerV26, + ) + + lama_register["UltimateEvolutionaryGradientOptimizerV26"] = UltimateEvolutionaryGradientOptimizerV26 + LLAMAUltimateEvolutionaryGradientOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV26" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV26", register=True) +except Exception as e: + print("UltimateEvolutionaryGradientOptimizerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV33 import ( + UltimateEvolutionaryGradientOptimizerV33, + ) + + lama_register["UltimateEvolutionaryGradientOptimizerV33"] = UltimateEvolutionaryGradientOptimizerV33 + LLAMAUltimateEvolutionaryGradientOptimizerV33 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV33" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV33", register=True) +except Exception as e: + print("UltimateEvolutionaryGradientOptimizerV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateEvolutionaryOptimizer import UltimateEvolutionaryOptimizer + + lama_register["UltimateEvolutionaryOptimizer"] = UltimateEvolutionaryOptimizer + LLAMAUltimateEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryOptimizer" + ).set_name("LLAMAUltimateEvolutionaryOptimizer", register=True) +except Exception as e: + print("UltimateEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateRefinedAQAPSO_LS_DIW_AP import UltimateRefinedAQAPSO_LS_DIW_AP + + lama_register["UltimateRefinedAQAPSO_LS_DIW_AP"] = UltimateRefinedAQAPSO_LS_DIW_AP + LLAMAUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: + print("UltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateRefinedPrecisionEvolutionaryOptimizerV41 import ( + UltimateRefinedPrecisionEvolutionaryOptimizerV41, + ) + + lama_register["UltimateRefinedPrecisionEvolutionaryOptimizerV41"] = ( + UltimateRefinedPrecisionEvolutionaryOptimizerV41 + ) + LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41 = NonObjectOptimizer( + method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41" + ).set_name("LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41", register=True) +except Exception as e: + print("UltimateRefinedPrecisionEvolutionaryOptimizerV41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 import ( + UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18, + ) + + lama_register["UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18"] = ( + UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 + ) + LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 = NonObjectOptimizer( + method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18" + ).set_name("LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18", register=True) +except Exception as e: + print("UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraDynamicAdaptiveRAMEDS import UltraDynamicAdaptiveRAMEDS + + lama_register["UltraDynamicAdaptiveRAMEDS"] = UltraDynamicAdaptiveRAMEDS + LLAMAUltraDynamicAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS").set_name( + "LLAMAUltraDynamicAdaptiveRAMEDS", register=True + ) +except Exception as e: + print("UltraDynamicAdaptiveRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraDynamicDualPhaseOptimizedStrategyV16 import ( + UltraDynamicDualPhaseOptimizedStrategyV16, + ) + + lama_register["UltraDynamicDualPhaseOptimizedStrategyV16"] = UltraDynamicDualPhaseOptimizedStrategyV16 + LLAMAUltraDynamicDualPhaseOptimizedStrategyV16 = NonObjectOptimizer( + method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16" + ).set_name("LLAMAUltraDynamicDualPhaseOptimizedStrategyV16", register=True) +except Exception as e: + print("UltraDynamicDualPhaseOptimizedStrategyV16 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV10 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV10, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV10"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV10 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV11 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV11, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV11"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV11 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV12 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV12, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV12"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV12 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV12 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV2 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV2, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV2"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV2 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV3 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV3, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV3"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV3 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV4 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV4, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV4"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV4 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV7 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV7, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV7"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV7 + ) + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7", register=True) +except Exception as e: + print("UltraEnhancedAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedAdaptiveRAMEDS import UltraEnhancedAdaptiveRAMEDS + + lama_register["UltraEnhancedAdaptiveRAMEDS"] = UltraEnhancedAdaptiveRAMEDS + LLAMAUltraEnhancedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS").set_name( + "LLAMAUltraEnhancedAdaptiveRAMEDS", register=True + ) +except Exception as e: + print("UltraEnhancedAdaptiveRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedDynamicDE import UltraEnhancedDynamicDE + + lama_register["UltraEnhancedDynamicDE"] = UltraEnhancedDynamicDE + LLAMAUltraEnhancedDynamicDE = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE").set_name( + "LLAMAUltraEnhancedDynamicDE", register=True + ) +except Exception as e: + print("UltraEnhancedDynamicDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( + UltraEnhancedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["UltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( + UltraEnhancedEliteAdaptiveMemoryHybridOptimizer + ) + LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: + print("UltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedEvolutionaryGradientOptimizerV14 import ( + UltraEnhancedEvolutionaryGradientOptimizerV14, + ) + + lama_register["UltraEnhancedEvolutionaryGradientOptimizerV14"] = ( + UltraEnhancedEvolutionaryGradientOptimizerV14 + ) + LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14 = NonObjectOptimizer( + method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14" + ).set_name("LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14", register=True) +except Exception as e: + print("UltraEnhancedEvolutionaryGradientOptimizerV14 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEnhancedPrecisionEvolutionaryOptimizer import ( + UltraEnhancedPrecisionEvolutionaryOptimizer, + ) + + lama_register["UltraEnhancedPrecisionEvolutionaryOptimizer"] = UltraEnhancedPrecisionEvolutionaryOptimizer + LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer" + ).set_name("LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer", register=True) +except Exception as e: + print("UltraEnhancedPrecisionEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraEvolutionaryGradientOptimizerV27 import ( + UltraEvolutionaryGradientOptimizerV27, + ) + + lama_register["UltraEvolutionaryGradientOptimizerV27"] = UltraEvolutionaryGradientOptimizerV27 + LLAMAUltraEvolutionaryGradientOptimizerV27 = NonObjectOptimizer( + method="LLAMAUltraEvolutionaryGradientOptimizerV27" + ).set_name("LLAMAUltraEvolutionaryGradientOptimizerV27", register=True) +except Exception as e: + print("UltraEvolutionaryGradientOptimizerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraFineSpiralDifferentialOptimizerV7 import ( + UltraFineSpiralDifferentialOptimizerV7, + ) + + lama_register["UltraFineSpiralDifferentialOptimizerV7"] = UltraFineSpiralDifferentialOptimizerV7 + LLAMAUltraFineSpiralDifferentialOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraFineSpiralDifferentialOptimizerV7" + ).set_name("LLAMAUltraFineSpiralDifferentialOptimizerV7", register=True) +except Exception as e: + print("UltraFineSpiralDifferentialOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizer import ( + UltraFineTunedEvolutionaryOptimizer, + ) + + lama_register["UltraFineTunedEvolutionaryOptimizer"] = UltraFineTunedEvolutionaryOptimizer + LLAMAUltraFineTunedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraFineTunedEvolutionaryOptimizer" + ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizer", register=True) +except Exception as e: + print("UltraFineTunedEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizerV24 import ( + UltraFineTunedEvolutionaryOptimizerV24, + ) + + lama_register["UltraFineTunedEvolutionaryOptimizerV24"] = UltraFineTunedEvolutionaryOptimizerV24 + LLAMAUltraFineTunedEvolutionaryOptimizerV24 = NonObjectOptimizer( + method="LLAMAUltraFineTunedEvolutionaryOptimizerV24" + ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizerV24", register=True) +except Exception as e: + print("UltraFineTunedEvolutionaryOptimizerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV18 import ( + UltraOptimizedDynamicPrecisionOptimizerV18, + ) + + lama_register["UltraOptimizedDynamicPrecisionOptimizerV18"] = UltraOptimizedDynamicPrecisionOptimizerV18 + LLAMAUltraOptimizedDynamicPrecisionOptimizerV18 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV18", register=True) +except Exception as e: + print("UltraOptimizedDynamicPrecisionOptimizerV18 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV19 import ( + UltraOptimizedDynamicPrecisionOptimizerV19, + ) + + lama_register["UltraOptimizedDynamicPrecisionOptimizerV19"] = UltraOptimizedDynamicPrecisionOptimizerV19 + LLAMAUltraOptimizedDynamicPrecisionOptimizerV19 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV19", register=True) +except Exception as e: + print("UltraOptimizedDynamicPrecisionOptimizerV19 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV52 import ( + UltraOptimizedDynamicPrecisionOptimizerV52, + ) + + lama_register["UltraOptimizedDynamicPrecisionOptimizerV52"] = UltraOptimizedDynamicPrecisionOptimizerV52 + LLAMAUltraOptimizedDynamicPrecisionOptimizerV52 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV52", register=True) +except Exception as e: + print("UltraOptimizedDynamicPrecisionOptimizerV52 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV53 import ( + UltraOptimizedDynamicPrecisionOptimizerV53, + ) + + lama_register["UltraOptimizedDynamicPrecisionOptimizerV53"] = UltraOptimizedDynamicPrecisionOptimizerV53 + LLAMAUltraOptimizedDynamicPrecisionOptimizerV53 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV53", register=True) +except Exception as e: + print("UltraOptimizedDynamicPrecisionOptimizerV53 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedEvolutionaryGradientOptimizerV30 import ( + UltraOptimizedEvolutionaryGradientOptimizerV30, + ) + + lama_register["UltraOptimizedEvolutionaryGradientOptimizerV30"] = ( + UltraOptimizedEvolutionaryGradientOptimizerV30 + ) + LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30 = NonObjectOptimizer( + method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30" + ).set_name("LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30", register=True) +except Exception as e: + print("UltraOptimizedEvolutionaryGradientOptimizerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer import ( + UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer, + ) + + lama_register["UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer"] = ( + UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer + ) + LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: + print("UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedRAMEDS import UltraOptimizedRAMEDS + + lama_register["UltraOptimizedRAMEDS"] = UltraOptimizedRAMEDS + LLAMAUltraOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS").set_name( + "LLAMAUltraOptimizedRAMEDS", register=True + ) +except Exception as e: + print("UltraOptimizedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraOptimizedSpiralDifferentialEvolution import ( + UltraOptimizedSpiralDifferentialEvolution, + ) + + lama_register["UltraOptimizedSpiralDifferentialEvolution"] = UltraOptimizedSpiralDifferentialEvolution + LLAMAUltraOptimizedSpiralDifferentialEvolution = NonObjectOptimizer( + method="LLAMAUltraOptimizedSpiralDifferentialEvolution" + ).set_name("LLAMAUltraOptimizedSpiralDifferentialEvolution", register=True) +except Exception as e: + print("UltraOptimizedSpiralDifferentialEvolution can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraPreciseDynamicOptimizerV26 import UltraPreciseDynamicOptimizerV26 + + lama_register["UltraPreciseDynamicOptimizerV26"] = UltraPreciseDynamicOptimizerV26 + LLAMAUltraPreciseDynamicOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltraPreciseDynamicOptimizerV26" + ).set_name("LLAMAUltraPreciseDynamicOptimizerV26", register=True) +except Exception as e: + print("UltraPreciseDynamicOptimizerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraPrecisionSpiralDifferentialOptimizerV9 import ( + UltraPrecisionSpiralDifferentialOptimizerV9, + ) + + lama_register["UltraPrecisionSpiralDifferentialOptimizerV9"] = UltraPrecisionSpiralDifferentialOptimizerV9 + LLAMAUltraPrecisionSpiralDifferentialOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9" + ).set_name("LLAMAUltraPrecisionSpiralDifferentialOptimizerV9", register=True) +except Exception as e: + print("UltraPrecisionSpiralDifferentialOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraQuantumReactiveHybridStrategy import ( + UltraQuantumReactiveHybridStrategy, + ) + + lama_register["UltraQuantumReactiveHybridStrategy"] = UltraQuantumReactiveHybridStrategy + LLAMAUltraQuantumReactiveHybridStrategy = NonObjectOptimizer( + method="LLAMAUltraQuantumReactiveHybridStrategy" + ).set_name("LLAMAUltraQuantumReactiveHybridStrategy", register=True) +except Exception as e: + print("UltraQuantumReactiveHybridStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRAMEDS import UltraRAMEDS + + lama_register["UltraRAMEDS"] = UltraRAMEDS + LLAMAUltraRAMEDS = NonObjectOptimizer(method="LLAMAUltraRAMEDS").set_name( + "LLAMAUltraRAMEDS", register=True + ) +except Exception as e: + print("UltraRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveConvergenceStrategy import ( + UltraRefinedAdaptiveConvergenceStrategy, + ) + + lama_register["UltraRefinedAdaptiveConvergenceStrategy"] = UltraRefinedAdaptiveConvergenceStrategy + LLAMAUltraRefinedAdaptiveConvergenceStrategy = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveConvergenceStrategy" + ).set_name("LLAMAUltraRefinedAdaptiveConvergenceStrategy", register=True) +except Exception as e: + print("UltraRefinedAdaptiveConvergenceStrategy can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV5 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV5, + ) + + lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV5"] = UltraRefinedAdaptiveMemoryHybridOptimizerV5 + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5", register=True) +except Exception as e: + print("UltraRefinedAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV6 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV6, + ) + + lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV6"] = UltraRefinedAdaptiveMemoryHybridOptimizerV6 + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6", register=True) +except Exception as e: + print("UltraRefinedAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV8 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV8, + ) + + lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV8"] = UltraRefinedAdaptiveMemoryHybridOptimizerV8 + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8", register=True) +except Exception as e: + print("UltraRefinedAdaptiveMemoryHybridOptimizerV8 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV9 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV9, + ) + + lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV9"] = UltraRefinedAdaptiveMemoryHybridOptimizerV9 + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9", register=True) +except Exception as e: + print("UltraRefinedAdaptiveMemoryHybridOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptivePrecisionOptimizer import ( + UltraRefinedAdaptivePrecisionOptimizer, + ) + + lama_register["UltraRefinedAdaptivePrecisionOptimizer"] = UltraRefinedAdaptivePrecisionOptimizer + LLAMAUltraRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAUltraRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: + print("UltraRefinedAdaptivePrecisionOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedAdaptiveRAMEDS import UltraRefinedAdaptiveRAMEDS + + lama_register["UltraRefinedAdaptiveRAMEDS"] = UltraRefinedAdaptiveRAMEDS + LLAMAUltraRefinedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS").set_name( + "LLAMAUltraRefinedAdaptiveRAMEDS", register=True + ) +except Exception as e: + print("UltraRefinedAdaptiveRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedConvergenceSpiralSearch import ( + UltraRefinedConvergenceSpiralSearch, + ) + + lama_register["UltraRefinedConvergenceSpiralSearch"] = UltraRefinedConvergenceSpiralSearch + LLAMAUltraRefinedConvergenceSpiralSearch = NonObjectOptimizer( + method="LLAMAUltraRefinedConvergenceSpiralSearch" + ).set_name("LLAMAUltraRefinedConvergenceSpiralSearch", register=True) +except Exception as e: + print("UltraRefinedConvergenceSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV10 import ( + UltraRefinedDynamicPrecisionOptimizerV10, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV10"] = UltraRefinedDynamicPrecisionOptimizerV10 + LLAMAUltraRefinedDynamicPrecisionOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV10", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV11 import ( + UltraRefinedDynamicPrecisionOptimizerV11, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV11"] = UltraRefinedDynamicPrecisionOptimizerV11 + LLAMAUltraRefinedDynamicPrecisionOptimizerV11 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV11", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV11 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV17 import ( + UltraRefinedDynamicPrecisionOptimizerV17, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV17"] = UltraRefinedDynamicPrecisionOptimizerV17 + LLAMAUltraRefinedDynamicPrecisionOptimizerV17 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV17", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV17 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV22 import ( + UltraRefinedDynamicPrecisionOptimizerV22, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV22"] = UltraRefinedDynamicPrecisionOptimizerV22 + LLAMAUltraRefinedDynamicPrecisionOptimizerV22 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV22", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV22 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV23 import ( + UltraRefinedDynamicPrecisionOptimizerV23, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV23"] = UltraRefinedDynamicPrecisionOptimizerV23 + LLAMAUltraRefinedDynamicPrecisionOptimizerV23 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV23", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV23 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV24 import ( + UltraRefinedDynamicPrecisionOptimizerV24, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV24"] = UltraRefinedDynamicPrecisionOptimizerV24 + LLAMAUltraRefinedDynamicPrecisionOptimizerV24 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV24", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV24 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV25 import ( + UltraRefinedDynamicPrecisionOptimizerV25, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV25"] = UltraRefinedDynamicPrecisionOptimizerV25 + LLAMAUltraRefinedDynamicPrecisionOptimizerV25 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV25", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV25 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV26 import ( + UltraRefinedDynamicPrecisionOptimizerV26, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV26"] = UltraRefinedDynamicPrecisionOptimizerV26 + LLAMAUltraRefinedDynamicPrecisionOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV26", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV26 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV27 import ( + UltraRefinedDynamicPrecisionOptimizerV27, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV27"] = UltraRefinedDynamicPrecisionOptimizerV27 + LLAMAUltraRefinedDynamicPrecisionOptimizerV27 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV27", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV27 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV28 import ( + UltraRefinedDynamicPrecisionOptimizerV28, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV28"] = UltraRefinedDynamicPrecisionOptimizerV28 + LLAMAUltraRefinedDynamicPrecisionOptimizerV28 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV28", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV28 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV29 import ( + UltraRefinedDynamicPrecisionOptimizerV29, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV29"] = UltraRefinedDynamicPrecisionOptimizerV29 + LLAMAUltraRefinedDynamicPrecisionOptimizerV29 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV29", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV29 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV30 import ( + UltraRefinedDynamicPrecisionOptimizerV30, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV30"] = UltraRefinedDynamicPrecisionOptimizerV30 + LLAMAUltraRefinedDynamicPrecisionOptimizerV30 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV30", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV30 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV31 import ( + UltraRefinedDynamicPrecisionOptimizerV31, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV31"] = UltraRefinedDynamicPrecisionOptimizerV31 + LLAMAUltraRefinedDynamicPrecisionOptimizerV31 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV31", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV31 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV32 import ( + UltraRefinedDynamicPrecisionOptimizerV32, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV32"] = UltraRefinedDynamicPrecisionOptimizerV32 + LLAMAUltraRefinedDynamicPrecisionOptimizerV32 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV32", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV33 import ( + UltraRefinedDynamicPrecisionOptimizerV33, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV33"] = UltraRefinedDynamicPrecisionOptimizerV33 + LLAMAUltraRefinedDynamicPrecisionOptimizerV33 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV33", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV33 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV34 import ( + UltraRefinedDynamicPrecisionOptimizerV34, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV34"] = UltraRefinedDynamicPrecisionOptimizerV34 + LLAMAUltraRefinedDynamicPrecisionOptimizerV34 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV34", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV34 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV35 import ( + UltraRefinedDynamicPrecisionOptimizerV35, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV35"] = UltraRefinedDynamicPrecisionOptimizerV35 + LLAMAUltraRefinedDynamicPrecisionOptimizerV35 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV35", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV35 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV36 import ( + UltraRefinedDynamicPrecisionOptimizerV36, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV36"] = UltraRefinedDynamicPrecisionOptimizerV36 + LLAMAUltraRefinedDynamicPrecisionOptimizerV36 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV36", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV36 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV37 import ( + UltraRefinedDynamicPrecisionOptimizerV37, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV37"] = UltraRefinedDynamicPrecisionOptimizerV37 + LLAMAUltraRefinedDynamicPrecisionOptimizerV37 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV37", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV37 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV38 import ( + UltraRefinedDynamicPrecisionOptimizerV38, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV38"] = UltraRefinedDynamicPrecisionOptimizerV38 + LLAMAUltraRefinedDynamicPrecisionOptimizerV38 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV38", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV38 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV39 import ( + UltraRefinedDynamicPrecisionOptimizerV39, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV39"] = UltraRefinedDynamicPrecisionOptimizerV39 + LLAMAUltraRefinedDynamicPrecisionOptimizerV39 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV39", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV39 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV4 import ( + UltraRefinedDynamicPrecisionOptimizerV4, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV4"] = UltraRefinedDynamicPrecisionOptimizerV4 + LLAMAUltraRefinedDynamicPrecisionOptimizerV4 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV4", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV4 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV40 import ( + UltraRefinedDynamicPrecisionOptimizerV40, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV40"] = UltraRefinedDynamicPrecisionOptimizerV40 + LLAMAUltraRefinedDynamicPrecisionOptimizerV40 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV40", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV40 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV41 import ( + UltraRefinedDynamicPrecisionOptimizerV41, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV41"] = UltraRefinedDynamicPrecisionOptimizerV41 + LLAMAUltraRefinedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV41", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV41 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV44 import ( + UltraRefinedDynamicPrecisionOptimizerV44, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV44"] = UltraRefinedDynamicPrecisionOptimizerV44 + LLAMAUltraRefinedDynamicPrecisionOptimizerV44 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV44", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV44 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV45 import ( + UltraRefinedDynamicPrecisionOptimizerV45, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV45"] = UltraRefinedDynamicPrecisionOptimizerV45 + LLAMAUltraRefinedDynamicPrecisionOptimizerV45 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV45", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV45 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV46 import ( + UltraRefinedDynamicPrecisionOptimizerV46, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV46"] = UltraRefinedDynamicPrecisionOptimizerV46 + LLAMAUltraRefinedDynamicPrecisionOptimizerV46 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV46", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV46 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV47 import ( + UltraRefinedDynamicPrecisionOptimizerV47, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV47"] = UltraRefinedDynamicPrecisionOptimizerV47 + LLAMAUltraRefinedDynamicPrecisionOptimizerV47 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV47", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV47 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV5 import ( + UltraRefinedDynamicPrecisionOptimizerV5, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV5"] = UltraRefinedDynamicPrecisionOptimizerV5 + LLAMAUltraRefinedDynamicPrecisionOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV5", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV54 import ( + UltraRefinedDynamicPrecisionOptimizerV54, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV54"] = UltraRefinedDynamicPrecisionOptimizerV54 + LLAMAUltraRefinedDynamicPrecisionOptimizerV54 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV54", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV54 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV55 import ( + UltraRefinedDynamicPrecisionOptimizerV55, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV55"] = UltraRefinedDynamicPrecisionOptimizerV55 + LLAMAUltraRefinedDynamicPrecisionOptimizerV55 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV55", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV55 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV56 import ( + UltraRefinedDynamicPrecisionOptimizerV56, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV56"] = UltraRefinedDynamicPrecisionOptimizerV56 + LLAMAUltraRefinedDynamicPrecisionOptimizerV56 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV56", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV56 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV9 import ( + UltraRefinedDynamicPrecisionOptimizerV9, + ) + + lama_register["UltraRefinedDynamicPrecisionOptimizerV9"] = UltraRefinedDynamicPrecisionOptimizerV9 + LLAMAUltraRefinedDynamicPrecisionOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV9", register=True) +except Exception as e: + print("UltraRefinedDynamicPrecisionOptimizerV9 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: + print("UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientHybridOptimizerV5 import ( + UltraRefinedEvolutionaryGradientHybridOptimizerV5, + ) + + lama_register["UltraRefinedEvolutionaryGradientHybridOptimizerV5"] = ( + UltraRefinedEvolutionaryGradientHybridOptimizerV5 + ) + LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5", register=True) +except Exception as e: + print("UltraRefinedEvolutionaryGradientHybridOptimizerV5 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV10 import ( + UltraRefinedEvolutionaryGradientOptimizerV10, + ) + + lama_register["UltraRefinedEvolutionaryGradientOptimizerV10"] = ( + UltraRefinedEvolutionaryGradientOptimizerV10 + ) + LLAMAUltraRefinedEvolutionaryGradientOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV10", register=True) +except Exception as e: + print("UltraRefinedEvolutionaryGradientOptimizerV10 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV32 import ( + UltraRefinedEvolutionaryGradientOptimizerV32, + ) + + lama_register["UltraRefinedEvolutionaryGradientOptimizerV32"] = ( + UltraRefinedEvolutionaryGradientOptimizerV32 + ) + LLAMAUltraRefinedEvolutionaryGradientOptimizerV32 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV32", register=True) +except Exception as e: + print("UltraRefinedEvolutionaryGradientOptimizerV32 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedHybridEvolutionaryAnnealingOptimizer import ( + UltraRefinedHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["UltraRefinedHybridEvolutionaryAnnealingOptimizer"] = ( + UltraRefinedHybridEvolutionaryAnnealingOptimizer + ) + LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: + print("UltraRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV50 import ( + UltraRefinedHyperStrategicOptimizerV50, + ) + + lama_register["UltraRefinedHyperStrategicOptimizerV50"] = UltraRefinedHyperStrategicOptimizerV50 + LLAMAUltraRefinedHyperStrategicOptimizerV50 = NonObjectOptimizer( + method="LLAMAUltraRefinedHyperStrategicOptimizerV50" + ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV50", register=True) +except Exception as e: + print("UltraRefinedHyperStrategicOptimizerV50 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV54 import ( + UltraRefinedHyperStrategicOptimizerV54, + ) + + lama_register["UltraRefinedHyperStrategicOptimizerV54"] = UltraRefinedHyperStrategicOptimizerV54 + LLAMAUltraRefinedHyperStrategicOptimizerV54 = NonObjectOptimizer( + method="LLAMAUltraRefinedHyperStrategicOptimizerV54" + ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV54", register=True) +except Exception as e: + print("UltraRefinedHyperStrategicOptimizerV54 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedPrecisionEvolutionaryOptimizerV43 import ( + UltraRefinedPrecisionEvolutionaryOptimizerV43, + ) + + lama_register["UltraRefinedPrecisionEvolutionaryOptimizerV43"] = ( + UltraRefinedPrecisionEvolutionaryOptimizerV43 + ) + LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( + method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43" + ).set_name("LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43", register=True) +except Exception as e: + print("UltraRefinedPrecisionEvolutionaryOptimizerV43 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedRAMEDS import UltraRefinedRAMEDS + + lama_register["UltraRefinedRAMEDS"] = UltraRefinedRAMEDS + LLAMAUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS").set_name( + "LLAMAUltraRefinedRAMEDS", register=True + ) +except Exception as e: + print("UltraRefinedRAMEDS can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedSpiralDifferentialClimberV3 import ( + UltraRefinedSpiralDifferentialClimberV3, + ) + + lama_register["UltraRefinedSpiralDifferentialClimberV3"] = UltraRefinedSpiralDifferentialClimberV3 + LLAMAUltraRefinedSpiralDifferentialClimberV3 = NonObjectOptimizer( + method="LLAMAUltraRefinedSpiralDifferentialClimberV3" + ).set_name("LLAMAUltraRefinedSpiralDifferentialClimberV3", register=True) +except Exception as e: + print("UltraRefinedSpiralDifferentialClimberV3 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedStrategicEvolutionaryOptimizerV60 import ( + UltraRefinedStrategicEvolutionaryOptimizerV60, + ) + + lama_register["UltraRefinedStrategicEvolutionaryOptimizerV60"] = ( + UltraRefinedStrategicEvolutionaryOptimizerV60 + ) + LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60 = NonObjectOptimizer( + method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60" + ).set_name("LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60", register=True) +except Exception as e: + print("UltraRefinedStrategicEvolutionaryOptimizerV60 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraRefinedStrategyDE import UltraRefinedStrategyDE + + lama_register["UltraRefinedStrategyDE"] = UltraRefinedStrategyDE + LLAMAUltraRefinedStrategyDE = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE").set_name( + "LLAMAUltraRefinedStrategyDE", register=True + ) +except Exception as e: + print("UltraRefinedStrategyDE can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UltraSupremeEvolutionaryGradientHybridOptimizerV7 import ( + UltraSupremeEvolutionaryGradientHybridOptimizerV7, + ) + + lama_register["UltraSupremeEvolutionaryGradientHybridOptimizerV7"] = ( + UltraSupremeEvolutionaryGradientHybridOptimizerV7 + ) + LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7" + ).set_name("LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7", register=True) +except Exception as e: + print("UltraSupremeEvolutionaryGradientHybridOptimizerV7 can not be imported: ", e) + +try: + from nevergrad.optimization.lama.UnifiedAdaptiveMemeticOptimizer import UnifiedAdaptiveMemeticOptimizer + + lama_register["UnifiedAdaptiveMemeticOptimizer"] = UnifiedAdaptiveMemeticOptimizer + LLAMAUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAUnifiedAdaptiveMemeticOptimizer" + ).set_name("LLAMAUnifiedAdaptiveMemeticOptimizer", register=True) +except Exception as e: + print("UnifiedAdaptiveMemeticOptimizer can not be imported: ", e) + +try: + from nevergrad.optimization.lama.VectorizedRefinedSpiralSearch import VectorizedRefinedSpiralSearch + + lama_register["VectorizedRefinedSpiralSearch"] = VectorizedRefinedSpiralSearch + LLAMAVectorizedRefinedSpiralSearch = NonObjectOptimizer( + method="LLAMAVectorizedRefinedSpiralSearch" + ).set_name("LLAMAVectorizedRefinedSpiralSearch", register=True) +except Exception as e: + print("VectorizedRefinedSpiralSearch can not be imported: ", e) + +try: + from nevergrad.optimization.lama.eQGSA_v2 import eQGSA_v2 + + lama_register["eQGSA_v2"] = eQGSA_v2 + LLAMAeQGSA_v2 = NonObjectOptimizer(method="LLAMAeQGSA_v2").set_name("LLAMAeQGSA_v2", register=True) +except Exception as e: + print("eQGSA_v2 can not be imported: ", e) From f281c627dad732b7144149d9e3da8bb3289fec4f Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Mon, 24 Jun 2024 10:51:57 +0200 Subject: [PATCH 2/6] lama_optimized_files --- nevergrad/optimization/lama/AADCCS.py | 75 ++++++ nevergrad/optimization/lama/AADEHLS.py | 83 +++++++ nevergrad/optimization/lama/AADMEM.py | 77 ++++++ nevergrad/optimization/lama/AAES.py | 93 +++++++ nevergrad/optimization/lama/ACDE.py | 69 +++++ nevergrad/optimization/lama/ACMDEOBD.py | 77 ++++++ nevergrad/optimization/lama/ADAEDA.py | 67 +++++ nevergrad/optimization/lama/ADCE.py | 60 +++++ nevergrad/optimization/lama/ADEA.py | 68 +++++ nevergrad/optimization/lama/ADEAS.py | 97 ++++++++ nevergrad/optimization/lama/ADECMS.py | 60 +++++ nevergrad/optimization/lama/ADEDCA.py | 69 +++++ nevergrad/optimization/lama/ADEDE.py | 68 +++++ nevergrad/optimization/lama/ADEDLR.py | 68 +++++ nevergrad/optimization/lama/ADEDM.py | 85 +++++++ nevergrad/optimization/lama/ADEEM.py | 68 +++++ nevergrad/optimization/lama/ADEGE.py | 78 ++++++ nevergrad/optimization/lama/ADEGM.py | 56 +++++ nevergrad/optimization/lama/ADEGS.py | 61 +++++ nevergrad/optimization/lama/ADEM.py | 75 ++++++ nevergrad/optimization/lama/ADEMSC.py | 88 +++++++ nevergrad/optimization/lama/ADEPF.py | 66 +++++ nevergrad/optimization/lama/ADEPM.py | 60 +++++ nevergrad/optimization/lama/ADEPMC.py | 69 +++++ nevergrad/optimization/lama/ADEPMI.py | 81 ++++++ nevergrad/optimization/lama/ADEPR.py | 50 ++++ nevergrad/optimization/lama/ADES.py | 79 ++++++ nevergrad/optimization/lama/ADESA.py | 84 +++++++ nevergrad/optimization/lama/ADE_FPC.py | 53 ++++ nevergrad/optimization/lama/ADGD.py | 62 +++++ nevergrad/optimization/lama/ADGE.py | 51 ++++ nevergrad/optimization/lama/ADMDE.py | 89 +++++++ nevergrad/optimization/lama/ADMEMS.py | 64 +++++ nevergrad/optimization/lama/ADSDiffEvo.py | 74 ++++++ nevergrad/optimization/lama/ADSEA.py | 74 ++++++ nevergrad/optimization/lama/ADSEAPlus.py | 78 ++++++ nevergrad/optimization/lama/AGBES.py | 63 +++++ nevergrad/optimization/lama/AGCES.py | 86 +++++++ nevergrad/optimization/lama/AGDE.py | 75 ++++++ nevergrad/optimization/lama/AGDELS.py | 73 ++++++ nevergrad/optimization/lama/AGDiffEvo.py | 67 +++++ nevergrad/optimization/lama/AGEA.py | 57 +++++ nevergrad/optimization/lama/AGESA.py | 69 +++++ nevergrad/optimization/lama/AGGE.py | 76 ++++++ nevergrad/optimization/lama/AGGES.py | 61 +++++ nevergrad/optimization/lama/AGIDE.py | 69 +++++ nevergrad/optimization/lama/AHDEMI.py | 87 +++++++ nevergrad/optimization/lama/ALDEEM.py | 73 ++++++ nevergrad/optimization/lama/ALES.py | 62 +++++ nevergrad/optimization/lama/ALSS.py | 56 +++++ nevergrad/optimization/lama/AMDE.py | 86 +++++++ nevergrad/optimization/lama/AMES.py | 68 +++++ nevergrad/optimization/lama/AMSDiffEvo.py | 75 ++++++ nevergrad/optimization/lama/AMSEA.py | 77 ++++++ nevergrad/optimization/lama/AN_MDEPSO.py | 125 ++++++++++ nevergrad/optimization/lama/APBES.py | 63 +++++ nevergrad/optimization/lama/APDE.py | 66 +++++ nevergrad/optimization/lama/APDETL.py | 86 +++++++ nevergrad/optimization/lama/APES.py | 60 +++++ nevergrad/optimization/lama/AQAPSO_LS_DIW.py | 94 +++++++ .../optimization/lama/AQAPSO_LS_DIW_AP.py | 85 +++++++ nevergrad/optimization/lama/ARDLS.py | 76 ++++++ nevergrad/optimization/lama/ARESM.py | 94 +++++++ nevergrad/optimization/lama/ARISA.py | 58 +++++ nevergrad/optimization/lama/ASADEA.py | 81 ++++++ nevergrad/optimization/lama/ASO.py | 53 ++++ nevergrad/optimization/lama/AVDE.py | 67 +++++ ...atedAdaptivePrecisionCrossoverEvolution.py | 77 ++++++ .../AdaptiveAnnealingDifferentialEvolution.py | 125 ++++++++++ .../optimization/lama/AdaptiveArchiveDE.py | 93 +++++++ .../lama/AdaptiveCMADiffEvoPSO.py | 127 ++++++++++ .../AdaptiveChaoticFireworksOptimization.py | 77 ++++++ .../AdaptiveClusterBasedHybridOptimization.py | 196 +++++++++++++++ .../AdaptiveClusterHybridOptimizationV5.py | 115 +++++++++ ...daptiveClusteredDifferentialEvolutionV2.py | 144 +++++++++++ ...AdaptiveCohortHarmonizationOptimization.py | 72 ++++++ .../lama/AdaptiveCohortMemeticAlgorithm.py | 127 ++++++++++ .../lama/AdaptiveControlledMemoryAnnealing.py | 66 +++++ ...daptiveCooperativeDifferentialEvolution.py | 121 +++++++++ ...CooperativeDifferentialMemeticAlgorithm.py | 96 +++++++ .../lama/AdaptiveCovarianceGradientSearch.py | 150 +++++++++++ ...veCovarianceMatrixDifferentialEvolution.py | 91 +++++++ ...alEvolutionWithDynamicStrategySwitching.py | 140 +++++++++++ .../lama/AdaptiveCovarianceMatrixEvolution.py | 104 ++++++++ ...aptiveCovarianceMatrixEvolutionStrategy.py | 79 ++++++ ...MatrixEvolutionWithSelfAdaptiveMutation.py | 88 +++++++ .../AdaptiveCovarianceMatrixSelfAdaptation.py | 114 +++++++++ ...daptiveCovarianceMatrixSelfAdaptationV2.py | 112 +++++++++ .../lama/AdaptiveCrossoverDEPSO.py | 155 ++++++++++++ .../AdaptiveCrossoverElitistStrategyV6.py | 87 +++++++ .../lama/AdaptiveCrossoverSearch.py | 73 ++++++ .../lama/AdaptiveCulturalCooperativeSearch.py | 108 ++++++++ .../AdaptiveCulturalDifferentialEvolution.py | 121 +++++++++ ...iveCulturalDifferentialMemeticEvolution.py | 130 ++++++++++ .../lama/AdaptiveCulturalEvolutionStrategy.py | 109 ++++++++ .../AdaptiveCulturalEvolutionaryAlgorithm.py | 117 +++++++++ .../lama/AdaptiveCulturalMemeticAlgorithm.py | 115 +++++++++ ...iveCulturalMemeticDifferentialEvolution.py | 125 ++++++++++ .../lama/AdaptiveDEPSOOptimizer.py | 106 ++++++++ .../AdaptiveDEWithElitismAndLocalSearch.py | 86 +++++++ .../lama/AdaptiveDEWithOrthogonalCrossover.py | 45 ++++ .../lama/AdaptiveDecayOptimizer.py | 78 ++++++ .../lama/AdaptiveDifferentialCrossover.py | 55 ++++ .../lama/AdaptiveDifferentialEvolution.py | 54 ++++ ...ptiveDifferentialEvolutionHarmonySearch.py | 87 +++++++ .../AdaptiveDifferentialEvolutionOptimizer.py | 52 ++++ .../lama/AdaptiveDifferentialEvolutionPSO.py | 91 +++++++ .../lama/AdaptiveDifferentialEvolutionPlus.py | 59 +++++ ...entialEvolutionWithAdaptivePerturbation.py | 112 +++++++++ ...rentialEvolutionWithBayesianLocalSearch.py | 146 +++++++++++ ...EvolutionWithCovarianceMatrixAdaptation.py | 107 ++++++++ ...rentialEvolutionWithDynamicPopulationV2.py | 103 ++++++++ ...eDifferentialEvolutionWithGradientBoost.py | 112 +++++++++ ...veDifferentialEvolutionWithGuidedSearch.py | 148 +++++++++++ ...iveDifferentialEvolutionWithLocalSearch.py | 94 +++++++ ...eDifferentialEvolutionWithMemeticSearch.py | 126 ++++++++++ ...rentialEvolutionWithSurrogateAssistance.py | 125 ++++++++++ .../lama/AdaptiveDifferentialHarmonySearch.py | 52 ++++ .../AdaptiveDifferentialMemeticAlgorithm.py | 122 +++++++++ .../AdaptiveDifferentialQuantumEvolution.py | 82 ++++++ ...daptiveDifferentialQuantumMetaheuristic.py | 72 ++++++ .../lama/AdaptiveDifferentialSpiralSearch.py | 59 +++++ ...iveDimensionalClimbingEvolutionStrategy.py | 78 ++++++ .../AdaptiveDimensionalCrossoverEvolver.py | 73 ++++++ ...aptiveDirectionalBiasQuorumOptimization.py | 77 ++++++ .../lama/AdaptiveDirectionalSearch.py | 62 +++++ .../AdaptiveDivergenceClusteringSearch.py | 64 +++++ .../lama/AdaptiveDiverseHybridOptimizer.py | 186 ++++++++++++++ .../AdaptiveDiversifiedEvolutionStrategy.py | 61 +++++ .../lama/AdaptiveDiversifiedHarmonySearch.py | 93 +++++++ ...aptiveDiversifiedHarmonySearchOptimizer.py | 106 ++++++++ .../lama/AdaptiveDiversifiedSearch.py | 55 ++++ .../AdaptiveDiversityDifferentialHybrid.py | 88 +++++++ ...ptiveDiversityDifferentialMemeticHybrid.py | 89 +++++++ ...iversityMaintainedDifferentialEvolution.py | 97 ++++++++ ...veDiversityMaintainingGradientEvolution.py | 112 +++++++++ .../optimization/lama/AdaptiveDiversityPSO.py | 77 ++++++ .../lama/AdaptiveDolphinPodOptimization.py | 64 +++++ .../AdaptiveDualPhaseDifferentialEvolution.py | 74 ++++++ ...eDualPhaseEvolutionarySwarmOptimization.py | 146 +++++++++++ ...OptimizationWithDynamicParameterControl.py | 146 +++++++++++ .../lama/AdaptiveDualPhaseStrategy.py | 81 ++++++ .../lama/AdaptiveDualPopulationDE_LS.py | 95 +++++++ .../lama/AdaptiveDualStrategyOptimizer.py | 66 +++++ .../optimization/lama/AdaptiveDynamicDE.py | 95 +++++++ .../AdaptiveDynamicDifferentialEvolution.py | 136 ++++++++++ ...tiveDynamicDualPhaseEnhancedStrategyV20.py | 80 ++++++ .../AdaptiveDynamicDualPhaseStrategyV11.py | 83 +++++++ .../lama/AdaptiveDynamicEvolutionStrategy.py | 76 ++++++ ...DynamicExplorationExploitationAlgorithm.py | 111 +++++++++ ...namicExplorationExploitationAlgorithmV2.py | 150 +++++++++++ ...namicExplorationExploitationAlgorithmV3.py | 161 ++++++++++++ .../AdaptiveDynamicExplorationOptimization.py | 166 +++++++++++++ .../lama/AdaptiveDynamicFireworkAlgorithm.py | 93 +++++++ ...ptiveDynamicFireworkAlgorithmRedesigned.py | 108 ++++++++ ...eDynamicFireworkDifferentialEvolutionV4.py | 71 ++++++ .../lama/AdaptiveDynamicHarmonySearch.py | 67 +++++ .../AdaptiveDynamicHybridOptimizationV2.py | 163 ++++++++++++ .../lama/AdaptiveDynamicHybridOptimizer.py | 126 ++++++++++ ...tiveDynamicMemeticEvolutionaryAlgorithm.py | 98 ++++++++ ...namicMultiStrategyDifferentialEvolution.py | 155 ++++++++++++ ...AdaptiveDynamicQuantumSwarmOptimization.py | 73 ++++++ ...ptiveEliteCovarianceMatrixMemeticSearch.py | 114 +++++++++ .../AdaptiveEliteDifferentialEvolution.py | 122 +++++++++ .../AdaptiveEliteDiverseHybridOptimizer.py | 136 ++++++++++ .../lama/AdaptiveEliteGuidedDE_LS_v2.py | 109 ++++++++ .../lama/AdaptiveEliteGuidedDE_v2.py | 113 +++++++++ .../lama/AdaptiveEliteGuidedMutationDE.py | 109 ++++++++ .../lama/AdaptiveEliteGuidedMutationDE_v3.py | 125 ++++++++++ .../lama/AdaptiveEliteGuidedMutationDE_v4.py | 98 ++++++++ .../lama/AdaptiveEliteGuidedRestartDE.py | 121 +++++++++ .../lama/AdaptiveEliteHybridOptimizer.py | 126 ++++++++++ ...aptiveEliteMemeticDifferentialEvolution.py | 109 ++++++++ .../lama/AdaptiveEliteMemeticOptimizer.py | 107 ++++++++ .../lama/AdaptiveEliteMemeticOptimizerV5.py | 139 +++++++++++ .../lama/AdaptiveEliteMemeticOptimizerV6.py | 144 +++++++++++ ...EliteMultiStrategyDifferentialEvolution.py | 151 +++++++++++ .../optimization/lama/AdaptiveElitistDE.py | 80 ++++++ .../optimization/lama/AdaptiveElitistDE_v3.py | 128 ++++++++++ .../lama/AdaptiveElitistMutationDE.py | 89 +++++++ .../lama/AdaptiveElitistPopulationStrategy.py | 58 +++++ ...veElitistQuasiRandomDEGradientAnnealing.py | 145 +++++++++++ ...dDifferentialEvolutionFireworkAlgorithm.py | 72 ++++++ ...ynamicFireworkAlgorithmWithHybridSearch.py | 118 +++++++++ ...tiveEnhancedEvolutionaryFireworksSearch.py | 78 ++++++ ...eEnhancedEvolutionaryFireworksSearch_v2.py | 76 ++++++ ...plorationGravitationalSwarmOptimization.py | 77 ++++++ .../lama/AdaptiveEnhancedFireworkAlgorithm.py | 91 +++++++ ...nhancedFireworkAlgorithmWithLocalSearch.py | 111 +++++++++ ...AdaptiveEnhancedGradientGuidedHybridPSO.py | 70 ++++++ ...eEnhancedGravitationalSwarmIntelligence.py | 94 +++++++ ...hancedGravitationalSwarmIntelligenceV18.py | 115 +++++++++ ...nhancedGravitationalSwarmIntelligenceV2.py | 94 +++++++ ...hancedGravitationalSwarmIntelligenceV22.py | 96 +++++++ ...hancedGravitationalSwarmIntelligenceV29.py | 96 +++++++ ...hancedGravitationalSwarmIntelligenceV33.py | 96 +++++++ ...aptiveEnhancedHarmonicFireworkAlgorithm.py | 74 ++++++ .../AdaptiveEnhancedHarmonyFireworksSearch.py | 83 +++++++ ...aptiveEnhancedHarmonyFireworksSearch_v2.py | 102 ++++++++ ...dHarmonySearchWithLevyFlightInspiration.py | 75 ++++++ ...iveEnhancedMemeticDifferentialEvolution.py | 94 +++++++ ...eEnhancedMemeticEvolutionaryAlgorithmV3.py | 98 ++++++++ .../lama/AdaptiveEnhancedMetaNetAQAPSOv10.py | 124 +++++++++ .../lama/AdaptiveEnhancedMetaNetAQAPSOv11.py | 130 ++++++++++ ...EnhancedMultiPhaseDifferentialEvolution.py | 153 ++++++++++++ ...EnhancedMultiPhaseOptimizationAlgorithm.py | 116 +++++++++ .../lama/AdaptiveEnhancedQGSA_v7.py | 74 ++++++ .../AdaptiveEnhancedQuantumHarmonySearch.py | 58 +++++ ...aptiveEnhancedQuantumSimulatedAnnealing.py | 71 ++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V11.py | 97 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V14.py | 98 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V28.py | 108 ++++++++ .../lama/AdaptiveEnsembleMemeticAlgorithm.py | 110 ++++++++ ...iveEvolutionaryDifferentialOptimization.py | 42 ++++ ...lutionaryDifferentialPopulationStrategy.py | 81 ++++++ .../AdaptiveEvolutionaryFireworksSearch_v1.py | 78 ++++++ .../AdaptiveEvolutionaryGradientSearch.py | 94 +++++++ .../AdaptiveExplorationEvolutionStrategy.py | 71 ++++++ ...rationExploitationDifferentialEvolution.py | 130 ++++++++++ ...eExplorationExploitationHybridAlgorithm.py | 107 ++++++++ .../lama/AdaptiveExploratoryOptimizer.py | 80 ++++++ .../AdaptiveFeedbackControlStrategyV61.py | 81 ++++++ ...aptiveFeedbackEnhancedMemoryStrategyV71.py | 94 +++++++ .../lama/AdaptiveFireworkAlgorithmEnhanced.py | 96 +++++++ .../AdaptiveFireworkAlgorithmOptimization.py | 59 +++++ .../AdaptiveFireworksEnhancedHarmonySearch.py | 85 +++++++ .../lama/AdaptiveFocusedEvolutionStrategy.py | 64 +++++ .../lama/AdaptiveFuzzyDynamicDE.py | 96 +++++++ .../lama/AdaptiveGaussianSearch.py | 47 ++++ .../AdaptiveGlobalLocalSearchStrategyV62.py | 71 ++++++ .../lama/AdaptiveGradientAssistedEvolution.py | 83 +++++++ .../AdaptiveGradientBalancedCrossoverPSO.py | 70 ++++++ ...aptiveGradientBalancedEvolutionStrategy.py | 101 ++++++++ ...ptiveGradientBoostedMemoryAnnealingPlus.py | 174 +++++++++++++ ...edMemoryAnnealingWithExplorationControl.py | 174 +++++++++++++ ...daptiveGradientBoostedMemoryExploration.py | 158 ++++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 141 +++++++++++ .../AdaptiveGradientClusteringEvolution.py | 87 +++++++ .../AdaptiveGradientCrossoverOptimizer.py | 61 +++++ .../AdaptiveGradientDifferentialEvolution.py | 104 ++++++++ ...veGradientDifferentialEvolutionEnhanced.py | 105 ++++++++ ...aptiveGradientDifferentialEvolutionPlus.py | 132 ++++++++++ .../AdaptiveGradientDifferentialHybrid.py | 65 +++++ .../AdaptiveGradientEnhancedExplorationPSO.py | 72 ++++++ ...tiveGradientEnhancedMultiPhaseAnnealing.py | 125 ++++++++++ .../lama/AdaptiveGradientEnhancedRAMEDS.py | 86 +++++++ .../lama/AdaptiveGradientEvolution.py | 89 +++++++ .../lama/AdaptiveGradientExploration.py | 56 +++++ .../lama/AdaptiveGradientExplorationV2.py | 66 +++++ .../lama/AdaptiveGradientGuidedEvolution.py | 75 ++++++ .../lama/AdaptiveGradientInformedPSO.py | 64 +++++ .../lama/AdaptiveGradientSampling.py | 64 +++++ .../lama/AdaptiveGradientSearch.py | 56 +++++ .../AdaptiveGravitationalSwarmIntelligence.py | 67 +++++ ...aptiveGravitationalSwarmIntelligenceV15.py | 92 +++++++ ...daptiveGravitationalSwarmIntelligenceV2.py | 67 +++++ ...aptiveGravitationalSwarmIntelligenceV26.py | 92 +++++++ ...daptiveGravitationalSwarmIntelligenceV3.py | 78 ++++++ ...daptiveGravitationalSwarmIntelligenceV4.py | 78 ++++++ ...izationWithDynamicDiversityPreservation.py | 101 ++++++++ .../lama/AdaptiveGuidedCulturalSearch.py | 113 +++++++++ .../AdaptiveGuidedDifferentialEvolution.py | 125 ++++++++++ .../lama/AdaptiveGuidedEvolutionStrategy.py | 60 +++++ .../lama/AdaptiveGuidedHybridOptimizer.py | 123 +++++++++ .../lama/AdaptiveGuidedMutationOptimizer.py | 71 ++++++ .../lama/AdaptiveHarmonicFireworkAlgorithm.py | 74 ++++++ .../lama/AdaptiveHarmonicSearchOptimizer.py | 76 ++++++ .../lama/AdaptiveHarmonicSwarmOptimization.py | 68 +++++ .../AdaptiveHarmonicSwarmOptimizationV2.py | 70 ++++++ .../AdaptiveHarmonicSwarmOptimizationV3.py | 70 ++++++ .../lama/AdaptiveHarmonicTabuSearchV12.py | 98 ++++++++ .../lama/AdaptiveHarmonicTabuSearchV17.py | 109 ++++++++ .../lama/AdaptiveHarmonicTabuSearchV20.py | 100 ++++++++ .../lama/AdaptiveHarmonicTabuSearchV8.py | 95 +++++++ .../lama/AdaptiveHarmonyFireworksAlgorithm.py | 66 +++++ .../lama/AdaptiveHarmonyMemeticAlgorithm.py | 90 +++++++ .../AdaptiveHarmonyMemeticAlgorithmV15.py | 86 +++++++ .../AdaptiveHarmonyMemeticOptimizationV2.py | 86 +++++++ .../AdaptiveHarmonyMemeticOptimizationV27.py | 107 ++++++++ .../lama/AdaptiveHarmonyMemeticSearchV2.py | 101 ++++++++ .../lama/AdaptiveHarmonySearchOptimizerV2.py | 93 +++++++ ...ptiveHarmonySearchWithCuckooInspiration.py | 63 +++++ ...thDiversificationAndLocalOptimizationV2.py | 110 ++++++++ ...tiveHarmonySearchWithImprovedLevyFlight.py | 68 +++++ ...SearchWithImprovedLevyFlightInspiration.py | 100 ++++++++ ...eHarmonySearchWithLevyFlightImprovement.py | 76 ++++++ ...ptiveHarmonySearchWithLocalOptimization.py | 112 +++++++++ ...monySearchWithLocalOptimizationImproved.py | 112 +++++++++ ...iveHarmonySearchWithLocalOptimizationV2.py | 106 ++++++++ ...tiveHarmonySearchWithSimulatedAnnealing.py | 98 ++++++++ .../lama/AdaptiveHarmonyTabuOptimization.py | 74 ++++++ .../lama/AdaptiveHybridAlgorithm.py | 122 +++++++++ ...daptiveHybridAnnealingWithGradientBoost.py | 104 ++++++++ ...tiveHybridAnnealingWithMemoryRefinement.py | 125 ++++++++++ .../lama/AdaptiveHybridCMAESDE.py | 183 ++++++++++++++ ...CovarianceMatrixDifferentialEvolutionV3.py | 122 +++++++++ .../lama/AdaptiveHybridCulturalOptimizer.py | 123 +++++++++ .../AdaptiveHybridDEPSOWithDynamicRestart.py | 149 +++++++++++ ...ptiveHybridDEWithIntensifiedLocalSearch.py | 110 ++++++++ .../AdaptiveHybridDifferentialEvolution.py | 124 +++++++++ .../lama/AdaptiveHybridEvolutionStrategyV5.py | 72 ++++++ .../lama/AdaptiveHybridFireworkAlgorithm.py | 82 ++++++ ...bridGradientAnnealingWithVariableMemory.py | 135 ++++++++++ .../lama/AdaptiveHybridHarmonySearch.py | 57 +++++ .../lama/AdaptiveHybridMetaOptimizer.py | 122 +++++++++ .../lama/AdaptiveHybridOptimization.py | 161 ++++++++++++ .../lama/AdaptiveHybridOptimizationV2.py | 161 ++++++++++++ .../lama/AdaptiveHybridOptimizationV3.py | 102 ++++++++ .../lama/AdaptiveHybridOptimizer.py | 148 +++++++++++ ...ybridParticleSwarmDifferentialEvolution.py | 134 ++++++++++ ...dParticleSwarmDifferentialEvolutionPlus.py | 134 ++++++++++ .../AdaptiveHybridQuasiRandomGradientDE.py | 129 ++++++++++ .../AdaptiveHybridRecombinativeStrategy.py | 65 +++++ .../lama/AdaptiveHybridSearchOptimizer.py | 160 ++++++++++++ ...daptiveHybridSwarmEvolutionOptimization.py | 145 +++++++++++ ...yperQuantumStateCrossoverOptimizationV2.py | 93 +++++++ ...AdaptiveIncrementalCrossoverEnhancement.py | 73 ++++++ .../lama/AdaptiveInertiaHybridOptimizer.py | 68 +++++ .../lama/AdaptiveInertiaParticleOptimizer.py | 67 +++++ ...daptiveInertiaParticleSwarmOptimization.py | 57 +++++ ...eLearningDifferentialEvolutionOptimizer.py | 89 +++++++ ...veLevyDiversifiedMetaHeuristicAlgorithm.py | 68 +++++ .../lama/AdaptiveLevyHarmonySearch.py | 65 +++++ ...SearchImprovedQuantumSimulatedAnnealing.py | 60 +++++ .../lama/AdaptiveLocalSearchOptimizer.py | 58 +++++ ...iveLocalSearchQuantumSimulatedAnnealing.py | 59 +++++ .../lama/AdaptiveMemeticAlgorithm.py | 72 ++++++ ...CrossoverDifferentialEvolutionOptimizer.py | 93 +++++++ ...fferentialEvolutionWithElitismOptimizer.py | 124 +++++++++ .../AdaptiveMemeticDifferentialEvolution.py | 166 +++++++++++++ ...veMemeticDifferentialEvolutionOptimizer.py | 86 +++++++ .../AdaptiveMemeticDifferentialEvolutionV2.py | 96 +++++++ .../AdaptiveMemeticDifferentialEvolutionV3.py | 96 +++++++ .../AdaptiveMemeticDifferentialEvolutionV4.py | 109 ++++++++ .../AdaptiveMemeticDifferentialEvolutionV5.py | 110 ++++++++ .../AdaptiveMemeticDifferentialEvolutionV6.py | 91 +++++++ .../AdaptiveMemeticDifferentialEvolutionV7.py | 93 +++++++ ...entialEvolutionWithElitismAndDynamicFCR.py | 125 ++++++++++ ...rentialEvolutionWithSurrogateAssistance.py | 130 ++++++++++ ...daptiveMemeticDifferentialQuantumSearch.py | 159 ++++++++++++ .../lama/AdaptiveMemeticDifferentialSearch.py | 89 +++++++ .../lama/AdaptiveMemeticDiverseOptimizer.py | 144 +++++++++++ .../lama/AdaptiveMemeticEvolutionStrategy.py | 97 ++++++++ .../AdaptiveMemeticEvolutionaryAlgorithm.py | 93 +++++++ .../AdaptiveMemeticEvolutionaryOptimizer.py | 94 +++++++ .../lama/AdaptiveMemeticEvolutionarySearch.py | 92 +++++++ .../AdaptiveMemeticHarmonyOptimization.py | 85 +++++++ .../AdaptiveMemeticHarmonyOptimizationV5.py | 85 +++++++ .../lama/AdaptiveMemeticHybridOptimizer.py | 156 ++++++++++++ .../lama/AdaptiveMemeticOptimizer.py | 91 +++++++ .../lama/AdaptiveMemeticOptimizerV2.py | 91 +++++++ ...daptiveMemeticParticleSwarmOptimization.py | 101 ++++++++ .../lama/AdaptiveMemoryAssistedStrategyV41.py | 81 ++++++ .../AdaptiveMemoryEnhancedDualStrategyV45.py | 76 ++++++ .../lama/AdaptiveMemoryEnhancedSearch.py | 75 ++++++ .../lama/AdaptiveMemoryEnhancedStrategyV42.py | 77 ++++++ .../AdaptiveMemoryEvolutionaryOptimizer.py | 105 ++++++++ .../lama/AdaptiveMemoryGradientAnnealing.py | 125 ++++++++++ .../AdaptiveMemoryGradientAnnealingPlus.py | 125 ++++++++++ ...ryGradientAnnealingWithExplorationBoost.py | 141 +++++++++++ ...daptiveMemoryGradientSimulatedAnnealing.py | 120 +++++++++ ...daptiveMemoryGuidedEvolutionStrategyV57.py | 76 ++++++ .../lama/AdaptiveMemoryHybridAnnealing.py | 74 ++++++ .../lama/AdaptiveMemoryHybridDEPSO.py | 162 ++++++++++++ .../lama/AdaptiveMemoryHybridDEPSO_V2.py | 164 ++++++++++++ ...daptiveMemoryParticleDifferentialSearch.py | 113 +++++++++ .../AdaptiveMemorySelfTuningStrategyV60.py | 95 +++++++ .../lama/AdaptiveMemorySimulatedAnnealing.py | 63 +++++ .../lama/AdaptiveMetaNetAQAPSO.py | 123 +++++++++ .../lama/AdaptiveMetaNetAQAPSOv13.py | 130 ++++++++++ .../lama/AdaptiveMetaNetPSO_v3.py | 134 ++++++++++ .../optimization/lama/AdaptiveMetaNetPSOv3.py | 134 ++++++++++ .../lama/AdaptiveMetaheuristicOptimization.py | 146 +++++++++++ .../lama/AdaptiveMomentumOptimization.py | 67 +++++ .../lama/AdaptiveMultiExplorationAlgorithm.py | 96 +++++++ .../AdaptiveMultiMemorySimulatedAnnealing.py | 140 +++++++++++ ...ptiveMultiOperatorDifferentialEvolution.py | 162 ++++++++++++ .../lama/AdaptiveMultiOperatorSearch.py | 141 +++++++++++ .../lama/AdaptiveMultiOperatorSearchV2.py | 141 +++++++++++ .../lama/AdaptiveMultiOperatorSearchV3.py | 145 +++++++++++ .../lama/AdaptiveMultiPhaseAnnealing.py | 82 ++++++ .../lama/AdaptiveMultiPhaseAnnealingV2.py | 108 ++++++++ .../lama/AdaptiveMultiPhaseOptimization.py | 147 +++++++++++ ...iveMultiPopulationDifferentialEvolution.py | 177 +++++++++++++ .../lama/AdaptiveMultiStageOptimization.py | 139 +++++++++++ .../lama/AdaptiveMultiStrategicOptimizer.py | 82 ++++++ .../lama/AdaptiveMultiStrategyDE.py | 161 ++++++++++++ .../lama/AdaptiveMultiStrategyDEWithMemory.py | 128 ++++++++++ ...ptiveMultiStrategyDifferentialEvolution.py | 124 +++++++++ ...eMultiStrategyDifferentialEvolutionPlus.py | 135 ++++++++++ .../lama/AdaptiveMultiStrategyOptimizer.py | 125 ++++++++++ .../lama/AdaptiveMultiStrategyOptimizerV2.py | 155 ++++++++++++ ...NicheDifferentialParticleSwarmOptimizer.py | 163 ++++++++++++ .../lama/AdaptiveNichingDE_PSO.py | 125 ++++++++++ ...iveOppositionBasedDifferentialEvolution.py | 89 +++++++ ...itionBasedDifferentialEvolutionImproved.py | 71 ++++++ ...ionBasedHarmonySearchDynamicBandwidthDE.py | 108 ++++++++ ...AdaptiveOrthogonalDifferentialEvolution.py | 58 +++++ ...cillatoryCrossoverDifferentialEvolution.py | 53 ++++ .../AdaptiveParticleDifferentialSearch.py | 97 ++++++++ .../lama/AdaptiveParticleSwarmOptimization.py | 62 +++++ ...aptivePerturbationDifferentialEvolution.py | 51 ++++ ...opulationDifferentialEvolutionOptimizer.py | 89 +++++++ ...ustDifferentialEvolutionWithEliteSearch.py | 155 ++++++++++++ .../AdaptivePopulationMemeticOptimizer.py | 101 ++++++++ .../AdaptivePopulationResizingOptimizer.py | 154 ++++++++++++ .../AdaptivePrecisionCohortOptimizationV3.py | 66 +++++ ...vePrecisionControlDifferentialEvolution.py | 54 ++++ .../AdaptivePrecisionCrossoverEvolution.py | 77 ++++++ .../AdaptivePrecisionDifferentialEvolution.py | 60 +++++ .../lama/AdaptivePrecisionDivideSearch.py | 44 ++++ ...aptivePrecisionDynamicMemoryStrategyV48.py | 90 +++++++ .../AdaptivePrecisionEvolutionStrategy.py | 67 +++++ .../lama/AdaptivePrecisionFocalStrategy.py | 82 ++++++ .../lama/AdaptivePrecisionHybridSearch.py | 74 ++++++ .../AdaptivePrecisionMemoryStrategyV47.py | 93 +++++++ ...aptivePrecisionRotationalClimbOptimizer.py | 64 +++++ .../lama/AdaptivePrecisionSearch.py | 68 +++++ .../AdaptivePrecisionStrategicOptimizer.py | 74 ++++++ nevergrad/optimization/lama/AdaptiveQGSA.py | 65 +++++ .../optimization/lama/AdaptiveQGSA_EC.py | 65 +++++ .../lama/AdaptiveQuantumAnnealingDE.py | 152 +++++++++++ .../lama/AdaptiveQuantumAnnealingDEv2.py | 157 ++++++++++++ .../AdaptiveQuantumCognitionOptimizerV3.py | 84 +++++++ .../lama/AdaptiveQuantumCrossoverOptimizer.py | 69 +++++ .../AdaptiveQuantumDifferentialEvolution.py | 69 +++++ ...daptiveQuantumDifferentialEvolutionPlus.py | 89 +++++++ .../AdaptiveQuantumDifferentialEvolutionV2.py | 113 +++++++++ ...nWithAdaptiveMemoryAndHybridLocalSearch.py | 172 +++++++++++++ ...rentialEvolutionWithDynamicHybridSearch.py | 161 ++++++++++++ ...ferentialEvolutionWithEliteGuidedSearch.py | 166 +++++++++++++ ...alEvolutionWithElitistLearningAndMemory.py | 199 +++++++++++++++ ...nWithEnhancedElitismAndMemoryRefinement.py | 163 ++++++++++++ ...rentialEvolutionWithEnhancedLocalSearch.py | 161 ++++++++++++ .../AdaptiveQuantumDiversityEnhancerV7.py | 92 +++++++ .../AdaptiveQuantumDynamicTuningOptimizer.py | 74 ++++++ ...aptiveQuantumEliteDifferentialEvolution.py | 186 ++++++++++++++ .../AdaptiveQuantumEliteMemeticOptimizer.py | 129 ++++++++++ .../lama/AdaptiveQuantumEntropyDE.py | 140 +++++++++++ .../lama/AdaptiveQuantumEvolutionStrategy.py | 60 +++++ ...ptiveQuantumEvolvedDiversityExplorerV15.py | 80 ++++++ ...radientBoostedEvolutionaryMemeticSearch.py | 151 +++++++++++ ...tiveQuantumGradientBoostedMemeticSearch.py | 133 ++++++++++ ...daptiveQuantumGradientEnhancedOptimizer.py | 80 ++++++ ...eQuantumGradientExplorationOptimization.py | 213 ++++++++++++++++ ...uantumGradientExplorationOptimizationV2.py | 216 ++++++++++++++++ .../AdaptiveQuantumGradientHybridOptimizer.py | 92 +++++++ .../lama/AdaptiveQuantumGradientOptimizer.py | 81 ++++++ .../lama/AdaptiveQuantumHarmonizedPSO.py | 74 ++++++ .../lama/AdaptiveQuantumHybridOptimizer.py | 187 ++++++++++++++ .../lama/AdaptiveQuantumHybridSearchV2.py | 78 ++++++ ...aptiveQuantumInfluencedMemeticAlgorithm.py | 115 +++++++++ ...tiveQuantumInformedDifferentialStrategy.py | 75 ++++++ ...AdaptiveQuantumInformedGradientEnhancer.py | 88 +++++++ .../lama/AdaptiveQuantumLeapOptimizer.py | 74 ++++++ ...uantumLevyDifferentialEnhancedOptimizer.py | 156 ++++++++++++ ...daptiveQuantumLevyDifferentialOptimizer.py | 156 ++++++++++++ ...ptiveQuantumLevyDifferentialOptimizerV2.py | 156 ++++++++++++ ...ntumLevyDifferentialSwarmOptimizationV2.py | 157 ++++++++++++ .../AdaptiveQuantumLevyDynamicOptimization.py | 160 ++++++++++++ ...tiveQuantumLevyDynamicSwarmOptimization.py | 147 +++++++++++ ...veQuantumLevyDynamicSwarmOptimizationV2.py | 147 +++++++++++ ...uantumLevyEnhancedDifferentialOptimizer.py | 156 ++++++++++++ .../AdaptiveQuantumLevyMemeticOptimizer.py | 140 +++++++++++ .../AdaptiveQuantumLevyMemeticOptimizerV2.py | 140 +++++++++++ .../AdaptiveQuantumLevySwarmOptimization.py | 143 +++++++++++ .../AdaptiveQuantumLevyTreeOptimization.py | 145 +++++++++++ .../lama/AdaptiveQuantumLocalSearch.py | 75 ++++++ ...tiveQuantumMemeticEvolutionaryOptimizer.py | 181 ++++++++++++++ .../AdaptiveQuantumMemeticGradientBoost.py | 116 +++++++++ .../lama/AdaptiveQuantumMemeticOptimizer.py | 110 ++++++++ .../AdaptiveQuantumMemeticOptimizerPlus.py | 133 ++++++++++ .../lama/AdaptiveQuantumMemeticOptimizerV2.py | 129 ++++++++++ .../lama/AdaptiveQuantumMemeticOptimizerV3.py | 128 ++++++++++ .../lama/AdaptiveQuantumMetaheuristic.py | 70 ++++++ .../optimization/lama/AdaptiveQuantumPSO.py | 102 ++++++++ .../lama/AdaptiveQuantumPSOEnhanced.py | 117 +++++++++ ...daptiveQuantumParticleDifferentialSwarm.py | 138 ++++++++++ ...daptiveQuantumParticleSwarmOptimization.py | 174 +++++++++++++ .../lama/AdaptiveQuantumResonanceOptimizer.py | 52 ++++ .../lama/AdaptiveQuantumStrategicOptimizer.py | 73 ++++++ .../AdaptiveQuantumSwarmOptimizationV2.py | 76 ++++++ .../lama/AdaptiveQuantumSwarmOptimizerV2.py | 89 +++++++ .../lama/AdaptiveQuantumSymbioticStrategy.py | 75 ++++++ .../lama/AdaptiveQuasiGradientEvolution.py | 118 +++++++++ ...uasiRandomEnhancedDifferentialEvolution.py | 120 +++++++++ .../lama/AdaptiveQuasiRandomGradientDE.py | 116 +++++++++ .../AdaptiveQuorumWithStrategicMutation.py | 65 +++++ ...AdaptiveRefinedGradientBoostedAnnealing.py | 175 +++++++++++++ .../lama/AdaptiveRefinedHybridPSO_DE.py | 115 +++++++++ .../AdaptiveRefinementEvolutiveStrategy.py | 70 ++++++ .../lama/AdaptiveRefinementPSO.py | 66 +++++ .../AdaptiveRefinementSearchStrategyV30.py | 67 +++++ ...aptiveResilientQuantumCrossoverStrategy.py | 75 ++++++ .../optimization/lama/AdaptiveRestartDE.py | 151 +++++++++++ .../lama/AdaptiveRestartHybridOptimizer.py | 159 ++++++++++++ .../lama/AdaptiveRotationalClimbOptimizer.py | 62 +++++ .../lama/AdaptiveSigmaCrossoverEvolution.py | 59 +++++ .../lama/AdaptiveSimulatedAnnealing.py | 38 +++ .../lama/AdaptiveSimulatedAnnealingSearch.py | 58 +++++ ...aptiveSimulatedAnnealingWithSmartMemory.py | 157 ++++++++++++ ...AdaptiveSineCosineDifferentialEvolution.py | 50 ++++ .../AdaptiveSinusoidalDifferentialSwarm.py | 55 ++++ .../AdaptiveSpatialExplorationOptimizer.py | 73 ++++++ .../lama/AdaptiveSpiralGradientSearch.py | 65 +++++ .../optimization/lama/AdaptiveStepSearch.py | 50 ++++ ...iveStochasticGradientQuorumOptimization.py | 76 ++++++ .../lama/AdaptiveStochasticHybridEvolution.py | 60 +++++ .../lama/AdaptiveStochasticTunneling.py | 64 +++++ .../AdaptiveStrategicExplorationOptimizer.py | 83 +++++++ .../AdaptiveSwarmDifferentialEvolution.py | 50 ++++ .../lama/AdaptiveSwarmGradientOptimization.py | 143 +++++++++++ .../AdaptiveSwarmHarmonicOptimizationV4.py | 70 ++++++ .../lama/AdaptiveSwarmHybridOptimization.py | 121 +++++++++ .../AdaptiveThresholdDifferentialStrategy.py | 67 +++++ .../AdvancedAdaptiveDifferentialEvolution.py | 151 +++++++++++ .../lama/AdvancedAdaptiveDualPhaseStrategy.py | 81 ++++++ ...dvancedAdaptiveDynamicMemoryStrategyV64.py | 74 ++++++ ...namicMultiStrategyDifferentialEvolution.py | 159 ++++++++++++ ...daptiveExplorationExploitationAlgorithm.py | 116 +++++++++ ...daptiveExplorationOptimizationAlgorithm.py | 116 +++++++++ .../lama/AdvancedAdaptiveFireworkAlgorithm.py | 98 ++++++++ ...vancedAdaptiveGlobalClimbingOptimizerV6.py | 75 ++++++ ...daptiveGradientBoostedMemoryExploration.py | 180 ++++++++++++++ ...AdvancedAdaptiveGradientHybridOptimizer.py | 79 ++++++ ...vancedAdaptiveMemoryEnhancedStrategyV56.py | 76 ++++++ ...vancedAdaptiveMemoryEnhancedStrategyV73.py | 78 ++++++ ...AdvancedAdaptiveMemoryGuidedStrategyV77.py | 80 ++++++ ...dvancedAdaptiveMemorySimulatedAnnealing.py | 124 +++++++++ .../optimization/lama/AdvancedAdaptivePSO.py | 81 ++++++ .../lama/AdvancedAdaptiveQuantumEntropyDE.py | 153 ++++++++++++ .../AdvancedAdaptiveQuantumLevyOptimizer.py | 199 +++++++++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV1.py | 120 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV2.py | 120 +++++++++ .../lama/AdvancedAdaptiveStrategyOptimizer.py | 69 +++++ .../lama/AdvancedAttenuatedAdaptiveEvolver.py | 92 +++++++ ...vancedBalancedAdaptiveElitistStrategyV2.py | 80 ++++++ .../AdvancedBalancedExplorationOptimizer.py | 80 ++++++ ...entialEvolutionWithAdaptiveLearningRate.py | 107 ++++++++ ...tialEvolutionWithAdaptiveLearningRateV2.py | 107 ++++++++ ...edDifferentialParticleSwarmOptimization.py | 154 ++++++++++++ ...vancedDimensionalCyclicCrossoverEvolver.py | 90 +++++++ .../AdvancedDimensionalFeedbackEvolver.py | 86 +++++++ .../lama/AdvancedDiversityAdaptiveDE.py | 169 +++++++++++++ .../optimization/lama/AdvancedDiversityDE.py | 55 ++++ .../lama/AdvancedDualStrategyAdaptiveDE.py | 130 ++++++++++ .../lama/AdvancedDualStrategyHybridDE.py | 125 ++++++++++ ...namicAdaptiveHybridDEPSOWithEliteMemory.py | 174 +++++++++++++ .../AdvancedDynamicAdaptiveHybridOptimizer.py | 166 +++++++++++++ .../lama/AdvancedDynamicCrowdedDE.py | 154 ++++++++++++ .../AdvancedDynamicDualPhaseStrategyV37.py | 81 ++++++ .../AdvancedDynamicExplorationOptimizer.py | 166 +++++++++++++ .../lama/AdvancedDynamicFireworkAlgorithm.py | 96 +++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 140 +++++++++++ .../lama/AdvancedDynamicHybridOptimization.py | 163 ++++++++++++ .../lama/AdvancedDynamicHybridOptimizer.py | 179 +++++++++++++ ...ncedDynamicMultimodalSimulatedAnnealing.py | 140 +++++++++++ .../lama/AdvancedDynamicStrategyAdaptiveDE.py | 167 +++++++++++++ ...cedEliteAdaptiveCrowdingHybridOptimizer.py | 195 +++++++++++++++ .../AdvancedEliteDynamicHybridOptimizer.py | 136 ++++++++++ ...vancedEnhancedAdaptiveFireworkAlgorithm.py | 96 +++++++ .../AdvancedEnhancedAdaptiveMetaNetAQAPSO.py | 130 ++++++++++ ...cedDifferentialEvolutionLocalSearch_v55.py | 104 ++++++++ ...ancedEnhancedEnhancedGuidedMassQGSA_v69.py | 105 ++++++++ .../AdvancedEnhancedGuidedMassQGSA_v65.py | 116 +++++++++ ...cedEnhancedHybridMetaHeuristicOptimizer.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV16.py | 110 ++++++++ .../AdvancedExplorativeConvergenceEnhancer.py | 96 +++++++ ...edFireworkAlgorithmWithAdaptiveMutation.py | 114 +++++++++ .../lama/AdvancedFocusedAdaptiveOptimizer.py | 66 +++++ .../lama/AdvancedGlobalClimbingOptimizerV4.py | 73 ++++++ ...AdvancedGlobalStructureAwareOptimizerV3.py | 92 +++++++ ...mulatedAnnealingWithAdaptiveExploration.py | 159 ++++++++++++ .../lama/AdvancedGradientEvolutionStrategy.py | 66 +++++ .../AdvancedGradientEvolutionStrategyV2.py | 74 ++++++ .../AdvancedHarmonyMemeticOptimization.py | 115 +++++++++ .../lama/AdvancedHarmonySearch.py | 86 +++++++ .../lama/AdvancedHybridAdaptiveDE.py | 133 ++++++++++ .../AdvancedHybridAdaptiveOptimization.py | 145 +++++++++++ ...CovarianceMatrixDifferentialEvolutionV3.py | 201 +++++++++++++++ ...AdvancedHybridDEPSOWithAdaptiveRestarts.py | 152 +++++++++++ ...ridDEPSOWithDynamicAdaptationAndRestart.py | 152 +++++++++++ ...dHybridExplorationExploitationOptimizer.py | 144 +++++++++++ .../lama/AdvancedHybridLocalOptimizationDE.py | 193 ++++++++++++++ .../AdvancedHybridMetaHeuristicOptimizer.py | 93 +++++++ .../lama/AdvancedHybridMetaheuristic.py | 144 +++++++++++ .../lama/AdvancedHybridOptimization.py | 142 +++++++++++ .../lama/AdvancedHybridOptimizer.py | 156 ++++++++++++ .../lama/AdvancedHybridQuantumAdaptiveDE.py | 137 ++++++++++ ...ridSimulatedAnnealingWithAdaptiveMemory.py | 140 +++++++++++ ...SimulatedAnnealingWithGuidedExploration.py | 145 +++++++++++ .../AdvancedImprovedMetaHeuristicOptimizer.py | 104 ++++++++ .../lama/AdvancedIslandEvolutionStrategyV5.py | 103 ++++++++ .../lama/AdvancedIslandEvolutionStrategyV8.py | 109 ++++++++ .../lama/AdvancedIslandEvolutionStrategyV9.py | 109 ++++++++ ...ncedMemeticQuantumDifferentialOptimizer.py | 157 ++++++++++++ .../lama/AdvancedMemoryAdaptiveStrategyV50.py | 102 ++++++++ .../AdvancedMemoryEnhancedHybridOptimizer.py | 163 ++++++++++++ ...AdvancedMemoryGuidedAdaptiveStrategyV68.py | 92 +++++++ .../AdvancedMemoryGuidedDualStrategyV80.py | 84 +++++++ .../AdvancedMultiModalAdaptiveOptimizer.py | 96 +++++++ .../AdvancedMultiStrategySelfAdaptiveDE.py | 135 ++++++++++ ...NicheDifferentialParticleSwarmOptimizer.py | 149 +++++++++++ ...nBasedHarmonySearchDynamicBandwidthSADE.py | 116 +++++++++ ...malHybridDifferentialAnnealingOptimizer.py | 57 +++++ .../AdvancedParallelDifferentialEvolution.py | 53 ++++ .../lama/AdvancedPrecisionEvolver.py | 75 ++++++ .../lama/AdvancedPrecisionGuidedStrategy.py | 67 +++++ ...dQuantumCognitionTrajectoryOptimizerV29.py | 85 +++++++ ...ancedQuantumControlledDiversityStrategy.py | 82 ++++++ .../lama/AdvancedQuantumCrossoverOptimizer.py | 66 +++++ ...eOptimizerWithAdaptiveElitismAndRestart.py | 154 ++++++++++++ .../lama/AdvancedQuantumGradientDescent.py | 70 ++++++ ...dQuantumGradientExplorationOptimization.py | 201 +++++++++++++++ ...dvancedQuantumHarmonicFeedbackOptimizer.py | 80 ++++++ ...dvancedQuantumInfusedAdaptiveStrategyV3.py | 84 +++++++ ...ncedQuantumMemeticDifferentialEvolution.py | 174 +++++++++++++ ...vancedQuantumStateCrossoverOptimization.py | 86 +++++++ .../lama/AdvancedQuantumSwarmOptimization.py | 89 +++++++ .../lama/AdvancedQuantumVelocityOptimizer.py | 80 ++++++ .../optimization/lama/AdvancedRAMEDSv6.py | 98 ++++++++ ...ncedRefinedAdaptiveMemoryEnhancedSearch.py | 92 +++++++ ...ientBoostedMemorySimulatedAnnealingPlus.py | 144 +++++++++++ ...eAdaptiveMemoryDynamicCrowdingOptimizer.py | 203 +++++++++++++++ ...finedEliteAdaptiveMemoryHybridOptimizer.py | 196 +++++++++++++++ ...AdvancedRefinedGradientBoostedAnnealing.py | 150 +++++++++++ ...edRefinedGradientBoostedMemoryAnnealing.py | 158 ++++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 175 +++++++++++++ ...nedHybridEvolutionaryAnnealingOptimizer.py | 53 ++++ ...yperRefinedDynamicPrecisionOptimizerV51.py | 61 +++++ .../lama/AdvancedRefinedRAMEDSPro.py | 82 ++++++ .../AdvancedRefinedSpiralSearchOptimizer.py | 63 +++++ ...edUltraEvolutionaryGradientOptimizerV29.py | 79 ++++++ .../lama/AdvancedSelfAdaptiveDE_v2.py | 141 +++++++++++ .../lama/AdvancedSelfAdaptiveDE_v3.py | 139 +++++++++++ ...ncedSpatialAdaptiveConvergenceOptimizer.py | 81 ++++++ .../lama/AdvancedSpatialGradientOptimizer.py | 97 ++++++++ .../lama/AdvancedStrategicHybridDE.py | 80 ++++++ .../lama/ArchiveEnhancedAdaptiveDE.py | 146 +++++++++++ .../lama/AttenuatedAdaptiveEvolver.py | 68 +++++ .../lama/BalancedAdaptiveMemeticDE.py | 94 +++++++ .../BalancedCulturalDifferentialEvolution.py | 127 ++++++++++ .../lama/BalancedDualStrategyAdaptiveDE.py | 129 ++++++++++ .../lama/BalancedDynamicQuantumLevySwarm.py | 145 +++++++++++ .../BalancedQuantumLevyDifferentialSearch.py | 159 ++++++++++++ .../BalancedQuantumLevySwarmOptimization.py | 145 +++++++++++ .../lama/BayesianAdaptiveMemeticSearch.py | 126 ++++++++++ nevergrad/optimization/lama/CAMSQSOB.py | 77 ++++++ nevergrad/optimization/lama/CGES.py | 63 +++++ .../lama/CMADifferentialEvolutionPSO.py | 122 +++++++++ nevergrad/optimization/lama/CMDEALX.py | 74 ++++++ .../ClusterAdaptiveQuantumLevyOptimizer.py | 153 ++++++++++++ ...usterBasedAdaptiveDifferentialEvolution.py | 148 +++++++++++ ...edAdaptiveHybridPSODESimulatedAnnealing.py | 128 ++++++++++ ...redDifferentialEvolutionWithLocalSearch.py | 111 +++++++++ .../CoevolutionaryDualPopulationSearch.py | 124 +++++++++ .../lama/CohortDiversityDrivenOptimization.py | 72 ++++++ .../CohortEvolutionWithDynamicSelection.py | 74 ++++++ .../lama/ConcentricConvergenceOptimizer.py | 63 +++++ .../lama/ConcentricDiversityStrategy.py | 113 +++++++++ .../lama/ConcentricGradientDescentEvolver.py | 68 +++++ .../lama/ConcentricGradientEnhancedEvolver.py | 72 ++++++ .../ConcentricQuantumCrossoverStrategyV4.py | 91 +++++++ .../ConvergenceAcceleratedSpiralSearch.py | 75 ++++++ .../ConvergentAdaptiveEvolutionStrategy.py | 72 ++++++ .../ConvergentAdaptiveEvolutiveStrategy.py | 68 +++++ .../lama/CooperativeAdaptiveCulturalSearch.py | 117 +++++++++ ...ooperativeAdaptiveEvolutionaryOptimizer.py | 97 ++++++++ .../lama/CooperativeCulturalAdaptiveSearch.py | 108 ++++++++ .../CooperativeCulturalDifferentialSearch.py | 125 ++++++++++ .../CooperativeCulturalEvolutionStrategy.py | 109 ++++++++ .../CooperativeEvolutionaryGradientSearch.py | 93 +++++++ .../CooperativeParticleSwarmOptimization.py | 62 +++++ .../CoordinatedAdaptiveHybridOptimizer.py | 132 ++++++++++ ...ceMatrixAdaptationDifferentialEvolution.py | 70 ++++++ .../CulturalAdaptiveDifferentialEvolution.py | 114 +++++++++ .../CulturalGuidedDifferentialEvolution.py | 125 ++++++++++ nevergrad/optimization/lama/DADERC.py | 68 +++++ nevergrad/optimization/lama/DADESM.py | 64 +++++ nevergrad/optimization/lama/DADe.py | 63 +++++ nevergrad/optimization/lama/DAEA.py | 80 ++++++ nevergrad/optimization/lama/DAES.py | 79 ++++++ nevergrad/optimization/lama/DAESF.py | 81 ++++++ nevergrad/optimization/lama/DASES.py | 67 +++++ nevergrad/optimization/lama/DASOGG.py | 60 +++++ nevergrad/optimization/lama/DDCEA.py | 66 +++++ nevergrad/optimization/lama/DDPO.py | 69 +++++ nevergrad/optimization/lama/DEAMC.py | 65 +++++ nevergrad/optimization/lama/DEAMC_DSR.py | 89 +++++++ nevergrad/optimization/lama/DEAMC_LSI.py | 81 ++++++ .../optimization/lama/DEWithNelderMead.py | 65 +++++ nevergrad/optimization/lama/DHDGE.py | 76 ++++++ nevergrad/optimization/lama/DLASS.py | 66 +++++ nevergrad/optimization/lama/DMDE.py | 90 +++++++ nevergrad/optimization/lama/DMDESM.py | 89 +++++++ nevergrad/optimization/lama/DMES.py | 66 +++++ nevergrad/optimization/lama/DNAS.py | 75 ++++++ nevergrad/optimization/lama/DPADE.py | 81 ++++++ nevergrad/optimization/lama/DPES.py | 69 +++++ nevergrad/optimization/lama/DSDE.py | 66 +++++ nevergrad/optimization/lama/DSEDES.py | 66 +++++ .../DifferentialEvolutionAdaptiveCrossover.py | 55 ++++ .../lama/DifferentialEvolutionAdaptivePSO.py | 87 +++++++ .../lama/DifferentialEvolutionHybrid.py | 49 ++++ .../lama/DifferentialEvolutionOptimizer.py | 47 ++++ .../lama/DifferentialEvolutionPSOHybrid.py | 81 ++++++ .../lama/DifferentialEvolutionSearch.py | 68 +++++ .../lama/DifferentialFireworkAlgorithm.py | 52 ++++ .../DifferentialGradientEvolutionStrategy.py | 58 +++++ .../lama/DifferentialHarmonySearch.py | 49 ++++ .../lama/DifferentialMemeticAlgorithm.py | 71 ++++++ .../lama/DifferentialQuantumMetaheuristic.py | 65 +++++ ...DifferentialSimulatedAnnealingOptimizer.py | 52 ++++ ...ersityEnhancedAdaptiveGradientEvolution.py | 102 ++++++++ ...sityEnhancedAdaptiveGradientEvolutionV2.py | 104 ++++++++ .../lama/DolphinPodOptimization.py | 58 +++++ .../lama/DualAdaptiveRestartDE.py | 123 +++++++++ .../optimization/lama/DualAdaptiveSearch.py | 54 ++++ .../lama/DualConvergenceEvolutiveStrategy.py | 67 +++++ .../optimization/lama/DualModeOptimization.py | 76 ++++++ .../DualPhaseAdaptiveGradientEvolution.py | 76 ++++++ .../DualPhaseAdaptiveHybridOptimizerV3.py | 160 ++++++++++++ ...aseAdaptiveMemeticDifferentialEvolution.py | 167 +++++++++++++ ...eAdaptiveMemeticDifferentialEvolutionV2.py | 168 +++++++++++++ ...leSwarmDifferentialEvolutionV3_Enhanced.py | 139 +++++++++++ .../lama/DualPhaseDifferentialEvolution.py | 73 ++++++ .../lama/DualPhaseOptimizationStrategy.py | 79 ++++++ .../lama/DualPhaseQuantumMemeticSearch.py | 133 ++++++++++ ...PhaseRefinedQuantumLocalSearchOptimizer.py | 92 +++++++ .../optimization/lama/DualPopulationADE.py | 69 +++++ .../lama/DualPopulationAdaptiveSearch.py | 109 ++++++++ ...opulationCovarianceMatrixGradientSearch.py | 179 +++++++++++++ .../lama/DualPopulationEnhancedSearch.py | 113 +++++++++ .../lama/DualStrategyAdaptiveDE.py | 117 +++++++++ .../lama/DualStrategyDifferentialEvolution.py | 57 +++++ .../lama/DualStrategyOptimizer.py | 64 +++++ .../DualStrategyQuantumEvolutionOptimizer.py | 73 ++++++ .../lama/DynamicAdaptiveClimbingStrategy.py | 85 +++++++ .../lama/DynamicAdaptiveCohortOptimization.py | 71 ++++++ .../DynamicAdaptiveEliteHybridOptimizer.py | 136 ++++++++++ ...icAdaptiveEnhancedDifferentialEvolution.py | 112 +++++++++ .../DynamicAdaptiveExplorationOptimization.py | 116 +++++++++ .../DynamicAdaptiveExplorationOptimizer.py | 166 +++++++++++++ .../lama/DynamicAdaptiveFireworkAlgorithm.py | 99 ++++++++ ...icAdaptiveGradientDifferentialEvolution.py | 136 ++++++++++ ...cAdaptiveGravitationalSwarmIntelligence.py | 99 ++++++++ ...daptiveGravitationalSwarmIntelligenceV2.py | 96 +++++++ .../lama/DynamicAdaptiveHybridAlgorithm.py | 122 +++++++++ .../lama/DynamicAdaptiveHybridDE.py | 76 ++++++ ...namicAdaptiveHybridDEPSOWithEliteMemory.py | 167 +++++++++++++ .../lama/DynamicAdaptiveHybridOptimization.py | 136 ++++++++++ .../lama/DynamicAdaptiveHybridOptimizer.py | 128 ++++++++++ ...fferentialEvolutionWithSmartLocalSearch.py | 125 ++++++++++ .../lama/DynamicAdaptiveMemeticOptimizer.py | 127 ++++++++++ ...AdaptivePopulationDifferentialEvolution.py | 184 ++++++++++++++ ...micAdaptiveQuantumDifferentialEvolution.py | 84 +++++++ .../DynamicAdaptiveQuantumLevyOptimizer.py | 160 ++++++++++++ .../lama/DynamicAdaptiveQuantumPSO.py | 133 ++++++++++ ...cAdaptiveQuasiRandomDEGradientAnnealing.py | 144 +++++++++++ .../lama/DynamicAdaptiveSwarmOptimization.py | 146 +++++++++++ .../optimization/lama/DynamicBalancingPSO.py | 81 ++++++ .../lama/DynamicClusterHybridOptimization.py | 128 ++++++++++ .../lama/DynamicCohortAdaptiveEvolution.py | 96 +++++++ .../lama/DynamicCohortMemeticAlgorithm.py | 110 ++++++++ .../lama/DynamicCohortOptimization.py | 69 +++++ .../optimization/lama/DynamicCrowdedDE.py | 137 ++++++++++ .../DynamicCulturalDifferentialEvolution.py | 127 ++++++++++ .../DynamicEliteAdaptiveHybridOptimizerV2.py | 165 ++++++++++++ .../lama/DynamicEliteAnnealingDE.py | 157 ++++++++++++ .../DynamicEliteCovarianceMemeticSearch.py | 122 +++++++++ ...namicEliteEnhancedDifferentialEvolution.py | 97 ++++++++ .../lama/DynamicElitistHybridOptimizer.py | 146 +++++++++++ ...icEnhancedDifferentialFireworkAlgorithm.py | 88 +++++++ .../lama/DynamicEnhancedHybridOptimizer.py | 186 ++++++++++++++ ...DynamicExplorationExploitationAlgorithm.py | 105 ++++++++ .../lama/DynamicExplorationExploitationDE.py | 88 +++++++ ...ExplorationExploitationMemeticAlgorithm.py | 141 +++++++++++ .../lama/DynamicExplorationOptimization.py | 141 +++++++++++ .../lama/DynamicFireworkAlgorithm.py | 63 +++++ .../lama/DynamicFireworksSwarmOptimization.py | 89 +++++++ .../DynamicFractionalClusterOptimization.py | 144 +++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 141 +++++++++++ ...adientBoostedMemorySimulatedAnnealingV2.py | 141 +++++++++++ ...namicGradientBoostedRefinementAnnealing.py | 175 +++++++++++++ .../lama/DynamicGradientEnhancedAnnealing.py | 175 +++++++++++++ .../lama/DynamicHybridAnnealing.py | 109 ++++++++ .../lama/DynamicHybridOptimizer.py | 73 ++++++ ...namicHybridQuantumDifferentialEvolution.py | 177 +++++++++++++ .../lama/DynamicHybridSelfAdaptiveDE.py | 136 ++++++++++ .../lama/DynamicLevyHarmonySearch.py | 79 ++++++ .../DynamicLocalSearchFireworkAlgorithm.py | 96 +++++++ ...ifferentialEvolutionWithAdaptiveElitism.py | 128 ++++++++++ ...micMemoryAdaptiveConvergenceStrategyV76.py | 84 +++++++ ...namicMemoryEnhancedDualPhaseStrategyV66.py | 93 +++++++ .../lama/DynamicMemoryHybridSearch.py | 162 ++++++++++++ .../lama/DynamicMultiPhaseAnnealingPlus.py | 125 ++++++++++ .../lama/DynamicMultiStrategyOptimizer.py | 122 +++++++++ .../lama/DynamicNichePSO_DE_LS.py | 154 ++++++++++++ .../lama/DynamicNichingDEPSOWithRestart.py | 148 +++++++++++ ...amicPopulationAdaptiveGradientEvolution.py | 112 +++++++++ ...cPopulationMemeticDifferentialEvolution.py | 184 ++++++++++++++ .../lama/DynamicPrecisionBalancedEvolution.py | 76 ++++++ ...DynamicPrecisionCosineDifferentialSwarm.py | 55 ++++ .../DynamicPrecisionExplorationOptimizer.py | 56 +++++ .../lama/DynamicPrecisionOptimizer.py | 81 ++++++ ...DynamicQuantumAdaptiveEvolutionStrategy.py | 184 ++++++++++++++ .../DynamicQuantumDifferentialEvolution.py | 89 +++++++ ...olutionWithElitistMemoryAndHybridSearch.py | 167 +++++++++++++ ...ntialEvolutionWithLocalSearchAndRestart.py | 137 ++++++++++ .../lama/DynamicQuantumEvolution.py | 186 ++++++++++++++ .../DynamicQuantumGuidedHybridSearchV7.py | 88 +++++++ ...amicQuantumLevyDifferentialHybridSearch.py | 159 ++++++++++++ ...uantumLevyDifferentialSwarmOptimization.py | 145 +++++++++++ .../DynamicQuantumLevySwarmOptimization.py | 145 +++++++++++ .../lama/DynamicQuantumMemeticOptimizer.py | 129 ++++++++++ .../lama/DynamicQuantumSwarmOptimization.py | 69 +++++ .../DynamicQuantumSwarmOptimizationRefined.py | 76 ++++++ ...uasiRandomAdaptiveDifferentialEvolution.py | 158 ++++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 181 ++++++++++++++ ...efinementGradientBoostedMemoryAnnealing.py | 167 +++++++++++++ .../optimization/lama/DynamicScaleSearch.py | 62 +++++ .../lama/DynamicSelfAdaptiveOptimizer.py | 96 +++++++ .../lama/DynamicStrategyAdaptiveDE.py | 169 +++++++++++++ .../DynamicallyAdaptiveFireworkAlgorithm.py | 108 ++++++++ nevergrad/optimization/lama/EACDE.py | 74 ++++++ nevergrad/optimization/lama/EADE.py | 62 +++++ nevergrad/optimization/lama/EADEA.py | 75 ++++++ nevergrad/optimization/lama/EADEDM.py | 66 +++++ nevergrad/optimization/lama/EADEDMGM.py | 66 +++++ nevergrad/optimization/lama/EADEPC.py | 87 +++++++ nevergrad/optimization/lama/EADEPM.py | 71 ++++++ nevergrad/optimization/lama/EADEPMC.py | 75 ++++++ nevergrad/optimization/lama/EADES.py | 58 +++++ nevergrad/optimization/lama/EADESC.py | 74 ++++++ nevergrad/optimization/lama/EADEWM.py | 68 +++++ nevergrad/optimization/lama/EADE_FIDM.py | 54 ++++ nevergrad/optimization/lama/EADGM.py | 75 ++++++ nevergrad/optimization/lama/EADMMMS.py | 83 +++++++ nevergrad/optimization/lama/EADSEA.py | 72 ++++++ nevergrad/optimization/lama/EADSM.py | 89 +++++++ nevergrad/optimization/lama/EAMDE.py | 95 +++++++ nevergrad/optimization/lama/EAMES.py | 66 +++++ nevergrad/optimization/lama/EAMSDiffEvo.py | 75 ++++++ nevergrad/optimization/lama/EAMSEA.py | 82 ++++++ nevergrad/optimization/lama/EAPBES.py | 74 ++++++ nevergrad/optimization/lama/EAPDELS.py | 91 +++++++ nevergrad/optimization/lama/EARESDM.py | 87 +++++++ nevergrad/optimization/lama/EASO.py | 56 +++++ nevergrad/optimization/lama/EDAEA.py | 85 +++++++ nevergrad/optimization/lama/EDAG.py | 71 ++++++ nevergrad/optimization/lama/EDASOGG.py | 73 ++++++ nevergrad/optimization/lama/EDDCEA.py | 76 ++++++ nevergrad/optimization/lama/EDEAS.py | 53 ++++ nevergrad/optimization/lama/EDEPM.py | 64 +++++ nevergrad/optimization/lama/EDGB.py | 59 +++++ nevergrad/optimization/lama/EDMDESM.py | 89 +++++++ nevergrad/optimization/lama/EDMRL.py | 100 ++++++++ nevergrad/optimization/lama/EDMS.py | 86 +++++++ nevergrad/optimization/lama/EDNAS.py | 76 ++++++ nevergrad/optimization/lama/EDNAS_SAMRA.py | 73 ++++++ nevergrad/optimization/lama/EDSDiffEvoM.py | 73 ++++++ nevergrad/optimization/lama/EGBDE.py | 66 +++++ nevergrad/optimization/lama/EGGEO.py | 78 ++++++ nevergrad/optimization/lama/EHADEEM.py | 78 ++++++ nevergrad/optimization/lama/EHADEMI.py | 95 +++++++ nevergrad/optimization/lama/EHDAM.py | 92 +++++++ nevergrad/optimization/lama/EHDE.py | 70 ++++++ nevergrad/optimization/lama/EIADEA.py | 80 ++++++ nevergrad/optimization/lama/EMIDE.py | 87 +++++++ nevergrad/optimization/lama/EMSADE.py | 74 ++++++ nevergrad/optimization/lama/EMSEAS.py | 73 ++++++ nevergrad/optimization/lama/EORAMED.py | 85 +++++++ nevergrad/optimization/lama/EPADE.py | 71 ++++++ nevergrad/optimization/lama/EPDE.py | 62 +++++ nevergrad/optimization/lama/EPWDEM.py | 70 ++++++ nevergrad/optimization/lama/ERADE.py | 69 +++++ nevergrad/optimization/lama/ERADS.py | 75 ++++++ .../lama/ERADS_AdaptiveDynamic.py | 62 +++++ .../lama/ERADS_AdaptiveDynamicPlus.py | 64 +++++ .../optimization/lama/ERADS_AdaptiveHybrid.py | 70 ++++++ .../optimization/lama/ERADS_AdaptivePlus.py | 60 +++++ .../lama/ERADS_AdaptiveProgressive.py | 67 +++++ .../lama/ERADS_AdaptiveRefinement.py | 68 +++++ nevergrad/optimization/lama/ERADS_Advanced.py | 70 ++++++ .../lama/ERADS_AdvancedDynamic.py | 65 +++++ .../lama/ERADS_AdvancedRefined.py | 65 +++++ .../lama/ERADS_DynamicPrecision.py | 65 +++++ nevergrad/optimization/lama/ERADS_Enhanced.py | 83 +++++++ .../lama/ERADS_EnhancedPrecision.py | 68 +++++ .../optimization/lama/ERADS_HyperOptimized.py | 65 +++++ nevergrad/optimization/lama/ERADS_NextGen.py | 65 +++++ .../optimization/lama/ERADS_Optimized.py | 65 +++++ .../optimization/lama/ERADS_Precision.py | 80 ++++++ .../lama/ERADS_ProgressiveAdaptive.py | 65 +++++ .../lama/ERADS_ProgressiveAdaptivePlus.py | 65 +++++ .../lama/ERADS_ProgressiveDynamic.py | 66 +++++ .../lama/ERADS_ProgressiveOptimized.py | 67 +++++ .../lama/ERADS_ProgressivePrecision.py | 81 ++++++ .../lama/ERADS_ProgressiveRefinement.py | 65 +++++ .../optimization/lama/ERADS_QuantumFlux.py | 65 +++++ .../optimization/lama/ERADS_QuantumFluxPro.py | 65 +++++ .../lama/ERADS_QuantumFluxUltra.py | 65 +++++ .../lama/ERADS_QuantumFluxUltraRefined.py | 65 +++++ .../lama/ERADS_QuantumFluxUltraRefinedPlus.py | 76 ++++++ .../optimization/lama/ERADS_QuantumLeap.py | 65 +++++ nevergrad/optimization/lama/ERADS_Refined.py | 65 +++++ nevergrad/optimization/lama/ERADS_Superior.py | 70 ++++++ nevergrad/optimization/lama/ERADS_Ultra.py | 72 ++++++ .../optimization/lama/ERADS_UltraDynamic.py | 65 +++++ .../lama/ERADS_UltraDynamicMax.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxEnhanced.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxHybrid.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxHyper.py | 76 ++++++ .../ERADS_UltraDynamicMaxHyperOptimized.py | 69 +++++ .../ERADS_UltraDynamicMaxHyperOptimizedV4.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxHyperPlus.py | 70 ++++++ .../lama/ERADS_UltraDynamicMaxHyperRefined.py | 65 +++++ ...DS_UltraDynamicMaxHyperRefinedOptimized.py | 65 +++++ ..._UltraDynamicMaxHyperRefinedOptimizedV2.py | 65 +++++ ..._UltraDynamicMaxHyperRefinedOptimizedV3.py | 65 +++++ .../ERADS_UltraDynamicMaxHyperRefinedPlus.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxOptimal.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxOptimized.py | 65 +++++ .../ERADS_UltraDynamicMaxOptimizedPlus.py | 67 +++++ .../lama/ERADS_UltraDynamicMaxPlus.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxPrecision.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxRefined.py | 70 ++++++ .../lama/ERADS_UltraDynamicMaxRefinedPlus.py | 66 +++++ .../lama/ERADS_UltraDynamicMaxSupreme.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxUltra.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxUltraPlus.py | 65 +++++ .../lama/ERADS_UltraDynamicMaxUltraRefined.py | 67 +++++ .../ERADS_UltraDynamicMaxUltraRefinedV2.py | 65 +++++ .../ERADS_UltraDynamicMaxUltraRefinedV3.py | 65 +++++ .../ERADS_UltraDynamicMaxUltraRefinedV4.py | 71 ++++++ .../ERADS_UltraDynamicMaxUltraRefinedV5.py | 73 ++++++ .../ERADS_UltraDynamicMaxUltraRefinedV6.py | 64 +++++ .../ERADS_UltraDynamicMaxUltraRefinedV7.py | 64 +++++ .../ERADS_UltraDynamicMaxUltraRefinedV8.py | 80 ++++++ .../lama/ERADS_UltraDynamicPlus.py | 65 +++++ .../ERADS_UltraDynamicPrecisionEnhanced.py | 65 +++++ .../ERADS_UltraDynamicPrecisionOptimized.py | 67 +++++ .../optimization/lama/ERADS_UltraEnhanced.py | 71 ++++++ nevergrad/optimization/lama/ERADS_UltraMax.py | 59 +++++ .../optimization/lama/ERADS_UltraOptimized.py | 69 +++++ .../optimization/lama/ERADS_UltraPrecise.py | 77 ++++++ .../optimization/lama/ERADS_UltraRefined.py | 78 ++++++ nevergrad/optimization/lama/ERAMEDS.py | 100 ++++++++ nevergrad/optimization/lama/ESADE.py | 84 +++++++ nevergrad/optimization/lama/ESADEPFLLP.py | 79 ++++++ nevergrad/optimization/lama/ESBASM.py | 64 +++++ .../EliteAdaptiveCrowdingHybridOptimizer.py | 194 +++++++++++++++ .../lama/EliteAdaptiveHybridDEPSO.py | 152 +++++++++++ ...iteAdaptiveMemeticDifferentialEvolution.py | 106 ++++++++ ...daptiveMemoryDynamicCrowdingOptimizerV2.py | 199 +++++++++++++++ .../EliteAdaptiveMemoryHybridOptimizer.py | 168 +++++++++++++ ...ntumDEWithAdaptiveMemoryAndHybridSearch.py | 197 +++++++++++++++ ...CovarianceMatrixAdaptationMemeticSearch.py | 102 ++++++++ .../lama/EliteDynamicHybridOptimizer.py | 132 ++++++++++ .../lama/EliteDynamicMemoryHybridOptimizer.py | 197 +++++++++++++++ .../EliteDynamicMultiStrategyHybridDEPSO.py | 152 +++++++++++ .../lama/EliteGuidedAdaptiveRestartDE.py | 112 +++++++++ .../lama/EliteGuidedDualStrategyDE.py | 111 +++++++++ .../lama/EliteGuidedHybridAdaptiveDE.py | 123 +++++++++ .../optimization/lama/EliteGuidedHybridDE.py | 112 +++++++++ .../lama/EliteGuidedMutationDE.py | 94 +++++++ .../lama/EliteGuidedMutationDE_v2.py | 125 ++++++++++ .../lama/EliteGuidedQuantumAdaptiveDE.py | 138 ++++++++++ .../lama/EliteHybridAdaptiveOptimizer.py | 155 ++++++++++++ ...iteMemoryEnhancedDynamicHybridOptimizer.py | 186 ++++++++++++++ .../lama/EliteMultiStrategySelfAdaptiveDE.py | 128 ++++++++++ .../ElitePreservingDifferentialEvolution.py | 97 ++++++++ ...eQuantumAdaptiveExplorationOptimization.py | 235 ++++++++++++++++++ ...liteQuantumDifferentialMemeticOptimizer.py | 156 ++++++++++++ .../EliteRefinedAdaptivePrecisionOptimizer.py | 62 +++++ ...liteTranscendentalEvolutionaryOptimizer.py | 52 ++++ .../optimization/lama/ElitistAdaptiveDE.py | 113 +++++++++ .../lama/EnhancedAQAPSOHR_LSDIW.py | 111 +++++++++ .../lama/EnhancedAQAPSOHR_LSDIW_AP.py | 117 +++++++++ .../lama/EnhancedAQAPSO_LS_DIW_AP.py | 85 +++++++ .../lama/EnhancedAQAPSO_LS_DIW_AP_Final.py | 84 +++++++ .../lama/EnhancedAQAPSO_LS_DIW_AP_Refined.py | 85 +++++++ .../EnhancedAQAPSO_LS_DIW_AP_Refined_Final.py | 87 +++++++ .../lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate.py | 87 +++++++ ...ncedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py | 87 +++++++ ...SO_LS_DIW_AP_Ultimate_Redefined_Refined.py | 85 +++++++ ...hancedAQAPSO_LS_DIW_AP_Ultimate_Refined.py | 85 +++++++ ...SO_LS_DIW_AP_Ultimate_Refined_Redefined.py | 84 +++++++ ...AdaptiveChaoticFireworksOptimization_v2.py | 84 +++++++ ...AdaptiveChaoticFireworksOptimization_v3.py | 89 +++++++ .../EnhancedAdaptiveCohortMemeticAlgorithm.py | 140 +++++++++++ ...hancedAdaptiveControlledMemoryAnnealing.py | 68 +++++ ...CovarianceMatrixDifferentialEvolutionV4.py | 122 +++++++++ ...hancedAdaptiveCovarianceMatrixEvolution.py | 96 +++++++ .../lama/EnhancedAdaptiveDEPSOOptimizer.py | 106 ++++++++ ...cedAdaptiveDiffEvolutionGradientDescent.py | 93 +++++++ .../EnhancedAdaptiveDifferentialEvolution.py | 61 +++++ ...cedAdaptiveDifferentialEvolutionDynamic.py | 116 +++++++++ ...iveDifferentialEvolutionDynamicImproved.py | 116 +++++++++ ...edAdaptiveDifferentialEvolutionEnhanced.py | 121 +++++++++ ...cedAdaptiveDifferentialEvolutionRefined.py | 97 ++++++++ ...iveDifferentialEvolutionRefinedImproved.py | 121 +++++++++ ...dAdaptiveDifferentialEvolutionRefinedV2.py | 121 +++++++++ ...dAdaptiveDifferentialEvolutionRefinedV3.py | 121 +++++++++ ...dAdaptiveDifferentialEvolutionRefinedV4.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV10.py | 62 +++++ ...nhancedAdaptiveDifferentialEvolutionV11.py | 80 ++++++ ...nhancedAdaptiveDifferentialEvolutionV12.py | 83 +++++++ ...nhancedAdaptiveDifferentialEvolutionV13.py | 89 +++++++ ...nhancedAdaptiveDifferentialEvolutionV14.py | 89 +++++++ ...nhancedAdaptiveDifferentialEvolutionV15.py | 92 +++++++ ...nhancedAdaptiveDifferentialEvolutionV16.py | 93 +++++++ ...nhancedAdaptiveDifferentialEvolutionV17.py | 67 +++++ ...nhancedAdaptiveDifferentialEvolutionV18.py | 71 ++++++ ...nhancedAdaptiveDifferentialEvolutionV19.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV20.py | 123 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV21.py | 123 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV22.py | 123 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV23.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV24.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV25.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV26.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV27.py | 121 +++++++++ ...nhancedAdaptiveDifferentialEvolutionV28.py | 116 +++++++++ ...EnhancedAdaptiveDifferentialEvolutionV4.py | 50 ++++ ...EnhancedAdaptiveDifferentialEvolutionV5.py | 50 ++++ ...EnhancedAdaptiveDifferentialEvolutionV6.py | 50 ++++ ...EnhancedAdaptiveDifferentialEvolutionV7.py | 60 +++++ ...EnhancedAdaptiveDifferentialEvolutionV8.py | 60 +++++ ...EnhancedAdaptiveDifferentialEvolutionV9.py | 62 +++++ ...rentialEvolutionWithBayesianLocalSearch.py | 123 +++++++++ ...volutionWithDynamicCrossoverAndMutation.py | 58 +++++ ...WithDynamicCrossoverAndMutationImproved.py | 64 +++++ ...ferentialEvolutionWithDynamicParameters.py | 62 +++++ ...rentialEvolutionWithDynamicParametersV2.py | 74 ++++++ ...rentialEvolutionWithDynamicParametersV3.py | 78 ++++++ ...rentialEvolutionWithDynamicParametersV4.py | 86 +++++++ ...rentialEvolutionWithDynamicParametersV5.py | 76 ++++++ ...ferentialEvolutionWithDynamicPopulation.py | 103 ++++++++ ...alEvolutionWithDynamicPopulationRefined.py | 103 ++++++++ ...rentialEvolutionWithDynamicPopulationV2.py | 103 ++++++++ ...ifferentialEvolutionWithDynamicStepSize.py | 132 ++++++++++ ...tialEvolutionWithDynamicStepSizeRefined.py | 132 ++++++++++ ...tialEvolutionWithSelfAdaptiveParameters.py | 80 ++++++ ...cedAdaptiveDifferentialMemeticAlgorithm.py | 126 ++++++++++ ...aptiveDirectionalBiasQuorumOptimization.py | 76 ++++++ ...cedAdaptiveDiversifiedEvolutionStrategy.py | 76 ++++++ ...versifiedGravitationalSwarmOptimization.py | 99 ++++++++ ...rsifiedGravitationalSwarmOptimizationV2.py | 99 ++++++++ ...rsifiedGravitationalSwarmOptimizationV3.py | 99 ++++++++ ...rsifiedGravitationalSwarmOptimizationV4.py | 99 ++++++++ ...nhancedAdaptiveDiversifiedHarmonySearch.py | 93 +++++++ ...aptiveDiversifiedHarmonySearchOptimizer.py | 115 +++++++++ ...tiveDiversifiedHarmonySearchOptimizerV2.py | 116 +++++++++ ...tiveDiversifiedHarmonySearchOptimizerV3.py | 116 +++++++++ ...tiveDiversifiedHarmonySearchOptimizerV4.py | 116 +++++++++ ...tiveDiversifiedHarmonySearchOptimizerV5.py | 116 +++++++++ ...ancedAdaptiveDiversifiedHarmonySearchV2.py | 93 +++++++ ...ancedAdaptiveDiversifiedHarmonySearchV3.py | 93 +++++++ ...ancedAdaptiveDiversifiedHarmonySearchV4.py | 93 +++++++ ...aptiveDiversifiedMetaHeuristicAlgorithm.py | 62 +++++ ...tiveDiversifiedMetaHeuristicAlgorithmV2.py | 70 ++++++ .../lama/EnhancedAdaptiveDiversifiedSearch.py | 64 +++++ .../EnhancedAdaptiveDolphinPodOptimization.py | 76 ++++++ ...eDualPhaseEvolutionarySwarmOptimization.py | 146 +++++++++++ ...OptimizationWithDynamicParameterControl.py | 150 +++++++++++ .../EnhancedAdaptiveDualPhaseStrategyV2.py | 82 ++++++ .../EnhancedAdaptiveDualPhaseStrategyV5.py | 80 ++++++ .../EnhancedAdaptiveDualStrategyOptimizer.py | 68 +++++ .../lama/EnhancedAdaptiveDynamicDE.py | 96 +++++++ ...cedAdaptiveDynamicDifferentialEvolution.py | 121 +++++++++ ...ncedAdaptiveDynamicDualPhaseStrategyV19.py | 82 ++++++ ...ncedAdaptiveDynamicDualPhaseStrategyV22.py | 83 +++++++ ...nhancedAdaptiveDynamicFireworkAlgorithm.py | 96 +++++++ ...daptiveDynamicFireworkAlgorithmEnhanced.py | 96 +++++++ ...daptiveDynamicFireworkAlgorithmImproved.py | 96 +++++++ ...AdaptiveDynamicFireworkAlgorithmRefined.py | 96 +++++++ ...eDynamicFireworkDifferentialEvolutionV5.py | 62 +++++ ...eDynamicFireworkDifferentialEvolutionV6.py | 65 +++++ ...eDynamicFireworkDifferentialEvolutionV7.py | 65 +++++ .../EnhancedAdaptiveDynamicHarmonySearch.py | 67 +++++ .../EnhancedAdaptiveDynamicHarmonySearchV2.py | 76 ++++++ .../EnhancedAdaptiveDynamicHarmonySearchV3.py | 76 ++++++ ...tiveDynamicMemeticEvolutionaryAlgorithm.py | 98 ++++++++ ...namicMultiStrategyDifferentialEvolution.py | 161 ++++++++++++ ...AdaptiveDynamicQuantumSwarmOptimization.py | 77 ++++++ ...ancedAdaptiveEliteDifferentialEvolution.py | 122 +++++++++ ...nhancedAdaptiveEliteGuidedMutationDE_v2.py | 106 ++++++++ ...EliteMultiStrategyDifferentialEvolution.py | 164 ++++++++++++ ...nhancedAdaptiveEnvironmentalStrategyV24.py | 82 ++++++ ...lutionaryDifferentialPopulationStrategy.py | 84 +++++++ ...daptiveExplorationExploitationAlgorithm.py | 106 ++++++++ .../EnhancedAdaptiveExplorationOptimizer.py | 83 +++++++ .../lama/EnhancedAdaptiveFireworkAlgorithm.py | 96 +++++++ .../EnhancedAdaptiveFireworksAlgorithm.py | 76 ++++++ .../lama/EnhancedAdaptiveGaussianSearch.py | 61 +++++ ...cedAdaptiveGradientBalancedCrossoverPSO.py | 80 ++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 141 +++++++++++ .../EnhancedAdaptiveGranularStrategyV26.py | 74 ++++++ ...aptiveGravitationalSwarmIntelligenceV10.py | 107 ++++++++ ...aptiveGravitationalSwarmIntelligenceV11.py | 112 +++++++++ ...aptiveGravitationalSwarmIntelligenceV12.py | 115 +++++++++ ...aptiveGravitationalSwarmIntelligenceV19.py | 115 +++++++++ ...aptiveGravitationalSwarmIntelligenceV20.py | 119 +++++++++ ...aptiveGravitationalSwarmIntelligenceV21.py | 92 +++++++ ...aptiveGravitationalSwarmIntelligenceV27.py | 91 +++++++ ...aptiveGravitationalSwarmIntelligenceV28.py | 91 +++++++ ...daptiveGravitationalSwarmIntelligenceV3.py | 94 +++++++ ...daptiveGravitationalSwarmIntelligenceV4.py | 94 +++++++ ...daptiveGravitationalSwarmIntelligenceV5.py | 96 +++++++ ...daptiveGravitationalSwarmIntelligenceV6.py | 96 +++++++ ...daptiveGravitationalSwarmIntelligenceV7.py | 96 +++++++ ...daptiveGravitationalSwarmIntelligenceV8.py | 96 +++++++ ...daptiveGravitationalSwarmIntelligenceV9.py | 96 +++++++ ...izationWithDynamicDiversityPreservation.py | 99 ++++++++ ...ncedAdaptiveGuidedDifferentialEvolution.py | 125 ++++++++++ ...EnhancedAdaptiveGuidedMutationOptimizer.py | 86 +++++++ ...ncedAdaptiveHarmonicFireworksTabuSearch.py | 108 ++++++++ ...edAdaptiveHarmonicFireworksTabuSearchV2.py | 111 +++++++++ .../EnhancedAdaptiveHarmonicOptimizationV2.py | 70 ++++++ .../EnhancedAdaptiveHarmonicTabuSearchV10.py | 95 +++++++ .../EnhancedAdaptiveHarmonicTabuSearchV18.py | 111 +++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV21.py | 100 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV22.py | 100 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV23.py | 100 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV25.py | 100 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV26.py | 100 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV27.py | 102 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV29.py | 102 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV30.py | 103 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV31.py | 103 ++++++++ .../EnhancedAdaptiveHarmonicTabuSearchV9.py | 95 +++++++ ...hancedAdaptiveHarmonyFireworksAlgorithm.py | 69 +++++ ...EnhancedAdaptiveHarmonyMemeticAlgorithm.py | 93 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV10.py | 93 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV11.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV12.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV13.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV14.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV16.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV18.py | 93 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV19.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV2.py | 93 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV20.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV21.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV22.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV23.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV24.py | 86 +++++++ ...ancedAdaptiveHarmonyMemeticAlgorithmV25.py | 86 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV3.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV4.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV5.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV6.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV7.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV8.py | 93 +++++++ ...hancedAdaptiveHarmonyMemeticAlgorithmV9.py | 93 +++++++ ...edAdaptiveHarmonyMemeticOptimizationV28.py | 100 ++++++++ ...edAdaptiveHarmonyMemeticOptimizationV29.py | 100 ++++++++ ...cedAdaptiveHarmonyMemeticOptimizationV3.py | 86 +++++++ ...edAdaptiveHarmonyMemeticOptimizationV30.py | 100 ++++++++ ...edAdaptiveHarmonyMemeticOptimizationV31.py | 100 ++++++++ ...edAdaptiveHarmonyMemeticOptimizationV32.py | 108 ++++++++ ...edAdaptiveHarmonyMemeticOptimizationV33.py | 108 ++++++++ ...cedAdaptiveHarmonyMemeticOptimizationV4.py | 95 +++++++ ...cedAdaptiveHarmonyMemeticOptimizationV5.py | 95 +++++++ ...cedAdaptiveHarmonyMemeticOptimizationV6.py | 95 +++++++ ...cedAdaptiveHarmonyMemeticOptimizationV7.py | 95 +++++++ ...cedAdaptiveHarmonyMemeticOptimizationV8.py | 95 +++++++ .../EnhancedAdaptiveHarmonyMemeticSearch.py | 106 ++++++++ .../EnhancedAdaptiveHarmonyMemeticSearchV2.py | 111 +++++++++ ...monySearchImprovedWithLocalOptimization.py | 112 +++++++++ ...onySearchOptimizedWithLocalOptimization.py | 112 +++++++++ .../EnhancedAdaptiveHarmonySearchOptimizer.py | 66 +++++ ...nhancedAdaptiveHarmonySearchOptimizerV2.py | 98 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV10.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV11.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV12.py | 95 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV13.py | 98 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV14.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV15.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV16.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV17.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV18.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV19.py | 106 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV20.py | 99 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV21.py | 99 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV22.py | 99 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV23.py | 101 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV24.py | 101 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV25.py | 101 ++++++++ .../lama/EnhancedAdaptiveHarmonySearchV3.py | 81 ++++++ .../lama/EnhancedAdaptiveHarmonySearchV4.py | 141 +++++++++++ .../lama/EnhancedAdaptiveHarmonySearchV5.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV6.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV7.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV8.py | 88 +++++++ .../lama/EnhancedAdaptiveHarmonySearchV9.py | 88 +++++++ ...SearchWithAdaptiveLevyFlightInspiration.py | 92 +++++++ ...hDiversificationAndLocalOptimizationV10.py | 110 ++++++++ ...thDiversificationAndLocalOptimizationV3.py | 110 ++++++++ ...thDiversificationAndLocalOptimizationV4.py | 110 ++++++++ ...thDiversificationAndLocalOptimizationV5.py | 110 ++++++++ ...thDiversificationAndLocalOptimizationV6.py | 110 ++++++++ ...thDiversificationAndLocalOptimizationV7.py | 108 ++++++++ ...thDiversificationAndLocalOptimizationV8.py | 112 +++++++++ ...thDiversificationAndLocalOptimizationV9.py | 108 ++++++++ ...ySearchWithDynamicLevyFlightImprovement.py | 63 +++++ ...archWithDynamicLevyFlightImprovementV10.py | 63 +++++ ...archWithDynamicLevyFlightImprovementV11.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV2.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV3.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV4.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV5.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV6.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV7.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV8.py | 63 +++++ ...earchWithDynamicLevyFlightImprovementV9.py | 63 +++++ ...ySearchWithEnhancedHybridInspirationV17.py | 100 ++++++++ ...ySearchWithEnhancedHybridInspirationV18.py | 100 ++++++++ ...archWithEnhancedLevyFlightInspirationV6.py | 90 +++++++ ...eHarmonySearchWithEnhancedLevyFlightV12.py | 63 +++++ ...eHarmonySearchWithEnhancedLevyFlightV13.py | 63 +++++ ...edLocalOptimizationAndDiversificationV2.py | 132 ++++++++++ ...edLocalOptimizationAndDiversificationV3.py | 141 +++++++++++ ...veHarmonySearchWithHybridInspirationV16.py | 98 ++++++++ ...tiveHarmonySearchWithImprovedLevyFlight.py | 69 +++++ ...rchWithImprovedLevyFlightInspirationV10.py | 90 +++++++ ...rchWithImprovedLevyFlightInspirationV11.py | 90 +++++++ ...rchWithImprovedLevyFlightInspirationV12.py | 90 +++++++ ...rchWithImprovedLevyFlightInspirationV13.py | 90 +++++++ ...rchWithImprovedLevyFlightInspirationV14.py | 90 +++++++ ...rchWithImprovedLevyFlightInspirationV15.py | 90 +++++++ ...archWithImprovedLevyFlightInspirationV4.py | 90 +++++++ ...archWithImprovedLevyFlightInspirationV7.py | 90 +++++++ ...archWithImprovedLevyFlightInspirationV8.py | 90 +++++++ ...archWithImprovedLevyFlightInspirationV9.py | 90 +++++++ ...ncedAdaptiveHarmonySearchWithLevyFlight.py | 69 +++++ ...armonySearchWithLevyFlightInspirationV2.py | 89 +++++++ ...ptiveHarmonySearchWithLocalOptimization.py | 112 +++++++++ ...thLocalOptimizationAndDiversificationV2.py | 108 ++++++++ ...thLocalOptimizationAndDiversificationV3.py | 108 ++++++++ ...thLocalOptimizationAndDiversificationV4.py | 117 +++++++++ ...thLocalOptimizationAndDiversificationV5.py | 117 +++++++++ ...thLocalOptimizationAndDiversificationV6.py | 117 +++++++++ ...iveHarmonySearchWithLocalOptimizationV3.py | 106 ++++++++ ...iveHarmonySearchWithLocalOptimizationV4.py | 110 ++++++++ ...iveHarmonySearchWithLocalOptimizationV5.py | 110 ++++++++ ...iveHarmonySearchWithLocalOptimizationV6.py | 110 ++++++++ ...iveHarmonySearchWithLocalOptimizationV7.py | 110 ++++++++ ...iveHarmonySearchWithLocalOptimizationV8.py | 110 ++++++++ ...ptiveHarmonySearchWithRefinedLevyFlight.py | 69 +++++ ...ySearchWithRefinedLevyFlightInspiration.py | 94 +++++++ ...tiveHarmonySearchWithSimulatedAnnealing.py | 98 ++++++++ ...veHarmonySearchWithSimulatedAnnealingV2.py | 98 ++++++++ ...veHarmonySearchWithSimulatedAnnealingV3.py | 96 +++++++ ...veHarmonySearchWithSimulatedAnnealingV4.py | 96 +++++++ ...veHarmonySearchWithSimulatedAnnealingV5.py | 96 +++++++ ...veHarmonySearchWithSimulatedAnnealingV6.py | 106 ++++++++ ...EnhancedAdaptiveHarmonyTabuOptimization.py | 76 ++++++ .../EnhancedAdaptiveHarmonyTabuSearchV2.py | 94 +++++++ .../EnhancedAdaptiveHarmonyTabuSearchV3.py | 93 +++++++ .../EnhancedAdaptiveHarmonyTabuSearchV4.py | 93 +++++++ .../EnhancedAdaptiveHarmonyTabuSearchV5.py | 93 +++++++ ...ybridGradientAnnealingWithDynamicMemory.py | 135 ++++++++++ .../EnhancedAdaptiveHybridHarmonySearchV22.py | 83 +++++++ .../EnhancedAdaptiveHybridHarmonySearchV23.py | 91 +++++++ .../EnhancedAdaptiveHybridHarmonySearchV24.py | 91 +++++++ .../EnhancedAdaptiveHybridHarmonySearchV25.py | 91 +++++++ .../EnhancedAdaptiveHybridHarmonySearchV26.py | 91 +++++++ .../EnhancedAdaptiveHybridHarmonySearchV27.py | 91 +++++++ .../EnhancedAdaptiveHybridMetaOptimizer.py | 122 +++++++++ .../lama/EnhancedAdaptiveHybridOptimizer.py | 148 +++++++++++ ...ybridParticleSwarmDifferentialEvolution.py | 136 ++++++++++ ...dParticleSwarmDifferentialEvolutionPlus.py | 125 ++++++++++ .../EnhancedAdaptiveInertiaHybridOptimizer.py | 66 +++++ ...veLevyDiversifiedMetaHeuristicAlgorithm.py | 68 +++++ ...LevyDiversifiedMetaHeuristicAlgorithmV2.py | 72 ++++++ ...LevyDiversifiedMetaHeuristicAlgorithmV3.py | 74 ++++++ ...LevyDiversifiedMetaHeuristicAlgorithmV4.py | 78 ++++++ .../lama/EnhancedAdaptiveLevyHarmonySearch.py | 78 ++++++ .../EnhancedAdaptiveLevyHarmonySearchV2.py | 78 ++++++ .../EnhancedAdaptiveLevyHarmonySearchV3.py | 78 ++++++ ...iveLocalSearchQuantumSimulatedAnnealing.py | 60 +++++ ...eLocalSearchQuantumSimulatedAnnealingV2.py | 67 +++++ ...eLocalSearchQuantumSimulatedAnnealingV3.py | 67 +++++ ...eLocalSearchQuantumSimulatedAnnealingV4.py | 67 +++++ ...eLocalSearchQuantumSimulatedAnnealingV5.py | 69 +++++ ...cedAdaptiveMemeticDifferentialEvolution.py | 115 +++++++++ ...EnhancedAdaptiveMemeticDiverseOptimizer.py | 157 ++++++++++++ ...hancedAdaptiveMemeticDiverseOptimizerV2.py | 164 ++++++++++++ ...hancedAdaptiveMemeticDiverseOptimizerV3.py | 186 ++++++++++++++ ...dAdaptiveMemeticEvolutionaryAlgorithmV2.py | 98 ++++++++ ...ancedAdaptiveMemeticHarmonyOptimization.py | 85 +++++++ ...cedAdaptiveMemeticHarmonyOptimizationV2.py | 85 +++++++ ...cedAdaptiveMemeticHarmonyOptimizationV3.py | 85 +++++++ ...cedAdaptiveMemeticHarmonyOptimizationV4.py | 85 +++++++ ...cedAdaptiveMemeticHarmonyOptimizationV6.py | 86 +++++++ .../EnhancedAdaptiveMemeticHybridOptimizer.py | 156 ++++++++++++ .../EnhancedAdaptiveMemeticOptimizerV7.py | 144 +++++++++++ ...nhancedAdaptiveMemoryControlStrategyV49.py | 90 +++++++ ...ancedAdaptiveMemoryDualPhaseStrategyV46.py | 95 +++++++ ...ryGradientAnnealingWithExplorationBoost.py | 141 +++++++++++ .../EnhancedAdaptiveMemoryHybridAnnealing.py | 74 ++++++ .../lama/EnhancedAdaptiveMemoryHybridDEPSO.py | 174 +++++++++++++ .../lama/EnhancedAdaptiveMemoryStrategyV54.py | 70 ++++++ .../lama/EnhancedAdaptiveMemoryStrategyV79.py | 67 +++++ .../lama/EnhancedAdaptiveMetaNetAQAPSO.py | 123 +++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv12.py | 130 ++++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv14.py | 130 ++++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv15.py | 130 ++++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv16.py | 130 ++++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv2.py | 123 +++++++++ .../lama/EnhancedAdaptiveMetaNetAQAPSOv3.py | 123 +++++++++ .../lama/EnhancedAdaptiveMetaNetPSO.py | 134 ++++++++++ .../lama/EnhancedAdaptiveMetaNetPSO_v2.py | 134 ++++++++++ .../lama/EnhancedAdaptiveMetaNetPSO_v3.py | 134 ++++++++++ ...edAdaptiveMultiMemorySimulatedAnnealing.py | 141 +++++++++++ .../EnhancedAdaptiveMultiOperatorSearch.py | 141 +++++++++++ .../EnhancedAdaptiveMultiPhaseAnnealing.py | 103 ++++++++ ...AdaptiveMultiPhaseAnnealingWithGradient.py | 104 ++++++++ ...iveMultiPopulationDifferentialEvolution.py | 175 +++++++++++++ ...EnhancedAdaptiveMultiStrategicOptimizer.py | 85 +++++++ .../lama/EnhancedAdaptiveMultiStrategyDE.py | 128 ++++++++++ ...ptiveMultiStrategyDifferentialEvolution.py | 135 ++++++++++ .../EnhancedAdaptiveMultiStrategyOptimizer.py | 172 +++++++++++++ ...NicheDifferentialParticleSwarmOptimizer.py | 163 ++++++++++++ ...iveOppositionBasedDifferentialEvolution.py | 102 ++++++++ ...OppositionBasedDifferentialEvolution_v2.py | 92 +++++++ ...nBasedHarmonySearchDynamicBandwidthSADE.py | 118 +++++++++ ...AdaptiveOrthogonalDifferentialEvolution.py | 63 +++++ ...ustDifferentialEvolutionWithEliteSearch.py | 157 ++++++++++++ ...edAdaptivePrecisionCohortOptimizationV5.py | 67 +++++ .../EnhancedAdaptivePrecisionFocalStrategy.py | 96 +++++++ .../optimization/lama/EnhancedAdaptiveQGSA.py | 65 +++++ .../lama/EnhancedAdaptiveQGSA_v10.py | 75 ++++++ .../lama/EnhancedAdaptiveQGSA_v11.py | 75 ++++++ .../lama/EnhancedAdaptiveQGSA_v12.py | 73 ++++++ .../lama/EnhancedAdaptiveQGSA_v13.py | 73 ++++++ .../lama/EnhancedAdaptiveQGSA_v14.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v15.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v16.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v17.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v18.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v19.py | 79 ++++++ .../lama/EnhancedAdaptiveQGSA_v2.py | 66 +++++ .../lama/EnhancedAdaptiveQGSA_v20.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v21.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v22.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v23.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v24.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v25.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v26.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v27.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v28.py | 78 ++++++ .../lama/EnhancedAdaptiveQGSA_v29.py | 86 +++++++ .../lama/EnhancedAdaptiveQGSA_v3.py | 66 +++++ .../lama/EnhancedAdaptiveQGSA_v30.py | 86 +++++++ .../lama/EnhancedAdaptiveQGSA_v31.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v32.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v33.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v34.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v35.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v36.py | 97 ++++++++ .../lama/EnhancedAdaptiveQGSA_v38.py | 86 +++++++ .../lama/EnhancedAdaptiveQGSA_v39.py | 87 +++++++ .../lama/EnhancedAdaptiveQGSA_v4.py | 72 ++++++ .../lama/EnhancedAdaptiveQGSA_v40.py | 94 +++++++ .../lama/EnhancedAdaptiveQGSA_v41.py | 80 ++++++ .../lama/EnhancedAdaptiveQGSA_v42.py | 86 +++++++ .../lama/EnhancedAdaptiveQGSA_v43.py | 88 +++++++ .../lama/EnhancedAdaptiveQGSA_v44.py | 93 +++++++ .../lama/EnhancedAdaptiveQGSA_v47.py | 91 +++++++ .../lama/EnhancedAdaptiveQGSA_v5.py | 74 ++++++ .../lama/EnhancedAdaptiveQGSA_v6.py | 74 ++++++ .../lama/EnhancedAdaptiveQGSA_v8.py | 76 ++++++ .../lama/EnhancedAdaptiveQGSA_v9.py | 75 ++++++ ...tiveQuantumDEWithDynamicElitistLearning.py | 199 +++++++++++++++ ...cedAdaptiveQuantumDifferentialEvolution.py | 82 ++++++ ...entialEvolutionWithMemoryAndLocalSearch.py | 167 +++++++++++++ ...dAdaptiveQuantumDynamicLevyOptimization.py | 160 ++++++++++++ ...AdaptiveQuantumGradientMemeticOptimizer.py | 129 ++++++++++ ...nhancedAdaptiveQuantumHarmonySearchDBGB.py | 62 +++++ ...edAdaptiveQuantumHarmonySearchDBGBFinal.py | 67 +++++ ...AdaptiveQuantumHarmonySearchDBGBFinalII.py | 74 ++++++ ...daptiveQuantumHarmonySearchDBGBFinalIII.py | 74 ++++++ ...daptiveQuantumHarmonySearchDBGBImproved.py | 65 +++++ ...hancedAdaptiveQuantumHarmonySearchFinal.py | 74 ++++++ ...cedAdaptiveQuantumHarmonySearchImproved.py | 74 ++++++ ...tiveQuantumHarmonySearchImprovedRefined.py | 74 ++++++ ...ncedAdaptiveQuantumLevyMemeticOptimizer.py | 140 +++++++++++ ...cedAdaptiveQuantumLevySwarmOptimization.py | 157 ++++++++++++ .../EnhancedAdaptiveQuantumLocalSearch.py | 75 ++++++ ...hancedAdaptiveQuantumMemeticOptimizerV4.py | 129 ++++++++++ .../lama/EnhancedAdaptiveQuantumPSO.py | 129 ++++++++++ .../lama/EnhancedAdaptiveQuantumPSOv2.py | 133 ++++++++++ ...daptiveQuantumParticleSwarmOptimization.py | 70 ++++++ ...hancedAdaptiveQuantumSimulatedAnnealing.py | 75 ++++++ ...ptiveQuantumSimulatedAnnealingOptimized.py | 52 ++++ ...nhancedAdaptiveQuantumSwarmOptimization.py | 80 ++++++ ...ncedAdaptiveQuantumSwarmOptimizationV10.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV11.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV12.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV13.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV14.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV15.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV16.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV17.py | 80 ++++++ ...ncedAdaptiveQuantumSwarmOptimizationV18.py | 82 ++++++ ...ncedAdaptiveQuantumSwarmOptimizationV19.py | 88 +++++++ ...ancedAdaptiveQuantumSwarmOptimizationV2.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV20.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV21.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV22.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV23.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV24.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV25.py | 88 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV26.py | 83 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV27.py | 83 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV28.py | 83 +++++++ ...ncedAdaptiveQuantumSwarmOptimizationV29.py | 83 +++++++ ...ancedAdaptiveQuantumSwarmOptimizationV3.py | 124 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV30.py | 111 +++++++++ ...ncedAdaptiveQuantumSwarmOptimizationV31.py | 111 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV4.py | 124 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV5.py | 124 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV6.py | 124 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV7.py | 124 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV8.py | 124 +++++++++ ...ancedAdaptiveQuantumSwarmOptimizationV9.py | 124 +++++++++ ...ncedAdaptiveSinusoidalDifferentialSwarm.py | 54 ++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V12.py | 98 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V13.py | 98 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V15.py | 98 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V16.py | 103 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V17.py | 108 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V18.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V19.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V20.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V21.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V22.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V23.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V24.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V25.py | 107 ++++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V26.py | 107 ++++++++ ...hancedAdaptiveSwarmHarmonicOptimization.py | 70 ++++++ .../lama/EnhancedAdaptiveTabuHarmonySearch.py | 76 ++++++ .../EnhancedAdaptiveTabuHarmonySearchV2.py | 76 ++++++ ...hancedAdvancedAdaptiveFireworkAlgorithm.py | 98 ++++++++ ...cedDifferentialEvolutionLocalSearch_v56.py | 110 ++++++++ ...edAdvancedHybridDifferentialEvolutionV4.py | 199 +++++++++++++++ ...AdvancedHybridMetaHeuristicOptimizerV17.py | 110 ++++++++ ...AdvancedHybridMetaHeuristicOptimizerV18.py | 110 ++++++++ ...AdvancedHybridMetaHeuristicOptimizerV19.py | 110 ++++++++ ...perParameterTunedMetaHeuristicOptimizer.py | 104 ++++++++ ...ancedAdvancedQuantumSwarmOptimizationV1.py | 89 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV10.py | 89 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV11.py | 89 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV12.py | 89 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV13.py | 89 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV14.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV2.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV3.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV4.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV5.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV6.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV7.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV8.py | 89 +++++++ ...ancedAdvancedQuantumSwarmOptimizationV9.py | 89 +++++++ ...vancedRefinedUltimateGuidedMassQGSA_v78.py | 124 +++++++++ ...ancedAdvancedUltimateGuidedMassQGSA_v79.py | 123 +++++++++ .../optimization/lama/EnhancedArchiveDE.py | 150 +++++++++++ .../EnhancedBalancedDualStrategyAdaptiveDE.py | 135 ++++++++++ nevergrad/optimization/lama/EnhancedCMAES.py | 44 ++++ .../optimization/lama/EnhancedCMAESv2.py | 55 ++++ .../EnhancedChaoticFireworksOptimization.py | 84 +++++++ .../EnhancedClusterDifferentialCrossover.py | 153 ++++++++++++ .../EnhancedClusteredDifferentialEvolution.py | 136 ++++++++++ ...ancedConvergenceAcceleratedSpiralSearch.py | 84 +++++++ ...EnhancedConvergentDifferentialEvolution.py | 103 ++++++++ ...hancedConvergentDifferentialEvolutionV2.py | 103 ++++++++ ...hancedConvergentDifferentialEvolutionV3.py | 103 ++++++++ ...hancedConvergentDifferentialEvolutionV4.py | 103 ++++++++ ...edCooperativeCulturalDifferentialSearch.py | 127 ++++++++++ ...EnhancedCosineAdaptiveDifferentialSwarm.py | 54 ++++ ...hancedCosineAdaptiveDifferentialSwarmV2.py | 58 +++++ .../EnhancedCovarianceGradientSearchV2.py | 189 ++++++++++++++ .../EnhancedCovarianceMatrixAdaptation.py | 83 +++++++ .../lama/EnhancedCovarianceMatrixEvolution.py | 116 +++++++++ .../EnhancedCovarianceMatrixEvolutionV2.py | 135 ++++++++++ .../EnhancedCrossoverElitistStrategyV9.py | 78 ++++++ .../EnhancedCrowdingMemoryHybridOptimizer.py | 196 +++++++++++++++ ...edCulturalAdaptiveDifferentialEvolution.py | 125 ++++++++++ .../EnhancedCulturalEvolutionaryOptimizer.py | 117 +++++++++ ...cedCulturalMemeticDifferentialEvolution.py | 130 ++++++++++ .../lama/EnhancedDifferentialEvolution.py | 56 +++++ ...nhancedDifferentialEvolutionAdaptivePSO.py | 95 +++++++ ...edDifferentialEvolutionAdaptiveStrategy.py | 68 +++++ ...dDifferentialEvolutionFireworkAlgorithm.py | 55 ++++ ...edDifferentialEvolutionLSRefinement_v15.py | 85 +++++++ ...edDifferentialEvolutionLSRefinement_v16.py | 85 +++++++ ...edDifferentialEvolutionLSRefinement_v17.py | 85 +++++++ ...edDifferentialEvolutionLSRefinement_v18.py | 85 +++++++ ...edDifferentialEvolutionLSRefinement_v19.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v21.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v22.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v23.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v24.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v25.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v26.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v27.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v28.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v29.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v30.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v31.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v32.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v33.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v34.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v35.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v36.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v37.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v38.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v39.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v40.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v41.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v43.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v44.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v45.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v46.py | 85 +++++++ ...cedDifferentialEvolutionLocalSearch_v47.py | 87 +++++++ ...cedDifferentialEvolutionLocalSearch_v48.py | 94 +++++++ ...cedDifferentialEvolutionLocalSearch_v49.py | 94 +++++++ ...cedDifferentialEvolutionLocalSearch_v50.py | 94 +++++++ ...cedDifferentialEvolutionLocalSearch_v51.py | 94 +++++++ ...cedDifferentialEvolutionLocalSearch_v52.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v53.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v59.py | 110 ++++++++ ...cedDifferentialEvolutionLocalSearch_v60.py | 110 ++++++++ ...cedDifferentialEvolutionLocalSearch_v62.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v63.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v64.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v66.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v67.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v68.py | 109 ++++++++ ...cedDifferentialEvolutionLocalSearch_v69.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v70.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v71.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v72.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v73.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v74.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v75.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v76.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v77.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v78.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v79.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v80.py | 104 ++++++++ .../EnhancedDifferentialEvolutionOptimizer.py | 78 ++++++ ...erentialEvolutionParticleSwarmOptimizer.py | 79 ++++++ ...entialEvolutionParticleSwarmOptimizerV2.py | 79 ++++++ ...entialEvolutionParticleSwarmOptimizerV3.py | 79 ++++++ ...entialEvolutionParticleSwarmOptimizerV4.py | 93 +++++++ ...ialEvolutionWithAdaptiveMutationControl.py | 89 +++++++ .../EnhancedDifferentialFireworkAlgorithm.py | 72 ++++++ ...nhancedDifferentialFireworkAlgorithm_v2.py | 82 ++++++ ...DifferentialSimulatedAnnealingOptimizer.py | 53 ++++ ...EnhancedDifferentiatedAdaptiveEvolution.py | 86 +++++++ .../EnhancedDimensionalFeedbackEvolverV3.py | 92 +++++++ .../EnhancedDiverseMemoryHybridOptimizer.py | 194 +++++++++++++++ ...nhancedDiversifiedAdaptiveHarmonySearch.py | 103 ++++++++ ...ncedDiversifiedCuckooFireworksAlgorithm.py | 78 ++++++ ...edDiversifiedCuckooFireworksAlgorithmV2.py | 79 ++++++ ...versifiedGravitationalSwarmOptimization.py | 99 ++++++++ ...rsifiedGravitationalSwarmOptimizationV2.py | 91 +++++++ ...rsifiedGravitationalSwarmOptimizationV3.py | 91 +++++++ ...rsifiedGravitationalSwarmOptimizationV4.py | 91 +++++++ ...rsifiedGravitationalSwarmOptimizationV5.py | 91 +++++++ ...rsifiedGravitationalSwarmOptimizationV6.py | 91 +++++++ ...rsifiedGravitationalSwarmOptimizationV7.py | 91 +++++++ ...ncedDiversifiedHarmonicHarmonyOptimizer.py | 70 ++++++ ...dDiversifiedHarmonicHarmonyOptimizer_V2.py | 84 +++++++ ...dDiversifiedHarmonicHarmonyOptimizer_V3.py | 80 ++++++ .../EnhancedDiversifiedHarmonyAlgorithm.py | 70 ++++++ ...cedDiversifiedHarmonyFireworksAlgorithm.py | 74 ++++++ ...dDiversifiedHarmonyFireworksAlgorithmV2.py | 74 ++++++ ...dDiversifiedHarmonyFireworksAlgorithmV3.py | 74 ++++++ ...hancedDiversifiedHarmonySearchOptimizer.py | 105 ++++++++ ...ncedDiversifiedMetaHeuristicAlgorithmV3.py | 85 +++++++ ...ncedDiversifiedMetaHeuristicAlgorithmV4.py | 85 +++++++ ...seAdaptiveEvolutionarySwarmOptimization.py | 146 +++++++++++ ...edDualPhaseAdaptiveHybridOptimizationV3.py | 146 +++++++++++ ...ancedDualPhaseAdaptiveHybridOptimizerV3.py | 160 ++++++++++++ ...aseAdaptiveMemeticDifferentialEvolution.py | 170 +++++++++++++ .../EnhancedDualPhaseDifferentialEvolution.py | 159 ++++++++++++ .../EnhancedDualPhaseHybridOptimization.py | 145 +++++++++++ .../EnhancedDualPhaseHybridOptimizationV2.py | 145 +++++++++++ .../lama/EnhancedDualStrategyAdaptiveDE_v2.py | 125 ++++++++++ .../EnhancedDualStrategyHybridOptimizer.py | 154 ++++++++++++ ...EnhancedDynamicAdaptiveClimbingStrategy.py | 86 +++++++ .../lama/EnhancedDynamicAdaptiveDE.py | 162 ++++++++++++ ...cedDynamicAdaptiveDifferentialEvolution.py | 84 +++++++ ...ptiveDifferentialEvolutionHyperMutation.py | 99 ++++++++ ...micAdaptiveDifferentialEvolutionRefined.py | 88 +++++++ ...dDynamicAdaptiveDifferentialEvolutionV2.py | 87 +++++++ ...dDynamicAdaptiveExplorationOptimization.py | 163 ++++++++++++ ...nhancedDynamicAdaptiveFireworkAlgorithm.py | 99 ++++++++ ...cAdaptiveGravitationalSwarmIntelligence.py | 96 +++++++ ...daptiveGravitationalSwarmIntelligenceV2.py | 99 ++++++++ ...edDynamicAdaptiveHarmonySearchOptimizer.py | 71 ++++++ ...DynamicAdaptiveHarmonySearchOptimizerV2.py | 71 ++++++ ...DynamicAdaptiveHarmonySearchOptimizerV3.py | 71 ++++++ ...DynamicAdaptiveHarmonySearchOptimizerV4.py | 71 ++++++ ...DynamicAdaptiveHarmonySearchOptimizerV5.py | 71 ++++++ ...DynamicAdaptiveHarmonySearchOptimizerV6.py | 71 ++++++ .../EnhancedDynamicAdaptiveHybridDEPSO.py | 151 +++++++++++ ...hancedDynamicAdaptiveHybridOptimization.py | 141 +++++++++++ .../EnhancedDynamicAdaptiveHybridOptimizer.py | 58 +++++ .../EnhancedDynamicAdaptiveMemoryAnnealing.py | 135 ++++++++++ ...nhancedDynamicAdaptiveMemoryStrategyV59.py | 91 +++++++ .../EnhancedDynamicAdaptiveOptimizerV8.py | 61 +++++ ...AdaptivePopulationDifferentialEvolution.py | 184 ++++++++++++++ .../lama/EnhancedDynamicAdaptiveQuantumPSO.py | 133 ++++++++++ .../lama/EnhancedDynamicBalancingPSO.py | 79 ++++++ .../EnhancedDynamicClusterOptimization.py | 151 +++++++++++ .../lama/EnhancedDynamicClusterSearch.py | 164 ++++++++++++ .../lama/EnhancedDynamicCohortOptimization.py | 75 ++++++ .../lama/EnhancedDynamicCrossoverRAMEDS.py | 70 ++++++ .../EnhancedDynamicCuckooHarmonyAlgorithm.py | 67 +++++ .../EnhancedDynamicDifferentialEvolution.py | 136 ++++++++++ ...cedDynamicDifferentialEvolutionImproved.py | 116 +++++++++ ...ncedDynamicDifferentialEvolutionRefined.py | 121 +++++++++ .../EnhancedDynamicDifferentialEvolutionV2.py | 116 +++++++++ .../EnhancedDynamicDifferentialEvolutionV3.py | 116 +++++++++ ...ferentialEvolutionWithAdaptiveCrossover.py | 121 +++++++++ ...olutionWithAdaptiveCrossoverAndMutation.py | 132 ++++++++++ ...alEvolutionWithAdaptiveCrossoverRefined.py | 121 +++++++++ ...nWithSelfAdaptiveParametersAndCrossover.py | 83 +++++++ ...ynamicDiversifiedHarmonySearchOptimizer.py | 82 ++++++ .../EnhancedDynamicDualPhaseStrategyV12.py | 88 +++++++ .../lama/EnhancedDynamicEliteAnnealingDE.py | 165 ++++++++++++ .../lama/EnhancedDynamicEscapeStrategyV32.py | 82 ++++++ .../lama/EnhancedDynamicEvolutionStrategy.py | 76 ++++++ .../EnhancedDynamicExplorationOptimizer.py | 166 +++++++++++++ .../lama/EnhancedDynamicFireworkAlgorithm.py | 96 +++++++ .../EnhancedDynamicFireworkAlgorithmFinal.py | 95 +++++++ ...nhancedDynamicFireworkAlgorithmImproved.py | 98 ++++++++ ...ancedDynamicFireworkAlgorithmRedesigned.py | 96 +++++++ ...EnhancedDynamicFireworkAlgorithmRefined.py | 98 ++++++++ .../EnhancedDynamicFireworkAlgorithmV2.py | 91 +++++++ ...ireworkAlgorithmWithAdaptiveLocalSearch.py | 96 +++++++ ...thAdaptiveLocalSearchAndDynamicMutation.py | 115 +++++++++ ...daptiveLocalSearchAndDynamicMutationV10.py | 115 +++++++++ ...daptiveLocalSearchAndDynamicMutationV11.py | 115 +++++++++ ...daptiveLocalSearchAndDynamicMutationV12.py | 117 +++++++++ ...daptiveLocalSearchAndDynamicMutationV13.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV2.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV3.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV4.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV5.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV6.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV7.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV8.py | 115 +++++++++ ...AdaptiveLocalSearchAndDynamicMutationV9.py | 115 +++++++++ ...ithmWithAdaptiveLocalSearchOptimization.py | 99 ++++++++ ...eworkAlgorithmWithBeeColonyOptimization.py | 118 +++++++++ ...thBetterAdaptiveLocalSearchOptimization.py | 99 ++++++++ ...ynamicFireworkAlgorithmWithHybridSearch.py | 118 +++++++++ ...orkAlgorithmWithLocalSearchOptimization.py | 99 ++++++++ ...cedDynamicFireworkDifferentialEvolution.py | 65 +++++ ...dDynamicFireworkDifferentialEvolutionV2.py | 65 +++++ ...dDynamicFireworkDifferentialEvolutionV3.py | 65 +++++ ...GradientBoostedMemorySimulatedAnnealing.py | 156 ++++++++++++ ...ientBoostedMemorySimulatedAnnealingPlus.py | 140 +++++++++++ .../lama/EnhancedDynamicHarmonyAlgorithm.py | 84 +++++++ .../lama/EnhancedDynamicHarmonyAlgorithmV2.py | 87 +++++++ .../EnhancedDynamicHarmonyFireworksSearch.py | 72 ++++++ .../EnhancedDynamicHarmonySearchOptimizer.py | 73 ++++++ ...EnhancedDynamicHarmonySearchOptimizerV7.py | 71 ++++++ .../lama/EnhancedDynamicHarmonySearchV5.py | 76 ++++++ .../lama/EnhancedDynamicHarmonySearchV6.py | 76 ++++++ .../lama/EnhancedDynamicHarmonySearchV7.py | 76 ++++++ .../lama/EnhancedDynamicHarmonySearchV8.py | 76 ++++++ .../lama/EnhancedDynamicHarmonyTabuSearch.py | 82 ++++++ ...hancedDynamicHybridDEPSOWithEliteMemory.py | 166 +++++++++++++ ...ridHarmonySearchWithAdaptiveMutationV21.py | 80 ++++++ .../lama/EnhancedDynamicHybridOptimization.py | 163 ++++++++++++ .../lama/EnhancedDynamicHybridOptimizer.py | 171 +++++++++++++ .../lama/EnhancedDynamicLevyHarmonySearch.py | 70 ++++++ .../EnhancedDynamicLevyHarmonySearchV2.py | 67 +++++ .../EnhancedDynamicLevyHarmonySearchV3.py | 67 +++++ ...ncedDynamicLocalSearchFireworkAlgorithm.py | 96 +++++++ ...edDynamicLocalSearchFireworkAlgorithmV2.py | 96 +++++++ ...edDynamicLocalSearchFireworkAlgorithmV3.py | 96 +++++++ .../lama/EnhancedDynamicMemoryStrategyV51.py | 91 +++++++ .../EnhancedDynamicMultiPhaseAnnealingPlus.py | 125 ++++++++++ .../lama/EnhancedDynamicMutationSearch.py | 79 ++++++ .../lama/EnhancedDynamicNichePSO_DE_LS.py | 154 ++++++++++++ .../lama/EnhancedDynamicNichingDEPSO.py | 137 ++++++++++ ...hancedDynamicPrecisionBalancedEvolution.py | 83 +++++++ .../lama/EnhancedDynamicPrecisionOptimizer.py | 60 +++++ ...ncedDynamicQuantumDifferentialEvolution.py | 177 +++++++++++++ ...tionWithAdaptiveRestartAndDiverseMemory.py | 167 +++++++++++++ ...lutionWithLocalSearchAndAdaptiveRestart.py | 145 +++++++++++ ...EnhancedDynamicQuantumSwarmOptimization.py | 100 ++++++++ ...cedDynamicQuantumSwarmOptimizationFinal.py | 86 +++++++ ...DynamicQuantumSwarmOptimizationImproved.py | 83 +++++++ ...ancedDynamicQuantumSwarmOptimizationV10.py | 104 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV11.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV12.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV13.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV14.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV15.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV16.py | 103 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV17.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV18.py | 107 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV19.py | 106 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV2.py | 101 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV20.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV21.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV22.py | 106 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV23.py | 95 +++++++ ...ancedDynamicQuantumSwarmOptimizationV24.py | 100 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV25.py | 100 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV26.py | 100 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV27.py | 100 ++++++++ ...ancedDynamicQuantumSwarmOptimizationV28.py | 100 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV3.py | 107 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV4.py | 103 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV5.py | 108 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV6.py | 108 ++++++++ ...hancedDynamicQuantumSwarmOptimizationV7.py | 76 ++++++ ...hancedDynamicQuantumSwarmOptimizationV8.py | 76 ++++++ ...hancedDynamicQuantumSwarmOptimizationV9.py | 108 ++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 180 ++++++++++++++ ...efinementGradientBoostedMemoryAnnealing.py | 150 +++++++++++ .../lama/EnhancedDynamicRestartAdaptiveDE.py | 150 +++++++++++ .../lama/EnhancedDynamicStrategyAdaptiveDE.py | 169 +++++++++++++ ...cedDynamicallyAdaptiveFireworkAlgorithm.py | 108 ++++++++ ...icallyAdaptiveFireworkAlgorithmImproved.py | 108 ++++++++ .../lama/EnhancedEliteAdaptiveHybridDEPSO.py | 152 +++++++++++ ...ancedEliteAdaptiveMemoryHybridOptimizer.py | 206 +++++++++++++++ ...cedEliteAdaptiveMemoryHybridOptimizerV2.py | 206 +++++++++++++++ ...cedEliteAdaptiveMemoryHybridOptimizerV6.py | 206 +++++++++++++++ ...cedEliteAdaptiveMemoryHybridOptimizerV7.py | 174 +++++++++++++ ...cedEliteCrowdingMemoryHybridOptimizerV3.py | 197 +++++++++++++++ .../lama/EnhancedEliteGuidedAdaptiveDE.py | 98 ++++++++ .../EnhancedEliteGuidedAdaptiveRestartDE.py | 115 +++++++++ .../lama/EnhancedEliteGuidedDualMutationDE.py | 118 +++++++++ .../lama/EnhancedEliteGuidedMassQGSA_v81.py | 125 ++++++++++ .../lama/EnhancedEliteGuidedMassQGSA_v82.py | 125 ++++++++++ .../lama/EnhancedEliteGuidedMassQGSA_v83.py | 125 ++++++++++ .../lama/EnhancedEliteGuidedMassQGSA_v85.py | 125 ++++++++++ .../lama/EnhancedEliteGuidedMassQGSA_v86.py | 125 ++++++++++ .../lama/EnhancedEliteGuidedMutationDE_v2.py | 125 ++++++++++ .../lama/EnhancedEliteHybridOptimizer.py | 163 ++++++++++++ ...eQuantumAdaptiveExplorationOptimization.py | 235 ++++++++++++++++++ ...edEnhancedAdaptiveHarmonicTabuSearchV24.py | 100 ++++++++ ...thLocalOptimizationAndDiversificationV7.py | 117 +++++++++ ...thLocalOptimizationAndDiversificationV8.py | 117 +++++++++ ...iveOppositionBasedDifferentialEvolution.py | 97 ++++++++ ...cedDifferentialEvolutionLocalSearch_v57.py | 110 ++++++++ ...cAdaptiveGravitationalSwarmIntelligence.py | 96 +++++++ ...EnhancedDynamicQuantumSwarmOptimization.py | 83 +++++++ ...olutionaryDifferentialSwarmOptimizerV10.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV6.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV7.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV8.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV9.py | 110 ++++++++ ...hancedEnhancedFireworkSwarmOptimization.py | 72 ++++++ ...cedEnhancedFireworkSwarmOptimization_v2.py | 72 ++++++ ...cedEnhancedFireworkSwarmOptimization_v3.py | 72 ++++++ ...cedEnhancedFireworkSwarmOptimization_v4.py | 76 ++++++ .../EnhancedEnhancedGuidedMassQGSA_v63.py | 105 ++++++++ .../EnhancedEnhancedGuidedMassQGSA_v64.py | 116 +++++++++ .../EnhancedEnhancedGuidedMassQGSA_v68.py | 105 ++++++++ ...thImprovedAdaptiveLevyFlightInspiration.py | 100 ++++++++ ...cedEnhancedHybridMetaHeuristicOptimizer.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV10.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV11.py | 92 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV12.py | 92 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV13.py | 92 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV14.py | 92 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV2.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV3.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV4.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV5.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV6.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV7.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV8.py | 93 +++++++ ...dEnhancedHybridMetaHeuristicOptimizerV9.py | 93 +++++++ ...nhancedEnhancedMetaHeuristicOptimizerV3.py | 93 +++++++ ...EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py | 93 +++++++ ...imateRefinedAQAPSO_LS_DIW_AP_Refined_V4.py | 85 +++++++ ...volutionaryDifferentialSwarmOptimizerV1.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV12.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV13.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV14.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV15.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV16.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV17.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV18.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV19.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV2.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV20.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV21.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV22.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV23.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV24.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV25.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV26.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV27.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV28.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV29.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV3.py | 110 ++++++++ ...olutionaryDifferentialSwarmOptimizerV30.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV4.py | 110 ++++++++ ...volutionaryDifferentialSwarmOptimizerV5.py | 110 ++++++++ .../EnhancedEvolutionaryFireworksSearch.py | 76 ++++++ .../EnhancedEvolutionaryFireworksSearch_v2.py | 77 ++++++ .../EnhancedEvolutionaryFireworksSearch_v3.py | 82 ++++++ .../EnhancedEvolutionaryFireworksSearch_v4.py | 81 ++++++ .../EnhancedEvolutionaryFireworksSearch_v5.py | 74 ++++++ .../EnhancedEvolutionaryFireworksSearch_v6.py | 78 ++++++ .../EnhancedEvolutionaryGradientSearch.py | 87 +++++++ ...ancedEvolutionaryParticleSwarmOptimizer.py | 70 ++++++ ...cedEvolutionaryParticleSwarmOptimizerV2.py | 70 ++++++ ...cedEvolutionaryParticleSwarmOptimizerV3.py | 70 ++++++ .../lama/EnhancedEvolutionaryStrategy.py | 46 ++++ ...plorationGravitationalSwarmOptimization.py | 77 ++++++ ...orationGravitationalSwarmOptimizationV2.py | 79 ++++++ ...orationGravitationalSwarmOptimizationV3.py | 79 ++++++ ...orationGravitationalSwarmOptimizationV4.py | 77 ++++++ ...orationGravitationalSwarmOptimizationV5.py | 77 ++++++ ...hancedExplorativeHarmonicSwarmOptimizer.py | 61 +++++ .../lama/EnhancedFireworkAlgorithm.py | 68 +++++ .../EnhancedFireworkAlgorithmOptimization.py | 71 ++++++ ...nhancedFireworkAlgorithmOptimization_v2.py | 74 ++++++ ...ireworkAlgorithmWithAdaptiveLocalSearch.py | 115 +++++++++ ...AlgorithmWithAdaptiveLocalSearchRefined.py | 94 +++++++ ...edFireworkAlgorithmWithAdaptiveMutation.py | 92 +++++++ ...cedFireworkAlgorithmWithDynamicMutation.py | 114 +++++++++ ...dFireworkAlgorithmWithHybridLocalSearch.py | 94 +++++++ ...edFireworkAlgorithmWithImprovedMutation.py | 114 +++++++++ ...nhancedFireworkAlgorithmWithLocalSearch.py | 91 +++++++ ...edFireworkAlgorithmWithLocalSearchFinal.py | 95 +++++++ ...kAlgorithmWithLocalSearchFinalOptimized.py | 98 ++++++++ ...orkAlgorithmWithLocalSearchFinalRefined.py | 95 +++++++ ...ireworkAlgorithmWithLocalSearchImproved.py | 96 +++++++ ...reworkAlgorithmWithLocalSearchOptimized.py | 95 +++++++ ...FireworkAlgorithmWithLocalSearchRefined.py | 95 +++++++ .../lama/EnhancedFireworkSwarmOptimization.py | 55 ++++ .../lama/EnhancedFireworksAlgorithm.py | 77 ++++++ .../EnhancedFireworksSwarmOptimization_v4.py | 89 +++++++ .../EnhancedFocusedBalancedAdaptivePSO.py | 76 ++++++ .../lama/EnhancedGlobalClimbingOptimizer.py | 77 ++++++ .../lama/EnhancedGlobalClimbingOptimizerV3.py | 73 ++++++ .../EnhancedGlobalStructureAdaptiveEvolver.py | 75 ++++++ .../EnhancedGlobalStructureAwareOptimizer.py | 91 +++++++ .../lama/EnhancedGlobalStructureOptimizer.py | 75 ++++++ ...dientBoostedAnnealingWithAdaptiveMemory.py | 170 +++++++++++++ .../EnhancedGradientGuidedClusterSearch.py | 74 ++++++ .../lama/EnhancedGradientGuidedEvolution.py | 81 ++++++ .../lama/EnhancedGradientGuidedHybridPSO.py | 70 ++++++ .../lama/EnhancedGradualAdaptiveRAMEDS.py | 81 ++++++ .../EnhancedGravitationSwarmOptimization.py | 96 +++++++ .../EnhancedGravitationSwarmOptimizationV2.py | 96 +++++++ ...hancedGravitationalSwarmIntelligenceV10.py | 108 ++++++++ ...hancedGravitationalSwarmIntelligenceV11.py | 99 ++++++++ ...hancedGravitationalSwarmIntelligenceV12.py | 99 ++++++++ ...hancedGravitationalSwarmIntelligenceV13.py | 102 ++++++++ ...hancedGravitationalSwarmIntelligenceV14.py | 102 ++++++++ ...hancedGravitationalSwarmIntelligenceV15.py | 102 ++++++++ ...hancedGravitationalSwarmIntelligenceV16.py | 102 ++++++++ ...hancedGravitationalSwarmIntelligenceV17.py | 87 +++++++ ...hancedGravitationalSwarmIntelligenceV18.py | 91 +++++++ ...hancedGravitationalSwarmIntelligenceV19.py | 91 +++++++ ...nhancedGravitationalSwarmIntelligenceV2.py | 89 +++++++ ...hancedGravitationalSwarmIntelligenceV20.py | 92 +++++++ ...hancedGravitationalSwarmIntelligenceV21.py | 94 +++++++ ...hancedGravitationalSwarmIntelligenceV22.py | 100 ++++++++ ...hancedGravitationalSwarmIntelligenceV23.py | 98 ++++++++ ...hancedGravitationalSwarmIntelligenceV24.py | 91 +++++++ ...hancedGravitationalSwarmIntelligenceV25.py | 91 +++++++ ...nhancedGravitationalSwarmIntelligenceV3.py | 89 +++++++ ...hancedGravitationalSwarmIntelligenceV30.py | 96 +++++++ ...hancedGravitationalSwarmIntelligenceV31.py | 96 +++++++ ...hancedGravitationalSwarmIntelligenceV32.py | 96 +++++++ ...nhancedGravitationalSwarmIntelligenceV4.py | 89 +++++++ ...nhancedGravitationalSwarmIntelligenceV6.py | 90 +++++++ ...nhancedGravitationalSwarmIntelligenceV7.py | 99 ++++++++ ...nhancedGravitationalSwarmIntelligenceV8.py | 99 ++++++++ ...nhancedGravitationalSwarmIntelligenceV9.py | 99 ++++++++ ...rmOptimizationWithDiversityPreservation.py | 84 +++++++ ...ationWithDynamicDiversityPreservationV2.py | 99 ++++++++ ...ationWithDynamicDiversityPreservationV3.py | 99 ++++++++ .../lama/EnhancedGuidedMassQGSA_v62.py | 105 ++++++++ .../lama/EnhancedGuidedMassQGSA_v94.py | 129 ++++++++++ .../lama/EnhancedHarmonicFireworkAlgorithm.py | 74 ++++++ ...EnhancedHarmonicLevyDolphinOptimization.py | 74 ++++++ .../lama/EnhancedHarmonicSearchOptimizer.py | 76 ++++++ .../lama/EnhancedHarmonicSearchOptimizerV2.py | 76 ++++++ .../lama/EnhancedHarmonicSearchOptimizerV3.py | 76 ++++++ .../lama/EnhancedHarmonicSearchOptimizerV4.py | 77 ++++++ .../lama/EnhancedHarmonicSearchOptimizerV5.py | 86 +++++++ .../lama/EnhancedHarmonicSwarmOptimization.py | 61 +++++ .../EnhancedHarmonicSwarmOptimizationV2.py | 67 +++++ .../EnhancedHarmonicSwarmOptimizationV3.py | 67 +++++ .../EnhancedHarmonicSwarmOptimizationV4.py | 69 +++++ .../lama/EnhancedHarmonicTabuSearchV11.py | 97 ++++++++ .../lama/EnhancedHarmonicTabuSearchV13.py | 77 ++++++ .../lama/EnhancedHarmonicTabuSearchV14.py | 89 +++++++ .../lama/EnhancedHarmonicTabuSearchV15.py | 97 ++++++++ .../lama/EnhancedHarmonicTabuSearchV16.py | 105 ++++++++ .../lama/EnhancedHarmonicTabuSearchV19.py | 93 +++++++ ...hancedHarmonyDiversifiedCuckooAlgorithm.py | 70 ++++++ .../lama/EnhancedHarmonyFireworkOptimizer.py | 65 +++++ .../lama/EnhancedHarmonyMemeticAlgorithmV2.py | 83 +++++++ .../lama/EnhancedHarmonyMemeticAlgorithmV3.py | 83 +++++++ .../lama/EnhancedHarmonyMemeticAlgorithmV4.py | 83 +++++++ .../EnhancedHarmonyMemeticOptimizationV10.py | 95 +++++++ .../EnhancedHarmonyMemeticOptimizationV11.py | 95 +++++++ .../EnhancedHarmonyMemeticOptimizationV12.py | 95 +++++++ .../EnhancedHarmonyMemeticOptimizationV13.py | 95 +++++++ .../EnhancedHarmonyMemeticOptimizationV14.py | 99 ++++++++ .../EnhancedHarmonyMemeticOptimizationV15.py | 99 ++++++++ .../EnhancedHarmonyMemeticOptimizationV16.py | 99 ++++++++ .../EnhancedHarmonyMemeticOptimizationV17.py | 99 ++++++++ .../EnhancedHarmonyMemeticOptimizationV34.py | 108 ++++++++ .../lama/EnhancedHarmonyMemeticSearch.py | 86 +++++++ .../lama/EnhancedHarmonyMemeticSearchV2.py | 86 +++++++ .../lama/EnhancedHarmonyMemeticSearchV3.py | 92 +++++++ .../lama/EnhancedHarmonySearchOB.py | 72 ++++++ ...SearchWithAdaptiveLevyFlightInspiration.py | 96 +++++++ ...edHarmonySearchWithAdaptiveLevyFlightV2.py | 68 +++++ .../lama/EnhancedHarmonyTabuOptimization.py | 64 +++++ .../lama/EnhancedHarmonyTabuOptimizationV2.py | 68 +++++ .../lama/EnhancedHarmonyTabuOptimizationV3.py | 68 +++++ .../lama/EnhancedHarmonyTabuSearch.py | 94 +++++++ .../lama/EnhancedHarmonyTabuSearchV2.py | 94 +++++++ .../lama/EnhancedHarmonyTabuSearchV3.py | 92 +++++++ .../lama/EnhancedHarmonyTabuSearchV4.py | 74 ++++++ .../lama/EnhancedHarmonyTabuSearchV6.py | 93 +++++++ .../lama/EnhancedHarmonyTabuSearchV7.py | 95 +++++++ ...dHierarchicalCovarianceMatrixAdaptation.py | 149 +++++++++++ ...ncedHybridAdaptiveDifferentialEvolution.py | 92 +++++++ ...ancedHybridAdaptiveExplorationOptimizer.py | 186 ++++++++++++++ ...ncedHybridAdaptiveGeneticSwarmOptimizer.py | 140 +++++++++++ ...bridAdaptiveHarmonicFireworksTabuSearch.py | 111 +++++++++ .../EnhancedHybridAdaptiveMemoryAnnealing.py | 72 ++++++ ...hancedHybridAdaptiveMultiPhaseEvolution.py | 104 ++++++++ ...cedHybridAdaptiveMultiStageOptimization.py | 139 +++++++++++ .../EnhancedHybridAdaptiveQuantumOptimizer.py | 116 +++++++++ .../lama/EnhancedHybridAdaptiveSearch.py | 82 ++++++ ...aptiveSelfAdaptiveDifferentialEvolution.py | 152 +++++++++++ .../lama/EnhancedHybridCMAESDE.py | 183 ++++++++++++++ ...idCovarianceMatrixDifferentialEvolution.py | 156 ++++++++++++ ...ancedHybridDEPSOWithDynamicAdaptationV4.py | 152 +++++++++++ ...nhancedHybridDEPSOWithQuantumLevyFlight.py | 173 +++++++++++++ .../EnhancedHybridDEPSOwithAdaptiveRestart.py | 149 +++++++++++ ...idDifferentialEvolutionMemeticOptimizer.py | 97 ++++++++ ...dDynamicAdaptiveExplorationOptimization.py | 166 +++++++++++++ .../EnhancedHybridExplorationOptimization.py | 163 ++++++++++++ ...hancedHybridGradientAnnealingWithMemory.py | 125 ++++++++++ .../EnhancedHybridGradientBasedStrategyV8.py | 78 ++++++ .../lama/EnhancedHybridGradientPSO.py | 79 ++++++ ...ridHarmonySearchWithAdaptiveMutationV20.py | 103 ++++++++ .../lama/EnhancedHybridMemoryAdaptiveDE.py | 136 ++++++++++ .../lama/EnhancedHybridMemoryPSO.py | 154 ++++++++++++ .../EnhancedHybridMetaHeuristicOptimizer.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV10.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV11.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV12.py | 93 +++++++ ...EnhancedHybridMetaHeuristicOptimizerV15.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV2.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV3.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV4.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV5.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV6.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV7.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV8.py | 93 +++++++ .../EnhancedHybridMetaHeuristicOptimizerV9.py | 93 +++++++ ...EnhancedHybridMetaOptimizationAlgorithm.py | 86 +++++++ ...hancedHybridMetaOptimizationAlgorithmV2.py | 54 ++++ .../lama/EnhancedHybridOptimization.py | 157 ++++++++++++ .../lama/EnhancedHybridOptimizer.py | 170 +++++++++++++ .../EnhancedHybridQuantumDifferentialPSO.py | 154 ++++++++++++ ...uasiRandomGradientDifferentialEvolution.py | 125 ++++++++++ .../optimization/lama/EnhancedHybridSearch.py | 109 ++++++++ ...cedHybridSimulatedAnnealingOptimization.py | 214 ++++++++++++++++ .../lama/EnhancedHyperAdaptiveHybridDEPSO.py | 149 +++++++++++ ...ptimalStrategicEvolutionaryOptimizerV59.py | 81 ++++++ ...timizedEvolutionaryGradientOptimizerV62.py | 82 ++++++ ...yperOptimizedMultiStrategicOptimizerV49.py | 78 ++++++ ...rParameterTunedMetaHeuristicOptimizerV4.py | 93 +++++++ .../EnhancedHyperStrategicOptimizerV56.py | 82 ++++++ ...vedDifferentialEvolutionLocalSearch_v58.py | 110 ++++++++ ...perParameterTunedMetaHeuristicOptimizer.py | 93 +++++++ ...provedRefinedUltimateGuidedMassQGSA_v77.py | 124 +++++++++ ...dSuperDynamicQuantumSwarmOptimizationV7.py | 95 +++++++ .../lama/EnhancedIslandEvolutionStrategy.py | 98 ++++++++ .../EnhancedIslandEvolutionStrategyV10.py | 109 ++++++++ .../lama/EnhancedIslandEvolutionStrategyV3.py | 97 ++++++++ .../lama/EnhancedIslandEvolutionStrategyV7.py | 109 ++++++++ .../lama/EnhancedIslandEvolutionStrategyV8.py | 109 ++++++++ .../EnhancedLocalSearchAdaptiveStrategyV29.py | 71 ++++++ ...dLocalSearchQuantumSimulatedAnnealingV6.py | 69 +++++ .../EnhancedMemeticDifferentialEvolution.py | 94 +++++++ .../lama/EnhancedMemeticEvolutionarySearch.py | 92 +++++++ .../EnhancedMemeticHarmonyOptimization.py | 124 +++++++++ ...cedMemoryAdaptiveDynamicHybridOptimizer.py | 199 +++++++++++++++ ...emoryGuidedAdaptiveDualPhaseStrategyV77.py | 70 ++++++ ...EnhancedMemoryGuidedAdaptiveStrategyV41.py | 88 +++++++ ...EnhancedMemoryGuidedAdaptiveStrategyV69.py | 91 +++++++ ...EnhancedMetaDynamicPrecisionOptimizerV1.py | 59 +++++ .../lama/EnhancedMetaHeuristicOptimizerV2.py | 93 +++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V1.py | 116 +++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V2.py | 127 ++++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V3.py | 127 ++++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V4.py | 127 ++++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V5.py | 127 ++++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V6.py | 127 ++++++++++ .../EnhancedMetaNetAQAPSO_LS_DIW_AP_V7.py | 127 ++++++++++ .../lama/EnhancedMetaNetAQAPSOv2.py | 123 +++++++++ .../lama/EnhancedMetaNetAQAPSOv3.py | 123 +++++++++ .../lama/EnhancedMetaNetAQAPSOv4.py | 123 +++++++++ .../lama/EnhancedMetaNetAQAPSOv5.py | 123 +++++++++ .../lama/EnhancedMetaNetAQAPSOv6.py | 123 +++++++++ .../optimization/lama/EnhancedMetaNetPSO.py | 130 ++++++++++ .../optimization/lama/EnhancedMetaNetPSOv2.py | 130 ++++++++++ ...cedMetaPopulationAdaptiveGradientSearch.py | 174 +++++++++++++ .../EnhancedMultiFocalAdaptiveOptimizer.py | 65 +++++ .../EnhancedMultiModalAdaptiveOptimizer.py | 95 +++++++ .../EnhancedMultiModalConvergenceOptimizer.py | 96 +++++++ .../EnhancedMultiModalExplorationStrategy.py | 92 +++++++ ...EnhancedMultiModalMemoryHybridOptimizer.py | 201 +++++++++++++++ .../lama/EnhancedMultiOperatorSearch.py | 117 +++++++++ .../lama/EnhancedMultiOperatorSearch2.py | 122 +++++++++ .../lama/EnhancedMultiPhaseAdaptiveDE.py | 138 ++++++++++ ...EnhancedMultiPhaseOptimizationAlgorithm.py | 118 +++++++++ ...ancedMultiStageGradientBoostedAnnealing.py | 175 +++++++++++++ ...ancedMultiStrategyDifferentialEvolution.py | 156 ++++++++++++ ...hancedMultiStrategyQuantumLevyOptimizer.py | 199 +++++++++++++++ ...NicheDifferentialParticleSwarmOptimizer.py | 149 +++++++++++ ...cedOppositionBasedDifferentialEvolution.py | 89 +++++++ .../EnhancedOppositionBasedHarmonySearch.py | 75 ++++++ ...itionBasedHarmonySearchDynamicBandwidth.py | 87 +++++++ ...onBasedHarmonySearchDynamicBandwidthABC.py | 100 ++++++++ ...nBasedHarmonySearchDynamicBandwidthSADE.py | 127 ++++++++++ ...dOptimalEvolutionaryGradientOptimizerV9.py | 87 +++++++ ...alPrecisionEvolutionaryThermalOptimizer.py | 57 +++++ .../EnhancedOptimizedEvolutiveStrategy.py | 67 +++++ ...efinedPrecisionEvolutionaryOptimizerV46.py | 78 ++++++ .../optimization/lama/EnhancedOrthogonalDE.py | 46 ++++ ...EnhancedOrthogonalDifferentialEvolution.py | 46 ++++ ...OrthogonalDifferentialEvolutionImproved.py | 51 ++++ ...hancedOrthogonalDifferentialEvolutionV2.py | 51 ++++ ...hancedOrthogonalDifferentialEvolutionV3.py | 68 +++++ ...hancedOrthogonalDifferentialEvolutionV4.py | 78 ++++++ .../EnhancedParallelDifferentialEvolution.py | 61 +++++ .../lama/EnhancedParticleSwarmOptimization.py | 59 +++++ .../lama/EnhancedParticleSwarmOptimizer.py | 47 ++++ .../lama/EnhancedParticleSwarmOptimizerV4.py | 60 +++++ .../lama/EnhancedParticleSwarmOptimizerV5.py | 60 +++++ .../lama/EnhancedParticleSwarmOptimizerV6.py | 60 +++++ .../EnhancedPhaseAdaptiveMemoryStrategyV75.py | 73 ++++++ ...nhancedPhaseTransitionMemoryStrategyV82.py | 80 ++++++ ...ncedPrecisionAdaptiveCohortOptimization.py | 72 ++++++ ...dPrecisionAdaptiveGradientClusteringPSO.py | 80 ++++++ ...edPrecisionBoostedDifferentialEvolution.py | 59 +++++ .../EnhancedPrecisionConvergenceOptimizer.py | 95 +++++++ ...hancedPrecisionEvolutionaryOptimizerV38.py | 78 ++++++ ...hancedPrecisionEvolutionaryOptimizerV39.py | 76 ++++++ .../EnhancedPrecisionGuidedQuantumStrategy.py | 82 ++++++ .../lama/EnhancedPrecisionHybridSearchV2.py | 70 ++++++ ...ecisionTunedCrossoverElitistStrategyV14.py | 81 ++++++ ...rogressiveAdaptiveDifferentialEvolution.py | 56 +++++ .../optimization/lama/EnhancedQAPSOAIRVCHR.py | 110 ++++++++ .../lama/EnhancedQAPSOAIRVCHRLS.py | 110 ++++++++ .../lama/EnhancedQAPSOAIRVCHRLSDP.py | 106 ++++++++ .../lama/EnhancedQuantumAdaptiveCrossover.py | 72 ++++++ .../lama/EnhancedQuantumAdaptiveDE.py | 149 +++++++++++ ...tistDynamicRestartAndDifferentialMemory.py | 140 +++++++++++ ...nhancedQuantumAdaptiveEliteGuidedSearch.py | 186 ++++++++++++++ ...hancedQuantumAdaptiveFireworksOptimizer.py | 59 +++++ ...uantumAdaptiveGradientDiversityExplorer.py | 95 +++++++ .../EnhancedQuantumAdaptiveHybridDEPSO_V4.py | 160 ++++++++++++ .../EnhancedQuantumAdaptiveHybridSearchV2.py | 97 ++++++++ ...cedQuantumAdaptiveLevySwarmOptimization.py | 160 ++++++++++++ .../EnhancedQuantumAdaptiveMultiPhaseDE_v3.py | 136 ++++++++++ ...edQuantumAdaptiveMultiStrategyEvolution.py | 184 ++++++++++++++ ...EnhancedQuantumAdaptiveNesterovStrategy.py | 71 ++++++ .../lama/EnhancedQuantumAdaptiveOptimizer.py | 75 ++++++ .../lama/EnhancedQuantumAnnealingOptimizer.py | 152 +++++++++++ ...ncedQuantumCognitionFocusedOptimizerV18.py | 75 ++++++ .../EnhancedQuantumCognitionOptimizerV12.py | 78 ++++++ .../EnhancedQuantumCooperativeStrategy.py | 71 ++++++ ...umCovarianceMatrixDifferentialEvolution.py | 192 ++++++++++++++ ...varianceMatrixDifferentialEvolutionPlus.py | 194 +++++++++++++++ ...nceMatrixDifferentialEvolutionRefinedV2.py | 201 +++++++++++++++ ...ialElitistAlgorithmWithAdaptiveRestarts.py | 140 +++++++++++ .../EnhancedQuantumDifferentialEvolution.py | 149 +++++++++++ ...lEvolutionWithAdaptiveElitismAndRestart.py | 109 ++++++++ ...fferentialEvolutionWithAdaptiveRestarts.py | 138 ++++++++++ ...lEvolutionWithAdaptiveRestartsAndMemory.py | 172 +++++++++++++ ...ntialEvolutionWithSelfAdaptiveMechanism.py | 89 +++++++ ...ialParticleOptimizerWithAdaptiveElitism.py | 154 ++++++++++++ ...antumDifferentialParticleSwarmOptimizer.py | 164 ++++++++++++ .../lama/EnhancedQuantumDiversityDE.py | 153 ++++++++++++ ...hancedQuantumDynamicAdaptiveHybridDEPSO.py | 160 ++++++++++++ .../EnhancedQuantumDynamicBalanceOptimizer.py | 84 +++++++ .../lama/EnhancedQuantumDynamicOptimizer.py | 76 ++++++ .../lama/EnhancedQuantumEvolutionStrategy.py | 73 ++++++ .../lama/EnhancedQuantumFireworksAlgorithm.py | 53 ++++ .../EnhancedQuantumFireworksAlgorithmV2.py | 64 +++++ ...GradientAdaptiveExplorationOptimization.py | 225 +++++++++++++++++ ...adientAdaptiveExplorationOptimizationV5.py | 212 ++++++++++++++++ ...dQuantumGradientExplorationOptimization.py | 216 ++++++++++++++++ ...uantumGradientExplorationOptimizationV2.py | 194 +++++++++++++++ ...EnhancedQuantumGradientMemeticOptimizer.py | 129 ++++++++++ .../EnhancedQuantumGradientOptimizerV5.py | 76 ++++++ ...hancedQuantumHarmonicAdaptationStrategy.py | 67 +++++ .../EnhancedQuantumHarmonyMemeticAlgorithm.py | 83 +++++++ .../lama/EnhancedQuantumHarmonySearch.py | 55 ++++ .../lama/EnhancedQuantumHarmonySearchAB.py | 58 +++++ .../lama/EnhancedQuantumHarmonySearchABGB.py | 62 +++++ ...EnhancedQuantumHarmonySearchABGBRefined.py | 62 +++++ .../lama/EnhancedQuantumHybridAdaptiveDE.py | 184 ++++++++++++++ .../EnhancedQuantumHybridAdaptiveDE_v2.py | 184 ++++++++++++++ ...nhancedQuantumInformedGradientOptimizer.py | 72 ++++++ .../EnhancedQuantumInfusedAdaptiveStrategy.py | 74 ++++++ .../EnhancedQuantumInspiredHybridOptimizer.py | 68 +++++ .../EnhancedQuantumIterativeRefinement.py | 74 ++++++ .../EnhancedQuantumLeapGradientBoostPSO.py | 86 +++++++ .../lama/EnhancedQuantumLeapPSO.py | 78 ++++++ ...QuantumLevyDifferentialDynamicOptimizer.py | 156 ++++++++++++ ...nhancedQuantumLevyDifferentialOptimizer.py | 156 ++++++++++++ .../EnhancedQuantumLevyDifferentialSearch.py | 159 ++++++++++++ .../EnhancedQuantumLevyMemeticOptimizer.py | 139 +++++++++++ ...EnhancedQuantumLevyParticleOptimization.py | 160 ++++++++++++ .../lama/EnhancedQuantumLocalSearch.py | 69 +++++ .../EnhancedQuantumLocalSearchImproved.py | 75 ++++++ .../lama/EnhancedQuantumMemeticOptimizer.py | 128 ++++++++++ .../lama/EnhancedQuantumMemeticOptimizerV5.py | 129 ++++++++++ ...EnhancedQuantumMultiPhaseAdaptiveDE_v10.py | 175 +++++++++++++ ...ncedQuantumMultiStrategyOptimization_v2.py | 186 ++++++++++++++ .../optimization/lama/EnhancedQuantumPSO.py | 92 +++++++ ...ancedQuantumReactiveCooperativeStrategy.py | 79 ++++++ ...dQuantumReinforcedNesterovAcceleratorV2.py | 76 ++++++ ...ncedQuantumResilientCrossoverStrategyV2.py | 74 ++++++ .../lama/EnhancedQuantumSimulatedAnnealing.py | 60 +++++ ...hancedQuantumSimulatedAnnealingImproved.py | 49 ++++ ...ancedQuantumSimulatedAnnealingOptimized.py | 42 ++++ .../EnhancedQuantumSimulatedAnnealingV2.py | 59 +++++ ...nhancedQuantumStateConvergenceOptimizer.py | 50 ++++ .../lama/EnhancedQuantumSwarmOptimization.py | 83 +++++++ ...EnhancedQuantumSwarmOptimizationRefined.py | 83 +++++++ .../EnhancedQuantumSwarmOptimizationV10.py | 72 ++++++ .../EnhancedQuantumSwarmOptimizationV11.py | 78 ++++++ .../EnhancedQuantumSwarmOptimizationV12.py | 78 ++++++ .../EnhancedQuantumSwarmOptimizationV13.py | 76 ++++++ .../EnhancedQuantumSwarmOptimizationV2.py | 68 +++++ .../EnhancedQuantumSwarmOptimizationV3.py | 68 +++++ .../EnhancedQuantumSwarmOptimizationV4.py | 74 ++++++ .../EnhancedQuantumSwarmOptimizationV5.py | 73 ++++++ .../EnhancedQuantumSwarmOptimizationV6.py | 75 ++++++ .../EnhancedQuantumSwarmOptimizationV7.py | 68 +++++ .../EnhancedQuantumSwarmOptimizationV8.py | 69 +++++ .../EnhancedQuantumSwarmOptimizationV9.py | 71 ++++++ .../lama/EnhancedQuantumSwarmOptimizerV4.py | 87 +++++++ .../EnhancedQuantumSymbioticStrategyV5.py | 74 ++++++ .../lama/EnhancedQuantumSynergyStrategyV2.py | 78 ++++++ .../lama/EnhancedQuantumTunnelingOptimizer.py | 77 ++++++ nevergrad/optimization/lama/EnhancedRAMEDS.py | 92 +++++++ .../optimization/lama/EnhancedRAMEDSPro.py | 93 +++++++ .../optimization/lama/EnhancedRAMEDSProV2.py | 87 +++++++ .../optimization/lama/EnhancedRAMEDSv3.py | 89 +++++++ .../optimization/lama/EnhancedRAMEDSv4.py | 84 +++++++ ...efinedAdaptiveCovarianceMatrixEvolution.py | 111 +++++++++ ...edAdaptiveCovarianceMatrixEvolutionPlus.py | 111 +++++++++ ...eDifferentialEvolutionWithGradientBoost.py | 107 ++++++++ ...hancedRefinedAdaptiveDifferentialSearch.py | 81 ++++++ ...RefinedAdaptiveDifferentialSpiralSearch.py | 57 +++++ .../lama/EnhancedRefinedAdaptiveDynamicDE.py | 74 ++++++ ...inedAdaptiveDynamicDualPhaseStrategyV15.py | 82 ++++++ ...dAdaptiveDynamicExplorationOptimization.py | 163 ++++++++++++ ...namicMultiStrategyDifferentialEvolution.py | 178 +++++++++++++ ...RefinedAdaptiveFocusedEvolutionStrategy.py | 81 ++++++ .../EnhancedRefinedAdaptiveHarmonySearch.py | 81 ++++++ ...dRefinedAdaptiveMemeticDiverseOptimizer.py | 157 ++++++++++++ .../EnhancedRefinedAdaptiveMetaNetPSO_v4.py | 134 ++++++++++ .../EnhancedRefinedAdaptiveMetaNetPSO_v5.py | 134 ++++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v49.py | 91 +++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v52.py | 88 +++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v53.py | 95 +++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v54.py | 101 ++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v55.py | 101 ++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v56.py | 101 ++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v57.py | 101 ++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v58.py | 101 ++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v59.py | 131 ++++++++++ .../lama/EnhancedRefinedAdaptiveQGSA_v60.py | 131 ++++++++++ ...ncedRefinedAdaptiveSpiralGradientSearch.py | 67 +++++ ...AdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3.py | 108 ++++++++ .../EnhancedRefinedDualStrategyAdaptiveDE.py | 125 ++++++++++ ...EnhancedRefinedDynamicFireworkAlgorithm.py | 96 +++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 140 +++++++++++ ...finedEliteAdaptiveMemoryHybridOptimizer.py | 198 +++++++++++++++ ...efinedEliteDynamicMemoryHybridOptimizer.py | 197 +++++++++++++++ ...edEvolutionaryGradientHybridOptimizerV4.py | 74 ++++++ ...edRefinedGradientBoostedMemoryAnnealing.py | 142 +++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v88.py | 129 ++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v89.py | 129 ++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v90.py | 129 ++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v91.py | 129 ++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v92.py | 129 ++++++++++ .../lama/EnhancedRefinedGuidedMassQGSA_v93.py | 129 ++++++++++ ...idCovarianceMatrixDifferentialEvolution.py | 167 +++++++++++++ ...RefinedHybridDEPSOWithDynamicAdaptation.py | 152 +++++++++++ ...PhaseParticleSwarmDifferentialEvolution.py | 139 +++++++++++ .../lama/EnhancedRefinedHybridOptimizer.py | 148 +++++++++++ ...erAdaptiveSinusoidalDifferentialSwarmV3.py | 55 ++++ ...erOptimizedThermalEvolutionaryOptimizer.py | 59 +++++ .../lama/EnhancedRefinedMetaNetAQAPSO.py | 123 +++++++++ .../lama/EnhancedRefinedMetaNetAQAPSOv8.py | 123 +++++++++ .../lama/EnhancedRefinedMetaNetAQAPSOv9.py | 123 +++++++++ ...inedOptimalDynamicPrecisionOptimizerV16.py | 62 +++++ ...zedHybridAdaptiveMultiStageOptimization.py | 145 +++++++++++ .../lama/EnhancedRefinedSpatialOptimizer.py | 97 ++++++++ ...ltimateEvolutionaryGradientOptimizerV35.py | 85 +++++++ ...hancedRefinedUltimateGuidedMassQGSA_v72.py | 124 +++++++++ ...hancedRefinedUltimateGuidedMassQGSA_v73.py | 124 +++++++++ ...hancedRefinedUltimateGuidedMassQGSA_v74.py | 124 +++++++++ ...hancedRefinedUltimateGuidedMassQGSA_v76.py | 124 +++++++++ ...timatePrecisionEvolutionaryOptimizerV43.py | 82 ++++++ .../lama/EnhancedResilientAdaptivePSO.py | 71 ++++++ ...entialEvolutionWithMemoryAndEliteSearch.py | 156 ++++++++++++ .../lama/EnhancedRotationalClimbOptimizer.py | 57 +++++ ...hancedSelectiveEvolutionaryOptimizerV21.py | 81 ++++++ ...veCovarianceMatrixDifferentialEvolution.py | 91 +++++++ .../lama/EnhancedSelfAdaptiveDE.py | 56 +++++ .../lama/EnhancedSelfAdaptiveDE2.py | 56 +++++ .../EnhancedSelfAdaptiveMemeticAlgorithm.py | 111 +++++++++ ...ntialQuadraticAdaptiveEvolutionStrategy.py | 72 ++++++ .../lama/EnhancedSpatialAdaptiveEvolver.py | 70 ++++++ .../lama/EnhancedSpatialAdaptiveOptimizer.py | 69 +++++ .../EnhancedSpectralHybridOptimization.py | 85 +++++++ ...utionWithAdaptiveParametersAndCrossover.py | 83 +++++++ ...StochasticGradientDifferentialEvolution.py | 106 ++++++++ ...nhancedStochasticMetaHeuristicOptimizer.py | 104 ++++++++ .../EnhancedStrategicAdaptiveOptimizer.py | 80 ++++++ ...ancedStrategicMemoryAdaptiveStrategyV44.py | 76 ++++++ .../optimization/lama/EnhancedStrategicPSO.py | 98 ++++++++ .../optimization/lama/EnhancedStrategyDE.py | 68 +++++ ...cedSuperDynamicQuantumSwarmOptimization.py | 95 +++++++ ...dSuperDynamicQuantumSwarmOptimizationV2.py | 95 +++++++ ...dSuperDynamicQuantumSwarmOptimizationV3.py | 95 +++++++ ...dSuperDynamicQuantumSwarmOptimizationV4.py | 95 +++++++ ...dSuperDynamicQuantumSwarmOptimizationV5.py | 95 +++++++ ...dSuperDynamicQuantumSwarmOptimizationV6.py | 95 +++++++ .../lama/EnhancedSuperRefinedRAMEDS.py | 88 +++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V10.py | 86 +++++++ ...uperchargedAQAPSO_LS_DIW_AP_Refined_V27.py | 107 ++++++++ ...SuperchargedAQAPSO_LS_DIW_AP_Refined_V6.py | 85 +++++++ ...SuperchargedAQAPSO_LS_DIW_AP_Refined_V7.py | 86 +++++++ ...SuperchargedAQAPSO_LS_DIW_AP_Refined_V8.py | 86 +++++++ ...SuperchargedAQAPSO_LS_DIW_AP_Refined_V9.py | 86 +++++++ ...ancedSuperiorUltimateGuidedMassQGSA_v80.py | 124 +++++++++ ...ancedSupremeDynamicPrecisionOptimizerV1.py | 62 +++++ .../lama/EnhancedSwarmHybridOptimization.py | 130 ++++++++++ .../EnhancedTwoPhaseDynamicStrategyV39.py | 73 ++++++ ...nhancedUltimateDynamicFireworkAlgorithm.py | 98 ++++++++ ...ltimateDynamicFireworkAlgorithmImproved.py | 101 ++++++++ ...ltimateEvolutionaryGradientOptimizerV36.py | 80 ++++++ ...EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py | 84 +++++++ ...UltimateRefinedAQAPSO_LS_DIW_AP_Refined.py | 93 +++++++ ...imateRefinedAQAPSO_LS_DIW_AP_Refined_V2.py | 85 +++++++ ...imateRefinedAQAPSO_LS_DIW_AP_Refined_V3.py | 85 +++++++ ...efinedPrecisionEvolutionaryOptimizerV44.py | 78 ++++++ .../EnsembleAdaptiveEvolutionaryAlgorithm.py | 73 ++++++ .../lama/EnsembleAdaptiveMemeticOptimizer.py | 146 +++++++++++ .../lama/EnsembleAdaptiveQuantumDE.py | 130 ++++++++++ nevergrad/optimization/lama/EnsembleDE.py | 94 +++++++ .../EnsembleEvolutionaryCulturalSearch.py | 117 +++++++++ .../optimization/lama/EnsembleHybridSearch.py | 90 +++++++ .../lama/EnsembleMemeticAlgorithm.py | 104 ++++++++ .../lama/EnsembleMutationAdaptiveDE.py | 141 +++++++++++ .../EntropyEnhancedAdaptiveStrategyV61.py | 81 ++++++ .../EvolutionaryConvergenceSpiralSearch.py | 61 +++++ .../lama/EvolutionaryDynamicGradientSearch.py | 102 ++++++++ .../EvolutionaryGradientHybridOptimizer.py | 65 +++++ .../EvolutionaryGradientHybridOptimizerV2.py | 76 ++++++ .../lama/EvolutionaryGradientSearch.py | 97 ++++++++ .../EvolutionaryHarmonicFireworkAlgorithm.py | 65 +++++ .../EvolutionaryParticleSwarmOptimizer.py | 70 ++++++ nevergrad/optimization/lama/ExDADe.py | 73 ++++++ nevergrad/optimization/lama/FEDE.py | 64 +++++ nevergrad/optimization/lama/FTADEEM.py | 63 +++++ ...ireworkAlgorithmWithAdaptiveLocalSearch.py | 96 +++++++ ...ncedDynamicLocalSearchFireworkAlgorithm.py | 99 ++++++++ ...ireworkAlgorithmWithAdaptiveLocalSearch.py | 112 +++++++++ ...hancedRefinedUltimateGuidedMassQGSA_v75.py | 124 +++++++++ ...timizedEnhancedDynamicFireworkAlgorithm.py | 91 +++++++ ...EnhancedDynamicFireworkAlgorithmRefined.py | 91 +++++++ .../lama/FineTunedCohortDiversityOptimizer.py | 71 ++++++ .../lama/FineTunedFocusedAdaptiveOptimizer.py | 69 +++++ .../FineTunedProgressiveAdaptiveSearch.py | 80 ++++++ .../lama/FocusedBalancedAdaptivePSO.py | 78 ++++++ .../lama/FocusedEvolutionStrategy.py | 60 +++++ ...ractionalOrderClusterHybridOptimization.py | 118 +++++++++ ...EnhancedHybridMetaHeuristicOptimizerV13.py | 93 +++++++ nevergrad/optimization/lama/GEEA.py | 68 +++++ nevergrad/optimization/lama/GESA.py | 62 +++++ nevergrad/optimization/lama/GGAES.py | 75 ++++++ nevergrad/optimization/lama/GIDE.py | 71 ++++++ .../optimization/lama/GaussianAdaptivePSO.py | 77 ++++++ .../lama/GaussianEnhancedAdaptivePSO.py | 78 ++++++ .../GradientAssistedDifferentialCrossover.py | 70 ++++++ .../lama/GradientBalancedEvolutionStrategy.py | 101 ++++++++ ...BasedAdaptiveCovarianceMatrixAdaptation.py | 135 ++++++++++ .../lama/GradientBoostedMemoryAnnealing.py | 137 ++++++++++ .../lama/GradientEnhancedAdaptiveAnnealing.py | 125 ++++++++++ ...ntEnhancedAdaptiveDifferentialEvolution.py | 135 ++++++++++ .../lama/GradientEstimationSearch.py | 53 ++++ .../lama/GradientGuidedClusterSearch.py | 68 +++++ .../GradientGuidedDifferentialEvolution.py | 57 +++++ .../lama/GradientGuidedEvolutionStrategy.py | 57 +++++ .../lama/GradientGuidedHybridPSO.py | 68 +++++ ...GradientInformedAdaptiveDirectionSearch.py | 74 ++++++ .../lama/GradientInformedAdaptiveSearch.py | 72 ++++++ .../lama/GradientInformedParticleOptimizer.py | 63 +++++ .../GradientSpiralDifferentialEnhancerV5.py | 76 ++++++ .../lama/GravitationalSwarmIntelligence.py | 63 +++++ .../lama/GreedyDiversityMultiStrategySADE.py | 125 ++++++++++ .../lama/GreedyDynamicMultiStrategyDE.py | 116 +++++++++ .../lama/GuidedEvolutionStrategy.py | 60 +++++ .../lama/GuidedMutationOptimizer.py | 66 +++++ nevergrad/optimization/lama/HADE.py | 62 +++++ nevergrad/optimization/lama/HADEEM.py | 76 ++++++ nevergrad/optimization/lama/HADEMI.py | 94 +++++++ nevergrad/optimization/lama/HAVCDE.py | 71 ++++++ nevergrad/optimization/lama/HEAS.py | 89 +++++++ .../lama/HarmonyFireworkOptimizer.py | 54 ++++ .../lama/HarmonyTabuOptimization.py | 55 ++++ .../lama/HierarchicalAdaptiveAnnealing.py | 108 ++++++++ ...hicalAdaptiveCovarianceMatrixAdaptation.py | 112 +++++++++ .../lama/HierarchicalAdaptiveSearch.py | 118 +++++++++ ...rsityEnhancedCovarianceMatrixAdaptation.py | 146 +++++++++++ ...ghPerformanceAdaptiveDifferentialSearch.py | 132 ++++++++++ nevergrad/optimization/lama/HyGDAE.py | 73 ++++++ ...veCovarianceMatrixDifferentialEvolution.py | 122 +++++++++ ...bridAdaptiveCrossoverElitistStrategyV10.py | 80 ++++++ .../optimization/lama/HybridAdaptiveDE.py | 167 +++++++++++++ .../HybridAdaptiveDifferentialEvolution.py | 80 ++++++ ...tialEvolutionWithDynamicParameterTuning.py | 124 +++++++++ ...entialEvolutionWithMemoryAndEliteSearch.py | 150 +++++++++++ ...HybridAdaptiveDifferentialQuantumSearch.py | 159 ++++++++++++ .../lama/HybridAdaptiveDifferentialSwarm.py | 97 ++++++++ ...veDiversityMaintainingGradientEvolution.py | 125 ++++++++++ .../lama/HybridAdaptiveDualPhaseStrategyV6.py | 66 +++++ .../HybridAdaptiveEvolutionaryOptimizer.py | 160 ++++++++++++ .../HybridAdaptiveExplorationOptimizer.py | 166 +++++++++++++ .../HybridAdaptiveGeneticSwarmOptimizer.py | 139 +++++++++++ .../HybridAdaptiveGeneticSwarmOptimizerV2.py | 134 ++++++++++ .../lama/HybridAdaptiveGradientPSO.py | 90 +++++++ ...bridAdaptiveHarmonicFireworksTabuSearch.py | 111 +++++++++ .../lama/HybridAdaptiveMemeticAlgorithm.py | 97 ++++++++ ...DifferentialEvolutionWithDynamicElitism.py | 128 ++++++++++ .../lama/HybridAdaptiveMemeticOptimizerV4.py | 139 +++++++++++ .../lama/HybridAdaptiveMemoryAnnealing.py | 72 ++++++ .../lama/HybridAdaptiveMultiPhaseEvolution.py | 104 ++++++++ .../HybridAdaptiveMultiPhaseEvolutionV2.py | 104 ++++++++ .../lama/HybridAdaptiveNesterovSynergy.py | 67 +++++ .../lama/HybridAdaptiveOptimization.py | 87 +++++++ ...AdaptiveOrthogonalDifferentialEvolution.py | 67 +++++ ...idAdaptiveParallelDifferentialEvolution.py | 67 +++++ ...bridAdaptiveParameterTuningOptimization.py | 145 +++++++++++ .../lama/HybridAdaptivePopulationDE.py | 82 ++++++ .../lama/HybridAdaptiveQuantumLevySearch.py | 156 ++++++++++++ ...tiveQuantumMemeticDifferentialEvolution.py | 171 +++++++++++++ .../HybridAdaptiveQuantumMemeticOptimizer.py | 110 ++++++++ .../lama/HybridAdaptiveQuantumPSO.py | 78 ++++++ .../optimization/lama/HybridAdaptiveSearch.py | 86 +++++++ .../lama/HybridAdaptiveSearchStrategy.py | 77 ++++++ ...aptiveSelfAdaptiveDifferentialEvolution.py | 144 +++++++++++ .../HybridAdaptiveSimulatedAnnealingDE.py | 118 +++++++++ .../HybridCosineSineDualPhaseStrategyV10.py | 83 +++++++ ...anceMatrixAdaptionDifferentialEvolution.py | 115 +++++++++ ...ceMatrixAdaptiveDifferentialEvolutionV2.py | 124 +++++++++ ...rixDifferentialEvolutionWithLevyFlights.py | 155 ++++++++++++ .../HybridCulturalDifferentialEvolution.py | 125 ++++++++++ nevergrad/optimization/lama/HybridDEPSO.py | 144 +++++++++++ .../lama/HybridDEPSOWithDynamicAdaptation.py | 142 +++++++++++ .../lama/HybridDifferentialEvolution.py | 56 +++++ ...idDifferentialEvolutionMemeticOptimizer.py | 97 ++++++++ ...erentialEvolutionParticleSwarmOptimizer.py | 100 ++++++++ ...ridDifferentialEvolutionWithLocalSearch.py | 95 +++++++ .../lama/HybridDifferentialLocalSearch.py | 62 +++++ .../lama/HybridDualLocalOptimizationDE.py | 178 +++++++++++++ ...PhaseParticleSwarmDifferentialEvolution.py | 144 +++++++++++ .../lama/HybridDynamicAdaptiveDE.py | 123 +++++++++ ...dDynamicAdaptiveExplorationOptimization.py | 166 +++++++++++++ .../lama/HybridDynamicClusterOptimization.py | 153 ++++++++++++ .../HybridDynamicCuckooHarmonyAlgorithm.py | 64 +++++ .../HybridDynamicDifferentialEvolution.py | 90 +++++++ ...ridDynamicDifferentialEvolutionGradient.py | 118 +++++++++ .../lama/HybridDynamicElitistDE.py | 123 +++++++++ ...ridDynamicQuantumLevyDifferentialSearch.py | 159 ++++++++++++ .../optimization/lama/HybridDynamicSearch.py | 161 ++++++++++++ ...idEnhancedAdaptiveDifferentialEvolution.py | 148 +++++++++++ ...EnhancedDualPhaseAdaptiveOptimizationV6.py | 146 +++++++++++ ...dEnhancedGravitationalSwarmIntelligence.py | 94 +++++++ .../HybridEvolutionaryAnnealingOptimizer.py | 54 ++++ .../lama/HybridEvolutionaryOptimization.py | 104 ++++++++ .../lama/HybridEvolvingAdaptiveStrategyV28.py | 76 ++++++ ...idExploitationExplorationGradientSearch.py | 60 +++++ .../lama/HybridGradientAnnealingWithMemory.py | 125 ++++++++++ ...ybridGradientBoostedMemoryAnnealingPlus.py | 175 +++++++++++++ .../HybridGradientCrossoverOptimization.py | 79 ++++++ .../HybridGradientDifferentialEvolution.py | 62 +++++ .../lama/HybridGradientEvolution.py | 83 +++++++ .../lama/HybridGradientMemoryAnnealing.py | 129 ++++++++++ .../lama/HybridGradientMemoryAnnealingV2.py | 125 ++++++++++ .../lama/HybridGradientMemoryAnnealingV3.py | 125 ++++++++++ .../HybridGradientMemorySimulatedAnnealing.py | 162 ++++++++++++ .../optimization/lama/HybridGradientPSO.py | 74 ++++++ .../lama/HybridGuidedEvolutionaryOptimizer.py | 89 +++++++ .../lama/HybridMemoryAdaptiveDE.py | 124 +++++++++ .../lama/HybridMultiDimensionalAnnealing.py | 125 ++++++++++ nevergrad/optimization/lama/HybridPSO_DE.py | 153 ++++++++++++ .../lama/HybridPSO_DE_GradientOptimizer.py | 139 +++++++++++ .../optimization/lama/HybridParticleDE.py | 74 ++++++ .../optimization/lama/HybridParticleDE_v2.py | 74 ++++++ .../optimization/lama/HybridParticleDE_v3.py | 74 ++++++ ...icleSwarmDifferentialEvolutionOptimizer.py | 132 ++++++++++ .../HybridQuantumAdaptiveMemeticSearch.py | 145 +++++++++++ .../HybridQuantumDifferentialEvolution.py | 164 ++++++++++++ ...hAdaptiveMemoryAndElitistDynamicRestart.py | 163 ++++++++++++ ...olutionWithDynamicElitismAndLocalSearch.py | 183 ++++++++++++++ ...alEvolutionWithDynamicLearningAndMemory.py | 167 +++++++++++++ ...umDifferentialParticleSwarmOptimization.py | 133 ++++++++++ ...bridQuantumEnhancedMultiPhaseAdaptiveDE.py | 136 ++++++++++ .../lama/HybridQuantumEvolution.py | 190 ++++++++++++++ .../lama/HybridQuantumGradientEvolution.py | 118 +++++++++ .../lama/HybridQuantumLevyAdaptiveSwarmV2.py | 165 ++++++++++++ .../lama/HybridQuantumMemeticOptimization.py | 122 +++++++++ .../HybridQuasiRandomDEGradientAnnealing.py | 142 +++++++++++ ...uasiRandomGradientDifferentialEvolution.py | 123 +++++++++ ...alEvolutionWithQuasiRandomGradientBoost.py | 122 +++++++++ ...HybridSelfAdaptiveDifferentialEvolution.py | 82 ++++++ .../lama/HyperAdaptiveConvergenceStrategy.py | 68 +++++ .../lama/HyperAdaptiveGradientRAMEDS.py | 85 +++++++ ...erAdaptiveHybridDEPSOwithDynamicRestart.py | 149 +++++++++++ .../HyperAdaptiveMemoryGuidedStrategyV74.py | 74 ++++++ .../lama/HyperAdaptivePrecisionOptimizer.py | 59 +++++ ...yperAdaptiveSinusoidalDifferentialSwarm.py | 57 +++++ .../lama/HyperAdaptiveStrategyDE.py | 73 ++++++ ...perAdvancedDynamicPrecisionOptimizerV41.py | 55 ++++ ...yperEvolvedDynamicPrecisionOptimizerV48.py | 59 +++++ .../lama/HyperEvolvedDynamicRAMEDS.py | 78 ++++++ .../optimization/lama/HyperEvolvedRAMEDS.py | 85 +++++++ .../HyperFocusedAdaptiveElitistStrategyV5.py | 81 ++++++ .../optimization/lama/HyperOptimalRAMEDS.py | 82 ++++++ ...ptimalStrategicEvolutionaryOptimizerV58.py | 77 ++++++ ...HyperOptimizedDynamicPrecisionOptimizer.py | 60 +++++ ...erOptimizedDynamicPrecisionOptimizerV12.py | 57 +++++ ...erOptimizedDynamicPrecisionOptimizerV42.py | 57 +++++ ...erOptimizedDynamicPrecisionOptimizerV43.py | 57 +++++ ...erOptimizedDynamicPrecisionOptimizerV57.py | 58 +++++ ...timizedEvolutionaryGradientOptimizerV61.py | 80 ++++++ .../HyperOptimizedGradientEnhancedRAMEDS.py | 85 +++++++ ...dMultiStrategicEvolutionaryOptimizerV47.py | 79 ++++++ ...dMultiStrategicEvolutionaryOptimizerV48.py | 80 ++++++ .../optimization/lama/HyperOptimizedRAMEDS.py | 75 ++++++ ...rOptimizedSpiralDifferentialOptimizerV8.py | 75 ++++++ ...erOptimizedThermalEvolutionaryOptimizer.py | 57 +++++ .../lama/HyperOptimizedUltraRefinedRAMEDS.py | 79 ++++++ .../lama/HyperPreciseEvolutionaryOptimizer.py | 59 +++++ .../HyperPrecisionEvolutionaryOptimizerV23.py | 81 ++++++ .../lama/HyperQuantumConvergenceOptimizer.py | 55 ++++ .../HyperQuantumStateCrossoverOptimization.py | 93 +++++++ nevergrad/optimization/lama/HyperRAMEDS.py | 85 +++++++ ...nedAdaptiveDynamicPrecisionOptimizerV52.py | 54 ++++ ...rRefinedAdaptiveGuidedMutationOptimizer.py | 79 ++++++ .../HyperRefinedAdaptivePrecisionOptimizer.py | 62 +++++ .../HyperRefinedAdaptivePrecisionSearch.py | 57 +++++ ...HyperRefinedDynamicPrecisionOptimizerV3.py | 59 +++++ ...yperRefinedDynamicPrecisionOptimizerV49.py | 53 ++++ .../lama/HyperRefinedEnhancedRAMEDS.py | 84 +++++++ .../HyperRefinedQuantumVelocityOptimizer.py | 78 ++++++ .../lama/HyperSpiralDifferentialClimber.py | 73 ++++++ .../lama/HyperSpiralDifferentialClimberV2.py | 72 ++++++ nevergrad/optimization/lama/IADEA.py | 75 ++++++ nevergrad/optimization/lama/IAGEA.py | 74 ++++++ nevergrad/optimization/lama/IALNF.py | 70 ++++++ nevergrad/optimization/lama/IASDD.py | 62 +++++ ...mprovedAdaptiveCovarianceGradientSearch.py | 156 ++++++++++++ .../ImprovedAdaptiveDifferentialEvolution.py | 178 +++++++++++++ ...namicMultiStrategyDifferentialEvolution.py | 159 ++++++++++++ .../ImprovedAdaptiveEliteGuidedRestartDE.py | 106 ++++++++ ...vedAdaptiveEnhancedQuantumHarmonySearch.py | 58 +++++ ...rovedAdaptiveEvolutionaryHyperHeuristic.py | 133 ++++++++++ ...daptiveExplorationExploitationAlgorithm.py | 103 ++++++++ ...rovedAdaptiveHarmonyMemeticAlgorithmV17.py | 86 +++++++ ...ptiveHarmonySearchWithCuckooInspiration.py | 63 +++++ .../ImprovedAdaptiveHybridMetaOptimizer.py | 122 +++++++++ .../ImprovedAdaptiveHybridOptimization.py | 163 ++++++++++++ .../lama/ImprovedAdaptiveHybridOptimizer.py | 126 ++++++++++ .../ImprovedAdaptiveHybridSearchOptimizer.py | 160 ++++++++++++ .../lama/ImprovedAdaptiveLevyHarmonySearch.py | 65 +++++ .../ImprovedAdaptiveMemeticHybridOptimizer.py | 156 ++++++++++++ .../ImprovedAdaptiveMultiOperatorSearch.py | 146 +++++++++++ ...ptiveMultiStrategyDifferentialEvolution.py | 135 ++++++++++ .../ImprovedAdaptiveMultiStrategyOptimizer.py | 169 +++++++++++++ ...provedAdaptiveParticleSwarmOptimization.py | 67 +++++ ...rovedAdaptivePopulationMemeticOptimizer.py | 103 ++++++++ ...rentialEvolutionWithDynamicHybridSearch.py | 152 +++++++++++ .../lama/ImprovedAdaptiveQuantumEntropyDE.py | 152 +++++++++++ .../ImprovedAdaptiveQuantumLevyOptimizer.py | 199 +++++++++++++++ .../lama/ImprovedAdaptiveQuantumPSO.py | 111 +++++++++ ...mprovedAdaptiveQuantumSwarmOptimization.py | 74 ++++++ ...rovedAdvancedHybridAdaptiveOptimization.py | 145 +++++++++++ ...edBalancedQuantumLevyDifferentialSearch.py | 158 ++++++++++++ ...ooperativeAdaptiveEvolutionaryOptimizer.py | 97 ++++++++ ...vedCulturalDifferentialMemeticEvolution.py | 130 ++++++++++ .../ImprovedCulturalEvolutionaryOptimizer.py | 117 +++++++++ ...provedDiversifiedHarmonySearchOptimizer.py | 105 ++++++++ ...rovedDualPhaseAdaptiveMemoryStrategyV58.py | 81 ++++++ ...iveParticleSwarmDifferentialEvolutionV1.py | 139 +++++++++++ ...edDynamicAdaptiveDEPSOWithEliteMemoryV2.py | 166 +++++++++++++ ...dDynamicAdaptiveExplorationOptimization.py | 166 +++++++++++++ .../ImprovedDynamicAdaptiveHybridDEPSO.py | 149 +++++++++++ ...namicAdaptiveHybridDEPSOWithEliteMemory.py | 174 +++++++++++++ .../ImprovedDynamicHarmonyFireworksSearch.py | 104 ++++++++ ...ovedDynamicHybridDEPSOWithEliteMemoryV3.py | 168 +++++++++++++ ...ImprovedDynamicQuantumSwarmOptimization.py | 100 ++++++++ ...dEliteAdaptiveCrowdingHybridOptimizerV2.py | 195 +++++++++++++++ ...iteAdaptiveMemeticDifferentialEvolution.py | 106 ++++++++ ...rovedEliteAdaptiveMemoryHybridOptimizer.py | 170 +++++++++++++ .../ImprovedEliteGuidedHybridAdaptiveDE.py | 128 ++++++++++ .../lama/ImprovedEliteGuidedMutationDE.py | 123 +++++++++ .../lama/ImprovedEliteGuidedMutationDE_v2.py | 97 ++++++++ ...liteQuantumDifferentialMemeticOptimizer.py | 156 ++++++++++++ ...rentialEvolutionWithDynamicParametersV6.py | 84 +++++++ ...dEnhancedAdaptiveDynamicHarmonySearchV4.py | 76 ++++++ ...ySearchWithEnhancedHybridInspirationV19.py | 100 ++++++++ ...ovedEnhancedAdaptiveLevyHarmonySearchV4.py | 78 ++++++ ...ImprovedEnhancedAdaptiveMetaNetAQAPSOv4.py | 123 +++++++++ ...ncedAdvancedQuantumSwarmOptimizationV15.py | 89 +++++++ ...cedDifferentialEvolutionLocalSearch_v54.py | 104 ++++++++ ...cedDifferentialEvolutionLocalSearch_v61.py | 110 ++++++++ ...cedDifferentialEvolutionLocalSearch_v65.py | 109 ++++++++ ...versifiedGravitationalSwarmOptimization.py | 97 ++++++++ ...vedEnhancedDynamicDifferentialEvolution.py | 116 +++++++++ ...ImprovedEnhancedDynamicHarmonyAlgorithm.py | 87 +++++++ ...mprovedEnhancedDynamicLevyHarmonySearch.py | 67 +++++ ...ncedDynamicLocalSearchFireworkAlgorithm.py | 99 ++++++++ ...EnhancedDynamicQuantumSwarmOptimization.py | 101 ++++++++ ...ImprovedEnhancedEliteGuidedMassQGSA_v84.py | 125 ++++++++++ ...olutionaryDifferentialSwarmOptimizerV11.py | 110 ++++++++ ...ovedEnhancedEvolutionaryFireworksSearch.py | 74 ++++++ ...edEnhancedFireworkAlgorithmOptimization.py | 71 ++++++ ...ireworkAlgorithmWithAdaptiveLocalSearch.py | 94 +++++++ ...edEnhancedGradientDifferentialEvolution.py | 121 +++++++++ .../lama/ImprovedEnhancedHarmonySearchOB.py | 75 ++++++ ...SearchWithAdaptiveLevyFlightInspiration.py | 100 ++++++++ ...rovedEnhancedMemeticHarmonyOptimization.py | 124 +++++++++ ...umCovarianceMatrixDifferentialEvolution.py | 194 +++++++++++++++ .../ImprovedEnhancedQuantumHarmonySearch.py | 43 ++++ ...ImprovedEnhancedRefinedAdaptiveQGSA_v61.py | 131 ++++++++++ .../optimization/lama/ImprovedEnhancedSADE.py | 104 ++++++++ ...nhancedStochasticMetaHeuristicOptimizer.py | 104 ++++++++ .../lama/ImprovedEnsembleMemeticOptimizer.py | 146 +++++++++++ .../lama/ImprovedFireworkAlgorithm.py | 77 ++++++ ...ovedHybridAdaptiveDifferentialEvolution.py | 82 ++++++ ...ovedHybridAdaptiveGeneticSwarmOptimizer.py | 139 +++++++++++ ...bridAdaptiveHarmonicFireworksTabuSearch.py | 111 +++++++++ .../lama/ImprovedHybridCMAESDE.py | 183 ++++++++++++++ .../lama/ImprovedHybridGeneticPSO.py | 139 +++++++++++ .../lama/ImprovedHybridPSODEOptimizer.py | 96 +++++++ ...mprovedIterativeAdaptiveGradientEvolver.py | 99 ++++++++ ...ovedMetaDynamicQuantumSwarmOptimization.py | 92 +++++++ .../lama/ImprovedMultiOperatorSearch.py | 147 +++++++++++ .../ImprovedMultiStrategySelfAdaptiveDE.py | 128 ++++++++++ ...vedOppositionBasedDifferentialEvolution.py | 89 +++++++ ...rovedPrecisionAdaptiveEvolutiveStrategy.py | 70 ++++++ ...fferentialEvolutionWithAdaptiveLearning.py | 168 +++++++++++++ ...tumEnhancedDynamicDifferentialEvolution.py | 186 ++++++++++++++ .../lama/ImprovedQuantumHarmonySearch.py | 51 ++++ ...ImprovedQuantumLevyAdaptiveHybridSearch.py | 158 ++++++++++++ .../lama/ImprovedQuantumSimulatedAnnealing.py | 44 ++++ ...dAdaptiveDynamicExplorationOptimization.py | 166 +++++++++++++ ...rovedRefinedAdaptiveMultiOperatorSearch.py | 146 +++++++++++ ...veEnhancedAdaptiveDifferentialEvolution.py | 166 +++++++++++++ ...hancedDynamicAdaptiveHybridOptimization.py | 138 ++++++++++ ...CovarianceMatrixDifferentialEvolutionV4.py | 189 ++++++++++++++ ...vedRefinedMultiPhaseAdaptiveHybridDEPSO.py | 187 ++++++++++++++ ...provedSelfAdaptiveDifferentialEvolution.py | 96 +++++++ .../ImprovedSelfAdaptiveHybridOptimizer.py | 132 ++++++++++ ...iveOppositionBasedDifferentialEvolution.py | 89 +++++++ ...ImprovedUnifiedAdaptiveMemeticOptimizer.py | 155 ++++++++++++ .../lama/IncrementalCrossoverOptimization.py | 72 ++++++ .../IntelligentDynamicDualPhaseStrategyV39.py | 83 +++++++ .../IntelligentEvolvingAdaptiveStrategyV34.py | 70 ++++++ .../lama/IntelligentPerturbationSearch.py | 59 +++++ .../IterativeAdaptiveDifferentialEvolution.py | 48 ++++ ...erativeProgressiveDifferentialEvolution.py | 44 ++++ nevergrad/optimization/lama/LADESA.py | 95 +++++++ nevergrad/optimization/lama/LAOS.py | 55 ++++ ...arningAdaptiveMemoryEnhancedStrategyV42.py | 82 ++++++ .../lama/LearningAdaptiveStrategyV24.py | 81 ++++++ ...evyEnhancedAdaptiveSimulatedAnnealingDE.py | 144 +++++++++++ nevergrad/optimization/lama/MADE.py | 75 ++++++ nevergrad/optimization/lama/MIDEAT.py | 54 ++++ nevergrad/optimization/lama/MSADE.py | 74 ++++++ nevergrad/optimization/lama/MSEAS.py | 70 ++++++ .../MemeticAdaptiveDifferentialEvolution.py | 89 +++++++ .../MemeticDifferentialEvolutionOptimizer.py | 101 ++++++++ ...tDifferentialEvolutionWithDynamicFandCR.py | 124 +++++++++ ...emeticEnhancedParticleSwarmOptimization.py | 86 +++++++ .../MemeticSpatialDifferentialEvolution.py | 104 ++++++++ .../lama/MemoryBasedSimulatedAnnealing.py | 54 ++++ .../lama/MemoryEnhancedAdaptiveAnnealing.py | 107 ++++++++ ...moryEnhancedAdaptiveMultiPhaseAnnealing.py | 107 ++++++++ ...AdaptiveMultiPhaseAnnealingWithGradient.py | 104 ++++++++ .../MemoryEnhancedDynamicHybridOptimizer.py | 158 ++++++++++++ ...emoryGuidedAdaptiveDualPhaseStrategyV40.py | 84 +++++++ .../lama/MemoryHybridAdaptiveDE.py | 127 ++++++++++ .../lama/MetaDynamicPrecisionOptimizerV1.py | 58 +++++ .../MetaDynamicQuantumSwarmOptimization.py | 92 +++++++ .../optimization/lama/MetaHarmonicSearch.py | 47 ++++ .../optimization/lama/MetaHarmonicSearch2.py | 54 ++++ nevergrad/optimization/lama/MetaNetAQAPSO.py | 123 +++++++++ .../lama/MomentumGradientExploration.py | 70 ++++++ .../lama/MultiFacetAdaptiveSearch.py | 68 +++++ .../lama/MultiFocalAdaptiveOptimizer.py | 64 +++++ ...ayeredAdaptiveCovarianceMatrixEvolution.py | 150 +++++++++++ ...MultiModalMemoryEnhancedHybridOptimizer.py | 201 +++++++++++++++ ...ctiveAdvancedEnhancedGuidedMassQGSA_v66.py | 105 ++++++++ ...rovedAdvancedEnhancedGuidedMassQGSA_v67.py | 105 ++++++++ .../optimization/lama/MultiOperatorSearch.py | 117 +++++++++ .../optimization/lama/MultiPhaseAdaptiveDE.py | 137 ++++++++++ ...MultiPhaseAdaptiveDifferentialEvolution.py | 146 +++++++++++ ...ltiPhaseAdaptiveExplorationOptimization.py | 115 +++++++++ .../lama/MultiPhaseAdaptiveHybridDEPSO.py | 187 ++++++++++++++ .../lama/MultiPhaseDiversityAdaptiveDE.py | 164 ++++++++++++ .../MultiPopulationAdaptiveMemorySearch.py | 158 ++++++++++++ .../MultiScaleAdaptiveHybridOptimization.py | 115 +++++++++ .../lama/MultiScaleGradientExploration.py | 69 +++++ .../lama/MultiScaleGradientSearch.py | 79 ++++++ .../lama/MultiScaleQuadraticSearch.py | 78 ++++++ .../lama/MultiStageAdaptiveSearch.py | 62 +++++ ...ultiStageHybridGradientBoostedAnnealing.py | 175 +++++++++++++ .../MultiStrategyAdaptiveGradientEvolution.py | 125 ++++++++++ ...ategyAdaptiveSwarmDifferentialEvolution.py | 58 +++++ .../MultiStrategyDifferentialEvolution.py | 171 +++++++++++++ .../lama/MultiStrategyMemeticAlgorithm.py | 86 +++++++ ...ultiStrategyQuantumCognitionOptimizerV9.py | 82 ++++++ .../lama/MultiStrategyQuantumLevyOptimizer.py | 199 +++++++++++++++ .../lama/MultiStrategySelfAdaptiveDE.py | 114 +++++++++ .../lama/MultiSwarmAdaptiveDE_PSO.py | 125 ++++++++++ ...ovelAdaptiveHarmonicFireworksTabuSearch.py | 103 ++++++++ .../lama/NovelDynamicFireworkAlgorithm.py | 81 ++++++ ...ncedDiversifiedMetaHeuristicAlgorithmV2.py | 85 +++++++ .../lama/NovelHarmonyTabuSearch.py | 91 +++++++ nevergrad/optimization/lama/ODEMF.py | 86 +++++++ nevergrad/optimization/lama/ORAMED.py | 85 +++++++ .../lama/OctopusSwarmAlgorithm.py | 50 ++++ .../OptimalAdaptiveDifferentialEvolution.py | 62 +++++ .../lama/OptimalAdaptiveDifferentialSearch.py | 85 +++++++ .../OptimalAdaptiveMutationEnhancedSearch.py | 93 +++++++ ...timalAdaptiveSwarmDifferentialEvolution.py | 52 ++++ .../optimization/lama/OptimalBalanceSearch.py | 61 +++++ .../lama/OptimalCohortDiversityOptimizer.py | 72 ++++++ .../optimization/lama/OptimalConvergenceDE.py | 68 +++++ ...ptimalDynamicAdaptiveEvolutionOptimizer.py | 60 +++++ .../lama/OptimalDynamicMutationSearch.py | 79 ++++++ .../OptimalDynamicPrecisionOptimizerV14.py | 60 +++++ .../OptimalDynamicPrecisionOptimizerV21.py | 59 +++++ .../lama/OptimalEnhancedRAMEDS.py | 84 +++++++ .../lama/OptimalEnhancedStrategyDE.py | 66 +++++ ...alEvolutionaryGradientHybridOptimizerV8.py | 80 ++++++ ...OptimalEvolutionaryGradientOptimizerV11.py | 79 ++++++ ...OptimalEvolutionaryGradientOptimizerV25.py | 79 ++++++ ...malHybridDifferentialAnnealingOptimizer.py | 52 ++++ .../lama/OptimalHyperStrategicOptimizerV51.py | 78 ++++++ ...imalPrecisionDynamicAdaptationOptimizer.py | 62 +++++ ...ptimalPrecisionEvolutionaryOptimizerV37.py | 78 ++++++ ...alPrecisionEvolutionaryThermalOptimizer.py | 58 +++++ .../lama/OptimalPrecisionHybridSearchV3.py | 70 ++++++ .../lama/OptimalQuantumSynergyStrategy.py | 78 ++++++ ...ptimalRefinedEnhancedUltraRefinedRAMEDS.py | 84 +++++++ ...ptimalSelectiveEvolutionaryOptimizerV20.py | 79 ++++++ .../lama/OptimalSmartRefinedRAMEDS.py | 86 +++++++ .../lama/OptimalSpiralCentroidSearch.py | 59 +++++ .../lama/OptimalStrategicAdaptiveOptimizer.py | 84 +++++++ .../lama/OptimalStrategicHybridDE.py | 80 ++++++ .../lama/OptimallyBalancedQuantumStrategy.py | 80 ++++++ .../OptimizedAdaptiveDifferentialClimber.py | 68 +++++ .../OptimizedAdaptiveDualPhaseStrategy.py | 81 ++++++ .../OptimizedAdaptiveDualPhaseStrategyV4.py | 80 ++++++ .../OptimizedAdaptiveDynamicStrategyV34.py | 70 ++++++ .../OptimizedAdaptiveGlobalLocalSearch.py | 71 ++++++ ...edAdaptiveQuantumGradientHybridStrategy.py | 92 +++++++ ...aptiveSimulatedAnnealingWithSmartMemory.py | 159 ++++++++++++ ...OptimizedBalancedDualStrategyAdaptiveDE.py | 127 ++++++++++ .../OptimizedConvergenceIslandStrategy.py | 113 +++++++++ .../OptimizedConvergentAdaptiveEvolver.py | 81 ++++++ .../OptimizedCrossoverElitistStrategyV8.py | 78 ++++++ .../lama/OptimizedDifferentialEvolution.py | 52 ++++ ...edDualPhaseAdaptiveHybridOptimizationV4.py | 146 +++++++++++ .../lama/OptimizedDualStrategyAdaptiveDE.py | 125 ++++++++++ ...OptimizedDynamicAdaptiveHybridOptimizer.py | 58 +++++ .../OptimizedDynamicDualPhaseStrategyV13.py | 85 +++++++ ...ientBoostedMemorySimulatedAnnealingPlus.py | 141 +++++++++++ ...ynamicGradientBoostedSimulatedAnnealing.py | 157 ++++++++++++ ...ptimizedDynamicQuantumSwarmOptimization.py | 77 ++++++ .../lama/OptimizedDynamicRestartAdaptiveDE.py | 144 +++++++++++ ...mizedEliteAdaptiveMemoryHybridOptimizer.py | 197 +++++++++++++++ .../OptimizedEnhancedAdaptiveMetaNetAQAPSO.py | 123 +++++++++ ...OptimizedEnhancedDualStrategyAdaptiveDE.py | 127 ++++++++++ ...timizedEnhancedDynamicFireworkAlgorithm.py | 91 +++++++ .../lama/OptimizedEvolutiveStrategy.py | 54 ++++ ...OptimizedExplorationConvergenceStrategy.py | 96 +++++++ .../OptimizedGlobalStructureAwareEvolver.py | 93 +++++++ .../lama/OptimizedGradientBalancedPSO.py | 77 ++++++ ...oostedMemoryAnnealingWithAdaptiveSearch.py | 159 ++++++++++++ ...timizedGradientMemorySimulatedAnnealing.py | 175 +++++++++++++ ...imizedHybridAdaptiveDualPhaseStrategyV7.py | 79 ++++++ ...zedHybridAdaptiveMultiStageOptimization.py | 139 +++++++++++ .../OptimizedHybridExplorationOptimization.py | 163 ++++++++++++ .../lama/OptimizedHybridSearch.py | 65 +++++ .../lama/OptimizedHybridStrategyDE.py | 74 ++++++ .../OptimizedHyperStrategicOptimizerV53.py | 79 ++++++ .../OptimizedIslandEvolutionStrategyV4.py | 97 ++++++++ ...imizedMemoryEnhancedAdaptiveStrategyV70.py | 96 +++++++ ...ptimizedMemoryGuidedAdaptiveStrategyV81.py | 82 ++++++ ...izedMemoryResponsiveAdaptiveStrategyV78.py | 70 ++++++ .../lama/OptimizedParallelStrategyDE.py | 64 +++++ .../OptimizedPrecisionAdaptiveStrategy.py | 73 ++++++ ...ecisionTunedCrossoverElitistStrategyV13.py | 81 ++++++ ...CovarianceMatrixDifferentialEvolutionV3.py | 194 +++++++++++++++ .../OptimizedQuantumFluxDifferentialSwarm.py | 55 ++++ ...dQuantumGradientExplorationOptimization.py | 219 ++++++++++++++++ .../lama/OptimizedQuantumHarmonySearch.py | 43 ++++ .../lama/OptimizedQuantumHybridDEPSO.py | 162 ++++++++++++ .../OptimizedQuantumLevyDifferentialSearch.py | 156 ++++++++++++ .../optimization/lama/OptimizedRAMEDS.py | 86 +++++++ ...AdaptiveEnhancedGradientGuidedHybridPSO.py | 73 ++++++ .../OptimizedRefinedAdaptiveHybridSearch.py | 71 ++++++ ...OptimizedRefinedAdaptiveMultiStrategyDE.py | 162 ++++++++++++ .../OptimizedRefinedAdaptiveRefinementPSO.py | 86 +++++++ .../lama/OptimizedRefinedEnhancedRAMEDSv5.py | 107 ++++++++ ...imizedRefinedMemoryDualPhaseStrategyV65.py | 98 ++++++++ ...efinedPrecisionEvolutionaryOptimizerV45.py | 80 ++++++ ...cillatoryCrossoverDifferentialEvolution.py | 51 ++++ nevergrad/optimization/lama/PADE.py | 79 ++++++ nevergrad/optimization/lama/PAMDMDESM.py | 96 +++++++ nevergrad/optimization/lama/PDEAF.py | 62 +++++ nevergrad/optimization/lama/PGDE.py | 79 ++++++ nevergrad/optimization/lama/PMFSA.py | 60 +++++ nevergrad/optimization/lama/PPDE.py | 70 ++++++ nevergrad/optimization/lama/PWDE.py | 70 ++++++ .../PrecisionAdaptiveCohortOptimization.py | 69 +++++ .../PrecisionAdaptiveCohortOptimizationV2.py | 72 ++++++ .../lama/PrecisionAdaptiveDecayOptimizer.py | 78 ++++++ ...cisionAdaptiveDifferentialEvolutionPlus.py | 50 ++++ .../PrecisionAdaptiveDynamicStrategyV33.py | 66 +++++ ...PrecisionAdaptiveGlobalClimbingEnhancer.py | 95 +++++++ .../PrecisionAdaptiveGradientClusteringPSO.py | 76 ++++++ .../optimization/lama/PrecisionAdaptivePSO.py | 71 ++++++ .../lama/PrecisionBalancedAdaptivePSO.py | 67 +++++ .../PrecisionBalancedEvolutionStrategy.py | 72 ++++++ .../lama/PrecisionBalancedOptimizer.py | 57 +++++ .../PrecisionBoostedDifferentialEvolution.py | 61 +++++ ...recisionCosineAdaptiveDifferentialSwarm.py | 56 +++++ .../lama/PrecisionDifferentialEvolution.py | 48 ++++ .../PrecisionDynamicAdaptiveOptimizerV6.py | 62 +++++ .../PrecisionEnhancedDualStrategyOptimizer.py | 69 +++++ .../PrecisionEnhancedDynamicOptimizerV13.py | 58 +++++ .../lama/PrecisionEnhancedSearch.py | 66 +++++ ...PrecisionEnhancedSpatialAdaptiveEvolver.py | 88 +++++++ ...sionEnhancedSpiralDifferentialClimberV4.py | 72 ++++++ .../PrecisionEnhancedStrategicOptimizer.py | 66 +++++ .../PrecisionEvolutionaryThermalOptimizer.py | 59 +++++ .../lama/PrecisionFocusedAdaptivePSO.py | 76 ++++++ .../lama/PrecisionGuidedEvolutionStrategy.py | 80 ++++++ .../PrecisionGuidedEvolutionaryAlgorithm.py | 93 +++++++ .../lama/PrecisionGuidedQuantumStrategy.py | 77 ++++++ .../PrecisionIncrementalEvolutionStrategy.py | 72 ++++++ ...cisionOptimizedEvolutionaryOptimizerV22.py | 79 ++++++ .../lama/PrecisionRotationalClimbOptimizer.py | 58 +++++ .../lama/PrecisionScaledEvolutionarySearch.py | 99 ++++++++ .../PrecisionSpiralDifferentialOptimizerV6.py | 76 ++++++ ...ecisionTunedCrossoverElitistStrategyV11.py | 80 ++++++ .../lama/PrecisionTunedEvolver.py | 74 ++++++ .../lama/PrecisionTunedHybridSearch.py | 73 ++++++ .../optimization/lama/PrecisionTunedPSO.py | 76 ++++++ ...onTunedQuantumHarmonicFeedbackOptimizer.py | 84 +++++++ ...rogressiveAdaptiveDifferentialEvolution.py | 57 +++++ .../ProgressiveAdaptiveGlobalLocalSearch.py | 81 ++++++ .../ProgressiveCohortDiversityOptimization.py | 70 ++++++ .../lama/ProgressiveDimensionalOptimizer.py | 60 +++++ ...rogressiveEvolutionaryFireworkAlgorithm.py | 74 ++++++ ...siveHybridAdaptiveDifferentialEvolution.py | 50 ++++ .../ProgressiveParticleSwarmOptimization.py | 61 +++++ ...ProgressivePopulationRefinementStrategy.py | 73 ++++++ .../ProgressiveQuorumEvolutionStrategy.py | 54 ++++ .../lama/ProgressiveRefinementSearch.py | 59 +++++ nevergrad/optimization/lama/QAPSO.py | 65 +++++ nevergrad/optimization/lama/QAPSOAIR.py | 84 +++++++ nevergrad/optimization/lama/QAPSOAIRVC.py | 89 +++++++ nevergrad/optimization/lama/QAPSOAIRVCHR.py | 91 +++++++ nevergrad/optimization/lama/QAPSOAIW.py | 60 +++++ nevergrad/optimization/lama/QAPSOAIWRR.py | 82 ++++++ nevergrad/optimization/lama/QPSO.py | 53 ++++ .../QuantumAcceleratedEvolutionStrategy.py | 61 +++++ .../QuantumAcceleratedNesterovOptimizer.py | 51 ++++ ...QuantumAcceleratedNesterovPlusOptimizer.py | 62 +++++ .../QuantumAdaptiveCognitionOptimizerV5.py | 89 +++++++ .../QuantumAdaptiveCognitionOptimizerV6.py | 89 +++++++ .../QuantumAdaptiveConvergenceOptimizer.py | 63 +++++ .../QuantumAdaptiveCrossoverRefinement.py | 72 ++++++ ...tistDynamicRestartAndDifferentialMemory.py | 137 ++++++++++ .../QuantumAdaptiveDifferentialEvolution.py | 91 +++++++ .../QuantumAdaptiveDifferentialEvolutionV3.py | 113 +++++++++ .../QuantumAdaptiveDifferentialEvolutionV4.py | 113 +++++++++ .../QuantumAdaptiveDifferentialStrategyV10.py | 74 ++++++ .../QuantumAdaptiveDifferentialStrategyV11.py | 74 ++++++ .../QuantumAdaptiveDifferentialStrategyV12.py | 74 ++++++ ...aptiveDiversifiedDynamicHybridSearchV11.py | 90 +++++++ ...aptiveDiversifiedDynamicHybridSearchV12.py | 90 +++++++ ...aptiveDiversifiedDynamicHybridSearchV13.py | 90 +++++++ ...aptiveDiversifiedDynamicHybridSearchV14.py | 90 +++++++ ...aptiveDiversifiedDynamicHybridSearchV15.py | 90 +++++++ ...antumAdaptiveDiversifiedHybridSearchV10.py | 88 +++++++ .../lama/QuantumAdaptiveDynamicExploration.py | 185 ++++++++++++++ .../QuantumAdaptiveDynamicExplorationV2.py | 194 +++++++++++++++ .../QuantumAdaptiveDynamicExplorationV3.py | 194 +++++++++++++++ .../QuantumAdaptiveDynamicExplorationV4.py | 208 ++++++++++++++++ .../QuantumAdaptiveDynamicExplorationV5.py | 211 ++++++++++++++++ .../QuantumAdaptiveDynamicExplorationV6.py | 211 ++++++++++++++++ .../QuantumAdaptiveDynamicExplorationV7.py | 211 ++++++++++++++++ .../lama/QuantumAdaptiveDynamicStrategyV7.py | 74 ++++++ .../lama/QuantumAdaptiveEliteGuidedSearch.py | 184 ++++++++++++++ .../lama/QuantumAdaptiveFireworksOptimizer.py | 52 ++++ ...uantumAdaptiveGradientDiversityExplorer.py | 95 +++++++ .../lama/QuantumAdaptiveGradientSearch.py | 96 +++++++ .../QuantumAdaptiveHarmonicOptimizerV8.py | 68 +++++ .../lama/QuantumAdaptiveHybridDEPSO_V7.py | 160 ++++++++++++ .../lama/QuantumAdaptiveHybridOptimizer.py | 76 ++++++ .../lama/QuantumAdaptiveHybridOptimizerV3.py | 80 ++++++ .../lama/QuantumAdaptiveHybridStrategyV4.py | 64 +++++ .../QuantumAdaptiveLevyDifferentialSearch.py | 159 ++++++++++++ ...mAdaptiveLevyDynamicDifferentialSwarmV4.py | 160 ++++++++++++ .../lama/QuantumAdaptiveLevyMemeticSearch.py | 156 ++++++++++++ .../lama/QuantumAdaptiveLevyOptimizer.py | 187 ++++++++++++++ .../QuantumAdaptiveLevySwarmOptimizationV2.py | 157 ++++++++++++ .../lama/QuantumAdaptiveMemeticAlgorithm.py | 118 +++++++++ .../lama/QuantumAdaptiveMemeticAlgorithmV2.py | 118 +++++++++ .../lama/QuantumAdaptiveMemeticSearchV2.py | 112 +++++++++ .../lama/QuantumAdaptiveMultiPhaseDE_v6.py | 150 +++++++++++ .../lama/QuantumAdaptiveMultiPopulationDE.py | 178 +++++++++++++ .../QuantumAdaptiveMultiStrategyEvolution.py | 184 ++++++++++++++ ...QuantumAdaptiveNesterovGradientEnhancer.py | 64 +++++ .../lama/QuantumAdaptiveNesterovSynergy.py | 69 +++++ .../QuantumAdaptiveRefinementOptimizer.py | 75 ++++++ .../lama/QuantumAdaptiveRefinementStrategy.py | 68 +++++ .../QuantumAdaptiveRefinementStrategyV2.py | 77 ++++++ .../lama/QuantumAdaptiveStrategicEnhancer.py | 70 ++++++ .../lama/QuantumAdaptiveVelocityOptimizer.py | 76 ++++++ .../QuantumAnnealingDifferentialEvolution.py | 148 +++++++++++ .../lama/QuantumAssistedHybridOptimizerV1.py | 78 ++++++ ...QuantumBalancedAdaptiveNesterovStrategy.py | 73 ++++++ .../lama/QuantumBalancedEvolutionStrategy.py | 65 +++++ ...umCognitionAdaptiveEnhancedOptimizerV16.py | 77 ++++++ .../QuantumCognitionAdaptiveEnhancerV8.py | 88 +++++++ ...ntumCognitionAdaptiveTuningOptimizerV14.py | 77 ++++++ ...mCognitionDynamicAdaptationOptimizerV30.py | 85 +++++++ .../QuantumCognitionEnhancedOptimizerV7.py | 87 +++++++ ...antumCognitionFocusedHybridOptimizerV21.py | 84 +++++++ .../QuantumCognitionFocusedOptimizerV17.py | 77 ++++++ ...CognitionHybridEvolutionaryOptimizerV19.py | 82 ++++++ ...CognitionHybridEvolutionaryOptimizerV20.py | 84 +++++++ .../QuantumCognitionHybridOptimizerV23.py | 84 +++++++ .../QuantumCognitionHybridOptimizerV24.py | 87 +++++++ .../QuantumCognitionHybridOptimizerV25.py | 85 +++++++ .../QuantumCognitionHybridOptimizerV26.py | 85 +++++++ .../QuantumCognitionHybridOptimizerV27.py | 85 +++++++ .../lama/QuantumCognitionOptimizerV2.py | 75 ++++++ .../QuantumCognitionTrajectoryOptimizerV28.py | 85 +++++++ .../lama/QuantumCognitiveAdaptiveOptimizer.py | 86 +++++++ .../QuantumControlledDiversityStrategy.py | 80 ++++++ .../QuantumCooperativeCrossoverStrategy.py | 72 ++++++ ...umCovarianceMatrixDifferentialEvolution.py | 189 ++++++++++++++ ...nceMatrixDifferentialEvolutionRefinedV2.py | 194 +++++++++++++++ ...thAdaptiveElitismAndEnhancedLocalSearch.py | 108 ++++++++ ...EvolutionWithAdaptiveLearningAndRestart.py | 161 ++++++++++++ ...ithAdaptiveMemoryAndEnhancedLocalSearch.py | 131 ++++++++++ ...EvolutionWithAdaptiveRestartAndLearning.py | 161 ++++++++++++ ...nWithAdaptiveRestartAndMemoryRefinement.py | 109 ++++++++ ...nWithAdaptiveRestartsAndElitistLearning.py | 156 ++++++++++++ ...ancedRestartsAndEnhancedElitistLearning.py | 176 +++++++++++++ ...onWithDiverseElitismAndAdaptiveRestarts.py | 108 ++++++++ ...WithDynamicAdaptiveMemoryAndEliteSearch.py | 167 +++++++++++++ ...lEvolutionWithDynamicElitismAndRestarts.py | 141 +++++++++++ ...tionWithDynamicMemoryAndAdaptiveRestart.py | 105 ++++++++ ...mDifferentialEvolutionWithEliteGuidance.py | 112 +++++++++ ...QuantumDifferentialEvolutionWithElitism.py | 88 +++++++ ...WithElitistMemoryAndEnhancedLocalSearch.py | 108 ++++++++ ...thEnhancedAdaptiveMemoryAndHybridSearch.py | 147 +++++++++++ ...dAdaptiveRestartsAndDynamicHybridSearch.py | 163 ++++++++++++ ...EnhancedLearningAndAdaptiveHybridSearch.py | 152 +++++++++++ ...hEnhancedLocalSearchAndAdaptiveRestarts.py | 107 ++++++++ ...tionWithLearningAndAdaptiveHybridSearch.py | 152 +++++++++++ ...ntialEvolutionWithMultiStrategyLearning.py | 166 +++++++++++++ ...alParticleOptimizerWithAdaptiveRestarts.py | 144 +++++++++++ ...articleOptimizerWithEliteGuidedMutation.py | 157 ++++++++++++ ...ialParticleOptimizerWithEliteRefinement.py | 157 ++++++++++++ ...ifferentialParticleOptimizerWithElitism.py | 122 +++++++++ ...leOptimizerWithEnhancedAdaptiveRestarts.py | 157 ++++++++++++ ...ntumDifferentialParticleSwarmRefinement.py | 164 ++++++++++++ .../lama/QuantumDirectionalAcceleratorV19.py | 85 +++++++ .../lama/QuantumDirectionalEnhancer.py | 77 ++++++ .../lama/QuantumDirectionalEnhancerV10.py | 83 +++++++ .../lama/QuantumDirectionalEnhancerV11.py | 84 +++++++ .../lama/QuantumDirectionalEnhancerV12.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV13.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV14.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV15.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV16.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV17.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV18.py | 85 +++++++ .../lama/QuantumDirectionalEnhancerV2.py | 79 ++++++ .../lama/QuantumDirectionalEnhancerV3.py | 80 ++++++ .../lama/QuantumDirectionalEnhancerV4.py | 80 ++++++ .../lama/QuantumDirectionalEnhancerV5.py | 81 ++++++ .../lama/QuantumDirectionalEnhancerV6.py | 81 ++++++ .../lama/QuantumDirectionalEnhancerV7.py | 81 ++++++ .../lama/QuantumDirectionalEnhancerV8.py | 81 ++++++ .../lama/QuantumDirectionalEnhancerV9.py | 81 ++++++ .../lama/QuantumDirectionalFusionOptimizer.py | 83 +++++++ .../QuantumDirectionalFusionOptimizerV2.py | 83 +++++++ .../lama/QuantumDirectionalRefinerV20.py | 85 +++++++ .../lama/QuantumDirectionalRefinerV21.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV22.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV23.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV24.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV25.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV26.py | 78 ++++++ .../lama/QuantumDirectionalRefinerV27.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV28.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV29.py | 77 ++++++ .../lama/QuantumDirectionalRefinerV30.py | 79 ++++++ .../lama/QuantumDirectionalRefinerV31.py | 79 ++++++ .../lama/QuantumDirectionalRefinerV32.py | 76 ++++++ .../lama/QuantumDirectionalRefinerV33.py | 77 ++++++ .../lama/QuantumDualStrategyAdaptiveDE.py | 149 +++++++++++ .../lama/QuantumDynamicAdaptationStrategy.py | 68 +++++ .../lama/QuantumDynamicBalanceOptimizer.py | 80 ++++++ .../lama/QuantumDynamicBalancedOptimizerV7.py | 68 +++++ .../QuantumDynamicExplorationOptimizerV6.py | 68 +++++ .../lama/QuantumDynamicGradientClimberV2.py | 91 +++++++ .../lama/QuantumDynamicGradientClimberV3.py | 91 +++++++ ...umDynamicallyAdaptiveFireworksAlgorithm.py | 58 +++++ .../lama/QuantumEliteMemeticAdaptiveSearch.py | 138 ++++++++++ ...nhancedAdaptiveDifferentialEvolution_v4.py | 141 +++++++++++ ...nhancedAdaptiveDifferentialEvolution_v5.py | 141 +++++++++++ ...ntumEnhancedAdaptiveDiversityStrategyV6.py | 92 +++++++ .../QuantumEnhancedAdaptiveDualStrategyDE.py | 149 +++++++++++ ...EnhancedAdaptiveExplorationOptimization.py | 211 ++++++++++++++++ .../QuantumEnhancedAdaptiveMultiPhaseDE.py | 157 ++++++++++++ .../QuantumEnhancedAdaptiveMultiPhaseDE_v7.py | 144 +++++++++++ .../lama/QuantumEnhancedAdaptiveOptimizer.py | 57 +++++ ...uantumEnhancedAdaptiveSwarmOptimization.py | 157 ++++++++++++ .../QuantumEnhancedDifferentialEvolution.py | 132 ++++++++++ ...ionWithAdaptiveElitismAndDynamicRestart.py | 121 +++++++++ .../QuantumEnhancedDiversityExplorerV8.py | 92 +++++++ ...antumEnhancedDynamicAdaptiveHybridDEPSO.py | 160 ++++++++++++ ...umEnhancedDynamicAdaptiveHybridDEPSO_V2.py | 160 ++++++++++++ ...umEnhancedDynamicAdaptiveHybridDEPSO_V3.py | 160 ++++++++++++ ...umEnhancedDynamicAdaptiveHybridDEPSO_V4.py | 162 ++++++++++++ ...umEnhancedDynamicAdaptiveHybridDEPSO_V5.py | 160 ++++++++++++ ...tumEnhancedDynamicDifferentialEvolution.py | 186 ++++++++++++++ ...EnhancedDynamicDifferentialEvolution_v2.py | 186 ++++++++++++++ ...EnhancedDynamicDifferentialEvolution_v3.py | 186 ++++++++++++++ .../QuantumEnhancedDynamicHybridSearchV9.py | 80 ++++++ .../QuantumEnhancedDynamicMultiStrategyDE.py | 186 ++++++++++++++ ...uantumEnhancedDynamicMultiStrategyDE_v2.py | 186 ++++++++++++++ .../QuantumEnhancedGlobalTacticalOptimizer.py | 74 ++++++ .../lama/QuantumEnhancedGradientClimber.py | 90 +++++++ .../lama/QuantumEnhancedHybridDEPSO.py | 162 ++++++++++++ .../QuantumEnhancedMemeticAdaptiveSearch.py | 151 +++++++++++ .../lama/QuantumEnhancedMemeticSearch.py | 112 +++++++++ .../QuantumEnhancedMultiPhaseAdaptiveDE_v8.py | 149 +++++++++++ .../QuantumEnhancedMultiPhaseAdaptiveDE_v9.py | 150 +++++++++++ .../lama/QuantumEnhancedMultiPhaseDE.py | 138 ++++++++++ .../lama/QuantumEnhancedMultiPhaseDE_v2.py | 150 +++++++++++ .../lama/QuantumEnhancedMultiPhaseDE_v3.py | 150 +++++++++++ .../lama/QuantumEnhancedMultiPhaseDE_v4.py | 150 +++++++++++ .../lama/QuantumEnhancedMultiPhaseDE_v5.py | 150 +++++++++++ ...dRefinedAdaptiveExplorationOptimization.py | 210 ++++++++++++++++ .../lama/QuantumEntropyEnhancedDE.py | 140 +++++++++++ .../QuantumEvolutionaryAdaptiveOptimizer.py | 86 +++++++ .../QuantumEvolutionaryConvergenceStrategy.py | 79 ++++++ ...uantumEvolutionaryConvergenceStrategyV2.py | 79 ++++++ .../lama/QuantumEvolutionaryOptimization.py | 44 ++++ .../QuantumEvolvedDiversityExplorerV10.py | 92 +++++++ .../QuantumEvolvedDiversityExplorerV11.py | 92 +++++++ .../QuantumEvolvedDiversityExplorerV12.py | 86 +++++++ .../lama/QuantumEvolvedDiversityExplorerV9.py | 92 +++++++ .../lama/QuantumFeedbackEvolutionStrategy.py | 76 ++++++ .../lama/QuantumFireworksAlgorithm.py | 38 +++ .../lama/QuantumFluxDifferentialSwarm.py | 59 +++++ .../QuantumGeneticDifferentialEvolution.py | 172 +++++++++++++ ...GradientAdaptiveExplorationOptimization.py | 211 ++++++++++++++++ ...adientAdaptiveExplorationOptimizationV2.py | 213 ++++++++++++++++ ...adientAdaptiveExplorationOptimizationV3.py | 213 ++++++++++++++++ ...adientAdaptiveExplorationOptimizationV4.py | 213 ++++++++++++++++ ...adientAdaptiveExplorationOptimizationV5.py | 216 ++++++++++++++++ ...tAdaptiveExplorationRefinedOptimization.py | 213 ++++++++++++++++ .../QuantumGradientBalancedOptimizerV6.py | 76 ++++++ .../QuantumGradientBoostedMemeticSearch.py | 133 ++++++++++ ...GradientEnhancedExplorationOptimization.py | 211 ++++++++++++++++ .../lama/QuantumGradientFusionOptimizer.py | 90 +++++++ ...QuantumGradientGuidedFireworksAlgorithm.py | 73 ++++++ .../lama/QuantumGradientHybridOptimization.py | 213 ++++++++++++++++ .../QuantumGradientHybridOptimizationV2.py | 214 ++++++++++++++++ .../QuantumGradientHybridOptimizationV3.py | 214 ++++++++++++++++ .../QuantumGradientHybridOptimizationV4.py | 214 ++++++++++++++++ .../lama/QuantumGradientHybridOptimizer.py | 116 +++++++++ .../lama/QuantumGradientMemeticOptimizer.py | 129 ++++++++++ .../lama/QuantumGradientMemeticSearch.py | 118 +++++++++ .../lama/QuantumGradientMemeticSearchV2.py | 118 +++++++++ .../lama/QuantumGradientMemeticSearchV3.py | 118 +++++++++ .../lama/QuantumGuidedAdaptiveStrategy.py | 77 ++++++ .../lama/QuantumGuidedCrossoverAdaptation.py | 87 +++++++ .../QuantumGuidedHybridDifferentialSwarm.py | 61 +++++ .../lama/QuantumGuidedLevyAdaptiveSwarm.py | 163 ++++++++++++ .../lama/QuantumHarmonicAdaptationStrategy.py | 67 +++++ ...uantumHarmonicAdaptiveFeedbackOptimizer.py | 82 ++++++ .../lama/QuantumHarmonicAdaptiveOptimizer.py | 73 ++++++ ...ntumHarmonicAdaptiveRefinementOptimizer.py | 82 ++++++ .../lama/QuantumHarmonicDynamicAdaptation.py | 67 +++++ .../lama/QuantumHarmonicDynamicOptimizer.py | 82 ++++++ .../lama/QuantumHarmonicEvolutionStrategy.py | 65 +++++ .../lama/QuantumHarmonicFeedbackOptimizer.py | 76 ++++++ .../lama/QuantumHarmonicFocusedOptimizer.py | 81 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV2.py | 78 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV3.py | 78 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV4.py | 78 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV5.py | 78 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV6.py | 78 ++++++ .../lama/QuantumHarmonicFocusedOptimizerV7.py | 78 ++++++ .../lama/QuantumHarmonicImpulseOptimizerV9.py | 69 +++++ .../lama/QuantumHarmonicPrecisionOptimizer.py | 83 +++++++ ...antumHarmonicResilientEvolutionStrategy.py | 82 ++++++ .../optimization/lama/QuantumHarmonizedPSO.py | 72 ++++++ .../lama/QuantumHarmonyMemeticAlgorithm.py | 70 ++++++ .../QuantumHarmonyMemeticAlgorithmImproved.py | 83 +++++++ .../QuantumHarmonyMemeticAlgorithmRefined.py | 83 +++++++ .../optimization/lama/QuantumHarmonySearch.py | 42 ++++ .../lama/QuantumHybridAdaptiveStrategy.py | 66 +++++ .../lama/QuantumHybridAdaptiveStrategyV2.py | 61 +++++ .../lama/QuantumHybridAdaptiveStrategyV8.py | 74 ++++++ .../lama/QuantumHybridAdaptiveStrategyV9.py | 74 ++++++ .../QuantumHybridDifferentialEvolution.py | 190 ++++++++++++++ .../lama/QuantumHybridDynamicAdaptiveDE.py | 179 +++++++++++++ .../lama/QuantumHybridDynamicAdaptiveDE_v2.py | 181 ++++++++++++++ .../lama/QuantumHybridDynamicAdaptiveDE_v3.py | 181 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v2.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v3.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v4.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v5.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v6.py | 192 ++++++++++++++ .../lama/QuantumHybridEliteAdaptiveDE_v7.py | 192 ++++++++++++++ .../lama/QuantumHybridImprovedDE.py | 192 ++++++++++++++ ...QuantumHybridParticleDifferentialSearch.py | 135 ++++++++++ .../QuantumInfluenceCrossoverOptimizer.py | 65 +++++ ...ntumInfluencedAdaptiveDifferentialSwarm.py | 59 +++++ .../QuantumInformedAdaptiveHybridSearch.py | 96 +++++++ .../QuantumInformedAdaptiveHybridSearchV4.py | 82 ++++++ ...QuantumInformedAdaptiveInertiaOptimizer.py | 69 +++++ .../lama/QuantumInformedAdaptivePSO.py | 69 +++++ .../lama/QuantumInformedAdaptiveSearchV4.py | 84 +++++++ .../lama/QuantumInformedAdaptiveSearchV5.py | 84 +++++++ .../lama/QuantumInformedAdaptiveSearchV6.py | 84 +++++++ .../QuantumInformedCooperativeSearchV1.py | 83 +++++++ .../lama/QuantumInformedCrossoverEvolution.py | 84 +++++++ .../QuantumInformedDifferentialStrategy.py | 73 ++++++ .../QuantumInformedDynamicSwarmOptimizer.py | 70 ++++++ .../lama/QuantumInformedEvolutionStrategy.py | 71 ++++++ .../lama/QuantumInformedGradientOptimizer.py | 79 ++++++ .../QuantumInformedHyperStrategicOptimizer.py | 73 ++++++ .../lama/QuantumInformedOptimizer.py | 89 +++++++ .../optimization/lama/QuantumInformedPSO.py | 79 ++++++ .../QuantumInformedParticleSwarmOptimizer.py | 70 ++++++ .../lama/QuantumInformedStrategicOptimizer.py | 73 ++++++ .../lama/QuantumInfusedAdaptiveStrategy.py | 74 ++++++ ...tumInspiredAdaptiveDEElitistLocalSearch.py | 109 ++++++++ ...ntumInspiredAdaptiveDEHybridLocalSearch.py | 123 +++++++++ ...eDifferentialEvolutionWithEliteLearning.py | 165 ++++++++++++ ...iveDifferentialEvolutionWithLocalSearch.py | 143 +++++++++++ .../QuantumInspiredAdaptiveHybridDEPSO.py | 162 ++++++++++++ .../QuantumInspiredAdaptiveHybridOptimizer.py | 175 +++++++++++++ ...QuantumInspiredAdaptiveMemeticOptimizer.py | 181 ++++++++++++++ .../QuantumInspiredDifferentialEvolution.py | 98 ++++++++ ...piredDifferentialParticleSwarmOptimizer.py | 164 ++++++++++++ .../lama/QuantumInspiredHybridOptimizer.py | 65 +++++ .../lama/QuantumInspiredMetaheuristic.py | 51 ++++ .../lama/QuantumInspiredOptimization.py | 49 ++++ .../lama/QuantumInspiredSpiralOptimizer.py | 83 +++++++ .../QuantumIterativeDeepeningHybridSearch.py | 109 ++++++++ .../QuantumIterativeRefinementOptimizer.py | 63 +++++ .../optimization/lama/QuantumLeapOptimizer.py | 75 ++++++ .../lama/QuantumLeapOptimizerV2.py | 75 ++++++ .../QuantumLevyAdaptiveDEHybridLocalSearch.py | 130 ++++++++++ ...ntumLevyAdaptiveDifferentialOptimizerV2.py | 156 ++++++++++++ ...ntumLevyAdaptiveDifferentialOptimizerV3.py | 156 ++++++++++++ ...ntumLevyAdaptiveDifferentialOptimizerV4.py | 156 ++++++++++++ ...ntumLevyAdaptiveDifferentialOptimizerV5.py | 156 ++++++++++++ ...ntumLevyAdaptiveDifferentialOptimizerV6.py | 156 ++++++++++++ .../QuantumLevyAdaptiveMemeticOptimizerV3.py | 139 +++++++++++ ...QuantumLevyDifferentialDynamicOptimizer.py | 156 ++++++++++++ ...antumLevyDifferentialDynamicOptimizerV2.py | 156 ++++++++++++ ...antumLevyDifferentialDynamicOptimizerV3.py | 156 ++++++++++++ .../QuantumLevyDifferentialHybridOptimizer.py | 160 ++++++++++++ ...uantumLevyDifferentialHybridOptimizerV2.py | 160 ++++++++++++ .../QuantumLevyDifferentialHybridSearch.py | 158 ++++++++++++ ...LevyDynamicDifferentialSwarmOptimizerV3.py | 160 ++++++++++++ .../QuantumLevyDynamicDifferentialSwarmV5.py | 160 ++++++++++++ .../lama/QuantumLevyDynamicParticleSwarm.py | 160 ++++++++++++ .../QuantumLevyDynamicSwarmOptimization.py | 145 +++++++++++ ...uantumLevyEliteMemeticDEHybridOptimizer.py | 131 ++++++++++ .../lama/QuantumLevyEliteMemeticOptimizer.py | 139 +++++++++++ ...vyEnhancedAdaptiveDifferentialOptimizer.py | 177 +++++++++++++ .../QuantumLevyEnhancedAdaptiveOptimizerV2.py | 158 ++++++++++++ ...uantumLevyEnhancedDifferentialOptimizer.py | 158 ++++++++++++ .../QuantumLevyEnhancedMemeticOptimizerV2.py | 139 +++++++++++ ...vyImprovedDifferentialSwarmOptimization.py | 145 +++++++++++ ...QuantumLevyParticleAdaptiveOptimization.py | 163 ++++++++++++ .../lama/QuantumLevySwarmOptimizationV3.py | 145 +++++++++++ .../optimization/lama/QuantumLocustSearch.py | 75 ++++++ .../lama/QuantumLocustSearchV2.py | 75 ++++++ ...tumOrbitalAdaptiveCrossoverOptimizerV20.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV12.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV13.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV14.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV15.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV16.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV17.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV18.py | 68 +++++ .../lama/QuantumOrbitalDynamicEnhancerV24.py | 72 ++++++ .../lama/QuantumOrbitalDynamicEnhancerV25.py | 75 ++++++ .../lama/QuantumOrbitalDynamicEnhancerV26.py | 67 +++++ .../lama/QuantumOrbitalDynamicEnhancerV27.py | 67 +++++ .../lama/QuantumOrbitalDynamicEnhancerV28.py | 67 +++++ .../lama/QuantumOrbitalDynamicEnhancerV29.py | 78 ++++++ .../lama/QuantumOrbitalDynamicEnhancerV30.py | 67 +++++ .../lama/QuantumOrbitalDynamicEnhancerV31.py | 65 +++++ .../lama/QuantumOrbitalDynamicEnhancerV32.py | 64 +++++ .../lama/QuantumOrbitalDynamicEnhancerV33.py | 66 +++++ .../lama/QuantumOrbitalDynamicEnhancerV34.py | 64 +++++ .../lama/QuantumOrbitalDynamicOptimizerV11.py | 68 +++++ ...tumOrbitalEnhancedCrossoverOptimizerV22.py | 72 ++++++ ...uantumOrbitalEnhancedDynamicEnhancerV19.py | 68 +++++ .../QuantumOrbitalHarmonicOptimizerV10.py | 69 +++++ .../QuantumOrbitalPrecisionOptimizerV34.py | 66 +++++ ...ntumOrbitalRefinedCrossoverOptimizerV21.py | 72 ++++++ ...ntumOrbitalRefinedCrossoverOptimizerV23.py | 72 ++++++ ...antumParticleSwarmDifferentialEvolution.py | 164 ++++++++++++ .../lama/QuantumParticleSwarmOptimization.py | 107 ++++++++ .../QuantumReactiveCooperativeStrategy.py | 77 ++++++ ...mRefinedAdaptiveExplorationOptimization.py | 210 ++++++++++++++++ .../QuantumRefinedAdaptiveHybridStrategyV5.py | 64 +++++ ...uantumRefinedAdaptiveStrategicOptimizer.py | 72 ++++++ ...uantumRefinedDynamicAdaptiveHybridDEPSO.py | 160 ++++++++++++ .../QuantumReinforcedNesterovAccelerator.py | 68 +++++ .../QuantumResonanceEvolutionaryStrategy.py | 62 +++++ nevergrad/optimization/lama/QuantumSearch.py | 28 +++ .../lama/QuantumSimulatedAnnealing.py | 40 +++ ...uantumSimulatedAnnealingHybridOptimizer.py | 150 +++++++++++ .../lama/QuantumSimulatedAnnealingImproved.py | 42 ++++ .../QuantumSpectralAdaptiveHybridStrategy.py | 70 ++++++ .../QuantumSpectralAdaptiveOptimizerV2.py | 66 +++++ .../QuantumSpectralAdaptiveOptimizerV3.py | 68 +++++ .../lama/QuantumSpectralDynamicOptimizer.py | 68 +++++ .../QuantumSpectralEnhancedOptimizerV5.py | 68 +++++ .../lama/QuantumSpectralRefinedOptimizerV4.py | 68 +++++ ...uantumStabilizedDynamicBalanceOptimizer.py | 75 ++++++ .../lama/QuantumStateConvergenceOptimizer.py | 50 ++++ .../lama/QuantumStateCrossoverOptimization.py | 84 +++++++ .../lama/QuantumStateHybridStrategy.py | 65 +++++ .../lama/QuantumStateRefinedHybridStrategy.py | 65 +++++ ...antumStochasticGradientDescentFireworks.py | 41 +++ .../QuantumStochasticGradientOptimizer.py | 45 ++++ .../lama/QuantumSwarmOptimization.py | 54 ++++ .../lama/QuantumSwarmOptimizationImproved.py | 62 +++++ .../QuantumSymbioticEnhancedStrategyV3.py | 75 ++++++ .../lama/QuantumTunedGradientSearchV2.py | 90 +++++++ .../lama/QuantumTunnelingOptimizer.py | 72 ++++++ .../lama/QuantumTunnelingOptimizerV10.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV11.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV12.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV13.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV14.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV15.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV16.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV17.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV18.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV2.py | 74 ++++++ .../lama/QuantumTunnelingOptimizerV3.py | 74 ++++++ .../lama/QuantumTunnelingOptimizerV4.py | 74 ++++++ .../lama/QuantumTunnelingOptimizerV5.py | 75 ++++++ .../lama/QuantumTunnelingOptimizerV6.py | 75 ++++++ .../lama/QuantumTunnelingOptimizerV7.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV8.py | 76 ++++++ .../lama/QuantumTunnelingOptimizerV9.py | 76 ++++++ nevergrad/optimization/lama/RADE.py | 62 +++++ nevergrad/optimization/lama/RADEA.py | 74 ++++++ nevergrad/optimization/lama/RADECM.py | 65 +++++ nevergrad/optimization/lama/RADEDM.py | 59 +++++ nevergrad/optimization/lama/RADEEM.py | 71 ++++++ nevergrad/optimization/lama/RADEPM.py | 70 ++++++ nevergrad/optimization/lama/RADSDiffEvo.py | 81 ++++++ nevergrad/optimization/lama/RAGCES.py | 66 +++++ nevergrad/optimization/lama/RAGEA.py | 63 +++++ nevergrad/optimization/lama/RAHDEMI.py | 87 +++++++ nevergrad/optimization/lama/RALES.py | 61 +++++ nevergrad/optimization/lama/RAMDE.py | 91 +++++++ nevergrad/optimization/lama/RAMEDS.py | 85 +++++++ nevergrad/optimization/lama/RAMEDSPlus.py | 87 +++++++ nevergrad/optimization/lama/RAMEDSPro.py | 89 +++++++ nevergrad/optimization/lama/RAMSDiffEvo.py | 76 ++++++ nevergrad/optimization/lama/RAPDE.py | 60 +++++ nevergrad/optimization/lama/RASES.py | 66 +++++ nevergrad/optimization/lama/RAVDE.py | 70 ++++++ nevergrad/optimization/lama/RDACE.py | 68 +++++ nevergrad/optimization/lama/RDSAS.py | 59 +++++ nevergrad/optimization/lama/READEPMC.py | 73 ++++++ nevergrad/optimization/lama/REAMSEA.py | 76 ++++++ nevergrad/optimization/lama/RE_ADMMMS.py | 80 ++++++ nevergrad/optimization/lama/RPWDE.py | 70 ++++++ .../lama/RankingDifferentialEvolution.py | 44 ++++ ...dAdaptiveClusteredDifferentialEvolution.py | 129 ++++++++++ ...finedAdaptiveCovarianceMatrixAdaptation.py | 114 +++++++++ ...efinedAdaptiveCovarianceMatrixEvolution.py | 96 +++++++ ...finedAdaptiveCrossoverElitistStrategyV7.py | 88 +++++++ .../RefinedAdaptiveDifferentialEvolution.py | 160 ++++++++++++ ...edAdaptiveDifferentialEvolutionStrategy.py | 54 ++++ ...entialEvolutionWithAdaptivePerturbation.py | 112 +++++++++ ...eDifferentialEvolutionWithGradientBoost.py | 112 +++++++++ .../lama/RefinedAdaptiveDifferentialSearch.py | 66 +++++ ...RefinedAdaptiveDifferentialSpiralSearch.py | 63 +++++ ...inedAdaptiveDimensionalClimbingStrategy.py | 79 ++++++ ...inedAdaptiveDimensionalCrossoverEvolver.py | 87 +++++++ ...aptiveDirectionalBiasQuorumOptimization.py | 76 ++++++ ...finedAdaptiveDivergenceClusteringSearch.py | 78 ++++++ .../lama/RefinedAdaptiveDiversityPSO.py | 82 ++++++ .../lama/RefinedAdaptiveDualPhaseStrategy.py | 81 ++++++ .../RefinedAdaptiveDualPhaseStrategyV3.py | 80 ++++++ .../lama/RefinedAdaptiveDynamicDE.py | 69 +++++ ...inedAdaptiveDynamicDualPhaseStrategyV14.py | 87 +++++++ ...inedAdaptiveDynamicDualPhaseStrategyV17.py | 88 +++++++ ...inedAdaptiveDynamicDualPhaseStrategyV20.py | 84 +++++++ ...dAdaptiveDynamicExplorationOptimization.py | 166 +++++++++++++ ...tiveDynamicMemeticEvolutionaryAlgorithm.py | 98 ++++++++ .../lama/RefinedAdaptiveDynamicStrategyV25.py | 76 ++++++ .../lama/RefinedAdaptiveEliteGuidedDE.py | 122 +++++++++ .../RefinedAdaptiveEliteGuidedMutationDE.py | 117 +++++++++ ...RefinedAdaptiveEliteGuidedMutationDE_v5.py | 105 ++++++++ .../lama/RefinedAdaptiveElitistDE_v4.py | 126 ++++++++++ ...nhancedFireworkAlgorithmWithLocalSearch.py | 111 +++++++++ ...AdaptiveEnhancedGradientGuidedHybridPSO.py | 69 +++++ ...EnhancedSuperchargedAQAPSO_LS_DIW_AP_V2.py | 108 ++++++++ .../lama/RefinedAdaptiveEvolutionStrategy.py | 78 ++++++ .../RefinedAdaptiveExplorationOptimizer.py | 70 ++++++ ...efinedAdaptiveGlobalClimbingOptimizerV5.py | 73 ++++++ .../RefinedAdaptiveGlobalClimbingStrategy.py | 89 +++++++ .../lama/RefinedAdaptiveGradientCrossover.py | 89 +++++++ ...edAdaptiveGradientDifferentialEvolution.py | 112 +++++++++ .../RefinedAdaptiveGradientEnhancedRAMEDS.py | 90 +++++++ .../lama/RefinedAdaptiveGradientEvolverV2.py | 95 +++++++ .../RefinedAdaptiveGradientGuidedEvolution.py | 81 ++++++ .../RefinedAdaptiveGradientHybridOptimizer.py | 86 +++++++ .../RefinedAdaptiveGuidedEvolutionStrategy.py | 67 +++++ .../lama/RefinedAdaptiveHybridDE.py | 64 +++++ ...efinedAdaptiveHybridEvolutionStrategyV6.py | 62 +++++ .../lama/RefinedAdaptiveHybridOptimization.py | 163 ++++++++++++ .../lama/RefinedAdaptiveHybridOptimizer.py | 61 +++++ ...ybridParticleSwarmDifferentialEvolution.py | 136 ++++++++++ ...inedAdaptiveHybridQuasiRandomGradientDE.py | 129 ++++++++++ ...daptiveHybridSwarmEvolutionOptimization.py | 145 +++++++++++ .../RefinedAdaptiveIncrementalCrossover.py | 71 ++++++ .../RefinedAdaptiveIslandEvolutionStrategy.py | 94 +++++++ ...nedAdaptiveMemeticDifferentialEvolution.py | 167 +++++++++++++ .../RefinedAdaptiveMemeticDiverseOptimizer.py | 157 ++++++++++++ .../RefinedAdaptiveMemoryEnhancedSearch.py | 81 ++++++ ...efinedAdaptiveMemoryEnhancedStrategyV55.py | 73 ++++++ .../lama/RefinedAdaptiveMemoryStrategyV67.py | 93 +++++++ .../RefinedAdaptiveMultiOperatorSearch.py | 146 +++++++++++ .../lama/RefinedAdaptiveMultiStrategyDE.py | 162 ++++++++++++ .../lama/RefinedAdaptiveMultiStrategyDE_v2.py | 140 +++++++++++ ...ptiveMultiStrategyDifferentialEvolution.py | 135 ++++++++++ ...iveMultiStrategyDifferentialEvolutionV2.py | 154 ++++++++++++ .../RefinedAdaptiveParameterStrategyV38.py | 74 ++++++ ...ustDifferentialEvolutionWithEliteSearch.py | 157 ++++++++++++ ...ustDifferentialEvolutionWithEliteSearch.py | 157 ++++++++++++ ...RefinedAdaptivePrecisionBalanceStrategy.py | 72 ++++++ ...edAdaptivePrecisionCohortOptimizationV4.py | 66 +++++ ...edAdaptivePrecisionCohortOptimizationV6.py | 71 ++++++ ...dAdaptivePrecisionDifferentialEvolution.py | 66 +++++ .../RefinedAdaptivePrecisionDivideSearch.py | 55 ++++ ...finedAdaptivePrecisionEvolutionStrategy.py | 83 +++++++ .../RefinedAdaptivePrecisionFocalHybrid.py | 100 ++++++++ .../RefinedAdaptivePrecisionHybridSearch.py | 73 ++++++ ...inedAdaptivePrecisionStrategicOptimizer.py | 74 ++++++ ...finedAdaptiveQuantumCrossoverStrategyV3.py | 91 +++++++ ...nedAdaptiveQuantumDifferentialEvolution.py | 138 ++++++++++ ...daptiveQuantumDifferentialEvolutionPlus.py | 89 +++++++ .../lama/RefinedAdaptiveQuantumEliteDE.py | 186 ++++++++++++++ .../lama/RefinedAdaptiveQuantumEntropyDE.py | 153 ++++++++++++ ...tiveQuantumGradientBoostedMemeticSearch.py | 151 +++++++++++ ...eQuantumGradientExplorationOptimization.py | 221 ++++++++++++++++ ...dAdaptiveQuantumGradientHybridOptimizer.py | 92 +++++++ .../lama/RefinedAdaptiveQuantumPSO.py | 111 +++++++++ .../RefinedAdaptiveQuantumSwarmOptimizerV3.py | 88 +++++++ ...dAdaptiveQuasiRandomDEGradientAnnealing.py | 152 +++++++++++ ...uasiRandomEnhancedDifferentialEvolution.py | 120 +++++++++ .../lama/RefinedAdaptiveRefinementPSO.py | 65 +++++ ...aptiveSimulatedAnnealingWithSmartMemory.py | 157 ++++++++++++ ...inedAdaptiveSpatialExplorationOptimizer.py | 74 ++++++ .../lama/RefinedAdaptiveSpatialOptimizer.py | 69 +++++ .../lama/RefinedAdaptiveSpectralEvolution.py | 80 ++++++ .../RefinedAdaptiveSpiralGradientSearch.py | 62 +++++ ...iveStochasticGradientQuorumOptimization.py | 77 ++++++ ...efinedAdaptiveStochasticHybridEvolution.py | 62 +++++ ...finedAdaptiveSwarmDifferentialEvolution.py | 50 ++++ ...namicMultiStrategyDifferentialEvolution.py | 159 ++++++++++++ ...namicAdaptiveHybridDEPSOWithEliteMemory.py | 160 ++++++++++++ ...veEnhancedAdaptiveDifferentialEvolution.py | 166 +++++++++++++ .../lama/RefinedAttenuatedAdaptiveEvolver.py | 74 ++++++ .../RefinedBalancedAdaptiveElitistStrategy.py | 79 ++++++ .../RefinedBalancedExplorationOptimizer.py | 80 ++++++ .../optimization/lama/RefinedCMADiffEvoPSO.py | 127 ++++++++++ .../RefinedConcentricDiversityStrategy.py | 113 +++++++++ ...nedConcentricQuantumCrossoverStrategyV5.py | 91 +++++++ .../RefinedConvergenceAdaptiveOptimizer.py | 96 +++++++ .../optimization/lama/RefinedConvergenceDE.py | 68 +++++ ...inedConvergentAdaptiveEvolutionStrategy.py | 74 ++++++ ...RefinedCooperativeDifferentialEvolution.py | 121 +++++++++ .../RefinedCosineAdaptiveDifferentialSwarm.py | 54 ++++ ...entialEvolutionWithAdaptiveLearningRate.py | 143 +++++++++++ ...edDifferentialParticleSwarmOptimization.py | 158 ++++++++++++ ...efinedDimensionalCyclicCrossoverEvolver.py | 90 +++++++ .../RefinedDimensionalFeedbackEvolverV2.py | 87 +++++++ .../RefinedDimensionalFeedbackEvolverV4.py | 95 +++++++ ...RefinedDualConvergenceEvolutiveStrategy.py | 67 +++++ .../RefinedDualPhaseADPSO_DE_V3_Enhanced.py | 144 +++++++++++ .../lama/RefinedDualPhaseOptimization.py | 59 +++++ .../lama/RefinedDualStrategyAdaptiveDE.py | 118 +++++++++ .../lama/RefinedDynamicAdaptiveDE.py | 149 +++++++++++ .../lama/RefinedDynamicAdaptiveHybridDE.py | 76 ++++++ ...namicAdaptiveHybridDEPSOWithEliteMemory.py | 152 +++++++++++ .../RefinedDynamicAdaptiveHybridOptimizer.py | 142 +++++++++++ ...RefinedDynamicAdaptiveHybridOptimizerV2.py | 144 +++++++++++ .../lama/RefinedDynamicAdaptiveStrategyV23.py | 77 ++++++ ...finedDynamicClusterHybridOptimizationV3.py | 108 ++++++++ ...finedDynamicClusterHybridOptimizationV4.py | 109 ++++++++ .../lama/RefinedDynamicClusteringPSO.py | 79 ++++++ .../RefinedDynamicCrowdingHybridOptimizer.py | 189 ++++++++++++++ ...inedDynamicEliteAdaptiveHybridOptimizer.py | 168 +++++++++++++ .../RefinedDynamicEnhancedHybridOptimizer.py | 186 ++++++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 140 +++++++++++ ...inedDynamicHybridDEPSOWithEliteMemoryV2.py | 168 +++++++++++++ .../lama/RefinedDynamicHybridOptimizer.py | 76 ++++++ .../lama/RefinedDynamicQuantumEvolution.py | 186 ++++++++++++++ .../lama/RefinedEliteAdaptiveHybridDEPSO.py | 152 +++++++++++ ...eAdaptiveMemoryDynamicCrowdingOptimizer.py | 197 +++++++++++++++ ...daptiveMemoryDynamicCrowdingOptimizerV3.py | 199 +++++++++++++++ ...finedEliteAdaptiveMemoryHybridOptimizer.py | 197 +++++++++++++++ ...nedEliteAdaptiveMemoryHybridOptimizerV3.py | 206 +++++++++++++++ ...nedEliteAdaptiveMemoryHybridOptimizerV4.py | 197 +++++++++++++++ ...nedEliteAdaptiveMemoryHybridOptimizerV5.py | 206 +++++++++++++++ ...aptiveQuantumDEWithEnhancedHybridSearch.py | 172 +++++++++++++ .../RefinedEliteDynamicHybridOptimizer.py | 136 ++++++++++ ...efinedEliteDynamicMemoryHybridOptimizer.py | 197 +++++++++++++++ .../lama/RefinedEliteGuidedAdaptiveDE.py | 99 ++++++++ .../lama/RefinedEliteGuidedMutationDE.py | 127 ++++++++++ .../lama/RefinedEliteGuidedMutationDE_v3.py | 97 ++++++++ ...ncedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py | 84 +++++++ ...CovarianceMatrixDifferentialEvolutionV5.py | 122 +++++++++ ...eDifferentialEvolutionWithGradientBoost.py | 106 ++++++++ ...inedEnhancedAdaptiveDualPhaseStrategyV9.py | 79 ++++++ ...cedAdaptiveGradientBalancedCrossoverPSO.py | 80 ++++++ ...cedAdaptiveHarmonyMemeticOptimizationV9.py | 95 +++++++ .../RefinedEnhancedAdaptiveHarmonySearch.py | 81 ++++++ ...articleSwarmDifferentialEvolutionPlusV2.py | 125 ++++++++++ ...cedAdaptiveMemeticEvolutionaryAlgorithm.py | 98 ++++++++ ...inedEnhancedAdaptiveMultiOperatorSearch.py | 141 +++++++++++ .../RefinedEnhancedAdaptiveMultiStrategyDE.py | 128 ++++++++++ .../lama/RefinedEnhancedAdaptiveQGSA_v45.py | 93 +++++++ .../lama/RefinedEnhancedAdaptiveQGSA_v46.py | 93 +++++++ .../lama/RefinedEnhancedAdaptiveQGSA_v48.py | 91 +++++++ ...dEnhancedBalancedDualStrategyAdaptiveDE.py | 127 ++++++++++ ...edCovarianceMatrixDifferentialEvolution.py | 161 ++++++++++++ ...cedDifferentialEvolutionLocalSearch_v42.py | 85 +++++++ ...edDualPhaseAdaptiveHybridOptimizationV3.py | 146 +++++++++++ ...inedEnhancedDualPhaseHybridOptimization.py | 145 +++++++++++ ...edEnhancedDualPhaseHybridOptimizationV3.py | 145 +++++++++++ ...efinedEnhancedDualStrategyAdaptiveDE_v2.py | 125 ++++++++++ ...efinedEnhancedDualStrategyAdaptiveDE_v3.py | 125 ++++++++++ .../RefinedEnhancedDualStrategyDynamicDE.py | 145 +++++++++++ ...RefinedEnhancedDualStrategyElitistDE_v2.py | 125 ++++++++++ ...hancedDynamicAdaptiveHybridOptimization.py | 141 +++++++++++ ...inedEnhancedDynamicDualStrategyHybridDE.py | 145 +++++++++++ ...nedEnhancedEliteGuidedAdaptiveRestartDE.py | 115 +++++++++ .../RefinedEnhancedEliteGuidedMassQGSA_v87.py | 129 ++++++++++ ...cedHybridAdaptiveMultiStageOptimization.py | 139 +++++++++++ ...CovarianceMatrixDifferentialEvolutionV3.py | 177 +++++++++++++ ...ancedHybridDEPSOWithQuantumLevyFlightV2.py | 172 +++++++++++++ ...edEnhancedHybridExplorationOptimization.py | 163 ++++++++++++ ...RefinedEnhancedHyperAdaptiveHybridDEPSO.py | 149 +++++++++++ ...timizedEvolutionaryGradientOptimizerV63.py | 82 ++++++ ...finedEnhancedHyperStrategicOptimizerV57.py | 82 ++++++ .../lama/RefinedEnhancedMetaNetAQAPSOv7.py | 123 +++++++++ ...finedEnhancedOptimizedEvolutiveStrategy.py | 70 ++++++ ...hancedPrecisionEvolutionaryOptimizerV40.py | 74 ++++++ .../lama/RefinedEnhancedQAPSOAIRVCHRLS.py | 114 +++++++++ ...CovarianceMatrixDifferentialEvolutionV2.py | 194 +++++++++++++++ .../lama/RefinedEnhancedRAMEDSProV3.py | 79 ++++++ .../lama/RefinedEnhancedRAMEDSv3.py | 76 ++++++ .../lama/RefinedEnhancedRAMEDSv4.py | 95 +++++++ .../lama/RefinedEnhancedStrategyDE.py | 75 ++++++ .../lama/RefinedEnhancedUltraRefinedRAMEDS.py | 81 ++++++ .../lama/RefinedEnsembleAdaptiveQuantumDE.py | 130 ++++++++++ ...edEvolutionaryGradientHybridOptimizerV3.py | 79 ++++++ .../lama/RefinedEvolutionaryTuningStrategy.py | 80 ++++++ .../lama/RefinedGlobalClimbingOptimizerV2.py | 77 ++++++ .../RefinedGlobalLocalBalancingOptimizer.py | 69 +++++ ...RefinedGlobalStructureAdaptiveEvolverV2.py | 82 ++++++ .../RefinedGlobalStructureAwareOptimizerV2.py | 91 +++++++ .../RefinedGlobalStructureAwareOptimizerV3.py | 92 +++++++ .../RefinedGradientBalancedExplorationPSO.py | 70 ++++++ ...nealingWithAdaptiveMemoryAndExploration.py | 180 ++++++++++++++ .../RefinedGradientBoostedMemoryAnnealing.py | 141 +++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 140 +++++++++++ ...ientBoostedMemorySimulatedAnnealingPlus.py | 174 +++++++++++++ .../lama/RefinedGradientBoostedOptimizer.py | 63 +++++ .../RefinedGradientGuidedEvolutionStrategy.py | 66 +++++ ...veCovarianceMatrixDifferentialEvolution.py | 122 +++++++++ ...inedHybridAdaptiveDifferentialEvolution.py | 63 +++++ .../lama/RefinedHybridAdaptiveGradientPSO.py | 90 +++++++ ...nedHybridAdaptiveMultiStageOptimization.py | 139 +++++++++++ ...idCovarianceMatrixDifferentialEvolution.py | 156 ++++++++++++ .../optimization/lama/RefinedHybridDEPSO.py | 160 ++++++++++++ .../RefinedHybridDEPSOWithAdaptiveMemoryV4.py | 152 +++++++++++ ...finedHybridDEPSOWithDynamicAdaptationV3.py | 149 +++++++++++ ...PhaseParticleSwarmDifferentialEvolution.py | 144 +++++++++++ ...RefinedHybridDynamicClusterOptimization.py | 153 ++++++++++++ .../RefinedHybridEliteGuidedMutationDE.py | 88 +++++++ .../RefinedHybridEliteGuidedMutationDE_v2.py | 99 ++++++++ .../RefinedHybridEliteGuidedMutationDE_v3.py | 111 +++++++++ .../lama/RefinedHybridEvolutionStrategyV4.py | 70 ++++++ ...nedHybridEvolutionaryAnnealingOptimizer.py | 54 ++++ .../lama/RefinedHybridOptimizer.py | 127 ++++++++++ .../lama/RefinedHybridPSODEOptimizer.py | 95 +++++++ .../RefinedHybridPSODESimulatedAnnealing.py | 118 +++++++++ .../optimization/lama/RefinedHybridPSO_DE.py | 109 ++++++++ .../lama/RefinedHybridPrecisionSearch.py | 71 ++++++ .../lama/RefinedHybridQuantumAdaptiveDE.py | 136 ++++++++++ .../RefinedHybridQuantumLevyAdaptiveSwarm.py | 163 ++++++++++++ ...nedHybridQuasiRandomDEGradientAnnealing.py | 142 +++++++++++ ...erAdaptiveSinusoidalDifferentialSwarmV2.py | 55 ++++ .../lama/RefinedHyperEvolvedDynamicRAMEDS.py | 85 +++++++ ...HyperOptimizedDynamicPrecisionOptimizer.py | 60 +++++ ...erOptimizedThermalEvolutionaryOptimizer.py | 59 +++++ ...yperRefinedDynamicPrecisionOptimizerV50.py | 58 +++++ .../lama/RefinedHyperStrategicOptimizerV52.py | 80 ++++++ .../lama/RefinedHyperStrategicOptimizerV55.py | 82 ++++++ ...ptiveMultiStrategyDifferentialEvolution.py | 173 +++++++++++++ ...iveParticleSwarmDifferentialEvolutionV2.py | 139 +++++++++++ ...ovedDynamicHybridDEPSOWithEliteMemoryV4.py | 167 +++++++++++++ .../lama/RefinedInertiaFocalOptimizer.py | 70 ++++++ ...dIntelligentEvolvingAdaptiveStrategyV35.py | 75 ++++++ .../RefinedIslandEvolutionStrategyV10Plus.py | 109 ++++++++ .../lama/RefinedIslandEvolutionStrategyV2.py | 98 ++++++++ .../lama/RefinedIslandEvolutionStrategyV6.py | 109 ++++++++ .../lama/RefinedIslandEvolutionStrategyV9.py | 109 ++++++++ .../RefinedMemeticDifferentialEvolution.py | 84 +++++++ .../lama/RefinedMemeticDiverseOptimizer.py | 186 ++++++++++++++ .../lama/RefinedMemeticDiverseOptimizerV4.py | 186 ++++++++++++++ ...inedMemeticQuantumDifferentialOptimizer.py | 157 ++++++++++++ ...nedMemoryAdaptiveDynamicHybridOptimizer.py | 199 +++++++++++++++ .../RefinedMemoryAdaptiveHybridOptimizer.py | 158 ++++++++++++ ...nedMemoryEnhancedDynamicHybridOptimizer.py | 199 +++++++++++++++ .../RefinedMemoryEnhancedHybridOptimizerV2.py | 166 +++++++++++++ ...emoryGuidedAdaptiveDualPhaseStrategyV72.py | 84 +++++++ .../RefinedMemoryGuidedHybridStrategyV63.py | 69 +++++ .../optimization/lama/RefinedMetaNetAQAPSO.py | 123 +++++++++ ...inedMultiFocalAdaptiveElitistStrategyV4.py | 83 +++++++ ...efinedMultiOperatorAdaptiveOptimization.py | 163 ++++++++++++ .../RefinedMultiPhaseAdaptiveHybridDEPSO.py | 187 ++++++++++++++ .../lama/RefinedMultiStageAdaptiveSearch.py | 74 ++++++ ...finedMultiStrategyDifferentialEvolution.py | 156 ++++++++++++ .../RefinedMultiStrategySelfAdaptiveDE.py | 114 +++++++++ ...MultiStrategySwarmDifferentialEvolution.py | 58 +++++ ...NicheDifferentialParticleSwarmOptimizer.py | 149 +++++++++++ ...inedOptimalDynamicPrecisionOptimizerV15.py | 62 +++++ .../lama/RefinedOptimalEnhancedRAMEDS.py | 85 +++++++ ...OptimalEvolutionaryGradientOptimizerV12.py | 79 ++++++ ...edDualPhaseAdaptiveHybridOptimizationV5.py | 146 +++++++++++ ...GradientBoostedMemorySimulatedAnnealing.py | 141 +++++++++++ ...OptimizedEnhancedDualStrategyAdaptiveDE.py | 127 ++++++++++ ...zedHybridAdaptiveMultiStageOptimization.py | 139 +++++++++++ .../lama/RefinedPrecisionAdaptivePSO.py | 52 ++++ ...dPrecisionEnhancedDualStrategyOptimizer.py | 73 ++++++ ...PrecisionEnhancedSpatialAdaptiveEvolver.py | 86 +++++++ ...edPrecisionEvolutionaryThermalOptimizer.py | 58 +++++ ...ecisionTunedCrossoverElitistStrategyV12.py | 82 ++++++ ...nedProgressiveParticleSwarmOptimization.py | 65 +++++ ...finedProgressiveQuorumEvolutionStrategy.py | 67 +++++ ...finedQuadraticAdaptiveEvolutionStrategy.py | 72 ++++++ ...dQuantumAdaptiveExplorationOptimization.py | 212 ++++++++++++++++ ...RefinedQuantumAdaptiveHybridOptimizerV4.py | 81 ++++++ .../RefinedQuantumAdaptiveHybridSearchV3.py | 104 ++++++++ ...nedQuantumAdaptiveLevySwarmOptimization.py | 160 ++++++++++++ ...RefinedQuantumAdaptiveMultiPopulationDE.py | 178 +++++++++++++ .../lama/RefinedQuantumAdaptiveOptimizerV2.py | 86 +++++++ ...RefinedQuantumAdaptiveVelocityOptimizer.py | 78 ++++++ ...ntumCognitionAdaptiveTuningOptimizerV15.py | 77 ++++++ ...finedQuantumCognitionHybridOptimizerV22.py | 84 +++++++ .../RefinedQuantumCognitionOptimizerV13.py | 78 ++++++ .../RefinedQuantumCognitionOptimizerV4.py | 90 +++++++ ...CovarianceMatrixDifferentialEvolutionV4.py | 194 +++++++++++++++ ...utionWithAdaptiveHybridSearchAndElitism.py | 152 +++++++++++ ...fferentialEvolutionWithAdaptiveLearning.py | 166 +++++++++++++ ...nWithAdaptiveMemoryAndHybridLocalSearch.py | 183 ++++++++++++++ ...EvolutionWithAdaptiveRestartsAndElitism.py | 138 ++++++++++ ...inedQuantumDifferentialMemeticOptimizer.py | 156 ++++++++++++ ...ifferentialParticleOptimizerWithElitism.py | 119 +++++++++ ...inedQuantumEnhancedAdaptiveMultiPhaseDE.py | 136 ++++++++++ ...dQuantumEnhancedAdaptiveMultiPhaseDE_v2.py | 136 ++++++++++ ...umEnhancedDynamicAdaptiveHybridDEPSO_V6.py | 160 ++++++++++++ .../lama/RefinedQuantumEnhancedHybridDEPSO.py | 162 ++++++++++++ .../RefinedQuantumEvolutionaryAdaptation.py | 59 +++++ ...nedQuantumEvolutionaryAdaptiveOptimizer.py | 80 ++++++ .../RefinedQuantumFluxDifferentialSwarm.py | 60 +++++ ...GradientAdaptiveExplorationOptimization.py | 222 +++++++++++++++++ .../lama/RefinedQuantumGradientSearch.py | 88 +++++++ .../RefinedQuantumGuidedHybridSearchV6.py | 90 +++++++ .../RefinedQuantumGuidedHybridSearchV8.py | 90 +++++++ .../RefinedQuantumHybridAdaptiveStrategyV3.py | 67 +++++ .../RefinedQuantumHybridDynamicAdaptiveDE.py | 184 ++++++++++++++ .../RefinedQuantumHybridEliteAdaptiveDE.py | 192 ++++++++++++++ ...nedQuantumInfluenceLocalSearchOptimizer.py | 86 +++++++ ...QuantumInformedAdaptiveInertiaOptimizer.py | 74 ++++++ .../lama/RefinedQuantumInformedAdaptivePSO.py | 71 ++++++ ...edQuantumInformedDifferentialStrategyV2.py | 78 ++++++ ...RefinedQuantumInformedGradientOptimizer.py | 85 +++++++ .../lama/RefinedQuantumInformedPSO.py | 79 ++++++ ...RefinedQuantumInfusedAdaptiveStrategyV2.py | 82 ++++++ ...QuantumLevyMemeticDifferentialEvolution.py | 132 ++++++++++ ...RefinedQuantumMultiStrategyOptimization.py | 184 ++++++++++++++ .../lama/RefinedQuantumNesterovSynergyV2.py | 68 +++++ ...efinedQuantumResilientCrossoverEnhancer.py | 70 ++++++ .../lama/RefinedQuantumSwarmOptimizer.py | 76 ++++++ .../lama/RefinedQuantumSymbioticStrategyV2.py | 77 ++++++ .../lama/RefinedQuantumSymbioticStrategyV4.py | 75 ++++++ .../RefinedQuantumTunnelingOptimizerV19.py | 75 ++++++ .../optimization/lama/RefinedRAMEDSPro.py | 78 ++++++ .../optimization/lama/RefinedRAMEDSv2.py | 90 +++++++ .../lama/RefinedSpatialAdaptiveOptimizer.py | 97 ++++++++ .../lama/RefinedSpiralSearchOptimizer.py | 56 +++++ .../RefinedStochasticBalancingOptimizer.py | 81 ++++++ ...dStrategicAdaptiveDifferentialEvolution.py | 65 +++++ .../RefinedStrategicDiminishingEvolver.py | 70 ++++++ ...finedStrategicQuorumWithDirectionalBias.py | 77 ++++++ .../lama/RefinedSuperiorAdaptiveStrategyDE.py | 68 +++++ ...edTemporalAdaptiveDifferentialEvolution.py | 52 ++++ ...finedUltimateEnhancedGuidedMassQGSA_v71.py | 124 +++++++++ ...ltimateEvolutionaryGradientOptimizerV16.py | 85 +++++++ ...ltimateEvolutionaryGradientOptimizerV17.py | 85 +++++++ ...ltimateEvolutionaryGradientOptimizerV34.py | 81 ++++++ .../RefinedUltimateEvolutionaryOptimizer.py | 62 +++++ ...timatePrecisionEvolutionaryOptimizerV42.py | 82 ++++++ ...ancedEliteAdaptiveMemoryHybridOptimizer.py | 174 +++++++++++++ ...edUltraEvolutionaryGradientOptimizerV28.py | 79 ++++++ ...raOptimizedDynamicPrecisionOptimizerV20.py | 60 +++++ ...timizedEvolutionaryGradientOptimizerV31.py | 79 ++++++ .../lama/RefinedUltraRefinedRAMEDS.py | 74 ++++++ .../lama/RefinementEnhancedHybridOptimizer.py | 130 ++++++++++ .../RefinementSelectiveCohortOptimization.py | 64 +++++ .../optimization/lama/RefinementTunedPSO.py | 81 ++++++ .../optimization/lama/ResilientAdaptivePSO.py | 57 +++++ .../ResponsiveAdaptiveMemoryStrategyV52.py | 99 ++++++++ .../lama/ResponsiveAdaptiveStrategyV27.py | 79 ++++++ ...RestartAdaptiveDifferentialEvolutionPSO.py | 109 ++++++++ ...edDifferentialEvolutionLSRefinement_v20.py | 85 +++++++ .../lama/RevolutionaryFireworkAlgorithm.py | 77 ++++++ .../RobustAdaptiveDifferentialEvolution.py | 139 +++++++++++ ...obustAdaptiveMemoryLeveragedStrategyV43.py | 83 +++++++ ...CovarianceMatrixAdaptationMemeticSearch.py | 102 ++++++++ nevergrad/optimization/lama/SADE.py | 64 +++++ nevergrad/optimization/lama/SADEEM.py | 64 +++++ nevergrad/optimization/lama/SADEIOL.py | 79 ++++++ nevergrad/optimization/lama/SADEPF.py | 61 +++++ nevergrad/optimization/lama/SAGEA.py | 59 +++++ nevergrad/optimization/lama/SGAE.py | 76 ++++++ nevergrad/optimization/lama/SGE.py | 71 ++++++ nevergrad/optimization/lama/SORAMED.py | 85 +++++++ .../lama/ScaledHybridDifferentialEvolution.py | 56 +++++ ...fAdaptingDifferentialEvolutionOptimizer.py | 58 +++++ ...veCovarianceMatrixDifferentialEvolution.py | 115 +++++++++ .../lama/SelfAdaptiveDifferentialEvolution.py | 79 ++++++ ...veDifferentialEvolutionWithLocalRestart.py | 101 ++++++++ ...eDifferentialEvolutionWithMemeticSearch.py | 128 ++++++++++ ...daptiveDifferentialEvolutionWithRestart.py | 93 +++++++ ...lfAdaptiveDifferentialSwarmOptimization.py | 97 ++++++++ .../lama/SelfAdaptiveEvolutionaryAlgorithm.py | 60 +++++ .../lama/SelfAdaptiveHybridOptimizer.py | 130 ++++++++++ .../SelfAdaptiveInterleavedOptimization.py | 105 ++++++++ .../lama/SelfAdaptiveMemeticAlgorithmV2.py | 109 ++++++++ ...elfAdaptiveMemeticEvolutionaryAlgorithm.py | 109 ++++++++ ...fAdaptiveOppositionBasedHarmonySearchDE.py | 107 ++++++++ .../SelfAdaptiveQuantumMemeticAlgorithm.py | 118 +++++++++ ...SequentialAdaptiveDifferentialEvolution.py | 45 ++++ ...ntialQuadraticAdaptiveEvolutionStrategy.py | 63 +++++ .../SequentialQuadraticExploitationSearch.py | 73 ++++++ nevergrad/optimization/lama/SimpleHybridDE.py | 73 ++++++ ...fiedAdaptiveDynamicDualPhaseStrategyV18.py | 70 ++++++ .../lama/SimulatedAnnealingOptimizer.py | 41 +++ .../lama/SpiralSearchOptimizer.py | 52 ++++ .../StabilizedQuantumCognitionOptimizerV11.py | 78 ++++++ .../StabilizedQuantumConcentricOptimizer.py | 65 +++++ ...lizedRefinedEnhancedDynamicBalancingPSO.py | 64 +++++ ...StochasticAdaptiveEvolutionaryOptimizer.py | 97 ++++++++ .../lama/StochasticBalancingOptimizer.py | 78 ++++++ .../lama/StochasticGradientEnhancedDE.py | 99 ++++++++ .../lama/StochasticGradientExploration.py | 66 +++++ .../StochasticGradientHybridOptimization.py | 54 ++++ .../StochasticGradientQuorumOptimization.py | 75 ++++++ .../StrategicAdaptiveDifferentialEvolution.py | 69 +++++ .../lama/StrategicDifferentialEvolution.py | 57 +++++ .../StrategicDiminishingAdaptiveEvolver.py | 69 +++++ .../optimization/lama/StrategicHybridDE.py | 68 +++++ ...trategicMultiPhaseEvolutionaryAlgorithm.py | 93 +++++++ ...rategicQuorumMutationWithAdaptiveElites.py | 72 ++++++ .../lama/StrategicResilienceAdaptiveSearch.py | 72 ++++++ .../SuperDynamicQuantumSwarmOptimization.py | 91 +++++++ ...DynamicQuantumSwarmOptimizationImproved.py | 93 +++++++ .../optimization/lama/SuperOptimizedRAMEDS.py | 89 +++++++ .../optimization/lama/SuperRefinedRAMEDSv5.py | 82 ++++++ ...rgedEnhancedAQAPSO_LS_DIW_AP_Refined_V5.py | 85 +++++++ ...ncedAdvancedQuantumSwarmOptimizationV16.py | 95 +++++++ .../lama/SuperiorAdaptiveStrategyDE.py | 69 +++++ ...riorEnhancedDynamicPrecisionOptimizerV1.py | 61 +++++ ...iorHybridEvolutionaryAnnealingOptimizer.py | 56 +++++ .../lama/SuperiorOptimalEnhancedStrategyDE.py | 65 +++++ ...RefinedEvolutionaryGradientOptimizerV13.py | 81 ++++++ .../lama/SupremeDynamicAdaptiveOptimizerV5.py | 59 +++++ .../SupremeDynamicPrecisionOptimizerV1.py | 61 +++++ .../SupremeDynamicPrecisionOptimizerV2.py | 57 +++++ ...meEvolutionaryGradientHybridOptimizerV6.py | 78 ++++++ ...alPrecisionEvolutionaryThermalOptimizer.py | 57 +++++ ...premeUltraEnhancedEvolutionaryOptimizer.py | 60 +++++ .../TemporalAdaptiveDifferentialEvolution.py | 51 ++++ .../lama/TurbochargedDifferentialEvolution.py | 72 ++++++ .../lama/UltimateDynamicFireworkAlgorithm.py | 98 ++++++++ ...ltimateDynamicFireworkAlgorithmImproved.py | 96 +++++++ ...RefinedEvolutionaryGradientOptimizerV19.py | 82 ++++++ ...ltimateEvolutionaryGradientOptimizerV15.py | 86 +++++++ ...ltimateEvolutionaryGradientOptimizerV26.py | 81 ++++++ ...ltimateEvolutionaryGradientOptimizerV33.py | 79 ++++++ .../lama/UltimateEvolutionaryOptimizer.py | 60 +++++ .../lama/UltimateRefinedAQAPSO_LS_DIW_AP.py | 84 +++++++ ...efinedPrecisionEvolutionaryOptimizerV41.py | 80 ++++++ ...RefinedEvolutionaryGradientOptimizerV18.py | 85 +++++++ .../lama/UltraDynamicAdaptiveRAMEDS.py | 92 +++++++ ...traDynamicDualPhaseOptimizedStrategyV16.py | 88 +++++++ ...nhancedAdaptiveMemoryHybridOptimizerV10.py | 181 ++++++++++++++ ...nhancedAdaptiveMemoryHybridOptimizerV11.py | 184 ++++++++++++++ ...nhancedAdaptiveMemoryHybridOptimizerV12.py | 190 ++++++++++++++ ...EnhancedAdaptiveMemoryHybridOptimizerV2.py | 174 +++++++++++++ ...EnhancedAdaptiveMemoryHybridOptimizerV3.py | 174 +++++++++++++ ...EnhancedAdaptiveMemoryHybridOptimizerV4.py | 172 +++++++++++++ ...EnhancedAdaptiveMemoryHybridOptimizerV7.py | 172 +++++++++++++ .../lama/UltraEnhancedAdaptiveRAMEDS.py | 88 +++++++ .../lama/UltraEnhancedDynamicDE.py | 69 +++++ ...ancedEliteAdaptiveMemoryHybridOptimizer.py | 174 +++++++++++++ ...nhancedEvolutionaryGradientOptimizerV14.py | 84 +++++++ ...aEnhancedPrecisionEvolutionaryOptimizer.py | 60 +++++ .../UltraEvolutionaryGradientOptimizerV27.py | 81 ++++++ .../UltraFineSpiralDifferentialOptimizerV7.py | 76 ++++++ .../UltraFineTunedEvolutionaryOptimizer.py | 59 +++++ .../UltraFineTunedEvolutionaryOptimizerV24.py | 79 ++++++ ...raOptimizedDynamicPrecisionOptimizerV18.py | 60 +++++ ...raOptimizedDynamicPrecisionOptimizerV19.py | 62 +++++ ...raOptimizedDynamicPrecisionOptimizerV52.py | 59 +++++ ...raOptimizedDynamicPrecisionOptimizerV53.py | 59 +++++ ...timizedEvolutionaryGradientOptimizerV30.py | 79 ++++++ ...dPrecisionAdaptiveEvolutionaryOptimizer.py | 57 +++++ .../optimization/lama/UltraOptimizedRAMEDS.py | 73 ++++++ ...traOptimizedSpiralDifferentialEvolution.py | 57 +++++ .../lama/UltraPreciseDynamicOptimizerV26.py | 61 +++++ ...aPrecisionSpiralDifferentialOptimizerV9.py | 75 ++++++ .../UltraQuantumReactiveHybridStrategy.py | 94 +++++++ nevergrad/optimization/lama/UltraRAMEDS.py | 86 +++++++ ...UltraRefinedAdaptiveConvergenceStrategy.py | 68 +++++ ...aRefinedAdaptiveMemoryHybridOptimizerV5.py | 172 +++++++++++++ ...aRefinedAdaptiveMemoryHybridOptimizerV6.py | 172 +++++++++++++ ...aRefinedAdaptiveMemoryHybridOptimizerV8.py | 181 ++++++++++++++ ...aRefinedAdaptiveMemoryHybridOptimizerV9.py | 181 ++++++++++++++ .../UltraRefinedAdaptivePrecisionOptimizer.py | 60 +++++ .../lama/UltraRefinedAdaptiveRAMEDS.py | 88 +++++++ .../UltraRefinedConvergenceSpiralSearch.py | 82 ++++++ ...ltraRefinedDynamicPrecisionOptimizerV10.py | 60 +++++ ...ltraRefinedDynamicPrecisionOptimizerV11.py | 59 +++++ ...ltraRefinedDynamicPrecisionOptimizerV17.py | 62 +++++ ...ltraRefinedDynamicPrecisionOptimizerV22.py | 58 +++++ ...ltraRefinedDynamicPrecisionOptimizerV23.py | 59 +++++ ...ltraRefinedDynamicPrecisionOptimizerV24.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV25.py | 61 +++++ ...ltraRefinedDynamicPrecisionOptimizerV26.py | 58 +++++ ...ltraRefinedDynamicPrecisionOptimizerV27.py | 59 +++++ ...ltraRefinedDynamicPrecisionOptimizerV28.py | 55 ++++ ...ltraRefinedDynamicPrecisionOptimizerV29.py | 59 +++++ ...ltraRefinedDynamicPrecisionOptimizerV30.py | 61 +++++ ...ltraRefinedDynamicPrecisionOptimizerV31.py | 61 +++++ ...ltraRefinedDynamicPrecisionOptimizerV32.py | 60 +++++ ...ltraRefinedDynamicPrecisionOptimizerV33.py | 61 +++++ ...ltraRefinedDynamicPrecisionOptimizerV34.py | 55 ++++ ...ltraRefinedDynamicPrecisionOptimizerV35.py | 58 +++++ ...ltraRefinedDynamicPrecisionOptimizerV36.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV37.py | 61 +++++ ...ltraRefinedDynamicPrecisionOptimizerV38.py | 63 +++++ ...ltraRefinedDynamicPrecisionOptimizerV39.py | 55 ++++ ...UltraRefinedDynamicPrecisionOptimizerV4.py | 59 +++++ ...ltraRefinedDynamicPrecisionOptimizerV40.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV41.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV44.py | 58 +++++ ...ltraRefinedDynamicPrecisionOptimizerV45.py | 58 +++++ ...ltraRefinedDynamicPrecisionOptimizerV46.py | 56 +++++ ...ltraRefinedDynamicPrecisionOptimizerV47.py | 54 ++++ ...UltraRefinedDynamicPrecisionOptimizerV5.py | 60 +++++ ...ltraRefinedDynamicPrecisionOptimizerV54.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV55.py | 57 +++++ ...ltraRefinedDynamicPrecisionOptimizerV56.py | 57 +++++ ...UltraRefinedDynamicPrecisionOptimizerV9.py | 58 +++++ ...eAdaptiveMemoryDynamicCrowdingOptimizer.py | 203 +++++++++++++++ ...edEvolutionaryGradientHybridOptimizerV5.py | 82 ++++++ ...RefinedEvolutionaryGradientOptimizerV10.py | 76 ++++++ ...RefinedEvolutionaryGradientOptimizerV32.py | 81 ++++++ ...nedHybridEvolutionaryAnnealingOptimizer.py | 57 +++++ .../UltraRefinedHyperStrategicOptimizerV50.py | 78 ++++++ .../UltraRefinedHyperStrategicOptimizerV54.py | 80 ++++++ ...efinedPrecisionEvolutionaryOptimizerV43.py | 80 ++++++ .../optimization/lama/UltraRefinedRAMEDS.py | 86 +++++++ ...UltraRefinedSpiralDifferentialClimberV3.py | 72 ++++++ ...efinedStrategicEvolutionaryOptimizerV60.py | 81 ++++++ .../lama/UltraRefinedStrategyDE.py | 66 +++++ ...meEvolutionaryGradientHybridOptimizerV7.py | 80 ++++++ .../lama/UnifiedAdaptiveMemeticOptimizer.py | 155 ++++++++++++ .../lama/VectorizedRefinedSpiralSearch.py | 52 ++++ nevergrad/optimization/lama/eQGSA_v2.py | 58 +++++ 3763 files changed, 378874 insertions(+) create mode 100644 nevergrad/optimization/lama/AADCCS.py create mode 100644 nevergrad/optimization/lama/AADEHLS.py create mode 100644 nevergrad/optimization/lama/AADMEM.py create mode 100644 nevergrad/optimization/lama/AAES.py create mode 100644 nevergrad/optimization/lama/ACDE.py create mode 100644 nevergrad/optimization/lama/ACMDEOBD.py create mode 100644 nevergrad/optimization/lama/ADAEDA.py create mode 100644 nevergrad/optimization/lama/ADCE.py create mode 100644 nevergrad/optimization/lama/ADEA.py create mode 100644 nevergrad/optimization/lama/ADEAS.py create mode 100644 nevergrad/optimization/lama/ADECMS.py create mode 100644 nevergrad/optimization/lama/ADEDCA.py create mode 100644 nevergrad/optimization/lama/ADEDE.py create mode 100644 nevergrad/optimization/lama/ADEDLR.py create mode 100644 nevergrad/optimization/lama/ADEDM.py create mode 100644 nevergrad/optimization/lama/ADEEM.py create mode 100644 nevergrad/optimization/lama/ADEGE.py create mode 100644 nevergrad/optimization/lama/ADEGM.py create mode 100644 nevergrad/optimization/lama/ADEGS.py create mode 100644 nevergrad/optimization/lama/ADEM.py create mode 100644 nevergrad/optimization/lama/ADEMSC.py create mode 100644 nevergrad/optimization/lama/ADEPF.py create mode 100644 nevergrad/optimization/lama/ADEPM.py create mode 100644 nevergrad/optimization/lama/ADEPMC.py create mode 100644 nevergrad/optimization/lama/ADEPMI.py create mode 100644 nevergrad/optimization/lama/ADEPR.py create mode 100644 nevergrad/optimization/lama/ADES.py create mode 100644 nevergrad/optimization/lama/ADESA.py create mode 100644 nevergrad/optimization/lama/ADE_FPC.py create mode 100644 nevergrad/optimization/lama/ADGD.py create mode 100644 nevergrad/optimization/lama/ADGE.py create mode 100644 nevergrad/optimization/lama/ADMDE.py create mode 100644 nevergrad/optimization/lama/ADMEMS.py create mode 100644 nevergrad/optimization/lama/ADSDiffEvo.py create mode 100644 nevergrad/optimization/lama/ADSEA.py create mode 100644 nevergrad/optimization/lama/ADSEAPlus.py create mode 100644 nevergrad/optimization/lama/AGBES.py create mode 100644 nevergrad/optimization/lama/AGCES.py create mode 100644 nevergrad/optimization/lama/AGDE.py create mode 100644 nevergrad/optimization/lama/AGDELS.py create mode 100644 nevergrad/optimization/lama/AGDiffEvo.py create mode 100644 nevergrad/optimization/lama/AGEA.py create mode 100644 nevergrad/optimization/lama/AGESA.py create mode 100644 nevergrad/optimization/lama/AGGE.py create mode 100644 nevergrad/optimization/lama/AGGES.py create mode 100644 nevergrad/optimization/lama/AGIDE.py create mode 100644 nevergrad/optimization/lama/AHDEMI.py create mode 100644 nevergrad/optimization/lama/ALDEEM.py create mode 100644 nevergrad/optimization/lama/ALES.py create mode 100644 nevergrad/optimization/lama/ALSS.py create mode 100644 nevergrad/optimization/lama/AMDE.py create mode 100644 nevergrad/optimization/lama/AMES.py create mode 100644 nevergrad/optimization/lama/AMSDiffEvo.py create mode 100644 nevergrad/optimization/lama/AMSEA.py create mode 100644 nevergrad/optimization/lama/AN_MDEPSO.py create mode 100644 nevergrad/optimization/lama/APBES.py create mode 100644 nevergrad/optimization/lama/APDE.py create mode 100644 nevergrad/optimization/lama/APDETL.py create mode 100644 nevergrad/optimization/lama/APES.py create mode 100644 nevergrad/optimization/lama/AQAPSO_LS_DIW.py create mode 100644 nevergrad/optimization/lama/AQAPSO_LS_DIW_AP.py create mode 100644 nevergrad/optimization/lama/ARDLS.py create mode 100644 nevergrad/optimization/lama/ARESM.py create mode 100644 nevergrad/optimization/lama/ARISA.py create mode 100644 nevergrad/optimization/lama/ASADEA.py create mode 100644 nevergrad/optimization/lama/ASO.py create mode 100644 nevergrad/optimization/lama/AVDE.py create mode 100644 nevergrad/optimization/lama/AcceleratedAdaptivePrecisionCrossoverEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveAnnealingDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveArchiveDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveCMADiffEvoPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveChaoticFireworksOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveClusterBasedHybridOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveClusterHybridOptimizationV5.py create mode 100644 nevergrad/optimization/lama/AdaptiveClusteredDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveCohortHarmonizationOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveCohortMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveControlledMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveCooperativeDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveCooperativeDifferentialMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceGradientSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptation.py create mode 100644 nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveCrossoverDEPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveCrossoverElitistStrategyV6.py create mode 100644 nevergrad/optimization/lama/AdaptiveCrossoverSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalCooperativeSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalDifferentialMemeticEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveCulturalMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDEPSOOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDEWithElitismAndLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDEWithOrthogonalCrossover.py create mode 100644 nevergrad/optimization/lama/AdaptiveDecayOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialCrossover.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithAdaptivePerturbation.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithBayesianLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithDynamicPopulationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGradientBoost.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGuidedSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithMemeticSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithSurrogateAssistance.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialQuantumEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialQuantumMetaheuristic.py create mode 100644 nevergrad/optimization/lama/AdaptiveDifferentialSpiralSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDimensionalClimbingEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveDimensionalCrossoverEvolver.py create mode 100644 nevergrad/optimization/lama/AdaptiveDirectionalBiasQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveDirectionalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDivergenceClusteringSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiverseHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversifiedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversifiedSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversityDifferentialHybrid.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversityDifferentialMemeticHybrid.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversityMaintainedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversityMaintainingGradientEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDiversityPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveDolphinPodOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualPhaseDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualPhaseEvolutionarySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualPhaseOptimizationWithDynamicParameterControl.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualPhaseStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualPopulationDE_LS.py create mode 100644 nevergrad/optimization/lama/AdaptiveDualStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicDualPhaseEnhancedStrategyV20.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicDualPhaseStrategyV11.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithmRedesigned.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicFireworkDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteCovarianceMatrixMemeticSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteDiverseHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedDE_LS_v2.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedDE_v2.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v3.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v4.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteGuidedRestartDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV5.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV6.py create mode 100644 nevergrad/optimization/lama/AdaptiveEliteMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveElitistDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveElitistDE_v3.py create mode 100644 nevergrad/optimization/lama/AdaptiveElitistMutationDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveElitistPopulationStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveElitistQuasiRandomDEGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch_v2.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedExplorationGravitationalSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGradientGuidedHybridPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV18.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV22.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV29.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV33.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedHarmonicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch_v2.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv10.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv11.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseOptimizationAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedQGSA_v7.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28.py create mode 100644 nevergrad/optimization/lama/AdaptiveEnsembleMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialPopulationStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveEvolutionaryFireworksSearch_v1.py create mode 100644 nevergrad/optimization/lama/AdaptiveEvolutionaryGradientSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveExplorationEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveExplorationExploitationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveExplorationExploitationHybridAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveExploratoryOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveFeedbackControlStrategyV61.py create mode 100644 nevergrad/optimization/lama/AdaptiveFeedbackEnhancedMemoryStrategyV71.py create mode 100644 nevergrad/optimization/lama/AdaptiveFireworkAlgorithmEnhanced.py create mode 100644 nevergrad/optimization/lama/AdaptiveFireworkAlgorithmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveFireworksEnhancedHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveFocusedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveFuzzyDynamicDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveGaussianSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveGlobalLocalSearchStrategyV62.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientAssistedEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBalancedCrossoverPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBalancedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryExploration.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientClusteringEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientCrossoverOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionEnhanced.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientDifferentialHybrid.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientEnhancedExplorationPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientEnhancedMultiPhaseAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientExploration.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientExplorationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientGuidedEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientInformedPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientSampling.py create mode 100644 nevergrad/optimization/lama/AdaptiveGradientSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV15.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV26.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV4.py create mode 100644 nevergrad/optimization/lama/AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py create mode 100644 nevergrad/optimization/lama/AdaptiveGuidedCulturalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveGuidedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveGuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveGuidedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveGuidedMutationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV12.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV17.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV20.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV8.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithmV15.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV27.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyMemeticSearchV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithCuckooInspiration.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlight.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithLevyFlightImprovement.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationImproved.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonySearchWithSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveHarmonyTabuOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridAnnealingWithGradientBoost.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridAnnealingWithMemoryRefinement.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridCMAESDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridCulturalOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridDEPSOWithDynamicRestart.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridDEWithIntensifiedLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridEvolutionStrategyV5.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridGradientAnnealingWithVariableMemory.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridMetaOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridQuasiRandomGradientDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridRecombinativeStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveHybridSwarmEvolutionOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveHyperQuantumStateCrossoverOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveIncrementalCrossoverEnhancement.py create mode 100644 nevergrad/optimization/lama/AdaptiveInertiaHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveInertiaParticleOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveInertiaParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveLearningDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveLocalSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveLocalSearchQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV5.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV6.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV7.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialQuantumSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticDiverseOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticEvolutionarySearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimizationV5.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemeticParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryAssistedStrategyV41.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryEnhancedDualStrategyV45.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryEnhancedStrategyV42.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingWithExplorationBoost.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryGradientSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryGuidedEvolutionStrategyV57.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryHybridAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO_V2.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemoryParticleDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemorySelfTuningStrategyV60.py create mode 100644 nevergrad/optimization/lama/AdaptiveMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveMetaNetAQAPSOv13.py create mode 100644 nevergrad/optimization/lama/AdaptiveMetaNetPSO_v3.py create mode 100644 nevergrad/optimization/lama/AdaptiveMetaNetPSOv3.py create mode 100644 nevergrad/optimization/lama/AdaptiveMetaheuristicOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMomentumOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiExplorationAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiOperatorDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealingV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiPhaseOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiPopulationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyDEWithMemory.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveNicheDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveNichingDE_PSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolutionImproved.py create mode 100644 nevergrad/optimization/lama/AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveOrthogonalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveOscillatoryCrossoverDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveParticleDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptivePerturbationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptivePopulationDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py create mode 100644 nevergrad/optimization/lama/AdaptivePopulationMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptivePopulationResizingOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionCohortOptimizationV3.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionControlDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionCrossoverEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionDivideSearch.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionDynamicMemoryStrategyV48.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionFocalStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionHybridSearch.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionMemoryStrategyV47.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionRotationalClimbOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionSearch.py create mode 100644 nevergrad/optimization/lama/AdaptivePrecisionStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQGSA.py create mode 100644 nevergrad/optimization/lama/AdaptiveQGSA_EC.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumAnnealingDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumAnnealingDEv2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumCognitionOptimizerV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumCrossoverOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDiversityEnhancerV7.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumDynamicTuningOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumEliteDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumEliteMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumEntropyDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumEvolvedDiversityExplorerV15.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedMemeticSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientEnhancedOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumHarmonizedPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumHybridSearchV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumInfluencedMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumInformedDifferentialStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumInformedGradientEnhancer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLeapOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialEnhancedOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyEnhancedDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLevyTreeOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumLocalSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticGradientBoost.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerPlus.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV3.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumMetaheuristic.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumPSOEnhanced.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumParticleDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumResonanceOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizerV2.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuantumSymbioticStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuasiGradientEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuasiRandomEnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuasiRandomGradientDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveQuorumWithStrategicMutation.py create mode 100644 nevergrad/optimization/lama/AdaptiveRefinedGradientBoostedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveRefinedHybridPSO_DE.py create mode 100644 nevergrad/optimization/lama/AdaptiveRefinementEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveRefinementPSO.py create mode 100644 nevergrad/optimization/lama/AdaptiveRefinementSearchStrategyV30.py create mode 100644 nevergrad/optimization/lama/AdaptiveResilientQuantumCrossoverStrategy.py create mode 100644 nevergrad/optimization/lama/AdaptiveRestartDE.py create mode 100644 nevergrad/optimization/lama/AdaptiveRestartHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveRotationalClimbOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveSigmaCrossoverEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdaptiveSimulatedAnnealingSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveSimulatedAnnealingWithSmartMemory.py create mode 100644 nevergrad/optimization/lama/AdaptiveSineCosineDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveSinusoidalDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/AdaptiveSpatialExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveSpiralGradientSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveStepSearch.py create mode 100644 nevergrad/optimization/lama/AdaptiveStochasticGradientQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveStochasticHybridEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveStochasticTunneling.py create mode 100644 nevergrad/optimization/lama/AdaptiveStrategicExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdaptiveSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdaptiveSwarmGradientOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveSwarmHarmonicOptimizationV4.py create mode 100644 nevergrad/optimization/lama/AdaptiveSwarmHybridOptimization.py create mode 100644 nevergrad/optimization/lama/AdaptiveThresholdDifferentialStrategy.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveDualPhaseStrategy.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveDynamicMemoryStrategyV64.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveExplorationExploitationAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveExplorationOptimizationAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveGlobalClimbingOptimizerV6.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveGradientBoostedMemoryExploration.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV56.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV73.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveMemoryGuidedStrategyV77.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveQuantumEntropyDE.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV1.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/AdvancedAdaptiveStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedAttenuatedAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/AdvancedBalancedAdaptiveElitistStrategyV2.py create mode 100644 nevergrad/optimization/lama/AdvancedBalancedExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRate.py create mode 100644 nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2.py create mode 100644 nevergrad/optimization/lama/AdvancedDifferentialParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedDimensionalCyclicCrossoverEvolver.py create mode 100644 nevergrad/optimization/lama/AdvancedDimensionalFeedbackEvolver.py create mode 100644 nevergrad/optimization/lama/AdvancedDiversityAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedDiversityDE.py create mode 100644 nevergrad/optimization/lama/AdvancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedDualStrategyHybridDE.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicCrowdedDE.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicDualPhaseStrategyV37.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicHybridOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicMultimodalSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedDynamicStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedEliteAdaptiveCrowdingHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedEliteDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedAdaptiveMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedDifferentialEvolutionLocalSearch_v55.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedEnhancedGuidedMassQGSA_v69.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedGuidedMassQGSA_v65.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizerV16.py create mode 100644 nevergrad/optimization/lama/AdvancedExplorativeConvergenceEnhancer.py create mode 100644 nevergrad/optimization/lama/AdvancedFireworkAlgorithmWithAdaptiveMutation.py create mode 100644 nevergrad/optimization/lama/AdvancedFocusedAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedGlobalClimbingOptimizerV4.py create mode 100644 nevergrad/optimization/lama/AdvancedGlobalStructureAwareOptimizerV3.py create mode 100644 nevergrad/optimization/lama/AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration.py create mode 100644 nevergrad/optimization/lama/AdvancedGradientEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/AdvancedGradientEvolutionStrategyV2.py create mode 100644 nevergrad/optimization/lama/AdvancedHarmonyMemeticOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedHarmonySearch.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridAdaptiveOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridCovarianceMatrixDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridDEPSOWithAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridDEPSOWithDynamicAdaptationAndRestart.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridExplorationExploitationOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridLocalOptimizationDE.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridMetaheuristic.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridQuantumAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithAdaptiveMemory.py create mode 100644 nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithGuidedExploration.py create mode 100644 nevergrad/optimization/lama/AdvancedImprovedMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV5.py create mode 100644 nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV8.py create mode 100644 nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV9.py create mode 100644 nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedMemoryAdaptiveStrategyV50.py create mode 100644 nevergrad/optimization/lama/AdvancedMemoryEnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedMemoryGuidedAdaptiveStrategyV68.py create mode 100644 nevergrad/optimization/lama/AdvancedMemoryGuidedDualStrategyV80.py create mode 100644 nevergrad/optimization/lama/AdvancedMultiModalAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedMultiStrategySelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AdvancedNicheDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py create mode 100644 nevergrad/optimization/lama/AdvancedOptimalHybridDifferentialAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedParallelDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdvancedPrecisionEvolver.py create mode 100644 nevergrad/optimization/lama/AdvancedPrecisionGuidedStrategy.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumCognitionTrajectoryOptimizerV29.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumControlledDiversityStrategy.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumCrossoverOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumGradientDescent.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumGradientExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumHarmonicFeedbackOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumInfusedAdaptiveStrategyV3.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumStateCrossoverOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/AdvancedQuantumVelocityOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedRAMEDSv6.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedAdaptiveMemoryEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedGradientBoostedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedHybridEvolutionaryAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedRAMEDSPro.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedSpiralSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedRefinedUltraEvolutionaryGradientOptimizerV29.py create mode 100644 nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v3.py create mode 100644 nevergrad/optimization/lama/AdvancedSpatialAdaptiveConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedSpatialGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/AdvancedStrategicHybridDE.py create mode 100644 nevergrad/optimization/lama/ArchiveEnhancedAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/AttenuatedAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/BalancedAdaptiveMemeticDE.py create mode 100644 nevergrad/optimization/lama/BalancedCulturalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/BalancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/BalancedDynamicQuantumLevySwarm.py create mode 100644 nevergrad/optimization/lama/BalancedQuantumLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/BalancedQuantumLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/BayesianAdaptiveMemeticSearch.py create mode 100644 nevergrad/optimization/lama/CAMSQSOB.py create mode 100644 nevergrad/optimization/lama/CGES.py create mode 100644 nevergrad/optimization/lama/CMADifferentialEvolutionPSO.py create mode 100644 nevergrad/optimization/lama/CMDEALX.py create mode 100644 nevergrad/optimization/lama/ClusterAdaptiveQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/ClusterBasedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ClusteredAdaptiveHybridPSODESimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/ClusteredDifferentialEvolutionWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/CoevolutionaryDualPopulationSearch.py create mode 100644 nevergrad/optimization/lama/CohortDiversityDrivenOptimization.py create mode 100644 nevergrad/optimization/lama/CohortEvolutionWithDynamicSelection.py create mode 100644 nevergrad/optimization/lama/ConcentricConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/ConcentricDiversityStrategy.py create mode 100644 nevergrad/optimization/lama/ConcentricGradientDescentEvolver.py create mode 100644 nevergrad/optimization/lama/ConcentricGradientEnhancedEvolver.py create mode 100644 nevergrad/optimization/lama/ConcentricQuantumCrossoverStrategyV4.py create mode 100644 nevergrad/optimization/lama/ConvergenceAcceleratedSpiralSearch.py create mode 100644 nevergrad/optimization/lama/ConvergentAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/ConvergentAdaptiveEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/CooperativeAdaptiveCulturalSearch.py create mode 100644 nevergrad/optimization/lama/CooperativeAdaptiveEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/CooperativeCulturalAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/CooperativeCulturalDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/CooperativeCulturalEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/CooperativeEvolutionaryGradientSearch.py create mode 100644 nevergrad/optimization/lama/CooperativeParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/CoordinatedAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/CovarianceMatrixAdaptationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/CulturalAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/CulturalGuidedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DADERC.py create mode 100644 nevergrad/optimization/lama/DADESM.py create mode 100644 nevergrad/optimization/lama/DADe.py create mode 100644 nevergrad/optimization/lama/DAEA.py create mode 100644 nevergrad/optimization/lama/DAES.py create mode 100644 nevergrad/optimization/lama/DAESF.py create mode 100644 nevergrad/optimization/lama/DASES.py create mode 100644 nevergrad/optimization/lama/DASOGG.py create mode 100644 nevergrad/optimization/lama/DDCEA.py create mode 100644 nevergrad/optimization/lama/DDPO.py create mode 100644 nevergrad/optimization/lama/DEAMC.py create mode 100644 nevergrad/optimization/lama/DEAMC_DSR.py create mode 100644 nevergrad/optimization/lama/DEAMC_LSI.py create mode 100644 nevergrad/optimization/lama/DEWithNelderMead.py create mode 100644 nevergrad/optimization/lama/DHDGE.py create mode 100644 nevergrad/optimization/lama/DLASS.py create mode 100644 nevergrad/optimization/lama/DMDE.py create mode 100644 nevergrad/optimization/lama/DMDESM.py create mode 100644 nevergrad/optimization/lama/DMES.py create mode 100644 nevergrad/optimization/lama/DNAS.py create mode 100644 nevergrad/optimization/lama/DPADE.py create mode 100644 nevergrad/optimization/lama/DPES.py create mode 100644 nevergrad/optimization/lama/DSDE.py create mode 100644 nevergrad/optimization/lama/DSEDES.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionAdaptiveCrossover.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionHybrid.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionPSOHybrid.py create mode 100644 nevergrad/optimization/lama/DifferentialEvolutionSearch.py create mode 100644 nevergrad/optimization/lama/DifferentialFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/DifferentialGradientEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/DifferentialHarmonySearch.py create mode 100644 nevergrad/optimization/lama/DifferentialMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/DifferentialQuantumMetaheuristic.py create mode 100644 nevergrad/optimization/lama/DifferentialSimulatedAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolution.py create mode 100644 nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolutionV2.py create mode 100644 nevergrad/optimization/lama/DolphinPodOptimization.py create mode 100644 nevergrad/optimization/lama/DualAdaptiveRestartDE.py create mode 100644 nevergrad/optimization/lama/DualAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/DualConvergenceEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/DualModeOptimization.py create mode 100644 nevergrad/optimization/lama/DualPhaseAdaptiveGradientEvolution.py create mode 100644 nevergrad/optimization/lama/DualPhaseAdaptiveHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced.py create mode 100644 nevergrad/optimization/lama/DualPhaseDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DualPhaseOptimizationStrategy.py create mode 100644 nevergrad/optimization/lama/DualPhaseQuantumMemeticSearch.py create mode 100644 nevergrad/optimization/lama/DualPhaseRefinedQuantumLocalSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/DualPopulationADE.py create mode 100644 nevergrad/optimization/lama/DualPopulationAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/DualPopulationCovarianceMatrixGradientSearch.py create mode 100644 nevergrad/optimization/lama/DualPopulationEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/DualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/DualStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DualStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/DualStrategyQuantumEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveClimbingStrategy.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveCohortOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveEliteHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveEnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligenceV2.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveHybridAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveHybridDE.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptivePopulationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveQuasiRandomDEGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicAdaptiveSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicBalancingPSO.py create mode 100644 nevergrad/optimization/lama/DynamicClusterHybridOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicCohortAdaptiveEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicCohortMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicCohortOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicCrowdedDE.py create mode 100644 nevergrad/optimization/lama/DynamicCulturalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicEliteAdaptiveHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/DynamicEliteAnnealingDE.py create mode 100644 nevergrad/optimization/lama/DynamicEliteCovarianceMemeticSearch.py create mode 100644 nevergrad/optimization/lama/DynamicEliteEnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicElitistHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicEnhancedDifferentialFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicEnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicExplorationExploitationAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicExplorationExploitationDE.py create mode 100644 nevergrad/optimization/lama/DynamicExplorationExploitationMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicFireworksSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicFractionalClusterOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealingV2.py create mode 100644 nevergrad/optimization/lama/DynamicGradientBoostedRefinementAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicGradientEnhancedAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicHybridAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicHybridQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicHybridSelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/DynamicLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/DynamicLocalSearchFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/DynamicMemeticDifferentialEvolutionWithAdaptiveElitism.py create mode 100644 nevergrad/optimization/lama/DynamicMemoryAdaptiveConvergenceStrategyV76.py create mode 100644 nevergrad/optimization/lama/DynamicMemoryEnhancedDualPhaseStrategyV66.py create mode 100644 nevergrad/optimization/lama/DynamicMemoryHybridSearch.py create mode 100644 nevergrad/optimization/lama/DynamicMultiPhaseAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/DynamicMultiStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicNichePSO_DE_LS.py create mode 100644 nevergrad/optimization/lama/DynamicNichingDEPSOWithRestart.py create mode 100644 nevergrad/optimization/lama/DynamicPopulationAdaptiveGradientEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicPopulationMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicPrecisionBalancedEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicPrecisionCosineDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/DynamicPrecisionExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicPrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumGuidedHybridSearchV7.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumLevyDifferentialHybridSearch.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumLevyDifferentialSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/DynamicQuantumSwarmOptimizationRefined.py create mode 100644 nevergrad/optimization/lama/DynamicQuasiRandomAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/DynamicRefinedGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicRefinementGradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/DynamicScaleSearch.py create mode 100644 nevergrad/optimization/lama/DynamicSelfAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/DynamicStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/DynamicallyAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EACDE.py create mode 100644 nevergrad/optimization/lama/EADE.py create mode 100644 nevergrad/optimization/lama/EADEA.py create mode 100644 nevergrad/optimization/lama/EADEDM.py create mode 100644 nevergrad/optimization/lama/EADEDMGM.py create mode 100644 nevergrad/optimization/lama/EADEPC.py create mode 100644 nevergrad/optimization/lama/EADEPM.py create mode 100644 nevergrad/optimization/lama/EADEPMC.py create mode 100644 nevergrad/optimization/lama/EADES.py create mode 100644 nevergrad/optimization/lama/EADESC.py create mode 100644 nevergrad/optimization/lama/EADEWM.py create mode 100644 nevergrad/optimization/lama/EADE_FIDM.py create mode 100644 nevergrad/optimization/lama/EADGM.py create mode 100644 nevergrad/optimization/lama/EADMMMS.py create mode 100644 nevergrad/optimization/lama/EADSEA.py create mode 100644 nevergrad/optimization/lama/EADSM.py create mode 100644 nevergrad/optimization/lama/EAMDE.py create mode 100644 nevergrad/optimization/lama/EAMES.py create mode 100644 nevergrad/optimization/lama/EAMSDiffEvo.py create mode 100644 nevergrad/optimization/lama/EAMSEA.py create mode 100644 nevergrad/optimization/lama/EAPBES.py create mode 100644 nevergrad/optimization/lama/EAPDELS.py create mode 100644 nevergrad/optimization/lama/EARESDM.py create mode 100644 nevergrad/optimization/lama/EASO.py create mode 100644 nevergrad/optimization/lama/EDAEA.py create mode 100644 nevergrad/optimization/lama/EDAG.py create mode 100644 nevergrad/optimization/lama/EDASOGG.py create mode 100644 nevergrad/optimization/lama/EDDCEA.py create mode 100644 nevergrad/optimization/lama/EDEAS.py create mode 100644 nevergrad/optimization/lama/EDEPM.py create mode 100644 nevergrad/optimization/lama/EDGB.py create mode 100644 nevergrad/optimization/lama/EDMDESM.py create mode 100644 nevergrad/optimization/lama/EDMRL.py create mode 100644 nevergrad/optimization/lama/EDMS.py create mode 100644 nevergrad/optimization/lama/EDNAS.py create mode 100644 nevergrad/optimization/lama/EDNAS_SAMRA.py create mode 100644 nevergrad/optimization/lama/EDSDiffEvoM.py create mode 100644 nevergrad/optimization/lama/EGBDE.py create mode 100644 nevergrad/optimization/lama/EGGEO.py create mode 100644 nevergrad/optimization/lama/EHADEEM.py create mode 100644 nevergrad/optimization/lama/EHADEMI.py create mode 100644 nevergrad/optimization/lama/EHDAM.py create mode 100644 nevergrad/optimization/lama/EHDE.py create mode 100644 nevergrad/optimization/lama/EIADEA.py create mode 100644 nevergrad/optimization/lama/EMIDE.py create mode 100644 nevergrad/optimization/lama/EMSADE.py create mode 100644 nevergrad/optimization/lama/EMSEAS.py create mode 100644 nevergrad/optimization/lama/EORAMED.py create mode 100644 nevergrad/optimization/lama/EPADE.py create mode 100644 nevergrad/optimization/lama/EPDE.py create mode 100644 nevergrad/optimization/lama/EPWDEM.py create mode 100644 nevergrad/optimization/lama/ERADE.py create mode 100644 nevergrad/optimization/lama/ERADS.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptiveDynamic.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptiveDynamicPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptiveHybrid.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptivePlus.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptiveProgressive.py create mode 100644 nevergrad/optimization/lama/ERADS_AdaptiveRefinement.py create mode 100644 nevergrad/optimization/lama/ERADS_Advanced.py create mode 100644 nevergrad/optimization/lama/ERADS_AdvancedDynamic.py create mode 100644 nevergrad/optimization/lama/ERADS_AdvancedRefined.py create mode 100644 nevergrad/optimization/lama/ERADS_DynamicPrecision.py create mode 100644 nevergrad/optimization/lama/ERADS_Enhanced.py create mode 100644 nevergrad/optimization/lama/ERADS_EnhancedPrecision.py create mode 100644 nevergrad/optimization/lama/ERADS_HyperOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_NextGen.py create mode 100644 nevergrad/optimization/lama/ERADS_Optimized.py create mode 100644 nevergrad/optimization/lama/ERADS_Precision.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressiveAdaptive.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressiveAdaptivePlus.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressiveDynamic.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressiveOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressivePrecision.py create mode 100644 nevergrad/optimization/lama/ERADS_ProgressiveRefinement.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumFlux.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumFluxPro.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumFluxUltra.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefined.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefinedPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_QuantumLeap.py create mode 100644 nevergrad/optimization/lama/ERADS_Refined.py create mode 100644 nevergrad/optimization/lama/ERADS_Superior.py create mode 100644 nevergrad/optimization/lama/ERADS_Ultra.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamic.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMax.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxEnhanced.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHybrid.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyper.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimizedV4.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefined.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV2.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV3.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimal.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimizedPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxPrecision.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefined.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefinedPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxSupreme.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltra.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefined.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV2.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV3.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV4.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV5.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV6.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV7.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV8.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicPlus.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionEnhanced.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraEnhanced.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraMax.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraOptimized.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraPrecise.py create mode 100644 nevergrad/optimization/lama/ERADS_UltraRefined.py create mode 100644 nevergrad/optimization/lama/ERAMEDS.py create mode 100644 nevergrad/optimization/lama/ESADE.py create mode 100644 nevergrad/optimization/lama/ESADEPFLLP.py create mode 100644 nevergrad/optimization/lama/ESBASM.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveCrowdingHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveMemoryDynamicCrowdingOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch.py create mode 100644 nevergrad/optimization/lama/EliteCovarianceMatrixAdaptationMemeticSearch.py create mode 100644 nevergrad/optimization/lama/EliteDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteDynamicMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteDynamicMultiStrategyHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EliteGuidedAdaptiveRestartDE.py create mode 100644 nevergrad/optimization/lama/EliteGuidedDualStrategyDE.py create mode 100644 nevergrad/optimization/lama/EliteGuidedHybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EliteGuidedHybridDE.py create mode 100644 nevergrad/optimization/lama/EliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/EliteGuidedMutationDE_v2.py create mode 100644 nevergrad/optimization/lama/EliteGuidedQuantumAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EliteHybridAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteMemoryEnhancedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteMultiStrategySelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/ElitePreservingDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EliteQuantumAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteRefinedAdaptivePrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/EliteTranscendentalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/ElitistAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW_AP.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Final.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined_Final.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined.py create mode 100644 nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveCohortMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveControlledMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDEPSOOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiffEvolutionGradientDescent.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamic.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamicImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionEnhanced.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV28.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDifferentialMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDirectionalBiasQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDolphinPodOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDualStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicDE.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveEliteDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveEliteGuidedMutationDE_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveEnvironmentalStrategyV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveExplorationExploitationAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGaussianSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGradientBalancedCrossoverPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGranularStrategyV26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV28.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGuidedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveGuidedMutationOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV29.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV30.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV31.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV28.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV29.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV30.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV31.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV32.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV33.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithHybridInspirationV16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlight.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridMetaOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveInertiaHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemeticOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryControlStrategyV49.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryDualPhaseStrategyV46.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV54.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV79.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealingWithGradient.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiPopulationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveOrthogonalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptivePrecisionCohortOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptivePrecisionFocalStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v28.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v29.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v30.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v31.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v32.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v33.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v34.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v35.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v36.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v38.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v39.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v40.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v41.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v42.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v43.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v44.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v47.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumDEWithDynamicElitistLearning.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumDynamicLevyOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumGradientMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGB.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinal.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchFinal.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImprovedRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevyMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumMemeticOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSOv2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealingOptimized.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV27.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV28.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV29.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV30.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV31.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSinusoidalDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveSwarmHarmonicOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedDifferentialEvolutionLocalSearch_v56.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedHybridDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV17.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV18.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV19.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV1.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV14.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78.py create mode 100644 nevergrad/optimization/lama/EnhancedAdvancedUltimateGuidedMassQGSA_v79.py create mode 100644 nevergrad/optimization/lama/EnhancedArchiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedBalancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedCMAES.py create mode 100644 nevergrad/optimization/lama/EnhancedCMAESv2.py create mode 100644 nevergrad/optimization/lama/EnhancedChaoticFireworksOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedClusterDifferentialCrossover.py create mode 100644 nevergrad/optimization/lama/EnhancedClusteredDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedConvergenceAcceleratedSpiralSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/EnhancedCooperativeCulturalDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedCovarianceGradientSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedCrossoverElitistStrategyV9.py create mode 100644 nevergrad/optimization/lama/EnhancedCrowdingMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedCulturalAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedCulturalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedCulturalMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v15.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v16.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v17.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v18.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v19.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v21.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v22.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v23.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v24.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v25.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v26.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v27.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v28.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v29.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v30.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v31.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v32.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v33.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v34.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v35.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v36.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v37.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v38.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v39.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v40.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v41.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v43.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v44.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v45.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v46.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v47.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v48.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v49.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v50.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v51.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v52.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v53.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v59.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v60.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v62.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v63.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v64.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v66.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v67.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v68.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v69.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v70.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v71.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v72.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v73.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v74.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v75.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v76.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v77.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v78.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v79.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v80.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialEvolutionWithAdaptiveMutationControl.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentialSimulatedAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDifferentiatedAdaptiveEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDimensionalFeedbackEvolverV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDiverseMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedAdaptiveHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V2.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V3.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonyAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDualStrategyAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedDualStrategyHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveClimbingStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryStrategyV59.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveOptimizerV8.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptivePopulationDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicBalancingPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicClusterOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicClusterSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicCohortOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicCrossoverRAMEDS.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicCuckooHarmonyAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDiversifiedHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicDualPhaseStrategyV12.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicEliteAnnealingDE.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicEscapeStrategyV32.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmFinal.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRedesigned.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithHybridSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonyFireworksSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV5.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV6.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV7.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV8.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHarmonyTabuSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicMemoryStrategyV51.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicMultiPhaseAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicMutationSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicNichePSO_DE_LS.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicNichingDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicPrecisionBalancedEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicPrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationFinal.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV14.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV15.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV16.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV17.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV18.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV19.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV20.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV21.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV22.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV23.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV24.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV25.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV26.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV27.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV28.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicRefinementGradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicRestartAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteCrowdingMemoryHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveRestartDE.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedDualMutationDE.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v81.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v82.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v83.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v85.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v86.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteGuidedMutationDE_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedEliteQuantumAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonicTabuSearchV24.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v4.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v63.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v64.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v68.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV10.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV11.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV12.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV13.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV14.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV8.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV9.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedMetaHeuristicOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py create mode 100644 nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV1.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV12.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV13.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV14.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV15.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV16.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV17.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV18.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV19.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV20.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV21.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV22.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV23.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV24.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV25.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV26.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV27.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV28.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV29.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV30.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v4.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v5.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v6.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryGradientSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedEvolutionaryStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedExplorativeHarmonicSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithDynamicMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithHybridLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithImprovedMutation.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinal.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchOptimized.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworkSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedFireworksSwarmOptimization_v4.py create mode 100644 nevergrad/optimization/lama/EnhancedFocusedBalancedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedGlobalStructureAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/EnhancedGlobalStructureAwareOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedGlobalStructureOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedGradientBoostedAnnealingWithAdaptiveMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedGradientGuidedClusterSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedGradientGuidedEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedGradientGuidedHybridPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedGradualAdaptiveRAMEDS.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV10.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV11.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV12.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV13.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV14.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV15.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV16.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV17.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV18.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV19.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV2.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV20.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV21.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV22.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV23.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV24.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV25.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV3.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV30.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV31.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV32.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV4.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV6.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV7.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV8.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV9.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDiversityPreservation.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v62.py create mode 100644 nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v94.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicLevyDolphinOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV11.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV13.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV14.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV15.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV16.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV19.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyDiversifiedCuckooAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyFireworkOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV14.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV15.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV16.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV17.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV34.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonySearchOB.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV6.py create mode 100644 nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV7.py create mode 100644 nevergrad/optimization/lama/EnhancedHierarchicalCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveGeneticSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveHarmonicFireworksTabuSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiPhaseEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveQuantumOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridCMAESDE.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridDEPSOWithDynamicAdaptationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridDEPSOWithQuantumLevyFlight.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridDEPSOwithAdaptiveRestart.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridDifferentialEvolutionMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridDynamicAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridGradientAnnealingWithMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridGradientBasedStrategyV8.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridGradientPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridHarmonySearchWithAdaptiveMutationV20.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMemoryAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMemoryPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV10.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV11.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV12.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV15.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV3.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV7.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV8.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV9.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridQuantumDifferentialPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridQuasiRandomGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedHybridSimulatedAnnealingOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperOptimizedMultiStrategicOptimizerV49.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperParameterTunedMetaHeuristicOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedHyperStrategicOptimizerV56.py create mode 100644 nevergrad/optimization/lama/EnhancedImprovedDifferentialEvolutionLocalSearch_v58.py create mode 100644 nevergrad/optimization/lama/EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77.py create mode 100644 nevergrad/optimization/lama/EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedIslandEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV10.py create mode 100644 nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV3.py create mode 100644 nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV7.py create mode 100644 nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV8.py create mode 100644 nevergrad/optimization/lama/EnhancedLocalSearchAdaptiveStrategyV29.py create mode 100644 nevergrad/optimization/lama/EnhancedLocalSearchQuantumSimulatedAnnealingV6.py create mode 100644 nevergrad/optimization/lama/EnhancedMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedMemeticEvolutionarySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedMemeticHarmonyOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedMemoryAdaptiveDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77.py create mode 100644 nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV41.py create mode 100644 nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV69.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaDynamicPrecisionOptimizerV1.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaHeuristicOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V1.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V2.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V3.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V4.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V5.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V6.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V7.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv2.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv3.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv4.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv5.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv6.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaNetPSOv2.py create mode 100644 nevergrad/optimization/lama/EnhancedMetaPopulationAdaptiveGradientSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiFocalAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiModalAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiModalConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiModalExplorationStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiModalMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiOperatorSearch2.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiPhaseAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiPhaseOptimizationAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiStageGradientBoostedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedMultiStrategyQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedNicheDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidth.py create mode 100644 nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC.py create mode 100644 nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py create mode 100644 nevergrad/optimization/lama/EnhancedOptimalEvolutionaryGradientOptimizerV9.py create mode 100644 nevergrad/optimization/lama/EnhancedOptimalPrecisionEvolutionaryThermalOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedOptimizedEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDE.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/EnhancedParallelDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV6.py create mode 100644 nevergrad/optimization/lama/EnhancedPhaseAdaptiveMemoryStrategyV75.py create mode 100644 nevergrad/optimization/lama/EnhancedPhaseTransitionMemoryStrategyV82.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionAdaptiveCohortOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionAdaptiveGradientClusteringPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionBoostedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV38.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV39.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionGuidedQuantumStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionHybridSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedPrecisionTunedCrossoverElitistStrategyV14.py create mode 100644 nevergrad/optimization/lama/EnhancedProgressiveAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedQAPSOAIRVCHR.py create mode 100644 nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLS.py create mode 100644 nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLSDP.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveCrossover.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveEliteGuidedSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveFireworksOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveGradientDiversityExplorer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridDEPSO_V4.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridSearchV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiPhaseDE_v3.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiStrategyEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveNesterovStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCognitionFocusedOptimizerV18.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCognitionOptimizerV12.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCooperativeStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDiversityDE.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDynamicAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDynamicBalanceOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumDynamicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumGradientOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonicAdaptationStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonyMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonySearchAB.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGB.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGBRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumInformedGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumInfusedAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumInspiredHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumIterativeRefinement.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLeapGradientBoostPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLeapPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialDynamicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLevyMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLevyParticleOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLocalSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumLocalSearchImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizerV5.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumMultiPhaseAdaptiveDE_v10.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumMultiStrategyOptimization_v2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumReactiveCooperativeStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumReinforcedNesterovAcceleratorV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumResilientCrossoverStrategyV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingOptimized.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumStateConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationRefined.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV10.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV11.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV12.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV13.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV7.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV8.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV9.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSymbioticStrategyV5.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumSynergyStrategyV2.py create mode 100644 nevergrad/optimization/lama/EnhancedQuantumTunnelingOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/EnhancedRAMEDSPro.py create mode 100644 nevergrad/optimization/lama/EnhancedRAMEDSProV2.py create mode 100644 nevergrad/optimization/lama/EnhancedRAMEDSv3.py create mode 100644 nevergrad/optimization/lama/EnhancedRAMEDSv4.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSpiralSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDE.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveFocusedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveHarmonySearch.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveMemeticDiverseOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v4.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v5.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v49.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v52.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v53.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v54.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v55.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v56.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v57.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v58.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v59.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v60.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveSpiralGradientSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedEliteDynamicMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedEvolutionaryGradientHybridOptimizerV4.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v88.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v89.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v90.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v91.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v92.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v93.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHybridDEPSOWithDynamicAdaptation.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv8.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv9.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedOptimalDynamicPrecisionOptimizerV16.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedSpatialOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v72.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v73.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v74.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v76.py create mode 100644 nevergrad/optimization/lama/EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43.py create mode 100644 nevergrad/optimization/lama/EnhancedResilientAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch.py create mode 100644 nevergrad/optimization/lama/EnhancedRotationalClimbOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedSelectiveEvolutionaryOptimizerV21.py create mode 100644 nevergrad/optimization/lama/EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedSelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EnhancedSelfAdaptiveDE2.py create mode 100644 nevergrad/optimization/lama/EnhancedSelfAdaptiveMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedSequentialQuadraticAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/EnhancedSpatialAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/EnhancedSpatialAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedSpectralHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover.py create mode 100644 nevergrad/optimization/lama/EnhancedStochasticGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/EnhancedStochasticMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedStrategicAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/EnhancedStrategicMemoryAdaptiveStrategyV44.py create mode 100644 nevergrad/optimization/lama/EnhancedStrategicPSO.py create mode 100644 nevergrad/optimization/lama/EnhancedStrategyDE.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV4.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV5.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV6.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9.py create mode 100644 nevergrad/optimization/lama/EnhancedSuperiorUltimateGuidedMassQGSA_v80.py create mode 100644 nevergrad/optimization/lama/EnhancedSupremeDynamicPrecisionOptimizerV1.py create mode 100644 nevergrad/optimization/lama/EnhancedSwarmHybridOptimization.py create mode 100644 nevergrad/optimization/lama/EnhancedTwoPhaseDynamicStrategyV39.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateEvolutionaryGradientOptimizerV36.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2.py create mode 100644 nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3.py create mode 100644 nevergrad/optimization/lama/EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44.py create mode 100644 nevergrad/optimization/lama/EnsembleAdaptiveEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnsembleAdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/EnsembleAdaptiveQuantumDE.py create mode 100644 nevergrad/optimization/lama/EnsembleDE.py create mode 100644 nevergrad/optimization/lama/EnsembleEvolutionaryCulturalSearch.py create mode 100644 nevergrad/optimization/lama/EnsembleHybridSearch.py create mode 100644 nevergrad/optimization/lama/EnsembleMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/EnsembleMutationAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/EntropyEnhancedAdaptiveStrategyV61.py create mode 100644 nevergrad/optimization/lama/EvolutionaryConvergenceSpiralSearch.py create mode 100644 nevergrad/optimization/lama/EvolutionaryDynamicGradientSearch.py create mode 100644 nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/EvolutionaryGradientSearch.py create mode 100644 nevergrad/optimization/lama/EvolutionaryHarmonicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/EvolutionaryParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/ExDADe.py create mode 100644 nevergrad/optimization/lama/FEDE.py create mode 100644 nevergrad/optimization/lama/FTADEEM.py create mode 100644 nevergrad/optimization/lama/FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py create mode 100644 nevergrad/optimization/lama/FinalEnhancedDynamicLocalSearchFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py create mode 100644 nevergrad/optimization/lama/FinalEnhancedRefinedUltimateGuidedMassQGSA_v75.py create mode 100644 nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined.py create mode 100644 nevergrad/optimization/lama/FineTunedCohortDiversityOptimizer.py create mode 100644 nevergrad/optimization/lama/FineTunedFocusedAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/FineTunedProgressiveAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/FocusedBalancedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/FocusedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/FractionalOrderClusterHybridOptimization.py create mode 100644 nevergrad/optimization/lama/FurtherEnhancedHybridMetaHeuristicOptimizerV13.py create mode 100644 nevergrad/optimization/lama/GEEA.py create mode 100644 nevergrad/optimization/lama/GESA.py create mode 100644 nevergrad/optimization/lama/GGAES.py create mode 100644 nevergrad/optimization/lama/GIDE.py create mode 100644 nevergrad/optimization/lama/GaussianAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/GaussianEnhancedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/GradientAssistedDifferentialCrossover.py create mode 100644 nevergrad/optimization/lama/GradientBalancedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/GradientBasedAdaptiveCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/GradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/GradientEnhancedAdaptiveAnnealing.py create mode 100644 nevergrad/optimization/lama/GradientEnhancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/GradientEstimationSearch.py create mode 100644 nevergrad/optimization/lama/GradientGuidedClusterSearch.py create mode 100644 nevergrad/optimization/lama/GradientGuidedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/GradientGuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/GradientGuidedHybridPSO.py create mode 100644 nevergrad/optimization/lama/GradientInformedAdaptiveDirectionSearch.py create mode 100644 nevergrad/optimization/lama/GradientInformedAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/GradientInformedParticleOptimizer.py create mode 100644 nevergrad/optimization/lama/GradientSpiralDifferentialEnhancerV5.py create mode 100644 nevergrad/optimization/lama/GravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/GreedyDiversityMultiStrategySADE.py create mode 100644 nevergrad/optimization/lama/GreedyDynamicMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/GuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/GuidedMutationOptimizer.py create mode 100644 nevergrad/optimization/lama/HADE.py create mode 100644 nevergrad/optimization/lama/HADEEM.py create mode 100644 nevergrad/optimization/lama/HADEMI.py create mode 100644 nevergrad/optimization/lama/HAVCDE.py create mode 100644 nevergrad/optimization/lama/HEAS.py create mode 100644 nevergrad/optimization/lama/HarmonyFireworkOptimizer.py create mode 100644 nevergrad/optimization/lama/HarmonyTabuOptimization.py create mode 100644 nevergrad/optimization/lama/HierarchicalAdaptiveAnnealing.py create mode 100644 nevergrad/optimization/lama/HierarchicalAdaptiveCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/HierarchicalAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/HierarchicalDiversityEnhancedCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/HighPerformanceAdaptiveDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/HyGDAE.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveCrossoverElitistStrategyV10.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDifferentialQuantumSearch.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDiversityMaintainingGradientEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveDualPhaseStrategyV6.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizerV2.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveGradientPSO.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveHarmonicFireworksTabuSearch.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMemeticOptimizerV4.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolutionV2.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveNesterovSynergy.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveOptimization.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveOrthogonalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveParallelDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveParameterTuningOptimization.py create mode 100644 nevergrad/optimization/lama/HybridAdaptivePopulationDE.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveQuantumLevySearch.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveSearchStrategy.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveSelfAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridAdaptiveSimulatedAnnealingDE.py create mode 100644 nevergrad/optimization/lama/HybridCosineSineDualPhaseStrategyV10.py create mode 100644 nevergrad/optimization/lama/HybridCovarianceMatrixAdaptionDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights.py create mode 100644 nevergrad/optimization/lama/HybridCulturalDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridDEPSO.py create mode 100644 nevergrad/optimization/lama/HybridDEPSOWithDynamicAdaptation.py create mode 100644 nevergrad/optimization/lama/HybridDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridDifferentialEvolutionMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridDifferentialEvolutionParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridDifferentialEvolutionWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/HybridDifferentialLocalSearch.py create mode 100644 nevergrad/optimization/lama/HybridDualLocalOptimizationDE.py create mode 100644 nevergrad/optimization/lama/HybridDualPhaseParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridDynamicAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/HybridDynamicAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/HybridDynamicClusterOptimization.py create mode 100644 nevergrad/optimization/lama/HybridDynamicCuckooHarmonyAlgorithm.py create mode 100644 nevergrad/optimization/lama/HybridDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridDynamicDifferentialEvolutionGradient.py create mode 100644 nevergrad/optimization/lama/HybridDynamicElitistDE.py create mode 100644 nevergrad/optimization/lama/HybridDynamicQuantumLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/HybridDynamicSearch.py create mode 100644 nevergrad/optimization/lama/HybridEnhancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridEnhancedDualPhaseAdaptiveOptimizationV6.py create mode 100644 nevergrad/optimization/lama/HybridEnhancedGravitationalSwarmIntelligence.py create mode 100644 nevergrad/optimization/lama/HybridEvolutionaryAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridEvolutionaryOptimization.py create mode 100644 nevergrad/optimization/lama/HybridEvolvingAdaptiveStrategyV28.py create mode 100644 nevergrad/optimization/lama/HybridExploitationExplorationGradientSearch.py create mode 100644 nevergrad/optimization/lama/HybridGradientAnnealingWithMemory.py create mode 100644 nevergrad/optimization/lama/HybridGradientBoostedMemoryAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/HybridGradientCrossoverOptimization.py create mode 100644 nevergrad/optimization/lama/HybridGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridGradientEvolution.py create mode 100644 nevergrad/optimization/lama/HybridGradientMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/HybridGradientMemoryAnnealingV2.py create mode 100644 nevergrad/optimization/lama/HybridGradientMemoryAnnealingV3.py create mode 100644 nevergrad/optimization/lama/HybridGradientMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/HybridGradientPSO.py create mode 100644 nevergrad/optimization/lama/HybridGuidedEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridMemoryAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/HybridMultiDimensionalAnnealing.py create mode 100644 nevergrad/optimization/lama/HybridPSO_DE.py create mode 100644 nevergrad/optimization/lama/HybridPSO_DE_GradientOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridParticleDE.py create mode 100644 nevergrad/optimization/lama/HybridParticleDE_v2.py create mode 100644 nevergrad/optimization/lama/HybridParticleDE_v3.py create mode 100644 nevergrad/optimization/lama/HybridParticleSwarmDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/HybridQuantumAdaptiveMemeticSearch.py create mode 100644 nevergrad/optimization/lama/HybridQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart.py create mode 100644 nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch.py create mode 100644 nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory.py create mode 100644 nevergrad/optimization/lama/HybridQuantumDifferentialParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/HybridQuantumEnhancedMultiPhaseAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/HybridQuantumEvolution.py create mode 100644 nevergrad/optimization/lama/HybridQuantumGradientEvolution.py create mode 100644 nevergrad/optimization/lama/HybridQuantumLevyAdaptiveSwarmV2.py create mode 100644 nevergrad/optimization/lama/HybridQuantumMemeticOptimization.py create mode 100644 nevergrad/optimization/lama/HybridQuasiRandomDEGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/HybridQuasiRandomGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost.py create mode 100644 nevergrad/optimization/lama/HybridSelfAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveConvergenceStrategy.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveGradientRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveHybridDEPSOwithDynamicRestart.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveMemoryGuidedStrategyV74.py create mode 100644 nevergrad/optimization/lama/HyperAdaptivePrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveSinusoidalDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/HyperAdaptiveStrategyDE.py create mode 100644 nevergrad/optimization/lama/HyperAdvancedDynamicPrecisionOptimizerV41.py create mode 100644 nevergrad/optimization/lama/HyperEvolvedDynamicPrecisionOptimizerV48.py create mode 100644 nevergrad/optimization/lama/HyperEvolvedDynamicRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperEvolvedRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperFocusedAdaptiveElitistStrategyV5.py create mode 100644 nevergrad/optimization/lama/HyperOptimalRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperOptimalStrategicEvolutionaryOptimizerV58.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV12.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV42.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV43.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV57.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedEvolutionaryGradientOptimizerV61.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedGradientEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV47.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV48.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedSpiralDifferentialOptimizerV8.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedThermalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperOptimizedUltraRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperPreciseEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperPrecisionEvolutionaryOptimizerV23.py create mode 100644 nevergrad/optimization/lama/HyperQuantumConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperQuantumStateCrossoverOptimization.py create mode 100644 nevergrad/optimization/lama/HyperRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperRefinedAdaptiveDynamicPrecisionOptimizerV52.py create mode 100644 nevergrad/optimization/lama/HyperRefinedAdaptiveGuidedMutationOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionSearch.py create mode 100644 nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV3.py create mode 100644 nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV49.py create mode 100644 nevergrad/optimization/lama/HyperRefinedEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/HyperRefinedQuantumVelocityOptimizer.py create mode 100644 nevergrad/optimization/lama/HyperSpiralDifferentialClimber.py create mode 100644 nevergrad/optimization/lama/HyperSpiralDifferentialClimberV2.py create mode 100644 nevergrad/optimization/lama/IADEA.py create mode 100644 nevergrad/optimization/lama/IAGEA.py create mode 100644 nevergrad/optimization/lama/IALNF.py create mode 100644 nevergrad/optimization/lama/IASDD.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveCovarianceGradientSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveEliteGuidedRestartDE.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveEnhancedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveEvolutionaryHyperHeuristic.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveExplorationExploitationAlgorithm.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHarmonyMemeticAlgorithmV17.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHarmonySearchWithCuckooInspiration.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHybridMetaOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveHybridSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveMemeticHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptivePopulationMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveQuantumEntropyDE.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/ImprovedAdaptiveQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedAdvancedHybridAdaptiveOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedBalancedQuantumLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedCooperativeAdaptiveEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedCulturalDifferentialMemeticEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedCulturalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedDiversifiedHarmonySearchOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveMemoryStrategyV58.py create mode 100644 nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicHarmonyFireworksSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicHybridDEPSOWithEliteMemoryV3.py create mode 100644 nevergrad/optimization/lama/ImprovedDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteAdaptiveCrowdingHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteGuidedHybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE_v2.py create mode 100644 nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDynamicHarmonySearchV4.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdaptiveLevyHarmonySearchV4.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdaptiveMetaNetAQAPSOv4.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v54.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v61.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v65.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDiversifiedGravitationalSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDynamicHarmonyAlgorithm.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDynamicLevyHarmonySearch.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedEliteGuidedMassQGSA_v84.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedEvolutionaryFireworksSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchOB.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedMemeticHarmonyOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedRefinedAdaptiveQGSA_v61.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedSADE.py create mode 100644 nevergrad/optimization/lama/ImprovedEnhancedStochasticMetaHeuristicOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedEnsembleMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridAdaptiveGeneticSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridAdaptiveHarmonicFireworksTabuSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridCMAESDE.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridGeneticPSO.py create mode 100644 nevergrad/optimization/lama/ImprovedHybridPSODEOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedIterativeAdaptiveGradientEvolver.py create mode 100644 nevergrad/optimization/lama/ImprovedMetaDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedMultiStrategySelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/ImprovedOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedPrecisionAdaptiveEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning.py create mode 100644 nevergrad/optimization/lama/ImprovedQuantumEnhancedDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/ImprovedQuantumLevyAdaptiveHybridSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedQuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedAdaptiveDynamicExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedAdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/ImprovedSelfAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedSelfAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ImprovedUnifiedAdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/IncrementalCrossoverOptimization.py create mode 100644 nevergrad/optimization/lama/IntelligentDynamicDualPhaseStrategyV39.py create mode 100644 nevergrad/optimization/lama/IntelligentEvolvingAdaptiveStrategyV34.py create mode 100644 nevergrad/optimization/lama/IntelligentPerturbationSearch.py create mode 100644 nevergrad/optimization/lama/IterativeAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/IterativeProgressiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/LADESA.py create mode 100644 nevergrad/optimization/lama/LAOS.py create mode 100644 nevergrad/optimization/lama/LearningAdaptiveMemoryEnhancedStrategyV42.py create mode 100644 nevergrad/optimization/lama/LearningAdaptiveStrategyV24.py create mode 100644 nevergrad/optimization/lama/LevyEnhancedAdaptiveSimulatedAnnealingDE.py create mode 100644 nevergrad/optimization/lama/MADE.py create mode 100644 nevergrad/optimization/lama/MIDEAT.py create mode 100644 nevergrad/optimization/lama/MSADE.py create mode 100644 nevergrad/optimization/lama/MSEAS.py create mode 100644 nevergrad/optimization/lama/MemeticAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/MemeticDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/MemeticElitistDifferentialEvolutionWithDynamicFandCR.py create mode 100644 nevergrad/optimization/lama/MemeticEnhancedParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/MemeticSpatialDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/MemoryBasedSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/MemoryEnhancedAdaptiveAnnealing.py create mode 100644 nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealing.py create mode 100644 nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient.py create mode 100644 nevergrad/optimization/lama/MemoryEnhancedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/MemoryGuidedAdaptiveDualPhaseStrategyV40.py create mode 100644 nevergrad/optimization/lama/MemoryHybridAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/MetaDynamicPrecisionOptimizerV1.py create mode 100644 nevergrad/optimization/lama/MetaDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/MetaHarmonicSearch.py create mode 100644 nevergrad/optimization/lama/MetaHarmonicSearch2.py create mode 100644 nevergrad/optimization/lama/MetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/MomentumGradientExploration.py create mode 100644 nevergrad/optimization/lama/MultiFacetAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/MultiFocalAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/MultiLayeredAdaptiveCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/MultiModalMemoryEnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66.py create mode 100644 nevergrad/optimization/lama/MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67.py create mode 100644 nevergrad/optimization/lama/MultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/MultiPhaseAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/MultiPhaseAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/MultiPhaseAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/MultiPhaseAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/MultiPhaseDiversityAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/MultiPopulationAdaptiveMemorySearch.py create mode 100644 nevergrad/optimization/lama/MultiScaleAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/MultiScaleGradientExploration.py create mode 100644 nevergrad/optimization/lama/MultiScaleGradientSearch.py create mode 100644 nevergrad/optimization/lama/MultiScaleQuadraticSearch.py create mode 100644 nevergrad/optimization/lama/MultiStageAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/MultiStageHybridGradientBoostedAnnealing.py create mode 100644 nevergrad/optimization/lama/MultiStrategyAdaptiveGradientEvolution.py create mode 100644 nevergrad/optimization/lama/MultiStrategyAdaptiveSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/MultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/MultiStrategyMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/MultiStrategyQuantumCognitionOptimizerV9.py create mode 100644 nevergrad/optimization/lama/MultiStrategyQuantumLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/MultiStrategySelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/MultiSwarmAdaptiveDE_PSO.py create mode 100644 nevergrad/optimization/lama/NovelAdaptiveHarmonicFireworksTabuSearch.py create mode 100644 nevergrad/optimization/lama/NovelDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/NovelHarmonyTabuSearch.py create mode 100644 nevergrad/optimization/lama/ODEMF.py create mode 100644 nevergrad/optimization/lama/ORAMED.py create mode 100644 nevergrad/optimization/lama/OctopusSwarmAlgorithm.py create mode 100644 nevergrad/optimization/lama/OptimalAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/OptimalAdaptiveDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/OptimalAdaptiveMutationEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/OptimalAdaptiveSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/OptimalBalanceSearch.py create mode 100644 nevergrad/optimization/lama/OptimalCohortDiversityOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalConvergenceDE.py create mode 100644 nevergrad/optimization/lama/OptimalDynamicAdaptiveEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalDynamicMutationSearch.py create mode 100644 nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV14.py create mode 100644 nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV21.py create mode 100644 nevergrad/optimization/lama/OptimalEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/OptimalEnhancedStrategyDE.py create mode 100644 nevergrad/optimization/lama/OptimalEvolutionaryGradientHybridOptimizerV8.py create mode 100644 nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV11.py create mode 100644 nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV25.py create mode 100644 nevergrad/optimization/lama/OptimalHybridDifferentialAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalHyperStrategicOptimizerV51.py create mode 100644 nevergrad/optimization/lama/OptimalPrecisionDynamicAdaptationOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalPrecisionEvolutionaryOptimizerV37.py create mode 100644 nevergrad/optimization/lama/OptimalPrecisionEvolutionaryThermalOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalPrecisionHybridSearchV3.py create mode 100644 nevergrad/optimization/lama/OptimalQuantumSynergyStrategy.py create mode 100644 nevergrad/optimization/lama/OptimalRefinedEnhancedUltraRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/OptimalSelectiveEvolutionaryOptimizerV20.py create mode 100644 nevergrad/optimization/lama/OptimalSmartRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/OptimalSpiralCentroidSearch.py create mode 100644 nevergrad/optimization/lama/OptimalStrategicAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimalStrategicHybridDE.py create mode 100644 nevergrad/optimization/lama/OptimallyBalancedQuantumStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveDifferentialClimber.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategyV4.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveDynamicStrategyV34.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveGlobalLocalSearch.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveQuantumGradientHybridStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedAdaptiveSimulatedAnnealingWithSmartMemory.py create mode 100644 nevergrad/optimization/lama/OptimizedBalancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/OptimizedConvergenceIslandStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedConvergentAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/OptimizedCrossoverElitistStrategyV8.py create mode 100644 nevergrad/optimization/lama/OptimizedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/OptimizedDualPhaseAdaptiveHybridOptimizationV4.py create mode 100644 nevergrad/optimization/lama/OptimizedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicDualPhaseStrategyV13.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicGradientBoostedSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/OptimizedDynamicRestartAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/OptimizedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/OptimizedEnhancedAdaptiveMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/OptimizedEnhancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/OptimizedEnhancedDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/OptimizedEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedExplorationConvergenceStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedGlobalStructureAwareEvolver.py create mode 100644 nevergrad/optimization/lama/OptimizedGradientBalancedPSO.py create mode 100644 nevergrad/optimization/lama/OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/OptimizedGradientMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/OptimizedHybridAdaptiveDualPhaseStrategyV7.py create mode 100644 nevergrad/optimization/lama/OptimizedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/OptimizedHybridExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/OptimizedHybridSearch.py create mode 100644 nevergrad/optimization/lama/OptimizedHybridStrategyDE.py create mode 100644 nevergrad/optimization/lama/OptimizedHyperStrategicOptimizerV53.py create mode 100644 nevergrad/optimization/lama/OptimizedIslandEvolutionStrategyV4.py create mode 100644 nevergrad/optimization/lama/OptimizedMemoryEnhancedAdaptiveStrategyV70.py create mode 100644 nevergrad/optimization/lama/OptimizedMemoryGuidedAdaptiveStrategyV81.py create mode 100644 nevergrad/optimization/lama/OptimizedMemoryResponsiveAdaptiveStrategyV78.py create mode 100644 nevergrad/optimization/lama/OptimizedParallelStrategyDE.py create mode 100644 nevergrad/optimization/lama/OptimizedPrecisionAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/OptimizedPrecisionTunedCrossoverElitistStrategyV13.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumFluxDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumGradientExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/OptimizedQuantumLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/OptimizedRAMEDS.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedAdaptiveHybridSearch.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedAdaptiveMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedAdaptiveRefinementPSO.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedEnhancedRAMEDSv5.py create mode 100644 nevergrad/optimization/lama/OptimizedRefinedMemoryDualPhaseStrategyV65.py create mode 100644 nevergrad/optimization/lama/OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45.py create mode 100644 nevergrad/optimization/lama/OscillatoryCrossoverDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/PADE.py create mode 100644 nevergrad/optimization/lama/PAMDMDESM.py create mode 100644 nevergrad/optimization/lama/PDEAF.py create mode 100644 nevergrad/optimization/lama/PGDE.py create mode 100644 nevergrad/optimization/lama/PMFSA.py create mode 100644 nevergrad/optimization/lama/PPDE.py create mode 100644 nevergrad/optimization/lama/PWDE.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimization.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimizationV2.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveDecayOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveDynamicStrategyV33.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveGlobalClimbingEnhancer.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptiveGradientClusteringPSO.py create mode 100644 nevergrad/optimization/lama/PrecisionAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/PrecisionBalancedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/PrecisionBalancedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/PrecisionBalancedOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionBoostedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/PrecisionCosineAdaptiveDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/PrecisionDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/PrecisionDynamicAdaptiveOptimizerV6.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedDualStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedDynamicOptimizerV13.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedSpatialAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedSpiralDifferentialClimberV4.py create mode 100644 nevergrad/optimization/lama/PrecisionEnhancedStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionEvolutionaryThermalOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionFocusedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/PrecisionGuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/PrecisionGuidedEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/PrecisionGuidedQuantumStrategy.py create mode 100644 nevergrad/optimization/lama/PrecisionIncrementalEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/PrecisionOptimizedEvolutionaryOptimizerV22.py create mode 100644 nevergrad/optimization/lama/PrecisionRotationalClimbOptimizer.py create mode 100644 nevergrad/optimization/lama/PrecisionScaledEvolutionarySearch.py create mode 100644 nevergrad/optimization/lama/PrecisionSpiralDifferentialOptimizerV6.py create mode 100644 nevergrad/optimization/lama/PrecisionTunedCrossoverElitistStrategyV11.py create mode 100644 nevergrad/optimization/lama/PrecisionTunedEvolver.py create mode 100644 nevergrad/optimization/lama/PrecisionTunedHybridSearch.py create mode 100644 nevergrad/optimization/lama/PrecisionTunedPSO.py create mode 100644 nevergrad/optimization/lama/PrecisionTunedQuantumHarmonicFeedbackOptimizer.py create mode 100644 nevergrad/optimization/lama/ProgressiveAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ProgressiveAdaptiveGlobalLocalSearch.py create mode 100644 nevergrad/optimization/lama/ProgressiveCohortDiversityOptimization.py create mode 100644 nevergrad/optimization/lama/ProgressiveDimensionalOptimizer.py create mode 100644 nevergrad/optimization/lama/ProgressiveEvolutionaryFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/ProgressiveHybridAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/ProgressiveParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/ProgressivePopulationRefinementStrategy.py create mode 100644 nevergrad/optimization/lama/ProgressiveQuorumEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/ProgressiveRefinementSearch.py create mode 100644 nevergrad/optimization/lama/QAPSO.py create mode 100644 nevergrad/optimization/lama/QAPSOAIR.py create mode 100644 nevergrad/optimization/lama/QAPSOAIRVC.py create mode 100644 nevergrad/optimization/lama/QAPSOAIRVCHR.py create mode 100644 nevergrad/optimization/lama/QAPSOAIW.py create mode 100644 nevergrad/optimization/lama/QAPSOAIWRR.py create mode 100644 nevergrad/optimization/lama/QPSO.py create mode 100644 nevergrad/optimization/lama/QuantumAcceleratedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumAcceleratedNesterovOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAcceleratedNesterovPlusOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV5.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveCrossoverRefinement.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV10.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV11.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV12.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV11.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV12.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV13.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV14.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV15.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDiversifiedHybridSearchV10.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExploration.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV2.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV3.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV4.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV5.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV6.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV7.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveDynamicStrategyV7.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveEliteGuidedSearch.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveFireworksOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveGradientDiversityExplorer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveGradientSearch.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveHarmonicOptimizerV8.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveHybridDEPSO_V7.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveHybridStrategyV4.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveLevyDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveLevyDynamicDifferentialSwarmV4.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveLevyMemeticSearch.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveLevyOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveLevySwarmOptimizationV2.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMemeticSearchV2.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMultiPhaseDE_v6.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMultiPopulationDE.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveMultiStrategyEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveNesterovGradientEnhancer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveNesterovSynergy.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveRefinementOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategyV2.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveStrategicEnhancer.py create mode 100644 nevergrad/optimization/lama/QuantumAdaptiveVelocityOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumAnnealingDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumAssistedHybridOptimizerV1.py create mode 100644 nevergrad/optimization/lama/QuantumBalancedAdaptiveNesterovStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumBalancedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancedOptimizerV16.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancerV8.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionAdaptiveTuningOptimizerV14.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionDynamicAdaptationOptimizerV30.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionEnhancedOptimizerV7.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionFocusedHybridOptimizerV21.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionFocusedOptimizerV17.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV19.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV20.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV23.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV24.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV25.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV26.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV27.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumCognitionTrajectoryOptimizerV28.py create mode 100644 nevergrad/optimization/lama/QuantumCognitiveAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumControlledDiversityStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumCooperativeCrossoverStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicElitismAndRestarts.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEliteGuidance.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitism.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialEvolutionWithMultiStrategyLearning.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithElitism.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py create mode 100644 nevergrad/optimization/lama/QuantumDifferentialParticleSwarmRefinement.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalAcceleratorV19.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancer.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV10.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV11.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV12.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV13.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV14.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV15.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV16.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV17.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV18.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV2.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV3.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV4.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV5.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV6.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV7.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV8.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalEnhancerV9.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalFusionOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalFusionOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV20.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV21.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV22.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV23.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV24.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV25.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV26.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV27.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV28.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV29.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV30.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV31.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV32.py create mode 100644 nevergrad/optimization/lama/QuantumDirectionalRefinerV33.py create mode 100644 nevergrad/optimization/lama/QuantumDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicAdaptationStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicBalanceOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicBalancedOptimizerV7.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicExplorationOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicGradientClimberV2.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicGradientClimberV3.py create mode 100644 nevergrad/optimization/lama/QuantumDynamicallyAdaptiveFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/QuantumEliteMemeticAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v4.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v5.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveDiversityStrategyV6.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveDualStrategyDE.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE_v7.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedAdaptiveSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDiversityExplorerV8.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v2.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v3.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicHybridSearchV9.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE_v2.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedGlobalTacticalOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedGradientClimber.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMemeticAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMemeticSearch.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v8.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v9.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v2.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v3.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v4.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v5.py create mode 100644 nevergrad/optimization/lama/QuantumEnhancedRefinedAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumEntropyEnhancedDE.py create mode 100644 nevergrad/optimization/lama/QuantumEvolutionaryAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategyV2.py create mode 100644 nevergrad/optimization/lama/QuantumEvolutionaryOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV10.py create mode 100644 nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV11.py create mode 100644 nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV12.py create mode 100644 nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV9.py create mode 100644 nevergrad/optimization/lama/QuantumFeedbackEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/QuantumFluxDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/QuantumGeneticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV2.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV3.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV4.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV5.py create mode 100644 nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationRefinedOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumGradientBalancedOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumGradientBoostedMemeticSearch.py create mode 100644 nevergrad/optimization/lama/QuantumGradientEnhancedExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumGradientFusionOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumGradientGuidedFireworksAlgorithm.py create mode 100644 nevergrad/optimization/lama/QuantumGradientHybridOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumGradientHybridOptimizationV2.py create mode 100644 nevergrad/optimization/lama/QuantumGradientHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/QuantumGradientHybridOptimizationV4.py create mode 100644 nevergrad/optimization/lama/QuantumGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumGradientMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumGradientMemeticSearch.py create mode 100644 nevergrad/optimization/lama/QuantumGradientMemeticSearchV2.py create mode 100644 nevergrad/optimization/lama/QuantumGradientMemeticSearchV3.py create mode 100644 nevergrad/optimization/lama/QuantumGuidedAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumGuidedCrossoverAdaptation.py create mode 100644 nevergrad/optimization/lama/QuantumGuidedHybridDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/QuantumGuidedLevyAdaptiveSwarm.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicAdaptationStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicAdaptiveFeedbackOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicAdaptiveRefinementOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicDynamicAdaptation.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicDynamicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFeedbackOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV4.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV5.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV7.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicImpulseOptimizerV9.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicPrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonicResilientEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonizedPSO.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmRefined.py create mode 100644 nevergrad/optimization/lama/QuantumHarmonySearch.py create mode 100644 nevergrad/optimization/lama/QuantumHybridAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV2.py create mode 100644 nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV8.py create mode 100644 nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV9.py create mode 100644 nevergrad/optimization/lama/QuantumHybridDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v3.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v3.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v4.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v5.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v6.py create mode 100644 nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v7.py create mode 100644 nevergrad/optimization/lama/QuantumHybridImprovedDE.py create mode 100644 nevergrad/optimization/lama/QuantumHybridParticleDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/QuantumInfluenceCrossoverOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInfluencedAdaptiveDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearchV4.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveInertiaOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV4.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV5.py create mode 100644 nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV6.py create mode 100644 nevergrad/optimization/lama/QuantumInformedCooperativeSearchV1.py create mode 100644 nevergrad/optimization/lama/QuantumInformedCrossoverEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumInformedDifferentialStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumInformedDynamicSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumInformedGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedHyperStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedPSO.py create mode 100644 nevergrad/optimization/lama/QuantumInformedParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInformedStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInfusedAdaptiveStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveDEElitistLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveDEHybridLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredAdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredMetaheuristic.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumInspiredSpiralOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumIterativeDeepeningHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumIterativeRefinementOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLeapOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLeapOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDEHybridLocalSearch.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV4.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV5.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumLevyAdaptiveMemeticOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDifferentialHybridSearch.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmV5.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDynamicParticleSwarm.py create mode 100644 nevergrad/optimization/lama/QuantumLevyDynamicSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEliteMemeticDEHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEliteMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEnhancedDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumLevyEnhancedMemeticOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumLevyImprovedDifferentialSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumLevyParticleAdaptiveOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumLevySwarmOptimizationV3.py create mode 100644 nevergrad/optimization/lama/QuantumLocustSearch.py create mode 100644 nevergrad/optimization/lama/QuantumLocustSearchV2.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalAdaptiveCrossoverOptimizerV20.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV12.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV13.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV14.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV15.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV16.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV17.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV18.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV24.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV25.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV26.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV27.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV28.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV29.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV30.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV31.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV32.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV33.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV34.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalDynamicOptimizerV11.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalEnhancedCrossoverOptimizerV22.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalEnhancedDynamicEnhancerV19.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalHarmonicOptimizerV10.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalPrecisionOptimizerV34.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV21.py create mode 100644 nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV23.py create mode 100644 nevergrad/optimization/lama/QuantumParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/QuantumParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumReactiveCooperativeStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumRefinedAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumRefinedAdaptiveHybridStrategyV5.py create mode 100644 nevergrad/optimization/lama/QuantumRefinedAdaptiveStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumRefinedDynamicAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/QuantumReinforcedNesterovAccelerator.py create mode 100644 nevergrad/optimization/lama/QuantumResonanceEvolutionaryStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumSearch.py create mode 100644 nevergrad/optimization/lama/QuantumSimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/QuantumSimulatedAnnealingHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumSimulatedAnnealingImproved.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralAdaptiveHybridStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralDynamicOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralEnhancedOptimizerV5.py create mode 100644 nevergrad/optimization/lama/QuantumSpectralRefinedOptimizerV4.py create mode 100644 nevergrad/optimization/lama/QuantumStabilizedDynamicBalanceOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumStateConvergenceOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumStateCrossoverOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumStateHybridStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumStateRefinedHybridStrategy.py create mode 100644 nevergrad/optimization/lama/QuantumStochasticGradientDescentFireworks.py create mode 100644 nevergrad/optimization/lama/QuantumStochasticGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/QuantumSwarmOptimizationImproved.py create mode 100644 nevergrad/optimization/lama/QuantumSymbioticEnhancedStrategyV3.py create mode 100644 nevergrad/optimization/lama/QuantumTunedGradientSearchV2.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizer.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV10.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV11.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV12.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV13.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV14.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV15.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV16.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV17.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV18.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV2.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV3.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV4.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV5.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV6.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV7.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV8.py create mode 100644 nevergrad/optimization/lama/QuantumTunnelingOptimizerV9.py create mode 100644 nevergrad/optimization/lama/RADE.py create mode 100644 nevergrad/optimization/lama/RADEA.py create mode 100644 nevergrad/optimization/lama/RADECM.py create mode 100644 nevergrad/optimization/lama/RADEDM.py create mode 100644 nevergrad/optimization/lama/RADEEM.py create mode 100644 nevergrad/optimization/lama/RADEPM.py create mode 100644 nevergrad/optimization/lama/RADSDiffEvo.py create mode 100644 nevergrad/optimization/lama/RAGCES.py create mode 100644 nevergrad/optimization/lama/RAGEA.py create mode 100644 nevergrad/optimization/lama/RAHDEMI.py create mode 100644 nevergrad/optimization/lama/RALES.py create mode 100644 nevergrad/optimization/lama/RAMDE.py create mode 100644 nevergrad/optimization/lama/RAMEDS.py create mode 100644 nevergrad/optimization/lama/RAMEDSPlus.py create mode 100644 nevergrad/optimization/lama/RAMEDSPro.py create mode 100644 nevergrad/optimization/lama/RAMSDiffEvo.py create mode 100644 nevergrad/optimization/lama/RAPDE.py create mode 100644 nevergrad/optimization/lama/RASES.py create mode 100644 nevergrad/optimization/lama/RAVDE.py create mode 100644 nevergrad/optimization/lama/RDACE.py create mode 100644 nevergrad/optimization/lama/RDSAS.py create mode 100644 nevergrad/optimization/lama/READEPMC.py create mode 100644 nevergrad/optimization/lama/REAMSEA.py create mode 100644 nevergrad/optimization/lama/RE_ADMMMS.py create mode 100644 nevergrad/optimization/lama/RPWDE.py create mode 100644 nevergrad/optimization/lama/RankingDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveClusteredDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixAdaptation.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveCrossoverElitistStrategyV7.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithGradientBoost.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDifferentialSpiralSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDimensionalClimbingStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDimensionalCrossoverEvolver.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDirectionalBiasQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDivergenceClusteringSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDiversityPSO.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategyV3.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV14.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV17.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV20.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveDynamicStrategyV25.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE_v5.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveElitistDE_v4.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEnhancedGradientGuidedHybridPSO.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingOptimizerV5.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientCrossover.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientEvolverV2.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientGuidedEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveGuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridEvolutionStrategyV6.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridQuasiRandomGradientDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveHybridSwarmEvolutionOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveIncrementalCrossover.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveIslandEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMemeticDiverseOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedStrategyV55.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMemoryStrategyV67.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE_v2.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveParameterStrategyV38.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionBalanceStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV4.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV6.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionDivideSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionFocalHybrid.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionHybridSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptivePrecisionStrategicOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumCrossoverStrategyV3.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolutionPlus.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumEliteDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumEntropyDE.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientBoostedMemeticSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumPSO.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuantumSwarmOptimizerV3.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomDEGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveRefinementPSO.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSimulatedAnnealingWithSmartMemory.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSpatialExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSpatialOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSpectralEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSpiralGradientSearch.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveStochasticGradientQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveStochasticHybridEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdaptiveSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/RefinedArchiveEnhancedAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedAttenuatedAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/RefinedBalancedAdaptiveElitistStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedBalancedExplorationOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedCMADiffEvoPSO.py create mode 100644 nevergrad/optimization/lama/RefinedConcentricDiversityStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedConcentricQuantumCrossoverStrategyV5.py create mode 100644 nevergrad/optimization/lama/RefinedConvergenceAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedConvergenceDE.py create mode 100644 nevergrad/optimization/lama/RefinedConvergentAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedCooperativeDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedCosineAdaptiveDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/RefinedDifferentialEvolutionWithAdaptiveLearningRate.py create mode 100644 nevergrad/optimization/lama/RefinedDifferentialParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedDimensionalCyclicCrossoverEvolver.py create mode 100644 nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV2.py create mode 100644 nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV4.py create mode 100644 nevergrad/optimization/lama/RefinedDualConvergenceEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedDualPhaseADPSO_DE_V3_Enhanced.py create mode 100644 nevergrad/optimization/lama/RefinedDualPhaseOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDE.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicAdaptiveStrategyV23.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV4.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicClusteringPSO.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicCrowdingHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicEliteAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicEnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicHybridDEPSOWithEliteMemoryV2.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedDynamicQuantumEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV4.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV5.py create mode 100644 nevergrad/optimization/lama/RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch.py create mode 100644 nevergrad/optimization/lama/RefinedEliteDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedEliteDynamicMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedEliteGuidedAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedEliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/RefinedEliteGuidedMutationDE_v3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveDualPhaseStrategyV9.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonySearch.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiOperatorSearch.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiStrategyDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v45.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v46.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v48.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedBalancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDifferentialEvolutionLocalSearch_v42.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimizationV3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v2.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualStrategyDynamicDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDualStrategyElitistDE_v2.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDynamicAdaptiveHybridOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedDynamicDualStrategyHybridDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedEliteGuidedAdaptiveRestartDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedEliteGuidedMassQGSA_v87.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHybridExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHyperAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedHyperStrategicOptimizerV57.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedMetaNetAQAPSOv7.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedOptimizedEvolutiveStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedPrecisionEvolutionaryOptimizerV40.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedQAPSOAIRVCHRLS.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedRAMEDSProV3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedRAMEDSv3.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedRAMEDSv4.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedStrategyDE.py create mode 100644 nevergrad/optimization/lama/RefinedEnhancedUltraRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/RefinedEnsembleAdaptiveQuantumDE.py create mode 100644 nevergrad/optimization/lama/RefinedEvolutionaryGradientHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/RefinedEvolutionaryTuningStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedGlobalClimbingOptimizerV2.py create mode 100644 nevergrad/optimization/lama/RefinedGlobalLocalBalancingOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedGlobalStructureAdaptiveEvolverV2.py create mode 100644 nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV2.py create mode 100644 nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV3.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBalancedExplorationPSO.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBoostedMemoryAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealingPlus.py create mode 100644 nevergrad/optimization/lama/RefinedGradientBoostedOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedGradientGuidedEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedHybridAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedHybridAdaptiveGradientPSO.py create mode 100644 nevergrad/optimization/lama/RefinedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedHybridCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/RefinedHybridDEPSOWithAdaptiveMemoryV4.py create mode 100644 nevergrad/optimization/lama/RefinedHybridDEPSOWithDynamicAdaptationV3.py create mode 100644 nevergrad/optimization/lama/RefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedHybridDynamicClusterOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE.py create mode 100644 nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v2.py create mode 100644 nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v3.py create mode 100644 nevergrad/optimization/lama/RefinedHybridEvolutionStrategyV4.py create mode 100644 nevergrad/optimization/lama/RefinedHybridEvolutionaryAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedHybridPSODEOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedHybridPSODESimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedHybridPSO_DE.py create mode 100644 nevergrad/optimization/lama/RefinedHybridPrecisionSearch.py create mode 100644 nevergrad/optimization/lama/RefinedHybridQuantumAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedHybridQuantumLevyAdaptiveSwarm.py create mode 100644 nevergrad/optimization/lama/RefinedHybridQuasiRandomDEGradientAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2.py create mode 100644 nevergrad/optimization/lama/RefinedHyperEvolvedDynamicRAMEDS.py create mode 100644 nevergrad/optimization/lama/RefinedHyperOptimizedDynamicPrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedHyperOptimizedThermalEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedHyperRefinedDynamicPrecisionOptimizerV50.py create mode 100644 nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV52.py create mode 100644 nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV55.py create mode 100644 nevergrad/optimization/lama/RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2.py create mode 100644 nevergrad/optimization/lama/RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4.py create mode 100644 nevergrad/optimization/lama/RefinedInertiaFocalOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedIntelligentEvolvingAdaptiveStrategyV35.py create mode 100644 nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV10Plus.py create mode 100644 nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV2.py create mode 100644 nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV6.py create mode 100644 nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV9.py create mode 100644 nevergrad/optimization/lama/RefinedMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedMemeticDiverseOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedMemeticDiverseOptimizerV4.py create mode 100644 nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryAdaptiveDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryEnhancedDynamicHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryEnhancedHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72.py create mode 100644 nevergrad/optimization/lama/RefinedMemoryGuidedHybridStrategyV63.py create mode 100644 nevergrad/optimization/lama/RefinedMetaNetAQAPSO.py create mode 100644 nevergrad/optimization/lama/RefinedMultiFocalAdaptiveElitistStrategyV4.py create mode 100644 nevergrad/optimization/lama/RefinedMultiOperatorAdaptiveOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedMultiPhaseAdaptiveHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/RefinedMultiStageAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/RefinedMultiStrategyDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedMultiStrategySelfAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedMultiStrategySwarmDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedNicheDifferentialParticleSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedOptimalDynamicPrecisionOptimizerV15.py create mode 100644 nevergrad/optimization/lama/RefinedOptimalEnhancedRAMEDS.py create mode 100644 nevergrad/optimization/lama/RefinedOptimalEvolutionaryGradientOptimizerV12.py create mode 100644 nevergrad/optimization/lama/RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5.py create mode 100644 nevergrad/optimization/lama/RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing.py create mode 100644 nevergrad/optimization/lama/RefinedOptimizedEnhancedDualStrategyAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedOptimizedHybridAdaptiveMultiStageOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedPrecisionAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/RefinedPrecisionEnhancedDualStrategyOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedPrecisionEnhancedSpatialAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/RefinedPrecisionEvolutionaryThermalOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedPrecisionTunedCrossoverElitistStrategyV12.py create mode 100644 nevergrad/optimization/lama/RefinedProgressiveParticleSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedProgressiveQuorumEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedQuadraticAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridOptimizerV4.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridSearchV3.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveLevySwarmOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveMultiPopulationDE.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveOptimizerV2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumAdaptiveVelocityOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumCognitionAdaptiveTuningOptimizerV15.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumCognitionHybridOptimizerV22.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV13.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV4.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumCovarianceMatrixDifferentialEvolutionV4.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveLearning.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumDifferentialParticleOptimizerWithElitism.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEnhancedHybridDEPSO.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptation.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumFluxDifferentialSwarm.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumGradientAdaptiveExplorationOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumGradientSearch.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV6.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV8.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumHybridAdaptiveStrategyV3.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumHybridDynamicAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumHybridEliteAdaptiveDE.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInfluenceLocalSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInformedAdaptiveInertiaOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInformedAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInformedDifferentialStrategyV2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInformedGradientOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInformedPSO.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumInfusedAdaptiveStrategyV2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumLevyMemeticDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumMultiStrategyOptimization.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumNesterovSynergyV2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumResilientCrossoverEnhancer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumSwarmOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV2.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV4.py create mode 100644 nevergrad/optimization/lama/RefinedQuantumTunnelingOptimizerV19.py create mode 100644 nevergrad/optimization/lama/RefinedRAMEDSPro.py create mode 100644 nevergrad/optimization/lama/RefinedRAMEDSv2.py create mode 100644 nevergrad/optimization/lama/RefinedSpatialAdaptiveOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedSpiralSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedStochasticBalancingOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedStrategicAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedStrategicDiminishingEvolver.py create mode 100644 nevergrad/optimization/lama/RefinedStrategicQuorumWithDirectionalBias.py create mode 100644 nevergrad/optimization/lama/RefinedSuperiorAdaptiveStrategyDE.py create mode 100644 nevergrad/optimization/lama/RefinedTemporalAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RefinedUltimateEnhancedGuidedMassQGSA_v71.py create mode 100644 nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV16.py create mode 100644 nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV17.py create mode 100644 nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV34.py create mode 100644 nevergrad/optimization/lama/RefinedUltimateEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedUltimatePrecisionEvolutionaryOptimizerV42.py create mode 100644 nevergrad/optimization/lama/RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinedUltraEvolutionaryGradientOptimizerV28.py create mode 100644 nevergrad/optimization/lama/RefinedUltraOptimizedDynamicPrecisionOptimizerV20.py create mode 100644 nevergrad/optimization/lama/RefinedUltraOptimizedEvolutionaryGradientOptimizerV31.py create mode 100644 nevergrad/optimization/lama/RefinedUltraRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/RefinementEnhancedHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/RefinementSelectiveCohortOptimization.py create mode 100644 nevergrad/optimization/lama/RefinementTunedPSO.py create mode 100644 nevergrad/optimization/lama/ResilientAdaptivePSO.py create mode 100644 nevergrad/optimization/lama/ResponsiveAdaptiveMemoryStrategyV52.py create mode 100644 nevergrad/optimization/lama/ResponsiveAdaptiveStrategyV27.py create mode 100644 nevergrad/optimization/lama/RestartAdaptiveDifferentialEvolutionPSO.py create mode 100644 nevergrad/optimization/lama/RevisedEnhancedDifferentialEvolutionLSRefinement_v20.py create mode 100644 nevergrad/optimization/lama/RevolutionaryFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/RobustAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/RobustAdaptiveMemoryLeveragedStrategyV43.py create mode 100644 nevergrad/optimization/lama/RobustCovarianceMatrixAdaptationMemeticSearch.py create mode 100644 nevergrad/optimization/lama/SADE.py create mode 100644 nevergrad/optimization/lama/SADEEM.py create mode 100644 nevergrad/optimization/lama/SADEIOL.py create mode 100644 nevergrad/optimization/lama/SADEPF.py create mode 100644 nevergrad/optimization/lama/SAGEA.py create mode 100644 nevergrad/optimization/lama/SGAE.py create mode 100644 nevergrad/optimization/lama/SGE.py create mode 100644 nevergrad/optimization/lama/SORAMED.py create mode 100644 nevergrad/optimization/lama/ScaledHybridDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/SelfAdaptingDifferentialEvolutionOptimizer.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveCovarianceMatrixDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithLocalRestart.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithMemeticSearch.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithRestart.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveDifferentialSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveInterleavedOptimization.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveMemeticAlgorithmV2.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveMemeticEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveOppositionBasedHarmonySearchDE.py create mode 100644 nevergrad/optimization/lama/SelfAdaptiveQuantumMemeticAlgorithm.py create mode 100644 nevergrad/optimization/lama/SequentialAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/SequentialQuadraticAdaptiveEvolutionStrategy.py create mode 100644 nevergrad/optimization/lama/SequentialQuadraticExploitationSearch.py create mode 100644 nevergrad/optimization/lama/SimpleHybridDE.py create mode 100644 nevergrad/optimization/lama/SimplifiedAdaptiveDynamicDualPhaseStrategyV18.py create mode 100644 nevergrad/optimization/lama/SimulatedAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/SpiralSearchOptimizer.py create mode 100644 nevergrad/optimization/lama/StabilizedQuantumCognitionOptimizerV11.py create mode 100644 nevergrad/optimization/lama/StabilizedQuantumConcentricOptimizer.py create mode 100644 nevergrad/optimization/lama/StabilizedRefinedEnhancedDynamicBalancingPSO.py create mode 100644 nevergrad/optimization/lama/StochasticAdaptiveEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/StochasticBalancingOptimizer.py create mode 100644 nevergrad/optimization/lama/StochasticGradientEnhancedDE.py create mode 100644 nevergrad/optimization/lama/StochasticGradientExploration.py create mode 100644 nevergrad/optimization/lama/StochasticGradientHybridOptimization.py create mode 100644 nevergrad/optimization/lama/StochasticGradientQuorumOptimization.py create mode 100644 nevergrad/optimization/lama/StrategicAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/StrategicDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/StrategicDiminishingAdaptiveEvolver.py create mode 100644 nevergrad/optimization/lama/StrategicHybridDE.py create mode 100644 nevergrad/optimization/lama/StrategicMultiPhaseEvolutionaryAlgorithm.py create mode 100644 nevergrad/optimization/lama/StrategicQuorumMutationWithAdaptiveElites.py create mode 100644 nevergrad/optimization/lama/StrategicResilienceAdaptiveSearch.py create mode 100644 nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimization.py create mode 100644 nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimizationImproved.py create mode 100644 nevergrad/optimization/lama/SuperOptimizedRAMEDS.py create mode 100644 nevergrad/optimization/lama/SuperRefinedRAMEDSv5.py create mode 100644 nevergrad/optimization/lama/SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5.py create mode 100644 nevergrad/optimization/lama/SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16.py create mode 100644 nevergrad/optimization/lama/SuperiorAdaptiveStrategyDE.py create mode 100644 nevergrad/optimization/lama/SuperiorEnhancedDynamicPrecisionOptimizerV1.py create mode 100644 nevergrad/optimization/lama/SuperiorHybridEvolutionaryAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/SuperiorOptimalEnhancedStrategyDE.py create mode 100644 nevergrad/optimization/lama/SuperiorRefinedEvolutionaryGradientOptimizerV13.py create mode 100644 nevergrad/optimization/lama/SupremeDynamicAdaptiveOptimizerV5.py create mode 100644 nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV1.py create mode 100644 nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV2.py create mode 100644 nevergrad/optimization/lama/SupremeEvolutionaryGradientHybridOptimizerV6.py create mode 100644 nevergrad/optimization/lama/SupremeOptimalPrecisionEvolutionaryThermalOptimizer.py create mode 100644 nevergrad/optimization/lama/SupremeUltraEnhancedEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/TemporalAdaptiveDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/TurbochargedDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithm.py create mode 100644 nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithmImproved.py create mode 100644 nevergrad/optimization/lama/UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19.py create mode 100644 nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV15.py create mode 100644 nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV26.py create mode 100644 nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV33.py create mode 100644 nevergrad/optimization/lama/UltimateEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/UltimateRefinedAQAPSO_LS_DIW_AP.py create mode 100644 nevergrad/optimization/lama/UltimateRefinedPrecisionEvolutionaryOptimizerV41.py create mode 100644 nevergrad/optimization/lama/UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18.py create mode 100644 nevergrad/optimization/lama/UltraDynamicAdaptiveRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraDynamicDualPhaseOptimizedStrategyV16.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV10.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV11.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV12.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV2.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV3.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV4.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV7.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedAdaptiveRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedDynamicDE.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedEvolutionaryGradientOptimizerV14.py create mode 100644 nevergrad/optimization/lama/UltraEnhancedPrecisionEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraEvolutionaryGradientOptimizerV27.py create mode 100644 nevergrad/optimization/lama/UltraFineSpiralDifferentialOptimizerV7.py create mode 100644 nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizerV24.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV18.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV19.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV52.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV53.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedEvolutionaryGradientOptimizerV30.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraOptimizedSpiralDifferentialEvolution.py create mode 100644 nevergrad/optimization/lama/UltraPreciseDynamicOptimizerV26.py create mode 100644 nevergrad/optimization/lama/UltraPrecisionSpiralDifferentialOptimizerV9.py create mode 100644 nevergrad/optimization/lama/UltraQuantumReactiveHybridStrategy.py create mode 100644 nevergrad/optimization/lama/UltraRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveConvergenceStrategy.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV5.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV6.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV8.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV9.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptivePrecisionOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraRefinedAdaptiveRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraRefinedConvergenceSpiralSearch.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV10.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV11.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV17.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV22.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV23.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV24.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV25.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV26.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV27.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV28.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV29.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV30.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV31.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV32.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV33.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV34.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV35.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV36.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV37.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV38.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV39.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV4.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV40.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV41.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV44.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV45.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV46.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV47.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV5.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV54.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV55.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV56.py create mode 100644 nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV9.py create mode 100644 nevergrad/optimization/lama/UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientHybridOptimizerV5.py create mode 100644 nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV10.py create mode 100644 nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV32.py create mode 100644 nevergrad/optimization/lama/UltraRefinedHybridEvolutionaryAnnealingOptimizer.py create mode 100644 nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV50.py create mode 100644 nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV54.py create mode 100644 nevergrad/optimization/lama/UltraRefinedPrecisionEvolutionaryOptimizerV43.py create mode 100644 nevergrad/optimization/lama/UltraRefinedRAMEDS.py create mode 100644 nevergrad/optimization/lama/UltraRefinedSpiralDifferentialClimberV3.py create mode 100644 nevergrad/optimization/lama/UltraRefinedStrategicEvolutionaryOptimizerV60.py create mode 100644 nevergrad/optimization/lama/UltraRefinedStrategyDE.py create mode 100644 nevergrad/optimization/lama/UltraSupremeEvolutionaryGradientHybridOptimizerV7.py create mode 100644 nevergrad/optimization/lama/UnifiedAdaptiveMemeticOptimizer.py create mode 100644 nevergrad/optimization/lama/VectorizedRefinedSpiralSearch.py create mode 100644 nevergrad/optimization/lama/eQGSA_v2.py diff --git a/nevergrad/optimization/lama/AADCCS.py b/nevergrad/optimization/lama/AADCCS.py new file mode 100644 index 000000000..e2e8d0532 --- /dev/null +++ b/nevergrad/optimization/lama/AADCCS.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AADCCS: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=150, + F_base=0.5, + CR_base=0.8, + learning_rate=0.1, + p=0.25, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base # Initial mutation factor + self.CR_base = CR_base # Initial crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive parameters + self.p = p # Probability of using best individual updates + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mutation and crossover probabilities + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Choose different indices for mutation, ensuring all are unique + indices = np.random.choice(self.population_size, 4, replace=False) + a, b, c, d = population[indices] + + # Mutation with best individual influence + if np.random.rand() < self.p: + a = best_individual # Using best individual to guide mutation + + # Differential mutation and crossover + mutant = np.clip(a + F_adaptive[i] * ((b - c) + (a - d)), self.lower_bound, self.upper_bound) + trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection and adaptivity update + if trial_fitness < fitness[i]: + population[i], fitness[i] = trial, trial_fitness + if trial_fitness < best_fitness: + best_fitness, best_individual = trial_fitness, trial.copy() + # Adaptive factor update towards successful mutations + F_adaptive[i] = max(0.1, F_adaptive[i] + self.learning_rate * (1.0 - F_adaptive[i])) + CR_adaptive[i] = min(1.0, CR_adaptive[i] - self.learning_rate * CR_adaptive[i]) + else: + # Adaptive factor degradation towards unsuccessful mutations + F_adaptive[i] = max(0.1, F_adaptive[i] - self.learning_rate * F_adaptive[i]) + CR_adaptive[i] = min(1.0, CR_adaptive[i] + self.learning_rate * (1.0 - CR_adaptive[i])) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AADEHLS.py b/nevergrad/optimization/lama/AADEHLS.py new file mode 100644 index 000000000..bbbfae22f --- /dev/null +++ b/nevergrad/optimization/lama/AADEHLS.py @@ -0,0 +1,83 @@ +import numpy as np + + +class AADEHLS: + def __init__(self, budget, population_size=50, F_init=0.5, CR_init=0.9): + self.budget = budget + self.CR_init = CR_init + self.F_init = F_init + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def opposite_point(self, x): + return self.lower_bound + self.upper_bound - x + + def __call__(self, func): + # Initialize population with Opposition-Based Learning + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + opposite_population = self.opposite_point(population) + combined_population = np.vstack((population, opposite_population)) + fitness = np.array([func(ind) for ind in combined_population]) + indices = np.argsort(fitness) + population = combined_population[indices[: self.population_size]] + fitness = fitness[indices[: self.population_size]] + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + F = self.F_init + CR = self.CR_init + successful_F = [] + successful_CR = [] + + evaluations = self.population_size * 2 + while evaluations < self.budget: + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + successful_F.append(F) + successful_CR.append(CR) + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + # Update F and CR adaptively based on successes + if successful_F: + F = np.mean(successful_F) + CR = np.mean(successful_CR) + + # Enhanced hybrid local search phase + local_best = best_solution.copy() + for _ in range(10): + perturbation = np.random.normal(0, 0.1, self.dimension) + local_trial = np.clip(local_best + perturbation, self.lower_bound, self.upper_bound) + local_fitness = func(local_trial) + evaluations += 1 + + if local_fitness < best_fitness: + best_solution = local_trial + best_fitness = local_fitness + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AADMEM.py b/nevergrad/optimization/lama/AADMEM.py new file mode 100644 index 000000000..a2dd273dc --- /dev/null +++ b/nevergrad/optimization/lama/AADMEM.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AADMEM: + def __init__( + self, budget, population_size=50, crossover_rate=0.9, F_base=0.5, F_amp=0.3, memory_size=100 + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory to store good solutions + memory = np.empty((0, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor that changes over time to increase exploration + F = self.F_base + self.F_amp * np.sin(np.pi * evaluations / self.budget) + + # Mutation vectors from population and occasionally from memory + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if memory.shape[0] > 0 and np.random.rand() < 0.1: # 10% chance to use memory + a = memory[np.random.randint(0, memory.shape[0])] + + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with the old solution + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + else: + # Replace a random entry in memory + memory[np.random.randint(0, self.memory_size)] = population[i] + + # Update population with the new better solution + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Check if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AAES.py b/nevergrad/optimization/lama/AAES.py new file mode 100644 index 000000000..4691a2c3a --- /dev/null +++ b/nevergrad/optimization/lama/AAES.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AAES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.F = 0.8 # Mutation factor + self.CR = 0.9 # Crossover rate + self.stagnation_threshold = 20 # Threshold for enhanced local search and rejuvenation + self.no_improvement_intervals = 0 + self.momentum_F = 0.95 + self.momentum_CR = 0.05 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, idx, population): + indices = np.delete(np.arange(self.population_size), idx) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.CR + return np.where(cross_points, mutant, target) + + def select(self, target, trial, f_target, f_trial): + return trial if f_trial < f_target else target + + def local_search(self, best_individual, func): + step_size = 0.1 + for _ in range(10): # Perform 10 steps of local search + neighbor = best_individual + np.random.uniform(-step_size, step_size, self.dimension) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + if func(neighbor) < func(best_individual): + best_individual = neighbor + return best_individual + + def update_parameters(self, successes, trials): + self.F = max(0.1, self.F * (self.momentum_F if successes / trials < 0.2 else 1.05)) + self.CR = 0.1 + 0.8 * (self.momentum_CR * successes + (1 - self.momentum_CR) * trials) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_score = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.copy(population) + successes = 0 + + for i in range(self.population_size): + mutant = self.mutation(i, population) + trial = self.crossover(population[i], mutant) + f_trial = func(trial) + f_target = fitness[i] + + if f_trial < f_target: + new_population[i] = trial + fitness[i] = f_trial + successes += 1 + + evaluations += 1 + if evaluations >= self.budget: + break + + if successes == 0: + self.no_improvement_intervals += 1 + if self.no_improvement_intervals >= self.stagnation_threshold: + population[np.argsort(fitness)[-10:]] = self.initialize_population()[ + :10 + ] # Rejuvenate worst 10 + fitness[np.argsort(fitness)[-10:]] = self.evaluate( + population[np.argsort(fitness)[-10:]], func + ) + self.no_improvement_intervals = 0 + + self.update_parameters(successes, self.population_size) + population = new_population + current_best = np.argmin(fitness) + if fitness[current_best] < best_score: + best_idx = current_best + best_score = fitness[best_idx] + + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/ACDE.py b/nevergrad/optimization/lama/ACDE.py new file mode 100644 index 000000000..8f3bfb90c --- /dev/null +++ b/nevergrad/optimization/lama/ACDE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ACDE: + def __init__(self, budget, population_size=100, F=0.8, CR=0.9, cluster_ratio=0.2, adaptation_rate=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F = F # Differential weight + self.CR = CR # Crossover probability + self.cluster_ratio = cluster_ratio # Ratio of population to consider for clustering + self.adaptation_rate = adaptation_rate # Rate of adaptation for F and CR + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Clustering top-performing individuals + top_cluster_size = int(self.population_size * self.cluster_ratio) + top_indices = np.argsort(fitness)[:top_cluster_size] + top_cluster = population[top_indices] + + # Update strategy parameters adaptively + if num_evals > self.population_size and num_evals % 100 == 0: + self.F += self.adaptation_rate * (1 - 2 * np.random.rand()) + self.CR += self.adaptation_rate * (1 - 2 * np.random.rand()) + self.F, self.CR = np.clip(self.F, 0.5, 1), np.clip(self.CR, 0.8, 1) + + for i in range(self.population_size): + if i in top_indices: + # Evolve using cluster members + a, b, c = np.random.choice(top_indices, 3, replace=False) + else: + # Regular DE operation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + + mutant = population[a] + self.F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial vector + trial_fitness = func(trial) + num_evals += 1 + if num_evals >= self.budget: + break + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ACMDEOBD.py b/nevergrad/optimization/lama/ACMDEOBD.py new file mode 100644 index 000000000..2819dbbbf --- /dev/null +++ b/nevergrad/optimization/lama/ACMDEOBD.py @@ -0,0 +1,77 @@ +import numpy as np + + +class ACMDEOBD: + def __init__( + self, budget, population_size=50, F_init=0.5, CR_init=0.9, local_search_factor=0.1, max_local_steps=20 + ): + self.budget = budget + self.CR_init = CR_init + self.F_init = F_init + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.local_search_factor = local_search_factor + self.max_local_steps = max_local_steps + + def opposition_based_learning(self, population): + return self.lower_bound + self.upper_bound - population + + def __call__(self, func): + # Initialize population with Opposition-Based Learning + initial_population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + opposite_population = self.opposition_based_learning(initial_population) + combined_population = np.vstack((initial_population, opposite_population)) + fitness = np.array([func(ind) for ind in combined_population]) + indices = np.argsort(fitness)[: self.population_size] + population = combined_population[indices] + fitness = fitness[indices] + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + F = self.F_init + CR = self.CR_init + evaluations = 2 * self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d, e = np.random.choice(idxs, 5, replace=False) + + # Adaptive parameters + F = self.F_init * (1 - evaluations / self.budget) + CR = self.CR_init * (evaluations / self.budget) + + # Cross-mutative strategy + mutant = ( + population[a] + F * (population[b] - population[c]) + F * (population[d] - population[e]) + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Periodic Opposition-Based Learning + if evaluations % (self.population_size * 5) == 0: + population = self.opposition_based_learning(population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADAEDA.py b/nevergrad/optimization/lama/ADAEDA.py new file mode 100644 index 000000000..a841fc193 --- /dev/null +++ b/nevergrad/optimization/lama/ADAEDA.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ADAEDA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.initial_cr = 0.9 + self.initial_f = 0.8 + self.initial_temp = 1.0 + self.final_temp = 0.01 + self.alpha = 0.95 # Cooling rate + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, cr, f): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = population[best_idx] + f * (x1 - x2 + x3 - population[best_idx]) + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, cr): + crossover_mask = np.random.rand(self.dimension) < cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + temperature = self.initial_temp + cr = self.initial_cr + f = self.initial_f + + while evaluations < self.budget: + mutated_population = self.mutate(population, best_idx, cr, f) + offspring_population = np.array( + [ + self.crossover(population[i], mutated_population[i], cr) + for i in range(self.population_size) + ] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i]: + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution, best_idx = fitness[i], population[i], i + + # Adaptive selective pressure based on temperature + if temperature > self.final_temp: + temperature *= self.alpha + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADCE.py b/nevergrad/optimization/lama/ADCE.py new file mode 100644 index 000000000..30f346ae6 --- /dev/null +++ b/nevergrad/optimization/lama/ADCE.py @@ -0,0 +1,60 @@ +import numpy as np + + +class ADCE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_base = 0.5 + self.crossover_base = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx): + mutants = np.empty_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i and idx != best_idx] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutation_factor = self.mutation_base + np.random.rand() * (1.0 - self.mutation_base) + mutant = population[a] + mutation_factor * (population[b] - population[c]) + mutants[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return mutants + + def crossover(self, target, mutant): + crossover_prob = self.crossover_base + np.random.rand() * (1.0 - self.crossover_base) + mask = np.random.rand(self.dimension) < crossover_prob + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + for i in range(self.population_size): + trial = self.crossover(population[i], mutants[i]) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + return new_population, new_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + best_idx = np.argmin(fitness) + mutants = self.mutate(population, best_idx) + population, fitness = self.select(population, fitness, mutants, func) + evaluations += self.population_size + + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ADEA.py b/nevergrad/optimization/lama/ADEA.py new file mode 100644 index 000000000..b11055c5e --- /dev/null +++ b/nevergrad/optimization/lama/ADEA.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ADEA: + def __init__(self, budget, population_size=30, crossover_rate=0.8, F=0.5, archive_size=50): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F = F + self.archive_size = archive_size + + def __call__(self, func): + # Bounds and dimensionality + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize archive + archive = np.empty((0, dimension)) + + # Best solution found + best_idx = np.argmin(fitness) + best_solution = population[best_idx, :] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation (DE/rand/1/bin) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update archive + if archive.shape[0] < self.archive_size: + archive = np.vstack([archive, population[i]]) + else: + archive[np.random.randint(self.archive_size)] = population[i] + + # Update population + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADEAS.py b/nevergrad/optimization/lama/ADEAS.py new file mode 100644 index 000000000..9389123c7 --- /dev/null +++ b/nevergrad/optimization/lama/ADEAS.py @@ -0,0 +1,97 @@ +import numpy as np + + +class ADEAS: + def __init__(self, budget): + self.budget = budget + self.initial_population_size = 20 + self.dimension = 5 + self.low = -5.0 + self.high = 5.0 + self.T_initial = 1.0 + self.T_min = 0.01 + self.decay_rate = 0.95 + + def initialize(self): + population_size = self.initial_population_size + population = np.random.uniform(self.low, self.high, (population_size, self.dimension)) + F = np.random.normal(0.5, 0.1, population_size) + CR = np.random.normal(0.9, 0.05, population_size) + return population, F, CR, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, population, F): + mutant = np.zeros_like(population) + for i in range(len(population)): + indices = np.random.choice(len(population), 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant[i] = np.clip(mutant_vector, self.low, self.high) + return mutant + + def crossover(self, population, mutant, CR): + crossover = np.where( + np.random.rand(len(population), self.dimension) < CR[:, None], mutant, population + ) + return crossover + + def select(self, population, fitness, trial_population, trial_fitness, F, CR): + improved = trial_fitness < fitness + population[improved] = trial_population[improved] + fitness[improved] = trial_fitness[improved] + + # Update F and CR adaptively + F[improved] = np.clip(F[improved] * 1.1, 0.1, 1.0) + CR[improved] = np.clip(CR[improved] * 0.95, 0.1, 1.0) + F[~improved] = np.clip(F[~improved] * 0.9, 0.1, 1.0) + CR[~improved] = np.clip(CR[~improved] * 1.05, 0.1, 1.0) + + return population, fitness, F, CR + + def adaptive_local_search(self, individual, func, T): + for _ in range(10): + neighbor = individual + np.random.normal(0, T, self.dimension) + neighbor = np.clip(neighbor, self.low, self.high) + if func(neighbor) < func(individual): + individual = neighbor + return individual + + def __call__(self, func): + population, F, CR, population_size = self.initialize() + fitness = self.evaluate(population, func) + T = self.T_initial + evaluations = population_size + + while evaluations < self.budget: + mutant = self.mutation(population, F) + trial_population = self.crossover(population, mutant, CR) + trial_fitness = self.evaluate(trial_population, func) + evaluations += len(trial_population) + + population, fitness, F, CR = self.select( + population, fitness, trial_population, trial_fitness, F, CR + ) + + # Dynamic population adjustment + if np.std(fitness) < np.mean(fitness) * 0.1 and len(population) < 40: + additional_members = np.random.uniform(self.low, self.high, (10, self.dimension)) + population = np.vstack([population, additional_members]) + additional_fitness = self.evaluate(additional_members, func) + fitness = np.concatenate([fitness, additional_fitness]) + evaluations += len(additional_members) + F = np.concatenate([F, np.random.normal(0.5, 0.1, 10)]) + CR = np.concatenate([CR, np.random.normal(0.9, 0.05, 10)]) + + # Local search with adaptive temperature + selected_indices = np.random.choice(len(population), size=5, replace=False) + for idx in selected_indices: + population[idx] = self.adaptive_local_search(population[idx], func, T) + fitness[idx] = func(population[idx]) + T = max(T * self.decay_rate, self.T_min) + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADECMS.py b/nevergrad/optimization/lama/ADECMS.py new file mode 100644 index 000000000..90deb3a28 --- /dev/null +++ b/nevergrad/optimization/lama/ADECMS.py @@ -0,0 +1,60 @@ +import numpy as np + + +class ADECMS: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Adaptive F scaling based on the linear progression from initial to end value + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Select three random distinct indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Combine two mutation strategies: DE/current-to-best/1 and DE/rand/1 + best = population[np.argmin(fitness)] + mutant_best = x1 + F_current * (best - x1 + x2 - x3) + mutant_rand = x1 + F_current * (x2 - x3) + + # Select mutation based on a random choice + mutant = mutant_best if np.random.rand() < 0.5 else mutant_rand + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + + # Evaluate the new candidate + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ADEDCA.py b/nevergrad/optimization/lama/ADEDCA.py new file mode 100644 index 000000000..3ddbd9b16 --- /dev/null +++ b/nevergrad/optimization/lama/ADEDCA.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ADEDCA: + def __init__(self, budget, population_size=150, F_base=0.8, CR_init=0.5, adapt_F=0.1, adapt_CR=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Initial base differential weight + self.CR_init = CR_init # Initial crossover probability + self.adapt_F = adapt_F # Rate of adaptation for F + self.adapt_CR = adapt_CR # Rate of adaptation for CR + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Update F and CR for each generation to adapt to landscape + Fs = np.clip(np.random.normal(self.F_base, self.adapt_F, self.population_size), 0.4, 1.2) + CRs = np.clip(np.random.normal(self.CR_init, self.adapt_CR, self.population_size), 0, 1) + + for i in range(self.population_size): + # Mutation using current F values + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + Fs[i] * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover using current CR values + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + if num_evals >= self.budget: + break + + # Dynamic adaptation of CR and F + self.CR_init = ( + np.mean(CRs[fitness < np.array([func(ind) for ind in population])]) + if len(CRs[fitness < fitness]) > 0 + else self.CR_init + ) + self.F_base = ( + np.mean(Fs[fitness < np.array([func(ind) for ind in population])]) + if len(Fs[fitness < fitness]) > 0 + else self.F_base + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEDE.py b/nevergrad/optimization/lama/ADEDE.py new file mode 100644 index 000000000..0f24b6aa5 --- /dev/null +++ b/nevergrad/optimization/lama/ADEDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ADEDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.adaptation_rate = 0.05 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, diversity): + mutants = np.empty_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.mutation_factor * (population[b] - population[c]) + + # Leverages diversity to adjust mutation + mutant = mutant + diversity * (population[best_idx] - mutant) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + mutants[i] = mutant + return mutants + + def crossover(self, target, mutant): + mask = np.random.rand(self.dimension) < self.crossover_probability + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + for i in range(self.population_size): + trial = self.crossover(population[i], mutants[i]) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + return new_population, new_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + + while evaluations < self.budget: + diversity = np.std(population, axis=0).mean() / (self.bounds[1] - self.bounds[0]) + mutants = self.mutate(population, best_idx, diversity) + population, fitness = self.select(population, fitness, mutants, func) + evaluations += self.population_size + best_idx = np.argmin(fitness) + + # Update mutation factor based on diversity + self.mutation_factor = np.clip(self.mutation_factor - self.adaptation_rate + diversity, 0.1, 1.0) + + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ADEDLR.py b/nevergrad/optimization/lama/ADEDLR.py new file mode 100644 index 000000000..1bd7439a5 --- /dev/null +++ b/nevergrad/optimization/lama/ADEDLR.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ADEDLR: + def __init__(self, budget, population_size=40, CR=0.9, F=0.8): + self.budget = budget + self.CR = CR # Crossover probability + self.F = F # Differential weight + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + crossover = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Adaptive parameter adjustment + if evaluations % 10 == 0: + improvement_rate = np.mean(fitness) - best_fitness + if improvement_rate < 1e-5: + self.F = min(self.F * 1.1, 1.0) + self.CR = min(self.CR * 0.9, 1.0) + + if evaluations >= self.budget: + break + + # Local search phase + local_best = best_solution.copy() + for j in range(10): + local_trial = local_best + np.random.normal(0, 0.1, self.dimension) + local_trial = np.clip(local_trial, self.lower_bound, self.upper_bound) + local_fitness = func(local_trial) + evaluations += 1 + + if local_fitness < best_fitness: + best_solution = local_trial + best_fitness = local_fitness + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADEDM.py b/nevergrad/optimization/lama/ADEDM.py new file mode 100644 index 000000000..4397d3f47 --- /dev/null +++ b/nevergrad/optimization/lama/ADEDM.py @@ -0,0 +1,85 @@ +import numpy as np + + +class ADEDM: + def __init__( + self, + budget, + population_size=60, + crossover_rate=0.9, + F_min=0.6, + F_max=0.9, + memory_size=60, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADEEM.py b/nevergrad/optimization/lama/ADEEM.py new file mode 100644 index 000000000..1e41431c0 --- /dev/null +++ b/nevergrad/optimization/lama/ADEEM.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ADEEM: + def __init__(self, budget, population_size=50, F=0.8, CR=0.9, alpha=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F = F # Mutation factor + self.CR = CR # Crossover probability + self.alpha = alpha # Rate of adaptive adjustment + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation strategy using "DE/rand/1/bin" + perm = np.random.permutation(self.population_size) + perm = perm[perm != i][:3] + + a, b, c = population[perm[0]], population[perm[1]], population[perm[2]] + mutant = a + self.F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + # Adaptation of F and CR using feedback from the current population + mean_fitness = np.mean(fitness) + self.F = np.clip(self.F * (1 + self.alpha * (best_fitness - mean_fitness)), 0.5, 1) + self.CR = np.clip(self.CR * (1 - self.alpha * (best_fitness - mean_fitness)), 0.6, 0.95) + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEGE.py b/nevergrad/optimization/lama/ADEGE.py new file mode 100644 index 000000000..3f90c42fc --- /dev/null +++ b/nevergrad/optimization/lama/ADEGE.py @@ -0,0 +1,78 @@ +import numpy as np + + +class ADEGE: + def __init__( + self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=40, F=0.8, CR=0.9 + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F = F + self.CR = CR + + def __call__(self, func): + # Initialize population uniformly + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Strategy adaptation coefficients + adaptation_frequency = max(1, int(0.1 * self.budget)) + success_memory = [] + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation using "best/2" strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = np.random.choice(idxs, 4, replace=False) + mutant = best_individual + self.F * ( + population[a] + population[b] - population[c] - population[d] + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + j_rand = np.random.randint(self.dimension) + trial = np.array( + [ + mutant[j] if np.random.rand() < self.CR or j == j_rand else population[i][j] + for j in range(self.dimension) + ] + ) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + success_memory.append(1) + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + success_memory.append(0) + + # Adapt strategy parameters if enough trials have been made + if len(success_memory) >= adaptation_frequency: + success_rate = np.mean(success_memory) + self.F = self.F * (0.85 if success_rate < 0.15 else 1.15) + self.CR = self.CR * (0.85 if success_rate > 0.15 else 1.15) + self.F = max(0.5, min(self.F, 0.95)) + self.CR = max(0.5, min(self.CR, 0.95)) + success_memory = [] + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEGM.py b/nevergrad/optimization/lama/ADEGM.py new file mode 100644 index 000000000..ac95f25ef --- /dev/null +++ b/nevergrad/optimization/lama/ADEGM.py @@ -0,0 +1,56 @@ +import numpy as np + + +class ADEGM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Adaptive F scaling based on the linear progression from initial to end value + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Select three random distinct indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Guided mutation: DE/current-to-best/1 + best = population[np.argmin(fitness)] + mutant = x1 + F_current * (best - x1 + x2 - x3) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + + # Evaluate the new candidate + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ADEGS.py b/nevergrad/optimization/lama/ADEGS.py new file mode 100644 index 000000000..93516fdb9 --- /dev/null +++ b/nevergrad/optimization/lama/ADEGS.py @@ -0,0 +1,61 @@ +import numpy as np + + +class ADEGS: + def __init__(self, budget, population_size=50, crossover_rate=0.8, scaling_factor=0.8): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.scaling_factor = scaling_factor + self.dimension = 5 # Given dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + # Find the best solution in the initial population + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + best_individual = population[best_index] + + num_evals = self.population_size # Initial population evaluation + + while num_evals < self.budget: + for i in range(self.population_size): + # Mutation using "DE/rand/1/bin" strategy + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + self.scaling_factor * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) # Ensure mutant is within bounds + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + # Stop if budget is exhausted + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of ADEGS: +# optimizer = ADEGS(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/ADEM.py b/nevergrad/optimization/lama/ADEM.py new file mode 100644 index 000000000..f63c26888 --- /dev/null +++ b/nevergrad/optimization/lama/ADEM.py @@ -0,0 +1,75 @@ +import numpy as np + + +class ADEM: + def __init__(self, budget, population_size=50, crossover_rate=0.9, scaling_factor=0.8, memory_factor=0.1): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.scaling_factor = scaling_factor + self.memory_factor = memory_factor + self.dimension = 5 # Given dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + # Initialize memory for best solutions + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + # Find the best solution in the initial population + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + best_individual = population[best_index] + + num_evals = self.population_size # Initial population evaluation + + while num_evals < self.budget: + for i in range(self.population_size): + # Mutation using "DE/current-to-best/1" strategy + best_idx = np.argmin(memory_fitness) + a, b = population[np.random.choice(self.population_size, 2, replace=False)] + mutant = ( + population[i] + + self.scaling_factor * (memory[best_idx] - population[i]) + + self.scaling_factor * (a - b) + ) + mutant = np.clip(mutant, self.lb, self.ub) # Ensure mutant is within bounds + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update memory + if trial_fitness < memory_fitness[i]: + memory[i] = trial_vector + memory_fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + # Stop if budget is exhausted + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of ADEM: +# optimizer = ADEM(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/ADEMSC.py b/nevergrad/optimization/lama/ADEMSC.py new file mode 100644 index 000000000..3b2f874fd --- /dev/null +++ b/nevergrad/optimization/lama/ADEMSC.py @@ -0,0 +1,88 @@ +import numpy as np + + +class ADEMSC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.8, + CR_base=0.9, + adaptive=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.adaptive = adaptive + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive parameters initialization + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + chosen_indices = np.random.choice(indices, 3, replace=False) + x0, x1, x2 = population[chosen_indices] + mutant = x0 + F[i] * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Multi-Strategy Crossover + if np.random.rand() > 0.5: + # Binomial Crossover + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + else: + # Exponential Crossover + start = np.random.randint(self.dimension) + length = np.random.randint(1, self.dimension) + cross_points = np.array([False] * self.dimension) + for j in range(length): + cross_points[(start + j) % self.dimension] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptive parameter update + if self.adaptive: + success = trial_fitness < fitness[i] + F[i] += 0.01 * (success - 0.5) + CR[i] += 0.01 * (success - 0.5) + F[i] = np.clip(F[i], 0.1, 1.0) + CR[i] = np.clip(CR[i], 0.1, 1.0) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEPF.py b/nevergrad/optimization/lama/ADEPF.py new file mode 100644 index 000000000..cddb9a132 --- /dev/null +++ b/nevergrad/optimization/lama/ADEPF.py @@ -0,0 +1,66 @@ +import numpy as np + + +class ADEPF: + def __init__(self, budget, population_size=40, base_cr=0.9, base_f=0.5): + self.budget = budget + self.population_size = population_size + self.base_cr = base_cr # Base crossover probability + self.base_f = base_f # Base scaling factor + self.dimension = 5 # Given dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + # Find the best solution in the initial population + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + num_evals = self.population_size # Initial population evaluation + + while num_evals < self.budget: + for i in range(self.population_size): + # Adaptation logic for control parameters + progress = num_evals / self.budget + cr = self.base_cr * (1 - progress) + 0.1 # Crossover probability decreases over time + f = self.base_f * (1 - progress) + 0.1 # Scaling factor decreases over time + + # Mutation using "DE/rand/1/bin" strategy + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + f * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) # Ensure mutant is within bounds + + # Crossover + cross_points = np.random.rand(self.dimension) < cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + # Stop if budget is exhausted + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of ADEPF: +# optimizer = ADEPF(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/ADEPM.py b/nevergrad/optimization/lama/ADEPM.py new file mode 100644 index 000000000..54f555511 --- /dev/null +++ b/nevergrad/optimization/lama/ADEPM.py @@ -0,0 +1,60 @@ +import numpy as np + + +class ADEPM: + def __init__(self, budget, population_size=30, F_mean=0.5, CR_mean=0.9, learning_rate=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_mean = F_mean # Mean differential weight + self.CR_mean = CR_mean # Mean crossover probability + self.learning_rate = learning_rate # Learning rate for adaptation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Mutation and crossover parameters adaptively updated + F = np.clip(np.random.normal(self.F_mean, 0.1), 0.1, 1) + CR = np.clip(np.random.normal(self.CR_mean, 0.1), 0.1, 1) + + for i in range(self.population_size): + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial vector + trial_fitness = func(trial) + num_evals += 1 + if num_evals >= self.budget: + break + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adapt F and CR means + self.F_mean += self.learning_rate * (F - self.F_mean) + self.CR_mean += self.learning_rate * (CR - self.CR_mean) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEPMC.py b/nevergrad/optimization/lama/ADEPMC.py new file mode 100644 index 000000000..c0c95d907 --- /dev/null +++ b/nevergrad/optimization/lama/ADEPMC.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ADEPMC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + F_base=0.5, + CR_base=0.5, + learning_rate=0.1, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive parameters + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + evaluations = self.population_size + + # Adaptive mutation and crossover probabilities + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + F_adaptive[i] * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection and adaptivity update + if trial_fitness < fitness[i]: + population[i], fitness[i] = trial, trial_fitness + F_adaptive[i] += self.learning_rate * (1.0 - F_adaptive[i]) # Increase mutation factor + CR_adaptive[i] -= self.learning_rate * CR_adaptive[i] # Decrease crossover probability + if trial_fitness < best_fitness: + best_fitness, best_individual = trial_fitness, trial.copy() + else: + F_adaptive[i] -= self.learning_rate * F_adaptive[i] # Decrease mutation factor + CR_adaptive[i] += self.learning_rate * ( + 1.0 - CR_adaptive[i] + ) # Increase crossover probability + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADEPMI.py b/nevergrad/optimization/lama/ADEPMI.py new file mode 100644 index 000000000..3159f8214 --- /dev/null +++ b/nevergrad/optimization/lama/ADEPMI.py @@ -0,0 +1,81 @@ +import numpy as np + + +class ADEPMI: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.5, + F_amp=0.4, + memory_size=100, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for storing historically good solutions + memory = np.empty((0, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor based on oscillating function to encourage exploration and exploitation + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # DE/rand/1 mutation strategy with memory integration + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if memory.shape[0] > 0 and np.random.rand() < 0.1: # Probability to use memory + a = memory[np.random.randint(0, memory.shape[0])] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Store replaced solution to memory + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + else: + # Replace randomly in memory with a small probability + if np.random.rand() < 0.05: + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADEPR.py b/nevergrad/optimization/lama/ADEPR.py new file mode 100644 index 000000000..3e4b1b824 --- /dev/null +++ b/nevergrad/optimization/lama/ADEPR.py @@ -0,0 +1,50 @@ +import numpy as np + + +class ADEPR: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.8): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Differential weight + self.CR_base = CR_base # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population randomly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Main optimization loop + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation: DE/rand/1 scheme + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + mutant = np.clip(x1 + self.F_base * (x2 - x3), self.bounds[0], self.bounds[1]) + + # Crossover: Binomial with adaptive CR + trial = np.copy(population[i]) + cr = self.CR_base if np.random.rand() < 0.1 else np.random.normal(self.CR_base, 0.1) + cr = np.clip(cr, 0, 1) + crossover = np.random.rand(self.dimension) < cr + trial[crossover] = mutant[crossover] + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ADES.py b/nevergrad/optimization/lama/ADES.py new file mode 100644 index 000000000..ed3ee882e --- /dev/null +++ b/nevergrad/optimization/lama/ADES.py @@ -0,0 +1,79 @@ +import numpy as np + + +class ADES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.initial_population_size = 100 + self.min_mutation_factor = 0.1 + self.max_mutation_factor = 0.9 + self.min_crossover_rate = 0.1 + self.max_crossover_rate = 0.9 + + def initialize_population(self): + return np.random.uniform( + self.bounds[0], self.bounds[1], (self.initial_population_size, self.dimension) + ) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, success_rate): + mutation_factor = ( + self.min_mutation_factor + (self.max_mutation_factor - self.min_mutation_factor) * success_rate + ) + mutants = np.empty_like(population) + for i in range(len(population)): + idxs = [idx for idx in range(len(population)) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + mutation_factor * (population[b] - population[c]) + mutants[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return mutants + + def crossover(self, target, mutant, success_rate): + crossover_rate = ( + self.min_crossover_rate + (self.max_crossover_rate - self.min_crossover_rate) * success_rate + ) + mask = np.random.rand(self.dimension) < crossover_rate + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + successful_trials = 0 + for i in range(len(population)): + trial = self.crossover(population[i], mutants[i], successful_trials / max(1, i)) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + successful_trials += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + success_rate = successful_trials / len(population) + return new_population, new_fitness, success_rate + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = len(population) + best_idx = np.argmin(fitness) + success_rate = 0.5 # Start with a neutral success rate + + while evaluations < self.budget: + if evaluations + len(population) > self.budget: + # Reduce population size to fit within budget + excess = evaluations + len(population) - self.budget + population = population[:-excess] + fitness = fitness[:-excess] + mutants = self.mutate(population, best_idx, success_rate) + population, fitness, success_rate = self.select(population, fitness, mutants, func) + evaluations += len(population) + best_idx = np.argmin(fitness) + + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADESA.py b/nevergrad/optimization/lama/ADESA.py new file mode 100644 index 000000000..6c8daa758 --- /dev/null +++ b/nevergrad/optimization/lama/ADESA.py @@ -0,0 +1,84 @@ +import numpy as np + + +class ADESA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive_size = 20 + self.mutation_scale = 0.5 # Initial mutation scale + self.crossover_prob = 0.7 # Initial crossover probability + self.elite_size = int(self.population_size * 0.1) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, F): + mutants = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + mutants[i] = mutant_vector + return mutants + + def crossover(self, target, mutant, strategy="binomial"): + if strategy == "uniform": + cross_points = np.random.rand(self.dimension) < self.crossover_prob + else: # binomial + cross_points = np.random.rand(self.dimension) < self.crossover_prob + j_rand = np.random.randint(self.dimension) + cross_points[j_rand] = True + offspring = np.where(cross_points, mutant, target) + return offspring + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + archive = population[np.argsort(fitness)[: self.archive_size]] # Initialize archive + best_fitness = np.min(fitness) + + while evaluations < self.budget: + F = np.clip(np.random.normal(self.mutation_scale, 0.1), 0.1, 1.0) + mutants = self.mutate(population, np.argmin(fitness), F) + trials = np.array( + [ + self.crossover( + population[i], mutants[i], strategy=np.random.choice(["uniform", "binomial"]) + ) + for i in range(self.population_size) + ] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + improvement_mask = fitness_trials < fitness + population[improvement_mask] = trials[improvement_mask] + fitness[improvement_mask] = fitness_trials[improvement_mask] + + # Update archive and re-introduce archived solutions + archive_fitness = self.evaluate(archive, func) + combined_population = np.vstack([population, archive]) + combined_fitness = np.hstack([fitness, archive_fitness]) + best_indices = np.argsort(combined_fitness)[: self.archive_size] + archive = combined_population[best_indices] + + if evaluations % 500 == 0: + # Re-seed to maintain diversity + reseed_indices = np.random.choice( + self.population_size, size=int(self.population_size * 0.1), replace=False + ) + population[reseed_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reseed_indices), self.dimension) + ) + fitness[reseed_indices] = self.evaluate(population[reseed_indices], func) + + best_fitness = np.min(fitness) + + return best_fitness, population[np.argmin(fitness)] diff --git a/nevergrad/optimization/lama/ADE_FPC.py b/nevergrad/optimization/lama/ADE_FPC.py new file mode 100644 index 000000000..b41142cd6 --- /dev/null +++ b/nevergrad/optimization/lama/ADE_FPC.py @@ -0,0 +1,53 @@ +import numpy as np + + +class ADE_FPC: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + for i in range(population_size): + if num_evals >= self.budget: + break + + # Adaptive parameters based on normalized fitness values + norm_fitness = (fitness - np.min(fitness)) / (np.max(fitness) - np.min(fitness) + 1e-10) + F = 0.5 + 0.5 * norm_fitness[i] # Higher mutation for worse solutions + CR = 0.5 * (1 - norm_fitness[i]) # Higher crossover for better solutions + + # Mutation and Crossover + indices = np.random.choice(np.delete(np.arange(population_size), i), 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + F * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADGD.py b/nevergrad/optimization/lama/ADGD.py new file mode 100644 index 000000000..b38eea152 --- /dev/null +++ b/nevergrad/optimization/lama/ADGD.py @@ -0,0 +1,62 @@ +import numpy as np + + +class ADGD: + def __init__( + self, budget, population_size=100, initial_step=0.5, step_decay=0.98, differential_weight=0.8 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_step = initial_step + self.step_decay = step_decay + self.differential_weight = differential_weight + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + step_size = self.initial_step + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.zeros_like(population) + + # Generate new candidates + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + + mutant = population[a] + self.differential_weight * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + cross_points = np.random.rand(self.dimension) < 0.5 + trial = np.where(cross_points, mutant, population[i]) + + # Gradient-inspired step + gradient_direction = best_individual - population[i] + trial += step_size * gradient_direction + trial = np.clip(trial, self.lb, self.ub) + + trial_fitness = func(trial) + num_evals += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + step_size *= self.step_decay # Decay the step size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADGE.py b/nevergrad/optimization/lama/ADGE.py new file mode 100644 index 000000000..f50eb6161 --- /dev/null +++ b/nevergrad/optimization/lama/ADGE.py @@ -0,0 +1,51 @@ +import numpy as np + + +class ADGE: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + + def __call__(self, func): + population_size = 100 + mutation_factor = 0.8 + recombination_crossover = 0.9 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + evaluations = population_size + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lower_bound, self.upper_bound) + trial = np.array( + [ + mutant[j] if np.random.rand() < recombination_crossover else population[i][j] + for j in range(self.dimension) + ] + ) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adapt mutation factor and crossover based on progress + if evaluations % 100 == 0: + mutation_factor = max(0.5, mutation_factor * 0.95) + recombination_crossover = min(1.0, recombination_crossover + 0.05) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADMDE.py b/nevergrad/optimization/lama/ADMDE.py new file mode 100644 index 000000000..354786b2f --- /dev/null +++ b/nevergrad/optimization/lama/ADMDE.py @@ -0,0 +1,89 @@ +import numpy as np + + +class ADMDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=100, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite structures + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Track the best solution found + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices] + elite_fitness = fitness[elite_indices] + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic oscillation + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 with probability tweaking + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + best = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best - population[i] + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memorize ousted solutions + memory_idx = np.argmax(memory_fitness) + if trial_fitness < memory_fitness[memory_idx]: + memory[memory_idx] = population[i] + memory_fitness[memory_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADMEMS.py b/nevergrad/optimization/lama/ADMEMS.py new file mode 100644 index 000000000..578705fd1 --- /dev/null +++ b/nevergrad/optimization/lama/ADMEMS.py @@ -0,0 +1,64 @@ +import numpy as np + + +class ADMEMS: + def __init__(self, budget, population_size=50, crossover_rate=0.95, F_min=0.5, F_max=0.9, memory_size=50): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize the best solution and its fitness + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor with a linear decrease strategy + F = self.F_max - (self.F_max - self.F_min) * (evaluations / self.budget) + + # Mutation: DE/rand/1/bin strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and Memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory by replacing the worst entry if improving + worst_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_idx]: + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ADSDiffEvo.py b/nevergrad/optimization/lama/ADSDiffEvo.py new file mode 100644 index 000000000..77862aa29 --- /dev/null +++ b/nevergrad/optimization/lama/ADSDiffEvo.py @@ -0,0 +1,74 @@ +import numpy as np + + +class ADSDiffEvo: + def __init__(self, budget, population_size=100, F_base=0.6, CR_base=0.7): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + + def __call__(self, func): + # Initialize population and fitness evaluations + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Adaptive strategy, alternating mutation strategies + if num_evals % 2 == 0: + strategy_type = "rand1bin" + else: + strategy_type = "best1bin" + + F = self.F_base + 0.1 * np.random.randn() # Perturbed mutation factor + CR = self.CR_base + 0.1 * np.random.randn() # Perturbed crossover rate + + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 3, replace=False) + a, b, c = population[chosen] + + # Mutation strategies + if strategy_type == "rand1bin": + mutant = a + F * (b - c) + elif strategy_type == "best1bin": + mutant = best_individual + F * (b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ADSEA.py b/nevergrad/optimization/lama/ADSEA.py new file mode 100644 index 000000000..83cdc9416 --- /dev/null +++ b/nevergrad/optimization/lama/ADSEA.py @@ -0,0 +1,74 @@ +import numpy as np + + +class ADSEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive_size = 20 + self.mutation_factor = 0.8 + self.crossover_prob = 0.7 + self.archive = [] + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best, func): + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(len(population), 3, replace=False) + new_population[i] = population[idxs[0]] + self.mutation_factor * ( + best - population[idxs[1]] + population[idxs[2]] - population[idxs[0]] + ) + new_population[i] = np.clip(new_population[i], self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + return np.where(cross_points, mutant, target) + + def select(self, population, trials, fitness_trials): + for i in range(self.population_size): + if fitness_trials[i] < self.fitness[i]: + population[i] = trials[i] + self.fitness[i] = fitness_trials[i] + + def update_archive(self, population, fitness): + if len(self.archive) < self.archive_size: + self.archive.extend(population) + if len(self.archive) > self.archive_size: + self.archive = self.archive[: self.archive_size] + # Keep the best solutions in the archive + combined = list(zip(self.archive, fitness)) + combined.sort(key=lambda x: x[1]) + self.archive = [x[0] for x in combined[: self.archive_size]] + + def __call__(self, func): + population = self.initialize_population() + self.fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + best_idx = np.argmin(self.fitness) + best = population[best_idx] + + mutants = self.mutate(population, best, func) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + self.select(population, trials, fitness_trials) + evaluations += len(trials) + + self.update_archive(population, self.fitness) + + if evaluations + self.population_size > self.budget: + break + + best_idx = np.argmin(self.fitness) + return self.fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/ADSEAPlus.py b/nevergrad/optimization/lama/ADSEAPlus.py new file mode 100644 index 000000000..523350cb1 --- /dev/null +++ b/nevergrad/optimization/lama/ADSEAPlus.py @@ -0,0 +1,78 @@ +import numpy as np + + +class ADSEAPlus: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive_size = 20 + self.initial_mutation_factor = 0.8 + self.initial_crossover_prob = 0.7 + self.archive = [] + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def adapt_parameters(self, progress): + # Adapt the mutation factor and crossover probability as the search progresses + self.mutation_factor = self.initial_mutation_factor * (1 - progress) + self.crossover_prob = self.initial_crossover_prob + progress * (0.9 - self.initial_crossover_prob) + + def mutate(self, population, best, func, progress): + self.adapt_parameters(progress) + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(len(population), 4, replace=False) + mutated = ( + best + + self.mutation_factor * (population[idxs[0]] - population[idxs[1]]) + + self.mutation_factor * (population[idxs[2]] - population[idxs[3]]) + ) + new_population[i] = np.clip(mutated, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + return np.where(cross_points, mutant, target) + + def select(self, population, trials, fitness_trials): + for i in range(self.population_size): + if fitness_trials[i] < self.fitness[i]: + population[i] = trials[i] + self.fitness[i] = fitness_trials[i] + + def update_archive(self, population, fitness): + combined = list(zip(self.archive + list(population), list(self.fitness) + list(fitness))) + combined.sort(key=lambda x: x[1]) + self.archive = [x[0] for x in combined[: self.archive_size]] + + def __call__(self, func): + population = self.initialize_population() + self.fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + best_idx = np.argmin(self.fitness) + best = population[best_idx] + progress = evaluations / self.budget + + mutants = self.mutate(population, best, func, progress) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + self.select(population, trials, fitness_trials) + evaluations += len(trials) + + self.update_archive(population, self.fitness) + + if evaluations + self.population_size > self.budget: + break + + best_idx = np.argmin(self.fitness) + return self.fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AGBES.py b/nevergrad/optimization/lama/AGBES.py new file mode 100644 index 000000000..cce6b5549 --- /dev/null +++ b/nevergrad/optimization/lama/AGBES.py @@ -0,0 +1,63 @@ +import numpy as np + + +class AGBES: + def __init__(self, budget, population_size=100, gradient_weight=0.3, mutation_rate=0.1, elite_ratio=0.2): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.gradient_weight = gradient_weight + self.mutation_rate = mutation_rate + self.elite_ratio = elite_ratio + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + elite_count = int(self.population_size * self.elite_ratio) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + + # Generate new population + new_population = np.zeros_like(population) + + # Reproduction with mutation and gradient information + for i in range(self.population_size): + if i < elite_count: + # Elites undergo mutation only + mutation = np.random.randn(self.dimension) * self.mutation_rate + new_individual = elite_individuals[i % elite_count] + mutation + else: + # Non-elites are generated from random elite and gradient information + parent = elite_individuals[np.random.randint(0, elite_count)] + gradient = best_individual - parent + perturbation = np.random.randn(self.dimension) * self.mutation_rate + new_individual = parent + self.gradient_weight * gradient + perturbation + + new_individual = np.clip(new_individual, self.lb, self.ub) + new_population[i] = new_individual + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + num_evals += self.population_size + + # Selection process: Elitism combined with direct competition + for j in range(self.population_size): + if new_fitness[j] < fitness[j]: + population[j] = new_population[j] + fitness[j] = new_fitness[j] + if new_fitness[j] < best_fitness: + best_fitness = new_fitness[j] + best_individual = new_population[j].copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGCES.py b/nevergrad/optimization/lama/AGCES.py new file mode 100644 index 000000000..1c10cbd5c --- /dev/null +++ b/nevergrad/optimization/lama/AGCES.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AGCES: + def __init__( + self, budget, population_size=100, F_base=0.5, CR_base=0.9, adapt_rate=0.1, gradient_weight=0.05 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base scaling factor for differential evolution + self.CR_base = CR_base # Base crossover rate + self.adapt_rate = adapt_rate # Rate of adaptation for F and CR + self.gradient_weight = gradient_weight # Weighting for gradient influence in mutation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while num_evals < self.budget: + # Adapt F and CR adaptively + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.1, 1.0) + + # Compute numerical gradients for population + gradients = np.zeros_like(population) + for i in range(self.population_size): + for d in range(self.dimension): + original = population[i][d] + increment = 0.01 * (self.ub - self.lb) + + population[i][d] += increment + f_plus = func(population[i]) + population[i][d] = original + + population[i][d] -= increment + f_minus = func(population[i]) + population[i][d] = original + + gradients[i][d] = (f_plus - f_minus) / (2 * increment) + num_evals += 2 + if num_evals >= self.budget: + return best_fitness, best_individual + + # Mutation, Crossover and Selection + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation with gradient guidance + indices = [idx for idx in range(self.population_size) if idx != i] + a, b = np.random.choice(indices, 2, replace=False) + mutant = population[i] + Fs[i] * ( + best_individual + - population[i] + + population[a] + - population[b] + - self.gradient_weight * gradients[i] + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGDE.py b/nevergrad/optimization/lama/AGDE.py new file mode 100644 index 000000000..45e659010 --- /dev/null +++ b/nevergrad/optimization/lama/AGDE.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AGDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.elite_size = 5 + self.mutation_factor = 0.5 + self.crossover_prob = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index): + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant_vector = population[a] + self.mutation_factor * (population[b] - population[c]) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, trials, fitness_trials): + for i in range(self.population_size): + if fitness_trials[i] < self.fitness[i]: + population[i] = trials[i] + self.fitness[i] = fitness_trials[i] + + def enhance_elites(self, population, func): + elites_indices = np.argsort(self.fitness)[: self.elite_size] + for idx in elites_indices: + local_search_vector = population[idx] + np.random.normal(0, 0.1, self.dimension) + local_search_vector = np.clip(local_search_vector, self.bounds[0], self.bounds[1]) + f_local = func(local_search_vector) + if f_local < self.fitness[idx]: + population[idx] = local_search_vector + self.fitness[idx] = f_local + + def __call__(self, func): + population = self.initialize_population() + self.fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + mutants = self.mutate(population, np.argmin(self.fitness)) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + self.select(population, trials, fitness_trials) + evaluations += self.population_size + + if evaluations + self.population_size > self.budget: + break + + if evaluations % 100 == 0: + self.enhance_elites(population, func) + self.mutation_factor *= 0.98 + self.crossover_prob = np.clip( + self.crossover_prob * (0.99 if np.random.rand() < 0.5 else 1.01), 0.1, 0.9 + ) + + best_idx = np.argmin(self.fitness) + return self.fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AGDELS.py b/nevergrad/optimization/lama/AGDELS.py new file mode 100644 index 000000000..a36e30f43 --- /dev/null +++ b/nevergrad/optimization/lama/AGDELS.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AGDELS: + def __init__(self, budget, population_size=100, F_base=0.8, CR_base=0.9, local_search_prob=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = np.full(self.dimension, -5.0) + self.ub = np.full(self.dimension, 5.0) + self.F_base = F_base + self.CR_base = CR_base + self.local_search_prob = local_search_prob + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Adaptive mutation parameters with Gaussian perturbation + F = np.clip(np.random.normal(self.F_base, 0.1), 0.1, 1.0) + CR = np.clip(np.random.normal(self.CR_base, 0.05), 0.1, 1.0) + + # Mutation and crossover + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + # Performing local search with a certain probability + if np.random.rand() < self.local_search_prob: + local_point = trial + np.random.normal(0, 0.1, self.dimension) + local_point = np.clip(local_point, self.lb, self.ub) + local_fitness = func(local_point) + num_evals += 1 + if local_fitness < fitness[i]: + trial = local_point + trial_fitness = local_fitness + else: + trial_fitness = func(trial) + num_evals += 1 + else: + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGDiffEvo.py b/nevergrad/optimization/lama/AGDiffEvo.py new file mode 100644 index 000000000..4b742848e --- /dev/null +++ b/nevergrad/optimization/lama/AGDiffEvo.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AGDiffEvo: + def __init__(self, budget, population_size=150, F_base=0.6, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Track the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation parameters with Gaussian perturbation + F = np.clip(np.random.normal(self.F_base, 0.1), 0.1, 1.0) + CR = np.clip(np.random.normal(self.CR_base, 0.05), 0.1, 1.0) + + # Select individuals for mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 4, replace=False) + a, b, c, d = population[chosen] + + # Mutation: DE/current-to-best/1/bin + mutant = population[i] + F * (best_individual - population[i]) + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGEA.py b/nevergrad/optimization/lama/AGEA.py new file mode 100644 index 000000000..1b829f59c --- /dev/null +++ b/nevergrad/optimization/lama/AGEA.py @@ -0,0 +1,57 @@ +import numpy as np + + +class AGEA: + def __init__(self, budget, population_size=50, crossover_prob=0.7, mutation_factor=0.8): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.crossover_prob = crossover_prob + self.mutation_factor = mutation_factor + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + while num_evals < self.budget: + new_population = [] + for i in range(self.population_size): + # Mutation: DE/rand/1/bin + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + self.mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + else: + new_population.append(population[i]) + + if num_evals >= self.budget: + break + + population = np.array(new_population) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGESA.py b/nevergrad/optimization/lama/AGESA.py new file mode 100644 index 000000000..a86a53d73 --- /dev/null +++ b/nevergrad/optimization/lama/AGESA.py @@ -0,0 +1,69 @@ +import numpy as np + + +class AGESA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.initial_cr = 0.9 + self.initial_f = 0.8 + self.initial_temp = 1.0 + self.final_temp = 0.01 + self.alpha = 0.95 # Cooling rate + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, f, temperature): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = population[best_idx] + f * temperature * (x1 - x2 + x3 - population[best_idx]) + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, cr): + crossover_mask = np.random.rand(self.dimension) < cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + temperature = self.initial_temp + cr = self.initial_cr + f = self.initial_f + + while evaluations < self.budget: + mutated_population = self.mutate(population, best_idx, f, temperature) + offspring_population = np.array( + [ + self.crossover(population[i], mutated_population[i], cr) + for i in range(self.population_size) + ] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i] or np.random.rand() < np.exp( + (fitness[i] - offspring_fitness[i]) / temperature + ): + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution, best_idx = fitness[i], population[i], i + + cr = max(0.1, cr * 0.99) # Adaptive cr decrease + f = max(0.5, f * 0.98) # Adaptive f decrease + temperature *= self.alpha # Cool down + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AGGE.py b/nevergrad/optimization/lama/AGGE.py new file mode 100644 index 000000000..3de84b859 --- /dev/null +++ b/nevergrad/optimization/lama/AGGE.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AGGE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Adaptive parameters + mutation_factor = 0.5 + crossover_rate = 0.9 + gradient_boost = 0.2 + + while num_evals < self.budget: + # Elite gradient computation + elite_idx = np.argmin(fitness) + grad_direction = np.zeros(self.dimension) + base_fitness = fitness[elite_idx] + + for d in range(self.dimension): + perturb = np.zeros(self.dimension) + perturb[d] = 0.01 * (self.upper_bound - self.lower_bound) + + perturbed_individual = np.clip( + population[elite_idx] + perturb, self.lower_bound, self.upper_bound + ) + perturbed_fitness = func(perturbed_individual) + num_evals += 1 + + if num_evals >= self.budget: + break + + grad_direction[d] = (perturbed_fitness - base_fitness) / perturb[d] + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Mutation incorporating gradient + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = ( + population[a] + + mutation_factor * (population[b] - population[c]) + - gradient_boost * grad_direction + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGGES.py b/nevergrad/optimization/lama/AGGES.py new file mode 100644 index 000000000..71784bd3e --- /dev/null +++ b/nevergrad/optimization/lama/AGGES.py @@ -0,0 +1,61 @@ +import numpy as np + + +class AGGES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 150 # Further increased population size for broader exploration + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Evolution parameters + learning_rate = 0.01 # Slight decrease in individual learning rate for finer adjustments + global_learning_rate = ( + 0.3 # Slightly increased to pull population towards better regions more strongly + ) + mutation_strength = 1.0 # Increased mutation strength for initial broader exploration + mutation_decay = 0.98 # Slightly slower decay rate + elite_fraction = 0.15 # Adjust elite fraction for a balance between exploration and exploitation + elite_size = int(population_size * elite_fraction) + + while num_evals < self.budget: + elite_indices = np.argsort(fitness)[:elite_size] + global_mean = np.mean(population[elite_indices], axis=0) + + for i in range(population_size): + if num_evals >= self.budget: + break + + step = mutation_strength * ( + np.random.randn(self.dimension) + learning_rate * (global_mean - population[i]) + ) + individual = population[i] + step + individual = np.clip(individual, self.lower_bound, self.upper_bound) + + # Stronger pull towards global mean to accelerate convergence + individual = individual + global_learning_rate * (global_mean - individual) + individual_fitness = func(individual) + num_evals += 1 + + # Selection process + if individual_fitness < fitness[i]: + population[i] = individual + fitness[i] = individual_fitness + if individual_fitness < best_fitness: + best_fitness = individual_fitness + best_individual = individual.copy() + + # Update mutation strength adaptively based on elite performance improvement + mutation_strength *= mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AGIDE.py b/nevergrad/optimization/lama/AGIDE.py new file mode 100644 index 000000000..24165a381 --- /dev/null +++ b/nevergrad/optimization/lama/AGIDE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class AGIDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 120 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Adaptation parameters + mutation_factor = 0.5 + crossover_rate = 0.9 + learning_rate = 0.05 + + while num_evals < self.budget: + # Gradient Estimation and Mutation Adaptation + gradients = np.zeros((population_size, self.dimension)) + elite_idx = np.argsort(fitness)[: population_size // 4] + + for i in elite_idx: + perturb = np.random.normal(0, 0.1, self.dimension) + perturbed_individual = np.clip(population[i] + perturb, self.lower_bound, self.upper_bound) + perturbed_fitness = func(perturbed_individual) + num_evals += 1 + + if num_evals >= self.budget: + break + + gradient_estimate = (perturbed_fitness - fitness[i]) / perturb + gradients[i] = -gradient_estimate + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Dynamic Mutation Strategy + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[c] + mutation_factor * (population[a] - population[b]) + mutant += learning_rate * gradients[i] + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AHDEMI.py b/nevergrad/optimization/lama/AHDEMI.py new file mode 100644 index 000000000..8495476b0 --- /dev/null +++ b/nevergrad/optimization/lama/AHDEMI.py @@ -0,0 +1,87 @@ +import numpy as np + + +class AHDEMI: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=100, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((0, dimension)) + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx] + + for i in range(self.population_size): + # Adaptive mutation factor that changes dynamically + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/rand-to-best/1 strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = ( + best_solution if np.random.rand() < 0.8 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best - a + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with the old good solutions + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + elif np.random.rand() < 0.1: # Occasionally replace memory entries + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ALDEEM.py b/nevergrad/optimization/lama/ALDEEM.py new file mode 100644 index 000000000..fa8785533 --- /dev/null +++ b/nevergrad/optimization/lama/ALDEEM.py @@ -0,0 +1,73 @@ +import numpy as np + + +class ALDEEM: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive_size = 30 + self.mutation_scale = 0.5 + self.crossover_prob = 0.7 + self.elite_size = int(self.population_size * 0.2) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, F): + mutants = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + mutants[i] = mutant_vector + return mutants + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + j_rand = np.random.randint(self.dimension) + cross_points[j_rand] = True + offspring = np.where(cross_points, mutant, target) + return offspring + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + F = np.clip(np.random.normal(self.mutation_scale, 0.1), 0.1, 1.0) + mutants = self.mutate(population, np.argmin(fitness), F) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + improvement_mask = fitness_trials < fitness + population[improvement_mask] = trials[improvement_mask] + fitness[improvement_mask] = fitness_trials[improvement_mask] + + # Maintain and utilize memory for diversity + if evaluations % 500 == 0: + self.memory.extend(population[np.argsort(fitness)[: self.elite_size]].tolist()) + if len(self.memory) > self.archive_size: + self.memory = self.memory[-self.archive_size :] + reseed_indices = np.random.choice(self.population_size, size=self.elite_size, replace=False) + population[reseed_indices] = np.array(self.memory[: self.elite_size]) + fitness[reseed_indices] = self.evaluate(population[reseed_indices], func) + + # Adaptive parameter control + if evaluations % 100 == 0: + self.mutation_scale *= 0.95 if np.min(fitness) < np.mean(fitness) else 1.05 + self.crossover_prob = max( + min(self.crossover_prob + (0.05 if np.min(fitness) < np.mean(fitness) else -0.05), 1.0), + 0.1, + ) + + return np.min(fitness), population[np.argmin(fitness)] diff --git a/nevergrad/optimization/lama/ALES.py b/nevergrad/optimization/lama/ALES.py new file mode 100644 index 000000000..49d01c79b --- /dev/null +++ b/nevergrad/optimization/lama/ALES.py @@ -0,0 +1,62 @@ +import numpy as np + + +class ALES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Evolution parameters + learning_rate = 0.1 + global_learning_rate = 0.2 + mutation_strength = 0.5 + mutation_decay = 0.99 + elite_fraction = 0.1 + elite_size = max(int(population_size * elite_fraction), 1) + + while num_evals < self.budget: + elite_indices = np.argsort(fitness)[:elite_size] + global_mean = np.mean(population[elite_indices], axis=0) + + for i in range(population_size): + if num_evals >= self.budget: + break + + if i in elite_indices: + # Elites undergo less mutation + step = mutation_strength * np.random.randn(self.dimension) * 0.5 + else: + step = mutation_strength * np.random.randn(self.dimension) + + individual = population[i] + step + individual = np.clip(individual, self.lower_bound, self.upper_bound) + + # Perform a global pull move towards the mean of elites + individual = individual + global_learning_rate * (global_mean - individual) + individual_fitness = func(individual) + num_evals += 1 + + # Selection process + if individual_fitness < fitness[i]: + population[i] = individual + fitness[i] = individual_fitness + if individual_fitness < best_fitness: + best_fitness = individual_fitness + best_individual = individual.copy() + + # Decay the mutation strength + mutation_strength *= mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ALSS.py b/nevergrad/optimization/lama/ALSS.py new file mode 100644 index 000000000..8232bc60c --- /dev/null +++ b/nevergrad/optimization/lama/ALSS.py @@ -0,0 +1,56 @@ +import numpy as np + + +class ALSS: + def __init__(self, budget, population_size=50, learning_rate=0.6): + self.budget = budget + self.population_size = population_size + self.learning_rate = learning_rate # Learning rate for step size adjustment + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Initialize step sizes for each individual in each dimension + step_sizes = np.random.uniform(0.1, 1.0, (self.population_size, self.dimension)) + + while num_evals < self.budget: + for i in range(self.population_size): + # Perturb each individual based on its step size + perturbation = np.random.normal(0, step_sizes[i], self.dimension) + trial_vector = population[i] + perturbation + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Fitness evaluation + trial_fitness = func(trial_vector) + num_evals += 1 + + # Adaptive adjustment + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + step_sizes[i] *= 1 + self.learning_rate # Increase step size + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + else: + step_sizes[i] *= 1 - self.learning_rate # Decrease step size + + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of ALSS: +# optimizer = ALSS(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/AMDE.py b/nevergrad/optimization/lama/AMDE.py new file mode 100644 index 000000000..01a7b469d --- /dev/null +++ b/nevergrad/optimization/lama/AMDE.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AMDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.8, + F_base=0.5, + F_amp=0.5, + memory_size=20, + elite_size=3, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = population[: self.memory_size].copy() + memory_fitness = fitness[: self.memory_size].copy() + + # Elite solutions tracking + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor with a decaying amplitude + F = self.F_base + self.F_amp * np.cos(2 * np.pi * evaluations / self.budget) + + # Mutation using memory, elite, or random selection + if np.random.rand() < 0.2: # Mutation from memory + m_idx = np.random.randint(self.memory_size) + a, b, c = memory[m_idx], population[np.random.randint(self.population_size)], elite[0] + else: + idxs = np.random.choice(self.population_size, 3, replace=False) + a, b, c = population[idxs] + + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover: Exponential + start = np.random.randint(dimension) + length = np.random.randint(1, dimension) + cross_points = [(start + j) % dimension for j in range(length)] + trial = population[i].copy() + trial[cross_points] = mutant[cross_points] + + # Fitness evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory if better + worst_memory_idx = np.argmax(memory_fitness) + if trial_fitness < memory_fitness[worst_memory_idx]: + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AMES.py b/nevergrad/optimization/lama/AMES.py new file mode 100644 index 000000000..66ba6d34d --- /dev/null +++ b/nevergrad/optimization/lama/AMES.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AMES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.elite_size = 5 # Elitism parameter + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def adaptive_mutation(self, individual, iteration): + # Decrease mutation strength as iterations increase + mutation_strength = np.maximum(0.1, 1 - (iteration / self.budget)) + mutation_vector = np.random.normal(0, mutation_strength, self.dimension) + return np.clip(individual + mutation_vector, self.bounds[0], self.bounds[1]) + + def recombine(self, elite_population): + # Randomly recombine pairs of elite individuals + indices = np.random.permutation(self.elite_size) + parent1 = elite_population[indices[0]] + parent2 = elite_population[indices[1]] + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + + evaluations = self.population_size + iteration = 0 + + while evaluations < self.budget: + elite_population, elite_fitness = self.select_elites(population, fitness) + + for i in range(self.population_size): + if i < self.elite_size: + # Preserve elites + continue + + # Recombine and mutate + child = self.recombine(elite_population) + child = self.adaptive_mutation(child, iteration) + child_fitness = func(child) + evaluations += 1 + + # Selection step + if child_fitness < fitness[i]: + population[i] = child + fitness[i] = child_fitness + + if evaluations >= self.budget: + break + + iteration += 1 + + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AMSDiffEvo.py b/nevergrad/optimization/lama/AMSDiffEvo.py new file mode 100644 index 000000000..5147e7f9a --- /dev/null +++ b/nevergrad/optimization/lama/AMSDiffEvo.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AMSDiffEvo: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.9, perturbation=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.perturbation = perturbation # Perturbation for adaptive parameters + + def __call__(self, func): + # Initialize population and fitness assessments + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Adaptive mutation strategy + strategy_type = np.random.choice( + ["best", "rand", "rand-to-best", "current-to-rand"], p=[0.25, 0.25, 0.25, 0.25] + ) + F = np.clip(self.F_base + self.perturbation * np.random.randn(), 0.1, 1.0) + CR = np.clip(self.CR_base + self.perturbation * np.random.randn(), 0.0, 1.0) + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = population[np.random.choice(idxs, 4, replace=False)] + + if strategy_type == "best": + mutant = population[i] + F * (best_individual - population[i]) + F * (a - b) + elif strategy_type == "rand": + mutant = a + F * (b - c) + elif strategy_type == "rand-to-best": + mutant = population[i] + F * (best_individual - population[i]) + F * (a - b) + F * (b - c) + else: # 'current-to-rand' + mutant = population[i] + F * (a - population[i]) + F * (b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AMSEA.py b/nevergrad/optimization/lama/AMSEA.py new file mode 100644 index 000000000..090ce8a09 --- /dev/null +++ b/nevergrad/optimization/lama/AMSEA.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AMSEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.last_best_fitness = np.inf + self.stagnation_counter = 0 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, adaptation_factor): + F = 0.5 + 0.5 * adaptation_factor # Dynamically adapted mutation factor + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(np.delete(np.arange(self.population_size), best_index), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = a + F * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, adaptation_factor): + CR = 0.2 + 0.6 * adaptation_factor # Dynamically adapted crossover probability + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def calculate_adaptation_factor(self, current_best_fitness): + if current_best_fitness < self.last_best_fitness: + self.last_best_fitness = current_best_fitness + self.stagnation_counter = 0 + return 1 # High exploration when progressing + else: + self.stagnation_counter += 1 + return max(0, 1 - self.stagnation_counter / 50) # Increase exploitation if stagnating + + def local_search(self, best_individual, func): + perturbations = np.random.normal(0, 0.1, (10, self.dimension)) + candidates = np.clip(best_individual + perturbations, self.bounds[0], self.bounds[1]) + fitnesses = self.evaluate(candidates, func) + best_local_idx = np.argmin(fitnesses) + return candidates[best_local_idx] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + adaptation_factor = self.calculate_adaptation_factor(fitness[best_index]) + mutants = self.mutate(population, best_index, adaptation_factor) + trials = np.array( + [ + self.crossover(population[i], mutants[i], adaptation_factor) + for i in range(self.population_size) + ] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + combined_population = np.vstack((population, trials)) + combined_fitness = np.hstack((fitness, fitness_trials)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + best_index = np.argmin(fitness) + population[best_index] = self.local_search(population[best_index], func) + + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/AN_MDEPSO.py b/nevergrad/optimization/lama/AN_MDEPSO.py new file mode 100644 index 000000000..8ef4cc139 --- /dev/null +++ b/nevergrad/optimization/lama/AN_MDEPSO.py @@ -0,0 +1,125 @@ +import numpy as np +from scipy.optimize import minimize + + +class AN_MDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.num_niches = 5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) for _ in range(self.num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.num_niches)] + local_bests = [niche[np.argmin(fit)] for niche, fit in zip(niches, fitness)] + local_best_fits = [min(fit) for fit in fitness] + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + best_niche_idx = np.argmin(local_best_fits) + + for n in range(self.num_niches): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 + * r2 + * (niches[best_niche_idx][np.argmin(fitness[best_niche_idx])] - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.5 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(self.num_niches): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Adaptive parameter adjustment + if np.random.rand() < 0.1: + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/APBES.py b/nevergrad/optimization/lama/APBES.py new file mode 100644 index 000000000..4052ad475 --- /dev/null +++ b/nevergrad/optimization/lama/APBES.py @@ -0,0 +1,63 @@ +import numpy as np + + +class APBES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.elite_size = 5 # Top 10% as elite + + def initialize(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation_rate = 0.1 # 10% mutation rate + mutation_mask = np.random.rand(self.dimension) < mutation_rate + individual[mutation_mask] += np.random.normal(0, 0.1, np.sum(mutation_mask)) + return np.clip(individual, self.bounds[0], self.bounds[1]) + + def crossover(self, parent1, parent2): + if np.random.rand() < 0.5: # 50% chance for one-point crossover + point = np.random.randint(self.dimension) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: # uniform crossover + mask = np.random.rand(self.dimension) < 0.5 + child = parent1 * mask + parent2 * (1 - mask) + return child + + def __call__(self, func): + population = self.initialize() + best_fitness = np.inf + best_individual = None + + evaluations = 0 + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + if np.min(fitness) < best_fitness: + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)].copy() + + elites, elite_fitness = self.select_elites(population, fitness) + + # Generate new population + new_population = elites.copy() + while len(new_population) < self.population_size: + parent1, parent2 = population[np.random.choice(len(population), 2, replace=False)] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population = np.vstack([new_population, child]) + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/APDE.py b/nevergrad/optimization/lama/APDE.py new file mode 100644 index 000000000..dcaa801d5 --- /dev/null +++ b/nevergrad/optimization/lama/APDE.py @@ -0,0 +1,66 @@ +import numpy as np + + +class APDE: + def __init__( + self, budget, population_size=50, F_base=0.5, CR_base=0.9, adapt_rate=0.1, precision_factor=0.05 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base scaling factor for differential evolution + self.CR_base = CR_base # Base crossover rate + self.adapt_rate = adapt_rate # Rate of adaptation for F and CR + self.precision_factor = precision_factor # Adaptively adjusts mutation precision over iterations + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while num_evals < self.budget: + # Adapt F, CR, and precision adaptively + Fs = np.clip( + np.random.normal( + self.F_base, self.adapt_rate * np.exp(-num_evals / self.budget), self.population_size + ), + 0.1, + 1.0, + ) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.0, 1.0) + precisions = self.precision_factor / np.log2(2 + num_evals / self.budget) + + # Mutation, Crossover, and Selection + for i in range(self.population_size): + if num_evals >= self.budget: + break + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + Fs[i] * (population[b] - population[c]) * precisions + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/APDETL.py b/nevergrad/optimization/lama/APDETL.py new file mode 100644 index 000000000..698c7b22e --- /dev/null +++ b/nevergrad/optimization/lama/APDETL.py @@ -0,0 +1,86 @@ +import numpy as np + + +class APDETL: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.5, + CR_base=0.8, + mutation_strategy="best", + adaptive=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.mutation_strategy = mutation_strategy + self.adaptive = adaptive + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + F_adaptive = self.F_base + CR_adaptive = self.CR_base + + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation + if self.mutation_strategy == "best": + indices = [idx for idx in range(self.population_size) if idx != i] + chosen_indices = np.random.choice(indices, 2, replace=False) + x1, x2 = population[chosen_indices] + mutant = best_individual + F_adaptive * (x1 - x2) + else: + indices = [idx for idx in range(self.population_size) if idx != i] + chosen_indices = np.random.choice(indices, 3, replace=False) + x0, x1, x2 = population[chosen_indices] + mutant = x0 + F_adaptive * (x1 - x2) + + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR_adaptive + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptive updates + if self.adaptive: + successful = trial_fitness < fitness[i] + F_adaptive += 0.1 * (successful - 0.5) + CR_adaptive += 0.1 * (successful - 0.5) + F_adaptive = np.clip(F_adaptive, 0.1, 0.9) + CR_adaptive = np.clip(CR_adaptive, 0.1, 0.9) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/APES.py b/nevergrad/optimization/lama/APES.py new file mode 100644 index 000000000..ccc869be6 --- /dev/null +++ b/nevergrad/optimization/lama/APES.py @@ -0,0 +1,60 @@ +import numpy as np + + +class APES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.mutation_factor = 0.5 + self.crossover_prob = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, scaling_factor): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + scaling_factor * (b - c), self.bounds[0], self.bounds[1]) + new_population[i] = mutant + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, fitness, new_population, new_fitness): + for i in range(self.population_size): + if new_fitness[i] < fitness[i]: + population[i], fitness[i] = new_population[i], new_fitness[i] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + + while evaluations < self.budget: + scaling_factor = self.mutation_factor / (1 + np.exp(-0.1 * (evaluations / self.budget - 5))) + mutated_population = self.mutate(population, best_idx, scaling_factor) + trial_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + trial_fitness = self.evaluate(trial_population, func) + evaluations += self.population_size + + self.select(population, fitness, trial_population, trial_fitness) + best_idx = np.argmin(fitness) + if fitness[best_idx] < best_fitness: + best_fitness, best_solution = fitness[best_idx], population[best_idx] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AQAPSO_LS_DIW.py b/nevergrad/optimization/lama/AQAPSO_LS_DIW.py new file mode 100644 index 000000000..7356065d3 --- /dev/null +++ b/nevergrad/optimization/lama/AQAPSO_LS_DIW.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AQAPSO_LS_DIW: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + local_search_radius=0.05, + local_search_samples=20, + inertia_weight=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.local_search_radius = local_search_radius + self.local_search_samples = local_search_samples + self.inertia_weight = inertia_weight + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_samples): + x_new = x + np.random.uniform(-self.local_search_radius, self.local_search_radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return self.inertia_weight + 0.5 * (1 - t / self.budget) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AQAPSO_LS_DIW_AP.py b/nevergrad/optimization/lama/AQAPSO_LS_DIW_AP.py new file mode 100644 index 000000000..eda93e1f5 --- /dev/null +++ b/nevergrad/optimization/lama/AQAPSO_LS_DIW_AP.py @@ -0,0 +1,85 @@ +import numpy as np + + +class AQAPSO_LS_DIW_AP: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(10): # Increased the local search iterations for better refinement + x_new = x + np.random.uniform(-0.1, 0.1, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.5 * t / self.budget # Improved inertia weight update + + def update_parameters(self, t): + return ( + 1.5 - 0.5 * t / self.budget, + 2.0 - 0.5 * t / self.budget, + ) # Adaptive cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.1 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ARDLS.py b/nevergrad/optimization/lama/ARDLS.py new file mode 100644 index 000000000..c36345f5d --- /dev/null +++ b/nevergrad/optimization/lama/ARDLS.py @@ -0,0 +1,76 @@ +import numpy as np + + +class ARDLS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.improvement_threshold = 0.10 # 10% non-improvement needed to switch layers + self.no_improve_in = 100 # number of evaluations to check for improvement + + # Initially set for global search + self.mutation_factor = 0.8 + self.crossover_prob = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + new_population[i] = mutant + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + return np.where(cross_points, mutant, target) + + def select(self, population, fitness, new_population, new_fitness): + for i in range(self.population_size): + if new_fitness[i] < fitness[i]: + population[i], fitness[i] = new_population[i], new_fitness[i] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + improvements = 0 + + while evaluations < self.budget: + if improvements < self.improvement_threshold * self.population_size: + self.mutation_factor = 0.1 # switch to local search + self.crossover_prob = 0.9 + else: + self.mutation_factor = 0.8 # switch to global search + self.crossover_prob = 0.7 + + mutated_population = self.mutate(population, best_idx) + trial_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + trial_fitness = self.evaluate(trial_population, func) + evaluations += self.population_size + old_best = best_fitness + + self.select(population, fitness, trial_population, trial_fitness) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + + if best_fitness < old_best: + best_solution = population[best_idx] + improvements += 1 + else: + improvements = 0 # reset improvements counter if no improvement + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ARESM.py b/nevergrad/optimization/lama/ARESM.py new file mode 100644 index 000000000..c292628b5 --- /dev/null +++ b/nevergrad/optimization/lama/ARESM.py @@ -0,0 +1,94 @@ +import numpy as np + + +class ARESM: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for high-quality solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions more frequently based on the budget progression + if evaluations % (self.budget // 10) == 0: # More frequent elite updates + elite_idxs = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idxs].copy() + elite_fitness = fitness[elite_idxs].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic amplitude adjustment + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation strategy: best or elite solution hybridized with random solutions + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if np.random.rand() < 0.5: + base_solution = best_solution # Bias towards the best solution + else: + base_solution = elite[ + np.random.randint(self.elite_size) + ] # Occasionally use elite solutions + + mutant = np.clip(base_solution + F * (b - c), lb, ub) + + # Binomial crossover with an increasing strategy + cross_points = np.random.rand(dimension) < ( + self.crossover_rate + 0.05 * np.sin(2 * np.pi * evaluations / self.budget) + ) + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection and update memory if better + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Replace worst memory entry if trial is better + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ARISA.py b/nevergrad/optimization/lama/ARISA.py new file mode 100644 index 000000000..167b0dc63 --- /dev/null +++ b/nevergrad/optimization/lama/ARISA.py @@ -0,0 +1,58 @@ +import numpy as np + + +class ARISA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 50 + population = np.random.uniform(*self.bounds, (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def adaptive_search(self, population, func): + global_best_fitness = np.Inf + global_best_individual = None + + evaluations = 0 + history_changes = [] + + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + # Update best solution + best_idx = np.argmin(fitness) + if fitness[best_idx] < global_best_fitness: + global_best_fitness = fitness[best_idx] + global_best_individual = population[best_idx] + + # Adaptive mutation based on historical performance + if history_changes: + mutation_scale = 0.1 if np.std(history_changes) < 0.1 else 0.5 + else: + mutation_scale = 0.3 + mutations = np.random.normal(0, mutation_scale, (len(population), self.dimension)) + population = population + mutations + population = np.clip(population, *self.bounds) + + # Record and adjust + current_best_fitness = fitness[best_idx] + history_changes.append(current_best_fitness) + + # Thresholding for dynamic adaptation + if len(history_changes) > 5: + if np.std(history_changes[-5:]) < 0.01 * np.abs(history_changes[-1]): + population = np.random.uniform(*self.bounds, (len(population), self.dimension)) + + return global_best_fitness, global_best_individual + + def __call__(self, func): + initial_population, _ = self.initialize() + best_fitness, best_solution = self.adaptive_search(initial_population, func) + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ASADEA.py b/nevergrad/optimization/lama/ASADEA.py new file mode 100644 index 000000000..139f07b6f --- /dev/null +++ b/nevergrad/optimization/lama/ASADEA.py @@ -0,0 +1,81 @@ +import numpy as np + + +class ASADEA: + def __init__(self, budget): + self.budget = budget + self.population_size = 20 + self.dimension = 5 + self.low = -5.0 + self.high = 5.0 + self.archive = [] + self.archive_max_size = 100 + + def initialize(self): + population = np.random.uniform(self.low, self.high, (self.population_size, self.dimension)) + # Initialize F and CR for each individual + F = np.random.normal(0.5, 0.1, self.population_size) + CR = np.random.normal(0.9, 0.05, self.population_size) + return population, F, CR + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, population, archive, F): + mutant = np.zeros_like(population) + combined = np.vstack([population] + [archive]) if archive else population + num_candidates = len(combined) + + for i in range(self.population_size): + indices = np.random.choice(num_candidates, 3, replace=False) + x1, x2, x3 = combined[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant[i] = np.clip(mutant_vector, self.low, self.high) + return mutant + + def crossover(self, population, mutant, CR): + crossover = np.where( + np.random.rand(self.population_size, self.dimension) < CR[:, None], mutant, population + ) + return crossover + + def select(self, population, fitness, trial_population, trial_fitness, F, CR): + improved = trial_fitness < fitness + population[improved] = trial_population[improved] + fitness[improved] = trial_fitness[improved] + # Adapt F and CR + F[improved] *= 1.1 + CR[improved] *= 0.95 + F[~improved] *= 0.9 + CR[~improved] *= 1.05 + F = np.clip(F, 0.1, 1.0) + CR = np.clip(CR, 0.1, 1.0) + return population, fitness, F, CR + + def __call__(self, func): + population, F, CR = self.initialize() + fitness = self.evaluate(population, func) + iterations = self.budget // self.population_size + + for _ in range(iterations): + if np.random.rand() < 0.1: # Introduce perturbation with 10% chance + random_archive_idx = np.random.choice(len(self.archive)) if self.archive else 0 + population += np.random.normal(0, 0.1) * self.archive[random_archive_idx] + + mutant = self.mutation(population, self.archive, F) + trial_population = self.crossover(population, mutant, CR) + trial_fitness = self.evaluate(trial_population, func) + population, fitness, F, CR = self.select( + population, fitness, trial_population, trial_fitness, F, CR + ) + + # Update the archive with new solutions + for ind in population: + self.archive.append(ind.copy()) + if len(self.archive) > self.archive_max_size: + self.archive.pop(0) + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ASO.py b/nevergrad/optimization/lama/ASO.py new file mode 100644 index 000000000..c7b8e32c4 --- /dev/null +++ b/nevergrad/optimization/lama/ASO.py @@ -0,0 +1,53 @@ +import numpy as np + + +class ASO: + def __init__(self, budget, population_size=100, spiral_rate=0.5): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.spiral_rate = spiral_rate + + def __call__(self, func): + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + velocities = np.zeros((self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Evolutionary loop + while num_evals < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) # Random coefficients + # Spiral updating rule + velocities[i] = ( + r1 * velocities[i] + + r2 * self.spiral_rate * (best_individual - population[i]) + + self.spiral_rate * (np.random.uniform(self.lb, self.ub, self.dimension) - population[i]) + ) + + # Update position + population[i] += velocities[i] + population[i] = np.clip(population[i], self.lb, self.ub) + + # Evaluate + updated_fitness = func(population[i]) + num_evals += 1 + + # Selection + if updated_fitness < fitness[i]: + fitness[i] = updated_fitness + if updated_fitness < best_fitness: + best_fitness = updated_fitness + best_individual = population[i] + + if num_evals >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AVDE.py b/nevergrad/optimization/lama/AVDE.py new file mode 100644 index 000000000..6d1a7858d --- /dev/null +++ b/nevergrad/optimization/lama/AVDE.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AVDE: + def __init__(self, budget, population_size=100, F_base=0.5, CR_init=0.7, adapt_F=0.02, adapt_CR=0.01): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_init = CR_init # Initial crossover probability + self.adapt_F = adapt_F # Rate of adaptation for F + self.adapt_CR = adapt_CR # Rate of adaptation for CR + + def __call__(self, func): + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + velocities = np.zeros_like(population) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Update differential weight (F) and crossover probability (CR) dynamically + Fs = np.clip(np.random.normal(self.F_base, self.adapt_F, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_init, self.adapt_CR, self.population_size), 0.1, 1.0) + + for i in range(self.population_size): + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + Fs[i] * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + velocities[i] = trial - population[i] + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + if num_evals >= self.budget: + break + + # Adjust F_base and CR_init based on successful strategies + successful_Fs = Fs[fitness < np.array([func(ind) for ind in population])] + successful_CRs = CRs[fitness < np.array([func(ind) for ind in population])] + if successful_Fs.size > 0: + self.F_base = np.mean(successful_Fs) + if successful_CRs.size > 0: + self.CR_init = np.mean(successful_CRs) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AcceleratedAdaptivePrecisionCrossoverEvolution.py b/nevergrad/optimization/lama/AcceleratedAdaptivePrecisionCrossoverEvolution.py new file mode 100644 index 000000000..0c381cfb8 --- /dev/null +++ b/nevergrad/optimization/lama/AcceleratedAdaptivePrecisionCrossoverEvolution.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AcceleratedAdaptivePrecisionCrossoverEvolution: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 250 # Further increased population for enhanced diversity + self.elite_size = 50 # Larger elite size for more aggressive exploitation + self.offspring_size = 200 # Larger offspring size for more search potential + self.mutation_scale = 0.005 # Further reduced mutation scale for precision + self.crossover_prob = 0.9 # Even higher crossover probability + self.mutation_prob = 0.05 # Lower mutation probability for stability + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_prob: + cross_point = np.random.randint(1, self.dim) + child = np.empty(self.dim) + child[:cross_point] = parent1[:cross_point] + child[cross_point:] = parent2[cross_point:] + return child + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + if np.random.rand() < self.mutation_prob: + mutation_points = np.random.randint(0, self.dim) + individual[mutation_points] += np.random.normal(0, self.mutation_scale) + individual = np.clip(individual, self.lower_bound, self.upper_bound) + return individual + + def reproduce(self, parents): + offspring = np.empty((self.offspring_size, self.dim)) + num_parents = len(parents) + for i in range(self.offspring_size): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + child = self.crossover(parents[p1], parents[p2]) + child = self.mutate(child) + offspring[i] = child + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_survivors(population, fitness) + + offspring = self.reproduce(elite_population) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveAnnealingDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveAnnealingDifferentialEvolution.py new file mode 100644 index 000000000..29194dac6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveAnnealingDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveAnnealingDifferentialEvolution: + def __init__(self, budget, population_size=20, init_crossover_rate=0.7, init_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = init_crossover_rate + self.mutation_factor = init_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def fitness_sharing(population, fitness, sigma_share=0.1): + shared_fitness = np.copy(fitness) + for i in range(len(population)): + for j in range(len(population)): + if i != j and np.linalg.norm(population[i] - population[j]) < sigma_share: + shared_fitness[i] += fitness[j] + return shared_fitness + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + max_generations = self.budget // self.population_size + temperature = 1.0 + + for generation in range(max_generations): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = self.mutation_factor * (1 - generation / max_generations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = temperature * np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + shared_fitness = fitness_sharing(population, fitness) + population = [population[i] for i in np.argsort(shared_fitness)[: self.population_size]] + fitness = [fitness[i] for i in np.argsort(shared_fitness)[: self.population_size]] + + temperature *= 0.99 + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveAnnealingDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveArchiveDE.py b/nevergrad/optimization/lama/AdaptiveArchiveDE.py new file mode 100644 index 000000000..3ac6e7037 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveArchiveDE.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveArchiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Combine elite and new population + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Increment generation count + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCMADiffEvoPSO.py b/nevergrad/optimization/lama/AdaptiveCMADiffEvoPSO.py new file mode 100644 index 000000000..9096f111a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCMADiffEvoPSO.py @@ -0,0 +1,127 @@ +import numpy as np + + +class AdaptiveCMADiffEvoPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.min_pop_size = 20 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.5 + self.restart_threshold = 100 + self.sigma = 0.3 + self.diversity_threshold = 0.1 # Threshold for population diversity + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.initial_pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return parent1 + F * (parent2 - parent3) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def cma_update(self, population, mean, cov_matrix): + new_samples = np.random.multivariate_normal(mean, cov_matrix, size=population.shape[0]) + return np.clip(new_samples, -5.0, 5.0) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + no_improvement_counter = 0 + + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.initial_pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F = np.random.uniform(0.4, 0.9) + CR = np.random.uniform(0.6, 1.0) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Check for diversity + if ( + self.diversity(population) < self.diversity_threshold + or no_improvement_counter >= self.restart_threshold + ): + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + evaluations += self.initial_pop_size + + # CMA Update + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + population = self.cma_update(population, mean, cov_matrix) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveChaoticFireworksOptimization.py b/nevergrad/optimization/lama/AdaptiveChaoticFireworksOptimization.py new file mode 100644 index 000000000..d1642314b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveChaoticFireworksOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptiveChaoticFireworksOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, budget): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func, budget): + for i in range(self.n_fireworks): + p_diversify = 0.1 + 0.4 * np.exp( + -5 * budget / self.budget + ) # Adaptive probability for diversification + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func, i) + fireworks = self.diversify_fireworks(fireworks, func, i) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveClusterBasedHybridOptimization.py b/nevergrad/optimization/lama/AdaptiveClusterBasedHybridOptimization.py new file mode 100644 index 000000000..30189a652 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveClusterBasedHybridOptimization.py @@ -0,0 +1,196 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.cluster import KMeans + + +class AdaptiveClusterBasedHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.4 + self.F_max = 0.9 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + # Elitism + self.elite_fraction = 0.1 + + # Memory Mechanism + self.memory_size = 10 + self.memory = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + + def _nelder_mead_local_search(self, x, func): + res = minimize(func, x, method="nelder-mead", options={"xtol": 1e-8, "disp": False}) + return res.x, res.fun + + def _adaptive_parameter_adjustment(self): + self.F_max = ( + min(1.0, self.F_max + 0.1) + if self.stagnation_counter > self.stagnation_threshold + else max(self.F_min, self.F_max - 0.1) + ) + self.CR_max = ( + min(1.0, self.CR_max + 0.1) + if self.stagnation_counter > self.stagnation_threshold + else max(self.CR_min, self.CR_max - 0.1) + ) + self.inertia_weight = 0.4 + 0.5 * (self.budget - self.evaluations) / self.budget + + def _cluster_based_search(self, population, fitness, func): + if len(population) > 10: + kmeans = KMeans(n_clusters=10).fit(population) + cluster_centers = kmeans.cluster_centers_ + for center in cluster_centers: + local_candidate, f_local_candidate = self._nelder_mead_local_search(center, func) + self.evaluations += 1 + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + self.stagnation_counter = 0 + + def __call__(self, func): + # Initialize population + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + self.evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while self.evaluations < self.budget: + # Elitism Preservation + elite_count = int(self.elite_fraction * self.population_size) + elites = population[np.argsort(fitness)[:elite_count]].copy() + elite_fitness = np.sort(fitness)[:elite_count].copy() + + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + f_candidate = func(trial_vector) + self.evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if self.evaluations >= self.budget: + break + + # Update personal best + if f_candidate < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = f_candidate + + # Integrate Elitism + population[np.argsort(fitness)[-elite_count:]] = elites + fitness[np.argsort(fitness)[-elite_count:]] = elite_fitness + + # Update velocities and positions (PSO component) + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + self.evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if self.evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment + self._adaptive_parameter_adjustment() + + # Adjust population size dynamically + if self.stagnation_counter > self.stagnation_threshold * 2: + new_population_size = min(self.population_size + 10, 200) + if new_population_size > self.population_size: + new_individuals = np.random.uniform( + self.lb, self.ub, (new_population_size - self.population_size, self.dim) + ) + population = np.vstack((population, new_individuals)) + new_velocities = np.random.uniform( + -1, 1, (new_population_size - self.population_size, self.dim) + ) + velocities = np.vstack((velocities, new_velocities)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.hstack((fitness, new_fitness)) + personal_best_positions = np.vstack((personal_best_positions, new_individuals)) + personal_best_fitness = np.hstack((personal_best_fitness, new_fitness)) + self.population_size = new_population_size + self.evaluations += new_population_size - self.population_size + + # Memory mechanism + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmin([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + # Cluster-Based Enhanced Local Search + self._cluster_based_search(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveClusterHybridOptimizationV5.py b/nevergrad/optimization/lama/AdaptiveClusterHybridOptimizationV5.py new file mode 100644 index 000000000..3d7ab086f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveClusterHybridOptimizationV5.py @@ -0,0 +1,115 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class AdaptiveClusterHybridOptimizationV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def __call__(self, func): + population_size = 80 # Increased population size for diversity + + # Enhanced Initialization using Sobol Sequence + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size) + population = qmc.scale(sample, self.lb, self.ub) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + last_improvement = 0 + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations - last_improvement > self.budget // 10: + strategy = "DE" # Switch to DE if no improvement for a while + else: + strategy = "PSO" + + if strategy == "PSO": + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy with Enhanced Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + scaling_factor = 0.5 + np.random.rand() * 0.5 + mutant_vector = np.clip(a + scaling_factor * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + last_improvement = evaluations + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveClusteredDifferentialEvolutionV2.py b/nevergrad/optimization/lama/AdaptiveClusteredDifferentialEvolutionV2.py new file mode 100644 index 000000000..d91702983 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveClusteredDifferentialEvolutionV2.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats.qmc import Sobol + + +class AdaptiveClusteredDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem statement + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.memory_size = 20 + self.elite_size = 5 + self.memory = [] + self.elite = [] + self.mutation_strategies = [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2] + self.strategy_weights = np.ones(len(self.mutation_strategies)) + self.strategy_success = np.zeros(len(self.mutation_strategies)) + self._dynamic_parameters() + self.no_improvement_count = 0 + + def _initialize_population(self): + sobol_engine = Sobol(d=self.dim, scramble=False) + sobol_samples = sobol_engine.random_base2(m=int(np.log2(self.pop_size // 2))) + sobol_samples = self.lb + (self.ub - self.lb) * sobol_samples + + random_samples = np.random.uniform(self.lb, self.ub, (self.pop_size - len(sobol_samples), self.dim)) + return np.vstack((sobol_samples, random_samples)) + + def _local_search(self, x, func): + res = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim, options={"disp": False} + ) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.random.uniform(0.5, 1.0) + self.CR = np.random.uniform(0.4, 0.9) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _select_strategy(self): + return np.random.choice( + self.mutation_strategies, p=self.strategy_weights / self.strategy_weights.sum() + ) + + def _opposition_based_learning(self, population): + opp_population = self.lb + self.ub - population + return opp_population + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + else: # strategy == self._mutation_rand_2 + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = self.mutation_strategies.index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if self.no_improvement_count >= 5: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + elite_indices = np.argsort(fitness)[: self.elite_size] + self.elite = [population[idx] for idx in elite_indices] + + if self.evaluations < self.budget: + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + self._dynamic_parameters() + + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + population = np.concatenate((population, opp_population), axis=0) + fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(fitness)[: self.pop_size] + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCohortHarmonizationOptimization.py b/nevergrad/optimization/lama/AdaptiveCohortHarmonizationOptimization.py new file mode 100644 index 000000000..82243171e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCohortHarmonizationOptimization.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveCohortHarmonizationOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_intensity=0.05, + crossover_rate=0.7, + adaptive_intensity=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_intensity = adaptive_intensity + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elite_population = population[elite_indices] + global_best = population[elite_indices[0]] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_rate: + # Crossover from elite and a random member + random_member = population[np.random.randint(0, self.population_size)] + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate((global_best[:crossover_point], random_member[crossover_point:])) + else: + # Mutation based on current member and global best + current_member = population[np.random.randint(0, self.population_size)] + mutation_vector = self.mutation_intensity * (global_best - current_member) + child = current_member + mutation_vector + + # Ensure child stays within bounds + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adaptively adjust mutation intensity and crossover rate + self.mutation_intensity *= self.adaptive_intensity + self.crossover_rate = min(self.crossover_rate + 0.01, 1.0) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveCohortMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveCohortMemeticAlgorithm.py new file mode 100644 index 000000000..a0d9ce8d2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCohortMemeticAlgorithm.py @@ -0,0 +1,127 @@ +import numpy as np + + +class AdaptiveCohortMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + self.global_mutation_factor = 0.5 + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + self.global_search_intensity = 10 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.diversity_cycle == 0: + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, 0.1, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * (1 + 0.1 * remaining_budget_ratio) + self.mutation_factor = self.mutation_factor * (1 + 0.1 * remaining_budget_ratio) + + if diversity < self.diversity_threshold / 2 and remaining_budget_ratio > 0.5: + self.global_search(population, func) + + def global_search(self, population, func): + global_search_population = np.random.uniform( + self.lb, self.ub, (self.global_search_intensity, self.dim) + ) + + for ind in global_search_population: + f_ind = func(ind) + if f_ind < self.f_opt: + self.f_opt = f_ind + self.x_opt = ind + + population[: self.global_search_intensity] = global_search_population diff --git a/nevergrad/optimization/lama/AdaptiveControlledMemoryAnnealing.py b/nevergrad/optimization/lama/AdaptiveControlledMemoryAnnealing.py new file mode 100644 index 000000000..22b08d8d2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveControlledMemoryAnnealing.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptiveControlledMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.98 # Cooling rate, slightly less aggressive cooling + beta = 1.5 # Control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + # Dynamically adjust control parameter beta + if evaluations < self.budget / 2: + beta = 1.5 + else: + beta = 2.0 # Increase acceptance of worse solutions for late exploration + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialEvolution.py new file mode 100644 index 000000000..a6a940b78 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialEvolution.py @@ -0,0 +1,121 @@ +import numpy as np + + +class AdaptiveCooperativeDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.02 # Reduced step size for finer local adjustments + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.10: # Lower probability for local search + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.4 + (0.1 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) # Adjusted influence factors + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialMemeticAlgorithm.py new file mode 100644 index 000000000..c39eb92c2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCooperativeDifferentialMemeticAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveCooperativeDifferentialMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.7 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=15, step_size=0.03 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + worst_indices = np.argsort(fitness)[-int(0.3 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 4) == 0 and population_size > 20: + best_indices = np.argsort(fitness)[: int(0.5 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceGradientSearch.py b/nevergrad/optimization/lama/AdaptiveCovarianceGradientSearch.py new file mode 100644 index 000000000..a635f6a91 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceGradientSearch.py @@ -0,0 +1,150 @@ +import numpy as np + + +class AdaptiveCovarianceGradientSearch: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + return x - self.learning_rate * grad + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to elite individuals in the population + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + for i in range(len(elite_pop)): + elite_pop[i] = self.__gradient_local_search(func, elite_pop[i]) + if func(elite_pop[i]) < scores[np.argsort(scores)[: len(elite_pop)][i]]: + scores[np.argsort(scores)[: len(elite_pop)][i]] = func(elite_pop[i]) + + # Update global best after local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..a2df3cf46 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + # Covariance matrix for adaptive strategies + covariance_matrix = np.eye(self.dim) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Apply covariance matrix to enhance mutation + mutant = mutant + np.random.multivariate_normal(np.zeros(self.dim), covariance_matrix) + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Update the covariance matrix based on the new population + mean = np.mean(new_population, axis=0) + deviations = new_population - mean + covariance_matrix = np.dot(deviations.T, deviations) / population_size + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching.py new file mode 100644 index 000000000..5b97c916c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(population, fitness): + """Switch strategy based on current performance.""" + strategy = "default" + if self.eval_count < self.budget * 0.33: + strategy = "explorative" + self.F = 0.9 + self.CR = 0.9 + elif self.eval_count < self.budget * 0.66: + strategy = "balanced" + self.F = 0.7 + self.CR = 0.85 + else: + strategy = "exploitative" + self.F = 0.5 + self.CR = 0.75 + return strategy + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching(population, fitness) + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolution.py new file mode 100644 index 000000000..d9311d5fb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolution.py @@ -0,0 +1,104 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixEvolution: + def __init__(self, budget=10000, population_size=20): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = (-5.0, 5.0) + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Evolution parameters + mu = self.population_size // 2 + weights = np.log(mu + 0.5) - np.log(np.arange(1, mu + 1)) + weights /= np.sum(weights) + mueff = np.sum(weights) ** 2 / np.sum(weights**2) + sigma = 0.3 + cs = (mueff + 2) / (self.dim + mueff + 5) + ds = 1 + 2 * max(0, np.sqrt((mueff - 1) / (self.dim + 1)) - 1) + cs + enn = np.sqrt(self.dim) * (1 - 1 / (4 * self.dim) + 1 / (21 * self.dim**2)) + cc = (4 + mueff / self.dim) / (self.dim + 4 + 2 * mueff / self.dim) + c1 = 2 / ((self.dim + 1.3) ** 2 + mueff) + cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((self.dim + 2) ** 2 + mueff)) + hthresh = (1.4 + 2 / (self.dim + 1)) * enn + + # Evolution strategy state variables + pc = np.zeros(self.dim) + ps = np.zeros(self.dim) + B = np.eye(self.dim) + D = np.ones(self.dim) + C = np.eye(self.dim) + invsqrtC = np.eye(self.dim) + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = 0 + + while evaluations < self.budget: + # Sample new population + arz = np.random.randn(self.population_size, self.dim) + arx = self.x_opt + sigma * np.dot(arz, B * D) + + # Boundary handling + arx = np.clip(arx, self.bounds[0], self.bounds[1]) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in arx]) + evaluations += self.population_size + + # Sort by fitness + sorted_indices = np.argsort(new_fitness) + arx = arx[sorted_indices] + arz = arz[sorted_indices] + new_fitness = new_fitness[sorted_indices] + + # Update best solution found + if new_fitness[0] < self.f_opt: + self.f_opt = new_fitness[0] + self.x_opt = arx[0] + + # Update evolution strategy state variables + xmean = np.dot(weights, arx[:mu]) + zmean = np.dot(weights, arz[:mu]) + + ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(invsqrtC, zmean) + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - cs) ** (2 * evaluations / self.population_size)) / enn + < hthresh + ) + pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * np.dot(B, D * zmean) + + artmp = (arx[:mu] - self.x_opt) / sigma + C = ( + (1 - c1 - cmu) * C + + c1 * (np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + + cmu * np.dot((weights * artmp.T), artmp) + ) + + sigma *= np.exp((np.linalg.norm(ps) / enn - 1) * cs / ds) + + if eigenval_update_counter <= 0: + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = eigenval_update_freq + C = np.triu(C) + np.triu(C, 1).T + D, B = np.linalg.eigh(C) + D = np.sqrt(D) + invsqrtC = np.dot(B, np.dot(np.diag(D**-1), B.T)) + else: + eigenval_update_counter -= 1 + + self.x_opt = xmean + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionStrategy.py new file mode 100644 index 000000000..e2fbb6925 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixEvolutionStrategy: + def __init__(self, budget, population_size=30, elite_fraction=0.2, initial_sigma=0.3): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + + def adaptive_covariance_matrix_adaptation(self, func, pop, scores, mean, C, sigma): + n_samples = len(pop) + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.adaptive_covariance_matrix_adaptation(func, pop, scores, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + # Adaptive sigma + sigma = self.initial_sigma * (1 - iteration / max_iterations) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation.py new file mode 100644 index 000000000..e4a2c60e1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation.py @@ -0,0 +1,88 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Population size + self.sigma = 0.3 # Initial step size + self.c1 = 0.01 # Learning rate for rank-one update + self.cmu = 0.01 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) # Damping factor for step size + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) # Number of parents for recombination + self.adaptive_learning_rate = 0.1 # Learning rate for adaptive self-adaptive mutation + self.eval_count = 0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_fitness = np.array([func(ind) for ind in offspring]) + self.eval_count += self.population_size + + population = offspring + fitness = new_fitness + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptation.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptation.py new file mode 100644 index 000000000..b666cda32 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptation.py @@ -0,0 +1,114 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixSelfAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.3, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma, weights, pc, ps): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation( + func, pop, mean, C, sigma, np.ones(self.population_size) / self.population_size, pc, ps + ) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean_new = np.dot(np.ones(elite_count) / elite_count, elite_pop) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_count + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptationV2.py b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptationV2.py new file mode 100644 index 000000000..526f110f1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCovarianceMatrixSelfAdaptationV2.py @@ -0,0 +1,112 @@ +import numpy as np + + +class AdaptiveCovarianceMatrixSelfAdaptationV2: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.3, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean_new = np.dot(np.ones(elite_count) / elite_count, elite_pop) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_count + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCrossoverDEPSO.py b/nevergrad/optimization/lama/AdaptiveCrossoverDEPSO.py new file mode 100644 index 000000000..508ae19c7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCrossoverDEPSO.py @@ -0,0 +1,155 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveCrossoverDEPSO: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.adaptive_crossover_prob = [0.9, 0.8, 0.7, 0.6, 0.5] + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 # Archive size for memory-based learning + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + archive_fitness = np.array([evaluate(ind) for ind in archive]) + eval_count += len(archive) + if best_fitness not in archive_fitness: + worst_index = np.argmax(archive_fitness) + if best_fitness < archive_fitness[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/AdaptiveCrossoverElitistStrategyV6.py b/nevergrad/optimization/lama/AdaptiveCrossoverElitistStrategyV6.py new file mode 100644 index 000000000..eb1ac5c02 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCrossoverElitistStrategyV6.py @@ -0,0 +1,87 @@ +import numpy as np + + +class AdaptiveCrossoverElitistStrategyV6: + def __init__( + self, + budget, + dimension=5, + population_size=120, + elite_fraction=0.25, + mutation_intensity=0.1, + crossover_rate=0.85, + adaptive_crossover=True, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_crossover = adaptive_crossover + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform adaptive crossover + parent1, parent2 = self.select_parents(elites, population) + child = self.recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation intensity + scale = self.mutation_intensity * np.exp(-evaluations / self.budget * 10) + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2, evaluations): + # Adaptive recombination based on the stage of optimization + alpha = np.random.uniform(0.4, 0.6) + if self.adaptive_crossover: + alpha *= np.exp(-evaluations / self.budget) + return alpha * parent1 + (1 - alpha) * parent2 + + def select_parents(self, elites, population): + if self.adaptive_crossover: + parent1 = elites[np.random.choice(len(elites))] + parent2 = population[np.random.randint(0, self.population_size)] + else: + parent_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parent_indices[0]], elites[parent_indices[1]] + return parent1, parent2 diff --git a/nevergrad/optimization/lama/AdaptiveCrossoverSearch.py b/nevergrad/optimization/lama/AdaptiveCrossoverSearch.py new file mode 100644 index 000000000..13ad5ab3e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCrossoverSearch.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveCrossoverSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize parameters + population_size = 20 + crossover_rate = 0.7 + mutation_rate = 0.1 + elitism_rate = 0.1 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = population_size + + while evaluations < self.budget: + new_population = [] + + # Elitism: carry forward the best solutions + elitism_count = int(elitism_rate * population_size) + elite_indices = np.argsort(fitness)[:elitism_count] + new_population.extend(population[elite_indices]) + + while len(new_population) < population_size: + # Select parents + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + # Crossover + if np.random.rand() < crossover_rate: + crossover_point = np.random.randint(1, self.dim) + child1 = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + child2 = np.concatenate((parent2[:crossover_point], parent1[crossover_point:])) + else: + child1, child2 = parent1, parent2 + + # Mutation + for child in [child1, child2]: + if np.random.rand() < mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.lb, self.ub) + + new_population.append(child) + if len(new_population) >= population_size: + break + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += population_size + + # Update population + population = np.array(new_population) + fitness = new_fitness + + # Update best solution + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < self.f_opt: + self.f_opt = current_best_fitness + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalCooperativeSearch.py b/nevergrad/optimization/lama/AdaptiveCulturalCooperativeSearch.py new file mode 100644 index 000000000..542e2e5d9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalCooperativeSearch.py @@ -0,0 +1,108 @@ +import numpy as np + + +class AdaptiveCulturalCooperativeSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size // 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.2 + + # Cooperative cultural influence updates + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * np.random.normal( + 0, 0.1, self.dim + ) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveCulturalDifferentialEvolution.py new file mode 100644 index 000000000..29d91565a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalDifferentialEvolution.py @@ -0,0 +1,121 @@ +import numpy as np + + +class AdaptiveCulturalDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.9): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.1: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.5 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalDifferentialMemeticEvolution.py b/nevergrad/optimization/lama/AdaptiveCulturalDifferentialMemeticEvolution.py new file mode 100644 index 000000000..717ebd597 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalDifferentialMemeticEvolution.py @@ -0,0 +1,130 @@ +import numpy as np + + +class AdaptiveCulturalDifferentialMemeticEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def update_knowledge_base(self, knowledge_base, population, fitness): + best_individual = population[np.argmin(fitness)] + mean_position = np.mean(population, axis=0) + knowledge_base["best_solution"] = best_individual + knowledge_base["best_fitness"] = np.min(fitness) + knowledge_base["mean_position"] = mean_position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.2: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.5 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + self.update_knowledge_base(knowledge_base, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveCulturalEvolutionStrategy.py new file mode 100644 index 000000000..8a53bc186 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalEvolutionStrategy.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveCulturalEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.1: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size // 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.1 + + # Cooperative cultural influence updates + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * np.random.normal( + 0, 0.1, self.dim + ) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/AdaptiveCulturalEvolutionaryAlgorithm.py new file mode 100644 index 000000000..f19e64e79 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalEvolutionaryAlgorithm.py @@ -0,0 +1,117 @@ +import numpy as np + + +class AdaptiveCulturalEvolutionaryAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 30 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: # More infrequent updates + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveCulturalMemeticAlgorithm.py new file mode 100644 index 000000000..81e35a93f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalMemeticAlgorithm.py @@ -0,0 +1,115 @@ +import numpy as np + + +class AdaptiveCulturalMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func): + """Simple local search around a point with adaptive perturbation""" + best_x = x + best_f = func(x) + perturbation_std = 0.1 + for _ in range(10): + perturbation = np.random.normal(0, perturbation_std, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + perturbation_std *= 0.9 # decrease perturbation if improvement is found + else: + perturbation_std *= 1.1 # increase perturbation if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "worst_fitness": -np.inf, + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation and Crossover using Differential Evolution + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < CR[i] + if not np.any(crossover_points): + crossover_points[np.random.randint(0, self.dim)] = True + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + F[i] = F[i] + 0.1 * (np.random.rand() - 0.5) + F[i] = np.clip(F[i], 0.5, 1.0) + CR[i] = CR[i] + 0.1 * (np.random.rand() - 0.5) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + if trial_fitness > knowledge_base["worst_fitness"]: + knowledge_base["worst_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.1: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size // 2) == 0: + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.1 + population += cultural_shift + population = np.clip(population, self.lb, self.ub) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveCulturalMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveCulturalMemeticDifferentialEvolution.py new file mode 100644 index 000000000..c5d5a2bb7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveCulturalMemeticDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveCulturalMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 # Set to 100 for optimized balance + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.3: # Reduced probability for more global exploration + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.2 + (0.3 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDEPSOOptimizer.py b/nevergrad/optimization/lama/AdaptiveDEPSOOptimizer.py new file mode 100644 index 000000000..d38d9e846 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDEPSOOptimizer.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveDEPSOOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.crossover_probability: + mutant = self.differential_mutation(population, i) + trial = self.differential_crossover(population[i], mutant) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: self.population_size // 4] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def differential_mutation(self, population, current_idx): + indices = [idx for idx in range(self.population_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def differential_crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + trial = np.where(crossover_mask, mutant, target) + return trial + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/AdaptiveDEWithElitismAndLocalSearch.py b/nevergrad/optimization/lama/AdaptiveDEWithElitismAndLocalSearch.py new file mode 100644 index 000000000..419a23a4b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDEWithElitismAndLocalSearch.py @@ -0,0 +1,86 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveDEWithElitismAndLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.pop_size = 20 + self.F = 0.8 # Initial differential weight + self.CR = 0.9 # Initial crossover probability + self.local_search_prob = 0.1 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def nelder_mead(self, x, func): + result = minimize(func, x, method="Nelder-Mead", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + # Select three distinct individuals (but different from i) + indices = np.arange(self.pop_size) + indices = indices[indices != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive differential weight and crossover probability + F = 0.5 + np.random.rand() * 0.5 + CR = 0.5 + np.random.rand() * 0.5 + + # Differential Evolution mutation and crossover + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Local Search with an adaptive probability + if np.random.rand() < self.local_search_prob and evaluations + 1 <= self.budget: + trial, f_trial = self.nelder_mead(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + # Check if we've exhausted our budget + if evaluations >= self.budget: + break + + # Elitism: Keep the best individual + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + if best_fitness < self.f_opt: + self.f_opt = best_fitness + self.x_opt = best_individual + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDEWithOrthogonalCrossover.py b/nevergrad/optimization/lama/AdaptiveDEWithOrthogonalCrossover.py new file mode 100644 index 000000000..e6881cb45 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDEWithOrthogonalCrossover.py @@ -0,0 +1,45 @@ +import numpy as np + + +class AdaptiveDEWithOrthogonalCrossover: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + # Introducing orthogonal crossover + orthogonal_vector = np.random.uniform(-1, 1, size=dimension) + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(population_fitness) + if population_fitness[best_idx] < self.f_opt: + self.f_opt = population_fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/AdaptiveDecayOptimizer.py b/nevergrad/optimization/lama/AdaptiveDecayOptimizer.py new file mode 100644 index 000000000..339340ff6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDecayOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveDecayOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the optimization problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 150 + num_elites = 15 + mutation_factor = 0.9 + crossover_rate = 0.6 + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elite preservation + elite_indices = np.argsort(fitness)[:num_elites] + new_population[:num_elites] = population[elite_indices] + new_fitness[:num_elites] = fitness[elite_indices] + + # Generate new solutions + for i in range(num_elites, population_size): + if current_budget >= self.budget: + break + + # Differential mutation based on random selection + indices = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate new solution + trial_fitness = func(trial) + current_budget += 1 + + # Selection step + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Update the population and fitness + population = new_population + fitness = new_fitness + + # Adapt mutation factor and crossover rate dynamically + mutation_factor *= 0.99 # Decay mutation factor gradually + crossover_rate = min(0.9, crossover_rate * 1.02) # Gradually increase crossover rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialCrossover.py b/nevergrad/optimization/lama/AdaptiveDifferentialCrossover.py new file mode 100644 index 000000000..406c7b3f3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialCrossover.py @@ -0,0 +1,55 @@ +import numpy as np + + +class AdaptiveDifferentialCrossover: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.mutation_factor = 0.8 + self.crossover_rate = 0.7 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population): + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3): + return parent1 + self.mutation_factor * (parent2 - parent3) + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dim) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + for _ in range(self.budget // self.pop_size - 1): + for i in range(self.pop_size): + parent1, parent2, parent3 = self.select_parents(population) + mutant = self.mutate(parent1, parent2, parent3) + trial = self.crossover(population[i], mutant) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..70f43ae56 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolution.py @@ -0,0 +1,54 @@ +import numpy as np + + +class AdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 10 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + while evaluations < self.budget: + for i in range(population_size): + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionHarmonySearch.py new file mode 100644 index 000000000..14b2acc4f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionHarmonySearch.py @@ -0,0 +1,87 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionHarmonySearch: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.9, + par=0.4, + bw=0.5, + bw_decay=0.95, + f_weight=0.8, + cr=0.9, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + self.f_weight = f_weight # Differential evolution weighting factor + self.cr = cr # Differential evolution crossover rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def differential_evolution(self, func): + idxs = np.random.choice(self.harmony_memory_size, 3, replace=False) + v = self.harmony_memory[idxs[0]] + self.f_weight * ( + self.harmony_memory[idxs[1]] - self.harmony_memory[idxs[2]] + ) + mask = np.random.rand(self.dim) < self.cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + u = np.where(mask, v, self.harmony_memory[np.random.randint(0, self.harmony_memory_size)]) + + return u + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + new_fitness = func(new_harmony) + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + de_harmony = self.differential_evolution(func) + de_fitness = func(de_harmony) + if de_fitness < self.f_opt: + self.f_opt = de_fitness + self.x_opt = de_harmony + + idx_worst_de = np.argmax(self.harmony_memory_fitness) + if de_fitness < self.harmony_memory_fitness[idx_worst_de]: + self.harmony_memory[idx_worst_de] = de_harmony + self.harmony_memory_fitness[idx_worst_de] = de_fitness + + self.bw *= self.bw_decay # Decay the bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..746f1cdbd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, F=0.8, CR=0.9, adapt_factor=0.99): + self.budget = budget + self.pop_size = pop_size + self.F = F # Differential weight + self.CR = CR # Crossover probability + self.adapt_factor = adapt_factor # Factor for adapting F and CR + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Evolutionary loop + while self.eval_count < self.budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + + if self.eval_count >= self.budget: + break + + # Adapt F and CR + self.F *= self.adapt_factor + self.CR *= self.adapt_factor + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPSO.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPSO.py new file mode 100644 index 000000000..37eac38f1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPSO.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.min_pop_size = 20 + self.initial_F = 0.5 # Initial mutation factor + self.initial_CR = 0.9 # Initial crossover rate + self.c1 = 1.5 # Cognitive parameter + self.c2 = 1.5 # Social parameter + self.w = 0.5 # Inertia weight + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.initial_pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return parent1 + F * (parent2 - parent3) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.initial_pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F = np.random.uniform(0.4, 0.9) # Adaptive mutation factor + CR = np.random.uniform(0.6, 1.0) # Adaptive crossover rate + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPlus.py new file mode 100644 index 000000000..ea781984e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionPlus.py @@ -0,0 +1,59 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionPlus: + def __init__(self, budget=10000, population_size=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Add adaptiveness to mutation factor and crossover probability + adaptive_rate = 0.1 # rate of adaptation + while eval_count < self.budget: + for i in range(self.population_size): + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + # Selection + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + # Adapt mutation factor and crossover probability + self.mutation_factor = min(1.0, self.mutation_factor + adaptive_rate) + self.crossover_probability = min(1.0, self.crossover_probability + adaptive_rate) + else: + self.mutation_factor = max(0.1, self.mutation_factor - adaptive_rate) + self.crossover_probability = max(0.1, self.crossover_probability - adaptive_rate) + + if eval_count >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithAdaptivePerturbation.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithAdaptivePerturbation.py new file mode 100644 index 000000000..af05f2df6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithAdaptivePerturbation.py @@ -0,0 +1,112 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithAdaptivePerturbation: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + else: + self.base_lr *= 0.95 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveDifferentialEvolutionWithAdaptivePerturbation(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithBayesianLocalSearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithBayesianLocalSearch.py new file mode 100644 index 000000000..bbf71ed55 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithBayesianLocalSearch.py @@ -0,0 +1,146 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import Matern + + +class AdaptiveDifferentialEvolutionWithBayesianLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, gp, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + else: + # Use the GP model for local search guidance + new_f = gp.predict([new_x])[0] + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def surrogate_model(self, X, y): + kernel = Matern(nu=2.5) + gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gp.fit(X, y) + return gp + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply Bayesian guided local search + if np.random.rand() < 0.5: + gp = self.surrogate_model(population, fitness) + local_best_x, local_best_f = self.local_search( + population[i], func, gp, step_size=0.01, max_iter=10 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reinitialize worst individuals more frequently + if evaluations + int(0.20 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.20 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Elite Preservation with larger perturbations + elite_size = int(0.2 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.05, 0.05, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + # New Guided Search component + if evaluations + population_size <= self.budget: + guided_population = np.array([self.guided_search(elites) for _ in range(population_size)]) + guided_fitness = np.array([func(ind) for ind in guided_population]) + evaluations += population_size + + combined_population = np.vstack((population, guided_population)) + combined_fitness = np.hstack((fitness, guided_fitness)) + + best_indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[best_indices] + fitness = combined_fitness[best_indices] + + if combined_fitness[best_indices[0]] < self.f_opt: + self.f_opt = combined_fitness[best_indices[0]] + self.x_opt = combined_population[best_indices[0]] + + iteration += 1 + + return self.f_opt, self.x_opt + + def guided_search(self, elites): + elite_mean = np.mean(elites, axis=0) + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + guided_individual = np.clip(elite_mean + perturbation, self.lb, self.ub) + return guided_individual diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..e1a97b0db --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation.py @@ -0,0 +1,107 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Initial differential weight + CR = 0.9 # Initial crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + while evaluations < self.budget: + # Covariance Matrix Adaptation + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 + np.eye( + self.dim + ) * 1e-6 # Ensure positive semi-definiteness + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Apply Covariance Matrix Adaptation + covariance_update_population = np.zeros_like(population) + for i in range(population_size): + perturbation = np.random.multivariate_normal(np.zeros(self.dim), cov_matrix) + covariance_update_population[i] = np.clip(mean + perturbation, bounds[0], bounds[1]) + f_trial = func(covariance_update_population[i]) + evaluations += 1 + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = covariance_update_population[i] + + if evaluations >= self.budget: + break + + population = np.concatenate((population, covariance_update_population), axis=0) + fitness = np.array([func(ind) for ind in population]) + best_indices = np.argsort(fitness)[:population_size] + population = population[best_indices] + fitness = fitness[best_indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithDynamicPopulationV2.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithDynamicPopulationV2.py new file mode 100644 index 000000000..9944a3ff9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithDynamicPopulationV2.py @@ -0,0 +1,103 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithDynamicPopulationV2: + def __init__( + self, + budget=1000, + init_population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.init_population_size = init_population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population_size = self.init_population_size + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=population_size + ) + + for _ in range(self.budget): + for i in range(population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + if np.random.rand() < 0.1: + if population_size > 2: + population_size -= 1 + idx = np.argmin(fitness_values) + population = np.delete(population, idx, axis=0) + fitness_values = np.delete(fitness_values, idx) + scaling_factors = np.delete(scaling_factors, idx) + crossover_rates = np.delete(crossover_rates, idx) + elif population_size < self.init_population_size: + population_size += 1 + new_individual = np.random.uniform(func.bounds.lb, func.bounds.ub, size=dimension) + population = np.vstack([population, new_individual]) + fitness_values = np.append(fitness_values, func(new_individual)) + scaling_factors = np.append( + scaling_factors, np.random.uniform(*self.scaling_factor_range) + ) + crossover_rates = np.append( + crossover_rates, np.random.uniform(*self.crossover_rate_range) + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGradientBoost.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGradientBoost.py new file mode 100644 index 000000000..00fb37af9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGradientBoost.py @@ -0,0 +1,112 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithGradientBoost: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + elif success_count / self.population_size < 0.1: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveDifferentialEvolutionWithGradientBoost(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGuidedSearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGuidedSearch.py new file mode 100644 index 000000000..7d7430b75 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithGuidedSearch.py @@ -0,0 +1,148 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import Matern + + +class AdaptiveDifferentialEvolutionWithGuidedSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def surrogate_model(self, X, y): + kernel = Matern(nu=2.5) + gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gp.fit(X, y) + return gp + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Use surrogate model to approximate fitness + if evaluations < self.budget / 2: + trial_fitness = func(trial_vector) + else: + X = population[: i + 1] + y = fitness[: i + 1] + surrogate = self.surrogate_model(X, y) + trial_fitness = surrogate.predict([trial_vector])[0] + + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search more aggressively + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=10 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reinitialize worst individuals more frequently + if evaluations + int(0.20 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.20 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Elite Preservation with larger perturbations + elite_size = int(0.2 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.05, 0.05, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + # New Guided Search component + if evaluations + population_size <= self.budget: + guided_population = np.array([self.guided_search(elites) for _ in range(population_size)]) + guided_fitness = np.array([func(ind) for ind in guided_population]) + evaluations += population_size + + combined_population = np.vstack((population, guided_population)) + combined_fitness = np.hstack((fitness, guided_fitness)) + + best_indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[best_indices] + fitness = combined_fitness[best_indices] + + if combined_fitness[best_indices[0]] < self.f_opt: + self.f_opt = combined_fitness[best_indices[0]] + self.x_opt = combined_population[best_indices[0]] + + iteration += 1 + + return self.f_opt, self.x_opt + + def guided_search(self, elites): + elite_mean = np.mean(elites, axis=0) + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + guided_individual = np.clip(elite_mean + perturbation, self.lb, self.ub) + return guided_individual diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithLocalSearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithLocalSearch.py new file mode 100644 index 000000000..c686a7573 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithLocalSearch.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithLocalSearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-6 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_pos[i] += epsilon + f_pos = func(x_pos) + x_neg = np.copy(x) + x_neg[i] -= epsilon + f_neg = func(x_neg) + grad[i] = (f_pos - f_neg) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.8 - 0.7 * (iteration / max_iterations) + self.crossover_rate = 0.9 - 0.4 * (iteration / max_iterations) + self.learning_rate = 0.01 * np.exp(-iteration / (0.5 * max_iterations)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Perform local search on all population members + for i in range(self.population_size): + pop[i], scores[i] = self.local_search(func, pop[i], scores[i]) + evaluations += 1 + if evaluations >= self.budget: + break + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithMemeticSearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithMemeticSearch.py new file mode 100644 index 000000000..3df0dd57e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithMemeticSearch.py @@ -0,0 +1,126 @@ +import numpy as np + + +class AdaptiveDifferentialEvolutionWithMemeticSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Apply memetic search on the best solution found so far + if evaluations < self.budget: + local_search_budget = int(self.budget * 0.1) # allocate 10% of the budget for local search + for _ in range(local_search_budget): + perturbation = np.random.normal(0, 0.1, self.dim) + local_trial = np.clip(self.x_opt + perturbation, bounds[0], bounds[1]) + f_local_trial = func(local_trial) + evaluations += 1 + + if f_local_trial < self.f_opt: + self.f_opt = f_local_trial + self.x_opt = local_trial + + if evaluations >= self.budget: + break + + # Additional memetic search using a gradient-based method + if evaluations < self.budget: + local_search_budget = int( + self.budget * 0.1 + ) # allocate another 10% of the budget for local search + for _ in range(local_search_budget): + gradient = self.compute_gradient(func, self.x_opt) + local_trial = np.clip(self.x_opt - 0.01 * gradient, bounds[0], bounds[1]) + f_local_trial = func(local_trial) + evaluations += 1 + + if f_local_trial < self.f_opt: + self.f_opt = f_local_trial + self.x_opt = local_trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + def compute_gradient(self, func, x): + epsilon = 1e-8 + gradient = np.zeros(self.dim) + for i in range(self.dim): + x_upper = x.copy() + x_lower = x.copy() + x_upper[i] += epsilon + x_lower[i] -= epsilon + f_upper = func(x_upper) + f_lower = func(x_lower) + gradient[i] = (f_upper - f_lower) / (2 * epsilon) + return gradient diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithSurrogateAssistance.py b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithSurrogateAssistance.py new file mode 100644 index 000000000..0a016bd79 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialEvolutionWithSurrogateAssistance.py @@ -0,0 +1,125 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import Matern + + +class AdaptiveDifferentialEvolutionWithSurrogateAssistance: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def surrogate_model(self, X, y): + kernel = Matern(nu=2.5) + gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gp.fit(X, y) + return gp + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Use surrogate model to approximate fitness + if evaluations < self.budget / 2: + trial_fitness = func(trial_vector) + else: + X = population[: i + 1] + y = fitness[: i + 1] + surrogate = self.surrogate_model(X, y) + trial_fitness = surrogate.predict([trial_vector])[0] + + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search more aggressively + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=10 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reinitialize worst individuals more frequently + if evaluations + int(0.20 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.20 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Elite Preservation with larger perturbations + elite_size = int(0.2 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.05, 0.05, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialHarmonySearch.py new file mode 100644 index 000000000..cf069720f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialHarmonySearch.py @@ -0,0 +1,52 @@ +import numpy as np + + +class AdaptiveDifferentialHarmonySearch: + def __init__(self, budget=10000, harmony_memory_size=20, hmcr=0.9, par=0.4, bw=0.5, bw_decay=0.95): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + new_fitness = func(new_harmony) + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + self.bw *= self.bw_decay # Decay the bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveDifferentialMemeticAlgorithm.py new file mode 100644 index 000000000..a5ef59b33 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialMemeticAlgorithm.py @@ -0,0 +1,122 @@ +import numpy as np + + +class AdaptiveDifferentialMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.F_min = 0.5 + self.F_max = 0.9 + self.CR_min = 0.3 + self.CR_max = 0.8 + self.local_search_chance = 0.2 + self.elite_ratio = 0.1 + self.diversity_threshold = 0.05 + self.cauchy_step_scale = 0.01 + self.gaussian_step_scale = 0.01 + self.reinitialization_rate = 0.1 + self.hyper_heuristic_probability = 0.7 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + self.adaptive_parameters_adjustment(evaluations) + + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(20): + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_parameters_adjustment(self, evaluations): + progress_ratio = evaluations / self.budget + self.F = self.F_min + (self.F_max - self.F_min) * progress_ratio + self.CR = self.CR_min + (self.CR_max - self.CR_min) * progress_ratio + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialQuantumEvolution.py b/nevergrad/optimization/lama/AdaptiveDifferentialQuantumEvolution.py new file mode 100644 index 000000000..51405968b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialQuantumEvolution.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdaptiveDifferentialQuantumEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + quantum_size = 10 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + F = initial_F + CR = initial_CR + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + eval_count = 0 + convergence_threshold = 1e-6 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = initial_F * (1 - eval_count / budget) + adaptive_CR = initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + # Quantum-inspired update + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + if abs(best_value - candidate_value) < convergence_threshold: + break + + population = new_population + + # Refine convergence by scaling the bounds adaptively + if eval_count % (self.budget // 10) == 0 and eval_count > 0: + self.lower_bound /= 1.1 + self.upper_bound /= 1.1 + self.lower_bound = max(self.lower_bound, -5.0) + self.upper_bound = min(self.upper_bound, 5.0) + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveDifferentialQuantumEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialQuantumMetaheuristic.py b/nevergrad/optimization/lama/AdaptiveDifferentialQuantumMetaheuristic.py new file mode 100644 index 000000000..cca29534f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialQuantumMetaheuristic.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveDifferentialQuantumMetaheuristic: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + quantum_size = 10 + initial_F = 0.5 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + F = initial_F + CR = initial_CR + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + eval_count = 0 + convergence_threshold = 1e-6 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + return initial_F * (1 - eval_count / budget), initial_CR * (1 - eval_count / budget) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + if abs(best_value - candidate_value) < convergence_threshold: + break + + population = new_population + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveDifferentialQuantumMetaheuristic(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDifferentialSpiralSearch.py b/nevergrad/optimization/lama/AdaptiveDifferentialSpiralSearch.py new file mode 100644 index 000000000..749f4816d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDifferentialSpiralSearch.py @@ -0,0 +1,59 @@ +import numpy as np + + +class AdaptiveDifferentialSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize a population around the search space + population_size = 50 + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Initialize spiral dynamics + radius = 5.0 + angle_increment = 2 * np.pi / population_size + evaluations_left = self.budget - population_size + radius_decay = 0.98 + angle_speed_increase = 1.03 + + while evaluations_left > 0: + # Select three random indices for differential mutation + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation and crossover + mutant = a + 0.8 * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Spiral step modification on the mutant + angle = np.random.uniform(0, 2 * np.pi) + offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + candidate = mutant + offset + candidate = np.clip(candidate, -5.0, 5.0) + + # Evaluate candidate + f_candidate = func(candidate) + evaluations_left -= 1 + + # Selection + worst_index = np.argmax(fitness) + if f_candidate < fitness[worst_index]: + population[worst_index] = candidate + fitness[worst_index] = f_candidate + + # Update the optimal solution found + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + + # Update spiral dynamics parameters + radius *= radius_decay + angle_increment *= angle_speed_increase + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDimensionalClimbingEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveDimensionalClimbingEvolutionStrategy.py new file mode 100644 index 000000000..4da15b3d5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDimensionalClimbingEvolutionStrategy.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveDimensionalClimbingEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + # Population settings + population_size = 100 + elite_size = 10 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.1 + adaptive_factor = 0.99 + recombination_prob = 0.6 + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + # Recombination of two randomly selected individuals + parents_indices = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parents_indices] + child = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + # Select one parent and clone + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + # Adaptive mutation based on individual performance + individual_mutation_scale = mutation_scale * adaptive_factor ** (self.f_opt - fitness[i]) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Introduce elitism + if evaluations % 500 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + replace_indices = np.random.choice(population_size, elite_size, replace=False) + population[replace_indices] = elite_individuals + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDimensionalCrossoverEvolver.py b/nevergrad/optimization/lama/AdaptiveDimensionalCrossoverEvolver.py new file mode 100644 index 000000000..0501c6dcb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDimensionalCrossoverEvolver.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveDimensionalCrossoverEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.15, + mutation_intensity=0.1, + crossover_rate=0.75, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + child = np.where(crossover_mask, parent1, parent2) + return child + + def reproduce(self, elites, elite_fitness): + new_population = np.empty((self.population_size, self.dimension)) + for i in range(self.population_size): + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + child = self.mutate(child) + new_population[i] = child + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveDirectionalBiasQuorumOptimization.py b/nevergrad/optimization/lama/AdaptiveDirectionalBiasQuorumOptimization.py new file mode 100644 index 000000000..ba6909d73 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDirectionalBiasQuorumOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptiveDirectionalBiasQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_scale=0.25, + momentum=0.9, + adaptive_rate=0.1, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = max(1, int(population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.adaptive_rate = adaptive_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Initialize best solution tracking + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Select a quorum including the best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Determine the local best + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Adaptive mutation based on local best, global best, and velocity + direction = best_individual - local_best + random_direction = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = random_direction * direction + self.momentum * velocity + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution and velocity + if child_fitness < best_fitness: + velocity = self.momentum * velocity + self.adaptive_rate * (child - best_individual) + best_fitness = child_fitness + best_individual = child + + new_population.append(child) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Adaptively update mutation scale and elite count + self.mutation_scale *= 1 + self.adaptive_rate * np.random.uniform(-1, 1) + self.elite_count = max( + 1, int(self.elite_count * (1 + self.adaptive_rate * np.random.uniform(-0.1, 0.1))) + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveDirectionalSearch.py b/nevergrad/optimization/lama/AdaptiveDirectionalSearch.py new file mode 100644 index 000000000..8ee003f28 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDirectionalSearch.py @@ -0,0 +1,62 @@ +import numpy as np + + +class AdaptiveDirectionalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + self.alpha = 0.1 # Step size + self.beta = 0.5 # Contraction factor + self.gamma = 2.0 # Expansion factor + self.delta = 1e-5 # Small perturbation to avoid stagnation + + x = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f = func(x) + evaluations = 1 + + while evaluations < self.budget: + direction = np.random.randn(self.dim) + direction /= np.linalg.norm(direction) # Normalize direction vector + + # Try expanding + x_new = x + self.gamma * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.gamma + else: + # Try contracting + x_new = x + self.beta * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.beta + else: + # Apply small perturbation to avoid getting stuck + x_new = x + self.delta * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDivergenceClusteringSearch.py b/nevergrad/optimization/lama/AdaptiveDivergenceClusteringSearch.py new file mode 100644 index 000000000..afe3ec79e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDivergenceClusteringSearch.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveDivergenceClusteringSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # given as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population_size = 10 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + + # Evaluate initial population + fitness = np.array([func(individual) for individual in population]) + + # Main optimization loop + iteration = 0 + while iteration < self.budget: + # Select the best solution for breeding + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + # Update optimal solution + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = best_individual + + # Generate new solutions based on adaptive divergence and clustering + new_population = [] + cluster_center = np.mean(population, axis=0) + for i in range(population_size): + if np.random.rand() < 0.5: + # Diverge away from the cluster center + new_individual = population[i] + np.random.normal(0, 1, self.dimension) * ( + population[i] - cluster_center + ) + else: + # Converge towards the best solution + new_individual = population[i] + np.random.normal(0, 1, self.dimension) * ( + best_individual - population[i] + ) + + # Ensure new individual is within bounds + new_individual = np.clip(new_individual, self.lower_bound, self.upper_bound) + new_population.append(new_individual) + + # Evaluate new population + new_fitness = np.array([func(individual) for individual in new_population]) + + # Replace old population with new if better + for i in range(population_size): + if new_fitness[i] < fitness[i]: + population[i] = new_population[i] + fitness[i] = new_fitness[i] + + iteration += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiverseHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveDiverseHybridOptimizer.py new file mode 100644 index 000000000..1f580fa9d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiverseHybridOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveDiverseHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.diversity_threshold = diversity_threshold + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def calculate_diversity(self, population): + pairwise_distances = np.sqrt(((population[:, np.newaxis] - population) ** 2).sum(axis=2)) + diversity = np.mean(pairwise_distances) + return diversity + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Diversity preservation mechanism + diversity = self.calculate_diversity(population) + if diversity < self.diversity_threshold: + new_population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim) + ) + population = np.concatenate((population, new_population), axis=0) + fitness = np.concatenate((fitness, [func(ind) for ind in new_population])) + self.eval_count += self.init_pop_size + velocities = np.concatenate( + (velocities, np.random.uniform(-1, 1, (self.init_pop_size, self.dim))), axis=0 + ) + F_values = np.concatenate((F_values, np.full(self.init_pop_size, self.init_F)), axis=0) + CR_values = np.concatenate((CR_values, np.full(self.init_pop_size, self.init_CR)), axis=0) + p_best = np.concatenate((p_best, new_population), axis=0) + p_best_fitness = np.concatenate( + (p_best_fitness, [func(ind) for ind in new_population]), axis=0 + ) + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversifiedEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveDiversifiedEvolutionStrategy.py new file mode 100644 index 000000000..789c75fc4 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversifiedEvolutionStrategy.py @@ -0,0 +1,61 @@ +import numpy as np + + +class AdaptiveDiversifiedEvolutionStrategy: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = 20 + self.mutation_rate = 1.0 / dimension + self.mutation_scale = 0.1 + self.crossover_probability = 0.7 + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + f_opt = np.min(fitness) + x_opt = population[np.argmin(fitness)] + + # Evolutionary loop + iterations = self.budget // self.population_size + for _ in range(iterations): + new_population = [] + + for idx in range(self.population_size): + # Mutation + if np.random.rand() < self.mutation_rate: + mutant = population[idx] + np.random.normal(0, self.mutation_scale, self.dimension) + mutant = np.clip(mutant, self.bounds["lb"], self.bounds["ub"]) + else: + mutant = population[idx] + + # Crossover + if np.random.rand() < self.crossover_probability: + partner_idx = np.random.randint(self.population_size) + crossover_point = np.random.randint(self.dimension) + offspring = np.concatenate( + (population[idx][:crossover_point], population[partner_idx][crossover_point:]) + ) + else: + offspring = mutant + + # Selection + offspring_fitness = func(offspring) + if offspring_fitness < fitness[idx]: + new_population.append(offspring) + fitness[idx] = offspring_fitness + + # Update the best solution found + if offspring_fitness < f_opt: + f_opt = offspring_fitness + x_opt = offspring + else: + new_population.append(population[idx]) + + population = np.array(new_population) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearch.py new file mode 100644 index 000000000..9d4bfc3dd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearch.py @@ -0,0 +1,93 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveDiversifiedHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearchOptimizer.py b/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearchOptimizer.py new file mode 100644 index 000000000..af47dd192 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversifiedHarmonySearchOptimizer.py @@ -0,0 +1,106 @@ +import numpy as np + + +class AdaptiveDiversifiedHarmonySearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 # Initial diversification rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return min(1.5, bandwidth * 1.1) + else: + return max(0.5, bandwidth * 0.9) + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.01, self.exploration_rate * 0.95) + else: + return min(0.3, self.exploration_rate * 1.05) + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + if abs(best_fitness - prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveDiversifiedSearch.py b/nevergrad/optimization/lama/AdaptiveDiversifiedSearch.py new file mode 100644 index 000000000..59bef05c6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversifiedSearch.py @@ -0,0 +1,55 @@ +import numpy as np + + +class AdaptiveDiversifiedSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialize solution and function value tracking + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Main optimization loop + for iteration in range(self.budget): + for i in range(population_size): + # Mutation strategy: Adaptive perturbation + perturbation_scale = 0.5 * (1 - iteration / self.budget) # Decreases over time + perturbation = np.random.normal(0, perturbation_scale, self.dim) + candidate = population[i] + perturbation + + # Ensure candidate stays within bounds + candidate = np.clip(candidate, self.lb, self.ub) + + # Evaluate candidate + candidate_fitness = func(candidate) + + # Acceptance condition: Greedy selection + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + # Update the global best solution + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate.copy() + + return self.f_opt, self.x_opt + + +# Example of use (requires a function `func` and bounds setup to run): +# optimizer = AdaptiveDiversifiedSearch(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDiversityDifferentialHybrid.py b/nevergrad/optimization/lama/AdaptiveDiversityDifferentialHybrid.py new file mode 100644 index 000000000..220bebf6b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversityDifferentialHybrid.py @@ -0,0 +1,88 @@ +import numpy as np + + +class AdaptiveDiversityDifferentialHybrid: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=10): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.5 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func, step_size=0.05) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Re-initialize worst individuals to maintain diversity + worst_indices = np.argsort(fitness)[-int(0.3 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversityDifferentialMemeticHybrid.py b/nevergrad/optimization/lama/AdaptiveDiversityDifferentialMemeticHybrid.py new file mode 100644 index 000000000..91edb7b6e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversityDifferentialMemeticHybrid.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveDiversityDifferentialMemeticHybrid: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=10): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = np.clip(self.crossover(population[i], mutant_vector, CR), self.lb, self.ub) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.3 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func, step_size=0.1) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced Diversity Maintenance: Reinitialize 20% worst individuals + if evaluations + int(0.2 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversityMaintainedDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveDiversityMaintainedDifferentialEvolution.py new file mode 100644 index 000000000..51e8a173e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversityMaintainedDifferentialEvolution.py @@ -0,0 +1,97 @@ +import numpy as np + + +class AdaptiveDiversityMaintainedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.8 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.02 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Re-initialize half of the worst individuals to maintain diversity + worst_indices = np.argsort(fitness)[-int(0.5 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 3) == 0 and population_size > 20: + elite_indices = np.argsort(fitness)[: int(0.6 * population_size)] + population = population[elite_indices] + fitness = fitness[elite_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDiversityMaintainingGradientEvolution.py b/nevergrad/optimization/lama/AdaptiveDiversityMaintainingGradientEvolution.py new file mode 100644 index 000000000..584b69e08 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversityMaintainingGradientEvolution.py @@ -0,0 +1,112 @@ +import numpy as np + + +class AdaptiveDiversityMaintainingGradientEvolution: + def __init__(self, budget, initial_population_size=20): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = initial_population_size + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + self.diversity_threshold = 1e-3 + self.elite_rate = 0.2 # Proportion of elite members in selection + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + def elite_selection(population, fitness): + elite_count = int(self.elite_rate * len(fitness)) + sorted_indices = np.argsort(fitness) + elite_indices = sorted_indices[:elite_count] + return [population[i] for i in elite_indices], [fitness[i] for i in elite_indices] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + # Elite selection + elite_pop, elite_fit = elite_selection(population, fitness) + elite_size = len(elite_pop) + + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(elite_size), size=2, replace=False) + parent1, parent2 = elite_pop[parents_idx[0]], elite_pop[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, i) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, i, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worst member of the population with the new child + worst_idx = np.argmax(fitness) + population[worst_idx] = new_x + fitness[worst_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveDiversityMaintainingGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDiversityPSO.py b/nevergrad/optimization/lama/AdaptiveDiversityPSO.py new file mode 100644 index 000000000..84c3e17ba --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDiversityPSO.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptiveDiversityPSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_start=0.9, + omega_end=0.4, + phi_p=0.1, + phi_g=0.1, + beta=0.2, + ): + self.budget = budget + self.population_size = population_size + # Inertia weight decreases linearly from omega_start to omega_end + self.omega_start = omega_start + self.omega_end = omega_end + # Personal and global acceleration coefficients + self.phi_p = phi_p + self.phi_g = phi_g + # Diversity control parameter + self.beta = beta + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_start - ((self.omega_start - self.omega_end) * evaluations / self.budget) + mean_position = np.mean(particles, axis=0) + diversity = np.mean(np.linalg.norm(particles - mean_position, axis=1)) + + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + r_b = np.random.random(self.dim) + + # Update velocities considering diversity + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + + self.beta * r_b * (mean_position - particles[i]) + ) + + # Update positions + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/AdaptiveDolphinPodOptimization.py b/nevergrad/optimization/lama/AdaptiveDolphinPodOptimization.py new file mode 100644 index 000000000..12be4843d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDolphinPodOptimization.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveDolphinPodOptimization: + def __init__(self, budget=1000, num_dolphins=20, num_dimensions=5, alpha=0.1, beta=0.5, gamma=0.1): + self.budget = budget + self.num_dolphins = num_dolphins + self.num_dimensions = num_dimensions + self.alpha = alpha + self.beta = beta + self.gamma = gamma + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_dolphins, self.num_dimensions)) + + def levy_flight(self): + sigma = 1.0 + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1.5) + return step + + def move_dolphin(self, current_position, best_position, previous_best_position, bounds): + step = ( + self.alpha * (best_position - current_position) + + self.beta * (previous_best_position - current_position) + + self.gamma * self.levy_flight() + ) + new_position = current_position + step + new_position = np.clip(new_position, bounds.lb, bounds.ub) + return new_position + + def update_parameters(self, iteration): + self.alpha = max(0.01, self.alpha * (1 - 0.9 * iteration / self.budget)) + self.beta = min(0.9, self.beta + 0.1 * iteration / self.budget) + self.gamma = max(0.01, self.gamma * (1 - 0.8 * iteration / self.budget)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + positions = self.initialize_positions(bounds) + best_position = positions[0].copy() + previous_best_position = best_position.copy() + + for i in range(self.budget): + self.update_parameters(i) + for j in range(self.num_dolphins): + new_position = self.move_dolphin(positions[j], best_position, previous_best_position, bounds) + f_new = func(new_position) + f_current = func(positions[j]) + + if f_new < f_current: + positions[j] = new_position + if f_new < func(best_position): + best_position = new_position.copy() + + previous_best_position = best_position + + self.f_opt = func(best_position) + self.x_opt = best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDualPhaseDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveDualPhaseDifferentialEvolution.py new file mode 100644 index 000000000..9a7443d17 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualPhaseDifferentialEvolution.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveDualPhaseDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.inf + + eval_count = 0 + phase_switch_threshold = self.budget // 2 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = initial_F * (1 - eval_count / budget) + adaptive_CR = initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if eval_count < phase_switch_threshold: + candidate = trial # Exploration phase, use the trial vector + else: + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + # Update population for the next iteration + population = new_population + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveDualPhaseDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDualPhaseEvolutionarySwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveDualPhaseEvolutionarySwarmOptimization.py new file mode 100644 index 000000000..d6f9e8465 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualPhaseEvolutionarySwarmOptimization.py @@ -0,0 +1,146 @@ +import numpy as np + + +class AdaptiveDualPhaseEvolutionarySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Tuned for balanced exploration + self.initial_F = 0.8 # Tuned for effective mutation + self.initial_CR = 0.9 # Tuned for effective crossover + self.elite_rate = 0.1 # Elite rate + self.local_search_rate = 0.3 # Local search probability + self.memory_size = 20 # Memory size for adaptive parameters + self.w = 0.7 # Inertia weight for velocity update + self.c1 = 1.5 # Cognitive component + self.c2 = 1.5 # Social component + self.adaptive_phase_ratio = 0.5 # Ratio of budget for evolutionary phase + self.alpha = 0.6 # Differential weight for local search + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Slightly increased for effective local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveDualPhaseEvolutionarySwarmOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDualPhaseOptimizationWithDynamicParameterControl.py b/nevergrad/optimization/lama/AdaptiveDualPhaseOptimizationWithDynamicParameterControl.py new file mode 100644 index 000000000..9fb518936 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualPhaseOptimizationWithDynamicParameterControl.py @@ -0,0 +1,146 @@ +import numpy as np + + +class AdaptiveDualPhaseOptimizationWithDynamicParameterControl: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Adjusted population size for balance + self.initial_F = 0.8 # Initial mutation factor + self.initial_CR = 0.8 # Initial crossover rate + self.elite_rate = 0.1 # Lower elite preservation rate for more exploration + self.local_search_rate = 0.3 # Local search rate + self.memory_size = 20 # Memory size for parameter adaptation + self.w = 0.7 # Inertia weight for swarm phase + self.c1 = 1.5 # Cognitive component + self.c2 = 1.7 # Social component + self.adaptive_phase_ratio = 0.5 # Equal emphasis on both phases + self.alpha = 0.6 # Differential weight for faster convergence + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Smaller local search step size for fine exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveDualPhaseOptimizationWithDynamicParameterControl(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveDualPhaseStrategy.py b/nevergrad/optimization/lama/AdaptiveDualPhaseStrategy.py new file mode 100644 index 000000000..501471337 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualPhaseStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdaptiveDualPhaseStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using an additional differential vector + d = np.random.choice(idxs, 1, replace=False)[0] + mutant = population[a] + self.F * ( + population[b] - population[c] + population[best_idx] - population[d] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR with more aggressive changes + scale = iteration / total_iterations + self.F = np.clip(0.75 * np.sin(1.5 * np.pi * scale) + 0.75, 0.1, 1) + self.CR = np.clip(0.75 * np.cos(1.5 * np.pi * scale) + 0.75, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveDualPopulationDE_LS.py b/nevergrad/optimization/lama/AdaptiveDualPopulationDE_LS.py new file mode 100644 index 000000000..160e5f9a3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualPopulationDE_LS.py @@ -0,0 +1,95 @@ +import numpy as np + + +class AdaptiveDualPopulationDE_LS: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.mutation_factor = 0.5 + self.crossover_prob = 0.9 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + while self.budget > 0: + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + self.mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + if self.budget % 100 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Update archive + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + pop = np.array(new_pop) + + # Local search enhancement + if self.budget > 0: + best_ind = pop[np.argmin(fitness)] + local_search_result = self.local_search(func, best_ind, lower_bound, upper_bound) + pop[np.argmin(fitness)] = local_search_result + + return self.f_opt, self.x_opt + + def local_search(self, func, best_ind, lower_bound, upper_bound): + local_steps = 10 + step_size = 0.1 + + for _ in range(local_steps): + if self.budget <= 0: + break + + perturbed_ind = best_ind + np.random.uniform(-step_size, step_size, self.dim) + perturbed_ind = np.clip(perturbed_ind, lower_bound, upper_bound) + f_perturbed = func(perturbed_ind) + self.budget -= 1 + + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed_ind + best_ind = perturbed_ind + + return best_ind diff --git a/nevergrad/optimization/lama/AdaptiveDualStrategyOptimizer.py b/nevergrad/optimization/lama/AdaptiveDualStrategyOptimizer.py new file mode 100644 index 000000000..49709de2d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDualStrategyOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptiveDualStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + base_mutation_factor = 0.5 # Starting mutation factor + mutation_factor_increase = 0.05 # Increment in mutation factor + crossover_rate = 0.7 # Crossover rate + elite_size = 5 # Number of elite individuals to preserve + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + mutation_factor = base_mutation_factor + mutation_factor_increase * (evaluations / self.budget) + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveDynamicDE.py b/nevergrad/optimization/lama/AdaptiveDynamicDE.py new file mode 100644 index 000000000..7e9bcf1a3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicDE.py @@ -0,0 +1,95 @@ +import numpy as np + + +class AdaptiveDynamicDE: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 100 # Number of generations to consider for stagnation + stagnation_counter = 0 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Mutation and crossover + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Adaptive parameter control based on success rates + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(1.0, F * 1.2) + Cr = max(0.1, Cr * 0.9) + else: + F = max(0.4, F * 0.8) + Cr = min(1.0, Cr * 1.1) + + # Enhanced restart mechanism with diversity consideration + if stagnation_counter > stagnation_threshold: + # Re-initialize population if stuck + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveDynamicDifferentialEvolution.py new file mode 100644 index 000000000..207a8098a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveDynamicDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveDynamicDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseEnhancedStrategyV20.py b/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseEnhancedStrategyV20.py new file mode 100644 index 000000000..3e772be3b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseEnhancedStrategyV20.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdaptiveDynamicDualPhaseEnhancedStrategyV20: + def __init__(self, budget, dimension=5, population_size=100, F_base=0.5, CR_base=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy: introduces additional diversity by considering more vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + # Adaptive crossover rate according to the performance of individuals + CR = self.CR * (1 + (np.std(target) / (np.mean(target) + 1e-10))) + crossover_mask = np.random.rand(self.dimension) < CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adjustment of parameters using a sigmoid function + scale = iteration / total_iterations + logistic = 1 / (1 + np.exp(-10 * (scale - 0.5))) + self.F = np.clip(self.F_base + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(self.CR_base + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + self.adjust_parameters( + iteration, switch_point if evaluations < switch_point else self.budget - switch_point + ) + phase = 1 if evaluations < switch_point else 2 + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + return fitnesses[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseStrategyV11.py b/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseStrategyV11.py new file mode 100644 index 000000000..545a0e3d6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicDualPhaseStrategyV11.py @@ -0,0 +1,83 @@ +import numpy as np + + +class AdaptiveDynamicDualPhaseStrategyV11: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Standard mutation strategy for phase 1 + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using more vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic parameter adjustment using a sigmoid-based function to emphasize middle-phase aggressiveness + scale = iteration / total_iterations + scale = 1 / (1 + np.exp(-10 * (scale - 0.5))) # Sigmoid function for smoother transition + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveDynamicEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveDynamicEvolutionStrategy.py new file mode 100644 index 000000000..ca64945ef --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicEvolutionStrategy.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveDynamicEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 300 + mutation_rate = 0.1 + mutation_scale = 0.3 + crossover_rate = 0.6 + elite_size = int(0.1 * population_size) + + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + # Safe roulette wheel selection using max fitness scaling + max_fitness = np.max(fitness) + adjusted_fitness = max_fitness - fitness + 1e-9 # Adding small constant to avoid zero probability + probabilities = adjusted_fitness / adjusted_fitness.sum() + + chosen_parents = np.random.choice( + population_size, size=population_size - elite_size, p=probabilities + ) + parents = population[chosen_parents] + + # Crossover and mutation + np.random.shuffle(parents) + for i in range(0, len(parents) - 1, 2): + parent1, parent2 = parents[i], parents[i + 1] + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child1 = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + child2 = np.concatenate([parent2[:cross_point], parent1[cross_point:]]) + else: + child1, child2 = parent1.copy(), parent2.copy() + + new_population.extend([child1, child2]) + + # Mutation in the new population + new_population = np.array(new_population) + mutation_masks = np.random.rand(len(new_population), self.dim) < mutation_rate + mutations = np.random.normal(0, mutation_scale, (len(new_population), self.dim)) + new_population = np.clip(new_population + mutation_masks * mutations, self.lb, self.ub) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += len(new_population) + + # Replace the worst with new individuals + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + # Update best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithm.py b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithm.py new file mode 100644 index 000000000..83d2bafd3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithm.py @@ -0,0 +1,111 @@ +import numpy as np + + +class AdaptiveDynamicExplorationExploitationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 50 + self.F = np.random.uniform(0.5, 0.9) # Differential weight + self.CR = np.random.uniform(0.1, 0.9) # Crossover probability + self.local_search_chance = np.random.uniform(0.1, 0.3) # Probability to perform local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-5 # Threshold to switch between exploration and exploitation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_F_CR(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + step_size = np.random.uniform(-0.1, 0.1, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_F_CR(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Switch to exploitation: increase local search chance + self.local_search_chance = np.random.uniform(0.2, 0.4) + self.F = np.random.uniform(0.4, 0.6) + self.CR = np.random.uniform(0.7, 0.9) + else: + # Switch to exploration: decrease local search chance + self.local_search_chance = np.random.uniform(0.1, 0.2) + self.F = np.random.uniform(0.6, 0.9) + self.CR = np.random.uniform(0.1, 0.3) + + # Increase population diversity by re-initializing some individuals + reinit_percentage = 0.1 + num_reinit = int(reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) diff --git a/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV2.py b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV2.py new file mode 100644 index 000000000..3cf259fb1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV2.py @@ -0,0 +1,150 @@ +import numpy as np + + +class AdaptiveDynamicExplorationExploitationAlgorithmV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + self.global_mutation_factor = 0.5 + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + self.global_search_intensity = 10 + + # New parameters + self.local_search_radius = 0.1 + self.global_search_radius = 0.5 + self.reduction_factor = 0.98 # To reduce the mutation factor over time + self.mutation_scale = 0.1 # To scale the random mutations + self.adaptive_crossover_rate = 0.5 # To adjust crossover probability based on diversity + self.mutation_adjustment_factor = 0.1 # Dynamic adjustment for mutation rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.diversity_cycle == 0: + self.adaptive_population_control(population, fitness, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, self.local_search_radius, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_control(self, population, fitness, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * (1 + 0.1 * remaining_budget_ratio) + self.mutation_factor = self.mutation_factor * (1 + 0.1 * remaining_budget_ratio) + + # New adaptation strategies + self.crossover_probability *= self.adaptive_crossover_rate + self.mutation_factor *= self.reduction_factor + self.global_mutation_factor *= self.reduction_factor + self.local_search_radius *= self.reduction_factor + + if diversity < self.diversity_threshold / 2 and remaining_budget_ratio > 0.5: + self.global_search_reset(population, fitness, evaluations) + + # Dynamically adjust mutation rate based on performance + if evaluations % self.diversity_cycle == 0: + best_fitness = np.min(fitness) + if best_fitness < self.f_opt: + self.mutation_factor *= 1 - self.mutation_adjustment_factor + else: + self.mutation_factor *= 1 + self.mutation_adjustment_factor + + def global_search_reset(self, population, fitness, evaluations): + global_search_population = np.random.uniform( + self.lb, self.ub, (self.global_search_intensity, self.dim) + ) + + for ind in global_search_population: + f_ind = func(ind) + evaluations += 1 + if f_ind < self.f_opt: + self.f_opt = f_ind + self.x_opt = ind + + population[: self.global_search_intensity] = global_search_population diff --git a/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV3.py b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV3.py new file mode 100644 index 000000000..c1a43d5e7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicExplorationExploitationAlgorithmV3.py @@ -0,0 +1,161 @@ +import numpy as np + + +class AdaptiveDynamicExplorationExploitationAlgorithmV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + self.global_search_intensity = 10 + + # Enhanced parameters + self.local_search_radius = 0.1 + self.global_search_radius = 0.5 + self.reduction_factor = 0.98 + self.mutation_scale = 0.1 + self.adaptive_crossover_rate = 0.5 + self.mutation_adjustment_factor = 0.1 + + # Further enhancements + self.exploration_factor = 1.5 + self.exploitation_factor = 0.5 + self.dynamic_adjustment_interval = 20 + self.search_balance = 0.5 # Controls the balance between exploration and exploitation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + elite_fitness = np.array([func(ind) for ind in elite_population]) + fitness = np.hstack((fitness, elite_fitness)) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.dynamic_adjustment_interval == 0: + self.adaptive_population_control(population, fitness, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, self.local_search_radius, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_control(self, population, fitness, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * ( + 1 + self.search_balance * remaining_budget_ratio + ) + self.mutation_factor = self.mutation_factor * (1 + self.search_balance * remaining_budget_ratio) + + # Enhanced adaptation strategies + self.crossover_probability *= self.adaptive_crossover_rate + self.mutation_factor *= self.reduction_factor + self.local_search_radius *= self.reduction_factor + + if diversity < self.diversity_threshold / 2 and remaining_budget_ratio > 0.5: + self.global_search_reset(population, fitness, evaluations) + + # Dynamic mutation adjustment + if evaluations % self.dynamic_adjustment_interval == 0: + best_fitness = np.min(fitness) + if best_fitness < self.f_opt: + self.mutation_factor *= 1 - self.mutation_adjustment_factor + self.local_search_radius *= 1 - self.mutation_adjustment_factor + else: + self.mutation_factor *= 1 + self.mutation_adjustment_factor + self.local_search_radius *= 1 + self.mutation_adjustment_factor + + def global_search_reset(self, population, fitness, evaluations): + global_search_population = np.random.uniform( + self.lb, self.ub, (self.global_search_intensity, self.dim) + ) + + for ind in global_search_population: + f_ind = func(ind) + evaluations += 1 + if f_ind < self.f_opt: + self.f_opt = f_ind + self.x_opt = ind + + for i in range(self.global_search_intensity): + population[i] = global_search_population[i] diff --git a/nevergrad/optimization/lama/AdaptiveDynamicExplorationOptimization.py b/nevergrad/optimization/lama/AdaptiveDynamicExplorationOptimization.py new file mode 100644 index 000000000..fd48e8b42 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class AdaptiveDynamicExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 100 + + # New parameters for dynamic exploration + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveDynamicExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..3808e4b47 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithm.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveDynamicFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithmRedesigned.py b/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithmRedesigned.py new file mode 100644 index 000000000..68928c86f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicFireworkAlgorithmRedesigned.py @@ -0,0 +1,108 @@ +import numpy as np + + +class AdaptiveDynamicFireworkAlgorithmRedesigned: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicFireworkDifferentialEvolutionV4.py b/nevergrad/optimization/lama/AdaptiveDynamicFireworkDifferentialEvolutionV4.py new file mode 100644 index 000000000..504e73043 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicFireworkDifferentialEvolutionV4.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveDynamicFireworkDifferentialEvolutionV4: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, f_init=0.5, f_final=0.2, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) ** 0.5 + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) ** 0.5 + return f, cr + + def adapt_n_sparks(self, iteration): + n_sparks = min(30, 3 + int(12 * (iteration / self.budget) ** 2)) + return n_sparks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + n_sparks = self.adapt_n_sparks(i) + self.n_sparks = n_sparks + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveDynamicHarmonySearch.py new file mode 100644 index 000000000..ab7f99615 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicHarmonySearch.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveDynamicHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizationV2.py new file mode 100644 index 000000000..f674f76cf --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizationV2.py @@ -0,0 +1,163 @@ +import numpy as np + + +class AdaptiveDynamicHybridOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 15 # Reduced maximum exploration cycles for quicker reaction + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.85 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveDynamicHybridOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizer.py new file mode 100644 index 000000000..21df6d53f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicHybridOptimizer.py @@ -0,0 +1,126 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveDynamicHybridOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.6 + self.F = 0.8 + self.CR = 0.6 + self.memory_size = 5 + self.strategy_switch_threshold = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.2 + c2 = 1.2 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdaptiveDynamicHybridOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveDynamicMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/AdaptiveDynamicMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..da60ed0b0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdaptiveDynamicMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=50): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.8 + np.random.rand() * 0.2 # Modified mutation factor range + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.5 * ((iteration / max_iterations) ** 0.5) + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + return crossover_rate, learning_rate, memetic_probability + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..70b8788de --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,155 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 30 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..ae4c2b76e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveDynamicQuantumSwarmOptimization.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.6, + cognitive_weight=1.7, + social_weight=2.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters adaptively based on performance + if iteration % 1000 == 0 and iteration > 0: + best_value_avg = np.mean(self.personal_best_values) + global_improvement = abs(self.global_best_value - best_value_avg) / self.global_best_value + if global_improvement < 0.01: + self.inertia_weight *= 0.9 + self.cognitive_weight *= 1.1 + self.social_weight *= 1.1 + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/AdaptiveEliteCovarianceMatrixMemeticSearch.py b/nevergrad/optimization/lama/AdaptiveEliteCovarianceMatrixMemeticSearch.py new file mode 100644 index 000000000..272fabb7c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteCovarianceMatrixMemeticSearch.py @@ -0,0 +1,114 @@ +import numpy as np + + +class AdaptiveEliteCovarianceMatrixMemeticSearch: + def __init__( + self, + budget, + population_size=50, + memetic_rate=0.5, + elite_fraction=0.2, + initial_learning_rate=0.01, + initial_sigma=0.3, + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.elite_fraction = elite_fraction + self.initial_learning_rate = initial_learning_rate + self.initial_sigma = initial_sigma + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_covariance_matrix_adaptation(self, func, pop, scores, mean, C, sigma): + n_samples = len(pop) + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, learning rate, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + learning_rate = self.initial_learning_rate + sigma = self.initial_sigma + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.adaptive_covariance_matrix_adaptation(func, pop, scores, mean, C, sigma) + + # Perform memetic local search + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + pop[i], scores[i] = self.local_search(func, pop[i], scores[i], learning_rate) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, learning rate, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + # Adaptive learning rate and sigma + learning_rate = self.initial_learning_rate * (1 - iteration / max_iterations) + sigma = self.initial_sigma * (1 - iteration / max_iterations) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveEliteDifferentialEvolution.py new file mode 100644 index 000000000..5eef2e3f6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteDifferentialEvolution.py @@ -0,0 +1,122 @@ +import numpy as np + + +class AdaptiveEliteDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 # More reasonable initial population size + self.F_min = 0.5 + self.F_max = 0.9 + self.CR_min = 0.7 + self.CR_max = 0.9 + self.local_search_chance = 0.2 + self.elite_ratio = 0.1 + self.diversity_threshold = 0.05 + self.cauchy_step_scale = 0.02 + self.gaussian_step_scale = 0.005 + self.reinitialization_rate = 0.1 + self.hyper_heuristic_probability = 0.5 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + self.adaptive_parameters_adjustment(evaluations) + + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(20): # Fewer local search iterations for more efficient exploration + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_parameters_adjustment(self, evaluations): + progress_ratio = evaluations / self.budget + self.F = self.F_min + (self.F_max - self.F_min) * progress_ratio + self.CR = self.CR_min + (self.CR_max - self.CR_min) * progress_ratio + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdaptiveEliteDiverseHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveEliteDiverseHybridOptimizer.py new file mode 100644 index 000000000..44ff9c973 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteDiverseHybridOptimizer.py @@ -0,0 +1,136 @@ +import numpy as np + + +class AdaptiveEliteDiverseHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Increased population size for better diversity + self.initial_F = 0.8 # Adjusted to promote higher mutation step + self.initial_CR = 0.9 # High crossover rate to maintain genetic diversity + self.c1 = 1.4 # Increased cognitive coefficient for personal best attraction + self.c2 = 1.4 # Increased social coefficient for global best attraction + self.w = 0.6 # Increased inertia weight for maintaining momentum + self.elite_fraction = 0.15 # Reduced to focus on more varied solutions + self.diversity_threshold = 1e-5 # Higher threshold to reinitialize earlier + self.tau1 = 0.1 # Parameter adaptation probability + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 20, self.budget - evaluations + ) # Increased iterations for better local search + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.01, bounds.lb, bounds.ub + ) # Reduced perturbation for precision + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_LS_v2.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_LS_v2.py new file mode 100644 index 000000000..709cf0967 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_LS_v2.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveEliteGuidedDE_LS_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.archive = [] + self.local_search_prob = 0.3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local Search mechanism + for idx in elite_indices: + if np.random.rand() < self.local_search_prob: + local_search_ind = pop[idx] + np.random.normal(0, 0.1, self.dim) + local_search_ind = np.clip(local_search_ind, lower_bound, upper_bound) + f_local = func(local_search_ind) + self.budget -= 1 + if f_local < fitness[idx]: + pop[idx] = local_search_ind + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = local_search_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_v2.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_v2.py new file mode 100644 index 000000000..9a375588d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedDE_v2.py @@ -0,0 +1,113 @@ +import numpy as np + + +class AdaptiveEliteGuidedDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.restart_threshold = 0.01 + self.max_generations = int(self.budget / self.pop_size) + + def __call__(self, func): + def initialize_population(): + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + return pop, fitness + + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + pop, fitness = initialize_population() + self.budget -= self.pop_size + + generation = 0 + best_fitness_history = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / self.max_generations) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Track the best fitness value over generations + best_fitness_history.append(np.min(fitness)) + + # Check if a restart is needed + if len(best_fitness_history) > 10: + recent_improvement = np.abs(best_fitness_history[-10] - best_fitness_history[-1]) + if recent_improvement < self.restart_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + continue + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE.py new file mode 100644 index 000000000..590674ded --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveEliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.local_search_rate = 0.3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local Search on Elite Solutions + for idx in elite_indices: + if np.random.rand() < self.local_search_rate: + local_search_ind = pop[idx] + np.random.normal(0, 0.1, self.dim) + local_search_ind = np.clip(local_search_ind, lower_bound, upper_bound) + f_local = func(local_search_ind) + self.budget -= 1 + if f_local < fitness[idx]: + pop[idx] = local_search_ind + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = local_search_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v3.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v3.py new file mode 100644 index 000000000..736c3aeab --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v3.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveEliteGuidedMutationDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.stagnation_threshold = 20 + self.local_search_prob = 0.3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + new_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in new_pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v4.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v4.py new file mode 100644 index 000000000..b19034ae1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedMutationDE_v4.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdaptiveEliteGuidedMutationDE_v4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Mutation + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteGuidedRestartDE.py b/nevergrad/optimization/lama/AdaptiveEliteGuidedRestartDE.py new file mode 100644 index 000000000..fbd589cca --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteGuidedRestartDE.py @@ -0,0 +1,121 @@ +import numpy as np + + +class AdaptiveEliteGuidedRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.stagnation_threshold = 50 # Number of iterations to consider as stagnation + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive = new_pop + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local Search mechanism + for idx in elite_indices: + if np.random.rand() < self.local_search_prob: + local_search_ind = pop[idx] + np.random.normal(0, 0.1, self.dim) + local_search_ind = np.clip(local_search_ind, lower_bound, upper_bound) + f_local = func(local_search_ind) + self.budget -= 1 + if f_local < fitness[idx]: + pop[idx] = local_search_ind + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = local_search_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Check for stagnation and restart if needed + if best_fitness == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + best_fitness = self.f_opt + + if stagnation_counter >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveEliteHybridOptimizer.py new file mode 100644 index 000000000..769799dd8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteHybridOptimizer.py @@ -0,0 +1,126 @@ +import numpy as np + + +class AdaptiveEliteHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.min_pop_size = 10 + self.max_pop_size = 100 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 2.0 + self.c2 = 2.0 + self.w = 0.7 + self.elite_fraction = 0.1 + self.diversity_threshold = 0.1 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.crossover_rate = 0.9 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elitism + elite_count = max(1, int(self.elite_fraction * current_pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + # Check for diversity + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveEliteMemeticDifferentialEvolution.py new file mode 100644 index 000000000..1b297491f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteMemeticDifferentialEvolution.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveEliteMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search more aggressively + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=10 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reinitialize worst individuals more frequently + if evaluations + int(0.20 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.20 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Elite Preservation with larger perturbations + elite_size = int(0.2 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.05, 0.05, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizer.py new file mode 100644 index 000000000..c7a991a46 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizer.py @@ -0,0 +1,107 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveEliteMemeticOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + while eval_count < self.budget: + new_population = [] + + for i in range(self.population_size): + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + if not np.any(crossover_mask): + trial[np.random.randint(0, self.dim)] = mutant[np.random.randint(0, self.dim)] + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + else: + new_population.append(population[i]) + + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: self.population_size // 4] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + if eval_count < self.budget: + population, fitness, eval_count = self.adaptive_local_refinement( + population, fitness, func, eval_count + ) + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + def adaptive_local_refinement(self, population, fitness, func, eval_count): + refined_population = [] + refined_fitness = fitness.copy() + for i, individual in enumerate(population): + if np.random.rand() < 0.1: # 10% chance to refine + res = self.local_search(func, individual) + eval_count += res[2]["nit"] + if res[1] < refined_fitness[i]: + refined_population.append(res[0]) + refined_fitness[i] = res[1] + else: + refined_population.append(individual) + else: + refined_population.append(individual) + if eval_count >= self.budget: + break + return np.array(refined_population), refined_fitness, eval_count diff --git a/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV5.py b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV5.py new file mode 100644 index 000000000..8dab52c65 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV5.py @@ -0,0 +1,139 @@ +import numpy as np + + +class AdaptiveEliteMemeticOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.memory_size = 30 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 100 + self.elitism_rate = 0.3 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.9 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV6.py b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV6.py new file mode 100644 index 000000000..9b6d81446 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteMemeticOptimizerV6.py @@ -0,0 +1,144 @@ +import numpy as np + + +class AdaptiveEliteMemeticOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.memory_size = 30 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 50 # Reduced local search iterations for efficiency + self.elitism_rate = 0.2 # Reduced elitism rate for better exploration + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.5 # Reduced local search probability for better balance + self.alpha = 0.01 + self.beta = 0.01 # Learning rate for adaptive parameters + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = (1 - self.beta) * self.memory_F[ + self.memory_index + ] + self.beta * F + self.memory_CR[self.memory_index] = (1 - self.beta) * self.memory_CR[ + self.memory_index + ] + self.beta * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEliteMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveEliteMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..bc318f6a2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEliteMultiStrategyDifferentialEvolution.py @@ -0,0 +1,151 @@ +import numpy as np + + +class AdaptiveEliteMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + elite_idx = np.argmin(fitness) + elite = population[elite_idx] + elite_fitness = fitness[elite_idx] + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + self.mutation_factor = np.clip( + self.mutation_factor * (1.1 if success_rate > 0.2 else 0.9), 0.4, 1.0 + ) + self.crossover_rate = np.clip( + self.crossover_rate * (1.05 if success_rate > 0.2 else 0.95), 0.6, 1.0 + ) + + if elite_fitness < min(fitness): + weakest_idx = np.argmax(fitness) + population[weakest_idx] = elite + fitness[weakest_idx] = elite_fitness + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveEliteMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveElitistDE.py b/nevergrad/optimization/lama/AdaptiveElitistDE.py new file mode 100644 index 000000000..e18dca75a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveElitistDE.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdaptiveElitistDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.5 + self.crossover_prob = 0.9 + self.elitism_rate = 0.15 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Combine elite and new population + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Increment generation count + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveElitistDE_v3.py b/nevergrad/optimization/lama/AdaptiveElitistDE_v3.py new file mode 100644 index 000000000..6562e5532 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveElitistDE_v3.py @@ -0,0 +1,128 @@ +import numpy as np + + +class AdaptiveElitistDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.local_search_prob = 0.3 + self.stagnation_threshold = 20 + self.stagnation_counter = 0 + self.best_fitness_history = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Check for stagnation and restart if needed + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + self.best_fitness_history.append(self.f_opt) + + if self.stagnation_counter >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveElitistMutationDE.py b/nevergrad/optimization/lama/AdaptiveElitistMutationDE.py new file mode 100644 index 000000000..cfb6b2d35 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveElitistMutationDE.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveElitistMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.learning_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elitist mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Combine new population with elites + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Learning step: Adjust population towards elites + for i in range(self.pop_size): + if np.random.rand() < self.learning_rate: + closest_elite_idx = np.argmin(np.linalg.norm(elite_pop - pop[i], axis=1)) + pop[i] += (elite_pop[closest_elite_idx] - pop[i]) * np.random.rand() + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveElitistPopulationStrategy.py b/nevergrad/optimization/lama/AdaptiveElitistPopulationStrategy.py new file mode 100644 index 000000000..3f25e8f78 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveElitistPopulationStrategy.py @@ -0,0 +1,58 @@ +import numpy as np + + +class AdaptiveElitistPopulationStrategy: + def __init__(self, budget, dimension=5, population_size=50, elite_fraction=0.2, mutation_intensity=0.05): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Mutation intensity factor + + def __call__(self, func): + # Initialize the population within bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Identify elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if i < self.elite_count: + # Elite individuals are carried over unchanged + new_population[i] = population[elite_indices[i]] + else: + # Generate new individuals by mutating elite individuals + elite = elites[np.random.randint(0, self.elite_count)] + mutation = np.random.normal(0, self.adaptive_mutation_scale(evaluations), self.dimension) + new_individual = np.clip(elite + mutation, -5.0, 5.0) + new_population[i] = new_individual + + # Evaluate new individual's fitness + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutation_scale(self, evaluations): + # Decrease mutation scale as the number of evaluations increases + return self.mutation_intensity * (1 - (evaluations / self.budget)) diff --git a/nevergrad/optimization/lama/AdaptiveElitistQuasiRandomDEGradientAnnealing.py b/nevergrad/optimization/lama/AdaptiveElitistQuasiRandomDEGradientAnnealing.py new file mode 100644 index 000000000..13845b26d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveElitistQuasiRandomDEGradientAnnealing.py @@ -0,0 +1,145 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveElitistQuasiRandomDEGradientAnnealing: + def __init__(self, budget, population_size=30, initial_crossover_rate=0.7, initial_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.initial_crossover_rate = initial_crossover_rate + self.initial_mutation_factor = initial_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.elitism_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + threshold = 1e-3 + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + crossover_rate = self.initial_crossover_rate + mutation_factor = self.initial_mutation_factor + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution with Elitism + elite_count = int(self.elitism_rate * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + # Cool down the temperature + self.temperature *= self.cooling_rate + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adaptive parameter control + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + crossover_rate *= 1.05 + mutation_factor = min(1.0, mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + crossover_rate *= 0.95 + mutation_factor = max(0.5, mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + crossover_rate = np.clip(crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveElitistQuasiRandomDEGradientAnnealing(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm.py new file mode 100644 index 000000000..ce91f3ad7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm: + def __init__( + self, + budget=10000, + n_fireworks=50, + n_sparks=15, + alpha_min=0.1, + alpha_max=0.5, + f_min=0.5, + f_max=1.0, + cr_min=0.5, + cr_max=1.0, + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = np.random.uniform(self.alpha_min, self.alpha_max) + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + f = np.random.uniform(self.f_min, self.f_max) + cr = np.random.uniform(self.cr_min, self.cr_max) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch.py b/nevergrad/optimization/lama/AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch.py new file mode 100644 index 000000000..fa25700c6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch.py @@ -0,0 +1,118 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + colony_size=15, + max_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.colony_size = colony_size + self.max_trials = max_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.max_trials}, + ) + return res.x + + def bee_colony_optimization(self, x, func): + best_x = np.copy(x) + best_fitness = func(x) + + for _ in range(self.colony_size): + new_x = self.explosion_operator(x, func, np.random.uniform(0.1, 0.5)) + new_x = self.local_search(new_x, func) + new_fitness = func(new_x) + + if new_fitness < best_fitness: + best_x = np.copy(new_x) + best_fitness = new_fitness + + return best_x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + else: + new_spark = self.bee_colony_optimization(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch.py b/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch.py new file mode 100644 index 000000000..87070cee2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveEnhancedEvolutionaryFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + self.best_firework = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.995) # Adjusted sigma update rule + return self.sigma + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + self.best_firework = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch_v2.py b/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch_v2.py new file mode 100644 index 000000000..4cb2cfb97 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedEvolutionaryFireworksSearch_v2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveEnhancedEvolutionaryFireworksSearch_v2: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma *= 0.995 # Updated sigma update rule + return max(0.1, self.sigma) + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedExplorationGravitationalSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveEnhancedExplorationGravitationalSwarmOptimization.py new file mode 100644 index 000000000..92b42635c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedExplorationGravitationalSwarmOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptiveEnhancedExplorationGravitationalSwarmOptimization: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(10000): # Increase the number of optimization runs to 10000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20000): # Increase the number of iterations within each optimization run to 20000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithm.py new file mode 100644 index 000000000..927ba1558 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithm.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveEnhancedFireworkAlgorithm: + def __init__( + self, + budget=10000, + n_fireworks=20, + n_sparks=10, + initial_alpha=0.5, + initial_beta=2.0, + initial_mutation_rate=0.1, + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = initial_alpha + self.beta = initial_beta + self.mutation_rate = initial_mutation_rate + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.iteration = 0 + + def initialize_fireworks(self, func): + self.fireworks = np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + self.firework_fitness = np.array([func(x) for x in self.fireworks]) + + def explode_firework(self, firework, func): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + sparks_fitness = np.array([func(x) for x in sparks]) + return sparks, sparks_fitness + + def apply_mutation(self, sparks): + mutated_sparks = sparks + np.random.normal(0, self.mutation_rate, sparks.shape) + return np.clip(mutated_sparks, self.bounds[0], self.bounds[1]) + + def update_fireworks(self, sparks, sparks_fitness): + for i in range(self.n_fireworks): + if i < len(sparks) and sparks_fitness[i] < self.firework_fitness[i]: + self.fireworks[i] = sparks[i] + self.firework_fitness[i] = sparks_fitness[i] + + def adapt_parameters(self): + self.alpha = max(self.alpha * 0.95, 0.01) + self.beta = max(self.beta * 0.9, 1.0) + self.mutation_rate = max(self.mutation_rate * 0.9, 0.01) + + def local_search(self, func): + for i in range(self.n_fireworks): + best_firework = self.fireworks[i].copy() + best_fitness = self.firework_fitness[i] + + for _ in range(3): + new_firework = self.fireworks[i] + np.random.normal(0, 0.1, self.dim) + new_fitness = func(new_firework) + + if new_fitness < best_fitness: + best_firework = new_firework + best_fitness = new_fitness + + self.fireworks[i] = best_firework + self.firework_fitness[i] = best_fitness + + def update_iter(self): + self.iteration += 1 + + def adjust_parameters(self): + if self.iteration % 200 == 0: + self.adapt_parameters() + + def __call__(self, func): + self.initialize_fireworks(func) + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks, sparks_fitness = self.explode_firework(self.fireworks[i], func) + mutated_sparks = self.apply_mutation(sparks) + self.update_fireworks(mutated_sparks, sparks_fitness) + + self.local_search(func) + self.update_iter() + self.adjust_parameters() + + best_idx = np.argmin(self.firework_fitness) + if self.firework_fitness[best_idx] < self.f_opt: + self.f_opt = self.firework_fitness[best_idx] + self.x_opt = self.fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py b/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py new file mode 100644 index 000000000..0b6496082 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py @@ -0,0 +1,111 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveEnhancedFireworkAlgorithmWithLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.best_individual = x + self.best_fitness = func(self.best_individual) + + def adaptive_local_search(self, func): + improved = False + for i in range(self.population_size): + current_fitness = func(self.fireworks[i][0]) + new_individual = self.local_search(self.fireworks[i][0], func) + new_fitness = func(new_individual) + if new_fitness < current_fitness: + self.fireworks[i] = (np.copy(new_individual), 0) + improved = True + + return improved + + def __call__(self, func): + self.run_firework_algorithm(func) + + improved = self.adaptive_local_search(func) + if improved: + self.run_firework_algorithm(func) + + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGradientGuidedHybridPSO.py b/nevergrad/optimization/lama/AdaptiveEnhancedGradientGuidedHybridPSO.py new file mode 100644 index 000000000..946f43f20 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGradientGuidedHybridPSO.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveEnhancedGradientGuidedHybridPSO: + def __init__( + self, + budget=10000, + population_size=50, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.0, + social_weight=1.8, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Adaptive inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + # Adaptive gradient-guided component + gradient_guided_component = ( + 0.1 + * (global_best_position - particles[i]) + / (1 + np.sqrt(np.sum((global_best_position - particles[i]) ** 2))) + ) + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + gradient_guided_component + ) # Adaptive hybridization with gradient direction + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..81c64b223 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligence.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligence: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-self.alpha * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha * (1.0 - self.delta) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.beta_max = self.update_beta(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV18.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV18.py new file mode 100644 index 000000000..5c38fd5a8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV18.py @@ -0,0 +1,115 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligenceV18: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def check_premature_convergence(self, f_vals): + sorted_vals = np.sort(f_vals) + diff = np.diff(sorted_vals) + quartile = np.percentile(diff, 75) # 75th percentile of the differences + return quartile < self.epsilon + + def adaptive_population_size(self, func, t): + return min(max(int(20 + 0.1 * t), 10), 50) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population_size = self.population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + if t % 100 == 0: + population_size = self.adaptive_population_size(func, t) + + if population_size != self.population_size: + self.population_size = population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + if self.check_premature_convergence(f_vals): + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV2.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV2.py new file mode 100644 index 000000000..bbfd2a05f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV2.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligenceV2: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-self.alpha * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha * (1.0 - self.delta) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.beta_max = self.update_beta(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV22.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV22.py new file mode 100644 index 000000000..8852118b7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV22.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligenceV22: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = 1.0 + best_x_opt = None + best_std = np.Inf + + for _ in range(50): # Increase the number of optimization runs to 50 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20): # Increase the number of iterations within each optimization run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV29.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV29.py new file mode 100644 index 000000000..4e362fa63 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV29.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligenceV29: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = 1.0 + best_x_opt = None + best_std = np.Inf + + for _ in range(200): # Increased the number of optimization runs to 200 for better exploration + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(50): # Increased the number of iterations within each optimization run to 50 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV33.py b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV33.py new file mode 100644 index 000000000..527888c9d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedGravitationalSwarmIntelligenceV33.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveEnhancedGravitationalSwarmIntelligenceV33: + def __init__( + self, + budget=3000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=300, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(300): # Increased the number of optimization runs to 300 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(4000): # Increased the number of iterations within each optimization run to 4000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedHarmonicFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonicFireworkAlgorithm.py new file mode 100644 index 000000000..990792bad --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonicFireworkAlgorithm.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveEnhancedHarmonicFireworkAlgorithm: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, scale_factor=0.2, levy_step_size=0.1, alpha=0.9 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.scale_factor = scale_factor + self.levy_step_size = levy_step_size + self.alpha = alpha + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scale_factor, firework + self.scale_factor, (self.n_sparks, self.dim) + ) + return sparks + + def levy_flight(self): + beta = 1.5 + u = np.random.normal(0, 1, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / np.abs(v) ** (1 / beta) + return self.levy_step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + 0.5 * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_parameters(self): + self.scale_factor *= self.alpha + self.levy_step_size *= self.alpha + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + fireworks = self.evolve_fireworks(fireworks, func) + self.adapt_parameters() + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch.py b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch.py new file mode 100644 index 000000000..4f56560cb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch.py @@ -0,0 +1,83 @@ +import numpy as np + + +class AdaptiveEnhancedHarmonyFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def adapt_alpha_beta(self, iteration): + alpha = 0.1 / (1 + 0.01 * iteration) # Adaptive alpha decay + beta = 2 / (1 + 0.01 * iteration) # Adaptive beta decay + return alpha, beta + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + alpha, beta = self.adapt_alpha_beta(it) + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + # Randomly reset some fireworks to encourage exploration + reset_idx = np.random.choice(self.n_fireworks, int(0.1 * self.n_fireworks), replace=False) + fireworks[reset_idx] = self.initialize_fireworks()[reset_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch_v2.py b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch_v2.py new file mode 100644 index 000000000..0d8c52865 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonyFireworksSearch_v2.py @@ -0,0 +1,102 @@ +import numpy as np + + +class AdaptiveEnhancedHarmonyFireworksSearch_v2: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def adapt_alpha_beta(self, iteration): + alpha = 0.1 / (1 + 0.01 * iteration) # Adaptive alpha decay + beta = 2 / (1 + 0.01 * iteration) # Adaptive beta decay + return alpha, beta + + def local_search(self, fireworks, func): + updated_fireworks = fireworks.copy() + + for i in range(self.n_fireworks): + trial = fireworks[i] + self.gamma * np.random.normal(0, 1, self.dim) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + updated_fireworks[i] = trial + + return updated_fireworks + + def global_search(self, fireworks, func): + updated_fireworks = fireworks.copy() + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(updated_fireworks[i]): + updated_fireworks[i] = trial + + return updated_fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + alpha, beta = self.adapt_alpha_beta(it) + + fireworks = self.local_search(fireworks, func) + fireworks = self.global_search(fireworks, func) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + # Randomly reset some fireworks to encourage exploration + reset_idx = np.random.choice(self.n_fireworks, int(0.1 * self.n_fireworks), replace=False) + fireworks[reset_idx] = self.initialize_fireworks()[reset_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration.py b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration.py new file mode 100644 index 000000000..0709b6f07 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration: + def __init__(self, budget, harmony_memory_size=10, bandwidth=0.1, mutation_rate=0.2): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(self.f_opt) + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: # Introduce Adaptive Levy Flight + levy = self.generate_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_adaptive_levy_flight(self, dimension): + alpha = 1.5 # tuning parameter + beta = 0.5 # tuning parameter + + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / abs(v) ** (1 / beta) + levy = step * alpha + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveEnhancedMemeticDifferentialEvolution.py new file mode 100644 index 000000000..d513d98b5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMemeticDifferentialEvolution.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveEnhancedMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5, step_size=0.01): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3.py b/nevergrad/optimization/lama/AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3.py new file mode 100644 index 000000000..cbee3ce60 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3: + def __init__(self, budget, population_size=50): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.8 + np.random.rand() * 0.2 # Modified mutation factor range + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.5 * ((iteration / max_iterations) ** 0.5) + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + return crossover_rate, learning_rate, memetic_probability + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv10.py b/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv10.py new file mode 100644 index 000000000..0fca10239 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv10.py @@ -0,0 +1,124 @@ +import numpy as np + + +class AdaptiveEnhancedMetaNetAQAPSOv10: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.25 + self.adaptive_lr = adaptive_lr + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv11.py b/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv11.py new file mode 100644 index 000000000..2f2a74f05 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMetaNetAQAPSOv11.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class AdaptiveEnhancedMetaNetAQAPSOv11: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.25 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseDifferentialEvolution.py new file mode 100644 index 000000000..31074fb1c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseDifferentialEvolution.py @@ -0,0 +1,153 @@ +import numpy as np + + +class AdaptiveEnhancedMultiPhaseDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def adaptive_factors(success_rate): + if success_rate > 0.2: + return self.mutation_factor * 1.1, self.crossover_rate * 1.05 + else: + return self.mutation_factor * 0.9, self.crossover_rate * 0.95 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + phase = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + self.mutation_factor, self.crossover_rate = adaptive_factors(success_rate) + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.6, 1.0) + + # Adjust phase based on progress + if evaluations > self.budget * 0.5 and self.f_opt < 1e-2: + phase = 1 + self.mutation_factor *= 0.5 + self.crossover_rate *= 1.2 + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveEnhancedMultiPhaseDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseOptimizationAlgorithm.py b/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseOptimizationAlgorithm.py new file mode 100644 index 000000000..b9e8920ca --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedMultiPhaseOptimizationAlgorithm.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdaptiveEnhancedMultiPhaseOptimizationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 500 # Further increased population size for initial exploration + self.F = 0.8 # Differential weight for exploration + self.CR = 0.9 # Increased crossover probability for exploitation + self.local_search_chance_initial = 0.5 # Further increased local search probability + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-5 # Tighter threshold to switch between exploration and exploitation + self.reinit_percentage = 0.3 # Increased reinitialization percentage for diversity + self.cauchy_step_scale = 0.01 # Fine-tuned scale for Cauchy distribution steps + self.gaussian_step_scale = 0.001 # Fine-tuned scale for Gaussian distribution steps + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(50): # Further increased iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max(0.2, self.local_search_chance_initial * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedQGSA_v7.py b/nevergrad/optimization/lama/AdaptiveEnhancedQGSA_v7.py new file mode 100644 index 000000000..47183d44c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedQGSA_v7.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveEnhancedQGSA_v7: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedQuantumHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveEnhancedQuantumHarmonySearch.py new file mode 100644 index 000000000..e22ac5c59 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedQuantumHarmonySearch.py @@ -0,0 +1,58 @@ +import numpy as np + + +class AdaptiveEnhancedQuantumHarmonySearch: + def __init__( + self, budget, harmony_memory_size=10, pitch_adjustment_rate=0.1, bandwidth=0.01, mutation_rate=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(self.f_opt) + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < self.pitch_adjustment_rate: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + return new_harmony diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveEnhancedQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..ed9f5a193 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedQuantumSimulatedAnnealing.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveEnhancedQuantumSimulatedAnnealing: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + damp_ratio=0.9, + perturb_factor=0.01, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.damp_ratio = damp_ratio + self.perturb_factor = perturb_factor + self.success_count = 0 + self.failure_count = 0 + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def _perturb_solution(self, x): + return x + np.random.normal(0, self.perturb_factor, size=self.dim) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + self.success_count += 1 + else: + self.failure_count += 1 + + current_x = self._perturb_solution(current_x) + current_x = np.clip(current_x, -5.0, 5.0) + current_f = func(current_x) + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + self.explore_ratio *= self.damp_ratio + + success_rate = self.success_count / (self.success_count + self.failure_count) + if success_rate < 0.2: + self.perturb_factor *= 1.5 + elif success_rate > 0.6: + self.perturb_factor *= 0.5 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11.py b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11.py new file mode 100644 index 000000000..faf776be8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11.py @@ -0,0 +1,97 @@ +import numpy as np + + +class AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 # Number of iterations to adapt parameter values + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / ( + 2.0 * self.budget + ) # Refined cognitive and social weights update + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14.py b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14.py new file mode 100644 index 000000000..0a7acb5aa --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28.py b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28.py new file mode 100644 index 000000000..af74130f2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28.py @@ -0,0 +1,108 @@ +import numpy as np + + +class AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 # Velocity limit for exploration + self.step_size = 0.1 # Initial local search step size + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget # Updated inertia weight update + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 # Adjusted parameters update + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration factor + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/AdaptiveEnsembleMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveEnsembleMemeticAlgorithm.py new file mode 100644 index 000000000..6a884cb39 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEnsembleMemeticAlgorithm.py @@ -0,0 +1,110 @@ +import numpy as np + + +class AdaptiveEnsembleMemeticAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step(func, pop, scores, crossover_rates, mutation_factors) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialOptimization.py b/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialOptimization.py new file mode 100644 index 000000000..905004f5c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialOptimization.py @@ -0,0 +1,42 @@ +import numpy as np + + +class AdaptiveEvolutionaryDifferentialOptimization: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.5, crossover_rate=0.9): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) # Retrieve the dimension of the problem + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(population_fitness) + if population_fitness[best_idx] < self.f_opt: + self.f_opt = population_fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialPopulationStrategy.py b/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialPopulationStrategy.py new file mode 100644 index 000000000..59c105cc8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEvolutionaryDifferentialPopulationStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdaptiveEvolutionaryDifferentialPopulationStrategy: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=self.population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=self.population_size + ) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/AdaptiveEvolutionaryFireworksSearch_v1.py b/nevergrad/optimization/lama/AdaptiveEvolutionaryFireworksSearch_v1.py new file mode 100644 index 000000000..671241787 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEvolutionaryFireworksSearch_v1.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveEvolutionaryFireworksSearch_v1: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5, gamma=1.0, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = 1.0 + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.995) # Adapt sigma parameter + self.beta = max(1.0, self.beta * 0.995) # Adapt beta parameter + return self.sigma, self.beta + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma, self.beta = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveEvolutionaryGradientSearch.py b/nevergrad/optimization/lama/AdaptiveEvolutionaryGradientSearch.py new file mode 100644 index 000000000..47f2d8980 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveEvolutionaryGradientSearch.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveEvolutionaryGradientSearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.9 - 0.8 * (iteration / max_iterations) + self.crossover_rate = 0.9 - 0.5 * (iteration / max_iterations) + self.learning_rate = 0.02 * np.exp(-iteration / (0.5 * max_iterations)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform evolutionary step + pop, scores = self.evolutionary_step(func, pop, scores) + evaluations += self.population_size + + # Perform local search on elite individuals + if evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < 0.3: # 30% probability to apply local search + pop[i], scores[i] = self.local_search(func, pop[i], scores[i]) + evaluations += 1 + if evaluations >= self.budget: + break + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveExplorationEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveExplorationEvolutionStrategy.py new file mode 100644 index 000000000..5103cfb2e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveExplorationEvolutionStrategy.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveExplorationEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_rate_initial=0.3, + mutation_decrease=0.99, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_rate = mutation_rate_initial + self.mutation_decrease = mutation_decrease + self.mutation_rate_initial = mutation_rate_initial + + def __call__(self, func): + # Initialize population within given bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate offspring from elites with mutation and crossover + new_population = np.empty((self.population_size, self.dimension)) + for i in range(self.population_size): + parent1, parent2 = elites[np.random.choice(self.elite_count, 2, replace=False)] + crossover_point = np.random.randint(self.dimension) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + mutation = np.random.normal(0, self.mutation_rate, self.dimension) + child = np.clip(child + mutation, -5.0, 5.0) + + # Evaluate the child + child_fitness = func(child) + evaluations += 1 + + # Store child + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adapt mutation rate + if evaluations < self.budget / 2: + self.mutation_rate *= self.mutation_decrease + else: + # Increase mutation rate later in the search to escape local optima + self.mutation_rate = min( + self.mutation_rate_initial, self.mutation_rate / self.mutation_decrease + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveExplorationExploitationDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveExplorationExploitationDifferentialEvolution.py new file mode 100644 index 000000000..a9dad9a6b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveExplorationExploitationDifferentialEvolution.py @@ -0,0 +1,130 @@ +import numpy as np + + +class AdaptiveExplorationExploitationDifferentialEvolution: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def adaptive_local_search(self, x, func, budget, exploration): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + if exploration: + perturbation = np.random.uniform(-1.0, 1.0, self.dim) # Larger perturbation for exploration + else: + perturbation = np.random.normal(0, 0.1, self.dim) # Smaller perturbation for exploitation + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Adaptive Elitism: Adjust elite size based on convergence rate + elite_size = max( + 1, int(self.elite_fraction * self.pop_size * (1 - self.eval_count / global_search_budget)) + ) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + if self.eval_count >= global_search_budget: + break + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform adaptive local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + # Switch between exploration and exploitation based on convergence state + exploration = np.random.rand() < (1 - self.eval_count / self.budget) + new_x, new_f = self.adaptive_local_search(population[i], func, local_budget, exploration) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveExplorationExploitationHybridAlgorithm.py b/nevergrad/optimization/lama/AdaptiveExplorationExploitationHybridAlgorithm.py new file mode 100644 index 000000000..b3b2fde9f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveExplorationExploitationHybridAlgorithm.py @@ -0,0 +1,107 @@ +import numpy as np + + +class AdaptiveExplorationExploitationHybridAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 200 # Larger initial population size for better diversity + self.F = 0.5 # Reduced differential weight for better exploration + self.CR = 0.8 # Reduced crossover probability for better exploration + self.local_search_chance_initial = ( + 0.3 # Increased initial local search chance for better exploitation + ) + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-5 # Threshold to switch between exploration and exploitation + self.reinit_percentage = 0.3 # Higher reinitialization percentage + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + step_size = np.random.uniform(-0.1, 0.1, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max(0.1, self.local_search_chance_initial * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdaptiveExploratoryOptimizer.py b/nevergrad/optimization/lama/AdaptiveExploratoryOptimizer.py new file mode 100644 index 000000000..4b4b5ecf8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveExploratoryOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdaptiveExploratoryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 50 # Reduced for more focused search + mutation_factor = 1.0 # Increased initial mutation for broader initial exploration + crossover_rate = 0.8 # Slightly reduced crossover for preserving diversity + elite_size = 2 # More focused elitism + learning_period = max(100, self.budget // 100) # Introduce a learning period for adaptation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main loop + while evaluations < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elitism: carry forward best solutions + sorted_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[sorted_indices] + new_fitness[:elite_size] = fitness[sorted_indices] + + # Generate new candidates for the rest of the population + for i in range(elite_size, population_size): + # Differential Evolution Strategy: "best/1/bin" for more exploitation + best = population[best_index] + idxs = [idx for idx in range(population_size) if idx != best_index] + x1, x2 = population[np.random.choice(idxs, 2, replace=False)] + + # Mutation + mutant = best + mutation_factor * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial = np.where(crossover_mask, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + population = new_population + fitness = new_fitness + + # Update the best solution found + current_best_index = np.argmin(fitness) + current_best_fitness = fitness[current_best_index] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_solution = population[current_best_index] + best_index = current_best_index + + # Adaptive strategies + if evaluations % learning_period == 0: + mutation_factor *= 0.98 # Gradually reduce mutation factor for finer exploitation + crossover_rate = min( + 0.95, crossover_rate + 0.02 + ) # Increase crossover rate for better exploration + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveFeedbackControlStrategyV61.py b/nevergrad/optimization/lama/AdaptiveFeedbackControlStrategyV61.py new file mode 100644 index 000000000..eb1b36f99 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFeedbackControlStrategyV61.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdaptiveFeedbackControlStrategyV61: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.8, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 10: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, successes, trials): + # Adaptive feedback control for mutation and crossover rates + success_ratio = successes / trials if trials > 0 else 0.1 + self.F = np.clip(0.5 + 0.5 * success_ratio, 0.1, 1) + self.CR = np.clip(0.9 - 0.4 * success_ratio, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + successes, trials = 0, 0 + + while evaluations < self.budget: + phase = 1 if evaluations < self.budget * self.switch_ratio else 2 + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + trials += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + successes += 1 + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + self.adjust_parameters(successes, trials) + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveFeedbackEnhancedMemoryStrategyV71.py b/nevergrad/optimization/lama/AdaptiveFeedbackEnhancedMemoryStrategyV71.py new file mode 100644 index 000000000..7931b4400 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFeedbackEnhancedMemoryStrategyV71.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveFeedbackEnhancedMemoryStrategyV71: + def __init__( + self, + budget, + dimension=5, + population_size=50, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover factor + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.best_f = float("inf") + self.best_solution = None + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[c] + self.F * (population[a] - population[b]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic parameter adjustment based on feedback and performance + self.F = 0.5 + (0.5 * np.sin(np.pi * (iteration / total_iterations))) + self.CR = 0.9 - (0.4 * np.cos(np.pi * (iteration / total_iterations))) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + self.best_f = fitnesses[best_idx] + self.best_solution = population[best_idx] + total_iterations = self.budget // self.pop_size + + for iteration in range(total_iterations): + phase = 1 if iteration < total_iterations * self.switch_ratio else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < self.best_f: + best_idx = i + self.best_f = trial_fitness + self.best_solution = trial + + if evaluations >= self.budget: + break + + return self.best_f, self.best_solution diff --git a/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmEnhanced.py b/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmEnhanced.py new file mode 100644 index 000000000..ae4bd874c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmEnhanced.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveFireworkAlgorithmEnhanced: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmOptimization.py b/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmOptimization.py new file mode 100644 index 000000000..d83d2fd9c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFireworkAlgorithmOptimization.py @@ -0,0 +1,59 @@ +import numpy as np + + +class AdaptiveFireworkAlgorithmOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) # Adaptive alpha + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveFireworksEnhancedHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveFireworksEnhancedHarmonySearch.py new file mode 100644 index 000000000..70f6d132b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFireworksEnhancedHarmonySearch.py @@ -0,0 +1,85 @@ +import numpy as np + + +class AdaptiveFireworksEnhancedHarmonySearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def adapt_alpha(self, iteration): + return 0.1 / (1 + 0.01 * iteration) # Adaptive alpha decay + + def adapt_beta(self, iteration): + return 2 / (1 + 0.01 * iteration) # Adaptive beta decay + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + alpha = self.adapt_alpha(it) + beta = self.adapt_beta(it) + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + # Randomly reset some fireworks to encourage exploration + reset_idx = np.random.choice(self.n_fireworks, int(0.1 * self.n_fireworks), replace=False) + fireworks[reset_idx] = self.initialize_fireworks()[reset_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveFocusedEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveFocusedEvolutionStrategy.py new file mode 100644 index 000000000..9d993c807 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFocusedEvolutionStrategy.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveFocusedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial populations and parameters + population_size = 100 + sigma = 0.5 # Initial standard deviation for Gaussian mutation + elite_size = max(1, int(population_size * 0.05)) + learning_rate = 0.1 # Learning rate for adaptive sigma + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + for _ in range(int(self.budget / population_size)): + # Elitism: keep the best solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population = population[elite_indices].copy() + new_fitness = fitness[elite_indices].copy() + + # Generate new population based on best solutions + for i in range(elite_size, population_size): + # Select parent from elite randomly + parent_index = np.random.choice(elite_indices) + parent = population[parent_index] + + # Apply adaptive Gaussian mutation + offspring = parent + np.random.normal(0, sigma, self.dim) + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + offspring_fitness = func(offspring) + + # Replace if better + if offspring_fitness < fitness[parent_index]: + new_population = np.vstack([new_population, offspring]) + new_fitness = np.append(new_fitness, offspring_fitness) + else: + new_population = np.vstack([new_population, parent]) + new_fitness = np.append(new_fitness, fitness[parent_index]) + + # Update population + population = new_population + fitness = new_fitness + + # Adaptive mutation step size + sigma *= np.exp(learning_rate * (1.0 - np.mean(fitness) / best_fitness)) + + # Update the best solution found + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveFuzzyDynamicDE.py b/nevergrad/optimization/lama/AdaptiveFuzzyDynamicDE.py new file mode 100644 index 000000000..0828816dd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveFuzzyDynamicDE.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveFuzzyDynamicDE: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 100 # Number of generations to consider for stagnation + stagnation_counter = 0 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Mutation strategy + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Fuzzy logic to adapt parameters + if success_count > self.population_size * 0.3: + F = min(1.0, F * 1.1) + Cr = max(0.1, Cr * 0.9) + else: + F = max(0.4, F * 0.9) + Cr = min(1.0, Cr * 1.1) + + # Enhanced restart mechanism with diversity consideration + if stagnation_counter > stagnation_threshold: + # Re-initialize half of the population to maintain diversity + half_pop = int(self.population_size / 2) + population[:half_pop] = np.random.uniform( + self.bounds[0], self.bounds[1], (half_pop, self.dim) + ) + fitness[:half_pop] = np.array([func(ind) for ind in population[:half_pop]]) + evaluations += half_pop + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGaussianSearch.py b/nevergrad/optimization/lama/AdaptiveGaussianSearch.py new file mode 100644 index 000000000..24055afd5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGaussianSearch.py @@ -0,0 +1,47 @@ +import numpy as np + + +class AdaptiveGaussianSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + # Initialize variables + self.f_opt = np.inf + self.x_opt = None + # Initial guess at the center of the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update optimal solution if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale of the Gaussian perturbations + scale = 1.0 + + # Main optimization loop + for i in range(self.budget - 1): + # Generate a new candidate by perturbing the current point + candidate = current_point + np.random.normal(0, scale, self.dim) + # Ensure the candidate stays within bounds + candidate = np.clip(candidate, -5.0, 5.0) + candidate_f = func(candidate) + + # If the candidate is better, move there and increase the perturbation scale + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + scale *= 1.1 # Encourage exploration + # Update the optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # If not better, decrease the perturbation scale to refine search + else: + scale *= 0.9 # Encourage exploitation + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGlobalLocalSearchStrategyV62.py b/nevergrad/optimization/lama/AdaptiveGlobalLocalSearchStrategyV62.py new file mode 100644 index 000000000..f317bd1f3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGlobalLocalSearchStrategyV62.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveGlobalLocalSearchStrategyV62: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover probability + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(target - trial) # Store successful direction + if len(self.memory) > 20: # Maintain a bounded memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + # Use memory to influence future mutations dynamically + if self.memory: + direction = np.mean(self.memory, axis=0) + self.F = np.clip( + np.linalg.norm(direction), 0.1, 1 + ) # Adaptive mutation factor based on memory + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveGradientAssistedEvolution.py b/nevergrad/optimization/lama/AdaptiveGradientAssistedEvolution.py new file mode 100644 index 000000000..34105aa2d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientAssistedEvolution.py @@ -0,0 +1,83 @@ +import numpy as np + + +class AdaptiveGradientAssistedEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialize parameters + population_size = 40 + children_multiplier = 5 # Number of children per parent + mutation_strength = 0.5 # Initial mutation strength + success_threshold = 0.15 # Threshold for successful mutations + + # Create initial population and evaluate it + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Track the best solution found + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + evaluations = population_size + successful_mutations = 0 + attempted_mutations = 0 + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for parent in population: + # Generate children using mutated gradients + gradients = np.random.normal(0, 1, (children_multiplier, self.dim)) + for gradient in gradients: + child = parent + mutation_strength * gradient + child = np.clip(child, self.lb, self.ub) + child_fitness = func(child) + + new_population.append(child) + new_fitness.append(child_fitness) + evaluations += 1 + + attempted_mutations += 1 + if child_fitness < func(parent): + successful_mutations += 1 + + if evaluations >= self.budget: + break + if evaluations >= self.budget: + break + + # Update the population with the best performing individuals + total_population = np.vstack((population, new_population)) + total_fitness = np.hstack((fitness, new_fitness)) + best_indices = np.argsort(total_fitness)[:population_size] + population = total_population[best_indices] + fitness = total_fitness[best_indices] + + # Update the best found solution + best_idx = np.argmin(fitness) + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Adaptive mutation strength adjustment + if attempted_mutations > 0: + success_ratio = successful_mutations / attempted_mutations + if success_ratio > success_threshold: + mutation_strength *= 1.1 # Increase mutation strength + else: + mutation_strength *= 0.9 # Decrease mutation strength + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientAssistedEvolution(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveGradientBalancedCrossoverPSO.py b/nevergrad/optimization/lama/AdaptiveGradientBalancedCrossoverPSO.py new file mode 100644 index 000000000..0a56ac018 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBalancedCrossoverPSO.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveGradientBalancedCrossoverPSO: + def __init__( + self, + budget=10000, + population_size=200, + initial_inertia=0.95, + final_inertia=0.35, + cognitive_weight=2.0, + social_weight=1.8, + crossover_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.crossover_rate = crossover_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = self.inertia_weight * velocities[i] + personal_component + social_component + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + if np.random.rand() < self.crossover_rate: + j = np.random.choice([x for x in range(self.population_size) if x != i]) + crossover_point = np.random.randint(self.dim) + particles[i][:crossover_point], particles[j][:crossover_point] = ( + particles[j][:crossover_point].copy(), + particles[i][:crossover_point].copy(), + ) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveGradientBalancedEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveGradientBalancedEvolutionStrategy.py new file mode 100644 index 000000000..e659daf28 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBalancedEvolutionStrategy.py @@ -0,0 +1,101 @@ +import numpy as np + + +class AdaptiveGradientBalancedEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=40, + initial_step_size=0.9, + step_decay=0.98, + elite_ratio=0.15, + mutation_intensity=0.1, + local_search_prob=0.25, + refinement_steps=15, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, current_step_size): + mutation = np.random.normal(0, current_step_size * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual, current_step_size): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, current_step_size * 0.01, self.dimension), + self.bounds[0], + self.bounds[1], + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + current_step_size = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], current_step_size) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: + for idx in range(self.population_size): + if evaluations + self.refinement_steps > self.budget: + break + local_individual, local_fitness = self.local_search( + func, new_population[idx], current_step_size + ) + evaluations += self.refinement_steps + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingPlus.py b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingPlus.py new file mode 100644 index 000000000..9d81f8ee2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingPlus.py @@ -0,0 +1,174 @@ +import numpy as np + + +class AdaptiveGradientBoostedMemoryAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 25 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Adaptive exploration control + if evaluations % (self.budget // 5) == 0: + adaptive_exploration_radius = 0.2 + 0.8 * (1 - T / T_initial) + for _ in range(memory_size // 3): + x_candidate = memory[ + np.random.randint(memory_size) + ] + adaptive_exploration_radius * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Control periodic exploration + if evaluations % (self.budget // 6) == 0: + for _ in range(memory_size // 3): + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl.py b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl.py new file mode 100644 index 000000000..04e172c0b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl.py @@ -0,0 +1,174 @@ +import numpy as np + + +class AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 25 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Adaptive exploration control + if evaluations % (self.budget // 5) == 0: + adaptive_exploration_radius = 0.2 + 0.8 * (1 - T / T_initial) + for _ in range(memory_size // 3): + x_candidate = memory[ + np.random.randint(memory_size) + ] + adaptive_exploration_radius * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Control periodic exploration + if evaluations % (self.budget // 6) == 0: + for _ in range(memory_size // 3): + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryExploration.py b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryExploration.py new file mode 100644 index 000000000..64cffc951 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemoryExploration.py @@ -0,0 +1,158 @@ +import numpy as np + + +class AdaptiveGradientBoostedMemoryExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha = 0.95 # Cooling rate + beta = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Introduce a hybrid crossover mechanism to exploit best solutions in memory + if evaluations % (self.budget // 5) == 0: + for i in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + # Introduce a mutation mechanism to diversify solutions in memory + if evaluations % (self.budget // 3) == 0: + for i in range(memory_size // 3): + x_mut = memory[np.random.randint(memory_size)] + x_mut += np.random.normal(0, 0.1, self.dim) + x_mut = np.clip(x_mut, func.bounds.lb, func.bounds.ub) + f_mut = func(x_mut) + evaluations += 1 + if f_mut < self.f_opt: + self.f_opt = f_mut + self.x_opt = x_mut + + worst_idx = np.argmax(memory_scores) + if f_mut < memory_scores[worst_idx]: + memory[worst_idx] = x_mut + memory_scores[worst_idx] = f_mut + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/AdaptiveGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..e29b70f56 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class AdaptiveGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveGradientClusteringEvolution.py b/nevergrad/optimization/lama/AdaptiveGradientClusteringEvolution.py new file mode 100644 index 000000000..347f1073b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientClusteringEvolution.py @@ -0,0 +1,87 @@ +import numpy as np + + +class AdaptiveGradientClusteringEvolution: + def __init__( + self, budget, dim=5, pop_size=50, num_clusters=5, sigma_init=0.3, learning_rate=0.05, gradient_steps=5 + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.num_clusters = num_clusters # Number of clusters to group individuals + self.sigma_init = sigma_init # Initial mutation strength + self.learning_rate = learning_rate # Learning rate for gradient updates + self.gradient_steps = gradient_steps # Steps to approximate gradient + self.bounds = np.array([-5.0, 5.0]) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, individual, sigma): + mutant = individual + sigma * np.random.randn(self.dim) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def estimate_gradient(self, func, individual): + grad = np.zeros(self.dim) + f_base = func(individual) + for i in range(self.dim): + perturb = np.zeros(self.dim) + eps = self.sigma_init / np.sqrt(self.dim) + perturb[i] = eps + f_plus = func(individual + perturb) + grad[i] = (f_plus - f_base) / eps + return grad + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(x) for x in population]) + evaluations = len(population) + sigma = np.full(self.pop_size, self.sigma_init) + + while evaluations < self.budget: + # Cluster population based on feature similarity + from sklearn.cluster import KMeans + + kmeans = KMeans(n_clusters=min(self.num_clusters, len(population))) + labels = kmeans.fit_predict(population) + + new_population = [] + new_f_values = [] + + for cluster_id in range(self.num_clusters): + cluster_indices = np.where(labels == cluster_id)[0] + if len(cluster_indices) == 0: + continue + + # Calculate cluster centroid gradient + cluster_gradient = np.mean( + [self.estimate_gradient(func, population[idx]) for idx in cluster_indices], axis=0 + ) + cluster_f_values = f_values[cluster_indices] + best_idx = cluster_indices[np.argmin(cluster_f_values)] + + # Update best individual in the cluster using the average gradient + best_individual = population[best_idx] + updated_individual = np.clip( + best_individual - self.learning_rate * cluster_gradient, self.bounds[0], self.bounds[1] + ) + new_f_value = func(updated_individual) + evaluations += 1 + + # Updating population and tracking best solution + new_population.append(updated_individual) + new_f_values.append(new_f_value) + + if evaluations >= self.budget: + break + + if evaluations >= self.budget: + break + + population = np.array(new_population) + f_values = np.array(new_f_values) + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGradientCrossoverOptimizer.py b/nevergrad/optimization/lama/AdaptiveGradientCrossoverOptimizer.py new file mode 100644 index 000000000..700146ec8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientCrossoverOptimizer.py @@ -0,0 +1,61 @@ +import numpy as np + + +class AdaptiveGradientCrossoverOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 200 + mutation_factor = 0.8 # High initial mutation factor for broader search + crossover_rate = 0.7 # Moderately high crossover to balance exploration and exploitation + grad_step_size = 0.01 # Step size for gradient approximation + adaptive_rate = 0.05 # Adaptive rate for adjusting mutation and crossover + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary process with gradient-based mutation + while evaluations < self.budget: + for i in range(population_size): + # Select three different members for mutation + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Gradient-based mutation + grad_direction = (func(a + grad_step_size) - func(a)) / grad_step_size + mutant_vector = a + mutation_factor * grad_direction * (b - c) + mutant_vector = np.clip(mutant_vector, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial_vector + + # Adaptive mechanism to adjust mutation and crossover rates + mutation_factor = max(0.1, mutation_factor - adaptive_rate * np.random.randn()) + crossover_rate = min(1.0, max(0.5, crossover_rate + adaptive_rate * np.random.randn())) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolution.py new file mode 100644 index 000000000..d274d0408 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolution.py @@ -0,0 +1,104 @@ +import numpy as np + + +class AdaptiveGradientDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.decay_factor = 0.99 # Decay factor for learning rate reduction + self.success_threshold = 0.1 # Threshold for successful adaptations + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + if success_rate > self.success_threshold: + self.base_lr *= 1 + (1 - self.decay_factor) + else: + self.base_lr *= self.decay_factor + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionEnhanced.py b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionEnhanced.py new file mode 100644 index 000000000..417e6ab73 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionEnhanced.py @@ -0,0 +1,105 @@ +import numpy as np + + +class AdaptiveGradientDifferentialEvolutionEnhanced: + def __init__(self, budget, population_size=25, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.lr_decay = 0.99 # Learning rate decay for more stable convergence + self.success_threshold = 0.1 # Threshold for increasing learning rate + self.diversity_threshold = 0.1 # Threshold for maintaining diversity + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + if success_rate > self.success_threshold: + self.base_lr *= 1 + (1 - self.lr_decay) + else: + self.base_lr *= self.lr_decay + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientDifferentialEvolutionEnhanced(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionPlus.py new file mode 100644 index 000000000..ded1a1817 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientDifferentialEvolutionPlus.py @@ -0,0 +1,132 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveGradientDifferentialEvolutionPlus: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientDifferentialEvolutionPlus(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientDifferentialHybrid.py b/nevergrad/optimization/lama/AdaptiveGradientDifferentialHybrid.py new file mode 100644 index 000000000..987e54c14 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientDifferentialHybrid.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveGradientDifferentialHybrid: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + elite_size = 5 + mutation_factor = 0.85 + crossover_rate = 0.8 + adaptive_factor = 0.1 + + # Initialize population and evaluate fitness + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Differential mutation with adaptive factor + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + mutant = np.clip(mutant + adaptive_factor * (self.x_opt - mutant), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + + # Adaptive selection + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Adaptive mutation factor update + mutation_factor = np.clip(mutation_factor - 0.01 * (1 - np.mean(fitness) / self.f_opt), 0.5, 1) + + # Elitism + elite_indices = np.argsort(fitness)[:elite_size] + for elite in elite_indices: + population[np.random.randint(population_size)] = population[elite] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGradientEnhancedExplorationPSO.py b/nevergrad/optimization/lama/AdaptiveGradientEnhancedExplorationPSO.py new file mode 100644 index 000000000..60fb346f0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientEnhancedExplorationPSO.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveGradientEnhancedExplorationPSO: + def __init__( + self, + budget=10000, + population_size=80, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.1, + social_weight=2.0, + adaptive_factor=0.03, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.adaptive_factor = adaptive_factor + self.dim = 5 # Fixed problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Search space boundaries + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + adaptive_exploration = ( + self.adaptive_factor + * np.random.normal(0, 1, self.dim) + * (1 - (evaluation_counter / self.budget)) + ) # Decreases with time + + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + adaptive_exploration + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveGradientEnhancedMultiPhaseAnnealing.py b/nevergrad/optimization/lama/AdaptiveGradientEnhancedMultiPhaseAnnealing.py new file mode 100644 index 000000000..287ce3d2c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientEnhancedMultiPhaseAnnealing.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveGradientEnhancedMultiPhaseAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define adaptive phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=5, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveGradientEnhancedRAMEDS.py b/nevergrad/optimization/lama/AdaptiveGradientEnhancedRAMEDS.py new file mode 100644 index 000000000..449dd8688 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientEnhancedRAMEDS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdaptiveGradientEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Gradually adjust mutation factor using adaptive feedback + historical_gradients = np.diff(memory_fitness[np.isfinite(memory_fitness)]) + if len(historical_gradients) > 1: + mean_gradient = np.mean(historical_gradients) + F = self.F_min + (self.F_max - self.F_min) * np.tanh(mean_gradient) + else: + F = np.random.uniform(self.F_min, self.F_max) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update focusing on recent better changes + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveGradientEvolution.py b/nevergrad/optimization/lama/AdaptiveGradientEvolution.py new file mode 100644 index 000000000..c86251970 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveGradientEvolution: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = 10 + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(self.population_size), size=2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, i) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, i, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worse parent with the new child + worse_parent_idx = ( + parents_idx[0] if fitness[parents_idx[0]] > fitness[parents_idx[1]] else parents_idx[1] + ) + population[worse_parent_idx] = new_x + fitness[worse_parent_idx] = new_f + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientExploration.py b/nevergrad/optimization/lama/AdaptiveGradientExploration.py new file mode 100644 index 000000000..666d871bd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientExploration.py @@ -0,0 +1,56 @@ +import numpy as np + + +class AdaptiveGradientExploration: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.learning_rate = 0.1 + self.epsilon = 1e-8 + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + for i in range(1, self.budget): + grad = gradient_estimate(x) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + perturbation = random_vector() * adapt_lr + new_x = x - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + x = new_x + else: + x = random_vector() # Restart exploration from random point + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientExploration(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientExplorationV2.py b/nevergrad/optimization/lama/AdaptiveGradientExplorationV2.py new file mode 100644 index 000000000..c86e42cef --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientExplorationV2.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptiveGradientExplorationV2: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.learning_rate = 0.1 + self.epsilon = 1e-8 + self.exploration_prob = 0.1 # Probability of performing a random exploration step + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + for i in range(1, self.budget): + if np.random.rand() < self.exploration_prob: + # Perform random exploration + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + else: + # Perform gradient-based exploitation + grad = gradient_estimate(x) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + perturbation = random_vector() * adapt_lr + new_x = x - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + x = new_x + else: + x = random_vector() # Restart exploration from random point + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveGradientExplorationV2(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveGradientGuidedEvolution.py b/nevergrad/optimization/lama/AdaptiveGradientGuidedEvolution.py new file mode 100644 index 000000000..bacf4a23f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientGuidedEvolution.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveGradientGuidedEvolution: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + mutation_intensity=0.1, + gradient_sampling=10, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.mutation_intensity = mutation_intensity + self.gradient_sampling = gradient_sampling # Number of points to estimate gradient + self.sigma = 0.2 # Standard deviation for mutations + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation operation + return np.clip( + individual + np.random.normal(0, self.sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def approximate_gradient(self, individual, func): + # Approximate gradient by sampling around the individual + gradients = [] + initial_fitness = func(individual) + for _ in range(self.gradient_sampling): + perturbation = np.random.normal(0, self.sigma, self.dimension) + neighbor = np.clip(individual + perturbation, self.bounds[0], self.bounds[1]) + neighbor_fitness = func(neighbor) + gradient = ( + (neighbor_fitness - initial_fitness) / (np.linalg.norm(perturbation) + 1e-6) * perturbation + ) + gradients.append(gradient) + return np.mean(gradients, axis=0) + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + gradient = self.approximate_gradient(population[i], func) + individual = population[i] - self.mutation_intensity * gradient # Gradient descent step + individual = self.mutate(individual) # Mutation step + individual_fitness = func(individual) + evaluations += 1 + + if individual_fitness < fitness[i]: + population[i] = individual + fitness[i] = individual_fitness + + if individual_fitness < best_fitness: + best_individual = individual + best_fitness = individual_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/AdaptiveGradientInformedPSO.py b/nevergrad/optimization/lama/AdaptiveGradientInformedPSO.py new file mode 100644 index 000000000..c9be9eecb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientInformedPSO.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveGradientInformedPSO: + def __init__( + self, + budget=10000, + population_size=100, + initial_inertia=1.0, + final_inertia=0.4, + cognitive_weight=0.7, + social_weight=0.3, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Decaying inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * (personal_best_positions[i] - particles[i]) + social_component = r2 * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * personal_component + + self.social_weight * social_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveGradientSampling.py b/nevergrad/optimization/lama/AdaptiveGradientSampling.py new file mode 100644 index 000000000..a5958137b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientSampling.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveGradientSampling: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + lb, ub = -5.0, 5.0 # Bounds of the search space + + # Initial random point + x_current = np.random.uniform(lb, ub, self.dim) + f_current = func(x_current) + + # Update best found solution if it's better + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Adaptive step size + step_size = 0.5 + + # Gradient approximation parameters + epsilon = 1e-5 + for i in range(self.budget - 1): + gradients = np.zeros(self.dim) + for j in range(self.dim): + x_temp = np.array(x_current) + x_temp[j] += epsilon + f_temp = func(x_temp) + gradients[j] = (f_temp - f_current) / epsilon + + # Normalize the gradient vector to make it step-size independent + norm = np.linalg.norm(gradients) + if norm == 0: + gradients = np.random.normal(0, 1, self.dim) # random restart if gradient is zero + else: + gradients /= norm + + # Update current point with adaptive step + x_new = x_current - step_size * gradients + x_new = np.clip(x_new, lb, ub) # Ensure new points are within bounds + + # Evaluate new point + f_new = func(x_new) + + # Update current point if new point is better + if f_new < f_current: + x_current = x_new + f_current = f_new + step_size *= 1.1 # Increase step size slightly as we are in a good direction + + # Update the best found solution + if f_new < self.f_opt: + self.f_opt = f_new + self.x_opt = x_current + else: + step_size *= 0.9 # Reduce step size as there was no improvement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGradientSearch.py b/nevergrad/optimization/lama/AdaptiveGradientSearch.py new file mode 100644 index 000000000..0969e02ec --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGradientSearch.py @@ -0,0 +1,56 @@ +import numpy as np + + +class AdaptiveGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Random initial point + x = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + velocity = np.zeros_like(x) + + for i in range(self.budget): + # Evaluate the function at the current point + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Estimate gradient via finite differences + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position + velocity = beta * velocity - alpha * grad + x = x + velocity + + # Ensure x stays within bounds + x = np.clip(x, self.lower_bound, self.upper_bound) + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..2c8cfe575 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligence.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligence: + def __init__(self, budget=1000, population_size=20, G0=100.0, alpha=0.1, beta=0.9, delta=0.1, gamma=0.1): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.delta = delta + self.gamma = gamma + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, f, F): + return x + F + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], best_pos, F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + for i in range(self.population_size): + if np.random.rand() < self.beta: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], population[random_index], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.G0 * np.exp(-self.alpha * t) + self.alpha *= 1.0 - self.delta + self.beta += self.gamma + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV15.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV15.py new file mode 100644 index 000000000..bafc5a829 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV15.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligenceV15: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV2.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV2.py new file mode 100644 index 000000000..6c8c2adac --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV2.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligenceV2: + def __init__(self, budget=1000, population_size=20, G0=100.0, alpha=0.1, beta=0.9, delta=0.1, gamma=0.1): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.delta = delta + self.gamma = gamma + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, f, F): + return x + F + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], best_pos, F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + for i in range(self.population_size): + if np.random.rand() < self.beta: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], population[random_index], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.G0 * np.exp(-self.alpha * t) + self.alpha *= 1.0 - self.delta + self.beta = np.clip(self.beta + self.gamma, 0.1, 0.9) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV26.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV26.py new file mode 100644 index 000000000..821ef2d8f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV26.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligenceV26: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV3.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV3.py new file mode 100644 index 000000000..4cd4589b8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV3.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligenceV3: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, f, F): + return x + F + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], best_pos, F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], population[random_index], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.G0 * np.exp(-self.alpha * t) + self.alpha *= 1.0 - self.delta + self.beta_max = self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV4.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV4.py new file mode 100644 index 000000000..c89f4d7cc --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmIntelligenceV4.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmIntelligenceV4: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, f, F): + return x + F + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], best_pos, F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], population[random_index], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.G0 * np.exp(-self.alpha * t) + self.alpha *= 1.0 - self.delta + self.beta_max = self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py new file mode 100644 index 000000000..5f36a2dbb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py @@ -0,0 +1,101 @@ +import numpy as np + + +class AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(50): # Increase the number of optimization runs to 50 for more exploration + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range( + 1000 + ): # Increase the number of iterations within each optimization run to 1000 for more thorough optimization + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/AdaptiveGuidedCulturalSearch.py b/nevergrad/optimization/lama/AdaptiveGuidedCulturalSearch.py new file mode 100644 index 000000000..86ccc5c4e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGuidedCulturalSearch.py @@ -0,0 +1,113 @@ +import numpy as np + + +class AdaptiveGuidedCulturalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % population_size == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Cultural shift based on best solution and mean position + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.3 + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGuidedDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveGuidedDifferentialEvolution.py new file mode 100644 index 000000000..4e579d95b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGuidedDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveGuidedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 80 # Adjusted population size for better balance + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.4: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.2 + (0.3 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGuidedEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveGuidedEvolutionStrategy.py new file mode 100644 index 000000000..ca78f0afa --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGuidedEvolutionStrategy.py @@ -0,0 +1,60 @@ +import numpy as np + + +class AdaptiveGuidedEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + step_size=0.1, + decay_rate=0.995, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = step_size # Initial step size for mutation + self.decay_rate = decay_rate # Decay rate for step size to reduce it each generation + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, gen): + # Mutation using adaptive step size + mutation_strength = self.step_size * (self.decay_rate**gen) # Decaying step size + mutation = np.random.normal(0, mutation_strength, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def select_best(self, population, fitness): + # Tournament selection for simplicity + best_index = np.argmin(fitness) + return population[best_index], fitness[best_index] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = self.select_best(population, fitness) + + evaluations = self.population_size + + for gen in range(1, self.budget // self.population_size): + new_population = np.array([self.mutate(ind, gen) for ind in population]) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + for i in range(self.population_size): + if new_fitness[i] < fitness[i]: + population[i] = new_population[i] + fitness[i] = new_fitness[i] + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_individual = new_population[i] + + # Update the step size adaptively + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveGuidedHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveGuidedHybridOptimizer.py new file mode 100644 index 000000000..eefa0992c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGuidedHybridOptimizer.py @@ -0,0 +1,123 @@ +import numpy as np + + +class AdaptiveGuidedHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def guided_search(self, x, func, step_size=0.1, max_iter=10): + """Guided search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Differential Evolution Strategy + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + + # Evaluate mutant vector + mutant_fitness = func(mutant_vector) + evaluations += 1 + if mutant_fitness < fitness[i]: + population[i] = mutant_vector + fitness[i] = mutant_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if mutant_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = mutant_vector + knowledge_base["best_fitness"] = mutant_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if mutant_fitness < self.f_opt: + self.f_opt = mutant_fitness + self.x_opt = mutant_vector + + # Apply guided search on some individuals + if np.random.rand() < 0.3: + guided_best_x, guided_best_f = self.guided_search(population[i], func) + evaluations += 10 # Assuming guided search uses 10 evaluations + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveGuidedMutationOptimizer.py b/nevergrad/optimization/lama/AdaptiveGuidedMutationOptimizer.py new file mode 100644 index 000000000..760acae46 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveGuidedMutationOptimizer.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveGuidedMutationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 250 # Increased population size for better exploration + mutation_factor = 0.9 # Higher initial mutation factor to promote exploration + crossover_prob = 0.6 # Lower initial crossover probability to ensure good individuals are retained + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Guided mutation strategy using best solution + indices = np.arange(population_size) + indices = np.delete(indices, i) + + # Select three random indices, ensuring they are distinct from 'i' + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + + # Mutation considering current solution, best solution and three random solutions + mutant = population[i] + mutation_factor * ( + best_solution - population[i] + x1 - (x2 + x3) / 2 + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + best_index = np.argmin(fitness) + + # Adaptive mutation and crossover probability adjustment + mutation_factor = max(0.5, mutation_factor - 0.01) # Slower decrease in mutation factor + crossover_prob = min(0.9, crossover_prob + 0.01) # Slower increase in crossover probability + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveHarmonicFireworkAlgorithm.py new file mode 100644 index 000000000..a43ebecdf --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicFireworkAlgorithm.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveHarmonicFireworkAlgorithm: + def __init__( + self, budget=10000, n_fireworks=30, n_sparks=10, scale_factor=0.1, levy_step_size=0.1, alpha=0.9 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.scale_factor = scale_factor + self.levy_step_size = levy_step_size + self.alpha = alpha + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scale_factor, firework + self.scale_factor, (self.n_sparks, self.dim) + ) + return sparks + + def levy_flight(self): + beta = 1.5 + u = np.random.normal(0, 1, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / np.abs(v) ** (1 / beta) + return self.levy_step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + 0.5 * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_parameters(self): + self.scale_factor *= self.alpha + self.levy_step_size *= self.alpha + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + fireworks = self.evolve_fireworks(fireworks, func) + self.adapt_parameters() + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicSearchOptimizer.py b/nevergrad/optimization/lama/AdaptiveHarmonicSearchOptimizer.py new file mode 100644 index 000000000..f47763f6a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicSearchOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveHarmonicSearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=2.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution): + exploration = np.random.normal(0, self.bandwidth, (self.population_size, self.dim)) + new_population = population + exploration + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.1 + else: + return bandwidth * 0.9 + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimization.py new file mode 100644 index 000000000..953399a68 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimization.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AdaptiveHarmonicSwarmOptimization: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) / 10.0 + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV2.py new file mode 100644 index 000000000..082610ccd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV2.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveHarmonicSwarmOptimizationV2: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + step_size_factor=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size_factor = step_size_factor + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) * self.step_size_factor + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV3.py b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV3.py new file mode 100644 index 000000000..c7df23e02 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicSwarmOptimizationV3.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveHarmonicSwarmOptimizationV3: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + step_size_factor=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size_factor = step_size_factor + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + indexes = np.random.choice(range(self.num_particles), size=2, replace=False) + new_solution[i] = np.mean(memory_matrix[indexes, i]) + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) * self.step_size_factor + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV12.py b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV12.py new file mode 100644 index 000000000..a8fa7cf68 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV12.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdaptiveHarmonicTabuSearchV12: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + self.bandwidth_decay = 0.95 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.95 + self.bandwidth *= self.bandwidth_decay + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.05 + self.bandwidth *= 1.05 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV17.py b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV17.py new file mode 100644 index 000000000..5f2eb7b33 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV17.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveHarmonicTabuSearchV17: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, 0.1, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + self.pitch_adjustment_rate = ( + 0.1 + 0.4 * (self.budget - i) / self.budget + ) # Adapt pitch adjustment rate + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV20.py b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV20.py new file mode 100644 index 000000000..ba92d98e0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV20.py @@ -0,0 +1,100 @@ +import numpy as np + + +class AdaptiveHarmonicTabuSearchV20: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, 0.1, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.1 # Increase the tabu ratio for more exploration + self.bandwidth *= 0.9 # Decrease the bandwidth for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV8.py b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV8.py new file mode 100644 index 000000000..df87714ec --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonicTabuSearchV8.py @@ -0,0 +1,95 @@ +import numpy as np + + +class AdaptiveHarmonicTabuSearchV8: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyFireworksAlgorithm.py b/nevergrad/optimization/lama/AdaptiveHarmonyFireworksAlgorithm.py new file mode 100644 index 000000000..d6fda6330 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyFireworksAlgorithm.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptiveHarmonyFireworksAlgorithm: + def __init__( + self, + budget=10000, + population_size=30, + harmony_memory_size=10, + pitch_adjust_rate=0.5, + mutation_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = best_solution[i] + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + + return np.clip(new_solution, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithm.py new file mode 100644 index 000000000..b6921e404 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithm.py @@ -0,0 +1,90 @@ +import numpy as np + + +class AdaptiveHarmonyMemeticAlgorithm: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=100, memetic_prob=0.8): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Adaptive adjustment of HMCR + self.par = min(self.par + 0.0001, 0.5) # Adaptive adjustment of PAR + self.bw = max(self.bw - 0.00001, 0.01) # Adaptive adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Adaptive adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithmV15.py b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithmV15.py new file mode 100644 index 000000000..c7f4b80f6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticAlgorithmV15.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdaptiveHarmonyMemeticAlgorithmV15: + def __init__( + self, budget=10000, hmcr=0.7, par=0.3, bw=0.05, memetic_iter=200, memetic_prob=0.9, memetic_step=0.025 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV2.py new file mode 100644 index 000000000..17727c368 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV2.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdaptiveHarmonyMemeticOptimizationV2: + def __init__(self, budget=10000, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.2, memory_size=100): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, self.memory_size + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV27.py b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV27.py new file mode 100644 index 000000000..ab64751b1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticOptimizationV27.py @@ -0,0 +1,107 @@ +import numpy as np + + +class AdaptiveHarmonyMemeticOptimizationV27: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.9, + pitch_bandwidth=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.uniform( + -self.memetic_step, self.memetic_step, size=self.dim + ) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + # Adaptive adjustment of pitch adjustment rate + if i % 100 == 0 and i != 0: + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + if success_rate > 0.5: + self.pitch_adjustment_rate *= 1.1 + else: + self.pitch_adjustment_rate *= 0.9 + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyMemeticSearchV2.py b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticSearchV2.py new file mode 100644 index 000000000..c137ea89d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyMemeticSearchV2.py @@ -0,0 +1,101 @@ +import numpy as np + + +class AdaptiveHarmonyMemeticSearchV2: + def __init__( + self, budget=10000, hmcr=0.7, par=0.4, bw=0.6, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchOptimizerV2.py new file mode 100644 index 000000000..468c314fc --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchOptimizerV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveHarmonySearchOptimizerV2: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.1 + else: + return bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return 1.0 - self.memory_update_rate + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.05, self.exploration_rate * 0.9) + else: + return min(0.5, self.exploration_rate * 1.1) + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithCuckooInspiration.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithCuckooInspiration.py new file mode 100644 index 000000000..0b16943a3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithCuckooInspiration.py @@ -0,0 +1,63 @@ +import numpy as np + + +class AdaptiveHarmonySearchWithCuckooInspiration: + def __init__( + self, budget, harmony_memory_size=10, bandwidth=0.1, mutation_rate=0.3, cuckoo_probability=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + self.cuckoo_probability = cuckoo_probability + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(self.f_opt) + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < self.cuckoo_probability: + cuckoo_index = np.random.randint(0, self.harmony_memory_size) + cuckoo_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + new_harmony[cuckoo_index] = cuckoo_harmony + + return new_harmony diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2.py new file mode 100644 index 000000000..7ae94a4d3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlight.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlight.py new file mode 100644 index 000000000..64f96c0bb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlight.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AdaptiveHarmonySearchWithImprovedLevyFlight: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, leviness=1.5): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.leviness = leviness + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(size=self.harmony_memory_size, dimension=len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + new_harmony = np.clip(new_harmony, func.bounds.lb, func.bounds.ub) + + return new_harmony + + def generate_levy_flight(self, size, dimension): + levy = np.zeros((size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.0 + self.leviness) + * np.sin(np.pi * self.leviness / 2) + / (np.math.gamma(1.0 + 2 * self.leviness) * (self.leviness**0.5)) + ) ** (1.0 / self.leviness) + + for i in range(size): + for j in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1.0 / self.leviness) + epsilon) + levy[i, j] = step + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlightInspiration.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlightInspiration.py new file mode 100644 index 000000000..f3d977061 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithImprovedLevyFlightInspiration.py @@ -0,0 +1,100 @@ +import numpy as np + + +class AdaptiveHarmonySearchWithImprovedLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=15, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: # Introduce Improved Adaptive Levy Flight + levy = self.generate_improved_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_improved_adaptive_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) # Randomly select beta in range + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.05 # Smaller increment in beta change + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLevyFlightImprovement.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLevyFlightImprovement.py new file mode 100644 index 000000000..35d86bdd8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLevyFlightImprovement.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveHarmonySearchWithLevyFlightImprovement: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimization.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimization.py new file mode 100644 index 000000000..a4f4183b2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimization.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveHarmonySearchWithLocalOptimization: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationImproved.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationImproved.py new file mode 100644 index 000000000..0041e6979 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationImproved.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveHarmonySearchWithLocalOptimizationImproved: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(10): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationV2.py new file mode 100644 index 000000000..1c7a87fcf --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithLocalOptimizationV2.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveHarmonySearchWithLocalOptimizationV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_search(self, harmony_memory, func): + for i in range(len(harmony_memory)): + harmony_memory[i] = self.simulated_annealing( + harmony_memory[i], func, func.bounds, max_iter=5, initial_temp=5.0 + ) + return harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonySearchWithSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithSimulatedAnnealing.py new file mode 100644 index 000000000..19ea9f474 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonySearchWithSimulatedAnnealing.py @@ -0,0 +1,98 @@ +import numpy as np +from scipy.stats import cauchy + + +class AdaptiveHarmonySearchWithSimulatedAnnealing: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.98): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHarmonyTabuOptimization.py b/nevergrad/optimization/lama/AdaptiveHarmonyTabuOptimization.py new file mode 100644 index 000000000..6da622e7a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHarmonyTabuOptimization.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveHarmonyTabuOptimization: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list, iteration): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list, iteration) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.2 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list, i) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + num_improvements += 1 + + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + num_improvements = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridAlgorithm.py b/nevergrad/optimization/lama/AdaptiveHybridAlgorithm.py new file mode 100644 index 000000000..d884df698 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridAlgorithm.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveHybridAlgorithm: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.3 + self.local_search_probability = 0.9 + self.F = 0.7 + self.CR = 0.8 + self.memory_size = 30 + self.strategy_switch_threshold = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.4 + c1 = 1.4 + c2 = 1.4 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdaptiveHybridAlgorithm(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithGradientBoost.py b/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithGradientBoost.py new file mode 100644 index 000000000..70eba2c8a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithGradientBoost.py @@ -0,0 +1,104 @@ +import numpy as np + + +class AdaptiveHybridAnnealingWithGradientBoost: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithMemoryRefinement.py b/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithMemoryRefinement.py new file mode 100644 index 000000000..587ae9cc6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridAnnealingWithMemoryRefinement.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveHybridAnnealingWithMemoryRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define adaptive phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Hybrid approach: Periodically do gradient-based local search and dimensional adjustments + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=5, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveHybridCMAESDE.py b/nevergrad/optimization/lama/AdaptiveHybridCMAESDE.py new file mode 100644 index 000000000..54d084ac6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridCMAESDE.py @@ -0,0 +1,183 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveHybridCMAESDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 50 + self.strategy_weights = np.ones(3) + self.strategy_success = np.zeros(3) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 20 + self.dynamic_parameters_adjustment_threshold = 30 + self.pop_shrink_factor = 0.1 + self.diversification_period = 50 + self.sigma = 0.3 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_cmaes(self, population, cma_es): + z = np.random.randn(self.dim) + return cma_es.mean + self.sigma * cma_es.cov.dot(z) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _diversify_population(self, population, fitness, func): + num_new_individuals = int(self.pop_size * 0.1) # 10% of the population + new_individuals = np.random.uniform(self.lb, self.ub, (num_new_individuals, self.dim)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + self.evaluations += num_new_individuals + + combined_population = np.vstack((population, new_individuals)) + combined_fitness = np.hstack((fitness, new_fitness)) + + best_indices = np.argsort(combined_fitness)[: self.pop_size] + return combined_population[best_indices], combined_fitness[best_indices] + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + cma_es = CMAES(self.dim, self.lb, self.ub) + + iteration = 0 + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3 = np.random.choice(indices, 3, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + else: # strategy == self._mutation_cmaes + donor = self._mutation_cmaes(population, cma_es) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes].index( + strategy + ) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + cma_es.update(population, fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 3) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self._dynamic_parameters() + + if self.no_improvement_count >= self.dynamic_adjustment_period: + new_pop_size = max(20, int(self.pop_size * (1 - self.pop_shrink_factor))) + population = population[:new_pop_size] + fitness = fitness[:new_pop_size] + self.pop_size = new_pop_size + self.no_improvement_count = 0 + + if iteration % self.diversification_period == 0 and self.evaluations < self.budget: + population, fitness = self._diversify_population(population, fitness, func) + + iteration += 1 + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt + + +class CMAES: + def __init__(self, dim, lb, ub): + self.dim = dim + self.lb = lb + self.ub = ub + self.mean = np.random.uniform(self.lb, self.ub, self.dim) + self.cov = np.eye(self.dim) + self.sigma = 0.5 + + def update(self, population, fitness): + best_idx = np.argmin(fitness) + self.mean = population[best_idx] + cov_update = np.cov(population.T) + self.cov = 0.9 * self.cov + 0.1 * cov_update diff --git a/nevergrad/optimization/lama/AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3.py b/nevergrad/optimization/lama/AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3.py new file mode 100644 index 000000000..1b0612298 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3.py @@ -0,0 +1,122 @@ +import numpy as np + + +class AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased population size for diverse exploration + self.sigma = 0.2 # Step size for initial exploration + self.c1 = 0.05 # Learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.015 # Learning rate for mutation adaptability + self.elitism_rate = 0.2 # Reduced elitism rate to allow more exploration + self.eval_count = 0 + self.F = 0.7 # Tuned differential weight for balanced exploration and exploitation + self.CR = 0.8 # Tuned crossover probability for better offspring variation + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveHybridCulturalOptimizer.py b/nevergrad/optimization/lama/AdaptiveHybridCulturalOptimizer.py new file mode 100644 index 000000000..27e17bf19 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridCulturalOptimizer.py @@ -0,0 +1,123 @@ +import numpy as np + + +class AdaptiveHybridCulturalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Differential Evolution Strategy + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + + # Evaluate mutant vector + mutant_fitness = func(mutant_vector) + evaluations += 1 + if mutant_fitness < fitness[i]: + population[i] = mutant_vector + fitness[i] = mutant_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if mutant_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = mutant_vector + knowledge_base["best_fitness"] = mutant_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if mutant_fitness < self.f_opt: + self.f_opt = mutant_fitness + self.x_opt = mutant_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.adaptive_local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridDEPSOWithDynamicRestart.py b/nevergrad/optimization/lama/AdaptiveHybridDEPSOWithDynamicRestart.py new file mode 100644 index 000000000..a7f66d48c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridDEPSOWithDynamicRestart.py @@ -0,0 +1,149 @@ +import numpy as np + + +class AdaptiveHybridDEPSOWithDynamicRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridDEWithIntensifiedLocalSearch.py b/nevergrad/optimization/lama/AdaptiveHybridDEWithIntensifiedLocalSearch.py new file mode 100644 index 000000000..96fed773c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridDEWithIntensifiedLocalSearch.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveHybridDEWithIntensifiedLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.initial_pop_size = 20 + self.F = 0.8 # Initial differential weight + self.CR = 0.9 # Initial crossover probability + self.local_search_prob = 0.2 # Increased local search probability + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def nelder_mead(self, x, func): + result = minimize(func, x, method="Nelder-Mead", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.initial_pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + pop_size = len(population) + + for i in range(pop_size): + # Select mutation strategy adaptively + strategy = np.random.choice(["rand/1", "best/1"]) + + if strategy == "rand/1": + # Select three distinct individuals (but different from i) + indices = np.arange(pop_size) + indices = indices[indices != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + elif strategy == "best/1": + best_idx = np.argmin(fitness) + best = population[best_idx] + indices = np.arange(pop_size) + indices = indices[indices != best_idx] + b, c = population[np.random.choice(indices, 2, replace=False)] + mutant = np.clip(best + self.F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Local Search with an increased probability + if np.random.rand() < self.local_search_prob and evaluations + 1 <= self.budget: + trial, f_trial = self.nelder_mead(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + # Check if we've exhausted our budget + if evaluations >= self.budget: + break + + # Elitism: Keep the best individual + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + if best_fitness < self.f_opt: + self.f_opt = best_fitness + self.x_opt = best_individual + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Adjust population size based on convergence + if np.std(fitness) < 1e-5: + if len(population) > 10: + population = population[: len(population) // 2] + fitness = fitness[: len(fitness) // 2] + else: + if ( + len(population) < self.initial_pop_size * 2 + and evaluations + len(population) <= self.budget + ): + new_individuals = np.array([self.random_bounds() for _ in range(len(population))]) + new_fitnesses = np.array([func(ind) for ind in new_individuals]) + population = np.vstack((population, new_individuals)) + fitness = np.hstack((fitness, new_fitnesses)) + evaluations += len(new_individuals) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveHybridDifferentialEvolution.py new file mode 100644 index 000000000..ecaa76ae2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridDifferentialEvolution.py @@ -0,0 +1,124 @@ +import numpy as np + + +class AdaptiveHybridDifferentialEvolution: + def __init__(self, budget, population_size=20, init_crossover_rate=0.7, init_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = init_crossover_rate + self.mutation_factor = init_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def fitness_sharing(population, fitness, sigma_share=0.1): + shared_fitness = np.copy(fitness) + for i in range(len(population)): + for j in range(len(population)): + if i != j and np.linalg.norm(population[i] - population[j]) < sigma_share: + shared_fitness[i] += fitness[j] + return shared_fitness + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + max_generations = self.budget // self.population_size + temperature = 1.0 + + for generation in range(max_generations): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = self.mutation_factor * (1 - generation / max_generations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + shared_fitness = fitness_sharing(population, fitness) + population = [population[i] for i in np.argsort(shared_fitness)[: self.population_size]] + fitness = [fitness[i] for i in np.argsort(shared_fitness)[: self.population_size]] + + temperature *= 0.99 + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveHybridDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridEvolutionStrategyV5.py b/nevergrad/optimization/lama/AdaptiveHybridEvolutionStrategyV5.py new file mode 100644 index 000000000..cea581c09 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridEvolutionStrategyV5.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveHybridEvolutionStrategyV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + elite_size = int(0.15 * population_size) # Increased elite size for more retention of good solutions + mutation_rate = 0.1 # Higher mutation rate for greater exploration + mutation_scale = lambda t: 0.2 * np.exp( + -0.0002 * t + ) # Adjusted mutation scale for dynamic exploration + crossover_rate = 0.95 # Very high crossover rate for extensive recombination + + local_search_prob = 0.4 # Higher local search probability + local_search_step_scale = lambda t: 0.05 * np.exp(-0.0001 * t) # More aggressive local search step + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridFireworkAlgorithm.py b/nevergrad/optimization/lama/AdaptiveHybridFireworkAlgorithm.py new file mode 100644 index 000000000..e929591c2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridFireworkAlgorithm.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdaptiveHybridFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + for i, (x, _) in enumerate(self.fireworks): + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridGradientAnnealingWithVariableMemory.py b/nevergrad/optimization/lama/AdaptiveHybridGradientAnnealingWithVariableMemory.py new file mode 100644 index 000000000..5f0425b69 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridGradientAnnealingWithVariableMemory.py @@ -0,0 +1,135 @@ +import numpy as np + + +class AdaptiveHybridGradientAnnealingWithVariableMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions with variable size + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Adjust memory size dynamically based on progress + if evaluations % (self.budget // 4) == 0: + memory_size = min(20, memory_size + 2) + new_memory = np.zeros((memory_size, self.dim)) + new_memory_scores = np.full(memory_size, np.Inf) + new_memory[: len(memory)] = memory + new_memory_scores[: len(memory_scores)] = memory_scores + memory = new_memory + memory_scores = new_memory_scores + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveHybridHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveHybridHarmonySearch.py new file mode 100644 index 000000000..1b64ca2e7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridHarmonySearch.py @@ -0,0 +1,57 @@ +import numpy as np + + +class AdaptiveHybridHarmonySearch: + def __init__(self, budget=10000, harmony_memory_size=20, hmcr=0.9, par=0.4, bw=0.5, bw_decay=0.95): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + self.bw *= self.bw_decay # Decay the bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridMetaOptimizer.py b/nevergrad/optimization/lama/AdaptiveHybridMetaOptimizer.py new file mode 100644 index 000000000..42c4d755e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridMetaOptimizer.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveHybridMetaOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.9 + self.F = 0.7 + self.CR = 0.9 + self.memory_size = 30 + self.strategy_switch_threshold = 0.02 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdaptiveHybridMetaOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridOptimization.py b/nevergrad/optimization/lama/AdaptiveHybridOptimization.py new file mode 100644 index 000000000..3ff47f7ad --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridOptimization.py @@ -0,0 +1,161 @@ +import numpy as np + + +class AdaptiveHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_decay = 0.99 + + # Differential Evolution parameters + F = 0.8 + CR = 0.9 + + # Gradient-based search parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = 0.9 + adaptive_F = 0.8 + adaptive_alpha = 0.1 + adaptive_beta = 0.9 + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = 0.9 - 0.8 * (i / self.budget) + adaptive_F = 0.8 + 0.4 * (i / self.budget) + adaptive_alpha = 0.1 + 0.2 * (i / self.budget) + adaptive_beta = 0.9 - 0.4 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.1 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(0.4, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHybridOptimizationV2.py new file mode 100644 index 000000000..d64444b5d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridOptimizationV2.py @@ -0,0 +1,161 @@ +import numpy as np + + +class AdaptiveHybridOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_decay = 0.99 + + # Differential Evolution parameters + F = 0.8 + CR = 0.9 + + # Gradient-based search parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = 0.9 + adaptive_F = 0.8 + adaptive_alpha = 0.1 + adaptive_beta = 0.9 + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = 0.9 - 0.8 * (i / self.budget) + adaptive_F = 0.8 + 0.4 * (i / self.budget) + adaptive_alpha = 0.1 + 0.2 * (i / self.budget) + adaptive_beta = 0.9 - 0.4 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.1 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(0.4, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveHybridOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridOptimizationV3.py b/nevergrad/optimization/lama/AdaptiveHybridOptimizationV3.py new file mode 100644 index 000000000..798e36f59 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridOptimizationV3.py @@ -0,0 +1,102 @@ +import numpy as np + + +class AdaptiveHybridOptimizationV3: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.crossover_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.learning_rate = 0.01 * np.exp(-iteration / max_iterations) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = (self.budget // self.population_size) * 2 + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step + pop, scores = self.local_search(func, pop, scores) + evaluations += self.population_size + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveHybridOptimizer.py new file mode 100644 index 000000000..171ad8df0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridOptimizer.py @@ -0,0 +1,148 @@ +import numpy as np + + +class AdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # Dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def gradient_descent(self, x, func, budget, step_size=0.01): + best_x = x.copy() + best_f = func(x) + grad = np.zeros(self.dim) + for _ in range(budget): + for i in range(self.dim): + x_plus = x.copy() + x_plus[i] += step_size + f_plus = func(x_plus) + grad[i] = (f_plus - best_f) / step_size + + x = np.clip(x - step_size * grad, self.bounds[0], self.bounds[1]) + f = func(x) + if f < best_f: + best_x = x + best_f = f + + return best_x, best_f + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + progress = self.eval_count / global_search_budget + self.w = 0.4 + 0.5 * (1 - progress) + self.c1 = 1.5 - 0.5 * progress + self.c2 = 1.5 + 0.5 * progress + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + if np.random.rand() < 0.3: + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + if np.random.rand() < 0.5: + new_x, new_f = self.gradient_descent(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + else: + new_x, new_f = self.local_search(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..cf3ba9a1c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolution.py @@ -0,0 +1,134 @@ +import numpy as np + + +class AdaptiveHybridParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Differential weight + self.initial_CR = 0.9 # Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.local_search_rate = 0.1 # Probability for local search + self.memory_size = 5 # Memory size for self-adaptation + self.w = 0.5 # Inertia weight for PSO + self.c1 = 1.5 # Cognitive coefficient for PSO + self.c2 = 1.5 # Social coefficient for PSO + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal bests + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + # Simple local search strategy + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + return candidate + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with memory + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + # Update memory + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # PSO update for non-elite particles + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = np.clip(population[i] + velocities[i], self.lower_bound, self.upper_bound) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + # Update personal bests + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + # Update global best + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + # Update population and fitness + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveHybridParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py new file mode 100644 index 000000000..7b9839bb9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py @@ -0,0 +1,134 @@ +import numpy as np + + +class AdaptiveHybridParticleSwarmDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Differential weight + self.initial_CR = 0.9 # Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.local_search_rate = 0.1 # Probability for local search + self.memory_size = 5 # Memory size for self-adaptation + self.w = 0.7 # Inertia weight for PSO + self.c1 = 1.7 # Cognitive coefficient for PSO + self.c2 = 1.7 # Social coefficient for PSO + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal bests + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + # Simple local search strategy + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + return candidate + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with memory + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + # Update memory + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # PSO update for non-elite particles + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = np.clip(population[i] + velocities[i], self.lower_bound, self.upper_bound) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + # Update personal bests + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + # Update global best + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + # Update population and fitness + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveHybridParticleSwarmDifferentialEvolutionPlus(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveHybridQuasiRandomGradientDE.py b/nevergrad/optimization/lama/AdaptiveHybridQuasiRandomGradientDE.py new file mode 100644 index 000000000..06d6422d1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridQuasiRandomGradientDE.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveHybridQuasiRandomGradientDE: + def __init__(self, budget, population_size=30, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j])) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveHybridQuasiRandomGradientDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveHybridRecombinativeStrategy.py b/nevergrad/optimization/lama/AdaptiveHybridRecombinativeStrategy.py new file mode 100644 index 000000000..f95e0282b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridRecombinativeStrategy.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveHybridRecombinativeStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_top_individuals(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def adapt_mutation_rate(self, best_fitness, current_fitness): + improvement = best_fitness - current_fitness + if improvement > 0: + return max(0.01, 1 - np.log1p(improvement)) + else: + return 0.1 + + def mutate_population(self, population, mutation_rate): + mutations = np.random.normal(0, mutation_rate, population.shape) + return np.clip(population + mutations, self.lower_bound, self.upper_bound) + + def recombine_population(self, best_individuals, population_size): + num_top = best_individuals.shape[0] + choices = np.random.choice(num_top, size=population_size) + mix_ratio = np.random.beta( + 2, 5, size=(population_size, self.dim) + ) # Ensuring dimensionality is maintained + recombined_population = ( + mix_ratio * best_individuals[choices] + (1 - mix_ratio) * best_individuals[choices[::-1]] + ) + return recombined_population + + def __call__(self, func): + population_size = 100 + num_best = 10 # Elite group size + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + while self.budget > 0: + fitness = self.evaluate_population(func, population) + self.budget -= population_size # Reducing the remaining budget + + best_individuals, best_fitness = self.select_top_individuals(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_individuals[0] + + mutation_rate = self.adapt_mutation_rate(best_score, best_fitness[0]) + new_population = self.recombine_population(best_individuals, population_size) + population = self.mutate_population(new_population, mutation_rate) + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveHybridSearchOptimizer.py b/nevergrad/optimization/lama/AdaptiveHybridSearchOptimizer.py new file mode 100644 index 000000000..c9e95da85 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridSearchOptimizer.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveHybridSearchOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.3 + self.local_search_probability = 0.9 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 20 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + phase_one_budget = int(self.budget * 0.5) # Increase exploration phase budget + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(-1, 1, self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (phase_one_budget - eval_count) / phase_one_budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + if neighbor_fitness < fitness[i]: + new_population[i] = neighbor + fitness[i] = neighbor_fitness + if neighbor_fitness < best_fitness: + best_individual = neighbor + best_fitness = neighbor_fitness + + if eval_count >= phase_one_budget: + break + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/AdaptiveHybridSwarmEvolutionOptimization.py b/nevergrad/optimization/lama/AdaptiveHybridSwarmEvolutionOptimization.py new file mode 100644 index 000000000..736233f70 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHybridSwarmEvolutionOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class AdaptiveHybridSwarmEvolutionOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 # Optimized population size + self.initial_F = 0.8 # Balanced mutation factor + self.initial_CR = 0.7 # Balanced crossover rate + self.elite_rate = 0.1 # Increased elite rate for better exploitation + self.local_search_rate = 0.1 # Decreased local search rate for better balance + self.memory_size = 20 # Memory size for parameter adaptation + self.w = 0.5 # Inertia weight for PSO + self.c1 = 1.5 # Cognitive component + self.c2 = 1.5 # Social component + self.phase_switch_ratio = 0.6 # More budget for DE phase + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.005 # Finer local search step + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveHybridSwarmEvolutionOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveHyperQuantumStateCrossoverOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveHyperQuantumStateCrossoverOptimizationV2.py new file mode 100644 index 000000000..c8bac2cad --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveHyperQuantumStateCrossoverOptimizationV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveHyperQuantumStateCrossoverOptimizationV2: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.05, + mutation_intensity=0.05, + crossover_rate=0.9, + quantum_prob=0.25, + gamma=0.2, + beta=0.5, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + self.beta = beta # Coefficient for dynamic mutation intensity adjustment + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum-inspired updates + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Dynamic mutation intensity based on progress + intensity = self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Enhanced quantum inspired state update to adaptively explore based on the best solution""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/AdaptiveIncrementalCrossoverEnhancement.py b/nevergrad/optimization/lama/AdaptiveIncrementalCrossoverEnhancement.py new file mode 100644 index 000000000..ebf422f6a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveIncrementalCrossoverEnhancement.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveIncrementalCrossoverEnhancement: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize_population(self, num_individuals=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (num_individuals, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness, top_k=20): + indices = np.argsort(fitness)[:top_k] + return population[indices], fitness[indices] + + def crossover(self, parents, offspring_size=50): + num_parents = len(parents) + offspring = np.empty((offspring_size, self.dim)) + for i in range(offspring_size): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i, :cross_point] = parents[p1, :cross_point] + offspring[i, cross_point:] = parents[p2, cross_point:] + return offspring + + def mutate(self, population, scale=0.05): + perturbation = np.random.normal(0, scale, size=population.shape) + mutated = np.clip(population + perturbation, self.lower_bound, self.upper_bound) + return mutated + + def __call__(self, func): + population_size = 100 + elite_size = 20 + mutation_scale = 0.05 + offspring_size = 80 + + population = self.initialize_population(population_size) + best_score = float("inf") + best_solution = None + + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + # Selection of the elite + elite_population, elite_fitness = self.select_survivors(population, fitness, elite_size) + + # Crossover and Mutation: generate offspring with potential greater diversity + offspring = self.crossover(elite_population, offspring_size) + offspring = self.mutate(offspring, mutation_scale) + + # Reinsert best found solution into population to ensure retention of good genes + elite_population[0] = best_solution.copy() + + # Merge elite and offspring into a new population + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveInertiaHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveInertiaHybridOptimizer.py new file mode 100644 index 000000000..4793d529b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveInertiaHybridOptimizer.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AdaptiveInertiaHybridOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.7 + self.global_coeff = 0.8 + self.local_coeff = 0.8 + self.initial_inertia = 1.2 + self.final_inertia = 0.4 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + inertia_weight = (self.final_inertia - self.initial_inertia) * ( + evaluations / self.budget + ) + self.initial_inertia + + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + inertia_weight * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/AdaptiveInertiaParticleOptimizer.py b/nevergrad/optimization/lama/AdaptiveInertiaParticleOptimizer.py new file mode 100644 index 000000000..617565dc6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveInertiaParticleOptimizer.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveInertiaParticleOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.5 + self.global_coeff = 0.8 + self.local_coeff = 0.8 + self.inertia_max = 1.5 + self.inertia_min = 0.5 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + inertia = self.inertia_min + (self.inertia_max - self.inertia_min) * ( + 1 - evaluations / self.budget + ) + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + inertia * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/AdaptiveInertiaParticleSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveInertiaParticleSwarmOptimization.py new file mode 100644 index 000000000..adb0456bb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveInertiaParticleSwarmOptimization.py @@ -0,0 +1,57 @@ +import numpy as np + + +class AdaptiveInertiaParticleSwarmOptimization: + def __init__(self, budget=10000, population_size=40, omega_max=0.9, omega_min=0.4, phi_p=0.2, phi_g=0.5): + self.budget = budget + self.population_size = population_size + self.omega_max = omega_max # Maximum inertia weight + self.omega_min = omega_min # Minimum inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_fitness = np.array([func(p) for p in particles]) + + global_best = particles[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_max - (self.omega_max - self.omega_min) * (evaluations / self.budget) + for i in range(self.population_size): + # Update velocity and position of particles + velocity[i] = ( + omega * velocity[i] + + self.phi_p * np.random.rand(self.dim) * (personal_best[i] - particles[i]) + + self.phi_g * np.random.rand(self.dim) * (global_best - particles[i]) + ) + particles[i] += velocity[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate particle's fitness + current_fitness = func(particles[i]) + evaluations += 1 + + # Update personal and global bests + if current_fitness < personal_best_fitness[i]: + personal_best[i] = particles[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best = particles[i] + global_best_fitness = current_fitness + + # Dynamic update of learning coefficients + self.phi_p = max(0.1, self.phi_p - 0.5 * evaluations / self.budget) + self.phi_g = min(0.6, self.phi_g + 0.5 * evaluations / self.budget) + + return global_best_fitness, global_best diff --git a/nevergrad/optimization/lama/AdaptiveLearningDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/AdaptiveLearningDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..a728dca80 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLearningDifferentialEvolutionOptimizer.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveLearningDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.dim = 5 # Dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize archive to store successful mutation vectors + archive = [] + + while self.eval_count < self.budget: + new_population = [] + new_fitness = [] + for i in range(self.pop_size): + # Mutation with archive usage + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if archive: + d = archive[np.random.randint(len(archive))] + mutant = np.clip( + a + F_values[i] * (b - c) + F_values[i] * (a - d), self.bounds[0], self.bounds[1] + ) + else: + mutant = np.clip(a + F_values[i] * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + archive.append(population[i]) + # Limit archive size + if len(archive) > self.pop_size: + archive.pop(np.random.randint(len(archive))) + # Self-adapting parameters + F_values[i] = F_values[i] * 1.1 if F_values[i] < 1 else F_values[i] + CR_values[i] = CR_values[i] * 1.1 if CR_values[i] < 1 else CR_values[i] + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + F_values[i] = F_values[i] * 0.9 if F_values[i] > 0 else F_values[i] + CR_values[i] = CR_values[i] * 0.9 if CR_values[i] > 0 else CR_values[i] + + if self.eval_count >= self.budget: + break + + # Replace the old population with the new one + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Learning Phase: Adjust F and CR based on the success rate + success_rate = np.count_nonzero(np.array(new_fitness) < np.array(fitness)) / self.pop_size + if success_rate > 0.2: + self.init_F = min(1.0, self.init_F * 1.1) + self.init_CR = min(1.0, self.init_CR * 1.1) + else: + self.init_F = max(0.1, self.init_F * 0.9) + self.init_CR = max(0.1, self.init_CR * 0.9) + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py b/nevergrad/optimization/lama/AdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py new file mode 100644 index 000000000..48907f9c1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AdaptiveLevyDiversifiedMetaHeuristicAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.3, + levy_beta=1.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveLevyHarmonySearch.py b/nevergrad/optimization/lama/AdaptiveLevyHarmonySearch.py new file mode 100644 index 000000000..eada53c85 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLevyHarmonySearch.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveLevyHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, levy_step_size=0.5): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..67d5f8707 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing.py @@ -0,0 +1,60 @@ +import numpy as np + + +class AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_range=0.1 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(10): # Adaptive local search + perturb_range = search_range * np.exp(-_ / 10) # Reduce perturbation range over iterations + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveLocalSearchOptimizer.py b/nevergrad/optimization/lama/AdaptiveLocalSearchOptimizer.py new file mode 100644 index 000000000..2c19f1596 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLocalSearchOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class AdaptiveLocalSearchOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.initial_step_size = (upper_bound - lower_bound) / 4 + self.min_step_size = (upper_bound - lower_bound) / 1000 + + def local_search(self, func, current_point, step_size): + """Perform a local search from the current point with the given step size.""" + best_point = current_point + best_value = func(current_point) + self.evaluations += 1 + + while self.evaluations < self.budget: + for i in range(self.dimension): + for direction in [-1, 1]: + new_point = np.copy(current_point) + new_point[i] += direction * step_size + # Ensure new_point stays within bounds + new_point = np.clip(new_point, self.bounds[0], self.bounds[1]) + + new_value = func(new_point) + self.evaluations += 1 + if new_value < best_value: + best_value = new_value + best_point = new_point + + if self.evaluations >= self.budget: + return best_point, best_value + + if np.array_equal(best_point, current_point): + break + current_point = best_point + + return best_point, best_value + + def __call__(self, func): + # Initialize at a random starting point + current_point = np.random.uniform(self.bounds[0], self.bounds[1], self.dimension) + step_size = self.initial_step_size + self.evaluations = 0 + + best_point, best_value = self.local_search(func, current_point, step_size) + + # Perform iterative reduction of step size and local search + while step_size > self.min_step_size and self.evaluations < self.budget: + step_size *= 0.5 + new_point, new_value = self.local_search(func, best_point, step_size) + + if new_value < best_value: + best_value = new_value + best_point = new_point + + return best_value, best_point diff --git a/nevergrad/optimization/lama/AdaptiveLocalSearchQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveLocalSearchQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..f6a45a92f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveLocalSearchQuantumSimulatedAnnealing.py @@ -0,0 +1,59 @@ +import numpy as np + + +class AdaptiveLocalSearchQuantumSimulatedAnnealing: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_range=0.1 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(10): # Adaptive local search + perturb_range = search_range * np.exp(-_ / 10) # Reduce perturbation range over iterations + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveMemeticAlgorithm.py new file mode 100644 index 000000000..e6fab6aea --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticAlgorithm.py @@ -0,0 +1,72 @@ +import numpy as np + + +class AdaptiveMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + self.population_size = 50 + self.mutation_factor = 0.8 + self.crossover_rate = 0.9 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation (Differential Evolution) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + # Crossover + crossover = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover, mutant, population[i]) + + # Local Search (Adaptive Hill Climbing) + if np.random.rand() < 0.2: # Adaptive probability to invoke local search + trial = self.local_search(trial, func) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + + for _ in range(10): # Perform multiple iterations of hill climbing + for i in range(self.dim): + x_new = best_x.copy() + x_new[i] += step_size * (np.random.rand() * 2 - 1) # Small random perturbation + x_new = np.clip(x_new, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x diff --git a/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..8e2cfcc2b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9, local_search_budget_ratio=0.1): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer.py new file mode 100644 index 000000000..e709d502a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer.py @@ -0,0 +1,124 @@ +import numpy as np + + +class AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Elitism: Keep the best individuals + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + if self.eval_count >= global_search_budget: + break + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..39683752a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.5 + self.CR = 0.9 + self.local_search_prob = 0.1 + self.restart_threshold = 50 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.history = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + x_local, f_local = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim + ).x, func(x) + return x_local, f_local + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(len(population)) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, len(population) - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(0.1 * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..579e89279 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9, local_search_budget_ratio=0.1): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV2.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV2.py new file mode 100644 index 000000000..86a2a4427 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV2.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 * (1 + np.random.rand()) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with mixed strategies + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reset part of the population if stagnation is detected + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV3.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV3.py new file mode 100644 index 000000000..9e443f512 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV3.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.cos(np.pi * iteration / max_iterations) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with mixed strategies + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reset part of the population if stagnation is detected + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV4.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV4.py new file mode 100644 index 000000000..5bd3e142f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV4.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.cos(np.pi * iteration / max_iterations) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def elitist_selection(self, population, fitness): + elite_size = max(1, len(population) // 10) + elite_indices = np.argsort(fitness)[:elite_size] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with refined strategies + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Elitist selection to preserve the best individuals + elites, elite_fitness = self.elitist_selection(population, fitness) + + # Reset part of the population if stagnation is detected + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + # Inject elite individuals back into the population + elite_size = len(elites) + population[:elite_size] = elites + fitness[:elite_size] = elite_fitness + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV5.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV5.py new file mode 100644 index 000000000..15407fa2c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV5.py @@ -0,0 +1,110 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.cos(np.pi * iteration / max_iterations) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def elitist_selection(self, population, fitness): + elite_size = max(1, len(population) // 10) + elite_indices = np.argsort(fitness)[:elite_size] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with refined strategies + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Elitist selection to preserve the best individuals + elites, elite_fitness = self.elitist_selection(population, fitness) + + # Reset part of the population if stagnation is detected + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + # Inject elite individuals back into the population + elite_size = len(elites) + population[:elite_size] = elites + fitness[:elite_size] = elite_fitness + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV6.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV6.py new file mode 100644 index 000000000..c47ba3b82 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV6.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=3): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * np.exp(-iteration / max_iterations) # Exponential decay + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 # Reduced population size for faster convergence + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if ( + np.random.rand() < 0.15 and evaluations + 2 <= self.budget + ): # increased probability for local search + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.03, max_iter=3 + ) # smaller step size + evaluations += 2 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations + int(0.10 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.10 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV7.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV7.py new file mode 100644 index 000000000..aa005146a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionV7.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): # Ensure at least one parameter is taken from mutant + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=3): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) # Inversely scaled exponential increase + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 # Slightly increased population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if ( + np.random.rand() < 0.20 and evaluations + 2 <= self.budget + ): # increased probability for local search + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.02, max_iter=3 + ) # smaller step size, more fine-tuning + evaluations += 2 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations + int(0.10 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.10 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR.py new file mode 100644 index 000000000..c629cb3b9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + step_size = (self.bounds[1] - self.bounds[0]) * 0.05 + perturbation = np.random.uniform(-step_size, step_size, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.05 if F < 1 else F + CR_values[i] = CR * 1.05 if CR < 1 else CR + else: + F_values[i] = F * 0.95 if F > 0 else F + CR_values[i] = CR * 0.95 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Elitism: Keep the best individuals + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + if self.eval_count >= global_search_budget: + break + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance.py new file mode 100644 index 000000000..cd7fa2d7b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance.py @@ -0,0 +1,130 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import Matern + + +class AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.surrogate_update_frequency = 50 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.cos(np.pi * iteration / max_iterations) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def elitist_selection(self, population, fitness): + elite_size = max(1, len(population) // 10) + elite_indices = np.argsort(fitness)[:elite_size] + return population[elite_indices], fitness[elite_indices] + + def update_surrogate_model(self, population, fitness): + kernel = Matern(nu=2.5) + self.surrogate_model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=5) + self.surrogate_model.fit(population, fitness) + + def surrogate_assisted_evaluation(self, x): + return self.surrogate_model.predict([x], return_std=True) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + self.update_surrogate_model(population, fitness) + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + if evaluations % self.surrogate_update_frequency == 0: + self.update_surrogate_model(population, fitness) + + trial_fitness, uncertainty = self.surrogate_assisted_evaluation(trial_vector) + if uncertainty < 0.1: + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + elites, elite_fitness = self.elitist_selection(population, fitness) + + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + elite_size = len(elites) + population[:elite_size] = elites + fitness[:elite_size] = elite_fitness + + iteration += 1 + + return self.f_opt, self.x_opt + + +# Example usage: +# optimizer = AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance(budget=10000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialQuantumSearch.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialQuantumSearch.py new file mode 100644 index 000000000..a4c700105 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialQuantumSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialQuantumSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.5 + social_coefficient = 2.0 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 20 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDifferentialSearch.py b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialSearch.py new file mode 100644 index 000000000..46b838abe --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDifferentialSearch.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveMemeticDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = np.clip(self.crossover(population[i], mutant_vector, CR), self.lb, self.ub) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.4 and evaluations + 3 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func, step_size=0.05) + evaluations += 3 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced Diversity Maintenance: Reinitialize 10% worst individuals + if evaluations + int(0.1 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.1 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticDiverseOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticDiverseOptimizer.py new file mode 100644 index 000000000..a569bddcc --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticDiverseOptimizer.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveMemeticDiverseOptimizer: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=15): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.6 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/AdaptiveMemeticEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionStrategy.py new file mode 100644 index 000000000..b0e026ac4 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionStrategy.py @@ -0,0 +1,97 @@ +import numpy as np + + +class AdaptiveMemeticEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func): + """Simple local search around a point""" + best_x = x + best_f = func(x) + for _ in range(10): + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation and Crossover using Differential Evolution + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < CR[i] + if not np.any(crossover_points): + crossover_points[np.random.randint(0, self.dim)] = True + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + F[i] = F[i] + 0.1 * (np.random.rand() - 0.5) + F[i] = np.clip(F[i], 0.5, 1.0) + CR[i] = CR[i] + 0.1 * (np.random.rand() - 0.5) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.2: # Increased probability of local search + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Periodically introduce new random solutions (to avoid local optima) + if evaluations % (population_size // 2) == 0: + new_population = np.random.uniform(self.lb, self.ub, (population_size // 5, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += len(new_fitness) + + # Replace worst individuals with new random individuals + worst_indices = fitness.argsort()[-(population_size // 5) :] + population[worst_indices] = new_population + fitness[worst_indices] = new_fitness + + # Reinitialize strategy parameters for new individuals + F[worst_indices] = np.random.uniform(0.5, 1.0, population_size // 5) + CR[worst_indices] = np.random.uniform(0.1, 0.9, population_size // 5) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..b0b0c7b57 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptiveMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.5 + np.random.rand() * 0.3 + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < 0.9 + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.3 * (iteration / max_iterations) + learning_rate = 0.01 * np.exp(-iteration / (0.5 * max_iterations)) + return crossover_rate, learning_rate + + def hybrid_step(self, func, pop, scores, learning_rate): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < 0.5: # 50% probability to apply local search + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate = self.adaptive_parameters(iteration, max_iterations) + + # Perform hybrid step + pop, scores = self.hybrid_step(func, pop, scores, learning_rate) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryOptimizer.py new file mode 100644 index 000000000..564e249aa --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionaryOptimizer.py @@ -0,0 +1,94 @@ +import numpy as np + + +class AdaptiveMemeticEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5, step_size=0.01): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticEvolutionarySearch.py b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionarySearch.py new file mode 100644 index 000000000..8edeb023b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticEvolutionarySearch.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdaptiveMemeticEvolutionarySearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.9 - 0.8 * (iteration / max_iterations) + self.crossover_rate = 0.9 - 0.5 * (iteration / max_iterations) + self.learning_rate = 0.02 * np.exp(-iteration / (0.5 * max_iterations)) + + def hybrid_step(self, func, pop, scores): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < 0.5: # 50% probability to apply local search + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform hybrid step + pop, scores = self.hybrid_step(func, pop, scores) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimization.py b/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimization.py new file mode 100644 index 000000000..41ac87d94 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimization.py @@ -0,0 +1,85 @@ +import numpy as np + + +class AdaptiveMemeticHarmonyOptimization: + def __init__(self, budget=10000, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.3: + new_harmony[i] += np.random.normal(0, 1) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimizationV5.py b/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimizationV5.py new file mode 100644 index 000000000..04d4b525c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticHarmonyOptimizationV5.py @@ -0,0 +1,85 @@ +import numpy as np + + +class AdaptiveMemeticHarmonyOptimizationV5: + def __init__(self, budget=10000, memetic_iter=500, memetic_prob=0.6, memetic_step=0.1): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/AdaptiveMemeticHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticHybridOptimizer.py new file mode 100644 index 000000000..1b04cfd7c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticHybridOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class AdaptiveMemeticHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.5 + self.elite_fraction = 0.1 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func, budget): + for _ in range(budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + if budget <= 0: + break + return individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(20, self.budget - evaluations) + elite_population[idx] = self.local_search( + elite_population[idx], bounds, func, local_search_budget + ) + evaluations += local_search_budget + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + # Additional mechanism for maintaining diversity + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..7a22f0be0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticOptimizer.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveMemeticOptimizer: + def __init__(self, budget=10000, population_size=40): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.4 + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + self.inertia_weight = max(0.4, self.inertia_weight * 0.98) + self.mutation_factor = np.random.uniform(0.5, 1.0) + self.crossover_probability = np.random.uniform(0.7, 1.0) + self.cognitive_coeff = np.random.uniform(1.2, 2.0) + self.social_coeff = np.random.uniform(1.2, 2.0) + + if eval_count >= self.budget: + break + + # Local search with Nelder-Mead on the best solutions + if eval_count + self.population_size <= self.budget: + for i in range(self.population_size): + res = self.nelder_mead(func, population[i]) + if res[1] < fitness[i]: + population[i] = res[0] + fitness[i] = res[1] + if res[1] < global_best_fitness: + global_best = res[0] + global_best_fitness = res[1] + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt + + def nelder_mead(self, func, x_start, tol=1e-6, max_iter=100): + from scipy.optimize import minimize + + res = minimize(func, x_start, method="Nelder-Mead", tol=tol, options={"maxiter": max_iter}) + return res.x, res.fun diff --git a/nevergrad/optimization/lama/AdaptiveMemeticOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveMemeticOptimizerV2.py new file mode 100644 index 000000000..b246435a0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticOptimizerV2.py @@ -0,0 +1,91 @@ +import numpy as np + + +class AdaptiveMemeticOptimizerV2: + def __init__(self, budget=10000, population_size=40): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.4 + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + self.inertia_weight = max(0.4, self.inertia_weight * 0.98) + self.mutation_factor = np.random.uniform(0.6, 0.9) + self.crossover_probability = np.random.uniform(0.7, 0.95) + self.cognitive_coeff = np.random.uniform(1.3, 1.7) + self.social_coeff = np.random.uniform(1.3, 1.7) + + if eval_count >= self.budget: + break + + # Enhanced Local Search on selected individuals + if eval_count + self.population_size / 2 <= self.budget: + for i in np.random.choice(self.population_size, self.population_size // 2, replace=False): + res = self.nelder_mead(func, population[i]) + if res[1] < fitness[i]: + population[i] = res[0] + fitness[i] = res[1] + if res[1] < global_best_fitness: + global_best = res[0] + global_best_fitness = res[1] + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt + + def nelder_mead(self, func, x_start, tol=1e-6, max_iter=100): + from scipy.optimize import minimize + + res = minimize(func, x_start, method="Nelder-Mead", tol=tol, options={"maxiter": max_iter}) + return res.x, res.fun diff --git a/nevergrad/optimization/lama/AdaptiveMemeticParticleSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveMemeticParticleSwarmOptimization.py new file mode 100644 index 000000000..315a83bf0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemeticParticleSwarmOptimization.py @@ -0,0 +1,101 @@ +import numpy as np + + +class AdaptiveMemeticParticleSwarmOptimization: + def __init__( + self, + budget, + population_size=50, + w=0.5, + c1=2, + c2=2, + local_search_budget_ratio=0.1, + adaptivity_factor=0.7, + ): + self.budget = budget + self.population_size = population_size + self.w = w # inertia weight + self.c1 = c1 # cognitive coefficient + self.c2 = c2 # social coefficient + self.local_search_budget_ratio = local_search_budget_ratio + self.adaptivity_factor = adaptivity_factor # adaptive factor for tuning parameters dynamically + + def local_search(self, func, x, search_budget): + best_score = func(x) + best_x = np.copy(x) + dim = len(x) + + for _ in range(search_budget): + new_x = x + np.random.uniform(-0.1, 0.1, dim) + new_x = np.clip(new_x, -5.0, 5.0) + new_score = func(new_x) + if new_score < best_score: + best_score = new_score + best_x = np.copy(new_x) + + return best_x, best_score + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize the swarm + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + velocities = np.random.uniform(-1, 1, (self.population_size, dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in personal_best_positions]) + + best_idx = np.argmin(personal_best_scores) + global_best_position = personal_best_positions[best_idx] + global_best_score = personal_best_scores[best_idx] + + evaluations = self.population_size + local_search_budget = int(self.budget * self.local_search_budget_ratio) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Update velocity + r1, r2 = np.random.rand(dim), np.random.rand(dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (global_best_position - population[i]) + ) + + # Update position + population[i] = np.clip(population[i] + velocities[i], lower_bound, upper_bound) + + # Evaluate fitness + score = func(population[i]) + evaluations += 1 + + # Update personal best + if score < personal_best_scores[i]: + personal_best_scores[i] = score + personal_best_positions[i] = population[i] + + # Update global best + if score < global_best_score: + global_best_score = score + global_best_position = population[i] + + # Apply local search on global best position for further refinement + if evaluations + local_search_budget <= self.budget: + global_best_position, global_best_score = self.local_search( + func, global_best_position, local_search_budget + ) + evaluations += local_search_budget + + # Dynamically adapt parameters based on current best performance + self.w *= self.adaptivity_factor + self.c1 *= self.adaptivity_factor + self.c2 *= self.adaptivity_factor + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemoryAssistedStrategyV41.py b/nevergrad/optimization/lama/AdaptiveMemoryAssistedStrategyV41.py new file mode 100644 index 000000000..4c266bb67 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryAssistedStrategyV41.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdaptiveMemoryAssistedStrategyV41: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_init = F_init + self.CR_init = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.learning_rate = 0.1 + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F_init * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F_init * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR_init + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 10: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_dynamic_parameters(self, iteration, total_iterations): + progress = iteration / total_iterations + self.F_init = np.clip(self.F_init - self.learning_rate * np.sin(np.pi * progress), 0.1, 1) + self.CR_init = np.clip(self.CR_init + self.learning_rate * np.cos(np.pi * progress), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_dynamic_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemoryEnhancedDualStrategyV45.py b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedDualStrategyV45.py new file mode 100644 index 000000000..078c4b0cd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedDualStrategyV45.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveMemoryEnhancedDualStrategyV45: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.8, memory_size=20): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover probability + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adaptive control, refining the tuning of F and CR based on convergence trends + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + iteration = 0 + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemoryEnhancedSearch.py b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedSearch.py new file mode 100644 index 000000000..35a775fe2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedSearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveMemoryEnhancedSearch: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Track the best solution and its fitness + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor with linear interpolation + F = self.F_min + (self.F_max - self.F_min) * (1 - evaluations / self.budget) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(population[i] + F * (best_solution - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > trial_fitness: + memory[worst_idx] = trial.copy() + memory_fitness[worst_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemoryEnhancedStrategyV42.py b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedStrategyV42.py new file mode 100644 index 000000000..1eb41410e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryEnhancedStrategyV42.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptiveMemoryEnhancedStrategyV42: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + # Introduce memory-guided mutation + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 10: # Limit memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjust parameters using a sigmoid-based adaptive method + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemoryEvolutionaryOptimizer.py b/nevergrad/optimization/lama/AdaptiveMemoryEvolutionaryOptimizer.py new file mode 100644 index 000000000..051669977 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryEvolutionaryOptimizer.py @@ -0,0 +1,105 @@ +import numpy as np + + +class AdaptiveMemoryEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 # Increased population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if ( + np.random.rand() < 0.2 and evaluations + 5 <= self.budget + ): # Adjusted local search probability + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=15, step_size=0.1 + ) # Adjusted local search parameters + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Adaptive population size adjustment + if iteration % (max_iterations // 5) == 0 and population_size > 20: + best_indices = np.argsort(fitness)[: int(0.8 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealing.py b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealing.py new file mode 100644 index 000000000..5f15be1dd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealing.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveMemoryGradientAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingPlus.py b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingPlus.py new file mode 100644 index 000000000..639c1d5bf --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingPlus.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdaptiveMemoryGradientAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingWithExplorationBoost.py b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingWithExplorationBoost.py new file mode 100644 index 000000000..170c73989 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryGradientAnnealingWithExplorationBoost.py @@ -0,0 +1,141 @@ +import numpy as np + + +class AdaptiveMemoryGradientAnnealingWithExplorationBoost: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Annealing properties + T_initial = 1.0 + T_min = 1e-5 + alpha_initial = 0.97 + beta_initial = 1.5 + + # Initial solution + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Larger memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + # Initial settings + T = T_initial + alpha = alpha_initial + beta = beta_initial + + # Define dynamic phases + phase1 = self.budget // 3 # Exploration phase + phase2 = 2 * self.budget // 3 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.5 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + else: + beta = 3.0 # Higher acceptance for local search refinement + alpha = 0.90 # Faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 8) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 6) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveMemoryGradientSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveMemoryGradientSimulatedAnnealing.py new file mode 100644 index 000000000..0dc8a6bc5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryGradientSimulatedAnnealing.py @@ -0,0 +1,120 @@ +import numpy as np + + +class AdaptiveMemoryGradientSimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Simulated Annealing parameters + T_initial = 1.0 + T_min = 1e-5 + alpha = 0.98 + beta_initial = 1.5 + + # Initialize current solution + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Dynamic Phases + phase1 = self.budget // 3 + phase2 = 2 * self.budget // 3 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + else: + beta = 2.5 + alpha = 0.95 + + # Gradient-based refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=30, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveMemoryGuidedEvolutionStrategyV57.py b/nevergrad/optimization/lama/AdaptiveMemoryGuidedEvolutionStrategyV57.py new file mode 100644 index 000000000..ac59cf7ea --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryGuidedEvolutionStrategyV57.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveMemoryGuidedEvolutionStrategyV57: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=20): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.success_memory = [] + self.failure_memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = ( + np.mean(self.success_memory, axis=0) if self.success_memory else np.zeros(self.dimension) + ) + failure_effect = ( + np.mean(self.failure_memory, axis=0) if self.failure_memory else np.zeros(self.dimension) + ) + F = np.clip( + self.F + 0.1 * np.sin(len(self.success_memory)), 0.1, 1.0 + ) # Adaptive F based on memory size + mutant = population[a] + F * (population[b] - population[c] + memory_effect - 0.1 * failure_effect) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.success_memory.append(trial - target) + if len(self.success_memory) > self.memory_size: + self.success_memory.pop(0) + return trial, f_trial + else: + self.failure_memory.append(trial - target) + if len(self.failure_memory) > self.memory_size: + self.failure_memory.pop(0) + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemoryHybridAnnealing.py b/nevergrad/optimization/lama/AdaptiveMemoryHybridAnnealing.py new file mode 100644 index 000000000..e83425c99 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryHybridAnnealing.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveMemoryHybridAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.99 # Cooling rate + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + # Adaptive memory factor + memory_factor = 0.1 + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < memory_factor: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = x_current + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + + # Hybrid component: local search around the candidate + local_search_range = 0.1 * T + x_local_candidate = x_candidate + local_search_range * np.random.randn(self.dim) + x_local_candidate = np.clip(x_local_candidate, func.bounds.lb, func.bounds.ub) + + f_candidate = func(x_candidate) + f_local_candidate = func(x_local_candidate) + evaluations += 2 + + # Use the better of the candidate and local candidate + if f_local_candidate < f_candidate: + x_candidate = x_local_candidate + f_candidate = f_local_candidate + + if f_candidate < f_current or np.exp((f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + # Adapt memory factor based on temperature + memory_factor = max(0.1, memory_factor * (1 - T / T_initial)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO.py b/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO.py new file mode 100644 index 000000000..151deb719 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO.py @@ -0,0 +1,162 @@ +import numpy as np + + +class AdaptiveMemoryHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + archive_size = 5 # Memory archive size + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_archive(archive, new_solution, new_fitness): + if len(archive) < archive_size: + archive.append((new_solution, new_fitness)) + else: + archive.sort(key=lambda x: x[1]) + if new_fitness < archive[-1][1]: + archive[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + archive = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + + update_archive(archive, trial, f_trial) + + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO_V2.py b/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO_V2.py new file mode 100644 index 000000000..368f33eb3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryHybridDEPSO_V2.py @@ -0,0 +1,164 @@ +import numpy as np + + +class AdaptiveMemoryHybridDEPSO_V2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = ( + 0.1 * self.budget + ) # Lower restart threshold to 10% of budget to encourage exploration + archive_size = 10 # Increase memory archive size for better exploration + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_archive(archive, new_solution, new_fitness): + if len(archive) < archive_size: + archive.append((new_solution, new_fitness)) + else: + archive.sort(key=lambda x: x[1]) + if new_fitness < archive[-1][1]: + archive[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + archive = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + + update_archive(archive, trial, f_trial) + + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMemoryParticleDifferentialSearch.py b/nevergrad/optimization/lama/AdaptiveMemoryParticleDifferentialSearch.py new file mode 100644 index 000000000..118693464 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemoryParticleDifferentialSearch.py @@ -0,0 +1,113 @@ +import numpy as np + + +class AdaptiveMemoryParticleDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 50 + inertia_weight = 0.7 + cognitive_coefficient = 1.5 + social_coefficient = 1.5 + differential_weight = 0.8 + crossover_rate = 0.9 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory_size = 5 + memory = [] + + while evaluations < self.budget: + for i in range(population_size): + # Particle Swarm Optimization Part + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential Evolution Part with Adaptive Memory + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = AdaptiveMemoryParticleDifferentialSearch(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/AdaptiveMemorySelfTuningStrategyV60.py b/nevergrad/optimization/lama/AdaptiveMemorySelfTuningStrategyV60.py new file mode 100644 index 000000000..ad510fb49 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemorySelfTuningStrategyV60.py @@ -0,0 +1,95 @@ +import numpy as np + + +class AdaptiveMemorySelfTuningStrategyV60: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + max_memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.max_memory_size = max_memory_size + self.success_rate = 0.1 # Initial estimate + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + self.success_rate += 0.1 * (1 - self.success_rate) # Increase success rate + if len(self.memory) > self.max_memory_size: + self.memory.pop(0) + return trial, f_trial + else: + self.success_rate *= 0.9 # Decay success rate + return target, f_target + + def adjust_parameters(self): + # Adjust memory size and parameters based on success rate + self.max_memory_size = min(20, max(5, int(5 + 15 * self.success_rate))) + # Continuous adaptation using performance feedback + self.F = np.clip(0.5 + 0.4 * self.success_rate, 0.1, 1) + self.CR = np.clip(0.9 - 0.4 * self.success_rate, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters() + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveMemorySimulatedAnnealing.py new file mode 100644 index 000000000..6a853153f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMemorySimulatedAnnealing.py @@ -0,0 +1,63 @@ +import numpy as np + + +class AdaptiveMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.99 # Cooling rate + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + # Adaptive memory factor + memory_factor = 0.1 + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + # Adaptive memory influence + if np.random.rand() < memory_factor: + x_candidate = memory[i] + T * np.random.randn(self.dim) + else: + x_candidate = x_current + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp((f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + # Adapt memory factor based on temperature + memory_factor = max(0.1, memory_factor * (1 - T / T_initial)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSO.py b/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSO.py new file mode 100644 index 000000000..9487e33c2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class AdaptiveMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 1500 + self.meta_net_lr = 0.4 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSOv13.py b/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSOv13.py new file mode 100644 index 000000000..ffe533c17 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMetaNetAQAPSOv13.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class AdaptiveMetaNetAQAPSOv13: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.25 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMetaNetPSO_v3.py b/nevergrad/optimization/lama/AdaptiveMetaNetPSO_v3.py new file mode 100644 index 000000000..d9848d9e1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMetaNetPSO_v3.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class AdaptiveMetaNetPSO_v3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 2.0 + self.social_weight = 2.5 + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMetaNetPSOv3.py b/nevergrad/optimization/lama/AdaptiveMetaNetPSOv3.py new file mode 100644 index 000000000..fe2cf9a5e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMetaNetPSOv3.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class AdaptiveMetaNetPSOv3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 2.0 + self.social_weight = 2.5 + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMetaheuristicOptimization.py b/nevergrad/optimization/lama/AdaptiveMetaheuristicOptimization.py new file mode 100644 index 000000000..524a48de0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMetaheuristicOptimization.py @@ -0,0 +1,146 @@ +import numpy as np + + +class AdaptiveMetaheuristicOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_decay = 0.99 + + # Differential Evolution parameters + F = 0.8 + CR = 0.9 + + # Gradient-based search parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(0.4, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveMetaheuristicOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMomentumOptimization.py b/nevergrad/optimization/lama/AdaptiveMomentumOptimization.py new file mode 100644 index 000000000..67a1353b0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMomentumOptimization.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveMomentumOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.alpha = 0.1 # Learning rate + self.beta1 = 0.9 # Momentum term + self.beta2 = 0.999 # RMSProp term + self.epsilon = 1e-8 # To prevent division by zero + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + x = np.random.uniform(self.lb, self.ub, self.dim) + m = np.zeros(self.dim) + v = np.zeros(self.dim) + + for t in range(1, self.budget + 1): + # Evaluate function + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Numerical gradient estimation + grad = self._approx_gradient(func, x) + + # Update biased first moment estimate + m = self.beta1 * m + (1 - self.beta1) * grad + + # Update biased second raw moment estimate + v = self.beta2 * v + (1 - self.beta2) * (grad**2) + + # Compute bias-corrected first moment estimate + m_hat = m / (1 - self.beta1**t) + + # Compute bias-corrected second raw moment estimate + v_hat = v / (1 - self.beta2**t) + + # Update parameters + x -= self.alpha * m_hat / (np.sqrt(v_hat) + self.epsilon) + + # Ensure the solutions remain within bounds + x = np.clip(x, self.lb, self.ub) + + return self.f_opt, self.x_opt + + def _approx_gradient(self, func, x): + # Gradient approximation using central difference + grad = np.zeros(self.dim) + h = 1e-5 # Step size for numerical differentiation + for i in range(self.dim): + x_forward = x.copy() + x_backward = x.copy() + x_forward[i] += h + x_backward[i] -= h + + f_forward = func(x_forward) + f_backward = func(x_backward) + + grad[i] = (f_forward - f_backward) / (2 * h) + + return grad diff --git a/nevergrad/optimization/lama/AdaptiveMultiExplorationAlgorithm.py b/nevergrad/optimization/lama/AdaptiveMultiExplorationAlgorithm.py new file mode 100644 index 000000000..a08823db6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiExplorationAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdaptiveMultiExplorationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 50 + self.F = 0.5 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_chance = 0.2 # Probability to perform local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters + self.adaptive_F_CR(evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + for i in range(self.dim): + x_new = best_x.copy() + step_size = np.random.uniform(-0.1, 0.1) + x_new[i] = np.clip(best_x[i] + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_F_CR(self, evaluations): + # Adaptive parameters adjustment + if evaluations % 100 == 0: + self.F = np.random.uniform(0.4, 0.9) + self.CR = np.random.uniform(0.1, 0.9) + self.local_search_chance = np.random.uniform(0.1, 0.3) diff --git a/nevergrad/optimization/lama/AdaptiveMultiMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveMultiMemorySimulatedAnnealing.py new file mode 100644 index 000000000..7b457fcf6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiMemorySimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdaptiveMultiMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + worst_memory_idx = np.argmax(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + if f_candidate < memory_scores[worst_memory_idx]: + memory[worst_memory_idx] = x_candidate + memory_scores[worst_memory_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i]) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveMultiOperatorDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveMultiOperatorDifferentialEvolution.py new file mode 100644 index 000000000..7198ded1e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiOperatorDifferentialEvolution.py @@ -0,0 +1,162 @@ +import numpy as np + + +class AdaptiveMultiOperatorDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + + # For adaptive population sizing + self.min_pop_size = 30 + self.max_pop_size = 100 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + # Use memory to adapt parameters F and CR + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(10): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if np.random.rand() < 0.5: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + mutant = self.mutate_best_1(global_best_position, population[i], parent1, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..b0bb98bda --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearch.py @@ -0,0 +1,141 @@ +import numpy as np + + +class AdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 15 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 100 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV2.py b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV2.py new file mode 100644 index 000000000..fc75a496f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV2.py @@ -0,0 +1,141 @@ +import numpy as np + + +class AdaptiveMultiOperatorSearchV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveMultiOperatorSearchV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV3.py b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV3.py new file mode 100644 index 000000000..7c1d2946b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiOperatorSearchV3.py @@ -0,0 +1,145 @@ +import numpy as np + + +class AdaptiveMultiOperatorSearchV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.8 # Cognitive constant + c2 = 1.8 # Social constant + w = 0.6 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.9 # Differential weight + CR = 0.8 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 100 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.85 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + # Reset personal bests and scores + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveMultiOperatorSearchV3(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealing.py b/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealing.py new file mode 100644 index 000000000..0449197fc --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealing.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdaptiveMultiPhaseAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealingV2.py b/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealingV2.py new file mode 100644 index 000000000..b1f739db7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiPhaseAnnealingV2.py @@ -0,0 +1,108 @@ +import numpy as np + + +class AdaptiveMultiPhaseAnnealingV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Periodically perform a local refinement using gradient approximation + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=5, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/AdaptiveMultiPhaseOptimization.py b/nevergrad/optimization/lama/AdaptiveMultiPhaseOptimization.py new file mode 100644 index 000000000..1fc9ebde0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiPhaseOptimization.py @@ -0,0 +1,147 @@ +import numpy as np + + +class AdaptiveMultiPhaseOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Local Search) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Local Search for fine-tuning solutions + if i % 10 == 0: # Perform local search every 10 iterations + for _ in range(5): # Number of local search steps + x_ls = x + np.random.normal(0, 0.1, self.dim) + x_ls = np.clip(x_ls, self.lower_bound, self.upper_bound) + f_ls = func(x_ls) + + if f_ls < f: + positions[idx] = x_ls + f = f_ls + + if f_ls < personal_best_scores[idx]: + personal_best_scores[idx] = f_ls + personal_bests[idx] = x_ls.copy() + + if f_ls < global_best_score: + global_best_score = f_ls + global_best_position = x_ls.copy() + + if f_ls < self.f_opt: + self.f_opt = f_ls + self.x_opt = x_ls.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveMultiPhaseOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiPopulationDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveMultiPopulationDifferentialEvolution.py new file mode 100644 index 000000000..eefc13dbd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiPopulationDifferentialEvolution.py @@ -0,0 +1,177 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import cdist + + +class AdaptiveMultiPopulationDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.num_subpopulations = 5 + self.subpop_size = self.pop_size // self.num_subpopulations + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 + self.CR = 0.9 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(self.pop_size) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, self.pop_size - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + subpopulations = np.array_split(population, self.num_subpopulations) + subfitness = np.array_split(fitness, self.num_subpopulations) + + new_population = [] + new_fitness = [] + + for subpop, subfit in zip(subpopulations, subfitness): + for i in range(self.subpop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.subpop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(subfit) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(subpop, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(subpop, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(subpop, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(subpop, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < subfit[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(subpop[i]) + new_fitness.append(subfit[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Perform local search on elite solutions + elite_indices = np.argsort(fitness)[: self.num_subpopulations] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= 5: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Crowding distance to maintain diversity + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + # Opposition-based learning + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/AdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..311f04957 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class AdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased for better search space coverage + self.initial_F = 0.9 # Higher mutation factor for broader search + self.initial_CR = 0.9 # Higher crossover rate for better recombination + self.elite_rate = 0.25 # Increased elite rate for stronger convergence + self.local_search_rate = 0.6 # Increased for more intensive local searches + self.memory_size = 15 # Increased memory size for better adaptive parameters + self.w = 0.7 # Lower inertia weight for convergence + self.c1 = 1.7 # Stronger cognitive component + self.c2 = 1.7 # Stronger social component + self.phase_switch_ratio = 0.4 # Longer evolutionary phase + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # More precise local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategicOptimizer.py b/nevergrad/optimization/lama/AdaptiveMultiStrategicOptimizer.py new file mode 100644 index 000000000..4b0e015e8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategicOptimizer.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdaptiveMultiStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Parameters and initial conditions + population_size = 200 + mutation_rate = 0.9 + recombination_rate = 0.1 + sigma = 0.5 # Mutation step size + elite_size = int(0.1 * population_size) + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + while evaluations < self.budget: + new_population = [] + indices = np.arange(population_size) + + # Elitism + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + new_population.append(population[idx]) + + # Main evolutionary process + while len(new_population) < population_size: + if np.random.rand() < mutation_rate: + # Mutation strategy + idx = np.random.choice(indices) + individual = population[idx] + mutant = individual + sigma * np.random.randn(self.dim) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + mutant_fitness = func(mutant) + evaluations += 1 + + # Acceptance of new mutant + if mutant_fitness < fitness[idx]: + new_population.append(mutant) + if mutant_fitness < best_fitness: + best_solution = mutant + best_fitness = mutant_fitness + else: + new_population.append(individual) + else: + # Recombination + parents = np.random.choice(indices, 2, replace=False) + alpha = np.random.rand() + offspring = alpha * population[parents[0]] + (1 - alpha) * population[parents[1]] + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + offspring_fitness = func(offspring) + evaluations += 1 + + # Acceptance of new offspring + if offspring_fitness < fitness[parents[0]] and offspring_fitness < fitness[parents[1]]: + new_population.append(offspring) + if offspring_fitness < best_fitness: + best_solution = offspring + best_fitness = offspring_fitness + else: + new_population.append(population[parents[0]]) + + population = np.array(new_population) + fitness = np.array([func(x) for x in population]) + + # Adaptive mutation rate adjustment + mutation_rate = min(1.0, mutation_rate + np.random.uniform(-0.1, 0.1)) + sigma = max(0.001, sigma * np.exp(0.1 * (np.mean(fitness) - best_fitness) / best_fitness)) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyDE.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyDE.py new file mode 100644 index 000000000..4e1e2e2d6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyDE.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveMultiStrategyDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 20 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation and crossover factors + success_rate = max(0, (self.budget - self.pop_size * generation) / self.budget) + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) * success_rate + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) * success_rate + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Hybrid mutation strategy based on success rate + if success_rate < 0.3: + mutant = x1 + mutation_factor * (x2 - x3) + elif success_rate < 0.6: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + if np.random.rand() < 0.5: + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + else: + # Gradient-based adjustment + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + result = minimize(func, best_x + perturbation, method="BFGS", options={"maxiter": 10}) + + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyDEWithMemory.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyDEWithMemory.py new file mode 100644 index 000000000..92839e769 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyDEWithMemory.py @@ -0,0 +1,128 @@ +import numpy as np + + +class AdaptiveMultiStrategyDEWithMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.5, 0.9 + CR_min, CR_max = 0.1, 1.0 + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + memory_size = 5 + memory_F = np.full(memory_size, 0.5) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), F_min, F_max) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), CR_min, CR_max) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..dc31064c7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolution.py @@ -0,0 +1,124 @@ +import numpy as np + + +class AdaptiveMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + new_x = trial - self.epsilon * grad + perturbation + levy_step + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolutionPlus.py new file mode 100644 index 000000000..28fd308e1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyDifferentialEvolutionPlus.py @@ -0,0 +1,135 @@ +import numpy as np + + +class AdaptiveMultiStrategyDifferentialEvolutionPlus: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveMultiStrategyDifferentialEvolutionPlus(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizer.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizer.py new file mode 100644 index 000000000..908b94086 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizer.py @@ -0,0 +1,125 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveMultiStrategyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.5 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 50 + self.strategy_switch_threshold = 0.1 # Threshold for switching strategies + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True # Start with DE strategy + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + # PSO strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + # Strategy switch based on performance improvement + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdaptiveMultiStrategyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizerV2.py new file mode 100644 index 000000000..df6d626d0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveMultiStrategyOptimizerV2.py @@ -0,0 +1,155 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveMultiStrategyOptimizerV2: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.9 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.adaptive_crossover_prob = [0.9, 0.8, 0.7, 0.6, 0.5] + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + archive_fitness = np.array([evaluate(ind) for ind in archive]) + eval_count += len(archive) + if best_fitness not in archive_fitness: + worst_index = np.argmax(archive_fitness) + if best_fitness < archive_fitness[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/AdaptiveNicheDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/AdaptiveNicheDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..c13e1a8b6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveNicheDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveNicheDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 40 # Increased swarm size for better exploration + self.init_num_niches = 5 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + self.local_search_prob = 0.3 # Probability for performing local search + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adjust local search probability based on progress + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveNichingDE_PSO.py b/nevergrad/optimization/lama/AdaptiveNichingDE_PSO.py new file mode 100644 index 000000000..f9a22736d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveNichingDE_PSO.py @@ -0,0 +1,125 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveNichingDE_PSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.num_niches = 5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) for _ in range(self.num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.num_niches)] + local_bests = [niche[np.argmin(fit)] for niche, fit in zip(niches, fitness)] + local_best_fits = [min(fit) for fit in fitness] + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + best_niche_idx = np.argmin(local_best_fits) + + for n in range(self.num_niches): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 + * r2 + * (niches[best_niche_idx][np.argmin(fitness[best_niche_idx])] - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.25 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(self.num_niches): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Adaptive parameter adjustment + if np.random.rand() < 0.1: + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..5b055684b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveOppositionBasedDifferentialEvolution: + def __init__(self, budget=10000, pop_size=30, f_init=0.8, cr_init=0.9, scaling_factor=0.1): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + success_rate = 1.0 * success / self.pop_size + f_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) * (1.0 - success_rate) + cr_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) * (1.0 - success_rate) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + success_count = 0 + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + success_count += 1 + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + success_count += 1 + + f_current, cr_current = self.adaptive_parameter_update( + success_count, f_current, cr_current, self.scaling_factor + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolutionImproved.py b/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolutionImproved.py new file mode 100644 index 000000000..6f72e4225 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveOppositionBasedDifferentialEvolutionImproved.py @@ -0,0 +1,71 @@ +import numpy as np + + +class AdaptiveOppositionBasedDifferentialEvolutionImproved: + def __init__(self, budget=10000, pop_size=20, f_min=0.4, f_max=0.9, cr_min=0.1, cr_max=0.9): + self.budget = budget + self.pop_size = pop_size + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def self_adaptive_parameter_update(self, success, f, cr): + f_new = f * (1.0 + 0.1 * (1.0 if success else -1.0)) + cr_new = cr + 0.1 * (0.9 if success else -0.9) + return max(self.f_min, min(self.f_max, f_new)), max(self.cr_min, min(self.cr_max, cr_new)) + + def __call__(self, func): + self.initialize_population(func) + f_current = (self.f_min + self.f_max) / 2 + cr_current = (self.cr_min + self.cr_max) / 2 + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + f_current, cr_current = self.self_adaptive_parameter_update(True, f_current, cr_current) + + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + if func(trial_solution) < func(current_solution): + self.population[j] = trial_solution + self.pop_fitness[j] = func(trial_solution) + f_current, cr_current = self.self_adaptive_parameter_update(True, f_current, cr_current) + else: + f_current, cr_current = self.self_adaptive_parameter_update(False, f_current, cr_current) + + if func(opponent_solution) < func(current_solution): + self.population[j] = opponent_solution + self.pop_fitness[j] = func(opponent_solution) + f_current, cr_current = self.self_adaptive_parameter_update(True, f_current, cr_current) + else: + f_current, cr_current = self.self_adaptive_parameter_update(False, f_current, cr_current) + + if func(trial_solution) < self.f_opt: + self.f_opt = func(trial_solution) + self.x_opt = trial_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE.py b/nevergrad/optimization/lama/AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE.py new file mode 100644 index 000000000..f369be751 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE.py @@ -0,0 +1,108 @@ +import numpy as np + + +class AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + de_scale=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Initial Bandwidth + self.bw_min = bw_min # Minimum Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + self.bw_range = bw_range # Bandwidth range for dynamic adjustment + self.de_scale = de_scale # Scale factor for differential evolution + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func, bandwidth): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += bandwidth * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return max(self.bw_range / (1 + iteration), self.bw_min) # Dynamic adjustment of bandwidth + + def differential_evolution(self, func, current_harmony, best_harmony): + mutant_harmony = current_harmony + self.de_scale * (best_harmony - current_harmony) + return np.clip(mutant_harmony, func.bounds.lb, func.bounds.ub) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = self.adjust_bandwidth(i) # Update bandwidth dynamically + + new_harmony = self.harmony_search(func, self.bw) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + best_harmony = self.harmony_memory[np.argmin(self.harmony_memory_fitness)] + trial_harmony = self.differential_evolution(func, new_harmony, best_harmony) + trial_fitness = func(trial_harmony) + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_harmony + + idx_worst_trial = np.argmax(self.harmony_memory_fitness) + if trial_fitness < self.harmony_memory_fitness[idx_worst_trial]: + self.harmony_memory[idx_worst_trial] = trial_harmony + self.harmony_memory_fitness[idx_worst_trial] = trial_fitness + + self.bw = self.bw * self.bw_decay # Decay bandwidth at each iteration + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveOrthogonalDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveOrthogonalDifferentialEvolution.py new file mode 100644 index 000000000..cda23264b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveOrthogonalDifferentialEvolution.py @@ -0,0 +1,58 @@ +import numpy as np + + +class AdaptiveOrthogonalDifferentialEvolution: + def __init__( + self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, orthogonal_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + self.orthogonal_factor_min = 0.1 + self.orthogonal_factor_max = 0.9 + self.orthogonal_factor_decay = 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + orthogonal_factor = self.orthogonal_factor + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + orthogonal_vector = np.random.normal(0, orthogonal_factor, size=dimension) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + orthogonal_factor = max( + orthogonal_factor * self.orthogonal_factor_decay, self.orthogonal_factor_min + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.delete(np.arange(len(population)), current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/AdaptiveOscillatoryCrossoverDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveOscillatoryCrossoverDifferentialEvolution.py new file mode 100644 index 000000000..17f25d413 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveOscillatoryCrossoverDifferentialEvolution.py @@ -0,0 +1,53 @@ +import numpy as np + + +class AdaptiveOscillatoryCrossoverDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Adjusted population size for balance between exploration and exploitation + self.F = 0.5 # Mutation factor initially set to 0.5 + self.CR_init = 0.5 # Initial crossover probability is moderate + self.CR_final = 0.1 # Final crossover probability is low to focus search later + self.alpha = 0.1 # Adaptive factor for mutation rate + self.mutation_strategy = "best/2/bin" # Mutation strategy using the best individual + + def __call__(self, func): + # Initial population uniformly distributed within the search space + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Tracking the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Oscillatory crossover rate and adaptive mutation factor + CR = self.CR_final + (self.CR_init - self.CR_final) * np.cos(np.pi * iteration / n_iterations) + F = self.F + self.alpha * np.sin(np.pi * iteration / n_iterations) + + for i in range(self.pop_size): + # Mutation using best/2/bin strategy + idxs = np.random.choice([idx for idx in range(self.pop_size) if idx != i], 3, replace=False) + a, b, c = pop[idxs] + mutant = best_ind + F * (a - b + c - best_ind) + + # Clipping to ensure individuals stay within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptiveParticleDifferentialSearch.py b/nevergrad/optimization/lama/AdaptiveParticleDifferentialSearch.py new file mode 100644 index 000000000..09a418772 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveParticleDifferentialSearch.py @@ -0,0 +1,97 @@ +import numpy as np + + +class AdaptiveParticleDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 40 + inertia_weight = 0.5 + cognitive_coefficient = 2.0 + social_coefficient = 2.0 + differential_weight = 0.8 + crossover_rate = 0.9 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + # Particle Swarm Optimization Part + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential Evolution Part + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = AdaptiveParticleDifferentialSearch(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/AdaptiveParticleSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveParticleSwarmOptimization.py new file mode 100644 index 000000000..5b1ec4766 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveParticleSwarmOptimization.py @@ -0,0 +1,62 @@ +import numpy as np + + +class AdaptiveParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.population_size = 100 + self.w_min = 0.4 + self.w_max = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.velocity_limit = 0.2 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocity = np.random.uniform( + -self.velocity_limit, self.velocity_limit, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_position = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + + while evaluations < self.budget: + w = self.w_max - ((self.w_max - self.w_min) * (evaluations / self.budget)) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + velocity[i] = ( + w * velocity[i] + + self.c1 * r1 * (personal_best_position[i] - population[i]) + + self.c2 * r2 * (self.x_opt - population[i]) + ) + velocity[i] = np.clip(velocity[i], -self.velocity_limit, self.velocity_limit) + population[i] = np.clip(population[i] + velocity[i], self.lb, self.ub) + + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < personal_best_fitness[i]: + personal_best_position[i] = population[i].copy() + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i].copy() + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePerturbationDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptivePerturbationDifferentialEvolution.py new file mode 100644 index 000000000..5229442e0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePerturbationDifferentialEvolution.py @@ -0,0 +1,51 @@ +import numpy as np + + +class AdaptivePerturbationDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 150 # Population size adjusted for efficiency + self.F_base = 0.5 # Base differential weight + self.CR = 0.7 # Crossover probability + self.adapt_rate = 0.1 # Rate at which F is adapted + + def __call__(self, func): + # Initialize population uniformly within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + # Get the initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Iteration loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + F = self.F_base + self.adapt_rate * np.sin(iteration / n_iterations * np.pi) + for i in range(self.pop_size): + # Rand/1/bin strategy with adaptive perturbation + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F * (b - c) + + # Clip within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Evaluate + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptivePopulationDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/AdaptivePopulationDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..14bffacfa --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePopulationDifferentialEvolutionOptimizer.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptivePopulationDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + while self.eval_count < self.budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= self.budget: + break + + # Population adaptation + if self.eval_count % (self.budget // 10) == 0: + mean_fitness = np.mean(fitness) + std_dev_fitness = np.std(fitness) + new_pop_size = int(self.pop_size * (1 + std_dev_fitness / mean_fitness)) + new_pop_size = min( + max(new_pop_size, 10), 100 + ) # Keep population size within reasonable limits + + if new_pop_size != self.pop_size: + if new_pop_size > self.pop_size: + new_individuals = np.random.uniform( + self.bounds[0], self.bounds[1], (new_pop_size - self.pop_size, self.dim) + ) + new_fitness = np.array([func(ind) for ind in new_individuals]) + self.eval_count += new_fitness.size + population = np.concatenate((population, new_individuals)) + fitness = np.concatenate((fitness, new_fitness)) + F_values = np.concatenate((F_values, np.full(new_individuals.shape[0], self.init_F))) + CR_values = np.concatenate( + (CR_values, np.full(new_individuals.shape[0], self.init_CR)) + ) + else: + selected_indices = np.argsort(fitness)[:new_pop_size] + population = population[selected_indices] + fitness = fitness[selected_indices] + F_values = F_values[selected_indices] + CR_values = CR_values[selected_indices] + + self.pop_size = new_pop_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py b/nevergrad/optimization/lama/AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py new file mode 100644 index 000000000..e910b18ff --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py @@ -0,0 +1,155 @@ +import numpy as np + + +class AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-5 + self.learning_rate = 0.1 + + # For adaptive population sizing + self.min_pop_size = 20 + self.max_pop_size = 70 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(5): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self, current_pop_size): + new_pop_size = np.random.randint(self.min_pop_size, self.max_pop_size + 1) + return new_pop_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size( + self.initial_pop_size + ) # Adapt population size here + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size # Update population size + + population = np.copy(new_population) + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePopulationMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptivePopulationMemeticOptimizer.py new file mode 100644 index 000000000..e05fe5718 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePopulationMemeticOptimizer.py @@ -0,0 +1,101 @@ +import numpy as np + + +class AdaptivePopulationMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5, step_size=0.01): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Adaptive population size adjustment + if iteration % (max_iterations // 10) == 0 and population_size > 10: + best_indices = np.argsort(fitness)[: int(0.8 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePopulationResizingOptimizer.py b/nevergrad/optimization/lama/AdaptivePopulationResizingOptimizer.py new file mode 100644 index 000000000..cd17b8c98 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePopulationResizingOptimizer.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptivePopulationResizingOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + pop_resize_factor=1.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.pop_resize_factor = pop_resize_factor + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="Nelder-Mead", options={"maxfev": budget, "xatol": 1e-4, "disp": False} + ) + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + self.eval_count = self.init_pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + pop_size = self.init_pop_size + while self.eval_count < global_search_budget: + for i in range(pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Adjust population size adaptively + fitness_std = np.std(fitness) + if fitness_std < 1e-3: # If the population is converging + pop_size = min(int(pop_size / self.pop_resize_factor), self.init_pop_size) + else: # If the population is diverging + pop_size = min(int(pop_size * self.pop_resize_factor), self.budget - self.eval_count) + + # Reinitialize if population size increases + if pop_size > len(population): + new_individuals = np.random.uniform( + self.bounds[0], self.bounds[1], (pop_size - len(population), self.dim) + ) + new_fitness = np.array([func(ind) for ind in new_individuals]) + new_velocities = np.random.uniform(-1, 1, (pop_size - len(population), self.dim)) + self.eval_count += len(new_individuals) + + population = np.vstack((population, new_individuals)) + fitness = np.hstack((fitness, new_fitness)) + velocities = np.vstack((velocities, new_velocities)) + F_values = np.hstack((F_values, np.full(len(new_individuals), self.init_F))) + CR_values = np.hstack((CR_values, np.full(len(new_individuals), self.init_CR))) + p_best = np.vstack((p_best, new_individuals)) + p_best_fitness = np.hstack((p_best_fitness, new_fitness)) + + # Perform local search on the best individuals + for i in range(len(population)): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePrecisionCohortOptimizationV3.py b/nevergrad/optimization/lama/AdaptivePrecisionCohortOptimizationV3.py new file mode 100644 index 000000000..857096df0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionCohortOptimizationV3.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptivePrecisionCohortOptimizationV3: + def __init__(self, budget, dimension=5, population_size=150, elite_fraction=0.15, mutation_intensity=0.5): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Base intensity for mutation + + def __call__(self, func): + # Initialize the population uniformly within the search space [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select the elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population members + for i in range(self.population_size): + if np.random.rand() < self.adaptive_mutation_rate(evaluations): + # Mutation: pick a random elite, apply Gaussian noise + parent_idx = np.random.choice(elite_indices) + mutation = np.random.normal(0, self.adaptive_mutation_scale(evaluations), self.dimension) + child = np.clip(population[parent_idx] + mutation, -5.0, 5.0) + else: + # Crossover: pick two different elites, combine their features + parents = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + (population[parents[0], :crossover_point], population[parents[1], crossover_point:]) + ) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def adaptive_mutation_rate(self, evaluations): + # Gradually decrease mutation rate to shift from exploration to exploitation + return max(0.05, 1 - (evaluations / self.budget) ** 0.5) + + def adaptive_mutation_scale(self, evaluations): + # Decay mutation scale to fine-tune search in later stages + return self.mutation_intensity * np.exp(-4 * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/AdaptivePrecisionControlDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptivePrecisionControlDifferentialEvolution.py new file mode 100644 index 000000000..b76f05a00 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionControlDifferentialEvolution.py @@ -0,0 +1,54 @@ +import numpy as np + + +class AdaptivePrecisionControlDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Increased population size for more diversity + self.F = 0.5 # Base mutation factor + self.CR = 0.9 # Base crossover probability + self.adaptive_F = True # Flag to adaptively adjust F + self.adaptive_CR = True # Flag to adaptively adjust CR + + def __call__(self, func): + # Initialize population within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the best initial solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + for i in range(int(self.budget / self.pop_size)): + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + self.F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive strategy updates + if self.adaptive_F: + self.F = max(0.1, self.F * (0.99 if best_fitness < 1e-6 else 1.01)) + if self.adaptive_CR: + self.CR = min(1.0, self.CR * (1.01 if best_fitness < 1e-4 else 0.99)) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptivePrecisionCrossoverEvolution.py b/nevergrad/optimization/lama/AdaptivePrecisionCrossoverEvolution.py new file mode 100644 index 000000000..52d1d256d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionCrossoverEvolution.py @@ -0,0 +1,77 @@ +import numpy as np + + +class AdaptivePrecisionCrossoverEvolution: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 200 # Increased population for more diversity + self.elite_size = 40 # Increased elite size for better exploitation + self.offspring_size = 160 # Adjusted offspring size for balance + self.mutation_scale = 0.01 # Reduced mutation scale for finer adjustments + self.crossover_prob = 0.85 # Higher crossover probability to encourage more mixing + self.mutation_prob = 0.1 # Probability of mutation happening per offspring + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_prob: + cross_point = np.random.randint(1, self.dim) + child = np.empty(self.dim) + child[:cross_point] = parent1[:cross_point] + child[cross_point:] = parent2[cross_point:] + return child + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + if np.random.rand() < self.mutation_prob: + mutation_points = np.random.randint(0, self.dim) + individual[mutation_points] += np.random.normal(0, self.mutation_scale) + individual = np.clip(individual, self.lower_bound, self.upper_bound) + return individual + + def reproduce(self, parents): + offspring = np.empty((self.offspring_size, self.dim)) + num_parents = len(parents) + for i in range(self.offspring_size): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + child = self.crossover(parents[p1], parents[p2]) + child = self.mutate(child) + offspring[i] = child + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_survivors(population, fitness) + + offspring = self.reproduce(elite_population) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptivePrecisionDifferentialEvolution.py new file mode 100644 index 000000000..e8e20455a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionDifferentialEvolution.py @@ -0,0 +1,60 @@ +import numpy as np + + +class AdaptivePrecisionDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The given dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 150 # Slightly reduced population for faster generations + mutation_factor = 0.8 # Increased mutation factor for more aggressive exploration + crossover_prob = 0.9 # High crossover for aggressive recombination + adaptive_threshold = 0.1 # Threshold for adapting mutation and crossover + + # Initialize population randomly + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + # Main optimization loop + for generation in range(self.budget // population_size): + # Adapt mutation factor and crossover probability based on progress + progress = generation / (self.budget // population_size) + if progress > adaptive_threshold: + mutation_factor *= 0.95 # Decrease to fine-tune exploration + crossover_prob *= 0.98 # Decrease to stabilize gene propagation + + for i in range(population_size): + # Mutation using "rand/1" strategy + indices = [j for j in range(population_size) if j != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover - Binomial + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionDivideSearch.py b/nevergrad/optimization/lama/AdaptivePrecisionDivideSearch.py new file mode 100644 index 000000000..5af571419 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionDivideSearch.py @@ -0,0 +1,44 @@ +import numpy as np + + +class AdaptivePrecisionDivideSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize center point + center_point = np.random.uniform(-5.0, 5.0, self.dim) + center_f = func(center_point) + if center_f < self.f_opt: + self.f_opt = center_f + self.x_opt = center_point + + # Division strategy parameters + num_divisions = 10 + division_size = 10.0 / num_divisions + refine_factor = 0.9 # Factor to reduce division size for further refinements + exploration_steps = self.budget // (num_divisions**self.dim) # Exploration steps per division + + # Generate a grid around the center point and explore each grid division + grid_offsets = np.linspace(-5.0, 5.0, num_divisions) + for offset_dims in np.ndindex(*(num_divisions,) * self.dim): + local_center = center_point + np.array([grid_offsets[dim] for dim in offset_dims]) + local_center = np.clip(local_center, -5.0, 5.0) # Ensure it is within bounds + local_scale = division_size / 2 + + # Local search within the grid division + for _ in range(exploration_steps): + candidate = local_center + np.random.uniform(-local_scale, local_scale, self.dim) + candidate_f = func(candidate) + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Refine the division size for further precision + division_size *= refine_factor + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePrecisionDynamicMemoryStrategyV48.py b/nevergrad/optimization/lama/AdaptivePrecisionDynamicMemoryStrategyV48.py new file mode 100644 index 000000000..ee6c3cad2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionDynamicMemoryStrategyV48.py @@ -0,0 +1,90 @@ +import numpy as np + + +class AdaptivePrecisionDynamicMemoryStrategyV48: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=10, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover probability + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + 0.1 * memory_effect + else: + mutant = population[a] + self.F * (population[b] - population[c]) + 0.3 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + iteration = 0 + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptivePrecisionEvolutionStrategy.py new file mode 100644 index 000000000..3459772c2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionEvolutionStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptivePrecisionEvolutionStrategy: + def __init__( + self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=40, elite_fraction=0.1 + ): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.elite_fraction = elite_fraction + self.sigma = 0.5 # Initial standard deviation for Gaussian mutation + self.learning_rate = 0.2 # Learning rate for sigma adaptation + + def mutate(self, individual): + """Gaussian mutation""" + mutation = np.random.normal(0, self.sigma, self.dimension) + return np.clip(individual + mutation, self.bounds["lb"], self.bounds["ub"]) + + def select_elites(self, population, fitness, num_elites): + """Select elite individuals""" + elite_indices = np.argsort(fitness)[:num_elites] + return population[elite_indices] + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + num_elites = int(self.population_size * self.elite_fraction) + elites = self.select_elites(population, fitness, num_elites) + + # Create the offspring by mutation + offspring = np.array([self.mutate(ind) for ind in population]) + offspring_fitness = np.array([func(ind) for ind in offspring]) + evaluations += self.population_size + + # Combine and select the next generation + combined_population = np.vstack((elites, offspring[num_elites:])) + combined_fitness = np.concatenate((fitness[:num_elites], offspring_fitness[num_elites:])) + + # Environment selection + selection_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[selection_indices] + fitness = combined_fitness[selection_indices] + + # Update best solution found + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + # Adapt mutation step size + successful_mutations = (offspring_fitness < fitness).mean() + self.sigma *= np.exp(self.learning_rate * (successful_mutations - 0.2) / (1 - 0.2)) + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptivePrecisionFocalStrategy.py b/nevergrad/optimization/lama/AdaptivePrecisionFocalStrategy.py new file mode 100644 index 000000000..ac5339a2f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionFocalStrategy.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdaptivePrecisionFocalStrategy: + def __init__( + self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=100, focal_ratio=0.2 + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.focal_population_size = int(population_size * focal_ratio) + self.sigma = 0.3 # Initial standard deviation for mutations + self.learning_rate = 0.1 # Learning rate for self-adaptation of sigma + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation with adaptive sigma + return np.clip( + individual + np.random.normal(0, self.sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def select_focal_group(self, population, fitness): + # Select a smaller focal group based on the best fitness values + sorted_indices = np.argsort(fitness) + return population[sorted_indices[: self.focal_population_size]] + + def recombine(self, focal_group): + # Global intermediate recombination from a focal group + return np.mean(focal_group, axis=0) + + def adapt_sigma(self, success_rate): + # Dynamically adjust sigma based on observed mutation success + if success_rate > 0.2: + self.sigma /= self.learning_rate + elif success_rate < 0.2: + self.sigma *= self.learning_rate + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = np.min(population[np.argmin(fitness)], axis=0), np.min(fitness) + + evaluations = self.population_size + successful_mutations = 0 + + while evaluations < self.budget: + focal_group = self.select_focal_group(population, fitness) + recombined_individual = self.recombine(focal_group) + + for i in range(self.population_size): + if i < self.focal_population_size: + mutant = self.mutate(recombined_individual) + else: + mutant = self.mutate(population[i]) + + mutant_fitness = func(mutant) + + if mutant_fitness < fitness[i]: + population[i] = mutant + fitness[i] = mutant_fitness + successful_mutations += 1 + + if mutant_fitness < best_fitness: + best_individual = mutant + best_fitness = mutant_fitness + + evaluations += 1 + if evaluations >= self.budget: + break + + # Adjust mutation strategy based on success + success_rate = successful_mutations / self.population_size + self.adapt_sigma(success_rate) + successful_mutations = 0 # Reset for next generation + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/AdaptivePrecisionHybridSearch.py b/nevergrad/optimization/lama/AdaptivePrecisionHybridSearch.py new file mode 100644 index 000000000..5a6ae5bd7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionHybridSearch.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptivePrecisionHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 300 + elite_size = int(0.20 * population_size) # Increase the elite size for better retention + mutation_rate = 0.20 # Slightly reduce to balance exploration and exploitation + mutation_scale = lambda t: 0.08 * np.exp(-0.0003 * t) # Adjust mutation scale for fine-grained search + crossover_rate = 0.88 + + local_search_prob_base = 0.15 # Increase the initial probability for local search + local_search_decay = 0.00015 # Decrease decay rate to sustain local search longer + local_search_step_scale = lambda t: 0.02 * np.exp(-0.0001 * t) # Adaptive step scale for local search + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + local_search_prob = local_search_prob_base * np.exp(-local_search_decay * evaluations) + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptivePrecisionMemoryStrategyV47.py b/nevergrad/optimization/lama/AdaptivePrecisionMemoryStrategyV47.py new file mode 100644 index 000000000..b5252ec8c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionMemoryStrategyV47.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdaptivePrecisionMemoryStrategyV47: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=10, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover probability + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Use memory to guide mutation in phase 2 + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + # Update memory with successful mutations + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adaptive parameter adjustment based on the sigmoid function + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + iteration = 0 + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionRotationalClimbOptimizer.py b/nevergrad/optimization/lama/AdaptivePrecisionRotationalClimbOptimizer.py new file mode 100644 index 000000000..5a35a0d79 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionRotationalClimbOptimizer.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptivePrecisionRotationalClimbOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension fixed as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 30 # Reduced population for more focused exploration + mutation_rate = 0.08 # Further reduced mutation rate for finer adjustments + rotation_rate = 0.03 # Adaptive rotation rate for small precise rotations + blend_factor = 0.7 # Increased blend factor for stronger pull towards better solutions + + # Initialize population within the bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + for i in range(population_size): + # Select mutation indices ensuring unique entries + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation and rotational operation + direction = b - c + theta = rotation_rate * 2 * np.pi # Complete rotation consideration + rotation_matrix = np.eye(self.dim) + if self.dim >= 2: # Ensure rotation is only applied if dimensionality permits + np.fill_diagonal(rotation_matrix[:2, :2], np.cos(theta)) + rotation_matrix[0, 1], rotation_matrix[1, 0] = -np.sin(theta), np.sin(theta) + + rotated_vector = np.dot(rotation_matrix, direction) + mutant = a + mutation_rate * rotated_vector + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover with adaptive precision + trial = best_solution + blend_factor * (mutant - best_solution) + trial = np.clip(trial, self.lower_bound, self.upper_bound) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + + # Adaptive adjustment of rotation and mutation parameters based on progress + if evaluations % (self.budget // 10) == 0: # Every 10% of the budget + rotation_rate *= 0.95 # Gradually decrease rotation rate + mutation_rate *= 0.95 # Gradually decrease mutation rate + blend_factor = min(blend_factor + 0.02, 1.0) # Gradually increase blend factor to 1.0 + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionSearch.py b/nevergrad/optimization/lama/AdaptivePrecisionSearch.py new file mode 100644 index 000000000..4a4988495 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionSearch.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AdaptivePrecisionSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration parameters + population_size = 100 + mutation_factor = 0.5 + crossover_rate = 0.9 + elite_size = max(1, int(population_size * 0.1)) + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + for _ in range(int(self.budget / population_size)): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Keep a portion of the best solutions (elitism) + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate new candidates + for i in range(elite_size, population_size): + # Mutation (differential evolution strategy) + idxs = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[idxs] + + mutant = x0 + mutation_factor * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial = np.where(crossover_mask, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Replace old population + population = new_population + fitness = new_fitness + + # Update the best solution found + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptivePrecisionStrategicOptimizer.py b/nevergrad/optimization/lama/AdaptivePrecisionStrategicOptimizer.py new file mode 100644 index 000000000..841dc45c2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptivePrecisionStrategicOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptivePrecisionStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + mutation_factor = 0.8 # Adjusted mutation factor + crossover_probability = 0.75 # Adjusted crossover probability + elite_size = 5 + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mechanism for mutation and crossover + success_rate = np.zeros(population_size) # Track success for each individual + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with adaptive factor + adaptive_mutation_factor = mutation_factor + 0.1 * (2 * success_rate[i] - 1) + mutant = a + adaptive_mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Adaptive crossover probability + adaptive_crossover_probability = crossover_probability + 0.1 * (2 * success_rate[i] - 1) + + # Crossover + trial_vector = np.where( + np.random.rand(self.dim) < adaptive_crossover_probability, mutant, population[i] + ) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + success_rate[i] += 1 # Increment success count for this individual + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + success_rate[i] = max(0, success_rate[i] - 1) # Decrement or maintain success rate + + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQGSA.py b/nevergrad/optimization/lama/AdaptiveQGSA.py new file mode 100644 index 000000000..8693df2bc --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQGSA.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveQGSA: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.9 # Decrease gravitational constant over time + self.alpha *= 0.95 # Reduce step size over time + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQGSA_EC.py b/nevergrad/optimization/lama/AdaptiveQGSA_EC.py new file mode 100644 index 000000000..f0edf0c58 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQGSA_EC.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveQGSA_EC: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDE.py b/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDE.py new file mode 100644 index 000000000..1f8278a7b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDE.py @@ -0,0 +1,152 @@ +import numpy as np + + +class AdaptiveQuantumAnnealingDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature + self.cooling_rate = 0.85 # Cooling rate for annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the elite fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDEv2.py b/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDEv2.py new file mode 100644 index 000000000..d9aa608fe --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumAnnealingDEv2.py @@ -0,0 +1,157 @@ +import numpy as np + + +class AdaptiveQuantumAnnealingDEv2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature + self.cooling_rate = 0.9 # Cooling rate for annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the elite fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Retain top individuals to maintain high quality solutions in the population + top_indices = np.argsort(fitness)[: self.pop_size // 2] + for i in top_indices: + new_population[i] = np.copy(new_population[i]) + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumCognitionOptimizerV3.py b/nevergrad/optimization/lama/AdaptiveQuantumCognitionOptimizerV3.py new file mode 100644 index 000000000..433a2c009 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumCognitionOptimizerV3.py @@ -0,0 +1,84 @@ +import numpy as np + + +class AdaptiveQuantumCognitionOptimizerV3: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.7, + cognitive_coefficient=1.5, + social_coefficient=1.5, + inertia_decay=0.99, + quantum_jump_rate=0.2, + quantum_scale=0.05, + adaptive_scale_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum jump with adaptive scale handling + if np.random.rand() < self.quantum_jump_rate: + # Adjust quantum scale dynamically based on the global best score + quantum_deviation = np.random.normal( + 0, + max( + 0.0001, + self.quantum_scale + * (1 + self.adaptive_scale_factor * np.log(1 + abs(global_best_score))), + ), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Standard PSO update with inertia, cognitive, and social components + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Decay inertia weight to promote convergence + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/AdaptiveQuantumCrossoverOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumCrossoverOptimizer.py new file mode 100644 index 000000000..eaff7ee7b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumCrossoverOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class AdaptiveQuantumCrossoverOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 200 # Increased population size for more diversity + mutation_factor = 0.9 # Initial mutation factor + crossover_prob = 0.8 # Initial crossover probability + adaptivity_rate = 0.05 # Rate at which parameters adapt + + # Initialize population and fitness + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + + # Elite-based reproduction with adaptation + elite_size = int(population_size * 0.1) # Top 10% as elite + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Select parents from elite + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices] + + # Crossover + mask = np.random.rand(self.dim) < crossover_prob + child = np.where(mask, parent1, parent2) + + # Quantum-Inspired mutation based on Gaussian noise + quantum_noise = np.random.randn(self.dim) * mutation_factor + child += quantum_noise + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + # Update the best solution if found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive mechanism for mutation and crossover, responding to landscape + mutation_factor -= mutation_factor * adaptivity_rate + crossover_prob += (1 - crossover_prob) * adaptivity_rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolution.py new file mode 100644 index 000000000..109b19c54 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolution.py @@ -0,0 +1,69 @@ +import numpy as np + + +class AdaptiveQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_l = 0.5 + self.F_u = 1.0 + self.CR_l = 0.1 + self.CR_u = 0.9 + + # Quantum Inspired Parameters + self.alpha = 0.75 + self.beta = 0.25 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_l + np.random.rand() * (self.F_u - self.F_l) + CR_adaptive = self.CR_l + np.random.rand() * (self.CR_u - self.CR_l) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + # Quantum Inspired Adjustment + quantum_perturbation = np.random.normal(0, 1, self.dim) * ( + self.alpha * (self.x_opt - population[i]) + self.beta * (population[i] - self.lb) + ) + trial_vector = np.clip(trial_vector + quantum_perturbation, self.lb, self.ub) + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionPlus.py new file mode 100644 index 000000000..89e8a1421 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionPlus.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveQuantumDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Initial Differential weight + self.initial_CR = 0.9 # Initial Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.amplitude = 0.15 # Quantum amplitude + self.eval_count = 0 + + def __call__(self, func): + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-self.amplitude, self.amplitude, position.shape) * ( + best_position - position + ) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with random components + adaptive_F = self.initial_F * np.random.rand() + adaptive_CR = self.initial_CR * np.random.rand() + return adaptive_F, adaptive_CR + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if self.eval_count % 2 == 0: # Apply quantum every second step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionV2.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionV2.py new file mode 100644 index 000000000..c74c18fee --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionV2.py @@ -0,0 +1,113 @@ +import numpy as np + + +class AdaptiveQuantumDifferentialEvolutionV2: + def __init__(self, budget=10000, population_size=50, elite_size=5, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-inspired mutation strategy with elite guidance + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Quantum-inspired restart mechanism + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on distance to the best solution + distances = np.linalg.norm(population - self.x_opt, axis=1) + reinit_indices = distances.argsort()[-int(self.population_size / 2) :] + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py new file mode 100644 index 000000000..5eedfef50 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + self.adaptive_memory_rate = 0.5 + self.diversity_tracking_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def enhanced_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC", "SLSQP"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.enhanced_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = population[np.argsort(fitness)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(population, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py new file mode 100644 index 000000000..0a26681b4 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def hybrid_search(self, x, func): + candidate_positions = [x + np.random.randn(self.dim) * 0.1 for _ in range(10)] + candidate_positions = [np.clip(pos, self.bounds[0], self.bounds[1]) for pos in candidate_positions] + candidate_fitness = [func(pos) for candidate in candidate_positions] + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + memory_candidates = [ + self.memorized_individuals[np.random.randint(len(self.memorized_individuals))] + for _ in range(self.memory_size) + ] + for mem_ind in memory_candidates: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + # Apply enhanced hybrid search mechanism + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch.py new file mode 100644 index 000000000..6670ed183 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory.py new file mode 100644 index 000000000..3c85ab10c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def hybrid_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.hybrid_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + # Memory update + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = self.memory_rate * memory[i] + (1 - self.memory_rate) * population[i] + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Elitist learning phase + learned_population = self.elitist_learning(personal_bests, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < global_best_fit: + global_best_fit = learned_fitness[i] + global_best = learned_population[i] + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement.py new file mode 100644 index 000000000..1676cb6a4 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def hybrid_search(self, x, func): + candidate_positions = [x + np.random.randn(self.dim) * 0.1 for _ in range(10)] + candidate_positions = [np.clip(pos, self.bounds[0], self.bounds[1]) for pos in candidate_positions] + candidate_fitness = [func(pos) for pos in candidate_positions] + + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + memory_candidates = [ + self.memorized_individuals[np.random.randint(len(self.memorized_individuals))] + for _ in range(self.elite_size) + ] + for mem_ind in memory_candidates: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + # Apply enhanced hybrid search mechanism + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch.py new file mode 100644 index 000000000..85cd1fa5b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.initial_num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.8 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.6 + self.learning_rate = 0.6 + self.num_learning_agents = 15 + self.adaptive_memory_rate = 0.6 + self.diversity_tracking_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = population[np.argsort(fitness)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(population, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDiversityEnhancerV7.py b/nevergrad/optimization/lama/AdaptiveQuantumDiversityEnhancerV7.py new file mode 100644 index 000000000..573d2c0ce --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDiversityEnhancerV7.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdaptiveQuantumDiversityEnhancerV7: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.2, + mutation_intensity=0.05, + crossover_rate=0.85, + quantum_prob=0.6, + gamma=0.3, + beta=0.3, + epsilon=0.0005, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Further increased probability for quantum-inspired state update + self.gamma = gamma # Increased gamma for deeper exploration in quantum state updates + self.beta = beta # Adjusted beta to manage mutation intensity more aggressively + self.epsilon = epsilon # Lower threshold for mutation intensity to ensure fine-tuning + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites based on the fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Apply quantum state update with a higher probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population and update the best solution found + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Enhanced quantum state update for potentially better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/AdaptiveQuantumDynamicTuningOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumDynamicTuningOptimizer.py new file mode 100644 index 000000000..1b8b4ecb6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumDynamicTuningOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveQuantumDynamicTuningOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 60 # Slightly increased population size for better initial exploration + inertia_weight = 0.9 # Initial high inertia for broad exploration + cognitive_coefficient = 2.05 # Fine-tuned cognitive learning rate + social_coefficient = 2.05 # Fine-tuned social learning rate + velocity_limit = 0.25 # Optimized velocity limit for enhanced particle movement + quantum_momentum = 0.03 # Increased quantum momentum for stronger quantum jumps + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Dynamic decay of inertia weight for smooth transition from exploration to exploitation + w = inertia_weight * (1 - 2 * (current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Conditionally apply quantum jumps with dynamically decreasing probability + quantum_probability = 0.1 * np.exp(-12 * (current_budget / self.budget)) + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions using modified PSO dynamics + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Evaluate fitness and update personal and global bests + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveQuantumEliteDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveQuantumEliteDifferentialEvolution.py new file mode 100644 index 000000000..30b830cb9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumEliteDifferentialEvolution.py @@ -0,0 +1,186 @@ +import numpy as np + + +class AdaptiveQuantumEliteDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumEliteMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumEliteMemeticOptimizer.py new file mode 100644 index 000000000..17b1fed2f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumEliteMemeticOptimizer.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumEliteMemeticOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.5 # Enhanced quantum influence + self.elite_fraction = 0.3 # Further increased elite fraction + self.memory_size = 25 # Further increased memory size + self.local_search_probability = 0.6 # Higher probability for local search + self.stagnation_threshold = 2 # Reduced threshold for quicker adjustment + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.98 # Faster annealing for quicker adaptation + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumEliteMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumEntropyDE.py b/nevergrad/optimization/lama/AdaptiveQuantumEntropyDE.py new file mode 100644 index 000000000..15bd8a68a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumEntropyDE.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdaptiveQuantumEntropyDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, local_search_steps=100): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumEvolutionStrategy.py b/nevergrad/optimization/lama/AdaptiveQuantumEvolutionStrategy.py new file mode 100644 index 000000000..90d1270c3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumEvolutionStrategy.py @@ -0,0 +1,60 @@ +import numpy as np + + +class AdaptiveQuantumEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 200 # Adjusted population size for enhanced exploration + self.sigma_initial = 0.5 # Adjusted initial standard deviation for mutation + self.learning_rate = 0.1 # Learning rate for adaptive quantum impact + self.CR = 0.8 # Adjusted crossover probability for robustness + self.q_impact_initial = 0.05 # Initial quantum impact in mutation + self.q_impact_decay = 0.995 # Decay rate for quantum impact + self.sigma_decay = 0.995 # Adjusted decay rate for sigma + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + # Adapt sigma and quantum impact + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + + # Generate new trial vectors + for i in range(self.pop_size): + # Mutation using differential evolution strategy and quantum impact + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + sigma * (a - b) + q_impact * np.random.standard_cauchy(self.dim) + ) # Quantum influenced mutation + mutant = np.clip(mutant, -5.0, 5.0) + + # Adaptive Crossover + CRi = self.CR + self.learning_rate * (np.random.randn()) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Evaluate + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptiveQuantumEvolvedDiversityExplorerV15.py b/nevergrad/optimization/lama/AdaptiveQuantumEvolvedDiversityExplorerV15.py new file mode 100644 index 000000000..59a147284 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumEvolvedDiversityExplorerV15.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdaptiveQuantumEvolvedDiversityExplorerV15: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_intensity=0.8, + crossover_rate=0.6, + quantum_prob=0.9, + gamma=0.9, + beta=0.1, + epsilon=0.01, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gamma = gamma # Quantum state update influence + self.beta = beta # Mutation decay rate + self.epsilon = epsilon # Minimum mutation factor + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = self.mutate(population[parent_idx], evaluations) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, self.gamma, self.dimension) * (best_individual - individual) + return individual + perturbation diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch.py new file mode 100644 index 000000000..17287e0f5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch.py @@ -0,0 +1,151 @@ +import numpy as np + + +class AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch: + def __init__( + self, + budget, + population_size=100, + tau1=0.1, + tau2=0.1, + memetic_rate=0.6, + alpha=0.2, + learning_rate=0.01, + elite_fraction=0.1, + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.normal(size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedMemeticSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedMemeticSearch.py new file mode 100644 index 000000000..47f2b9171 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientBoostedMemeticSearch.py @@ -0,0 +1,133 @@ +import numpy as np + + +class AdaptiveQuantumGradientBoostedMemeticSearch: + def __init__( + self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.6, alpha=0.2, learning_rate=0.01 + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientEnhancedOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientEnhancedOptimizer.py new file mode 100644 index 000000000..4d253b2be --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientEnhancedOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdaptiveQuantumGradientEnhancedOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 40 # Increased population size for better exploration + mutation_scale = 0.3 # Initial mutation scale + crossover_probability = 0.8 # Higher initial crossover probability + learning_rate = 0.01 # Learning rate for gradient descent + + # Initialize population within bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + new_population = np.copy(population) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Mutation: Quantum-inspired normal perturbation + mutation = np.random.normal(0, mutation_scale, self.dim) + candidate = population[i] + mutation + + # Gradient calculation for local search + grad = np.zeros(self.dim) + for d in range(self.dim): + if current_budget + 2 > self.budget: + break + perturb = np.zeros(self.dim) + perturb[d] = learning_rate + f_plus = func(population[i] + perturb) + f_minus = func(population[i] - perturb) + current_budget += 2 + + grad[d] = (f_plus - f_minus) / (2 * learning_rate) + + # Update candidate using gradient information + candidate -= learning_rate * grad + + # Crossover with random individual + if np.random.rand() < crossover_probability: + partner_index = np.random.randint(population_size) + mask = np.random.rand(self.dim) < 0.5 # Uniform mask + candidate[mask] = population[partner_index][mask] + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_fitness = func(candidate) + current_budget += 1 + + # Greedy selection + if candidate_fitness < fitness[i]: + new_population[i] = candidate + fitness[i] = candidate_fitness + + # Update best found solution + if candidate_fitness < best_fitness: + best_fitness = candidate_fitness + best_solution = candidate + + population = new_population + # Adaptive update of mutation scale and crossover probability + mutation_scale *= 0.98 + crossover_probability *= 0.97 + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimization.py new file mode 100644 index 000000000..107105bfe --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimization.py @@ -0,0 +1,213 @@ +import numpy as np + + +class AdaptiveQuantumGradientExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size for better exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.6 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + + # Differential Evolution parameters + F = 0.5 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + thetas = [np.pi / 4, np.pi / 6, np.pi / 8] # Multiple rotation angles + rotation_matrices = [ + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) for theta in thetas + ] + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrices + if i % 100 == 0 and i > 0: + for rotation_matrix in rotation_matrices: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveQuantumGradientExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimizationV2.py new file mode 100644 index 000000000..10207247a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientExplorationOptimizationV2.py @@ -0,0 +1,216 @@ +import numpy as np + + +class AdaptiveQuantumGradientExplorationOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size for better exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant for balance + c2 = 2.0 # Social constant for stronger global search + w = 0.7 # Adaptive inertia weight initialization + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.5 # Differential weight for finer adjustments + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.3 # Increased threshold for diversity + stagnation_counter = 0 + max_stagnation = 10 # Reduced max stagnation to trigger diversity enforcement earlier + + # Exploration improvement parameters + exploration_factor = 0.4 # Increased exploration factor + + # Quantum-inspired exploration + theta = np.pi / 6 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + initial_mutation_factor = 0.3 + mutation_factor = initial_mutation_factor + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.2 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + sub_pos = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = sub_pos + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Adaptive mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation_factor = initial_mutation_factor * (1 - i / self.budget) + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + # Update inertia weight adaptively + w = 0.9 - (0.9 - 0.4) * (i / self.budget) + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveQuantumGradientExplorationOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientHybridOptimizer.py new file mode 100644 index 000000000..b9b3b668d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientHybridOptimizer.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdaptiveQuantumGradientHybridOptimizer: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_ratio=0.25, + mutation_intensity=1.5, + crossover_rate=0.75, + quantum_prob=0.80, + gradient_boost_prob=0.30, + adaptive_factor=0.1, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gradient_boost_prob = gradient_boost_prob + self.adaptive_factor = adaptive_factor + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = population[parent_idx].copy() + + if np.random.random() < self.gradient_boost_prob: + child = self.gradient_boost(child, func) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + mutation_scale = self.adaptive_mutation_scale(evaluations) + child = np.clip(child + np.random.normal(0, mutation_scale, self.dimension), -5, 5) + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def gradient_boost(self, individual, func, lr=0.02): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = np.array(individual) + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - lr * grad_est + + def quantum_state_update(self, individual, best_individual): + return individual + np.random.normal(0, self.adaptive_factor, self.dimension) * ( + best_individual - individual + ) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_intensity * np.exp(-self.adaptive_factor * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumGradientOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumGradientOptimizer.py new file mode 100644 index 000000000..e1118b441 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumGradientOptimizer.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdaptiveQuantumGradientOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is constant at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 30 # Smaller population size to increase individual scrutiny + mutation_rate = 0.2 # Initial mutation rate + crossover_rate = 0.7 # Initial crossover rate + gradient_step = 0.01 # Step size for gradient estimation + + # Initialize population within bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + new_population = population.copy() + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum mutation + quantum_perturbation = np.random.normal(0, mutation_rate, self.dim) + child = population[i] + quantum_perturbation + + # Gradient-based refinement + grad = np.zeros(self.dim) + for d in range(self.dim): + if current_budget + 2 > self.budget: + break + plus = np.array(population[i]) + minus = np.array(population[i]) + plus[d] += gradient_step + minus[d] -= gradient_step + + f_plus = func(plus) + f_minus = func(minus) + current_budget += 2 + + grad[d] = (f_plus - f_minus) / (2 * gradient_step) + + child -= grad * gradient_step # Move against the gradient + + # Crossover + if np.random.rand() < crossover_rate: + partner_idx = np.random.randint(population_size) + for j in range(self.dim): + if np.random.rand() < 0.5: + child[j] = population[partner_idx][j] + + child = np.clip(child, self.lower_bound, self.upper_bound) + child_fitness = func(child) + current_budget += 1 + + # Selection + if child_fitness < fitness[i]: + new_population[i] = child + fitness[i] = child_fitness + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + population = new_population + mutation_rate *= 0.99 # Gradual decrease in mutation rate + crossover_rate *= 0.99 # Gradual decrease in crossover rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumHarmonizedPSO.py b/nevergrad/optimization/lama/AdaptiveQuantumHarmonizedPSO.py new file mode 100644 index 000000000..60cb5184c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumHarmonizedPSO.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveQuantumHarmonizedPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # Smaller population size to increase focus + inertia_weight = 0.9 # High initial inertia for exploration + cognitive_coefficient = 1.5 # Moderately high for personal learning + social_coefficient = 1.5 # Moderately high for social influence + final_inertia_weight = 0.2 # Lower final inertia for sharper focus towards end + adaptive_cognitive = 0.05 # Incremental increase factor for cognitive component + adaptive_social = 0.05 # Incremental increase factor for social component + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + w = inertia_weight - ((inertia_weight - final_inertia_weight) * (current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired stochastic component with dampening factor + quantum_factor = np.random.normal(0, 0.1, self.dim) # Dynamic adjustments through noise + + # Update velocity with adaptive increments in cognitive and social coefficients + inertia = w * velocity[i] + cognitive_component = ( + (cognitive_coefficient + adaptive_cognitive * (current_budget / self.budget)) + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + (social_coefficient + adaptive_social * (current_budget / self.budget)) + * np.random.rand(self.dim) + * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + quantum_factor + + # Update position + population[i] += velocity[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveQuantumHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumHybridOptimizer.py new file mode 100644 index 000000000..690e86c20 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumHybridOptimizer.py @@ -0,0 +1,187 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumHybridOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 3, 1 / 3, 1 / 3] + self.strategy_rewards = [0, 0, 0] + self.strategy_uses = [0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 3, 1 / 3, 1 / 3] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + # Standard PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + # Quantum PSO update + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + # Hybrid update with local search + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdaptiveQuantumHybridOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumHybridSearchV2.py b/nevergrad/optimization/lama/AdaptiveQuantumHybridSearchV2.py new file mode 100644 index 000000000..15fd9bc91 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumHybridSearchV2.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdaptiveQuantumHybridSearchV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 # Increased population for more diverse solutions + elite_size = 20 # Increased elite size for better exploitation + evaluations = 0 + mutation_factor = 0.7 # More robust mutation factor for consistent exploration + crossover_probability = 0.85 # Higher crossover to exploit good genes + quantum_probability = 0.05 # Higher base quantum probability for more exploration + convergence_threshold = 1e-7 # More sensitive threshold for detecting stagnation + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + # Adjust mutation factor based on convergence rate + if abs(previous_best - self.f_opt) < convergence_threshold: + mutation_factor *= 0.9 # Gradual reduction of mutation factor + else: + mutation_factor *= 1.1 # Increase the mutation factor to escape local minima + previous_best = self.f_opt + + # Quantum-inspired exploration + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Differential evolution steps + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + mutation_factor * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability = min( + 0.1, quantum_probability * 1.02 + ) # Progressive increase in quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumInfluencedMemeticAlgorithm.py b/nevergrad/optimization/lama/AdaptiveQuantumInfluencedMemeticAlgorithm.py new file mode 100644 index 000000000..cdb35638b --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumInfluencedMemeticAlgorithm.py @@ -0,0 +1,115 @@ +import numpy as np + + +class AdaptiveQuantumInfluencedMemeticAlgorithm: + def __init__( + self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.7, alpha=0.2, learning_rate=0.01 + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumInformedDifferentialStrategy.py b/nevergrad/optimization/lama/AdaptiveQuantumInformedDifferentialStrategy.py new file mode 100644 index 000000000..554b4dcb9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumInformedDifferentialStrategy.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveQuantumInformedDifferentialStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 150 + elite_size = 15 + evaluations = 0 + mutation_scale = 0.5 # Start with a higher mutation scale + recombination_prob = 0.9 # Increase recombination probability for robust exploration + quantum_factor = 0.1 # Reduced quantum factor to balance exploration + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Quantum-inspired solution space exploration + num_quantum_individuals = int(population_size * quantum_factor) + quantum_population = np.random.uniform(self.lb, self.ub, (num_quantum_individuals, self.dim)) + quantum_fitness = np.array([func(ind) for ind in quantum_population]) + evaluations += num_quantum_individuals + + combined_population = np.vstack((population, quantum_population)) + combined_fitness = np.hstack((fitness, quantum_fitness)) + + # Select the top-performing individuals as elite + elite_indices = np.argsort(combined_fitness)[:elite_size] + elite_individuals = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + + # Differential evolution mutation and recombination + new_population = [] + for _ in range(population_size - elite_size): + indices = np.random.choice(elite_size, 3, replace=False) + x1, x2, x3 = elite_individuals[indices] + mutant = x1 + mutation_scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + if np.random.rand() < recombination_prob: + cross_points = np.random.rand(self.dim) < 0.5 + child = np.where(cross_points, mutant, x1) + else: + child = mutant + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + + # Update population and fitness + population = np.vstack((elite_individuals, new_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += len(new_population) + + # Adapt mutation scale and quantum factor based on performance + mutation_scale *= 0.95 # Gradual reduction of mutation scale + if evaluations % 1000 == 0 and quantum_factor < 0.3: + quantum_factor += 0.02 # Incrementally increase quantum factor + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumInformedGradientEnhancer.py b/nevergrad/optimization/lama/AdaptiveQuantumInformedGradientEnhancer.py new file mode 100644 index 000000000..b4e5fe532 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumInformedGradientEnhancer.py @@ -0,0 +1,88 @@ +import numpy as np + + +class AdaptiveQuantumInformedGradientEnhancer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 100 # Adjusted population size for refined search + mutation_factor = 0.8 # Initially higher mutation for broader search + crossover_prob = 0.7 # Initially higher crossover probability for diverse search patterns + learning_rate = 0.1 # Starting learning rate for gradient-based steps + + # Initialize population within bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + gradients = np.zeros_like(population) + + # Gradient estimation with central difference method + for i in range(population_size): + if current_budget >= self.budget: + break + + base_ind = population[i] + for d in range(self.dim): + perturbed_ind_plus = np.array(base_ind) + perturbed_ind_minus = np.array(base_ind) + perturbed_ind_plus[d] += learning_rate + perturbed_ind_minus[d] -= learning_rate + + if current_budget + 2 <= self.budget: + fitness_plus = func(perturbed_ind_plus) + fitness_minus = func(perturbed_ind_minus) + current_budget += 2 + gradient = (fitness_plus - fitness_minus) / (2 * learning_rate) + gradients[i, d] = gradient + + new_population = population.copy() # Start with a copy of the current population + + # Generate new solutions based on gradients, mutation, and crossover + for i in range(population_size): + if current_budget >= self.budget: + break + + # Apply gradient descent + child = population[i] - learning_rate * gradients[i] + + # Mutation step + child += np.random.randn(self.dim) * mutation_factor + + # Crossover step + if np.random.rand() < crossover_prob: + partner_idx = np.random.randint(population_size) + crossover_mask = np.random.rand(self.dim) < 0.5 + child = child * crossover_mask + population[partner_idx] * (1 - crossover_mask) + + child = np.clip(child, self.lower_bound, self.upper_bound) + child_fitness = func(child) + current_budget += 1 + + if child_fitness < fitness[i]: + new_population[i] = child + fitness[i] = child_fitness + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + population = new_population + + # Adaptively adjust learning rate, mutation factor, and crossover probability + mutation_factor *= 0.98 # Gradual decline in mutation factor + learning_rate *= 0.98 # Gradual decrease in learning rate + crossover_prob *= 0.98 # Reduce the crossover probability to stabilize final convergence + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLeapOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumLeapOptimizer.py new file mode 100644 index 000000000..bafaabb13 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLeapOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdaptiveQuantumLeapOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 500 # Increase in population size to intensify exploration + mutation_factor = 0.9 # Maintains a slightly higher mutation factor for robust exploration + crossover_prob = 0.8 # High crossover probability to encourage information sharing + adaptivity_rate = 0.03 # Reduced adaptation rate to provide stability over generations + + # Initialize population and fitness + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + + # Adjust population dynamics for improved elite focus + elite_size = int(population_size * 0.35) # Increased elite size to 35% + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Enhanced parent selection mechanism, favoring elites + if np.random.rand() < 0.8: # Increased chance to pull parents from elites + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices] + else: + parent1, parent2 = population[np.random.choice(range(population_size), 2, replace=False)] + + # Crossover + mask = np.random.rand(self.dim) < crossover_prob + child = np.where(mask, parent1, parent2) + + # Mutation with quantum influence + dynamic_mutation = mutation_factor * (1 + np.sin(2 * np.pi * current_budget / self.budget)) + quantum_noise = np.random.randn(self.dim) * dynamic_mutation + ( + np.random.randn(self.dim) * 0.1 + ) + child += quantum_noise + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive mutation and crossover adjustments + mutation_factor *= 1 - adaptivity_rate + crossover_prob = np.clip(crossover_prob + adaptivity_rate * (np.random.rand() - 0.5), 0.5, 1) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialEnhancedOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialEnhancedOptimizer.py new file mode 100644 index 000000000..24999e276 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialEnhancedOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class AdaptiveQuantumLevyDifferentialEnhancedOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.4 - 1.0 * progress + social_coefficient = 1.4 + 0.6 * progress + differential_weight = 0.6 + 0.3 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.3 - 0.1 * progress + levy_factor = 0.4 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.2, 0.2, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.2: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizer.py new file mode 100644 index 000000000..9d4d01cb2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class AdaptiveQuantumLevyDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.7 - 1.0 * progress + social_coefficient = 1.7 + 0.3 * progress + differential_weight = 0.8 + 0.4 * progress + crossover_rate = 0.9 - 0.6 * progress + quantum_factor = 0.5 - 0.1 * progress + levy_factor = 0.1 + 0.5 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.05: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizerV2.py new file mode 100644 index 000000000..3469099eb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialOptimizerV2.py @@ -0,0 +1,156 @@ +import numpy as np + + +class AdaptiveQuantumLevyDifferentialOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 2.0 - 1.5 * progress + social_coefficient = 2.0 + 0.5 * progress + differential_weight = 0.8 + 0.3 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.4 - 0.1 * progress + levy_factor = 0.2 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.1: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialSwarmOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialSwarmOptimizationV2.py new file mode 100644 index 000000000..4905ea187 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDifferentialSwarmOptimizationV2.py @@ -0,0 +1,157 @@ +import numpy as np + + +class AdaptiveQuantumLevyDifferentialSwarmOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.7 - 0.3 * progress + quantum_factor = 0.1 + 0.1 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.4: + local_search_iters = 15 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicOptimization.py new file mode 100644 index 000000000..fbc9b6509 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicOptimization.py @@ -0,0 +1,160 @@ +import numpy as np + + +class AdaptiveQuantumLevyDynamicOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.6 * progress + cognitive_coefficient = 1.5 + 0.4 * progress + social_coefficient = 1.5 - 0.4 * progress + differential_weight = 0.8 - 0.5 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.4 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 40 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 3 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimization.py new file mode 100644 index 000000000..0322a59af --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimization.py @@ -0,0 +1,147 @@ +import numpy as np + + +class AdaptiveQuantumLevyDynamicSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 80 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.5 + social_coefficient = 2.0 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.15: # Reduced selection probability for local search to 15% + local_search_iters = ( + 10 # Increased local search iterations to 10 for better refinement + ) + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimizationV2.py new file mode 100644 index 000000000..5283472e9 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyDynamicSwarmOptimizationV2.py @@ -0,0 +1,147 @@ +import numpy as np + + +class AdaptiveQuantumLevyDynamicSwarmOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 100 # Increased population size for better diversity + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.4 # Reduced for slightly more global search + social_coefficient = 1.6 # Reduced for slightly more global search + differential_weight = 0.9 # Increased for stronger mutation effects + crossover_rate = 0.95 # Increased for better recombination + quantum_factor = 0.1 # Increased to allow for more exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.2: # Increased selection probability for local search to 20% + local_search_iters = ( + 15 # Further increased local search iterations to 15 for thorough refinement + ) + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyEnhancedDifferentialOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyEnhancedDifferentialOptimizer.py new file mode 100644 index 000000000..57fc489ab --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyEnhancedDifferentialOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class AdaptiveQuantumLevyEnhancedDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.5 - 0.4 * progress + levy_factor = 0.7 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 # Increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizer.py new file mode 100644 index 000000000..5c97a2e5a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizer.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumLevyMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return step + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumLevyMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizerV2.py new file mode 100644 index 000000000..5466f8188 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyMemeticOptimizerV2.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumLevyMemeticOptimizerV2: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumLevyMemeticOptimizerV2(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevySwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumLevySwarmOptimization.py new file mode 100644 index 000000000..0a352eedf --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevySwarmOptimization.py @@ -0,0 +1,143 @@ +import numpy as np + + +class AdaptiveQuantumLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 40 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.7 + social_coefficient = 2.2 + differential_weight = 0.5 + crossover_rate = 0.8 + quantum_factor = 0.1 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLevyTreeOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumLevyTreeOptimization.py new file mode 100644 index 000000000..e1cf94556 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLevyTreeOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class AdaptiveQuantumLevyTreeOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.5 + social_coefficient = 2.0 + differential_weight = 0.7 + crossover_rate = 0.7 + quantum_factor = 0.05 + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.3: # Select about 30% of the population for local search + local_search_iters = 3 # Reduce local search iterations for faster execution + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumLocalSearch.py b/nevergrad/optimization/lama/AdaptiveQuantumLocalSearch.py new file mode 100644 index 000000000..9efbafab2 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumLocalSearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveQuantumLocalSearch: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + adaptive_local_search=True, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + self.adaptive_local_search = adaptive_local_search + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = ( + search_range * np.exp(-_ / self.local_search_iters) + if self.adaptive_local_search + else search_range + ) + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step( + candidate_x, func, search_range=self.perturb_range + ) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticEvolutionaryOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticEvolutionaryOptimizer.py new file mode 100644 index 000000000..9cf3322eb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticEvolutionaryOptimizer.py @@ -0,0 +1,181 @@ +import numpy as np + + +class AdaptiveQuantumMemeticEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.3 + self.local_search_iters = 5 + self.adaptive_switch_threshold = 0.2 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def mutate_quantum(self, current, best, F): + return np.clip(current + F * np.tanh(best - current), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + if np.random.rand() < 0.5: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + else: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + if np.random.rand() < self.adaptive_switch_threshold: + mutant = self.mutate_quantum(population[i], global_best_position, F) + else: + if np.random.rand() < 0.5: + mutant = self.mutate_best_1(global_best_position, population[i], parent1, F) + else: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticGradientBoost.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticGradientBoost.py new file mode 100644 index 000000000..270092332 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticGradientBoost.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdaptiveQuantumMemeticGradientBoost: + def __init__( + self, + budget, + population_size=100, + memetic_rate=0.6, + alpha=0.2, + learning_rate=0.01, + elite_fraction=0.2, + mutation_factor=0.8, + crossover_prob=0.9, + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + self.mutation_factor = mutation_factor + self.crossover_prob = crossover_prob + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.normal(size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def ensemble_step(self, func, pop, scores, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Perform hybrid step + pop, scores = self.ensemble_step(func, pop, scores, global_best_position) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizer.py new file mode 100644 index 000000000..49627a44d --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizer.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.adaptive_threshold = 0.1 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * (1 - self.adaptive_threshold): + adaptive_factor *= 0.9 + self.quantum_weight *= adaptive_factor + else: + adaptive_factor *= 1.1 + self.quantum_weight *= adaptive_factor + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerPlus.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerPlus.py new file mode 100644 index 000000000..972bd851f --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerPlus.py @@ -0,0 +1,133 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumMemeticOptimizerPlus: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + self.stagnation_threshold = 10 # No improvement iterations before triggering local search + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 # Annealing factor for inertia weight + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + # Trigger local search after a certain number of iterations without improvement + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 # Reset the counter on improvement + + if eval_count >= self.budget: + break + + # Reset no improvement count after local search + self.no_improvement_count = 0 + + # Anneal inertia weight to enhance exploration-exploitation balance + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumMemeticOptimizerPlus(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV2.py new file mode 100644 index 000000000..8c9c84798 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV2.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumMemeticOptimizerV2: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.5 + self.social_weight = 1.5 + self.quantum_weight = 0.2 + self.elite_fraction = 0.4 + self.memory_size = 20 + self.local_search_probability = 0.75 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.92 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumMemeticOptimizerV2(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV3.py b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV3.py new file mode 100644 index 000000000..533cadd76 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMemeticOptimizerV3.py @@ -0,0 +1,128 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumMemeticOptimizerV3: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 10 # Increased memory size for better performance tracking + self.local_search_probability = 0.3 + self.stagnation_threshold = 5 # Reduced threshold for quicker adaptation + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.995 # Slower annealing for sustained exploration + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumMemeticOptimizerV3(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumMetaheuristic.py b/nevergrad/optimization/lama/AdaptiveQuantumMetaheuristic.py new file mode 100644 index 000000000..425a00211 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumMetaheuristic.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveQuantumMetaheuristic: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + quantum_size = 10 + initial_position = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + def quantum_position_update(position, best_position, adapt_factor): + return ( + position + + np.random.uniform(-adapt_factor, adapt_factor, position.shape) + * (best_position - position) + / 2 + ) + + eval_count = 0 + convergence_threshold = 1e-6 + adapt_factor = 1.0 + + while eval_count < self.budget: + for i in range(population_size): + if eval_count >= self.budget: + break + for q in range(quantum_size): + if eval_count >= self.budget: + break + # Quantum-inspired position update with adaptive factor + candidate = quantum_position_update( + initial_position[i], + best_position if best_position is not None else initial_position[i], + adapt_factor, + ) + # Ensure candidate is within bounds + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + if candidate_value < best_value: + # Update the best solution found so far + best_value = candidate_value + best_position = candidate + initial_position[i] = candidate + adapt_factor = max( + 0.1, adapt_factor * 0.9 + ) # Decrease the adaptiveness if improvement is found + else: + adapt_factor = min( + 2.0, adapt_factor * 1.1 + ) # Increase the adaptiveness if no improvement + + if abs(best_value - candidate_value) < convergence_threshold: + break + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdaptiveQuantumMetaheuristic(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumPSO.py b/nevergrad/optimization/lama/AdaptiveQuantumPSO.py new file mode 100644 index 000000000..36cba0a64 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumPSO.py @@ -0,0 +1,102 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.3 # parameter for quantum behavior + self.adaptive_threshold = 0.1 # threshold for triggering adaptive behavior + self.elite_fraction = 0.25 # fraction of population considered as elite + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + last_best_fitness = best_fitness + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + population[i] = best_individual + 0.5 * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + # Adaptive behavior + if best_fitness < last_best_fitness * (1 - self.adaptive_threshold): + self.quantum_weight *= 0.9 + last_best_fitness = best_fitness + else: + self.quantum_weight *= 1.1 + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/AdaptiveQuantumPSOEnhanced.py b/nevergrad/optimization/lama/AdaptiveQuantumPSOEnhanced.py new file mode 100644 index 000000000..06478eb54 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumPSOEnhanced.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveQuantumPSOEnhanced: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * adaptive_factor) + else: + adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * adaptive_factor) + + if eval_count < self.budget and np.random.rand() < self.local_search_probability: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = AdaptiveQuantumPSOEnhanced(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumParticleDifferentialSwarm.py b/nevergrad/optimization/lama/AdaptiveQuantumParticleDifferentialSwarm.py new file mode 100644 index 000000000..bd833790e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumParticleDifferentialSwarm.py @@ -0,0 +1,138 @@ +import numpy as np + + +class AdaptiveQuantumParticleDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 30 + inertia_weight = 0.7 # Slightly higher inertia for better exploration + cognitive_coefficient = 1.5 + social_coefficient = 1.3 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 5 + memory = [] + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + # Adaptive Inertia Weight + inertia_weight = 0.9 - 0.5 * (evaluations / self.budget) + + # Particle Swarm Optimization Part + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential Evolution Part with Adaptive Memory + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum behavior implementation + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = AdaptiveQuantumParticleDifferentialSwarm(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumParticleSwarmOptimization.py b/nevergrad/optimization/lama/AdaptiveQuantumParticleSwarmOptimization.py new file mode 100644 index 000000000..681541512 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumParticleSwarmOptimization.py @@ -0,0 +1,174 @@ +import numpy as np + + +class AdaptiveQuantumParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Quantum tunable parameters + delta = 0.05 # Step size for quantum movement + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + prev_f = np.inf + + def quantum_move(x, g_best): + return x + delta * (np.random.random(self.dim) * 2 - 1) * np.abs(g_best - x) + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, Quantum Movement, and Local Search) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Apply quantum movement + if np.random.rand() < 0.05: + quantum_position = quantum_move(x, global_best_position) + quantum_f = func(quantum_position) + + if quantum_f < f: + positions[idx] = quantum_position + f = quantum_f + + if quantum_f < personal_best_scores[idx]: + personal_best_scores[idx] = quantum_f + personal_bests[idx] = quantum_position.copy() + + if quantum_f < global_best_score: + global_best_score = quantum_f + global_best_position = quantum_position.copy() + + if quantum_f < self.f_opt: + self.f_opt = quantum_f + self.x_opt = quantum_position.copy() + + # Local Search for fine-tuning solutions + if i % 10 == 0: # Perform local search every 10 iterations + for _ in range(5): # Number of local search steps + x_ls = x + np.random.normal(0, 0.1, self.dim) + x_ls = np.clip(x_ls, self.lower_bound, self.upper_bound) + f_ls = func(x_ls) + + if f_ls < f: + positions[idx] = x_ls + f = f_ls + + if f_ls < personal_best_scores[idx]: + personal_best_scores[idx] = f_ls + personal_bests[idx] = x_ls.copy() + + if f_ls < global_best_score: + global_best_score = f_ls + global_best_position = x_ls.copy() + + if f_ls < self.f_opt: + self.f_opt = f_ls + self.x_opt = x_ls.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveQuantumParticleSwarmOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuantumResonanceOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumResonanceOptimizer.py new file mode 100644 index 000000000..a980446ce --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumResonanceOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class AdaptiveQuantumResonanceOptimizer: + def __init__(self, budget, dim=5, pop_size=100, learning_rate=0.1, elite_rate=0.1, resonance_factor=0.05): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.learning_rate = learning_rate + self.resonance_factor = resonance_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and select elites + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with a resonance factor + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + random_resonance = np.random.uniform(-self.resonance_factor, self.resonance_factor, self.dim) + mutation = np.random.normal(0, self.resonance_factor, self.dim) + self.population[idx] = elite_sample + mutation + self.learning_rate * random_resonance + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/AdaptiveQuantumStrategicOptimizer.py b/nevergrad/optimization/lama/AdaptiveQuantumStrategicOptimizer.py new file mode 100644 index 000000000..cbe35e9dd --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumStrategicOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveQuantumStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 200 # Further increased population for even broader initial exploration + inertia_weight = 0.9 # Initial higher inertia for broader exploration + cognitive_coefficient = 1.5 # Increased for enhanced individual learning + social_coefficient = 1.5 # Increased to emphasize collective intelligence + velocity_limit = 0.3 # Slightly higher to allow more dynamic movements + quantum_momentum = 0.15 # Higher quantum influences for better global search + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Adaptive inertia weight adjustment for strategic exploration-exploitation balance + w = inertia_weight * (0.5 + 0.5 * np.cos(2 * np.pi * current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics incorporated more adaptively + if np.random.rand() < 0.1 * (1 - np.cos(2 * np.pi * current_budget / self.budget)): + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions with strategic constraints + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and personal and global best updates + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..fdf808b51 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizationV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.5, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + self.adapt_weights() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizerV2.py b/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizerV2.py new file mode 100644 index 000000000..d9e7072e1 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumSwarmOptimizerV2.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdaptiveQuantumSwarmOptimizerV2: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coefficient=1.8, + social_coefficient=1.8, + inertia_decay=0.99, + quantum_jump_rate=0.1, + quantum_scale=0.1, + adaptive_depth=10, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_depth = ( + adaptive_depth # Depth of historical performance to adapt parameters dynamically + ) + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + performance_history = [] + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Quantum jump for exploration + particles[i] = global_best + np.random.normal(0, self.quantum_scale, self.dim) * ( + self.ub - self.lb + ) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Classical PSO update for exploitation + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + performance_history.append(global_best_score) + + # Adaptive parameter tuning based on recent history + if len(performance_history) > self.adaptive_depth: + recent_progress = np.mean(np.diff(performance_history[-self.adaptive_depth :])) + if recent_progress > 0: + self.quantum_jump_rate *= 1.1 + else: + self.quantum_jump_rate *= 0.9 + self.quantum_scale *= self.inertia_decay + performance_history = performance_history[-self.adaptive_depth :] + + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/AdaptiveQuantumSymbioticStrategy.py b/nevergrad/optimization/lama/AdaptiveQuantumSymbioticStrategy.py new file mode 100644 index 000000000..2bf2d8ab6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuantumSymbioticStrategy.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveQuantumSymbioticStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 200 + elite_size = 40 + evaluations = 0 + mutation_factor = 0.75 + crossover_probability = 0.8 + quantum_probability = 0.15 + adaptive_scaling_factor = lambda t: 0.4 * np.exp(-0.075 * t) # More aggressive adaptive decay + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Symbiotic mutation and crossover + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 5, replace=False) + x1, x2, x3, x4, x5 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + (x4 - x5) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveQuasiGradientEvolution.py b/nevergrad/optimization/lama/AdaptiveQuasiGradientEvolution.py new file mode 100644 index 000000000..5ec5f3941 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuasiGradientEvolution.py @@ -0,0 +1,118 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveQuasiGradientEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveQuasiGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuasiRandomEnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveQuasiRandomEnhancedDifferentialEvolution.py new file mode 100644 index 000000000..ce7810418 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuasiRandomEnhancedDifferentialEvolution.py @@ -0,0 +1,120 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveQuasiRandomEnhancedDifferentialEvolution: + def __init__( + self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8, base_lr=0.1, epsilon=1e-8 + ): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = base_lr + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + self.crossover_rate *= 1.1 + else: + self.base_lr *= 0.9 + self.crossover_rate *= 0.9 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveQuasiRandomEnhancedDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuasiRandomGradientDE.py b/nevergrad/optimization/lama/AdaptiveQuasiRandomGradientDE.py new file mode 100644 index 000000000..ca332fd18 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuasiRandomGradientDE.py @@ -0,0 +1,116 @@ +import numpy as np +from scipy.stats import qmc + + +class AdaptiveQuasiRandomGradientDE: + def __init__(self, budget, population_size=16, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def sobol_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random_base2(m=int(np.log2(size))) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_local_search(x): + grad = gradient_estimate(x) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = x - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + return new_x + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + population = sobol_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + iterations = self.budget // self.population_size + for i in range(iterations): + success_count = 0 + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = self.mutation_factor * (1 - i / iterations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + new_x = adaptive_local_search(trial) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveQuasiRandomGradientDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveQuorumWithStrategicMutation.py b/nevergrad/optimization/lama/AdaptiveQuorumWithStrategicMutation.py new file mode 100644 index 000000000..cf8eb3c18 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveQuorumWithStrategicMutation.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveQuorumWithStrategicMutation: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + initial_mutation_scale=0.5, + quorum_size=5, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.initial_mutation_scale = initial_mutation_scale + self.quorum_size = quorum_size + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + mutation_scale = self.initial_mutation_scale + + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select quorum of individuals, find their elite + quorum_indices = np.random.choice(self.population_size, self.quorum_size, replace=False) + elite_idx = quorum_indices[np.argmin(fitness[quorum_indices])] + elite = population[elite_idx] + + # Strategic mutation based on best and local elites + direction = best_individual - elite + mutation = np.random.normal(0, 1, self.dimension) * mutation_scale + direction * 0.1 + child = np.clip(elite + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update best if necessary + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + # Update population and fitness + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adapt mutation scale + mutation_scale = max(0.01, mutation_scale * 0.99) # Decay mutation scale + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveRefinedGradientBoostedAnnealing.py b/nevergrad/optimization/lama/AdaptiveRefinedGradientBoostedAnnealing.py new file mode 100644 index 000000000..ba2766ceb --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRefinedGradientBoostedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class AdaptiveRefinedGradientBoostedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha = 0.96 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Periodic intensive localized search for memory refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/AdaptiveRefinedHybridPSO_DE.py b/nevergrad/optimization/lama/AdaptiveRefinedHybridPSO_DE.py new file mode 100644 index 000000000..ee50a5e85 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRefinedHybridPSO_DE.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveRefinedHybridPSO_DE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.initial_pop_size = 20 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.initial_pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.initial_pop_size + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(len(population)): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + if self.x_opt is not None: + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (self.x_opt - population[i]) + + c2 * r2 * (population[np.argmin(fitness)] - population[i]) + ) + else: + velocities[i] = w * velocities[i] + c2 * r2 * ( + population[np.argmin(fitness)] - population[i] + ) + + trial_pso = population[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(population)) + indices = np.delete(indices, i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.25 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + if evaluations >= self.budget: + break + + # Elitism: Keep the best individual + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + if best_fitness < self.f_opt: + self.f_opt = best_fitness + self.x_opt = best_individual + + # Update population and fitness + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Diversity Maintenance: Re-initialize if the population converges too tightly + if np.std(fitness) < 1e-5 and evaluations < self.budget: + population = np.array([self.random_bounds() for _ in range(self.initial_pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.initial_pop_size + + # Adaptive parameter adjustment + if np.random.rand() < 0.1: + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveRefinementEvolutiveStrategy.py b/nevergrad/optimization/lama/AdaptiveRefinementEvolutiveStrategy.py new file mode 100644 index 000000000..f98d41752 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRefinementEvolutiveStrategy.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveRefinementEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate=0.05, mutation_strength=0.5): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, crossover_rate=0.95): + new_population = [] + for _ in range(len(parents)): + if np.random.rand() < crossover_rate: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + # Parameters + population_size = 100 + num_generations = self.budget // population_size + mutation_rate = 0.05 + mutation_strength = 0.5 + crossover_rate = 0.95 + + # Initialize + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + # Evolution loop + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, population_size // 5) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + # Generate new population using crossover and mutation + population = self.crossover(best_population, crossover_rate) + population = self.mutate(population, mutation_rate, mutation_strength) + + # Adaptive mutation adjustments + if gen % 10 == 0 and gen > 0: + mutation_rate /= 1.1 # Decrease mutation rate slowly + mutation_strength /= 1.1 # Decrease mutation strength to fine-tune exploration + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveRefinementPSO.py b/nevergrad/optimization/lama/AdaptiveRefinementPSO.py new file mode 100644 index 000000000..3b6bd12b6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRefinementPSO.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdaptiveRefinementPSO: + def __init__(self, budget=10000, population_size=50, omega=0.6, phi_p=0.2, phi_g=0.3, adapt_factor=0.05): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.dim = 5 # Dimension of the problem + self.adapt_factor = adapt_factor # Adaptation factor for dynamic parameter adjustment + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_fitness = np.array([func(p) for p in particles]) + + global_best = particles[np.argmin(personal_best_fitness)] + global_best_fitness = min(personal_best_fitness) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + for i in range(self.population_size): + # Dynamic adjustment of inertia weight + current_phase = evaluations / self.budget + dynamic_omega = self.omega * (1 - current_phase) + self.adapt_factor * current_phase + + # Update velocity and position + velocity[i] = ( + dynamic_omega * velocity[i] + + self.phi_p * np.random.rand(self.dim) * (personal_best[i] - particles[i]) + + self.phi_g * np.random.rand(self.dim) * (global_best - particles[i]) + ) + + particles[i] += velocity[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate particle's fitness + current_fitness = func(particles[i]) + evaluations += 1 + + # Update personal and global bests + if current_fitness < personal_best_fitness[i]: + personal_best[i] = particles[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best = particles[i] + global_best_fitness = current_fitness + + # Dynamic adjustment of learning factors + if evaluations % 1000 == 0: + self.phi_p += self.adapt_factor * (1 - self.phi_p) # Increase exploration + self.phi_g -= self.adapt_factor * self.phi_g # Decrease exploitation smoothness + + # Logging for monitoring + if evaluations % 1000 == 0: + print(f"Evaluation: {evaluations}, Best Fitness: {global_best_fitness}") + + return global_best_fitness, global_best diff --git a/nevergrad/optimization/lama/AdaptiveRefinementSearchStrategyV30.py b/nevergrad/optimization/lama/AdaptiveRefinementSearchStrategyV30.py new file mode 100644 index 000000000..ec923b637 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRefinementSearchStrategyV30.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveRefinementSearchStrategyV30: + def __init__( + self, budget, dimension=5, population_size=100, F_max=0.9, F_min=0.1, CR_max=0.9, CR_min=0.4 + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_max = F_max + self.F_min = F_min + self.CR_max = CR_max + self.CR_min = CR_min + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + scale = np.random.uniform(self.F_min, self.F_max) + mutant = population[best_idx] + scale * ( + population[a] - population[b] + population[c] - population[best_idx] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + CR = np.random.uniform(self.CR_min, self.CR_max) + crossover_mask = np.random.rand(self.dimension) < CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func, target_fitness, trial_fitness): + if trial_fitness < target_fitness: + return trial, trial_fitness + else: + return target, target_fitness + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial_fitness = func(trial) + evaluations += 1 + population[i], fitnesses[i] = self.select( + population[i], trial, func, fitnesses[i], trial_fitness + ) + + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveResilientQuantumCrossoverStrategy.py b/nevergrad/optimization/lama/AdaptiveResilientQuantumCrossoverStrategy.py new file mode 100644 index 000000000..9b70b0a28 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveResilientQuantumCrossoverStrategy.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdaptiveResilientQuantumCrossoverStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed to 5 as per the problem description + self.lb = -5.0 * np.ones(self.dim) # Lower bounds + self.ub = 5.0 * np.ones(self.dim) # Upper bounds + + def __call__(self, func): + population_size = 100 # Manageable population size + elite_size = 10 # Number of top performers + evaluations = 0 + mutation_factor = 0.5 # Starting mutation factor + crossover_probability = 0.7 # Probability of crossover + quantum_probability = 0.1 # Chance of quantum-informed mutation + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Evolution loop + while evaluations < self.budget: + # Quantum step + quantum_mutants = population[:elite_size] + np.random.normal(0, 0.1, (elite_size, self.dim)) + quantum_mutants = np.clip(quantum_mutants, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_mutants]) + evaluations += elite_size + + # Implement elitism + for i in range(elite_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_mutants[i] + fitness[i] = quantum_fitness[i] + + # Genetic operators + for i in range(population_size): + if evaluations >= self.budget: + break + # Tournament selection + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = indices + if fitness[a] < fitness[b]: + if fitness[a] < fitness[c]: + best = a + else: + best = c + else: + if fitness[b] < fitness[c]: + best = b + else: + best = c + + mutant = population[best] + mutation_factor * ( + population[a] - population[b] + population[c] - population[best] + ) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveRestartDE.py b/nevergrad/optimization/lama/AdaptiveRestartDE.py new file mode 100644 index 000000000..7f573d5ba --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRestartDE.py @@ -0,0 +1,151 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdaptiveRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 20 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + unique_archive = np.vstack({tuple(row) for row in self.archive + new_pop}) + if len(unique_archive) > self.pop_size: + self.archive = unique_archive[-self.pop_size :].tolist() + else: + self.archive = unique_archive.tolist() + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x diff --git a/nevergrad/optimization/lama/AdaptiveRestartHybridOptimizer.py b/nevergrad/optimization/lama/AdaptiveRestartHybridOptimizer.py new file mode 100644 index 000000000..3d4ce4978 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRestartHybridOptimizer.py @@ -0,0 +1,159 @@ +import numpy as np + + +class AdaptiveRestartHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.min_pop_size = 10 + self.max_pop_size = 100 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 2.0 + self.c2 = 2.0 + self.w = 0.7 + self.elite_fraction = 0.1 + self.restart_threshold = 200 + self.diversity_threshold = 0.1 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + + def cma_update(self, population, mean, cov_matrix): + new_samples = np.random.multivariate_normal(mean, cov_matrix, size=population.shape[0]) + return np.clip(new_samples, -5.0, 5.0) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + no_improvement_counter = 0 + restart_counter = 0 + + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + if restart_counter >= self.restart_threshold: + # New improved restart logic + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + restart_counter = 0 + evaluations += self.pop_size + continue + + current_pop_size = max( + self.min_pop_size, int(self.pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + restart_counter = 0 + else: + no_improvement_counter += 1 + restart_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elitism + elite_count = max(1, int(self.elite_fraction * current_pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + # Check for diversity + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + restart_counter = 0 + evaluations += self.pop_size + else: + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + population = self.cma_update(population, mean, cov_matrix) + # Re-inject elites + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveRotationalClimbOptimizer.py b/nevergrad/optimization/lama/AdaptiveRotationalClimbOptimizer.py new file mode 100644 index 000000000..e89ec7f4c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveRotationalClimbOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class AdaptiveRotationalClimbOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the search space + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 150 # Adjusted population size for balance + mutation_rate = 0.1 # Base mutation rate + rotation_rate = 0.05 # Rotation applied to the difference vectors + alpha = 0.9 # Factor for blending the mutant vector with the current best + + # Initialize population and evaluate + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + for i in range(population_size): + # Select random indices for mutation + idxs = np.random.choice(population_size, 3, replace=False) + a, b, c = population[idxs] + + # Perform mutation with rotational component + direction = b - c + theta = rotation_rate * np.pi + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + if self.dim > 2: + extended_rotation = np.eye(self.dim) + extended_rotation[:2, :2] = rotation_matrix + else: + extended_rotation = rotation_matrix + + rotated_direction = np.dot(extended_rotation, direction) + mutant = a + mutation_rate * rotated_direction + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover current best with mutant + trial = best_solution + alpha * (mutant - best_solution) + trial = np.clip(trial, self.lower_bound, self.upper_bound) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveSigmaCrossoverEvolution.py b/nevergrad/optimization/lama/AdaptiveSigmaCrossoverEvolution.py new file mode 100644 index 000000000..6cbd44fe6 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSigmaCrossoverEvolution.py @@ -0,0 +1,59 @@ +import numpy as np + + +class AdaptiveSigmaCrossoverEvolution: + def __init__(self, budget, dimension=5, population_size=50, sigma_init=1.0, crossover_prob=0.9): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.sigma = np.full(self.population_size, sigma_init) # Initial sigma for each individual + self.crossover_prob = crossover_prob # Probability of crossover + + def __call__(self, func): + # Initialize population within bounds [-5.0, 5.0] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + best_idx = np.argmin(fitness) + f_opt = fitness[best_idx] + x_opt = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Parent selection + parent1_idx, parent2_idx = np.random.choice(self.population_size, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.rand() < self.crossover_prob: + cross_points = np.random.rand(self.dimension) < 0.5 + offspring = np.where(cross_points, parent1, parent2) + else: + offspring = parent1.copy() + + # Mutation + offspring += self.sigma[i] * np.random.randn(self.dimension) + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring is within bounds + + # Evaluate offspring + offspring_fitness = func(offspring) + evaluations += 1 + + # Selection + if offspring_fitness < fitness[i]: + population[i] = offspring + fitness[i] = offspring_fitness + self.sigma[i] *= 0.95 # Reduce sigma if improvement + else: + self.sigma[i] *= 1.05 # Increase sigma if no improvement + + # Update optimum if found a new best + if offspring_fitness < f_opt: + f_opt = offspring_fitness + x_opt = offspring + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/AdaptiveSimulatedAnnealing.py b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealing.py new file mode 100644 index 000000000..34f45f388 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealing.py @@ -0,0 +1,38 @@ +import numpy as np + + +class AdaptiveSimulatedAnnealing: + def __init__(self, budget=10000, initial_temp=10.0, cooling_rate=0.95, min_temp=1e-5): + self.budget = budget + self.dim = 5 + self.initial_temp = initial_temp + self.cooling_rate = cooling_rate + self.min_temp = min_temp + + def acceptance_probability(self, energy, new_energy, temp): + if new_energy < energy: + return 1.0 + return np.exp((energy - new_energy) / temp) + + def __call__(self, func): + current_state = np.random.uniform(-5.0, 5.0, self.dim) + best_state = current_state + current_energy = func(current_state) + best_energy = current_energy + temp = self.initial_temp + + while temp > self.min_temp: + for _ in range(self.budget): + new_state = current_state + np.random.normal(0, 1, self.dim) + new_state = np.clip(new_state, -5.0, 5.0) + new_energy = func(new_state) + ap = self.acceptance_probability(current_energy, new_energy, temp) + if ap > np.random.rand(): + current_state = new_state + current_energy = new_energy + if new_energy < best_energy: + best_state = new_state + best_energy = new_energy + temp *= self.cooling_rate + + return best_energy, best_state diff --git a/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingSearch.py b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingSearch.py new file mode 100644 index 000000000..90a67065e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingSearch.py @@ -0,0 +1,58 @@ +import numpy as np + + +class AdaptiveSimulatedAnnealingSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Bounds of the search space + temperature = 1.0 # Initial temperature for simulated annealing + cooling_rate = 0.99 # Cooling rate for the annealing schedule + step_size = 0.5 # Initial step size + + # Generate an initial point randomly + x_current = np.random.uniform(lb, ub, self.dim) + f_current = func(x_current) + self.f_opt = f_current + self.x_opt = x_current + + for i in range(1, self.budget): + # Cooling down the temperature + temperature *= cooling_rate + + # Generate a new point by perturbing the current point + perturbation = np.random.normal(0, step_size, self.dim) + x_new = x_current + perturbation + x_new = np.clip(x_new, lb, ub) # Ensure new points are within bounds + + # Evaluate the new point + f_new = func(x_new) + + # Calculate the probability of accepting the new point + if f_new < f_current: + accept = True + else: + # Acceptance probability in case the new function value is worse + # It depends on the difference between new and current function values and the temperature + delta = f_new - f_current + probability = np.exp(-delta / temperature) + accept = np.random.rand() < probability + + # Accept the new point if it is better or by the criterion of simulated annealing + if accept: + x_current = x_new + f_current = f_new + # If a new optimum is found, update the best known values + if f_new < self.f_opt: + self.f_opt = f_new + self.x_opt = x_current + + # Adaptively adjust the step size based on acceptance + if accept: + step_size *= 1.1 # Increase step size if moving in a good direction + else: + step_size *= 0.9 # Decrease step size if stuck or not making progress + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingWithSmartMemory.py b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingWithSmartMemory.py new file mode 100644 index 000000000..741c347d8 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSimulatedAnnealingWithSmartMemory.py @@ -0,0 +1,157 @@ +import numpy as np + + +class AdaptiveSimulatedAnnealingWithSmartMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Smart Memory Reinforcement + if evaluations % (self.budget // 10) == 0: + best_idx = np.argmin(memory_scores) + for _ in range(memory_size // 4): + x_candidate = memory[best_idx] + np.random.normal(0, T, self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdaptiveSineCosineDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveSineCosineDifferentialEvolution.py new file mode 100644 index 000000000..3281863c5 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSineCosineDifferentialEvolution.py @@ -0,0 +1,50 @@ +import numpy as np + + +class AdaptiveSineCosineDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 250 # Adjusted population size for better diversity + self.F_base = 0.5 # Base factor for mutation + self.F_max = 0.9 # Maximum factor for mutation + self.CR = 0.8 # Crossover probability + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + # Identify the best initial agent + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop within the budget constraint + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Dynamically adjust F with a sine modulation to balance exploration and exploitation + F_dynamic = self.F_base + (self.F_max - self.F_base) * np.sin(np.pi * iteration / n_iterations) + for i in range(self.pop_size): + # Mutation using DE/rand/1 strategy with dynamic F + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F_dynamic * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: DE/binomial strategy + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptiveSinusoidalDifferentialSwarm.py b/nevergrad/optimization/lama/AdaptiveSinusoidalDifferentialSwarm.py new file mode 100644 index 000000000..09a0c0dad --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSinusoidalDifferentialSwarm.py @@ -0,0 +1,55 @@ +import numpy as np + + +class AdaptiveSinusoidalDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Adjusted population size for better exploration/exploitation balance + self.F_base = 0.5 # Base mutation factor + self.CR_base = 0.9 # Base crossover probability + self.adaptive_F_amplitude = 0.3 # Increased amplitude for mutation factor oscillation + self.adaptive_CR_amplitude = 0.2 # Increased amplitude for crossover rate oscillation + self.epsilon = 1e-10 # To avoid division by zero + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors using sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size + self.epsilon) + F = self.F_base + self.adaptive_F_amplitude * np.sin(2 * np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.cos(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Mutation strategy: DE/rand/1/bin with dynamic F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptiveSpatialExplorationOptimizer.py b/nevergrad/optimization/lama/AdaptiveSpatialExplorationOptimizer.py new file mode 100644 index 000000000..854fb26df --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSpatialExplorationOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdaptiveSpatialExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension fixed as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 # Increased population for broader coverage + mutation_factor = 0.5 # Initial higher mutation factor for aggressive exploration + crossover_rate = 0.9 # High crossover to encourage information sharing + elite_size = 5 # Number of top solutions to keep unchanged + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Sort by fitness and adopt elitism + sorted_indices = np.argsort(fitness) + for i in range(elite_size): + idx = sorted_indices[i] + new_population[i] = population[idx] + new_fitness[i] = fitness[idx] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + # Tournament selection + idxs = np.random.choice(population_size, 3, replace=False) + if fitness[idxs[0]] < fitness[idxs[1]]: + better_idx = idxs[0] + else: + better_idx = idxs[1] + + target = population[better_idx] + a, b, c = population[np.random.choice(population_size, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, target) + trial_fitness = func(trial) + evaluations += 1 + + new_population[i] = trial if trial_fitness < fitness[better_idx] else population[better_idx] + new_fitness[i] = trial_fitness if trial_fitness < fitness[better_idx] else fitness[better_idx] + + population = new_population + fitness = new_fitness + + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + # Dynamically adjust mutation factor and crossover rate based on progress + if evaluations % (self.budget // 10) == 0: + mutation_factor *= 0.9 + crossover_rate *= 0.95 + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdaptiveSpiralGradientSearch.py b/nevergrad/optimization/lama/AdaptiveSpiralGradientSearch.py new file mode 100644 index 000000000..105087cea --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSpiralGradientSearch.py @@ -0,0 +1,65 @@ +import numpy as np + + +class AdaptiveSpiralGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial setup + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Start with a full range + angle_increment = np.pi / 4 # Broader angle for initial exploration + + # Adaptive parameters + radius_decay = 0.95 # Slowly decrease radius + angle_refinement = 0.90 # Refine angles for closer exploration + evaluations_left = self.budget + min_radius = 0.01 # Prevent the radius from becoming too small + + # This array holds the last few best points to calculate a moving centroid + historical_best = centroid.copy() + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max(int(2 * np.pi / angle_increment), 3) # Ensure at least 3 points + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + for offset in np.linspace(0, 2 * np.pi, num_points, endpoint=False): + displacement = radius * np.array( + [np.cos(angle + offset), np.sin(angle + offset)] + [0] * (self.dim - 2) + ) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Update the centroid towards the best found point in this iteration + if points: + best_index = np.argmin(function_values) + historical_best = 0.8 * historical_best + 0.2 * points[best_index] + centroid = historical_best + + # Dynamically update radius and angle increment + radius *= radius_decay + radius = max(radius, min_radius) + angle_increment *= angle_refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveStepSearch.py b/nevergrad/optimization/lama/AdaptiveStepSearch.py new file mode 100644 index 000000000..90b26ed9e --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveStepSearch.py @@ -0,0 +1,50 @@ +import numpy as np + + +class AdaptiveStepSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = (-5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + self.step_size = [(self.bounds[1] - self.bounds[0]) / 10] * self.dim # Initial step size + + # Random initialization + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + fx = func(x) + if fx < self.f_opt: + self.f_opt = fx + self.x_opt = x + + evaluations = 1 + + while evaluations < self.budget: + for d in range(self.dim): + for direction in [-1, 1]: + step = np.zeros(self.dim) + step[d] = direction * self.step_size[d] + x_new = x + step + + # Ensure the new solution is within bounds + x_new = np.clip(x_new, self.bounds[0], self.bounds[1]) + + fx_new = func(x_new) + evaluations += 1 + + if fx_new < self.f_opt: + self.f_opt = fx_new + self.x_opt = x_new + x = x_new # Move to the new position + + if evaluations >= self.budget: + break + if evaluations >= self.budget: + break + + # Adaptively reduce step size + self.step_size = [s * 0.9 for s in self.step_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveStochasticGradientQuorumOptimization.py b/nevergrad/optimization/lama/AdaptiveStochasticGradientQuorumOptimization.py new file mode 100644 index 000000000..58a63a9c7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveStochasticGradientQuorumOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdaptiveStochasticGradientQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_scale=0.1, + momentum=0.9, + learning_rate=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(max(1, population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.learning_rate = learning_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Track best solution + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + # Optimization loop + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select elite indices including the best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Determine the local best + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Gradient-inspired mutation and update strategy + gradient = best_individual - local_best + random_noise = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = gradient * random_noise * self.learning_rate + self.momentum * velocity + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution and velocity + if child_fitness < best_fitness: + velocity = child - best_individual + self.momentum * velocity + best_fitness = child_fitness + best_individual = child + + new_population[i, :] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Dynamically adapt mutation scale, elite count, and learning rate + self.mutation_scale *= 1 + np.random.uniform(-0.05, 0.05) + self.elite_count = int(max(1, self.elite_count * np.random.uniform(0.95, 1.05))) + self.learning_rate *= 0.99 # Gradually reduce the learning rate to enhance convergence + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveStochasticHybridEvolution.py b/nevergrad/optimization/lama/AdaptiveStochasticHybridEvolution.py new file mode 100644 index 000000000..dcb9e2be0 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveStochasticHybridEvolution.py @@ -0,0 +1,60 @@ +import numpy as np + + +class AdaptiveStochasticHybridEvolution: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=150): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_top_individuals(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def adapt_mutation_strength(self, best_score, current_score, base_strength=0.5, scale_factor=0.9): + if current_score < best_score: + return base_strength * scale_factor + else: + return base_strength / scale_factor + + def mutate_population(self, population, strength): + mutations = np.random.normal(0, strength, population.shape) + return np.clip(population + mutations, self.lower_bound, self.upper_bound) + + def recombine_population(self, best_individuals, population_size): + num_top = len(best_individuals) + extended_population = np.repeat(best_individuals, population_size // num_top, axis=0) + random_indices = np.random.randint(0, num_top, size=(population_size, self.dim)) + for i in range(self.dim): + extended_population[:, i] = best_individuals[random_indices[:, i], i] + return extended_population + + def __call__(self, func): + population_size = 150 + num_generations = max(1, self.budget // population_size) + num_best = 5 # Top individuals to focus on + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_individuals, best_fitness = self.select_top_individuals(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_individuals[0] + + strength = self.adapt_mutation_strength(best_score, best_fitness[0]) + new_population = self.recombine_population(best_individuals, population_size) + population = self.mutate_population(new_population, strength) + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveStochasticTunneling.py b/nevergrad/optimization/lama/AdaptiveStochasticTunneling.py new file mode 100644 index 000000000..6701820d7 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveStochasticTunneling.py @@ -0,0 +1,64 @@ +import numpy as np + + +class AdaptiveStochasticTunneling: + def __init__(self, budget, dim=5, pop_size=50, F=0.8, CR=0.9, alpha=0.5, gamma=0.1): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.F = F # Mutation factor + self.CR = CR # Crossover probability + self.alpha = alpha # Scaling factor for tunneling function + self.gamma = gamma # Curvature parameter for tunneling function + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def tunnel_fitness(self, fitness, best_f): + # Transform fitness using a tunneling function to escape local minima + return best_f - self.alpha * np.exp(-self.gamma * (fitness - best_f)) + + def mutate(self, population, idx): + indices = [i for i in range(self.pop_size) if i != idx] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = np.clip( + population[a] + self.F * (population[b] - population[c]), self.bounds[0], self.bounds[1] + ) + return mutant + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, f_values, trial, trial_f, trial_idx, best_f): + transformed_trial_f = self.tunnel_fitness(trial_f, best_f) + transformed_target_f = self.tunnel_fitness(f_values[trial_idx], best_f) + if transformed_trial_f < transformed_target_f: + population[trial_idx] = trial + f_values[trial_idx] = trial_f + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + best_f = np.min(f_values) + + while n_evals < self.budget: + for idx in range(self.pop_size): + mutant = self.mutate(population, idx) + trial = self.crossover(population[idx], mutant) + trial_f = func(trial) + n_evals += 1 + self.select(population, f_values, trial, trial_f, idx, best_f) + if n_evals >= self.budget: + break + best_f = np.min(f_values) # Update best found solution + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveStrategicExplorationOptimizer.py b/nevergrad/optimization/lama/AdaptiveStrategicExplorationOptimizer.py new file mode 100644 index 000000000..a94a1c835 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveStrategicExplorationOptimizer.py @@ -0,0 +1,83 @@ +import numpy as np + + +class AdaptiveStrategicExplorationOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + elite_fraction=0.1, + mutation_intensity=0.5, + crossover_rate=0.7, + ): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity # Initial intensity of mutation + self.crossover_rate = crossover_rate # Crossover probability + self.sigma = 0.1 # Initial standard deviation for normal distribution in mutation + + def mutate(self, individual): + """Adaptive mutation based on a decreasing strategy""" + mutation_scale = self.mutation_intensity * (self.budget - self.evaluations) / self.budget + mutation = individual + mutation_scale * np.random.randn(self.dimension) + return np.clip(mutation, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent, donor): + """Blended crossover which can adjust the influence of each parent""" + alpha = np.random.uniform(-0.1, 1.1, size=self.dimension) + offspring = alpha * parent + (1 - alpha) * donor + return np.clip(offspring, self.bounds["lb"], self.bounds["ub"]) + + def __call__(self, func): + # Initialize the population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + self.evaluations = self.population_size + + while self.evaluations < self.budget: + # Elite selection + num_elites = int(self.population_size * self.elite_fraction) + elites_indices = np.argsort(fitness)[:num_elites] + elites = population[elites_indices] + + # Generate offspring using mutation and crossover + new_population = np.empty_like(population) + for i in range(self.population_size): + if i < num_elites: + # Preserve elites without changes + new_population[i] = elites[i] + else: + # select random elite for crossover + elite = elites[np.random.randint(num_elites)] + mutated = self.mutate(population[np.random.randint(self.population_size)]) + new_population[i] = self.crossover(elite, mutated) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + self.evaluations += self.population_size + + # Select the best solutions to form the new population + combined_population = np.vstack((population, new_population)) + combined_fitness = np.concatenate((fitness, new_fitness)) + indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[current_best_idx] + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdaptiveSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/AdaptiveSwarmDifferentialEvolution.py new file mode 100644 index 000000000..5f6687e6a --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSwarmDifferentialEvolution.py @@ -0,0 +1,50 @@ +import numpy as np + + +class AdaptiveSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Population size + self.F_base = 0.5 # Base mutation factor + self.CR = 0.9 # Crossover probability + self.adapt_rate = 0.1 # Rate at which F adapts + self.swarm_factor = np.linspace(0.1, 0.9, self.pop_size) # Swarm interaction factor + + def __call__(self, func): + # Initialize population within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the best initial solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + for i in range(int(self.budget / self.pop_size)): + F_adapted = self.F_base + self.adapt_rate * np.sin(2 * np.pi * i / (self.budget / self.pop_size)) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin with swarm interaction + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = pop[j] + self.swarm_factor[j] * (best_ind - pop[j]) + F_adapted * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/AdaptiveSwarmGradientOptimization.py b/nevergrad/optimization/lama/AdaptiveSwarmGradientOptimization.py new file mode 100644 index 000000000..b1361e360 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSwarmGradientOptimization.py @@ -0,0 +1,143 @@ +import numpy as np + + +class AdaptiveSwarmGradientOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Dynamic adaptive loop (combining PSO, Gradient-Based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdaptiveSwarmGradientOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdaptiveSwarmHarmonicOptimizationV4.py b/nevergrad/optimization/lama/AdaptiveSwarmHarmonicOptimizationV4.py new file mode 100644 index 000000000..b9fd2046c --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSwarmHarmonicOptimizationV4.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdaptiveSwarmHarmonicOptimizationV4: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + step_size_factor=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size_factor = step_size_factor + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + indexes = np.random.choice(range(self.num_particles), size=2, replace=False) + new_solution[i] = np.mean(memory_matrix[indexes, i]) + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) * self.step_size_factor + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveSwarmHybridOptimization.py b/nevergrad/optimization/lama/AdaptiveSwarmHybridOptimization.py new file mode 100644 index 000000000..b107d1b48 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveSwarmHybridOptimization.py @@ -0,0 +1,121 @@ +import numpy as np + + +class AdaptiveSwarmHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Adaptive Learning Rate parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + fitness_history = [] + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient descent + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adaptive learning rate strategy + if i > 0 and len(fitness_history) > 0: + recent_improvement = np.mean(np.diff(fitness_history[-5:])) + if recent_improvement < 0: + alpha = min(alpha * 1.05, 1.0) # Increase learning rate if recent improvement + else: + alpha = max(alpha * 0.7, 0.01) # Decrease learning rate if no recent improvement + + fitness_history.append(f) + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdaptiveThresholdDifferentialStrategy.py b/nevergrad/optimization/lama/AdaptiveThresholdDifferentialStrategy.py new file mode 100644 index 000000000..7207366d3 --- /dev/null +++ b/nevergrad/optimization/lama/AdaptiveThresholdDifferentialStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdaptiveThresholdDifferentialStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialization parameters + population_size = 100 + mutation_factor = 0.8 + crossover_prob = 0.9 + adaptivity_rate = 0.05 + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Track the best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + evaluations = population_size + + while evaluations < self.budget: + for i in range(population_size): + # Differential evolution mutation and crossover + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Adaptive threshold for mutation and crossover adjustment + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + # Increase mutation and crossover probability if improvement found + mutation_factor = min(mutation_factor + adaptivity_rate, 1.0) + crossover_prob = min(crossover_prob + adaptivity_rate, 1.0) + else: + # Decrease mutation and crossover probability if no improvement + mutation_factor = max(mutation_factor - adaptivity_rate / 2, 0.1) + crossover_prob = max(crossover_prob - adaptivity_rate / 2, 0.5) + + # Update the best found solution + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial.copy() + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdaptiveThresholdDifferentialStrategy(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/AdvancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..4af5f2b17 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,151 @@ +import numpy as np + + +class AdvancedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def adaptive_factors(success_rate): + if success_rate > 0.2: + return self.mutation_factor * 1.1, self.crossover_rate * 1.05 + else: + return self.mutation_factor * 0.9, self.crossover_rate * 0.95 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + def mutation_strategy(target, a, b, c): + if np.random.rand() < 0.5: + return np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + else: + return np.clip(a + self.mutation_factor * (target - b), self.bounds[0], self.bounds[1]) + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = mutation_strategy(target, a, b, c) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + self.mutation_factor, self.crossover_rate = adaptive_factors(success_rate) + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.6, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdvancedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveDualPhaseStrategy.py b/nevergrad/optimization/lama/AdvancedAdaptiveDualPhaseStrategy.py new file mode 100644 index 000000000..746741cb2 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveDualPhaseStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdvancedAdaptiveDualPhaseStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using an additional differential vector + d = np.random.choice(idxs, 1, replace=False)[0] + mutant = population[a] + self.F * ( + population[b] - population[c] + population[best_idx] - population[d] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Progressive adjustment for convergence acceleration + scale = iteration / total_iterations + self.F = np.clip(0.5 * np.cos(np.pi * scale) + 0.5, 0.1, 1) + self.CR = np.clip(0.5 * np.sin(np.pi * scale) + 0.5, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMemoryStrategyV64.py b/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMemoryStrategyV64.py new file mode 100644 index 000000000..b83aa4043 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMemoryStrategyV64.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdvancedAdaptiveDynamicMemoryStrategyV64: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover rate + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 10: # Manage memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def update_parameters(self, iteration, total_iterations): + # Dynamically adjust F and CR based on a sigmoidal function of the iteration number + scale = iteration / total_iterations + self.F = np.clip(0.8 - 0.7 / (1 + np.exp(-10 * (scale - 0.5))), 0.1, 0.8) + self.CR = np.clip(0.9 - 0.8 / (1 + np.exp(10 * (scale - 0.5))), 0.1, 0.9) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + + for iteration in range(total_iterations): + self.update_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..07ab8c663 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,159 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 30 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 4) + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + # Recording history + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveExplorationExploitationAlgorithm.py b/nevergrad/optimization/lama/AdvancedAdaptiveExplorationExploitationAlgorithm.py new file mode 100644 index 000000000..91fe7a5e0 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveExplorationExploitationAlgorithm.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdvancedAdaptiveExplorationExploitationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 300 # Initial population size for diversity + self.F = 0.8 # Differential weight for exploration + self.CR = 0.9 # Crossover probability for exploitation + self.local_search_chance_initial = 0.3 # Initial local search probability + self.elite_ratio = 0.2 # Ratio of elite members to retain + self.diversity_threshold = 1e-3 # Threshold to switch between exploration and exploitation + self.reinit_percentage = 0.3 # Reinitialization percentage for diversity + self.cauchy_step_scale = 0.01 # Scale for Cauchy distribution steps + self.gaussian_step_scale = 0.001 # Scale for Gaussian distribution steps + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(20): # Increase iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max(0.1, self.local_search_chance_initial * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveExplorationOptimizationAlgorithm.py b/nevergrad/optimization/lama/AdvancedAdaptiveExplorationOptimizationAlgorithm.py new file mode 100644 index 000000000..fb79d9760 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveExplorationOptimizationAlgorithm.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdvancedAdaptiveExplorationOptimizationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 600 # Further increased population size for initial exploration + self.F = 0.7 # Differential weight for exploration + self.CR = 0.8 # Crossover probability for exploitation + self.local_search_chance_initial = 0.4 # Increased local search probability + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-4 # Tighter threshold to switch between exploration and exploitation + self.reinit_percentage = 0.2 # Reinitialization percentage for diversity + self.cauchy_step_scale = 0.02 # Further improved scale for Cauchy distribution steps + self.gaussian_step_scale = 0.002 # Further improved scale for Gaussian distribution steps + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(40): # Adjusted iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max(0.1, self.local_search_chance_initial * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/AdvancedAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..8eeb948c5 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveFireworkAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdvancedAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + exploration_range=0.5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveGlobalClimbingOptimizerV6.py b/nevergrad/optimization/lama/AdvancedAdaptiveGlobalClimbingOptimizerV6.py new file mode 100644 index 000000000..715a19cf4 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveGlobalClimbingOptimizerV6.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdvancedAdaptiveGlobalClimbingOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 100 # Optimized population size + elite_size = 10 # Optimized elite size + evaluations = 0 + + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.1 # Moderate mutation scale + adaptive_factor = 0.95 # Less aggressive scale down for mutation + recombination_prob = 0.8 # Increased recombination probability + + # Main loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Adaptively adjust mutation scale + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + # Regenerate non-elite individuals + for idx in range(population_size - elite_size): + if np.random.rand() < 0.1: # Regeneration rate + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveGradientBoostedMemoryExploration.py b/nevergrad/optimization/lama/AdvancedAdaptiveGradientBoostedMemoryExploration.py new file mode 100644 index 000000000..fafcfdf6d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveGradientBoostedMemoryExploration.py @@ -0,0 +1,180 @@ +import numpy as np + + +class AdvancedAdaptiveGradientBoostedMemoryExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive beta and alpha adjustments based on phases + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Periodic hybrid crossover mechanism + if evaluations % (self.budget // 5) == 0: + for i in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + # Periodic mutation mechanism + if evaluations % (self.budget // 3) == 0: + for i in range(memory_size // 3): + x_mut = memory[np.random.randint(memory_size)] + x_mut += np.random.normal(0, 0.1, self.dim) + x_mut = np.clip(x_mut, func.bounds.lb, func.bounds.ub) + f_mut = func(x_mut) + evaluations += 1 + if f_mut < self.f_opt: + self.f_opt = f_mut + self.x_opt = x_mut + + worst_idx = np.argmax(memory_scores) + if f_mut < memory_scores[worst_idx]: + memory[worst_idx] = x_mut + memory_scores[worst_idx] = f_mut + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveGradientHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedAdaptiveGradientHybridOptimizer.py new file mode 100644 index 000000000..f233d1625 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveGradientHybridOptimizer.py @@ -0,0 +1,79 @@ +import numpy as np + + +class AdvancedAdaptiveGradientHybridOptimizer: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.5, + F_range=0.3, + CR=0.85, + elite_fraction=0.1, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.5: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV56.py b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV56.py new file mode 100644 index 000000000..eba3b2cfa --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV56.py @@ -0,0 +1,76 @@ +import numpy as np + + +class AdvancedAdaptiveMemoryEnhancedStrategyV56: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=20): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + # Adaptive mutation factor based on progress + current_progress = len(self.memory) / self.memory_size + F = self.F * (1 - current_progress) + 0.1 * current_progress + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + + # Memory-guided mutation + if self.memory: + memory_effect = np.mean(self.memory, axis=0) + mutant += 0.1 * memory_effect + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + # Adaptive crossover rate + crossover_rate = self.CR * (1 - (len(self.memory) / self.memory_size)) + crossover_mask = np.random.rand(self.dimension) < crossover_rate + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV73.py b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV73.py new file mode 100644 index 000000000..9021b54fc --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryEnhancedStrategyV73.py @@ -0,0 +1,78 @@ +import numpy as np + + +class AdvancedAdaptiveMemoryEnhancedStrategyV73: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = 20 # Enhanced memory management + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + candidates = list(range(size)) + candidates.remove(index) + a, b, c = np.random.choice(candidates, 3, replace=False) + mutation_factor = self.F * np.tanh(4 * (1 - (index / self.pop_size))) # Adaptive mutation factor + mutant = ( + population[a] + + mutation_factor * (population[b] - population[c]) + + 0.1 * (population[best_idx] - population[index]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(len(self.memory))] = ( + trial - target + ) # Random replacement strategy + return trial, f_trial + return target, f_target + + def adaptive_memory_effect(self): + if self.memory: + return np.mean(self.memory, axis=0) + return np.zeros(self.dimension) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveMemoryGuidedStrategyV77.py b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryGuidedStrategyV77.py new file mode 100644 index 000000000..05e5200bf --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveMemoryGuidedStrategyV77.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedAdaptiveMemoryGuidedStrategyV77: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx): + size = len(population) + idxs = np.random.choice(size, 3, replace=False) + a, b, c = idxs[0], idxs[1], idxs[2] + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = ( + population[a] + + self.F * (population[best_idx] - population[b]) + + self.F * (population[c] - population[a]) + + memory_effect + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(len(self.memory))] = trial - target + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + phase_progress = iteration / total_iterations + self.F = np.clip(0.5 + 0.4 * np.sin(2 * np.pi * phase_progress), 0.1, 1) + self.CR = np.clip(0.5 + 0.4 * np.cos(2 * np.pi * phase_progress), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + iteration = 0 + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdvancedAdaptiveMemorySimulatedAnnealing.py new file mode 100644 index 000000000..6ce7a1060 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveMemorySimulatedAnnealing.py @@ -0,0 +1,124 @@ +import numpy as np + + +class AdvancedAdaptiveMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Parameters for simulated annealing + T_initial = 1.0 + T_min = 1e-5 + alpha = 0.97 + beta_initial = 1.5 + + # Initialize current solution + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedAdaptivePSO.py b/nevergrad/optimization/lama/AdvancedAdaptivePSO.py new file mode 100644 index 000000000..f4829b815 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptivePSO.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdvancedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=200, + omega_initial=0.95, + omega_final=0.2, + phi_p=0.25, + phi_g=0.75, + critical_depth=30, + adaptive_depth=10, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal preference influence + self.phi_g = phi_g # Global preference influence + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.critical_depth = critical_depth # Depth of performance evaluation for adaptive inertia + self.adaptive_depth = adaptive_depth # Depth used for quick adaptation checks + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.critical_depth :] + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + if len(scores) > self.adaptive_depth and np.std(scores[-self.adaptive_depth :]) < 0.005: + return max( + self.omega_final, + self.omega_initial + - (evaluation_counter / self.budget) * (self.omega_initial - self.omega_final) * 1.5, + ) + else: + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveQuantumEntropyDE.py b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumEntropyDE.py new file mode 100644 index 000000000..c58226bf3 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumEntropyDE.py @@ -0,0 +1,153 @@ +import numpy as np + + +class AdvancedAdaptiveQuantumEntropyDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveQuantumLevyOptimizer.py b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumLevyOptimizer.py new file mode 100644 index 000000000..d0177365c --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumLevyOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedAdaptiveQuantumLevyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.5 + self.social_weight = 1.5 + self.quantum_weight = 0.4 + self.elite_fraction = 0.2 + self.memory_size = 30 + self.local_search_probability = 0.7 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + self.strategy_rewards = [0, 0, 0, 0] + self.strategy_uses = [0, 0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2, 3], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 3: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = AdvancedAdaptiveQuantumLevyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV1.py b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV1.py new file mode 100644 index 000000000..89ddd85bb --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV1.py @@ -0,0 +1,120 @@ +import numpy as np + + +class AdvancedAdaptiveQuantumSwarmOptimizationV1: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..0b48becfe --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveQuantumSwarmOptimizationV2.py @@ -0,0 +1,120 @@ +import numpy as np + + +class AdvancedAdaptiveQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/AdvancedAdaptiveStrategyOptimizer.py b/nevergrad/optimization/lama/AdvancedAdaptiveStrategyOptimizer.py new file mode 100644 index 000000000..29908f564 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAdaptiveStrategyOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class AdvancedAdaptiveStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 150 # Adjust population size for balance between exploration and computation + mutation_factors = [0.8, 1.2] # Dual mutation factors for diverse exploration tactics + crossover_rate = 0.7 # Modulating crossover rate for robustness + elite_size = 10 # Adjust elite size to preserve top solutions + strategy_switch = 15 # Strategy switch frequency for mutation factor adaptation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Adaptive mutation factor based on periodic strategy switch + mutation_factor = mutation_factors[(evaluations // strategy_switch) % len(mutation_factors)] + + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedAttenuatedAdaptiveEvolver.py b/nevergrad/optimization/lama/AdvancedAttenuatedAdaptiveEvolver.py new file mode 100644 index 000000000..c5ee24d8d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedAttenuatedAdaptiveEvolver.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdvancedAttenuatedAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=150, + initial_step_size=0.5, + step_decay=0.97, + elite_ratio=0.2, + mutation_probability=0.2, + recombination_rate=0.3, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_probability = mutation_probability + self.recombination_rate = recombination_rate + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + if np.random.rand() < self.mutation_probability: + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + return individual + + def crossover(self, parent1, parent2): + if np.random.rand() < self.recombination_rate: + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + return child + return parent1 + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * ( + self.step_decay**generation + ) # Dynamic step size for exploration adjustment + + # Pair and possibly crossover individuals for new population + np.random.shuffle(population) + new_population = [] + for i in range(0, self.population_size, 2): + parent1 = population[i] + parent2 = population[i + 1] if i + 1 < self.population_size else population[0] + child1 = self.crossover(parent1, parent2) + child2 = self.crossover(parent2, parent1) + new_population.extend([self.mutate(child1, scale), self.mutate(child2, scale)]) + new_population = np.array(new_population[: self.population_size]) + new_fitness = self.evaluate_population(func, new_population) + + population = np.vstack((population, new_population)) + fitness = np.hstack((fitness, new_fitness)) + indices = np.argsort(fitness) + population = population[indices[: self.population_size]] + fitness = fitness[indices[: self.population_size]] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedBalancedAdaptiveElitistStrategyV2.py b/nevergrad/optimization/lama/AdvancedBalancedAdaptiveElitistStrategyV2.py new file mode 100644 index 000000000..f0482194a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedBalancedAdaptiveElitistStrategyV2.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedBalancedAdaptiveElitistStrategyV2: + def __init__( + self, + budget, + dimension=5, + population_size=50, + elite_fraction=0.2, + mutation_intensity=0.1, + crossover_rate=0.7, + recombination_factor=0.5, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial mutation intensity factor + self.crossover_rate = crossover_rate # Probability of crossover + self.recombination_factor = recombination_factor # Weight factor for recombination + + def __call__(self, func): + # Initialize the population within bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Identify elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if i < self.elite_count: + # Elite individuals are carried over unchanged + new_population[i] = population[elite_indices[i]] + else: + # Generate new individuals by mutation and crossover + if np.random.random() < self.crossover_rate: + # Perform crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.recombination(parent1, parent2) + else: + # Directly mutate an elite + parent = elites[np.random.randint(0, self.elite_count)] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + + # Evaluate new individual's fitness + new_fitness = func(new_population[i]) + if new_fitness < fitness[i]: + fitness[i] = new_fitness + population[i] = new_population[i] + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation scale decreases over time + scale = self.mutation_intensity * (1 - (evaluations / self.budget)) + return individual + np.random.normal(0, scale, self.dimension) + + def recombination(self, parent1, parent2): + # Blended recombination + return self.recombination_factor * parent1 + (1 - self.recombination_factor) * parent2 diff --git a/nevergrad/optimization/lama/AdvancedBalancedExplorationOptimizer.py b/nevergrad/optimization/lama/AdvancedBalancedExplorationOptimizer.py new file mode 100644 index 000000000..3f7550a9a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedBalancedExplorationOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedBalancedExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 80 + mutation_factor = 0.8 + crossover_probability = 0.7 + elite_size = 5 + + # Initialize population and evaluate + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptation factors + adaptive_mutation = np.full(population_size, mutation_factor) + adaptive_crossover = np.full(population_size, crossover_probability) + success_tracker = np.zeros(population_size) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Elite retention + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + # Mutation and Crossover + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with adaptive factors + mutant = a + adaptive_mutation[i] * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < adaptive_crossover[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + success_tracker[i] += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + success_tracker[i] = max(0, success_tracker[i] - 1) + + # Update adaptive mutation and crossover probabilities + if success_tracker[i] > 2: + adaptive_mutation[i] = min(1.0, adaptive_mutation[i] + 0.05) + adaptive_crossover[i] = min(1.0, adaptive_crossover[i] + 0.05) + elif success_tracker[i] == 0: + adaptive_mutation[i] = max(0.5, adaptive_mutation[i] - 0.05) + adaptive_crossover[i] = max(0.5, adaptive_crossover[i] - 0.05) + + # Update best solution + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRate.py b/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRate.py new file mode 100644 index 000000000..492e8095d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRate.py @@ -0,0 +1,107 @@ +import numpy as np + + +class AdvancedDifferentialEvolutionWithAdaptiveLearningRate: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def enhance_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + enhance_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.2 + else: + self.base_lr *= 0.8 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdvancedDifferentialEvolutionWithAdaptiveLearningRate(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2.py b/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2.py new file mode 100644 index 000000000..b4fa328a1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2.py @@ -0,0 +1,107 @@ +import numpy as np + + +class AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def enhance_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + enhance_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.2 + else: + self.base_lr *= 0.8 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedDifferentialParticleSwarmOptimization.py b/nevergrad/optimization/lama/AdvancedDifferentialParticleSwarmOptimization.py new file mode 100644 index 000000000..e3e3ed52f --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDifferentialParticleSwarmOptimization.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedDifferentialParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.init_num_niches = 5 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + best_niche_idx = np.argmin(local_best_fits) + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (local_bests[best_niche_idx] - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < 0.5 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDimensionalCyclicCrossoverEvolver.py b/nevergrad/optimization/lama/AdvancedDimensionalCyclicCrossoverEvolver.py new file mode 100644 index 000000000..4d8cb3fba --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDimensionalCyclicCrossoverEvolver.py @@ -0,0 +1,90 @@ +import numpy as np + + +class AdvancedDimensionalCyclicCrossoverEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.25, + mutation_intensity=0.01, + crossover_probability=0.9, + momentum=0.2, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.momentum = momentum + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def cyclic_crossover(self, parent1, parent2): + start = np.random.randint(self.dimension) + cycle_length = np.random.randint(1, self.dimension) + indices = np.arange(start, start + cycle_length) % self.dimension + child = parent1.copy() + child[indices] = parent2[indices] + return child + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.cyclic_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + child = child + self.momentum * (child - previous_population[i]) + new_population[i] = child + + # Ensuring the best previous individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedDimensionalFeedbackEvolver.py b/nevergrad/optimization/lama/AdvancedDimensionalFeedbackEvolver.py new file mode 100644 index 000000000..c1689fd31 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDimensionalFeedbackEvolver.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdvancedDimensionalFeedbackEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=250, + elite_fraction=0.2, + mutation_intensity=0.01, + crossover_probability=0.9, + feedback_factor=0.4, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.feedback_factor = feedback_factor + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + child = np.where(np.random.rand(self.dimension) < 0.5, parent1, parent2) + return child + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + child += self.feedback_factor * (previous_best - previous_population[i]) + new_population[i] = child + + # Ensuring the best previous individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedDiversityAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedDiversityAdaptiveDE.py new file mode 100644 index 000000000..da9c10e6f --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDiversityAdaptiveDE.py @@ -0,0 +1,169 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedDiversityAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.8 + self.final_crossover_prob = 0.2 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 20 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution with multi-phase mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Multi-phase mutation + if generation % 2 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + else: + mutant = ( + x1 + + mutation_factor * (x2 - x3) + + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x1) + ) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/AdvancedDiversityDE.py b/nevergrad/optimization/lama/AdvancedDiversityDE.py new file mode 100644 index 000000000..bf41ad870 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDiversityDE.py @@ -0,0 +1,55 @@ +import numpy as np + + +class AdvancedDiversityDE: + def __init__(self, budget=10000, population_size=100, F_init=0.5, CR=0.8, learning_rate=0.1): + self.budget = budget + self.population_size = population_size + self.F = F_init # Initial Differential weight + self.CR = CR # Crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive F + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive F update + self.F *= (1 - self.learning_rate) + self.learning_rate * np.random.normal(loc=0.5, scale=0.1) + + # Mutation using "best" individual + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(best + self.F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < fitness[best_idx]: + best_idx = i + best = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Find and return the best solution + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AdvancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..5afbe7c8a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDualStrategyAdaptiveDE.py @@ -0,0 +1,130 @@ +import numpy as np + + +class AdvancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.6 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor and crossover probability + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(range(elite_count), 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = np.random.normal(0, 0.01, self.dim) # Gaussian perturbation for effective local search + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/AdvancedDualStrategyHybridDE.py b/nevergrad/optimization/lama/AdvancedDualStrategyHybridDE.py new file mode 100644 index 000000000..8cde8ed29 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDualStrategyHybridDE.py @@ -0,0 +1,125 @@ +import numpy as np + + +class AdvancedDualStrategyHybridDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 80 # Increase population size for better diversity + self.initial_mutation_factor = 0.9 # Sligthly increase mutation factor + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.95 # Increase crossover probability + self.elitism_rate = 0.3 # Increase elitism rate + self.local_search_prob = 0.25 # Increase local search probability + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..d2db0306f --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py @@ -0,0 +1,174 @@ +import numpy as np + + +class AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.6 # Adjusted inertia weight for PSO + c1 = 1.2 # Adjusted cognitive coefficient for PSO + c2 = 1.4 # Adjusted social coefficient for PSO + initial_F = 0.9 # Adjusted initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + memory_size = 5 # Size of historical memory for adaptive parameter tuning + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + historical_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + # Update historical memory and adapt parameters based on memory analysis + if len(historical_memory) >= memory_size: + historical_memory.pop(0) + historical_memory.append((population.copy(), fitness.copy())) + + if len(historical_memory) >= memory_size: + for i in range(population_size): + historical_fitness = [hist[1][i] for hist in historical_memory] + if np.std(historical_fitness) < 1e-5: # Detect stagnation + F_values[i] = 0.1 + 0.9 * np.random.rand() + CR_values[i] = np.random.rand() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..27699fb95 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicAdaptiveHybridOptimizer.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize +import cma + + +class AdvancedDynamicAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + restart_threshold=100, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.restart_threshold = restart_threshold + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def cma_es_local_search(self, x, func, budget): + es = cma.CMAEvolutionStrategy( + x, 0.1, {"bounds": [self.bounds[0], self.bounds[1]], "maxfevals": budget} + ) + while not es.stop(): + solutions = es.ask() + es.tell(solutions, [func(s) for s in solutions]) + es.disp() + result = es.result + self.eval_count += result.evaluations + return result.xbest, result.fbest + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + best_fitness_history = [g_best_fitness] + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + best_fitness_history.append(g_best_fitness) + + # Population resizing based on convergence rate + if len(best_fitness_history) > 10 and best_fitness_history[-10] == g_best_fitness: + self.init_pop_size = max(5, self.init_pop_size // 2) + population = population[: self.init_pop_size] + fitness = fitness[: self.init_pop_size] + velocities = velocities[: self.init_pop_size] + F_values = F_values[: self.init_pop_size] + CR_values = CR_values[: self.init_pop_size] + p_best = p_best[: self.init_pop_size] + p_best_fitness = p_best_fitness[: self.init_pop_size] + + # Restart mechanism if stagnation detected + if ( + len(best_fitness_history) > self.restart_threshold + and best_fitness_history[-self.restart_threshold] == g_best_fitness + ): + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + best_fitness_history = [g_best_fitness] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.cma_es_local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDynamicCrowdedDE.py b/nevergrad/optimization/lama/AdvancedDynamicCrowdedDE.py new file mode 100644 index 000000000..82af82dc7 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicCrowdedDE.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.spatial import distance + + +class AdvancedDynamicCrowdedDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + init_population_size = 20 + F = 0.8 # Initial Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(size): + population = np.random.uniform(bounds[0], bounds[1], (size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(len(F_values)): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def crowding_distance_sort(population, fitness): + distances = distance.cdist(population, population, "euclidean") + sorted_indices = np.argsort(fitness) + crowding_distances = np.zeros(len(population)) + crowding_distances[sorted_indices[0]] = np.inf + crowding_distances[sorted_indices[-1]] = np.inf + + for i in range(1, len(population) - 1): + crowding_distances[sorted_indices[i]] = distances[ + sorted_indices[i - 1], sorted_indices[i + 1] + ] + + return np.argsort(crowding_distances) + + def mutation_strategy_1(population, i, F): + indices = list(range(len(population))) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(len(population))) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(score): + return mutation_strategy_1 if score < 0.5 else mutation_strategy_2 + + def archive_mutation(archive, population, F): + if len(archive) > 0: + idx = np.random.randint(0, len(archive)) + return np.clip(population + F * (archive[idx] - population), bounds[0], bounds[1]) + else: + return population + + population, fitness = initialize_population(init_population_size) + evaluations = init_population_size + archive = [] + + F_values = np.full(init_population_size, F) + CR_values = np.full(init_population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(len(population), F) + CR_values = np.full(len(population), CR) + last_improvement = evaluations + + sorted_indices = crowding_distance_sort(population, fitness) + new_population = np.zeros_like(population) + new_fitness = np.zeros(len(population)) + new_F_values = np.zeros(len(population)) + new_CR_values = np.zeros(len(population)) + + for idx in range(len(population)): + i = sorted_indices[idx] + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy(fitness[i]) + mutant = mutation_strategy(population, i, F_values[i]) + + # Archive-based mutation + if np.random.rand() < 0.3: + mutant = archive_mutation(archive, mutant, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + + # Update Archive + archive.append(population[i]) + if len(archive) > init_population_size: + archive.pop(0) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + if evaluations - last_improvement > restart_threshold // 2: + population_size = int(len(population) * 0.9) + else: + population_size = int(len(population) * 1.1) + population_size = max(10, min(30, population_size)) + + population, fitness = new_population[:population_size], new_fitness[:population_size] + F_values, CR_values = new_F_values[:population_size], new_CR_values[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDynamicDualPhaseStrategyV37.py b/nevergrad/optimization/lama/AdvancedDynamicDualPhaseStrategyV37.py new file mode 100644 index 000000000..2564f096a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicDualPhaseStrategyV37.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdvancedDynamicDualPhaseStrategyV37: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Standard mutation strategy for phase 1 + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using extra differential vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adjustment of parameters using a smooth transition function + scale = iteration / total_iterations + self.F = 0.5 * (1 + np.sin(np.pi * scale - np.pi / 2)) # Oscillates between 0 and 1 + self.CR = 0.9 # Constant recombination rate + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedDynamicExplorationOptimizer.py b/nevergrad/optimization/lama/AdvancedDynamicExplorationOptimizer.py new file mode 100644 index 000000000..21282f886 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicExplorationOptimizer.py @@ -0,0 +1,166 @@ +import numpy as np + + +class AdvancedDynamicExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 40 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor for better exploration + max_exploration_cycles = 40 # Reduced maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdvancedDynamicExplorationOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/AdvancedDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..7a2690d84 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdvancedDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdvancedDynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..1f429126d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdvancedDynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedDynamicHybridOptimization.py b/nevergrad/optimization/lama/AdvancedDynamicHybridOptimization.py new file mode 100644 index 000000000..fb931b78c --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicHybridOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class AdvancedDynamicHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 25 # Reduced maximum exploration cycles for quicker reaction + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.85 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdvancedDynamicHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedDynamicHybridOptimizer.py new file mode 100644 index 000000000..fbaefe5d3 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicHybridOptimizer.py @@ -0,0 +1,179 @@ +import numpy as np +from scipy.optimize import minimize +from concurrent.futures import ThreadPoolExecutor + + +class AdvancedDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + max_workers=4, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.max_workers = max_workers + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def simulated_annealing(self, x, func, budget): + T = 1.0 + T_min = 0.0001 + alpha = 0.9 + best = x + best_score = func(x) + self.eval_count += 1 + while self.eval_count < budget and T > T_min: + i = 1 + while i <= 100: + candidate = x + np.random.normal(0, 1, self.dim) + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_score = func(candidate) + self.eval_count += 1 + if candidate_score < best_score: + best = candidate + best_score = candidate_score + else: + ap = np.exp((best_score - candidate_score) / T) + if np.random.rand() < ap: + best = candidate + best_score = candidate_score + i += 1 + T = T * alpha + return best, best_score + + def evaluate_population(self, func, population): + with ThreadPoolExecutor(max_workers=self.max_workers) as executor: + fitness = list(executor.map(func, population)) + self.eval_count += len(population) + return np.array(fitness) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = self.evaluate_population(func, population) + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + best_fitness_history = [g_best_fitness] + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + best_fitness_history.append(g_best_fitness) + + # Population resizing based on convergence rate + if len(best_fitness_history) > 10 and best_fitness_history[-10] == g_best_fitness: + self.init_pop_size = max(5, self.init_pop_size // 2) + population = population[: self.init_pop_size] + fitness = fitness[: self.init_pop_size] + velocities = velocities[: self.init_pop_size] + F_values = F_values[: self.init_pop_size] + CR_values = CR_values[: self.init_pop_size] + p_best = p_best[: self.init_pop_size] + p_best_fitness = p_best_fitness[: self.init_pop_size] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + # Hybridization with Simulated Annealing + if self.eval_count < self.budget: + remaining_budget = self.budget - self.eval_count + g_best, g_best_fitness = self.simulated_annealing(g_best, func, remaining_budget) + + self.f_opt = g_best_fitness + self.x_opt = g_best + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedDynamicMultimodalSimulatedAnnealing.py b/nevergrad/optimization/lama/AdvancedDynamicMultimodalSimulatedAnnealing.py new file mode 100644 index 000000000..617c3b010 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicMultimodalSimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdvancedDynamicMultimodalSimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedDynamicStrategyAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedDynamicStrategyAdaptiveDE.py new file mode 100644 index 000000000..8d4564736 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedDynamicStrategyAdaptiveDE.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedDynamicStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Dynamic strategy: switch mutation strategy based on generation + if generation % 3 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + elif generation % 3 == 1: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/AdvancedEliteAdaptiveCrowdingHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedEliteAdaptiveCrowdingHybridOptimizer.py new file mode 100644 index 000000000..94ce9d6f1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEliteAdaptiveCrowdingHybridOptimizer.py @@ -0,0 +1,195 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedEliteAdaptiveCrowdingHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + dist[i] += np.linalg.norm(population[i] - population[j]) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Maintain diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individuals = np.random.uniform(self.bounds[0], self.bounds[1], (1, self.dim)) + distances = self.crowding_distance(new_individuals) + if np.min(distances) > np.min(dist): + population = np.vstack([population, new_individuals]) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, (1, self.dim))]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individuals]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEliteDynamicHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedEliteDynamicHybridOptimizer.py new file mode 100644 index 000000000..9cb0913ca --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEliteDynamicHybridOptimizer.py @@ -0,0 +1,136 @@ +import numpy as np + + +class AdvancedEliteDynamicHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Adjusted population size for better balance + self.initial_F = 0.8 # Adjusted for balanced mutation step + self.initial_CR = 0.9 # High crossover rate to maintain genetic diversity + self.c1 = 1.5 # Increased cognitive coefficient for personal best attraction + self.c2 = 1.5 # Increased social coefficient for global best attraction + self.w = 0.5 # Adjusted inertia weight for maintaining momentum + self.elite_fraction = 0.1 # Fraction of elite population + self.diversity_threshold = 1e-3 # Adjusted threshold for reinitialization + self.tau1 = 0.1 # Parameter adaptation probability + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 20, self.budget - evaluations + ) # Reduced iterations for quicker runs + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.01, bounds.lb, bounds.ub + ) # Reduced perturbation for precision + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..911207c0a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdvancedEnhancedAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveMetaNetAQAPSO.py b/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveMetaNetAQAPSO.py new file mode 100644 index 000000000..c1f0cc098 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedAdaptiveMetaNetAQAPSO.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class AdvancedEnhancedAdaptiveMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEnhancedDifferentialEvolutionLocalSearch_v55.py b/nevergrad/optimization/lama/AdvancedEnhancedDifferentialEvolutionLocalSearch_v55.py new file mode 100644 index 000000000..2d87a7a8b --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedDifferentialEvolutionLocalSearch_v55.py @@ -0,0 +1,104 @@ +import numpy as np + + +class AdvancedEnhancedDifferentialEvolutionLocalSearch_v55: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def advanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(50): # Increased the number of runs for better results + best_fitness, _ = self.advanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, func( + np.random.uniform(-5.0, 5.0, self.dim) + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/AdvancedEnhancedEnhancedGuidedMassQGSA_v69.py b/nevergrad/optimization/lama/AdvancedEnhancedEnhancedGuidedMassQGSA_v69.py new file mode 100644 index 000000000..d799e556d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedEnhancedGuidedMassQGSA_v69.py @@ -0,0 +1,105 @@ +import numpy as np + + +class AdvancedEnhancedEnhancedGuidedMassQGSA_v69: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_advanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_advanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEnhancedGuidedMassQGSA_v65.py b/nevergrad/optimization/lama/AdvancedEnhancedGuidedMassQGSA_v65.py new file mode 100644 index 000000000..1d1ef2a8b --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedGuidedMassQGSA_v65.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdvancedEnhancedGuidedMassQGSA_v65: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def _calculate_area_over_convergence_curve(self): + if len(self.best_fitness_history) <= 1: + return 1.0 + aocc = np.trapz(self.best_fitness_history, dx=1) / (len(self.best_fitness_history) - 1) + return 1.0 / (1.0 + aocc) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + self.best_fitness_history.append(self.f_opt) + + # Calculate AOCC + aocc = self._calculate_area_over_convergence_curve() + + return aocc, self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizer.py new file mode 100644 index 000000000..eac007da6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdvancedEnhancedHybridMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.7, + inertia_weight=0.8, + cognitive_weight=1.6, + social_weight=1.6, + max_velocity=0.5, + mutation_rate=0.1, + num_generations=300, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizerV16.py b/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizerV16.py new file mode 100644 index 000000000..1736c4f34 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedEnhancedHybridMetaHeuristicOptimizerV16.py @@ -0,0 +1,110 @@ +import numpy as np + + +class AdvancedEnhancedHybridMetaHeuristicOptimizerV16: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=3, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/AdvancedExplorativeConvergenceEnhancer.py b/nevergrad/optimization/lama/AdvancedExplorativeConvergenceEnhancer.py new file mode 100644 index 000000000..71b4a83e1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedExplorativeConvergenceEnhancer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdvancedExplorativeConvergenceEnhancer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.05, + mutation_intensity=0.3, + crossover_probability=0.7, + elite_boosting_factor=1.1, + mutation_decay=0.98, + stabilization_period=10, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.elite_boosting_factor = elite_boosting_factor + self.mutation_decay = mutation_decay + self.stabilization_period = stabilization_period + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def elite_boosting(self, elite_individual): + perturbation = np.random.normal( + 0, self.mutation_intensity * self.elite_boosting_factor, self.dimension + ) + return np.clip(elite_individual + perturbation, self.lower_bound, self.upper_bound) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + stabilization_counter = 0 + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = np.array([self.elite_boosting(elite) for elite in elites]) + + while len(new_population) < self.population_size: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population = np.append(new_population, [child], axis=0) + + population = new_population + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + stabilization_counter = 0 + else: + stabilization_counter += 1 + + evaluations += self.population_size + + if stabilization_counter >= self.stabilization_period: + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedFireworkAlgorithmWithAdaptiveMutation.py b/nevergrad/optimization/lama/AdvancedFireworkAlgorithmWithAdaptiveMutation.py new file mode 100644 index 000000000..9a5e8fb82 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedFireworkAlgorithmWithAdaptiveMutation.py @@ -0,0 +1,114 @@ +import numpy as np + + +class AdvancedFireworkAlgorithmWithAdaptiveMutation: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate_range=(0.01, 0.1), + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate_range = mutation_rate_range + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [ + ( + np.copy(x), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + for x in self.population + ] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, alpha, beta): + alpha *= 0.9 # Decrease alpha + beta *= 1.1 # Increase beta + return alpha, beta + + def adapt_mutation_rate(self, fitness_diff, mutation_rate): + if fitness_diff < 0: + mutation_rate *= 0.9 # Decrease mutation rate + else: + mutation_rate *= 1.1 # Increase mutation rate + return np.clip(mutation_rate, *self.mutation_rate_range) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta, mutation_rate) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + new_spark += np.random.normal(0, mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = ( + np.copy(new_spark), + 0, + *self.update_parameters(i, alpha, beta), + mutation_rate, + ) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + self.adapt_mutation_rate(fitness_diff, mutation_rate), + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedFocusedAdaptiveOptimizer.py b/nevergrad/optimization/lama/AdvancedFocusedAdaptiveOptimizer.py new file mode 100644 index 000000000..842768181 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedFocusedAdaptiveOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdvancedFocusedAdaptiveOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.8 # Adjusted global influence + self.local_influence = 0.2 # Adjusted local influence + self.vel_scale = 0.1 # Adjusted velocity scaling for stabilization + self.learning_factor = 0.5 # New: Doubling initial learning factor to speed up convergence + + def initialize_particles(self): + # Initialize positions in the search space and velocities + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + self.learning_factor * r1 * (personal_best_positions[i] - positions[i]) + + self.learning_factor * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/AdvancedGlobalClimbingOptimizerV4.py b/nevergrad/optimization/lama/AdvancedGlobalClimbingOptimizerV4.py new file mode 100644 index 000000000..f2649f076 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedGlobalClimbingOptimizerV4.py @@ -0,0 +1,73 @@ +import numpy as np + + +class AdvancedGlobalClimbingOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 100 # Optimized population size for balance + elite_size = 15 # Adjusted elite size to promote better exploitation + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.2 # Increased mutation scale for better global search + adaptive_factor = 0.95 # Adjusted reduction factor + recombination_prob = 0.7 # Higher recombination to enhance diversity + + # Main evolutionary loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if evaluations % 200 == 0: + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size - elite_size): + if np.random.rand() < 0.15: + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedGlobalStructureAwareOptimizerV3.py b/nevergrad/optimization/lama/AdvancedGlobalStructureAwareOptimizerV3.py new file mode 100644 index 000000000..b359456ba --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedGlobalStructureAwareOptimizerV3.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdvancedGlobalStructureAwareOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 120 + elite_size = 30 + evaluations = 0 + mutation_scale = 0.1 + adaptive_factor = 0.98 + recombination_prob = 0.92 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + for idx in range(population_size - elite_size): + if np.random.rand() < 0.3: # Enhanced mutation within elites + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + np.random.normal( + 0, mutation_scale, self.dim + ) + population[idx] = np.clip(population[idx], self.lb, self.ub) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if evaluations % 300 == 0: # More frequent global structure mutations + structure_scale = 0.5 + structure_population = np.random.normal(0, structure_scale, (population_size // 3, self.dim)) + structure_population = np.clip( + structure_population + + population[np.random.choice(population_size, population_size // 3)], + self.lb, + self.ub, + ) + structure_fitness = np.array([func(ind) for ind in structure_population]) + evaluations += population_size // 3 + + combined_population = np.concatenate((population, structure_population), axis=0) + combined_fitness = np.concatenate((fitness, structure_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration.py b/nevergrad/optimization/lama/AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration.py new file mode 100644 index 000000000..1df4b8494 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration.py @@ -0,0 +1,159 @@ +import numpy as np + + +class AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha = 0.98 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 30 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Adaptive exploration based on current progress + if evaluations % (self.budget // 5) == 0: + adaptive_exploration_radius = 0.2 + 0.8 * (1 - T / T_initial) + for _ in range(memory_size // 3): + x_candidate = memory[ + np.random.randint(memory_size) + ] + adaptive_exploration_radius * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=60, step_size=0.004): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.15): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategy.py b/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategy.py new file mode 100644 index 000000000..07588bde4 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategy.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdvancedGradientEvolutionStrategy: + def __init__(self, budget, dim=5, pop_size=100, tau=0.2, sigma_init=0.3, learning_rate=0.01): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.tau = tau # Learning rate for step size adaptation + self.sigma_init = sigma_init # Initial step size + self.learning_rate = learning_rate # Gradient descent learning rate + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, individual, sigma): + return np.clip(individual + sigma * np.random.randn(self.dim), self.bounds[0], self.bounds[1]) + + def estimate_gradient(self, func, individual): + grad = np.zeros(self.dim) + f_base = func(individual) + for i in range(self.dim): + perturb = np.zeros(self.dim) + perturb[i] = self.sigma_init + f_plus = func(individual + perturb) + grad[i] = (f_plus - f_base) / self.sigma_init + return grad + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + sigma_values = np.full(self.pop_size, self.sigma_init) + + while n_evals < self.budget: + new_population = [] + new_f_values = [] + + for idx in range(self.pop_size): + individual = population[idx] + sigma = sigma_values[idx] + gradient = self.estimate_gradient(func, individual) + individual_new = np.clip( + individual - self.learning_rate * gradient, self.bounds[0], self.bounds[1] + ) + f_new = func(individual_new) + n_evals += 1 + + new_population.append(individual_new) + new_f_values.append(f_new) + + if n_evals >= self.budget: + break + + # Update population and sigma values for the next generation + population = np.array(new_population) + f_values = np.array(new_f_values) + rankings = np.argsort(f_values) + best_sigma = sigma_values[rankings[0]] + sigma_values = best_sigma * np.exp(self.tau * (np.arange(self.pop_size) / self.pop_size - 0.5)) + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategyV2.py b/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategyV2.py new file mode 100644 index 000000000..412783f69 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedGradientEvolutionStrategyV2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class AdvancedGradientEvolutionStrategyV2: + def __init__( + self, budget, dim=5, pop_size=100, tau=0.3, sigma_init=0.5, learning_rate=0.02, grad_approx_steps=10 + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.tau = tau # Enhanced learning rate for step size adaptation + self.sigma_init = sigma_init # Adjusted initial step size + self.learning_rate = learning_rate # Adjusted gradient descent learning rate + self.grad_approx_steps = grad_approx_steps # Steps to approximate gradient + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, individual, sigma): + return np.clip(individual + sigma * np.random.randn(self.dim), self.bounds[0], self.bounds[1]) + + def estimate_gradient(self, func, individual): + grad = np.zeros(self.dim) + f_base = func(individual) + for i in range(self.dim): + perturb = np.zeros(self.dim) + eps = self.sigma_init / np.sqrt(self.dim) + perturb[i] = eps + f_plus = func(individual + perturb) + grad[i] = (f_plus - f_base) / eps + return grad + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + sigma_values = np.full(self.pop_size, self.sigma_init) + + while n_evals < self.budget: + new_population = [] + new_f_values = [] + + for idx in range(self.pop_size): + individual = population[idx] + sigma = sigma_values[idx] + gradient = np.zeros(self.dim) + + for step in range(self.grad_approx_steps): + gradient += self.estimate_gradient(func, individual) + gradient /= self.grad_approx_steps + + individual_new = np.clip( + individual - self.learning_rate * gradient, self.bounds[0], self.bounds[1] + ) + f_new = func(individual_new) + n_evals += 1 + + new_population.append(individual_new) + new_f_values.append(f_new) + + if n_evals >= self.budget: + break + + population = np.array(new_population) + f_values = np.array(new_f_values) + rankings = np.argsort(f_values) + best_sigma = sigma_values[rankings[0]] + sigma_values = best_sigma * np.exp(self.tau * (np.random.randn(self.pop_size) - 0.5)) + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHarmonyMemeticOptimization.py b/nevergrad/optimization/lama/AdvancedHarmonyMemeticOptimization.py new file mode 100644 index 000000000..0e1c7f56c --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHarmonyMemeticOptimization.py @@ -0,0 +1,115 @@ +import numpy as np + + +class AdvancedHarmonyMemeticOptimization: + def __init__( + self, budget=10000, hmcr=0.7, par=0.4, bw=0.6, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.explore_prob = 0.1 + self.local_search_prob = 0.7 + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + convergence_curve = [] + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < self.explore_prob: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + convergence_curve.append(self.f_opt) + + mean_aocc = np.mean(np.array(convergence_curve)) + std_dev = np.std(np.array(convergence_curve)) + + return mean_aocc, std_dev diff --git a/nevergrad/optimization/lama/AdvancedHarmonySearch.py b/nevergrad/optimization/lama/AdvancedHarmonySearch.py new file mode 100644 index 000000000..ef412aa50 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHarmonySearch.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdvancedHarmonySearch: + def __init__( + self, budget=10000, hmcr=0.9, par=0.3, bw=0.5, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + min_idx = np.argmin(harmony_memory_costs) + if new_cost < harmony_memory_costs[min_idx]: + harmony_memory[min_idx] = new_harmony + harmony_memory_costs[min_idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedHybridAdaptiveDE.py new file mode 100644 index 000000000..9b7a97e13 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridAdaptiveDE.py @@ -0,0 +1,133 @@ +import numpy as np + + +class AdvancedHybridAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + memory_size = 5 # Memory size for adaptive parameters + memory_F = np.full(memory_size, 0.7) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), 0.1, 1.0) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), 0.0, 1.0) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def mutation_strategy_3(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c, d = population[np.random.choice(indices, 4, replace=False)] + return np.clip(a + F * (b - c + d - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + strategies = [mutation_strategy_1, mutation_strategy_2, mutation_strategy_3] + return np.random.choice(strategies) + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridAdaptiveOptimization.py b/nevergrad/optimization/lama/AdvancedHybridAdaptiveOptimization.py new file mode 100644 index 000000000..a0b9974a2 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridAdaptiveOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class AdvancedHybridAdaptiveOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 # Optimized for better performance + self.initial_F = 0.8 # More aggressive mutation factor + self.initial_CR = 0.7 # Reduced for less crossover + self.elite_rate = 0.1 # Lowered elite rate for more diversity + self.local_search_rate = 0.3 # Reduced local search intensity + self.memory_size = 20 # Moderate memory size for parameter adaptation + self.w = 0.5 # Further reduced inertia weight for finer control in PSO + self.c1 = 2.0 # Enhanced cognitive component for better exploration + self.c2 = 1.5 # Enhanced social component for better convergence + self.phase_switch_ratio = 0.4 # Adjusted switch to PSO for balance + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Adjusted step for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdvancedHybridAdaptiveOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdvancedHybridCovarianceMatrixDifferentialEvolutionV3.py b/nevergrad/optimization/lama/AdvancedHybridCovarianceMatrixDifferentialEvolutionV3.py new file mode 100644 index 000000000..5d4e8bfb7 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridCovarianceMatrixDifferentialEvolutionV3.py @@ -0,0 +1,201 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedHybridCovarianceMatrixDifferentialEvolutionV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.1 + self.c1 = 0.02 + self.cmu = 0.03 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.8 + self.elitism_rate = 0.1 + self.eval_count = 0 + self.alpha_levy = 0.1 + self.levy_prob = 0.1 + self.adaptive_learning_rate = 0.1 + self.strategy_switches = [0.25, 0.5, 0.75] + self.local_opt_prob = 0.2 + self.learning_rate_decay = 0.95 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.1 # Introduced hybridization probability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + def self_adaptive_differential_evolution_parameters(): + """Self-adaptive parameter adjustment for F and CR.""" + if np.random.rand() < 0.1: # 10% chance to adjust parameters + self.F = np.random.uniform(0.5, 1) + self.CR = np.random.uniform(0.1, 1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + self_adaptive_differential_evolution_parameters() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = AdvancedHybridCovarianceMatrixDifferentialEvolutionV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/AdvancedHybridDEPSOWithAdaptiveRestarts.py b/nevergrad/optimization/lama/AdvancedHybridDEPSOWithAdaptiveRestarts.py new file mode 100644 index 000000000..2ba82d69e --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridDEPSOWithAdaptiveRestarts.py @@ -0,0 +1,152 @@ +import numpy as np + + +class AdvancedHybridDEPSOWithAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 + elite_size = 5 + w = 0.5 # Inertia weight for PSO + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridDEPSOWithDynamicAdaptationAndRestart.py b/nevergrad/optimization/lama/AdvancedHybridDEPSOWithDynamicAdaptationAndRestart.py new file mode 100644 index 000000000..232e3c438 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridDEPSOWithDynamicAdaptationAndRestart.py @@ -0,0 +1,152 @@ +import numpy as np + + +class AdvancedHybridDEPSOWithDynamicAdaptationAndRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.7 # Adaptive inertia weight for PSO + c1 = 1.0 # Cognitive coefficient for PSO + c2 = 1.0 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridExplorationExploitationOptimizer.py b/nevergrad/optimization/lama/AdvancedHybridExplorationExploitationOptimizer.py new file mode 100644 index 000000000..6cded5285 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridExplorationExploitationOptimizer.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedHybridExplorationExploitationOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.7, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridLocalOptimizationDE.py b/nevergrad/optimization/lama/AdvancedHybridLocalOptimizationDE.py new file mode 100644 index 000000000..6440372a7 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridLocalOptimizationDE.py @@ -0,0 +1,193 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedHybridLocalOptimizationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.25 + self.local_search_prob = 0.20 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.crossover_prob * (1 - (generation / (self.budget / self.pop_size))) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.hybrid_local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def hybrid_local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # PSO-based local search + best_x = self.pso_local_search(best_x, func) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev # Account for the function evaluations + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Simulated Annealing + best_x, best_f = self.simulated_annealing(best_x, func, best_f) + + return best_x + + def pso_local_search(self, x, func): + # PSO parameters + inertia_weight = 0.729 + cognitive_coeff = 1.49445 + social_coeff = 1.49445 + max_iter = 10 + swarm_size = 10 + + # Initialize PSO swarm + swarm = np.random.uniform(-0.1, 0.1, (swarm_size, self.dim)) + x + swarm = np.clip(swarm, -5.0, 5.0) + velocities = np.zeros_like(swarm) + personal_best_positions = swarm.copy() + personal_best_fitness = np.array([func(p) for p in personal_best_positions]) + global_best_position = personal_best_positions[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # PSO iterations + for _ in range(max_iter): + if self.budget <= 0: + break + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + inertia_weight * velocities + + cognitive_coeff * r1 * (personal_best_positions - swarm) + + social_coeff * r2 * (global_best_position - swarm) + ) + swarm = np.clip(swarm + velocities, -5.0, 5.0) + fitness = np.array([func(p) for p in swarm]) + self.budget -= swarm_size + + # Update personal and global bests + better_mask = fitness < personal_best_fitness + personal_best_positions[better_mask] = swarm[better_mask] + personal_best_fitness[better_mask] = fitness[better_mask] + global_best_idx = np.argmin(personal_best_fitness) + global_best_position = personal_best_positions[global_best_idx] + global_best_fitness = personal_best_fitness[global_best_idx] + + return global_best_position + + def simulated_annealing(self, x, func, current_best_f): + T = 1.0 + T_min = 0.0001 + alpha = 0.9 + + best_x = x.copy() + best_f = current_best_f + while T > T_min and self.budget > 0: + new_x = best_x + np.random.uniform(-0.5, 0.5, self.dim) + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 + if new_f < best_f or np.exp((best_f - new_f) / T) > np.random.rand(): + best_x = new_x + best_f = new_f + T *= alpha + + return best_x, best_f diff --git a/nevergrad/optimization/lama/AdvancedHybridMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/AdvancedHybridMetaHeuristicOptimizer.py new file mode 100644 index 000000000..fc7ce70f2 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridMetaHeuristicOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class AdvancedHybridMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.95, + inertia_weight=0.8, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.6, + mutation_rate=0.15, + num_generations=80, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/AdvancedHybridMetaheuristic.py b/nevergrad/optimization/lama/AdvancedHybridMetaheuristic.py new file mode 100644 index 000000000..a9a1aaa37 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridMetaheuristic.py @@ -0,0 +1,144 @@ +import numpy as np + + +class AdvancedHybridMetaheuristic: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 2.0, 2.0 # Increased cognitive and social constants + w = 0.5 # Reduced inertia weight + w_decay = 0.99 # Inertia weight decay + + # Differential Evolution parameters + F = 0.9 # Increased differential weight + CR = 0.9 # Crossover probability + + # Gradient-based search parameters + alpha = 0.01 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.5 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(0.1, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdvancedHybridMetaheuristic(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedHybridOptimization.py b/nevergrad/optimization/lama/AdvancedHybridOptimization.py new file mode 100644 index 000000000..af3448b90 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridOptimization.py @@ -0,0 +1,142 @@ +import numpy as np +import random + + +class AdvancedHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Cognitive constant + c2 = 2.0 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 100 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = random.sample(indices, 3) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdvancedHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedHybridOptimizer.py new file mode 100644 index 000000000..cbf4524a6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + var_threshold=1e-4, + pop_resize_factor=1.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.var_threshold = var_threshold + self.pop_resize_factor = pop_resize_factor + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + result = minimize(func, x, method="BFGS", options={"maxiter": budget, "disp": False}) + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + self.eval_count = self.init_pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + pop_size = self.init_pop_size + while self.eval_count < global_search_budget: + for i in range(pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Adjust population size adaptively based on variance of fitness values + fitness_var = np.var(fitness) + if fitness_var < self.var_threshold: # If the population is converging + pop_size = max( + int(pop_size / self.pop_resize_factor), 10 + ) # Ensure minimum population size of 10 + else: # If the population is diverging + pop_size = min(int(pop_size * self.pop_resize_factor), self.budget - self.eval_count) + + # Reinitialize if population size increases + if pop_size > len(population): + new_individuals = np.random.uniform( + self.bounds[0], self.bounds[1], (pop_size - len(population), self.dim) + ) + new_fitness = np.array([func(ind) for ind in new_individuals]) + new_velocities = np.random.uniform(-1, 1, (pop_size - len(population), self.dim)) + self.eval_count += len(new_individuals) + + population = np.vstack((population, new_individuals)) + fitness = np.hstack((fitness, new_fitness)) + velocities = np.vstack((velocities, new_velocities)) + F_values = np.hstack((F_values, np.full(len(new_individuals), self.init_F))) + CR_values = np.hstack((CR_values, np.full(len(new_individuals), self.init_CR))) + p_best = np.vstack((p_best, new_individuals)) + p_best_fitness = np.hstack((p_best_fitness, new_fitness)) + + # Perform local search on the best individuals + for i in range(len(population)): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridQuantumAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedHybridQuantumAdaptiveDE.py new file mode 100644 index 000000000..7d0e40c86 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridQuantumAdaptiveDE.py @@ -0,0 +1,137 @@ +import numpy as np + + +class AdvancedHybridQuantumAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(50): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + evaluations += len(new_pop) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithAdaptiveMemory.py b/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithAdaptiveMemory.py new file mode 100644 index 000000000..47211c8f6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithAdaptiveMemory.py @@ -0,0 +1,140 @@ +import numpy as np + + +class AdvancedHybridSimulatedAnnealingWithAdaptiveMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithGuidedExploration.py b/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithGuidedExploration.py new file mode 100644 index 000000000..8f4d68b86 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedHybridSimulatedAnnealingWithGuidedExploration.py @@ -0,0 +1,145 @@ +import numpy as np + + +class AdvancedHybridSimulatedAnnealingWithGuidedExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + guided_exploration_rate = 0.2 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + guided_exploration_rate = 0.4 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + guided_exploration_rate = 0.6 + else: + beta = 2.5 + alpha = 0.92 + guided_exploration_rate = 0.8 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved guided exploration + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < guided_exploration_rate: + direction = memory[best_memory_idx] - memory[np.random.randint(memory_size)] + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1) * direction + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedImprovedMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/AdvancedImprovedMetaHeuristicOptimizer.py new file mode 100644 index 000000000..a377209da --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedImprovedMetaHeuristicOptimizer.py @@ -0,0 +1,104 @@ +import numpy as np + + +class AdvancedImprovedMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.6, + mutation_rate=0.03, + num_generations=500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + improvement_counter = 0 # Track the number of consecutive non-improvements + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + improvement_counter = 0 + else: + improvement_counter += 1 + if ( + improvement_counter >= 20 + ): # Reinitialize the particle if no improvement after 20 iterations + swarm[i] = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + personal_best[i] = np.copy(swarm[i]) + improvement_counter = 0 + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV5.py b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV5.py new file mode 100644 index 000000000..490858482 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV5.py @@ -0,0 +1,103 @@ +import numpy as np + + +class AdvancedIslandEvolutionStrategyV5: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=12, + population_per_island=50, + migration_rate=0.25, + mutation_intensity=1.2, + mutation_decay=0.95, + elite_ratio=0.15, + crossover_probability=0.7, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + # If no crossover, return a copy of parent1 + return parent1.copy() + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + # Introduce new genetic material by shuffling some individuals between islands + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) # Shuffle the migration indices to mix individuals + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV8.py b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV8.py new file mode 100644 index 000000000..101164cf4 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV8.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdvancedIslandEvolutionStrategyV8: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=25, + population_per_island=80, + migration_rate=0.2, + mutation_intensity=0.8, + mutation_decay=0.98, + elite_ratio=0.05, + crossover_probability=0.9, + tournament_size=5, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV9.py b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV9.py new file mode 100644 index 000000000..c698da187 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedIslandEvolutionStrategyV9.py @@ -0,0 +1,109 @@ +import numpy as np + + +class AdvancedIslandEvolutionStrategyV9: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=20, + population_per_island=100, + migration_rate=0.15, + mutation_intensity=0.8, + mutation_decay=0.95, + elite_ratio=0.2, + crossover_probability=0.85, + tournament_size=3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.uniform(0.3, 0.7, self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py b/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py new file mode 100644 index 000000000..d24ce299a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedMemeticQuantumDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.c1 = 1.5 + self.c2 = 1.5 + self.epsilon = 1e-6 # Convergence threshold + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < self.epsilon or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + self.c1 * r1 * (personal_bests[i] - particles[i]) + + self.c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + self.c1 = np.random.uniform(1.0, 2.5) + self.c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedMemoryAdaptiveStrategyV50.py b/nevergrad/optimization/lama/AdvancedMemoryAdaptiveStrategyV50.py new file mode 100644 index 000000000..adf063f1e --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMemoryAdaptiveStrategyV50.py @@ -0,0 +1,102 @@ +import numpy as np + + +class AdvancedMemoryAdaptiveStrategyV50: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=10, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + # Use memory more effectively + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + memory_factor = 0.1 if phase == 1 else 0.3 # More aggressive memory use in phase 2 + + if phase == 1: + mutant = ( + population[best_idx] + + self.F * (population[a] - population[b]) + + memory_factor * memory_effect + ) + else: + mutant = population[a] + self.F * (population[b] - population[c]) + memory_factor * memory_effect + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + # Replace oldest memory with the new, more successful difference + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedMemoryEnhancedHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedMemoryEnhancedHybridOptimizer.py new file mode 100644 index 000000000..be53fca40 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMemoryEnhancedHybridOptimizer.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats import levy_stable + + +class AdvancedMemoryEnhancedHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def levy_flight(self, size, alpha=1.5): + return levy_stable.rvs(alpha, 0, size=size) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation with Levy flight + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + step_size = F * (b - c) + self.levy_flight(size=self.dim) + mutant = np.clip(a + step_size, self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedMemoryGuidedAdaptiveStrategyV68.py b/nevergrad/optimization/lama/AdvancedMemoryGuidedAdaptiveStrategyV68.py new file mode 100644 index 000000000..1626a7c59 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMemoryGuidedAdaptiveStrategyV68.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdvancedMemoryGuidedAdaptiveStrategyV68: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover rate + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced memory guidance in mutation + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + self.F * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Sigmoid-based dynamic parameter adaptation + scale = 1 / (1 + np.exp(-12 * (iteration / total_iterations - 0.5))) + self.F = 0.5 + 0.4 * np.sin(np.pi * scale) + self.CR = 0.5 + 0.4 * np.cos(np.pi * scale) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + + for iteration in range(total_iterations): + phase = 1 if iteration < total_iterations * self.switch_ratio else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedMemoryGuidedDualStrategyV80.py b/nevergrad/optimization/lama/AdvancedMemoryGuidedDualStrategyV80.py new file mode 100644 index 000000000..047466e3a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMemoryGuidedDualStrategyV80.py @@ -0,0 +1,84 @@ +import numpy as np + + +class AdvancedMemoryGuidedDualStrategyV80: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, phase): + size = len(population) + indices = np.random.choice(size, 3, replace=False) + a, b, c = indices[0], indices[1], indices[2] + # Leveraging memory for mutation depending on the optimization phase + if phase == 1: + # Exploration phase + mutant = population[best_idx] + self.F * (population[b] - population[c]) + else: + # Exploitation phase + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + 0.1 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < 10: # Limit memory size + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adjustment of F and CR + self.F = 0.5 + 0.5 * np.sin(np.pi * (iteration / total_iterations)) + self.CR = 0.9 - 0.4 * np.cos(np.pi * (iteration / total_iterations)) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedMultiModalAdaptiveOptimizer.py b/nevergrad/optimization/lama/AdvancedMultiModalAdaptiveOptimizer.py new file mode 100644 index 000000000..5357ed9b4 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMultiModalAdaptiveOptimizer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class AdvancedMultiModalAdaptiveOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=150, + elite_fraction=0.2, + mutation_intensity=0.3, + crossover_probability=0.9, + gradient_step=0.1, + mutation_decay=0.95, + gradient_enhancement_cycle=10, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement_cycle = gradient_enhancement_cycle + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def adaptive_gradient(self, individual, func, best_individual, iteration): + if iteration % self.gradient_enhancement_cycle == 0: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.linalg.norm(gradient_direction)) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + iteration = 0 + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = [] + for i in range(self.population_size): + if i < len(elites): + new_population.append(self.adaptive_gradient(elites[i], func, best_individual, iteration)) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population.append(child) + + population = np.array(new_population) + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + + evaluations += self.population_size + iteration += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedMultiStrategySelfAdaptiveDE.py b/nevergrad/optimization/lama/AdvancedMultiStrategySelfAdaptiveDE.py new file mode 100644 index 000000000..fd4667cab --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedMultiStrategySelfAdaptiveDE.py @@ -0,0 +1,135 @@ +import numpy as np + + +class AdvancedMultiStrategySelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.5, 0.9 + CR_min, CR_max = 0.1, 1.0 + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + memory_size = 5 # Memory size for adaptive parameters + memory_F = np.full(memory_size, 0.7) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), F_min, F_max) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), CR_min, CR_max) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def mutation_strategy_3(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c, d = population[np.random.choice(indices, 4, replace=False)] + return np.clip(a + F * (b - c + d - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + strategies = [mutation_strategy_1, mutation_strategy_2, mutation_strategy_3] + return np.random.choice(strategies) + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedNicheDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/AdvancedNicheDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..922b21f10 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedNicheDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,149 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedNicheDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.init_num_niches = 6 + self.alpha = 0.5 + self.beta = 0.5 + self.local_search_prob = 0.1 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py b/nevergrad/optimization/lama/AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py new file mode 100644 index 000000000..7be12d050 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py @@ -0,0 +1,116 @@ +import numpy as np + + +class AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + de_sf_min=0.5, + de_sf_max=1.0, + de_sf_decay=0.99, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr + self.par = par + self.bw = bw + self.bw_min = bw_min + self.bw_decay = bw_decay + self.bw_range = bw_range + self.de_sf_min = de_sf_min + self.de_sf_max = de_sf_max + self.de_sf_decay = de_sf_decay + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func, bandwidth): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += bandwidth * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return max(self.bw_range / (1 + iteration), self.bw_min) + + def adapt_de_scale_factor(self): + return max(self.de_sf_min, self.de_sf_max * self.de_sf_decay) + + def differential_evolution(self, func, current_harmony, best_harmony, scale_factor): + mutant_harmony = current_harmony + scale_factor * (best_harmony - current_harmony) + return np.clip(mutant_harmony, func.bounds.lb, func.bounds.ub) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = self.adjust_bandwidth(i) + + new_harmony = self.harmony_search(func, self.bw) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + best_harmony = self.harmony_memory[np.argmin(self.harmony_memory_fitness)] + scale_factor = self.adapt_de_scale_factor() + trial_harmony = self.differential_evolution(func, new_harmony, best_harmony, scale_factor) + trial_fitness = func(trial_harmony) + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_harmony + + idx_worst_trial = np.argmax(self.harmony_memory_fitness) + if trial_fitness < self.harmony_memory_fitness[idx_worst_trial]: + self.harmony_memory[idx_worst_trial] = trial_harmony + self.harmony_memory_fitness[idx_worst_trial] = trial_fitness + + self.bw = self.bw * self.bw_decay + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedOptimalHybridDifferentialAnnealingOptimizer.py b/nevergrad/optimization/lama/AdvancedOptimalHybridDifferentialAnnealingOptimizer.py new file mode 100644 index 000000000..4ac9422be --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedOptimalHybridDifferentialAnnealingOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class AdvancedOptimalHybridDifferentialAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The problem dimensionality is fixed at 5 as per the description + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Fine-tuned temperature parameters for simulated annealing + T = 1.0 + T_min = 0.001 # Lower minimum temperature for precise late-stage optimization + alpha = 0.95 # Cooling rate, slower to allow deeper exploration + + # Parameters for differential evolution + F = 0.8 # Mutation factor adjusted for more aggressive mutations + CR = 0.85 # Crossover probability to ensure good mixing of attributes + + # Population size adjusted for a balanced exploration-exploitation trade-off + population_size = 75 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + # Selecting indices for mutation + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor based on temperature to adjust aggressiveness + dynamic_F = F * (1 - 0.1 * np.tanh(T)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Acceptance based on simulated annealing principle with a temperature-dependent probability + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling rate based on progress to ensure adequate time for local exploitation + adaptive_cooling = alpha ** (1 - 0.3 * (evaluation_count / self.budget)) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/AdvancedParallelDifferentialEvolution.py b/nevergrad/optimization/lama/AdvancedParallelDifferentialEvolution.py new file mode 100644 index 000000000..0558034f5 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedParallelDifferentialEvolution.py @@ -0,0 +1,53 @@ +import numpy as np + + +class AdvancedParallelDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, F=0.8, CR=0.9, strategy="rand/1/bin"): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight + self.CR = CR # Crossover probability + self.strategy = strategy # Mutation and crossover strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation: different strategies can be implemented + if self.strategy == "best/1/bin": + best_idx = np.argmin(fitness) + base = population[best_idx] + elif self.strategy == "rand/1/bin": + idxs = [idx for idx in range(self.population_size) if idx != i] + base = population[np.random.choice(idxs)] + else: + raise ValueError("Unsupported strategy") + + # DE/rand/1/bin: mutation and crossover + idxs = np.delete(np.arange(self.population_size), i) + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + self.F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + if evaluations >= self.budget: + break + + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/AdvancedPrecisionEvolver.py b/nevergrad/optimization/lama/AdvancedPrecisionEvolver.py new file mode 100644 index 000000000..20e527fab --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedPrecisionEvolver.py @@ -0,0 +1,75 @@ +import numpy as np + + +class AdvancedPrecisionEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.1, + mutation_factor=0.8, + crossover_probability=0.7, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_factor = mutation_factor + self.crossover_probability = crossover_probability + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual, global_best): + mutation_strength = np.abs(global_best - individual) * self.mutation_factor + mutation = np.random.normal(0, mutation_strength) + new_individual = np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return new_individual + + def crossover(self, parent1, parent2): + child = np.where(np.random.rand(self.dimension) < self.crossover_probability, parent1, parent2) + return child + + def reproduce(self, elites, elite_fitness, global_best): + new_population = np.empty((self.population_size, self.dimension)) + for i in range(self.population_size): + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.crossover(elites[parents[0]], elites[parents[1]]) + child = self.mutate(child, global_best) + new_population[i] = child + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + global_best = elites[np.argmin(elite_fitness)] + population = self.reproduce(elites, elite_fitness, global_best) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedPrecisionGuidedStrategy.py b/nevergrad/optimization/lama/AdvancedPrecisionGuidedStrategy.py new file mode 100644 index 000000000..644093e24 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedPrecisionGuidedStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class AdvancedPrecisionGuidedStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + population_size = 100 + elite_size = int(0.1 * population_size) + mutation_rate = 0.6 + mutation_scale = lambda t: 0.5 * np.exp(-0.0005 * t) # Decaying mutation scale + crossover_rate = 0.85 + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + + # Select elites to carry over to next generation + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + # Generate the rest of the new population + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + new_population.append(child) + + new_population = np.vstack((new_population)) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + # Combine new population with elites + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedQuantumCognitionTrajectoryOptimizerV29.py b/nevergrad/optimization/lama/AdvancedQuantumCognitionTrajectoryOptimizerV29.py new file mode 100644 index 000000000..611193fb5 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumCognitionTrajectoryOptimizerV29.py @@ -0,0 +1,85 @@ +import numpy as np + + +class AdvancedQuantumCognitionTrajectoryOptimizerV29: + def __init__( + self, + budget=10000, + population_size=500, + inertia_weight=0.95, + cognitive_coeff=2.0, + social_coeff=2.0, + inertia_decay=0.99, + quantum_jump_rate=0.5, + quantum_scale=0.35, + quantum_decay=0.95, + mutation_rate=0.02, + mutation_scale=0.05, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_individual_positions = particles.copy() + best_individual_scores = np.array([func(p) for p in particles]) + global_best_position = best_individual_positions[np.argmin(best_individual_scores)] + global_best_score = min(best_individual_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Perform a quantum jump for global exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best_position + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_individual_positions[i] - particles[i]) + + self.social_coeff * r2 * (global_best_position - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation for enhanced local exploration + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_individual_scores[i]: + best_individual_positions[i] = candidate_position + best_individual_scores[i] = score + + if score < global_best_score: + global_best_position = candidate_position + global_best_score = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/AdvancedQuantumControlledDiversityStrategy.py b/nevergrad/optimization/lama/AdvancedQuantumControlledDiversityStrategy.py new file mode 100644 index 000000000..7d0016a28 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumControlledDiversityStrategy.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdvancedQuantumControlledDiversityStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_ratio=0.05, + mutation_scale_base=1.0, + mutation_decay=0.005, + crossover_rate=0.9, + quantum_intensity=0.98, + quantum_fluctuation=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_scale_base = mutation_scale_base + self.mutation_decay = mutation_decay + self.crossover_rate = crossover_rate + self.quantum_intensity = quantum_intensity + self.quantum_fluctuation = quantum_fluctuation + + def __call__(self, func): + # Initialize population randomly within the search space + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + evaluations = self.population_size + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + mutation_scale = self.adaptive_mutation_scale(evaluations) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent1, parent2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[parent1], population[parent2]) + else: + offspring = population[np.random.choice(elite_indices)] + + if np.random.random() < self.quantum_intensity: + offspring = self.quantum_state_update(offspring, best_individual) + + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) + + new_population[i] = offspring + + # Evaluate new population + fitness = np.array([func(x) for x in new_population]) + evaluations += self.population_size + + # Update best individual if a better one is found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = new_population[current_best_idx] + + population = new_population + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, self.quantum_fluctuation, self.dimension) + return individual + perturbation * (best_individual - individual) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_scale_base * np.exp(-self.mutation_decay * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/AdvancedQuantumCrossoverOptimizer.py b/nevergrad/optimization/lama/AdvancedQuantumCrossoverOptimizer.py new file mode 100644 index 000000000..843cb2924 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumCrossoverOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class AdvancedQuantumCrossoverOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial setup + current_budget = 0 + population_size = 100 # Increased population for broader initial sampling + mutation_factor = 0.5 # Reduced mutation factor for stability + crossover_prob = 0.5 # Moderated crossover probability for maintaining diversity + elite_factor = 0.1 # Fraction of population considered elite + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + elite_size = int(population_size * elite_factor) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + # Generate new population with quantum-inspired crossover + for i in range(population_size): + if current_budget >= self.budget: + break + + # Selection from elite pool for quantum crossover + parent1 = elite_population[np.random.randint(0, elite_size)] + parent2 = elite_population[np.random.randint(0, elite_size)] + child = np.where(np.random.rand(self.dim) < crossover_prob, parent1, parent2) + + # Mutation inspired by quantum tunneling effect + quantum_mutation = mutation_factor * np.random.randn(self.dim) + child += quantum_mutation + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + # Maintain the best solution found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Dynamic adaptation of mutation factor and crossover probability + mutation_factor *= 0.99 # Gradual decrease to assure convergence + crossover_prob *= 1.01 # Incremental increase to keep exploring new areas + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py b/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py new file mode 100644 index 000000000..71f202ec6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def adaptive_restart(self, particles, fitness, personal_bests, personal_best_fits, func): + best_idx = np.argmin(personal_best_fits) + best_particle = personal_bests[best_idx] + best_fit = personal_best_fits[best_idx] + + if np.std(personal_best_fits) < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = best_particle + global_best_fit = best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + # Refinement step for elite particles + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedQuantumGradientDescent.py b/nevergrad/optimization/lama/AdvancedQuantumGradientDescent.py new file mode 100644 index 000000000..23d709d4b --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumGradientDescent.py @@ -0,0 +1,70 @@ +import numpy as np + + +class AdvancedQuantumGradientDescent: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 50 + self.elite_size = 10 + self.mutation_scale = 0.1 + self.quantum_probability = 0.15 + self.gradient_steps = 5 + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def mutate(self, individual): + if np.random.rand() < self.quantum_probability: + # Quantum mutation + mutation = np.random.normal(0, self.mutation_scale, self.dim) + else: + # Standard mutation + mutation = np.random.normal(0, self.mutation_scale / 10, self.dim) + new_individual = np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return new_individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(func, population) + best_idx = np.argmin(fitness) + best_score = fitness[best_idx] + best_solution = population[best_idx].copy() + + evaluations = self.population_size + + while evaluations < self.budget: + elite_population, elite_fitness = self.select_elite(population, fitness) + new_population = np.zeros_like(population) + + # Crossover and mutation: + for i in range(self.population_size): + if i < self.elite_size: + new_population[i] = elite_population[i] + else: + parent = elite_population[np.random.randint(self.elite_size)] + new_population[i] = self.mutate(parent) + + # Evaluate new population + new_fitness = self.evaluate(func, new_population) + + # Update best solution + min_idx = np.argmin(new_fitness) + if new_fitness[min_idx] < best_score: + best_score = new_fitness[min_idx] + best_solution = new_population[min_idx].copy() + + population = new_population + fitness = new_fitness + evaluations += self.population_size + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/AdvancedQuantumGradientExplorationOptimization.py b/nevergrad/optimization/lama/AdvancedQuantumGradientExplorationOptimization.py new file mode 100644 index 000000000..ad5bc5dc6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumGradientExplorationOptimization.py @@ -0,0 +1,201 @@ +import numpy as np + + +class AdvancedQuantumGradientExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 + c2 = 2.0 + w_max = 0.9 + w_min = 0.4 + + # Learning rate adaptation parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Differential Evolution parameters + F = 0.5 + CR = 0.9 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + w = w_max - (w_max - w_min) * (i / self.budget) # Adaptive inertia weight + T = 1 - (i / self.budget) # Temperature for Simulated Annealing + + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Simulated Annealing step + random_step = np.random.uniform(-T, T, self.dim) + new_position = x + random_step + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < f or np.exp((f - new_f) / T) > np.random.rand(): + positions[idx] = new_position + f = new_f + + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = new_position.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = new_position.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_position.copy() + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.9 + + prev_f = f + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = AdvancedQuantumGradientExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/AdvancedQuantumHarmonicFeedbackOptimizer.py b/nevergrad/optimization/lama/AdvancedQuantumHarmonicFeedbackOptimizer.py new file mode 100644 index 000000000..799ff0f68 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumHarmonicFeedbackOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedQuantumHarmonicFeedbackOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=250, + elite_rate=0.20, + resonance_factor=0.08, + mutation_scale=0.03, + harmonic_frequency=0.25, + feedback_intensity=0.15, + damping_factor=0.95, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.prev_best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.prev_best_fitness = self.best_fitness + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and perform a selective reproduction process + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Apply harmonic and quantum techniques with feedback and damping + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + + if self.best_fitness >= self.prev_best_fitness: + feedback_adjustment = self.feedback_intensity * np.random.uniform(-1, 1, self.dim) + else: + feedback_adjustment = 0 + + # Apply a damping factor to gradually reduce the scale of random perturbations + self.population[idx] = ( + elite_sample + + (harmonic_influence + quantum_resonance + mutation_effect + feedback_adjustment) + * self.damping_factor + ) + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/AdvancedQuantumInfusedAdaptiveStrategyV3.py b/nevergrad/optimization/lama/AdvancedQuantumInfusedAdaptiveStrategyV3.py new file mode 100644 index 000000000..073d39e68 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumInfusedAdaptiveStrategyV3.py @@ -0,0 +1,84 @@ +import numpy as np + + +class AdvancedQuantumInfusedAdaptiveStrategyV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 200 + elite_size = 20 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.7 + quantum_probability = 0.15 + adaptive_rate = 0.03 + learning_period = 25 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum mutation incorporated with gradient-inspired perturbations + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal( + loc=0, scale=1 / np.sqrt(fitness[i] + 1), size=self.dim + ) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Evolve population using differential evolution with mutation and crossover + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adjust strategy parameters based on recent performance + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-4: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.3) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedQuantumMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/AdvancedQuantumMemeticDifferentialEvolution.py new file mode 100644 index 000000000..009df4126 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumMemeticDifferentialEvolution.py @@ -0,0 +1,174 @@ +import numpy as np + + +class AdvancedQuantumMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.5 + self.local_search_iters = 5 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def mutate_quantum(self, current, best, F): + return np.clip(current + F * np.tanh(best - current), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + if np.random.rand() < 0.5: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + else: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + mutant = self.mutate_quantum(population[i], global_best_position, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedQuantumStateCrossoverOptimization.py b/nevergrad/optimization/lama/AdvancedQuantumStateCrossoverOptimization.py new file mode 100644 index 000000000..7c421df6c --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumStateCrossoverOptimization.py @@ -0,0 +1,86 @@ +import numpy as np + + +class AdvancedQuantumStateCrossoverOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.2, + mutation_intensity=0.05, + crossover_rate=0.9, + quantum_prob=0.15, + gamma=0.5, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1) + + # Quantum-inspired updates + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual): + return individual + np.random.normal(0, self.mutation_intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Quantum inspired state update to allow exploration around the best solution found with more control""" + perturbation = np.random.uniform(-1, 1, self.dimension) * self.gamma * (best_individual - individual) + return individual + perturbation diff --git a/nevergrad/optimization/lama/AdvancedQuantumSwarmOptimization.py b/nevergrad/optimization/lama/AdvancedQuantumSwarmOptimization.py new file mode 100644 index 000000000..e32aa9c7a --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumSwarmOptimization.py @@ -0,0 +1,89 @@ +import numpy as np + + +class AdvancedQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.05, 0.3) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/AdvancedQuantumVelocityOptimizer.py b/nevergrad/optimization/lama/AdvancedQuantumVelocityOptimizer.py new file mode 100644 index 000000000..6094e5164 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedQuantumVelocityOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedQuantumVelocityOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 30 # Further optimized population size + inertia_weight = 0.75 # Further reduction in inertia for better dynamic adaptation + cognitive_coefficient = 2.5 # Increased cognitive learning for finer individual adaptation + social_coefficient = 2.5 # Increased social learning for stronger group influence + velocity_limit = 0.1 # Further reduction in velocity limit for finer control + quantum_momentum = 0.01 # Further reduced momentum for very subtle quantum jumps + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + inertia_decay = np.power( + (1 - (current_budget / self.budget)), 3 + ) # Stronger exponential decay for inertia + w = inertia_weight * inertia_decay + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Adaptive quantum jump + quantum_probability = 0.05 * np.exp( + -10 * (current_budget / self.budget) + ) # Smaller probability of quantum jump + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # PSO velocity updates with clamping + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Personal and global best updates + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/AdvancedRAMEDSv6.py b/nevergrad/optimization/lama/AdvancedRAMEDSv6.py new file mode 100644 index 000000000..bfd2b06c1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRAMEDSv6.py @@ -0,0 +1,98 @@ +import numpy as np + + +class AdvancedRAMEDSv6: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.92, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite structures + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + performance_switch_threshold = 0.1 + use_random_mutation = False + last_improvement = 0 + + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic modulation + F = self.F_min + (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if use_random_mutation or np.random.rand() < performance_switch_threshold: + mutant = np.clip(a + F * (b - c), self.lb, self.ub) + else: + best_or_elite = ( + best_solution + if np.random.rand() < 0.7 + else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip( + population[i] + F * (best_or_elite - population[i] + a - b), self.lb, self.ub + ) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + if ( + evaluations - last_improvement > self.population_size * 2 + ): # Switch strategy if stagnant + use_random_mutation = not use_random_mutation + last_improvement = evaluations + + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedRefinedAdaptiveMemoryEnhancedSearch.py b/nevergrad/optimization/lama/AdvancedRefinedAdaptiveMemoryEnhancedSearch.py new file mode 100644 index 000000000..1633c036d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedAdaptiveMemoryEnhancedSearch.py @@ -0,0 +1,92 @@ +import numpy as np + + +class AdvancedRefinedAdaptiveMemoryEnhancedSearch: + def __init__( + self, + budget, + population_size=50, + crossover_base=0.7, + F_min=0.5, + F_max=1, + memory_size=50, + elite_size=10, + aging_factor=0.9, + ): + self.budget = budget + self.population_size = population_size + self.crossover_base = crossover_base # Base rate for crossover, adaptively adjusted + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.aging_factor = aging_factor # Controls how fast memory ages + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite structures + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track the best solution and its fitness + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Dynamic mutation factor incorporating feedback from memory + F = self.F_max - (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + + # Select mutation strategy: DE/current-to-best/1 or DE/rand/1 based on fitness + if np.random.rand() < 0.5: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(population[i] + F * (best_solution - population[i] + a - b), lb, ub) + else: + a, b = population[np.random.choice(self.population_size, 2, replace=False)] + mutant = np.clip(a + F * (b - a), lb, ub) + + # Adaptive crossover rate based on individual performance + crossover_rate = self.crossover_base + (1 - self.crossover_base) * ( + fitness[i] / np.max(fitness) + ) + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory aging and update + aged_fitness = memory_fitness * self.aging_factor + worst_idx = np.argmax(aged_fitness) + if aged_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py b/nevergrad/optimization/lama/AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py new file mode 100644 index 000000000..81e2fc92d --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py @@ -0,0 +1,144 @@ +import numpy as np + + +class AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.95 # Cooling rate for initial phase + beta_initial = 2.0 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 5 + phase2 = 2 * self.budget // 5 + phase3 = 3 * self.budget // 5 + phase4 = 4 * self.budget // 5 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 3.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 2.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 2.0 + alpha = 0.94 + elif evaluations < phase4: + beta = 1.5 + alpha = 0.92 + else: + beta = 1.0 + alpha = 0.90 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 7) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py b/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py new file mode 100644 index 000000000..e988aec39 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py @@ -0,0 +1,203 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + adaptive_memory=True, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + self.adaptive_memory = adaptive_memory + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..1cc4d77b6 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,196 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=100, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.memory = [] + + def local_search(self, x, func, budget): + # Use L-BFGS-B for local search + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.2: # 20% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Reinforce diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedAnnealing.py b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedAnnealing.py new file mode 100644 index 000000000..833098fa4 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedAnnealing.py @@ -0,0 +1,150 @@ +import numpy as np + + +class AdvancedRefinedGradientBoostedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + # Generate candidate solution + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + # Perform Metropolis acceptance criterion + if f_candidate < f_current or np.exp((f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Adaptive cooling schedule + if evaluations < self.budget // 4: + alpha = 0.98 # Phase 1 + elif evaluations < self.budget // 2: + alpha = 0.97 # Phase 2 + elif evaluations < 3 * self.budget // 4: + alpha = 0.96 # Phase 3 + else: + alpha = 0.95 # Phase 4 + T *= alpha + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Enhanced memory-based exploration with diversity encouragement + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + x_candidate = memory[np.random.randint(memory_size)] + np.random.uniform( + -0.5, 0.5, self.dim + ) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..5d145e7a1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemoryAnnealing.py @@ -0,0 +1,158 @@ +import numpy as np + + +class AdvancedRefinedGradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Further increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Additional: Boosted Diversification Phase + if evaluations % (self.budget // 8) == 0: + for _ in range(memory_size): + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..164cc67cf --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class AdvancedRefinedGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Periodic intensive localized search for memory refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/AdvancedRefinedHybridEvolutionaryAnnealingOptimizer.py b/nevergrad/optimization/lama/AdvancedRefinedHybridEvolutionaryAnnealingOptimizer.py new file mode 100644 index 000000000..4be29c686 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedHybridEvolutionaryAnnealingOptimizer.py @@ -0,0 +1,53 @@ +import numpy as np + + +class AdvancedRefinedHybridEvolutionaryAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Adjusted initial temperature and more gradual cooling + T = 2.0 # Start with a higher initial temperature for wider early exploration + T_min = 0.0005 # Further decreased minimum temperature for extended fine-tuning + alpha = 0.95 # More gradual cooling to enhance thorough exploration + + # Mutation and recombination parameters fine-tuned + F = 0.8 # Increased mutation factor + CR = 0.85 # Adjusted crossover probability + + # Population size slightly increased + population_size = 70 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Iteration with enhanced mutation strategy and dynamic components adjustment + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptive mutation factor influenced by both temperature and performance + dynamic_F = F * (1 + 0.15 * np.log(1 + T) * (f_opt / fitness[i])) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Dynamic adjustment of cooling based on both temperature and progress + adaptive_cooling = alpha - 0.02 * (T / T_min) * (evaluation_count / self.budget) + T = max(T * adaptive_cooling, T_min) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51.py b/nevergrad/optimization/lama/AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51.py new file mode 100644 index 000000000..324b22a87 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51.py @@ -0,0 +1,61 @@ +import numpy as np + + +class AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters with refined tuning + T = 1.2 # Slightly increased starting temperature for enhanced exploration + T_min = 0.0003 # Lower minimal temperature for deeper late-stage exploration + alpha = 0.90 # Adjusted slower cooling rate to extend effective search duration + + # Mutation and crossover parameters finely-tuned for this problem set + F = 0.78 # Slightly increased mutation factor + CR = 0.85 # Adjusted crossover probability to maintain diversity while fostering convergence + + population_size = 90 # Increased population size to improve diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with sigmoid adaptation for mutation factor + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptive F with further refined control via sigmoid function + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.65 + 0.35 * np.tanh(4 * (evaluation_count / self.budget - 0.55))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Further refined acceptance criteria incorporating a temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1.05 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy incorporating a sinusoidal modulation + adaptive_cooling = alpha - 0.01 * np.sin(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/AdvancedRefinedRAMEDSPro.py b/nevergrad/optimization/lama/AdvancedRefinedRAMEDSPro.py new file mode 100644 index 000000000..28520126e --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedRAMEDSPro.py @@ -0,0 +1,82 @@ +import numpy as np + + +class AdvancedRefinedRAMEDSPro: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=1.0, + memory_size=20, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate_initial = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory with best initial individuals + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update crossover rate + crossover_rate = self.crossover_rate_initial * (1 - evaluations / self.budget) + 0.1 + + for i in range(self.population_size): + # Adaptive mutation factor with feedback modulation + F = self.F_max - (self.F_max - self.F_min) * (evaluations / self.budget) + + # Mutation: Hybrid strategy using best, random, and worst + indices = np.random.choice(self.population_size, 3, replace=False) + r1, r2, r3 = population[indices] + random_worst_idx = np.argmax(fitness) + random_best_or_worst = ( + best_solution if np.random.rand() < 0.5 else population[random_worst_idx] + ) + mutant = np.clip(r1 + F * (random_best_or_worst - r2 + r3 - population[i]), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Update memory if necessary + worst_memory_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_memory_idx]: + memory[worst_memory_idx] = population[i].copy() + memory_fitness[worst_memory_idx] = fitness[i] + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/AdvancedRefinedSpiralSearchOptimizer.py b/nevergrad/optimization/lama/AdvancedRefinedSpiralSearchOptimizer.py new file mode 100644 index 000000000..78999276b --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedSpiralSearchOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class AdvancedRefinedSpiralSearchOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial point in the center of the search space + initial_point = np.random.uniform(-5.0, 5.0, self.dim) + current_point = initial_point + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Parameters for spiral movement + radius = 5.0 # Maximum extent of the search space + angle_increment = np.pi / 12 # Reduced angle increment for finer detail + radius_decrement_factor = 0.95 # Slower radius reduction for thorough exploration + spiral_budget = self.budget + + # Adaptive refinement based on previous results + adaptive_radius_change = 0.1 # Gradual increase in radius adjustment + min_radius = 0.1 # Minimum radius to prevent infinitesimal spirals + + while spiral_budget > 0: + num_points = int(2 * np.pi / angle_increment) + for i in range(num_points): + if spiral_budget <= 0: + break + + angle = i * angle_increment + candidate_point = current_point.copy() + radius *= 1.0 + adaptive_radius_change # Dynamically increase radius + radius = max(radius, min_radius) # Maintain a minimum radius + + for dim in range(self.dim): # Create a more complex spiral + dx = radius * np.cos(angle + 2 * np.pi * dim / self.dim) + dy = radius * np.sin(angle + 2 * np.pi * dim / self.dim) + candidate_point[dim % self.dim] += dx + candidate_point[(dim + 1) % self.dim] += dy + + candidate_point = np.clip( + candidate_point, -5.0, 5.0 + ) # Ensure the candidate is within bounds + candidate_f = func(candidate_point) + spiral_budget -= 1 + + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate_point + current_point = candidate_point # Move spiral center to new best location + + # Reduce the radius for the next spiral cycle + radius *= radius_decrement_factor + angle_increment *= 0.99 # Gradually refine the angle increment for more precise spiral turns + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AdvancedRefinedUltraEvolutionaryGradientOptimizerV29.py b/nevergrad/optimization/lama/AdvancedRefinedUltraEvolutionaryGradientOptimizerV29.py new file mode 100644 index 000000000..16b7afaf9 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedRefinedUltraEvolutionaryGradientOptimizerV29.py @@ -0,0 +1,79 @@ +import numpy as np + + +class AdvancedRefinedUltraEvolutionaryGradientOptimizerV29: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.56, + F_range=0.44, + CR=0.96, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.82: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v2.py b/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v2.py new file mode 100644 index 000000000..e11f2245c --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v2.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedSelfAdaptiveDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Self-adaptive mutation factor and crossover probability + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.crossover_prob + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + unique_archive = np.unique(self.archive, axis=0) + if len(unique_archive) > self.pop_size: + self.archive = unique_archive[-self.pop_size :].tolist() + else: + self.archive = unique_archive.tolist() + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev # Account for the function evaluations + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + gradient = np.gradient([func(best_x + 1e-4 * np.eye(self.dim)[i]) for i in range(self.dim)]) + new_x = best_x - 0.01 * gradient + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x diff --git a/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v3.py b/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v3.py new file mode 100644 index 000000000..35b886e99 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedSelfAdaptiveDE_v3.py @@ -0,0 +1,139 @@ +import numpy as np +from scipy.optimize import minimize + + +class AdvancedSelfAdaptiveDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + unique_archive = np.unique(self.archive + new_pop, axis=0) + if len(unique_archive) > self.pop_size: + self.archive = unique_archive[-self.pop_size :].tolist() + else: + self.archive = unique_archive.tolist() + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + gradient = np.gradient([func(best_x + 1e-4 * np.eye(self.dim)[i]) for i in range(self.dim)]) + new_x = best_x - 0.01 * gradient + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x diff --git a/nevergrad/optimization/lama/AdvancedSpatialAdaptiveConvergenceOptimizer.py b/nevergrad/optimization/lama/AdvancedSpatialAdaptiveConvergenceOptimizer.py new file mode 100644 index 000000000..f0df987cf --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedSpatialAdaptiveConvergenceOptimizer.py @@ -0,0 +1,81 @@ +import numpy as np + + +class AdvancedSpatialAdaptiveConvergenceOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=120, + initial_step_size=1.5, + step_decay=0.95, + elite_ratio=0.05, + mutation_intensity=0.05, + local_search_prob=0.3, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, individual): + tweaks = np.random.normal(0, self.step_size * 0.02, self.dimension) # More refined local search + return np.clip(individual + tweaks, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: + for idx in range(self.population_size): + candidate = self.local_search(new_population[idx]) + candidate_fitness = func(candidate) + if candidate_fitness < new_fitness[idx]: + new_population[idx] = candidate + new_fitness[idx] = candidate_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedSpatialGradientOptimizer.py b/nevergrad/optimization/lama/AdvancedSpatialGradientOptimizer.py new file mode 100644 index 000000000..cf2efaea3 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedSpatialGradientOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class AdvancedSpatialGradientOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=150, + initial_step_size=3.0, + step_decay=0.95, + elite_ratio=0.2, + mutation_intensity=0.12, + local_search_prob=0.4, + refinement_steps=10, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, self.step_size * 0.01, self.dimension), + self.bounds[0], + self.bounds[1], + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: # Conduct local search on some individuals + for idx in range(self.population_size): + local_individual, local_fitness = self.local_search(func, new_population[idx]) + evaluations += self.refinement_steps # Account for the evaluations used in local search + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/AdvancedStrategicHybridDE.py b/nevergrad/optimization/lama/AdvancedStrategicHybridDE.py new file mode 100644 index 000000000..8964420f1 --- /dev/null +++ b/nevergrad/optimization/lama/AdvancedStrategicHybridDE.py @@ -0,0 +1,80 @@ +import numpy as np + + +class AdvancedStrategicHybridDE: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.6, + F_range=0.4, + CR=0.95, + hybridization_factor=0.25, + elite_strategy=True, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.hybridization_factor = hybridization_factor # Factor for hybrid mutation strategy + self.elite_strategy = elite_strategy # Use elite strategy to focus on top performers + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + if self.elite_strategy and np.random.rand() < 0.1: + # Use one of the top 10% performers as the base + elite_candidates = np.argsort(fitness)[: max(1, self.population_size // 10)] + base = population[np.random.choice(elite_candidates)] + elif np.random.rand() < self.hybridization_factor: + # Occasionally use the best individual for mutation base + base = best_individual + else: + # Regular selection excluding self + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using differential evolution strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using binomial method + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ArchiveEnhancedAdaptiveDE.py b/nevergrad/optimization/lama/ArchiveEnhancedAdaptiveDE.py new file mode 100644 index 000000000..99b10f509 --- /dev/null +++ b/nevergrad/optimization/lama/ArchiveEnhancedAdaptiveDE.py @@ -0,0 +1,146 @@ +import numpy as np + + +class ArchiveEnhancedAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + archive_size = 10 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def archive_management(population, archive): + combined = np.vstack((population, archive)) + if len(combined) > archive_size: + archive = combined[np.argsort(np.var(combined, axis=0))[-archive_size:]] + return archive + + def local_search(best_ind, step_size=0.1): + neighborhood = np.clip(best_ind + step_size * np.random.randn(10, self.dim), bounds[0], bounds[1]) + best_local = best_ind + f_best_local = func(best_ind) + for neighbor in neighborhood: + f_neighbor = func(neighbor) + if f_neighbor < f_best_local: + best_local = neighbor + f_best_local = f_neighbor + return best_local, f_best_local + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + archive = np.empty((0, self.dim)) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(np.vstack((population, archive)), i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Local search on the best solution + best_ind = new_population[np.argmin(new_fitness)] + best_local, f_best_local = local_search(best_ind) + evaluations += 10 # Assuming 10 local search evaluations + + if f_best_local < self.f_opt: + self.f_opt = f_best_local + self.x_opt = best_local + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Archive management + archive = archive_management(population, archive) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/AttenuatedAdaptiveEvolver.py b/nevergrad/optimization/lama/AttenuatedAdaptiveEvolver.py new file mode 100644 index 000000000..5d21ccbbf --- /dev/null +++ b/nevergrad/optimization/lama/AttenuatedAdaptiveEvolver.py @@ -0,0 +1,68 @@ +import numpy as np + + +class AttenuatedAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + initial_step_size=0.5, + step_decay=0.98, + elite_ratio=0.2, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * (self.step_decay**generation) # Geometric decay of step size + + new_population = np.array([self.mutate(ind, scale) for ind in population]) + new_fitness = self.evaluate_population(func, new_population) + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + indices = np.argsort(combined_fitness) + population = combined_population[indices[: self.population_size]] + fitness = combined_fitness[indices[: self.population_size]] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/BalancedAdaptiveMemeticDE.py b/nevergrad/optimization/lama/BalancedAdaptiveMemeticDE.py new file mode 100644 index 000000000..684997552 --- /dev/null +++ b/nevergrad/optimization/lama/BalancedAdaptiveMemeticDE.py @@ -0,0 +1,94 @@ +import numpy as np + + +class BalancedAdaptiveMemeticDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5, step_size=0.01): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Periodically re-initialize worst individuals to enhance exploration + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/BalancedCulturalDifferentialEvolution.py b/nevergrad/optimization/lama/BalancedCulturalDifferentialEvolution.py new file mode 100644 index 000000000..4cd4dbe79 --- /dev/null +++ b/nevergrad/optimization/lama/BalancedCulturalDifferentialEvolution.py @@ -0,0 +1,127 @@ +import numpy as np + + +class BalancedCulturalDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.05 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.15: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.3 + ( + 0.2 * fitness_std / (np.mean(fitness) + 1e-9) + ) # Adjusted influence factors + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/BalancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/BalancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..63d13282c --- /dev/null +++ b/nevergrad/optimization/lama/BalancedDualStrategyAdaptiveDE.py @@ -0,0 +1,129 @@ +import numpy as np + + +class BalancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.9 + self.elitism_rate = 0.3 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage with balanced influence + trial = trial + 0.5 * np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.05 * ( + np.random.rand(self.dim) - 0.5 + ) # Slightly larger perturbation for balanced exploration + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/BalancedDynamicQuantumLevySwarm.py b/nevergrad/optimization/lama/BalancedDynamicQuantumLevySwarm.py new file mode 100644 index 000000000..bd5b93911 --- /dev/null +++ b/nevergrad/optimization/lama/BalancedDynamicQuantumLevySwarm.py @@ -0,0 +1,145 @@ +import numpy as np + + +class BalancedDynamicQuantumLevySwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 60 # Reduced population size for better focus + inertia_weight_max = 0.7 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.5 # Adjusted to standard PSO cognition + social_coefficient = 1.5 # Adjusted to standard PSO social influence + differential_weight = 0.8 # Adjusted for effective DE mutation + crossover_rate = 0.9 # Increased for more frequent recombination + quantum_factor = 0.05 # Adjusted for balanced exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Enhanced Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.3: # Increased local search probability for aggressive refinement + local_search_iters = 10 # Balanced for computational efficiency + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/BalancedQuantumLevyDifferentialSearch.py b/nevergrad/optimization/lama/BalancedQuantumLevyDifferentialSearch.py new file mode 100644 index 000000000..db6e2160a --- /dev/null +++ b/nevergrad/optimization/lama/BalancedQuantumLevyDifferentialSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class BalancedQuantumLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum and Levy Search + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/BalancedQuantumLevySwarmOptimization.py b/nevergrad/optimization/lama/BalancedQuantumLevySwarmOptimization.py new file mode 100644 index 000000000..d91567d70 --- /dev/null +++ b/nevergrad/optimization/lama/BalancedQuantumLevySwarmOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class BalancedQuantumLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 100 # Increased population size for better exploration + inertia_weight_max = 0.7 # Adjusted for a balance between exploration and exploitation + inertia_weight_min = 0.4 # Adjusted for a balance between exploration and exploitation + cognitive_coefficient = 1.4 # Slightly reduced for better performance + social_coefficient = 1.4 # Slightly reduced for better performance + differential_weight = 0.7 # Reduced for enhanced diversity + crossover_rate = 0.8 # Reduced to balance exploration and exploitation + quantum_factor = 0.07 # Increased to enhance exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.2: # Slightly increased local search probability + local_search_iters = 15 # Increased for better refinement + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/BayesianAdaptiveMemeticSearch.py b/nevergrad/optimization/lama/BayesianAdaptiveMemeticSearch.py new file mode 100644 index 000000000..87c441369 --- /dev/null +++ b/nevergrad/optimization/lama/BayesianAdaptiveMemeticSearch.py @@ -0,0 +1,126 @@ +import numpy as np +from scipy.stats import norm + + +class BayesianAdaptiveMemeticSearch: + def __init__( + self, + budget, + population_size=50, + memetic_rate=0.5, + alpha=0.1, + learning_rate=0.01, + elite_fraction=0.2, + mutation_factor=0.7, + crossover_prob=0.9, + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + self.mutation_factor = mutation_factor + self.crossover_prob = crossover_prob + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.normal(size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def ensemble_step(self, func, pop, scores, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def bayesian_selection(self, func, pop, scores): + mean = np.mean(scores) + std_dev = np.std(scores) + if std_dev == 0: + return pop[np.argmin(scores)] + z_scores = (scores - mean) / std_dev + prob_selection = norm.cdf(z_scores) + chosen_index = np.argmax(prob_selection) + return pop[chosen_index] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Perform hybrid step + pop, scores = self.ensemble_step(func, pop, scores, global_best_position) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + # Bayesian selection + global_best_position = self.bayesian_selection(func, pop, scores) + global_best_score = func(global_best_position) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CAMSQSOB.py b/nevergrad/optimization/lama/CAMSQSOB.py new file mode 100644 index 000000000..d805dab51 --- /dev/null +++ b/nevergrad/optimization/lama/CAMSQSOB.py @@ -0,0 +1,77 @@ +import numpy as np + + +class CAMSQSOB: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # Dimensionality of the BBOB test suite + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + last_direction = np.zeros(self.dimension) # Initialize the last_direction properly + + current_position = np.random.uniform(self.lower_bound, self.upper_bound, self.dimension) + current_fitness = func(current_position) + self.update_optimum(current_position, current_fitness) + + delta = 0.5 # Initial step size + alpha = 0.9 # More aggressive reduction factor for step size + beta = 0.5 # Momentum term for stabilizing the direction update + iteration = 1 + + while iteration < self.budget: + scale_changes = [delta * (0.5**i) for i in range(3)] + for scale in scale_changes: + if iteration >= self.budget: + break + points, fitnesses = self.generate_points(func, current_position, scale) + A, b = self.fit_quadratic(points, fitnesses) + + if np.linalg.cond(A) < 1e10: # Condition to check invertibility + step_direction = -np.linalg.solve(A, b) + direction = beta * last_direction + (1 - beta) * step_direction + new_position = np.clip(current_position + direction, self.lower_bound, self.upper_bound) + new_fitness = func(new_position) + self.update_optimum(new_position, new_fitness) + + # Opposition-based Learning + opposite_position = self.lower_bound + self.upper_bound - new_position + opposite_fitness = func(opposite_position) + self.update_optimum(opposite_position, opposite_fitness) + + if opposite_fitness < new_fitness: + new_position, new_fitness = opposite_position, opposite_fitness + + if new_fitness < current_fitness: + current_position, current_fitness = new_position, new_fitness + delta = min(delta / alpha, 1.0) # Adjust delta upon improvement + last_direction = direction + else: + delta *= alpha # Reduce delta upon failure + + iteration += 2 * self.dimension + 2 + + return self.f_opt, self.x_opt + + def generate_points(self, func, center, delta): + points = np.vstack( + [center + delta * np.eye(self.dimension)[i] for i in range(self.dimension)] + + [center - delta * np.eye(self.dimension)[i] for i in range(self.dimension)] + ) + fitnesses = np.array([func(point) for point in points]) + return points, fitnesses + + def update_optimum(self, x, f): + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + def fit_quadratic(self, points, fitnesses): + X = np.hstack([np.ones((len(fitnesses), 1)), points]) + coeffs = np.linalg.lstsq(X, fitnesses - fitnesses.min(), rcond=None)[0] + A = np.diag(coeffs[1:]) + b = coeffs[: self.dimension] + return A, b diff --git a/nevergrad/optimization/lama/CGES.py b/nevergrad/optimization/lama/CGES.py new file mode 100644 index 000000000..7ad6ed14a --- /dev/null +++ b/nevergrad/optimization/lama/CGES.py @@ -0,0 +1,63 @@ +import numpy as np + + +class CGES: + def __init__(self, budget, population_size=100, beta=0.15, mutation_strength=0.1, elitism=3): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.beta = beta # Gradient influence in update + self.mutation_strength = mutation_strength + self.elitism = elitism # Number of elites + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary cycle + while num_evals < self.budget: + # Sort individuals based on fitness + indices = np.argsort(fitness) + elites = population[indices[: self.elitism]] + + new_population = np.zeros_like(population) + new_population[: self.elitism] = elites # Preserve elites directly + + # Generate new candidates + for i in range(self.elitism, self.population_size): + # Select random elite as a base for new candidate + base_idx = np.random.choice(np.arange(self.elitism)) + base = population[indices[base_idx]] + + # Gradient direction towards best individual + direction = best_individual - base + + # Mutation: normal perturbation + mutation = np.random.normal(0, self.mutation_strength, self.dimension) + + # Create new individual + new_individual = base + self.beta * direction + mutation + new_individual = np.clip(new_individual, self.lb, self.ub) # Ensure bounds are respected + + new_population[i] = new_individual + + population = new_population + fitness = np.array([func(ind) for ind in population]) + num_evals += self.population_size - self.elitism + + # Update the best individual found so far + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[current_best_idx].copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/CMADifferentialEvolutionPSO.py b/nevergrad/optimization/lama/CMADifferentialEvolutionPSO.py new file mode 100644 index 000000000..a75fbdef7 --- /dev/null +++ b/nevergrad/optimization/lama/CMADifferentialEvolutionPSO.py @@ -0,0 +1,122 @@ +import numpy as np + + +class CMADifferentialEvolutionPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.min_pop_size = 20 + self.initial_F = 0.5 # Initial mutation factor + self.initial_CR = 0.9 # Initial crossover rate + self.c1 = 1.5 # Cognitive parameter + self.c2 = 1.5 # Social parameter + self.w = 0.5 # Inertia weight + self.restart_threshold = 100 # Stagnation threshold + self.sigma = 0.3 # Initial step size for CMA + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.initial_pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return parent1 + F * (parent2 - parent3) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def cma_update(self, population, mean, cov_matrix): + new_samples = np.random.multivariate_normal(mean, cov_matrix, size=population.shape[0]) + return np.clip(new_samples, -5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + no_improvement_counter = 0 + stagnation_monitor = [] + + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.initial_pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F = np.random.uniform(0.4, 0.9) # Adaptive mutation factor + CR = np.random.uniform(0.6, 1.0) # Adaptive crossover rate + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + stagnation_monitor.append(global_best_score) + + if no_improvement_counter >= self.restart_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + evaluations += self.initial_pop_size + + # CMA Update + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + population = self.cma_update(population, mean, cov_matrix) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CMDEALX.py b/nevergrad/optimization/lama/CMDEALX.py new file mode 100644 index 000000000..7b8ec7f67 --- /dev/null +++ b/nevergrad/optimization/lama/CMDEALX.py @@ -0,0 +1,74 @@ +import numpy as np + + +class CMDEALX: + def __init__( + self, budget, population_size=50, F_init=0.5, CR_init=0.9, local_search_factor=0.1, max_local_steps=20 + ): + self.budget = budget + self.CR_init = CR_init + self.F_init = F_init + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.local_search_factor = local_search_factor + self.max_local_steps = max_local_steps + + def __call__(self, func): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + F = self.F_init + CR = self.CR_init + evaluations = self.population_size + local_search_steps = 0 + + while evaluations < self.budget: + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d, e = np.random.choice(idxs, 5, replace=False) + + # Cross-mutative strategy + mutant = ( + population[a] + F * (population[b] - population[c]) + F * (population[d] - population[e]) + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Adaptive Local Exploration + if np.random.rand() < self.local_search_factor: + local_candidate = best_solution + np.random.normal(0, 0.1, self.dimension) + local_candidate = np.clip(local_candidate, self.lower_bound, self.upper_bound) + local_fitness = func(local_candidate) + evaluations += 1 + if local_fitness < best_fitness: + best_solution = local_candidate + best_fitness = local_fitness + local_search_steps += 1 + if local_search_steps > self.max_local_steps: + local_search_steps = 0 + self.local_search_factor /= 2 # Reduce the intensity if too many local searches + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ClusterAdaptiveQuantumLevyOptimizer.py b/nevergrad/optimization/lama/ClusterAdaptiveQuantumLevyOptimizer.py new file mode 100644 index 000000000..4b8e9ceea --- /dev/null +++ b/nevergrad/optimization/lama/ClusterAdaptiveQuantumLevyOptimizer.py @@ -0,0 +1,153 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class ClusterAdaptiveQuantumLevyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 + elite_size = 5 + cluster_count = 5 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + kmeans = KMeans(n_clusters=cluster_count) + clusters = kmeans.fit_predict(population) + cluster_centers = kmeans.cluster_centers_ + + for cluster_center in cluster_centers: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(cluster_center + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ClusterBasedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ClusterBasedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..fe3652d49 --- /dev/null +++ b/nevergrad/optimization/lama/ClusterBasedAdaptiveDifferentialEvolution.py @@ -0,0 +1,148 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class ClusterBasedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7, cluster_size=5): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.cluster_size = cluster_size + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + kmeans = KMeans(n_clusters=self.cluster_size, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for i in range(len(population)): + if np.linalg.norm(population[i] - cluster_centers[kmeans.labels_[i]]) < 1e-3: + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + success_count_history = [] + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + success_count_history.append(success_rate) + if len(success_count_history) > 10: + success_count_history.pop(0) + + avg_success_rate = np.mean(success_count_history) + + self.base_lr = adaptive_lr(avg_success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + self.mutation_factor = np.clip( + self.mutation_factor * (1.1 if avg_success_rate > 0.2 else 0.9), 0.4, 1.0 + ) + self.crossover_rate = np.clip( + self.crossover_rate * (1.05 if avg_success_rate > 0.2 else 0.95), 0.6, 1.0 + ) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ClusterBasedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ClusteredAdaptiveHybridPSODESimulatedAnnealing.py b/nevergrad/optimization/lama/ClusteredAdaptiveHybridPSODESimulatedAnnealing.py new file mode 100644 index 000000000..e9c45370c --- /dev/null +++ b/nevergrad/optimization/lama/ClusteredAdaptiveHybridPSODESimulatedAnnealing.py @@ -0,0 +1,128 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class ClusteredAdaptiveHybridPSODESimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.num_clusters = 5 + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def simulated_annealing(self, current_position, current_fitness, func, temp): + new_position = current_position + np.random.uniform(-0.1, 0.1, self.dim) + new_position = np.clip(new_position, self.lb, self.ub) + new_fitness = func(new_position) + if new_fitness < current_fitness or np.exp((current_fitness - new_fitness) / temp) > np.random.rand(): + return new_position, new_fitness + return current_position, current_fitness + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + temperature = self.adaptive_parameters(evaluations, self.budget, 1.0, 0.01) + + # Clustering + kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(population) + cluster_labels = kmeans.labels_ + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + cluster_id = cluster_labels[i] + cluster_center = cluster_centers[cluster_id] + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (cluster_center - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + global_best_position = trial_vector + global_best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + population[i], fitness[i] = self.simulated_annealing( + population[i], fitness[i], func, temperature + ) + evaluations += 1 + + if fitness[i] < self.f_opt: + self.f_opt = fitness[i] + self.x_opt = population[i] + global_best_position = population[i] + global_best_fitness = fitness[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ClusteredDifferentialEvolutionWithLocalSearch.py b/nevergrad/optimization/lama/ClusteredDifferentialEvolutionWithLocalSearch.py new file mode 100644 index 000000000..4b6791a71 --- /dev/null +++ b/nevergrad/optimization/lama/ClusteredDifferentialEvolutionWithLocalSearch.py @@ -0,0 +1,111 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.cluster import KMeans + + +class ClusteredDifferentialEvolutionWithLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem statement + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.num_clusters = 10 + self.F = 0.8 + self.CR = 0.9 + self.memory = [] + self.memory_size = 20 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _nelder_mead_local_search(self, x, func): + res = minimize(func, x, method="nelder-mead", options={"xtol": 1e-6, "disp": False}) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.random.uniform(0.4, 1.0) + self.CR = np.random.uniform(0.1, 1.0) + + def _cluster_search(self, population, func): + kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for center in cluster_centers: + if self.evaluations >= self.budget: + break + local_opt, f_local_opt = self._nelder_mead_local_search(center, func) + self.evaluations += 1 + if f_local_opt < self.f_opt: + self.f_opt = f_local_opt + self.x_opt = local_opt + + def _memory_local_search(self, func): + for mem in self.memory: + if self.evaluations >= self.budget: + break + local_opt, f_local_opt = self._nelder_mead_local_search(mem, func) + self.evaluations += 1 + if f_local_opt < self.f_opt: + self.f_opt = f_local_opt + self.x_opt = local_opt + + def _adaptive_restart(self, population, fitness, func): + mean_fitness = np.mean(fitness) + std_fitness = np.std(fitness) + if std_fitness < 1e-6 and self.evaluations < self.budget * 0.9: + new_pop_size = min(self.pop_size * 2, self.budget - self.evaluations) + new_population = np.random.uniform(self.lb, self.ub, (new_pop_size, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + self.evaluations += new_pop_size + return new_population, new_fitness + return population, fitness + + def _crossover(self, a, b, c): + rand_idx = np.random.randint(self.dim) + mutant_vector = np.copy(a) + for j in range(self.dim): + if np.random.rand() < self.CR or j == rand_idx: + mutant_vector[j] = a[j] + self.F * (b[j] - c[j]) + else: + mutant_vector[j] = a[j] + return np.clip(mutant_vector, self.lb, self.ub) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + self.evaluations = len(population) + + while self.evaluations < self.budget: + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + trial_vector = self._crossover(a, b, c) + f_candidate = func(trial_vector) + self.evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + + if self.evaluations < self.budget: + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + self._dynamic_parameters() + self._cluster_search(population, func) + self._memory_local_search(func) + population, fitness = self._adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CoevolutionaryDualPopulationSearch.py b/nevergrad/optimization/lama/CoevolutionaryDualPopulationSearch.py new file mode 100644 index 000000000..2e1087147 --- /dev/null +++ b/nevergrad/optimization/lama/CoevolutionaryDualPopulationSearch.py @@ -0,0 +1,124 @@ +import numpy as np + + +class CoevolutionaryDualPopulationSearch: + def __init__( + self, budget, population_size=30, mutation_rate=0.1, crossover_rate=0.7, learning_rate=0.01, alpha=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.learning_rate = learning_rate + self.alpha = alpha # Weight for adaptive learning + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize two populations + pop_exploratory = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + pop_exploitative = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + + scores_exploratory = np.array([func(ind) for ind in pop_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + best_idx_exploratory = np.argmin(scores_exploratory) + best_idx_exploitative = np.argmin(scores_exploitative) + + global_best_position = pop_exploratory[best_idx_exploratory] + global_best_score = scores_exploratory[best_idx_exploratory] + + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + evaluations = 2 * self.population_size + + while evaluations < self.budget: + # Exploratory Population: Tournament selection and blend crossover (BLX-α) + selected_exploratory = [] + for _ in range(self.population_size): + i, j = np.random.randint(0, self.population_size, 2) + if scores_exploratory[i] < scores_exploitative[j]: + selected_exploratory.append(pop_exploratory[i]) + else: + selected_exploratory.append(pop_exploitative[j]) + selected_exploratory = np.array(selected_exploratory) + + offspring_exploratory = [] + for i in range(0, self.population_size, 2): + if i + 1 >= self.population_size: + break + parent1, parent2 = selected_exploratory[i], selected_exploratory[i + 1] + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-self.alpha, 1 + self.alpha, dim) + child1 = alpha * parent1 + (1 - alpha) * parent2 + child2 = alpha * parent2 + (1 - alpha) * parent1 + else: + child1, child2 = parent1, parent2 + offspring_exploratory.extend([child1, child2]) + offspring_exploratory = np.array(offspring_exploratory[: self.population_size]) + + # Mutation for Exploratory Population + for i in range(self.population_size): + if np.random.rand() < self.mutation_rate: + offspring_exploratory[i] += np.random.normal(0, 0.1, dim) + offspring_exploratory[i] = np.clip(offspring_exploratory[i], lower_bound, upper_bound) + + # Exploitative Population: Gradient-based local search with adaptive learning rate + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop_exploitative[i]) + learning_rate_adaptive = self.learning_rate / (1 + evaluations / self.budget) + pop_exploitative[i] = np.clip( + pop_exploitative[i] - learning_rate_adaptive * grad, lower_bound, upper_bound + ) + + # Evaluate offspring + scores_offspring_exploratory = np.array([func(ind) for ind in offspring_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + evaluations += self.population_size # Exploratory evaluations + evaluations += self.population_size # Exploitative evaluations + + # Update exploratory population and scores + pop_exploratory, scores_exploratory = offspring_exploratory, scores_offspring_exploratory + + # Update global best from both populations + best_idx_exploratory = np.argmin(scores_exploratory) + if scores_exploratory[best_idx_exploratory] < global_best_score: + global_best_score = scores_exploratory[best_idx_exploratory] + global_best_position = pop_exploratory[best_idx_exploratory] + + best_idx_exploitative = np.argmin(scores_exploitative) + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + # Swap roles if one population is stagnating + if evaluations % (2 * self.population_size) == 0: + if np.min(scores_exploratory) == global_best_score: + pop_exploratory = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores_exploratory = np.array([func(ind) for ind in pop_exploratory]) + if np.min(scores_exploitative) == global_best_score: + pop_exploitative = np.random.uniform( + lower_bound, upper_bound, (self.population_size, dim) + ) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CohortDiversityDrivenOptimization.py b/nevergrad/optimization/lama/CohortDiversityDrivenOptimization.py new file mode 100644 index 000000000..2b8f50a1d --- /dev/null +++ b/nevergrad/optimization/lama/CohortDiversityDrivenOptimization.py @@ -0,0 +1,72 @@ +import numpy as np + + +class CohortDiversityDrivenOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.15, + mutation_factor=0.08, + recombination_prob=0.85, + adaptation_intensity=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_factor = mutation_factor + self.recombination_prob = recombination_prob + self.adaptation_intensity = adaptation_intensity + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + mean_elite = np.mean(population[elite_indices], axis=0) + + for i in range(self.population_size): + if np.random.rand() < self.recombination_prob: + # Recombination from elite members + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices[0]], population[parents_indices[1]] + alpha = np.random.rand() + child = alpha * parent1 + (1 - alpha) * parent2 + else: + # Mutation based on distance from elite mean + elite_member = population[np.random.choice(elite_indices)] + mutation_direction = np.random.randn(self.dimension) + child = elite_member + self.mutation_factor * mutation_direction * np.linalg.norm( + mean_elite - elite_member + ) + + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adapt mutation factor to converge slower as nearing budget limit + self.mutation_factor *= self.adaptation_intensity + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/CohortEvolutionWithDynamicSelection.py b/nevergrad/optimization/lama/CohortEvolutionWithDynamicSelection.py new file mode 100644 index 000000000..9c70231f6 --- /dev/null +++ b/nevergrad/optimization/lama/CohortEvolutionWithDynamicSelection.py @@ -0,0 +1,74 @@ +import numpy as np + + +class CohortEvolutionWithDynamicSelection: + def __init__(self, budget, dimension=5, population_size=100, elite_fraction=0.1, mutation_intensity=0.1): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial intensity for mutation + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + for i in range(self.population_size): + if np.random.rand() < self.dynamic_mutation_rate(evaluations, self.budget): + # Mutation occurs + parent_idx = np.random.choice(self.elite_count) + parent = elites[parent_idx] + mutation = self.dynamic_mutation_scale(evaluations, self.budget) * np.random.normal( + 0, 1, self.dimension + ) + child = parent + mutation + else: + # Crossover between two elites + parents_indices = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(self.dimension) + child = np.concatenate( + ( + population[parents_indices[0]][:crossover_point], + population[parents_indices[1]][crossover_point:], + ) + ) + + # Ensure the child is within bounds + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def dynamic_mutation_rate(self, evaluations, budget): + # Decrease mutation rate as the budget is consumed + return max(0.05, 1 - evaluations / budget) + + def dynamic_mutation_scale(self, evaluations, budget): + # Decrease mutation scale as progress is made + return self.mutation_intensity * (1 - evaluations / budget) ** 2 diff --git a/nevergrad/optimization/lama/ConcentricConvergenceOptimizer.py b/nevergrad/optimization/lama/ConcentricConvergenceOptimizer.py new file mode 100644 index 000000000..54d1671f8 --- /dev/null +++ b/nevergrad/optimization/lama/ConcentricConvergenceOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class ConcentricConvergenceOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_learning_rate = 0.1 + self.local_learning_rate = 0.2 + self.convergence_rate = 0.05 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + return positions + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + # Concentric updates focusing on both global best and individual refinement + personal_vector = np.random.normal(0, self.local_learning_rate, self.dimension) + global_vector = np.random.normal(0, self.global_learning_rate, self.dimension) + + # Move towards global best while exploring locally + positions[i] += global_vector * (global_best_position - positions[i]) + personal_vector + + # Ensure particles stay within bounds + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + # Evaluate new position + new_fitness = func(positions[i]) + evaluations += 1 + + # Update personal and global bests + if new_fitness < fitness[i]: + fitness[i] = new_fitness + if new_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = new_fitness + + # Reduce learning rates to increase convergence over time + self.global_learning_rate *= 1 - self.convergence_rate + self.local_learning_rate *= 1 - self.convergence_rate + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/ConcentricDiversityStrategy.py b/nevergrad/optimization/lama/ConcentricDiversityStrategy.py new file mode 100644 index 000000000..de168fafb --- /dev/null +++ b/nevergrad/optimization/lama/ConcentricDiversityStrategy.py @@ -0,0 +1,113 @@ +import numpy as np + + +class ConcentricDiversityStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=50, + population_per_island=60, + migration_interval=15, + migration_rate=0.15, + mutation_intensity=1.8, + mutation_decay=0.92, + elite_ratio=0.2, + crossover_probability=0.85, + tournament_size=5, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_interval = migration_interval + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + generation = 0 + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if generation % self.migration_interval == 0: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + generation += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ConcentricGradientDescentEvolver.py b/nevergrad/optimization/lama/ConcentricGradientDescentEvolver.py new file mode 100644 index 000000000..a6ac91ac9 --- /dev/null +++ b/nevergrad/optimization/lama/ConcentricGradientDescentEvolver.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ConcentricGradientDescentEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + perturbation_scale=0.1, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.perturbation_scale = perturbation_scale # Controls perturbation for gradient estimation + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation operation using Gaussian noise + mutation = np.random.normal(0, self.perturbation_scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def approximate_gradient(self, individual, func): + # Random perturbation gradient estimation + perturbation = np.random.normal(0, self.perturbation_scale, self.dimension) + perturbed_individual = np.clip(individual + perturbation, self.bounds[0], self.bounds[1]) + gradient = ( + (func(perturbed_individual) - func(individual)) + / (np.linalg.norm(perturbation) + 1e-16) + * perturbation + ) + return gradient + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + gradient = self.approximate_gradient(population[i], func) + new_individual = population[i] - gradient # Gradient descent step + new_individual = self.mutate(new_individual) # Apply mutation + new_fitness = func(new_individual) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_individual + fitness[i] = new_fitness + + if new_fitness < best_fitness: + best_individual = new_individual + best_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/ConcentricGradientEnhancedEvolver.py b/nevergrad/optimization/lama/ConcentricGradientEnhancedEvolver.py new file mode 100644 index 000000000..3140c30ab --- /dev/null +++ b/nevergrad/optimization/lama/ConcentricGradientEnhancedEvolver.py @@ -0,0 +1,72 @@ +import numpy as np + + +class ConcentricGradientEnhancedEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + perturbation_scale=0.1, + learning_rate=0.01, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.perturbation_scale = perturbation_scale # Controls perturbation for gradient estimation + self.learning_rate = learning_rate # Controls the step size in the gradient update + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation operation using Gaussian noise to maintain diversity + mutation = np.random.normal(0, self.perturbation_scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def approximate_gradient(self, individual, func): + # Improved gradient estimation with central difference + gradients = np.zeros(self.dimension) + for idx in range(self.dimension): + perturbation = np.zeros(self.dimension) + perturbation[idx] = self.perturbation_scale + forward = np.clip(individual + perturbation, self.bounds[0], self.bounds[1]) + backward = np.clip(individual - perturbation, self.bounds[0], self.bounds[1]) + gradients[idx] = (func(forward) - func(backward)) / (2 * self.perturbation_scale) + return gradients + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + gradient = self.approximate_gradient(population[i], func) + new_individual = ( + population[i] - self.learning_rate * gradient + ) # Gradient descent step with controlled learning rate + new_individual = self.mutate(new_individual) # Apply mutation for diversity + new_fitness = func(new_individual) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_individual + fitness[i] = new_fitness + + if new_fitness < best_fitness: + best_individual = new_individual + best_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/ConcentricQuantumCrossoverStrategyV4.py b/nevergrad/optimization/lama/ConcentricQuantumCrossoverStrategyV4.py new file mode 100644 index 000000000..802cb45eb --- /dev/null +++ b/nevergrad/optimization/lama/ConcentricQuantumCrossoverStrategyV4.py @@ -0,0 +1,91 @@ +import numpy as np + + +class ConcentricQuantumCrossoverStrategyV4: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.04, + mutation_intensity=0.08, + crossover_rate=0.9, + quantum_prob=0.35, + gamma=0.1, + beta=0.5, + epsilon=0.005, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + self.beta = beta # Coefficient for dynamic mutation intensity adjustment + self.epsilon = epsilon # Minimum mutation intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Apply a controlled quantum state update to explore potential better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/ConvergenceAcceleratedSpiralSearch.py b/nevergrad/optimization/lama/ConvergenceAcceleratedSpiralSearch.py new file mode 100644 index 000000000..a5a2f6ba2 --- /dev/null +++ b/nevergrad/optimization/lama/ConvergenceAcceleratedSpiralSearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class ConvergenceAcceleratedSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial setup + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Start with a full range + angle_increment = np.pi / 4 # Initial broad angle for exploration + + # Adaptive parameters + radius_decay = 0.93 # Faster radius decay to focus search more quickly + angle_refinement = 0.9 # Faster angle refinement for quicker focus + evaluations_left = self.budget + min_radius = 0.0005 # Very fine minimum radius to allow detailed exploration + + # Dynamic angle adjustment based on feedback loop + optimal_change_factor = 2.0 # Dynamic adjustment based on improvement + no_improvement_count = 0 + last_best_f = np.inf + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max(int(2 * np.pi / angle_increment), 6) # Ensure at least 6 points + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + displacement = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Determine if there has been an improvement + if self.f_opt < last_best_f: + last_best_f = self.f_opt + no_improvement_count = 0 + # Accelerate search by narrowing down quicker + radius_decay = min(radius_decay * optimal_change_factor, 0.95) + angle_refinement = min(angle_refinement * optimal_change_factor, 0.95) + else: + no_improvement_count += 1 + + # Update centroid to new best point + if points: + best_index = np.argmin(function_values) + centroid = points[best_index] + + # Adjust search parameters if stuck in local optima + if no_improvement_count > 10: + radius *= radius_decay # Tighten search + radius = max(radius, min_radius) # Ensure not too small + angle_increment *= angle_refinement # Refine search + no_improvement_count = 0 # Reset counter + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ConvergentAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/ConvergentAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..e8cc57c1a --- /dev/null +++ b/nevergrad/optimization/lama/ConvergentAdaptiveEvolutionStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class ConvergentAdaptiveEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + elite_fraction=0.1, + mutation_rate=0.2, + mutation_decrease=0.95, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_rate = mutation_rate + self.mutation_decrease = mutation_decrease + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual, scale): + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return individual + + def reproduce(self, elites, elite_fitness): + new_population = np.copy(elites) + while len(new_population) < self.population_size: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = (elites[parents[0]] + elites[parents[1]]) / 2 + child = self.mutate(child, self.mutation_scale) + new_population = np.vstack([new_population, child]) + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + self.mutation_scale = (self.upper_bound - self.lower_bound) / 2 + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += len(population) + self.mutation_scale *= self.mutation_decrease # Decrease mutation scale adaptively + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ConvergentAdaptiveEvolutiveStrategy.py b/nevergrad/optimization/lama/ConvergentAdaptiveEvolutiveStrategy.py new file mode 100644 index 000000000..2c20c2dd7 --- /dev/null +++ b/nevergrad/optimization/lama/ConvergentAdaptiveEvolutiveStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ConvergentAdaptiveEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate, mutation_strength): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + for _ in range(num_children): + if np.random.rand() < 0.9: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + population_size = 200 + num_generations = self.budget // population_size + elitism_size = population_size // 20 # 5% elitism + mutation_rate = 0.05 + mutation_strength = 0.5 + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, elitism_size) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + non_elite_size = population_size - elitism_size + offspring = self.crossover(best_population, non_elite_size) + offspring = self.mutate(offspring, mutation_rate, mutation_strength) + population = np.vstack((best_population, offspring)) + + # Dynamic adaptation of mutation parameters + if gen % 5 == 0 and gen > 0: + mutation_rate *= 0.95 + mutation_strength *= 0.95 + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/CooperativeAdaptiveCulturalSearch.py b/nevergrad/optimization/lama/CooperativeAdaptiveCulturalSearch.py new file mode 100644 index 000000000..cb96059db --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeAdaptiveCulturalSearch.py @@ -0,0 +1,117 @@ +import numpy as np + + +class CooperativeAdaptiveCulturalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: # More infrequent updates + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeAdaptiveEvolutionaryOptimizer.py b/nevergrad/optimization/lama/CooperativeAdaptiveEvolutionaryOptimizer.py new file mode 100644 index 000000000..2f1192ad7 --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeAdaptiveEvolutionaryOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class CooperativeAdaptiveEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.4 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=8, step_size=0.1 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations % (population_size * 3) == 0: + worst_indices = np.argsort(fitness)[-int(0.3 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 2) == 0 and population_size > 20: + best_indices = np.argsort(fitness)[: int(0.6 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeCulturalAdaptiveSearch.py b/nevergrad/optimization/lama/CooperativeCulturalAdaptiveSearch.py new file mode 100644 index 000000000..5d162f216 --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeCulturalAdaptiveSearch.py @@ -0,0 +1,108 @@ +import numpy as np + + +class CooperativeCulturalAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.2: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size // 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.1 + + # Cooperative cultural influence updates + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * np.random.normal( + 0, 0.1, self.dim + ) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeCulturalDifferentialSearch.py b/nevergrad/optimization/lama/CooperativeCulturalDifferentialSearch.py new file mode 100644 index 000000000..cb2a1426a --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeCulturalDifferentialSearch.py @@ -0,0 +1,125 @@ +import numpy as np + + +class CooperativeCulturalDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 # Reduced for refined convergence + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.4: # Increased probability to exploit local regions + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.25 + (0.35 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeCulturalEvolutionStrategy.py b/nevergrad/optimization/lama/CooperativeCulturalEvolutionStrategy.py new file mode 100644 index 000000000..e55b47693 --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeCulturalEvolutionStrategy.py @@ -0,0 +1,109 @@ +import numpy as np + + +class CooperativeCulturalEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "worst_fitness": -np.inf, + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + if trial_fitness > knowledge_base["worst_fitness"]: + knowledge_base["worst_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.1: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size // 2) == 0: + cultural_shift = (knowledge_base["best_solution"] - knowledge_base["mean_position"]) * 0.1 + + # Cooperative cultural influence updates + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * np.random.normal( + 0, 0.1, self.dim + ) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.5, 1.0, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeEvolutionaryGradientSearch.py b/nevergrad/optimization/lama/CooperativeEvolutionaryGradientSearch.py new file mode 100644 index 000000000..5c77633ea --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeEvolutionaryGradientSearch.py @@ -0,0 +1,93 @@ +import numpy as np + + +class CooperativeEvolutionaryGradientSearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_pos[i] += epsilon + f_pos = func(x_pos) + x_neg = np.copy(x) + x_neg[i] -= epsilon + f_neg = func(x_neg) + grad[i] = (f_pos - f_neg) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.8 - 0.7 * (iteration / max_iterations) + self.crossover_rate = 0.9 - 0.4 * (iteration / max_iterations) + self.learning_rate = 0.01 * np.exp(-iteration / (0.5 * max_iterations)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Perform local search on the global best solution + global_best_position, global_best_score = self.local_search( + func, global_best_position, global_best_score + ) + evaluations += 1 + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CooperativeParticleSwarmOptimization.py b/nevergrad/optimization/lama/CooperativeParticleSwarmOptimization.py new file mode 100644 index 000000000..2ce475ccc --- /dev/null +++ b/nevergrad/optimization/lama/CooperativeParticleSwarmOptimization.py @@ -0,0 +1,62 @@ +import numpy as np + + +class CooperativeParticleSwarmOptimization: + def __init__(self, budget, population_size=50, w=0.5, c1=2, c2=2): + self.budget = budget + self.population_size = population_size + self.w = w # inertia weight + self.c1 = c1 # cognitive coefficient + self.c2 = c2 # social coefficient + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize the swarm + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + velocities = np.random.uniform(-1, 1, (self.population_size, dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in personal_best_positions]) + + best_idx = np.argmin(personal_best_scores) + global_best_position = personal_best_positions[best_idx] + global_best_score = personal_best_scores[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Update velocity + r1, r2 = np.random.rand(dim), np.random.rand(dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (global_best_position - population[i]) + ) + + # Update position + population[i] = np.clip(population[i] + velocities[i], lower_bound, upper_bound) + + # Evaluate fitness + score = func(population[i]) + evaluations += 1 + + # Update personal best + if score < personal_best_scores[i]: + personal_best_scores[i] = score + personal_best_positions[i] = population[i] + + # Update global best + if score < global_best_score: + global_best_score = score + global_best_position = population[i] + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CoordinatedAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/CoordinatedAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..9a2e73d7d --- /dev/null +++ b/nevergrad/optimization/lama/CoordinatedAdaptiveHybridOptimizer.py @@ -0,0 +1,132 @@ +import numpy as np + + +class CoordinatedAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Significantly increased population size for better exploration + self.initial_F = 0.7 + self.initial_CR = 0.8 + self.c1 = 1.2 # Tuned parameters for better balance between exploration and exploitation + self.c2 = 1.2 + self.w = 0.4 # Adjusted inertia weight for refined control + self.elite_fraction = 0.2 # Decreased elite fraction to focus on broader exploration + self.diversity_threshold = 1e-5 # Adjusted threshold for controlled diversity + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 10, self.budget - evaluations + ) # Reduced local search iterations for faster convergence + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.05, bounds.lb, bounds.ub + ) # Slightly increased perturbation + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CovarianceMatrixAdaptationDifferentialEvolution.py b/nevergrad/optimization/lama/CovarianceMatrixAdaptationDifferentialEvolution.py new file mode 100644 index 000000000..8bc8486f6 --- /dev/null +++ b/nevergrad/optimization/lama/CovarianceMatrixAdaptationDifferentialEvolution.py @@ -0,0 +1,70 @@ +import numpy as np + + +class CovarianceMatrixAdaptationDifferentialEvolution: + def __init__(self, budget, population_size=50, F=0.5, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F = F + self.CR = CR + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Adaptive Mutation and Crossover + self.F = 0.5 + 0.3 * np.random.rand() + self.CR = 0.8 + 0.2 * np.random.rand() + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), lower_bound, upper_bound) + + cross_points = np.random.rand(dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + new_population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Covariance Matrix Adaptation + mean = np.mean(new_population, axis=0) + cov_matrix = np.cov(new_population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 # Ensure symmetry + cov_matrix = np.clip(cov_matrix, -1, 1) # Prevent numerical issues + + population = np.random.multivariate_normal(mean, cov_matrix, self.population_size) + population = np.clip(population, lower_bound, upper_bound) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CulturalAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/CulturalAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..38313fd26 --- /dev/null +++ b/nevergrad/optimization/lama/CulturalAdaptiveDifferentialEvolution.py @@ -0,0 +1,114 @@ +import numpy as np + + +class CulturalAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=10): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, 0.1, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.3: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 10 # Assuming guided search uses 10 evaluations + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/CulturalGuidedDifferentialEvolution.py b/nevergrad/optimization/lama/CulturalGuidedDifferentialEvolution.py new file mode 100644 index 000000000..9afbf2414 --- /dev/null +++ b/nevergrad/optimization/lama/CulturalGuidedDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class CulturalGuidedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=3): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 80 # Increased population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.25: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 3 # Reduced evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DADERC.py b/nevergrad/optimization/lama/DADERC.py new file mode 100644 index 000000000..358028a8d --- /dev/null +++ b/nevergrad/optimization/lama/DADERC.py @@ -0,0 +1,68 @@ +import numpy as np + + +class DADERC: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.9, adapt_rate=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.adapt_rate = adapt_rate # Rate of adaptation for parameters + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Adaptive mutation strategy controlled by a dual mechanism + Fs = np.random.normal(self.F_base, 0.1, self.population_size) + CRs = np.random.normal(self.CR_base, 0.1, self.population_size) + + for i in range(self.population_size): + # Ensure parameter bounds + F = np.clip(Fs[i], 0.1, 2) + CR = np.clip(CRs[i], 0, 1) + + # Mutation + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CR, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if num_evals >= self.budget: + break + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adapt F_base and CR_base using feedback from the population improvement + successful_Fs = Fs[fitness < np.array([func(ind) for ind in population])] + successful_CRs = CRs[fitness < np.array([func(ind) for ind in population])] + if len(successful_Fs) > 0: + self.F_base += self.adapt_rate * (np.mean(successful_Fs) - self.F_base) + if len(successful_CRs) > 0: + self.CR_base += self.adapt_rate * (np.mean(successful_CRs) - self.CR_base) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DADESM.py b/nevergrad/optimization/lama/DADESM.py new file mode 100644 index 000000000..6a2ef7d9d --- /dev/null +++ b/nevergrad/optimization/lama/DADESM.py @@ -0,0 +1,64 @@ +import numpy as np + + +class DADESM: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_scale = 0.5 + self.crossover_prob = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, fitness, best_idx, F): + mutants = np.empty_like(population) + for i in range(len(population)): + if np.random.rand() < 0.5: # Dynamic strategy based on uniform probability + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + else: + best = population[best_idx] + random_idx = np.random.randint(len(population)) + random_individual = population[random_idx] + mutant_vector = np.clip(best + F * (random_individual - best), self.bounds[0], self.bounds[1]) + mutants[i] = mutant_vector + return mutants + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + offspring = np.where(cross_points, mutant, target) + return offspring + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + F = np.clip(np.random.normal(self.mutation_scale, 0.1), 0.1, 1.0) + mutants = self.mutate(population, fitness, np.argmin(fitness), F) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + improvement_mask = fitness_trials < fitness + population[improvement_mask] = trials[improvement_mask] + fitness[improvement_mask] = fitness_trials[improvement_mask] + + # Adaptive parameter control based on recent improvements + recent_improvements = np.mean(improvement_mask) + self.mutation_scale *= 0.95 if recent_improvements > 0.2 else 1.05 + self.crossover_prob = min( + max(self.crossover_prob + (0.05 if recent_improvements > 0.2 else -0.05), 0.1), 1.0 + ) + + return np.min(fitness), population[np.argmin(fitness)] diff --git a/nevergrad/optimization/lama/DADe.py b/nevergrad/optimization/lama/DADe.py new file mode 100644 index 000000000..27b4d9aaf --- /dev/null +++ b/nevergrad/optimization/lama/DADe.py @@ -0,0 +1,63 @@ +import numpy as np + + +class DADe: + def __init__(self, budget, population_size=25, F_base=0.5, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + + def __call__(self, func): + # Initialize population within the bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Tracking the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Dual adaptation mechanism for mutation and crossover parameters + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Adapt F and CR dynamically based on the progress ratio + progress = num_evals / self.budget + F = self.F_base + 0.1 * np.tan(np.pi * (progress - 0.5)) + CR = self.CR_base - 0.4 * np.sin(np.pi * progress) + + # Mutation using differential evolution strategy "best/2/bin" + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = best_individual + F * (a + b - 2 * best_individual) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial solution + trial_fitness = func(trial) + num_evals += 1 + + # Selection: Greedily select the better vector + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + # Update the population with new generation + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DAEA.py b/nevergrad/optimization/lama/DAEA.py new file mode 100644 index 000000000..117eff2a4 --- /dev/null +++ b/nevergrad/optimization/lama/DAEA.py @@ -0,0 +1,80 @@ +import numpy as np + + +class DAEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.initial_cr = 0.9 + self.initial_f = 0.8 + self.initial_temp = 1.0 + self.final_temp = 0.01 + self.alpha = 0.95 # Standard cooling rate + self.improvement_threshold = 0.01 # Threshold for considering stagnation + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, f, temperature): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = population[best_idx] + f * temperature * (x1 - x2 + x3 - population[best_idx]) + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, cr): + crossover_mask = np.random.rand(self.dimension) < cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + temperature = self.initial_temp + cr = self.initial_cr + f = self.initial_f + last_improvement = 0 + + while evaluations < self.budget: + mutated_population = self.mutate(population, best_idx, f, temperature) + offspring_population = np.array( + [ + self.crossover(population[i], mutated_population[i], cr) + for i in range(self.population_size) + ] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + improvement = False + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i] or np.random.rand() < np.exp( + (fitness[i] - offspring_fitness[i]) / temperature + ): + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution, best_idx = fitness[i], population[i], i + improvement = True + + if improvement: + last_improvement = evaluations + elif evaluations - last_improvement > self.population_size * 10: + # Adaptively increase mutation rate to escape local optima + f = min(1, f + 0.1) + last_improvement = evaluations + + temperature *= ( + self.alpha if evaluations - last_improvement <= self.population_size * 10 else self.alpha**2 + ) # Dynamic cooling + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DAES.py b/nevergrad/optimization/lama/DAES.py new file mode 100644 index 000000000..9e58730dc --- /dev/null +++ b/nevergrad/optimization/lama/DAES.py @@ -0,0 +1,79 @@ +import numpy as np + + +class DAES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.cr = 0.9 # Initial crossover probability + self.f = 0.8 # Initial differential weight + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, fitness): + new_population = np.empty_like(population) + best_idx = np.argmin(fitness) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 4, replace=False) + x1, x2, x3, x4 = ( + population[idxs[0]], + population[idxs[1]], + population[idxs[2]], + population[idxs[3]], + ) + if np.random.rand() < 0.5: # Randomly choose mutation strategy + mutant = x1 + self.f * (x2 - x3 + x4 - population[i]) # DE/rand/2 + else: + mutant = population[i] + self.f * (x1 - x2 + x3 - x4) # DE/current-to-rand/1 + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.cr + return np.where(crossover_mask, mutant, target) + + def adapt_parameters(self, improvements): + if improvements > 0: + self.cr = max(0.1, self.cr * 0.98) + self.f = max(0.5, self.f * 0.99) + else: + self.cr = min(1.0, self.cr / 0.95) + self.f = min(1.2, self.f / 0.95) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + last_best = best_fitness + + while evaluations < self.budget: + mutated_population = self.mutate(population, fitness) + offspring_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + improvements = 0 + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i]: + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + improvements += 1 + if fitness[i] < best_fitness: + best_fitness, best_solution = fitness[i], population[i] + + self.adapt_parameters(improvements) + if best_fitness == last_best: + improvements = 0 + else: + last_best = best_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DAESF.py b/nevergrad/optimization/lama/DAESF.py new file mode 100644 index 000000000..4e027b7c1 --- /dev/null +++ b/nevergrad/optimization/lama/DAESF.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DAESF: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.initial_population_size = 100 + self.min_population_size = 50 + self.max_population_size = 200 + self.decrease_factor = 0.9 + self.increase_factor = 1.1 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best, F): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.arange(len(population)), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + new_population[i] = mutant_vector + return new_population + + def crossover(self, target, mutant, CR): + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def adjust_population_size(self, improvement): + if improvement < 0.01: + self.population_size = min( + self.max_population_size, int(self.population_size * self.increase_factor) + ) + else: + self.population_size = max( + self.min_population_size, int(self.population_size * self.decrease_factor) + ) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + + while evaluations < self.budget: + F = np.random.normal(0.5, 0.1) + CR = 0.1 + 0.4 * np.random.rand() + mutants = self.mutate(population, population[best_index], F) + trials = np.array( + [self.crossover(population[i], mutants[i], CR) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + improvement = False + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + if fitness[i] < best_fitness: + best_fitness = fitness[i] + best_index = i + improvement = True + + if not improvement: + population = np.vstack((population, self.initialize_population())) + fitness = self.evaluate(population, func) + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + evaluations += self.population_size + + self.adjust_population_size(np.abs(fitness[best_index] - best_fitness)) + + return best_fitness, population[best_index] diff --git a/nevergrad/optimization/lama/DASES.py b/nevergrad/optimization/lama/DASES.py new file mode 100644 index 000000000..3350b7d98 --- /dev/null +++ b/nevergrad/optimization/lama/DASES.py @@ -0,0 +1,67 @@ +import numpy as np + + +class DASES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.elite_size = 5 # Top 10% as elite + + def initialize(self): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + mutation_rates = np.random.rand(self.population_size) * 0.1 + crossover_rates = np.random.rand(self.population_size) * 0.8 + 0.2 + return population, mutation_rates, crossover_rates + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, individual, mutation_rate): + mutation_mask = np.random.rand(self.dimension) < mutation_rate + individual[mutation_mask] += np.random.normal(0, 1, np.sum(mutation_mask)) + return np.clip(individual, self.bounds[0], self.bounds[1]) + + def crossover(self, parent1, parent2, crossover_rate): + child = np.where(np.random.rand(self.dimension) < crossover_rate, parent1, parent2) + return child + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def local_search(self, elite): + perturbation = np.random.normal(0, 0.05, self.dimension) + candidate = elite + perturbation + return np.clip(candidate, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + population, mutation_rates, crossover_rates = self.initialize() + best_fitness = np.inf + best_individual = None + + evaluations = 0 + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + if np.min(fitness) < best_fitness: + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)].copy() + + elites, elite_fitness = self.select_elites(population, fitness) + new_population = elites.tolist() # Start next gen with elites + + while len(new_population) < self.population_size: + idx1, idx2 = np.random.choice(self.population_size, 2, replace=False) + child = self.crossover(population[idx1], population[idx2], crossover_rates[idx1]) + child = self.mutate(child, mutation_rates[idx1]) + new_population.append(child) + + population = np.array(new_population) + if evaluations // self.population_size % 5 == 0: + for i in range(len(elites)): + elites[i] = self.local_search(elites[i]) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DASOGG.py b/nevergrad/optimization/lama/DASOGG.py new file mode 100644 index 000000000..9cdc5a62e --- /dev/null +++ b/nevergrad/optimization/lama/DASOGG.py @@ -0,0 +1,60 @@ +import numpy as np + + +class DASOGG: + def __init__(self, budget, population_size=50, spiral_rate=0.6, beta=0.3, gradient_descent_factor=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.spiral_rate = spiral_rate + self.beta = beta + self.gradient_descent_factor = gradient_descent_factor + + def __call__(self, func): + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + velocities = np.zeros((self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Evolutionary loop + while num_evals < self.budget: + for i in range(self.population_size): + r1, r2, r3 = np.random.rand(3) # Random coefficients + # Dynamic spiral updating rule with gradient guidance + dynamic_adjustment = self.beta * np.tanh(num_evals / self.budget) + local_gradient = best_individual - population[i] + global_gradient = np.random.uniform(self.lb, self.ub, self.dimension) - population[i] + + velocities[i] = ( + r1 * velocities[i] + + r2 * self.spiral_rate * local_gradient + + dynamic_adjustment * global_gradient + + r3 * self.gradient_descent_factor * local_gradient + ) + + # Update position + population[i] += velocities[i] + population[i] = np.clip(population[i], self.lb, self.ub) + + # Evaluate + updated_fitness = func(population[i]) + num_evals += 1 + + # Selection + if updated_fitness < fitness[i]: + fitness[i] = updated_fitness + if updated_fitness < best_fitness: + best_fitness = updated_fitness + best_individual = population[i] + + if num_evals >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DDCEA.py b/nevergrad/optimization/lama/DDCEA.py new file mode 100644 index 000000000..a28add0f8 --- /dev/null +++ b/nevergrad/optimization/lama/DDCEA.py @@ -0,0 +1,66 @@ +import numpy as np + + +class DDCEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.elite_size = 5 # Reduced number of elites for more diversity + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, diversity_measure): + F = 0.5 * (2 - diversity_measure) # Adaptive mutation factor based on diversity + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(np.delete(np.arange(self.population_size), best_index), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = a + F * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, diversity_measure): + CR = 0.5 + 0.5 * diversity_measure # Higher crossover when diversity is high + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def calculate_diversity(self, population): + mean_population = np.mean(population, axis=0) + diversity = np.mean(np.sqrt(np.sum((population - mean_population) ** 2, axis=1))) + normalized_diversity = diversity / np.sqrt(self.dimension * (self.bounds[1] - self.bounds[0]) ** 2) + return normalized_diversity + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + diversity_measure = self.calculate_diversity(population) + mutants = self.mutate(population, best_index, diversity_measure) + trials = np.array( + [ + self.crossover(population[i], mutants[i], diversity_measure) + for i in range(self.population_size) + ] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + # Selection with elitism + combined_population = np.vstack((population, trials)) + combined_fitness = np.hstack((fitness, fitness_trials)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + best_index = np.argmin(fitness) + + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/DDPO.py b/nevergrad/optimization/lama/DDPO.py new file mode 100644 index 000000000..2d6833699 --- /dev/null +++ b/nevergrad/optimization/lama/DDPO.py @@ -0,0 +1,69 @@ +import numpy as np + + +class DDPO: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 40 + population = np.random.uniform(*self.bounds, (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def dual_search(self, population, func): + global_best_fitness = np.Inf + global_best_individual = None + local_best_fitness = np.Inf + local_best_individual = None + + evaluations = 0 + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + # Update best solutions + best_idx = np.argmin(fitness) + if fitness[best_idx] < global_best_fitness: + global_best_fitness = fitness[best_idx] + global_best_individual = population[best_idx] + + # Global exploration + exploration_population = np.random.uniform(*self.bounds, (len(population) // 2, self.dimension)) + exploration_fitness = self.evaluate(exploration_population, func) + evaluations += len(exploration_population) + + # Local exploitation + perturbations = np.random.normal(0, 0.1, (len(population) // 2, self.dimension)) + exploitation_population = population[: len(population) // 2] + perturbations + exploitation_population = np.clip(exploitation_population, *self.bounds) + exploitation_fitness = self.evaluate(exploitation_population, func) + evaluations += len(exploitation_population) + + # Combine and select + combined_population = np.vstack([exploration_population, exploitation_population]) + combined_fitness = np.concatenate([exploration_fitness, exploitation_fitness]) + if np.min(combined_fitness) < local_best_fitness: + local_best_fitness = np.min(combined_fitness) + local_best_individual = combined_population[np.argmin(combined_fitness)] + + # Feedback-driven dynamic adjustments + if local_best_fitness < global_best_fitness * 1.05: # Detection of potential local optimum + perturbations = np.random.normal( + 0, 0.5, (len(population), self.dimension) + ) # Enhanced exploration + population = global_best_individual + perturbations + population = np.clip(population, *self.bounds) + else: + population = combined_population + + return global_best_fitness, global_best_individual + + def __call__(self, func): + initial_population, _ = self.initialize() + best_fitness, best_solution = self.dual_search(initial_population, func) + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DEAMC.py b/nevergrad/optimization/lama/DEAMC.py new file mode 100644 index 000000000..ac439ae08 --- /dev/null +++ b/nevergrad/optimization/lama/DEAMC.py @@ -0,0 +1,65 @@ +import numpy as np + + +class DEAMC: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.F_base = 0.8 # Base mutation scaling factor + self.CR_base = 0.9 # Base crossover probability + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, idx, population): + indices = np.delete(np.arange(self.population_size), idx) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + self.F_base * (b - c) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.CR_base + return np.where(cross_points, mutant, target) + + def select(self, target, trial, f_target, f_trial): + return trial if f_trial < f_target else target + + def adapt_parameters(self, successes, trials): + self.F_base *= 0.95 if successes / trials < 0.2 else 1.05 + self.F_base = min(max(self.F_base, 0.1), 1.0) # Keep F within [0.1, 1.0] + self.CR_base = 0.1 + 0.8 * successes / trials # Keep CR adaptive within [0.1, 0.9] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = len(population) + + while evaluations < self.budget: + new_population = np.copy(population) + successful_trials = 0 + + for i in range(self.population_size): + mutant = self.mutation(i, population) + trial = self.crossover(population[i], mutant) + f_trial = func(trial) + f_target = fitness[i] + + if f_trial < f_target: + new_population[i] = trial + fitness[i] = f_trial + successful_trials += 1 + + evaluations += 1 + if evaluations >= self.budget: + break + + self.adapt_parameters(successful_trials, self.population_size) + population = new_population + + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/DEAMC_DSR.py b/nevergrad/optimization/lama/DEAMC_DSR.py new file mode 100644 index 000000000..a3da78c74 --- /dev/null +++ b/nevergrad/optimization/lama/DEAMC_DSR.py @@ -0,0 +1,89 @@ +import numpy as np + + +class DEAMC_DSR: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.F_base = 0.8 # Base mutation scaling factor + self.CR_base = 0.9 # Base crossover probability + self.stagnation_threshold = 30 # Threshold for stagnation detection + self.no_improvement_intervals = 0 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, idx, population): + indices = np.delete(np.arange(self.population_size), idx) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.F_base * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.CR_base + return np.where(cross_points, mutant, target) + + def select(self, target, trial, f_target, f_trial): + return trial if f_trial < f_target else target + + def local_search(self, best_individual, func): + step_size = 0.1 + for _ in range(10): # Perform 10 steps of local search + neighbor = best_individual + np.random.uniform(-step_size, step_size, self.dimension) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + if func(neighbor) < func(best_individual): + best_individual = neighbor + return best_individual + + def adapt_and_refine(self, successes, trials, best_idx, population, fitness, func): + if successes / trials < 0.1: # High stagnation detected + self.no_improvement_intervals += 1 + else: + self.no_improvement_intervals = 0 + + self.F_base = max(0.1, self.F_base * (0.95 if successes / trials < 0.2 else 1.05)) + self.CR_base = 0.1 + 0.8 * successes / trials # Adaptive CR within [0.1, 0.9] + + # Trigger local search on stagnation + if self.no_improvement_intervals >= self.stagnation_threshold: + population[best_idx] = self.local_search(population[best_idx], func) + fitness[best_idx] = func(population[best_idx]) + self.no_improvement_intervals = 0 # reset after local search + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + + while evaluations < self.budget: + new_population = np.copy(population) + successful_trials = 0 + + for i in range(self.population_size): + mutant = self.mutation(i, population) + trial = self.crossover(population[i], mutant) + f_trial = func(trial) + f_target = fitness[i] + + if f_trial < f_target: + new_population[i] = trial + fitness[i] = f_trial + successful_trials += 1 + + evaluations += 1 + if evaluations >= self.budget: + break + + self.adapt_and_refine( + successful_trials, self.population_size, best_idx, population, fitness, func + ) + population = new_population + best_idx = np.argmin(fitness) + + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/DEAMC_LSI.py b/nevergrad/optimization/lama/DEAMC_LSI.py new file mode 100644 index 000000000..76e05a504 --- /dev/null +++ b/nevergrad/optimization/lama/DEAMC_LSI.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DEAMC_LSI: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.F_base = 0.8 # Base mutation scaling factor + self.CR_base = 0.9 # Base crossover probability + self.local_search_interval = 50 # Perform local search every 50 evaluations + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, idx, population): + indices = np.delete(np.arange(self.population_size), idx) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + self.F_base * (b - c) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.CR_base + return np.where(cross_points, mutant, target) + + def select(self, target, trial, f_target, f_trial): + return trial if f_trial < f_target else target + + def local_search(self, best_individual, func): + step_size = 0.1 + for _ in range(10): # Perform 10 steps of local search + neighbor = best_individual + np.random.uniform(-step_size, step_size, self.dimension) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + if func(neighbor) < func(best_individual): + best_individual = neighbor + return best_individual + + def adapt_parameters(self, successes, trials): + self.F_base *= 0.95 if successes / trials < 0.2 else 1.05 + self.F_base = min(max(self.F_base, 0.1), 1.0) # Keep F within [0.1, 1.0] + self.CR_base = 0.1 + 0.8 * successes / trials # Keep CR adaptive within [0.1, 0.9] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + + while evaluations < self.budget: + new_population = np.copy(population) + successful_trials = 0 + + for i in range(self.population_size): + mutant = self.mutation(i, population) + trial = self.crossover(population[i], mutant) + f_trial = func(trial) + f_target = fitness[i] + + if f_trial < f_target: + new_population[i] = trial + fitness[i] = f_trial + successful_trials += 1 + + evaluations += 1 + if evaluations >= self.budget: + break + + # Adapt parameters and possibly perform local search + self.adapt_parameters(successful_trials, self.population_size) + if evaluations % self.local_search_interval == 0: + best_idx = np.argmin(fitness) + population[best_idx] = self.local_search(population[best_idx], func) + fitness[best_idx] = func(population[best_idx]) + + population = new_population + + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/DEWithNelderMead.py b/nevergrad/optimization/lama/DEWithNelderMead.py new file mode 100644 index 000000000..39320d6f5 --- /dev/null +++ b/nevergrad/optimization/lama/DEWithNelderMead.py @@ -0,0 +1,65 @@ +import numpy as np +from scipy.optimize import minimize + + +class DEWithNelderMead: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.pop_size = 20 + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_prob = 0.1 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def nelder_mead(self, x, func): + result = minimize(func, x, method="Nelder-Mead", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + for i in range(self.pop_size): + # Select three distinct individuals (but different from i) + indices = np.arange(self.pop_size) + indices = indices[indices != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Differential Evolution mutation and crossover + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Local Search with a small probability + if np.random.rand() < self.local_search_prob and evaluations + 1 <= self.budget: + trial, f_trial = self.nelder_mead(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Check if we've exhausted our budget + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DHDGE.py b/nevergrad/optimization/lama/DHDGE.py new file mode 100644 index 000000000..7cdcc4904 --- /dev/null +++ b/nevergrad/optimization/lama/DHDGE.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DHDGE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Adaptive parameters + mutation_factor = 0.8 + crossover_rate = 0.7 + gradient_learning_rate = 0.1 + + while num_evals < self.budget: + # Gradient estimation for top performers + elite_idxs = np.argsort(fitness)[: population_size // 5] + gradients = np.zeros_like(population) + + for idx in elite_idxs: + perturbation = np.random.normal( + 0, 0.1 * (self.upper_bound - self.lower_bound), self.dimension + ) + perturbed_individual = np.clip( + population[idx] + perturbation, self.lower_bound, self.upper_bound + ) + perturbed_fitness = func(perturbed_individual) + num_evals += 1 + + if num_evals >= self.budget: + break + + gradient = (perturbed_fitness - fitness[idx]) / (perturbation + 1e-8) + gradients[idx] = -gradient + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Mutation with gradient influence + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = ( + population[a] + + mutation_factor * (population[b] - population[c]) + + gradient_learning_rate * gradients[a] + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DLASS.py b/nevergrad/optimization/lama/DLASS.py new file mode 100644 index 000000000..a47ac33fb --- /dev/null +++ b/nevergrad/optimization/lama/DLASS.py @@ -0,0 +1,66 @@ +import numpy as np + + +class DLASS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.global_mutation_factor = 0.8 + self.local_mutation_factor = 0.1 + self.crossover_prob = 0.7 + self.layer_switch_interval = 100 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, scaling_factor): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + scaling_factor * (b - c), self.bounds[0], self.bounds[1]) + new_population[i] = mutant + return new_population + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dimension) < self.crossover_prob + return np.where(cross_points, mutant, target) + + def select(self, population, fitness, new_population, new_fitness): + for i in range(self.population_size): + if new_fitness[i] < fitness[i]: + population[i], fitness[i] = new_population[i], new_fitness[i] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + current_layer = "global" + + while evaluations < self.budget: + if evaluations % self.layer_switch_interval == 0: + current_layer = "local" if current_layer == "global" else "global" + scaling_factor = ( + self.global_mutation_factor if current_layer == "global" else self.local_mutation_factor + ) + mutated_population = self.mutate(population, best_idx, scaling_factor) + trial_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + trial_fitness = self.evaluate(trial_population, func) + evaluations += self.population_size + + self.select(population, fitness, trial_population, trial_fitness) + best_idx = np.argmin(fitness) + if fitness[best_idx] < best_fitness: + best_fitness, best_solution = fitness[best_idx], population[best_idx] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DMDE.py b/nevergrad/optimization/lama/DMDE.py new file mode 100644 index 000000000..aa35475ce --- /dev/null +++ b/nevergrad/optimization/lama/DMDE.py @@ -0,0 +1,90 @@ +import numpy as np + + +class DMDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.8, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Track elite solutions + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Track the best solution found + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % (self.budget // 10) == 0: # Update elite more frequently + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices] + elite_fitness = fitness[elite_indices] + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic oscillation + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + best = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best - population[i] + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memorize replaced solutions + memory_idx = np.argmax(memory_fitness) + if trial_fitness < memory_fitness[memory_idx]: + memory[memory_idx] = population[i] + memory_fitness[memory_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DMDESM.py b/nevergrad/optimization/lama/DMDESM.py new file mode 100644 index 000000000..dccd88060 --- /dev/null +++ b/nevergrad/optimization/lama/DMDESM.py @@ -0,0 +1,89 @@ +import numpy as np + + +class DMDESM: + def __init__( + self, + budget, + population_size=60, + crossover_rate=0.85, + F_base=0.6, + F_amp=0.4, + memory_size=110, + elite_size=15, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx].copy() + elite_fitness = fitness[elite_idx].copy() + + for i in range(self.population_size): + # Adaptive mutation factor + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: Hybrid strategy using elite and memory dynamically + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mem_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (mem_or_elite - a + b - c), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory by replacing the worst entry + worst_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_idx]: + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DMES.py b/nevergrad/optimization/lama/DMES.py new file mode 100644 index 000000000..0d15482ce --- /dev/null +++ b/nevergrad/optimization/lama/DMES.py @@ -0,0 +1,66 @@ +import numpy as np + + +class DMES: + def __init__(self, budget, population_size=50, f_initial=0.5, cr_initial=0.5): + self.budget = budget + self.population_size = population_size + self.f_initial = f_initial # Initial scaling factor + self.cr_initial = cr_initial # Initial crossover rate + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Initialize mutation and crossover factors dynamically + f = np.full(self.population_size, self.f_initial) + cr = np.full(self.population_size, self.cr_initial) + + while num_evals < self.budget: + for i in range(self.population_size): + # Dynamic adjustment of f and cr based on individual performance + f[i] = np.clip(self.f_initial * (1 - (fitness[i] - best_fitness)), 0.1, 0.9) + cr[i] = np.clip(self.cr_initial * (1 - (fitness[i] - best_fitness)), 0.1, 0.9) + + indices = np.random.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + + # Mutation + mutant = x0 + f[i] * (x1 - x2) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + crossover_mask = np.random.rand(self.dimension) < cr[i] + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(crossover_mask, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of DMES: +# optimizer = DMES(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/DNAS.py b/nevergrad/optimization/lama/DNAS.py new file mode 100644 index 000000000..7282d61db --- /dev/null +++ b/nevergrad/optimization/lama/DNAS.py @@ -0,0 +1,75 @@ +import numpy as np + + +class DNAS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.learning_rate = 0.1 + self.mutation_scale = 0.8 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best, worst, F): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.arange(len(population)), 2, replace=False) + x1, x2 = population[idxs] + mutant_vector = np.clip( + x1 + F * (x2 - population[i]) + self.learning_rate * (best - worst), + self.bounds[0], + self.bounds[1], + ) + new_population[i] = mutant_vector + return new_population + + def crossover(self, target, mutant, CR): + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + previous_best = best_fitness + + while evaluations < self.budget: + F = np.random.normal(self.mutation_scale, 0.1) * (1 + 0.1 * np.random.rand()) + CR = 0.1 + 0.5 * np.random.rand() + worst_index = np.argmax(fitness) + mutants = self.mutate(population, population[best_index], population[worst_index], F) + trials = np.array( + [self.crossover(population[i], mutants[i], CR) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + if fitness[i] < best_fitness: + best_fitness = fitness[i] + best_index = i + + if best_fitness < previous_best: + self.learning_rate *= 1.1 + previous_best = best_fitness + else: + self.learning_rate *= 0.9 + if np.random.rand() < 0.1: + population[ + np.random.choice(len(population), size=int(self.population_size * 0.1), replace=False) + ] = np.random.uniform( + self.bounds[0], self.bounds[1], (int(self.population_size * 0.1), self.dimension) + ) + + return best_fitness, population[best_index] diff --git a/nevergrad/optimization/lama/DPADE.py b/nevergrad/optimization/lama/DPADE.py new file mode 100644 index 000000000..4ed0af32f --- /dev/null +++ b/nevergrad/optimization/lama/DPADE.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DPADE: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_population(self, population, func): + return np.array([func(ind) for ind in population]) + + def adaptive_parameters(self, iteration, max_iter): + # Adjust F and CR over time + F = 0.5 + (0.8 - 0.5) * (1 - iteration / max_iter) + CR = 0.5 + (0.9 - 0.5) * (iteration / max_iter) + return F, CR + + def mutation(self, population, best_idx, F): + new_population = np.zeros_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + new_population[i] = np.clip(mutant, self.lower_bound, self.upper_bound) + return new_population + + def hybrid_mutation(self, population, best_individual, F): + # Second strategy using best individual + new_population = np.zeros_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + b, c = np.random.choice(idxs, 2, replace=False) + mutant = best_individual + F * (population[b] - population[c]) + new_population[i] = np.clip(mutant, self.lower_bound, self.upper_bound) + return new_population + + def crossover(self, population, mutant_population, CR): + crossover_population = np.array( + [ + np.where(np.random.rand(self.dimension) < CR, mutant_population[i], population[i]) + for i in range(self.population_size) + ] + ) + return crossover_population + + def select(self, population, fitness, trial_population, func): + trial_fitness = self.evaluate_population(trial_population, func) + for i in range(self.population_size): + if trial_fitness[i] < fitness[i]: + fitness[i] = trial_fitness[i] + population[i] = trial_population[i] + return population, fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(population, func) + best_idx = np.argmin(fitness) + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + F, CR = self.adaptive_parameters(iteration, max_iterations) + mutant_population = self.mutation(population, best_idx, F) + mutant_population = ( + mutant_population + self.hybrid_mutation(population, population[best_idx], F) + ) / 2 + crossover_population = self.crossover(population, mutant_population, CR) + population, fitness = self.select(population, fitness, crossover_population, func) + evaluations += self.population_size + best_idx = np.argmin(fitness) + + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DPES.py b/nevergrad/optimization/lama/DPES.py new file mode 100644 index 000000000..ad167ee05 --- /dev/null +++ b/nevergrad/optimization/lama/DPES.py @@ -0,0 +1,69 @@ +import numpy as np + + +class DPES: + def __init__(self, budget, population_size=50, initial_step=0.5, step_reduction=0.98, learning_rate=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_step = initial_step + self.step_reduction = step_reduction + self.learning_rate = learning_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + step_size = self.initial_step + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.zeros_like(population) + + # Evolve each individual + for i in range(self.population_size): + perturbation = np.random.randn(self.dimension) * step_size + candidate = population[i] + perturbation + candidate = np.clip(candidate, self.lb, self.ub) + candidate_fitness = func(candidate) + num_evals += 1 + + # Selection process + if candidate_fitness < fitness[i]: + new_population[i] = candidate + fitness[i] = candidate_fitness + # Update best found solution + if candidate_fitness < best_fitness: + best_fitness = candidate_fitness + best_individual = candidate.copy() + else: + new_population[i] = population[i] + + # Adaptive step-size control + step_size *= self.step_reduction + + # Learning phase - update based on the best individual + for j in range(self.population_size): + if np.random.rand() < 0.5: # Learning probability + direction = best_individual - population[j] + new_population[j] += self.learning_rate * direction + new_population[j] = np.clip(new_population[j], self.lb, self.ub) + # Evaluate the new individual + new_fitness = func(new_population[j]) + num_evals += 1 + if new_fitness < fitness[j]: + population[j] = new_population[j] + fitness[j] = new_fitness + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[j].copy() + else: + population[j] = new_population[j] + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DSDE.py b/nevergrad/optimization/lama/DSDE.py new file mode 100644 index 000000000..a13f3bb6b --- /dev/null +++ b/nevergrad/optimization/lama/DSDE.py @@ -0,0 +1,66 @@ +import numpy as np + + +class DSDE: + def __init__(self, budget, population_size=50, crossover_rate=0.8, base_scaling_factor=0.5): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.base_scaling_factor = base_scaling_factor + self.dimension = 5 # Given dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + # Find the best solution in the initial population + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size # Initial population evaluation + + while num_evals < self.budget: + for i in range(self.population_size): + # Mutate using a dynamic scaling factor that adapts with progress + progress = num_evals / self.budget + scaling_factor = self.base_scaling_factor + (1 - progress) * (0.9 - self.base_scaling_factor) + + # Mutation using the "DE/rand/1" strategy + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + scaling_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) # Ensure mutant is within bounds + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + # Stop if budget is exhausted + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of DSDE: +# optimizer = DSDE(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/DSEDES.py b/nevergrad/optimization/lama/DSEDES.py new file mode 100644 index 000000000..cf4afdabe --- /dev/null +++ b/nevergrad/optimization/lama/DSEDES.py @@ -0,0 +1,66 @@ +import numpy as np + + +class DSEDES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.elite_size = 10 + self.mutation_factor = 0.15 + self.crossover_prob = 0.7 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, current_index): + indices = [i for i in range(self.population_size) if i != current_index] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + self.mutation_factor * (population[b] - population[c]) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def crossover(self, target, mutant): + mask = np.random.rand(self.dimension) < self.crossover_prob + trial = np.where(mask, mutant, target) + return trial + + def select(self, target, trial, target_fitness, trial_fitness): + if trial_fitness < target_fitness: + return trial, trial_fitness + else: + return target, target_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + best_index = np.argmin(fitness) + best_individual = population[best_index].copy() + best_fitness = fitness[best_index] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + mutant = self.mutate(population, best_index, i) + trial = self.crossover(population[i], mutant) + trial_fitness = func(trial) + evaluations += 1 + + population[i], fitness[i] = self.select(population[i], trial, fitness[i], trial_fitness) + + if fitness[i] < best_fitness: + best_fitness = fitness[i] + best_individual = population[i].copy() + + if evaluations % 100 == 0: + self.mutation_factor = np.clip( + self.mutation_factor * (0.95 if np.random.rand() < 0.5 else 1.05), 0.05, 1 + ) + self.crossover_prob = np.clip( + self.crossover_prob + (0.05 if np.random.rand() < 0.5 else -0.05), 0.1, 0.9 + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DifferentialEvolutionAdaptiveCrossover.py b/nevergrad/optimization/lama/DifferentialEvolutionAdaptiveCrossover.py new file mode 100644 index 000000000..829cd3698 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionAdaptiveCrossover.py @@ -0,0 +1,55 @@ +import numpy as np + + +class DifferentialEvolutionAdaptiveCrossover: + def __init__(self, budget=10000, population_size=50, F=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight + self.CR = CR # Crossover probability + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize population + pop = np.random.uniform(lb, ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = pop[best_idx] + + # Evolutionary loop + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation and Crossover + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), lb, ub) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + pop[i] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive Crossover Rate Adjustment + if evaluations % 100 == 0: # Adjust every 100 evaluations + mean_fitness = np.mean(fitness) + # Increase CR if progress is being made + if mean_fitness < self.f_opt: + self.CR = min(self.CR + 0.1, 1.0) + else: + self.CR = max(self.CR - 0.1, 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialEvolutionAdaptivePSO.py b/nevergrad/optimization/lama/DifferentialEvolutionAdaptivePSO.py new file mode 100644 index 000000000..8a97e7034 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionAdaptivePSO.py @@ -0,0 +1,87 @@ +import numpy as np + + +class DifferentialEvolutionAdaptivePSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for PSO + self.population_size = 100 + self.w_min = 0.4 + self.w_max = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.velocity_limit = 0.2 + + # Parameters for DE + self.F = 0.8 + self.CR = 0.9 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocity = np.random.uniform( + -self.velocity_limit, self.velocity_limit, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_position = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + + while evaluations < self.budget: + w = self.w_max - ((self.w_max - self.w_min) * (evaluations / self.budget)) + + for i in range(self.population_size): + # PSO Update + r1, r2 = np.random.rand(2) + velocity[i] = ( + w * velocity[i] + + self.c1 * r1 * (personal_best_position[i] - population[i]) + + self.c2 * r2 * (self.x_opt - population[i]) + ) + + # Adaptive velocity clamping + velocity_magnitude = np.linalg.norm(velocity[i]) + if velocity_magnitude > self.velocity_limit: + velocity[i] = (velocity[i] / velocity_magnitude) * self.velocity_limit + + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + + # DE Update + if np.random.rand() < 0.5: + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + self.F * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < self.CR: + trial_vector[j] = mutant_vector[j] + + new_position = trial_vector + + f_candidate = func(new_position) + evaluations += 1 + + if f_candidate < personal_best_fitness[i]: + personal_best_position[i] = new_position.copy() + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = new_position.copy() + + population[i] = new_position + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialEvolutionHybrid.py b/nevergrad/optimization/lama/DifferentialEvolutionHybrid.py new file mode 100644 index 000000000..88cc9a166 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionHybrid.py @@ -0,0 +1,49 @@ +import numpy as np + + +class DifferentialEvolutionHybrid: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.mutation_factor = 0.8 + self.crossover_prob = 0.7 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 20 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation + indices = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + self.mutation_factor * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Crossover + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < self.crossover_prob + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/DifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..45d69c38e --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionOptimizer.py @@ -0,0 +1,47 @@ +import numpy as np + + +class DifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, F=0.8, CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.F = F # Differential weight + self.CR = CR # Crossover probability + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Evolutionary loop + while self.eval_count < self.budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + + if self.eval_count >= self.budget: + break + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialEvolutionPSOHybrid.py b/nevergrad/optimization/lama/DifferentialEvolutionPSOHybrid.py new file mode 100644 index 000000000..8253fd42c --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionPSOHybrid.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DifferentialEvolutionPSOHybrid: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.F = 0.5 # Mutation factor + self.CR = 0.9 # Crossover rate + self.c1 = 1.5 # Cognitive parameter + self.c2 = 1.5 # Social parameter + self.w = 0.5 # Inertia weight + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3): + return parent1 + self.F * (parent2 - parent3) + + def crossover(self, target, mutant): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < self.CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parent1, parent2, parent3 = self.select_parents(population) + mutant = self.mutate(parent1, parent2, parent3) + trial = self.crossover(population[i], mutant) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialEvolutionSearch.py b/nevergrad/optimization/lama/DifferentialEvolutionSearch.py new file mode 100644 index 000000000..802f8ae1b --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialEvolutionSearch.py @@ -0,0 +1,68 @@ +import numpy as np + + +class DifferentialEvolutionSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize parameters + population_size = 20 + crossover_rate = 0.9 + differential_weight = 0.8 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = population_size + + while evaluations < self.budget: + new_population = np.copy(population) + + for i in range(population_size): + # Randomly select three indices that are not the current index + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Generate trial vector + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + # Selection + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + population = new_population + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = DifferentialEvolutionSearch(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/DifferentialFireworkAlgorithm.py b/nevergrad/optimization/lama/DifferentialFireworkAlgorithm.py new file mode 100644 index 000000000..8bc80a6f7 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialFireworkAlgorithm.py @@ -0,0 +1,52 @@ +import numpy as np + + +class DifferentialFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, scaling_factor=0.5, crossover_rate=0.9): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.scaling_factor = scaling_factor + self.crossover_rate = crossover_rate + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scaling_factor, firework + self.scaling_factor, (self.n_sparks, self.dim) + ) + return sparks + + def differential_evolution(self, current, target1, target2): + mutant = current + self.scaling_factor * (target1 - target2) + crossover_points = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover_points, mutant, current) + return trial + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = self.differential_evolution(fireworks[i], fireworks[idx1], fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialGradientEvolutionStrategy.py b/nevergrad/optimization/lama/DifferentialGradientEvolutionStrategy.py new file mode 100644 index 000000000..034c6cda5 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialGradientEvolutionStrategy.py @@ -0,0 +1,58 @@ +import numpy as np + + +class DifferentialGradientEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialization parameters + population_size = 50 + mutation_factor = 0.8 + crossover_prob = 0.7 + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Track the best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + evaluations = population_size + + while evaluations < self.budget: + for i in range(population_size): + # Differential evolution mutation and crossover + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial.copy() + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DifferentialGradientEvolutionStrategy(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/DifferentialHarmonySearch.py b/nevergrad/optimization/lama/DifferentialHarmonySearch.py new file mode 100644 index 000000000..1cfa291b6 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialHarmonySearch.py @@ -0,0 +1,49 @@ +import numpy as np + + +class DifferentialHarmonySearch: + def __init__(self, budget=10000, harmony_memory_size=20, hmcr=0.9, par=0.4, bw=0.5): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + new_fitness = func(new_harmony) + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DifferentialMemeticAlgorithm.py b/nevergrad/optimization/lama/DifferentialMemeticAlgorithm.py new file mode 100644 index 000000000..56362ccc3 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialMemeticAlgorithm.py @@ -0,0 +1,71 @@ +import numpy as np + + +class DifferentialMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + self.population_size = 20 + self.mutation_factor = 0.8 + self.crossover_rate = 0.7 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + # Crossover + crossover = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover, mutant, population[i]) + + # Local Search (Simple Hill Climbing) + if np.random.rand() < 0.1: # Small probability to invoke local search + trial = self.local_search(trial, func) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + step_size = 0.1 + best_x = x.copy() + best_f = func(x) + + for i in range(self.dim): + x_new = x.copy() + x_new[i] += step_size * (np.random.rand() * 2 - 1) # Small random perturbation + x_new = np.clip(x_new, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x diff --git a/nevergrad/optimization/lama/DifferentialQuantumMetaheuristic.py b/nevergrad/optimization/lama/DifferentialQuantumMetaheuristic.py new file mode 100644 index 000000000..62b480c74 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialQuantumMetaheuristic.py @@ -0,0 +1,65 @@ +import numpy as np + + +class DifferentialQuantumMetaheuristic: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + quantum_size = 10 + F = 0.5 # Differential weight + CR = 0.9 # Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + eval_count = 0 + convergence_threshold = 1e-6 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + if abs(best_value - candidate_value) < convergence_threshold: + break + + population = new_population + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = DifferentialQuantumMetaheuristic(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/DifferentialSimulatedAnnealingOptimizer.py b/nevergrad/optimization/lama/DifferentialSimulatedAnnealingOptimizer.py new file mode 100644 index 000000000..8ea6ab926 --- /dev/null +++ b/nevergrad/optimization/lama/DifferentialSimulatedAnnealingOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class DifferentialSimulatedAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize parameters + T = 1.0 # Initial temperature for simulated annealing + T_min = 0.001 # Minimum temperature to stop annealing + alpha = 0.9 # Cooling rate for annealing + mutation_factor = 0.8 # Factor for differential mutation + + # Initialize the population + population_size = 10 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + + # Evaluate the initial population + fitness = np.array([func(ind) for ind in population]) + f_opt = np.min(fitness) + x_opt = population[np.argmin(fitness)] + + # Main optimization loop + evaluation_count = population_size + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + # Differential mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < 0.5 + trial = np.where(cross_points, mutant, population[i]) + + # Simulated annealing acceptance criterion + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cool down the temperature + T *= alpha + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolution.py b/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolution.py new file mode 100644 index 000000000..cb40f80b3 --- /dev/null +++ b/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolution.py @@ -0,0 +1,102 @@ +import numpy as np + + +class DiversityEnhancedAdaptiveGradientEvolution: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = 10 + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + diversity_threshold = 1e-3 + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(self.population_size), size=2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, i) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, i, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worse parent with the new child + worse_parent_idx = ( + parents_idx[0] if fitness[parents_idx[0]] > fitness[parents_idx[1]] else parents_idx[1] + ) + population[worse_parent_idx] = new_x + fitness[worse_parent_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DiversityEnhancedAdaptiveGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolutionV2.py b/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolutionV2.py new file mode 100644 index 000000000..23f357541 --- /dev/null +++ b/nevergrad/optimization/lama/DiversityEnhancedAdaptiveGradientEvolutionV2.py @@ -0,0 +1,104 @@ +import numpy as np + + +class DiversityEnhancedAdaptiveGradientEvolutionV2: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = 20 + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.8 + self.mutation_rate = 0.2 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + diversity_threshold = 1e-2 + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + iteration = 0 + success_count = 0 + while iteration < self.budget: + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(self.population_size), size=2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, iteration) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, iteration, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + iteration += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worse parent with the new child + worse_parent_idx = ( + parents_idx[0] if fitness[parents_idx[0]] > fitness[parents_idx[1]] else parents_idx[1] + ) + population[worse_parent_idx] = new_x + fitness[worse_parent_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DiversityEnhancedAdaptiveGradientEvolutionV2(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DolphinPodOptimization.py b/nevergrad/optimization/lama/DolphinPodOptimization.py new file mode 100644 index 000000000..13df10d76 --- /dev/null +++ b/nevergrad/optimization/lama/DolphinPodOptimization.py @@ -0,0 +1,58 @@ +import numpy as np + + +class DolphinPodOptimization: + def __init__(self, budget=1000, num_dolphins=20, num_dimensions=5, alpha=0.1, beta=0.5, gamma=0.1): + self.budget = budget + self.num_dolphins = num_dolphins + self.num_dimensions = num_dimensions + self.alpha = alpha + self.beta = beta + self.gamma = gamma + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_dolphins, self.num_dimensions)) + + def levy_flight(self): + sigma = 1.0 + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1.5) + return step + + def move_dolphin(self, current_position, best_position, previous_best_position, bounds): + step = ( + self.alpha * (best_position - current_position) + + self.beta * (previous_best_position - current_position) + + self.gamma * self.levy_flight() + ) + new_position = current_position + step + new_position = np.clip(new_position, bounds.lb, bounds.ub) + return new_position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + positions = self.initialize_positions(bounds) + best_position = positions[0].copy() + previous_best_position = best_position.copy() + + for _ in range(self.budget): + for i in range(self.num_dolphins): + new_position = self.move_dolphin(positions[i], best_position, previous_best_position, bounds) + f_new = func(new_position) + f_current = func(positions[i]) + + if f_new < f_current: + positions[i] = new_position + if f_new < func(best_position): + best_position = new_position.copy() + + previous_best_position = best_position + + self.f_opt = func(best_position) + self.x_opt = best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualAdaptiveRestartDE.py b/nevergrad/optimization/lama/DualAdaptiveRestartDE.py new file mode 100644 index 000000000..3db40f6a5 --- /dev/null +++ b/nevergrad/optimization/lama/DualAdaptiveRestartDE.py @@ -0,0 +1,123 @@ +import numpy as np + + +class DualAdaptiveRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_initial = 0.8 # Initial differential weight + CR_initial = 0.9 # Initial crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + diversity_threshold = 0.1 # Threshold for population diversity restart + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def calculate_diversity(population): + mean_point = np.mean(population, axis=0) + diversity = np.mean(np.linalg.norm(population - mean_point, axis=1)) + return diversity + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F_initial) + CR_values = np.full(population_size, CR_initial) + + last_improvement = evaluations + + while evaluations < self.budget: + if ( + evaluations - last_improvement > restart_threshold + or calculate_diversity(population) < diversity_threshold + ): + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F_initial) + CR_values = np.full(population_size, CR_initial) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualAdaptiveSearch.py b/nevergrad/optimization/lama/DualAdaptiveSearch.py new file mode 100644 index 000000000..feb01e74a --- /dev/null +++ b/nevergrad/optimization/lama/DualAdaptiveSearch.py @@ -0,0 +1,54 @@ +import numpy as np + + +class DualAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial random point + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Adaptive parameters + max_scale = 2.0 + min_scale = 0.001 + scale = max_scale + scale_decay_rate = 0.98 + long_term_memory = current_point.copy() + long_term_f = current_f + + # Dual strategy: balance exploration and exploitation + for i in range(1, self.budget): + # Decay the scale + scale *= scale_decay_rate + scale = max(min_scale, scale) + + # Exploration with a probability that decreases over time + if np.random.rand() < 0.5 * (1 - i / self.budget): + # Random exploration within the whole range + candidate = np.random.uniform(-5.0, 5.0, self.dim) + else: + # Exploitation around best known position with decreasing perturbation + perturbation = np.random.normal(0, scale, self.dim) + candidate = long_term_memory + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + + candidate_f = func(candidate) + + # Update if the candidate is better than the current best + if candidate_f < long_term_f: + long_term_memory = candidate + long_term_f = candidate_f + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualConvergenceEvolutiveStrategy.py b/nevergrad/optimization/lama/DualConvergenceEvolutiveStrategy.py new file mode 100644 index 000000000..3096edb1b --- /dev/null +++ b/nevergrad/optimization/lama/DualConvergenceEvolutiveStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class DualConvergenceEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate, mutation_strength): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + for _ in range(num_children): + if np.random.rand() < 0.9: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + population_size = 200 + num_generations = self.budget // population_size + elitism_size = population_size // 10 # 10% elitism + mutation_rate = 0.1 + mutation_strength = 0.75 + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, elitism_size) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + non_elite_size = population_size - elitism_size + offspring = self.crossover(best_population, non_elite_size) + offspring = self.mutate(offspring, mutation_rate, mutation_strength) + population = np.vstack((best_population, offspring)) + + # Adaptive mutation parameters based on generation progress + mutation_rate *= 0.98 + mutation_strength *= 0.98 + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/DualModeOptimization.py b/nevergrad/optimization/lama/DualModeOptimization.py new file mode 100644 index 000000000..d7fdb89cb --- /dev/null +++ b/nevergrad/optimization/lama/DualModeOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DualModeOptimization: + def __init__(self, budget, dimension=5, population_size=20, mutation_scale=0.1, gradient_intensity=5): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.mutation_scale = mutation_scale + self.gradient_intensity = gradient_intensity # Intensity of gradient-based local search + + def __call__(self, func): + # Initialize population within bounds [-5.0, 5.0] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + best_idx = np.argmin(fitness) + f_opt = fitness[best_idx] + x_opt = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + # Tournament selection for mutation + for i in range(self.population_size): + candidates_idx = np.random.choice(self.population_size, 3, replace=False) + candidates_fitness = fitness[candidates_idx] + best_local_idx = np.argmin(candidates_fitness) + target_idx = candidates_idx[best_local_idx] + + # Mutation using differential evolution strategy + r1, r2, r3 = np.random.choice(self.population_size, 3, replace=False) + mutant = population[r1] + self.mutation_scale * (population[r2] - population[r3]) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure mutant is within bounds + + # Evaluate mutant + mutant_fitness = func(mutant) + evaluations += 1 + + # Replace if mutant is better + if mutant_fitness < fitness[target_idx]: + population[target_idx] = mutant + fitness[target_idx] = mutant_fitness + + # Update global optimum + if mutant_fitness < f_opt: + f_opt = mutant_fitness + x_opt = mutant + + if evaluations >= self.budget: + break + + # Perform gradient-based local search on the best solution + if evaluations + self.gradient_intensity <= self.budget: + local_opt = x_opt.copy() + for _ in range(self.gradient_intensity): + gradient = np.array( + [ + (func(local_opt + eps * np.eye(1, self.dimension, k)[0]) - func(local_opt)) / eps + for k, eps in enumerate([1e-5] * self.dimension) + ] + ) + local_opt -= 0.01 * gradient # Small gradient step + local_opt = np.clip(local_opt, -5.0, 5.0) + local_fitness = func(local_opt) + evaluations += 1 + + if local_fitness < f_opt: + f_opt = local_fitness + x_opt = local_opt + else: + break + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/DualPhaseAdaptiveGradientEvolution.py b/nevergrad/optimization/lama/DualPhaseAdaptiveGradientEvolution.py new file mode 100644 index 000000000..9cc796691 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseAdaptiveGradientEvolution.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DualPhaseAdaptiveGradientEvolution: + def __init__(self, budget, dim=5, threshold=0.5, initial_phase_ratio=0.7): + self.budget = budget + self.dim = dim + self.threshold = threshold # Threshold to switch from exploration to exploitation + self.initial_phase_budget = int(budget * initial_phase_ratio) # Budget for the exploration phase + self.exploitation_phase_budget = budget - self.initial_phase_budget + self.bounds = np.array([-5.0, 5.0]) + + def initialize_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def mutate(self, individual, mutation_strength): + mutant = individual + mutation_strength * np.random.randn(self.dim) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + # Phase 1: Exploration with randomly mutating individuals + population_size = 20 + mutation_strength = 0.5 + population = self.initialize_population(population_size) + f_values = self.evaluate_population(func, population) + evaluations = population_size + + while evaluations < self.initial_phase_budget: + new_population = [] + for individual in population: + mutated = self.mutate(individual, mutation_strength) + new_population.append(mutated) + new_f_values = self.evaluate_population(func, new_population) + evaluations += population_size + + # Select the best individuals + combined_f_values = np.concatenate((f_values, new_f_values)) + combined_population = np.vstack((population, new_population)) + best_indices = np.argsort(combined_f_values)[:population_size] + population = combined_population[best_indices] + f_values = combined_f_values[best_indices] + + # Phase 2: Exploitation using gradient descent + best_individual = population[np.argmin(f_values)] + best_f_value = np.min(f_values) + learning_rate = 0.1 + + while evaluations < self.budget: + grad = self.estimate_gradient(func, best_individual) + best_individual = np.clip(best_individual - learning_rate * grad, self.bounds[0], self.bounds[1]) + best_f_value_new = func(best_individual) + evaluations += 1 + + if best_f_value_new < best_f_value: + best_f_value = best_f_value_new + else: + # Reduce learning rate if no improvement + learning_rate *= 0.9 + + return best_f_value, best_individual + + def estimate_gradient(self, func, individual): + grad = np.zeros(self.dim) + base_value = func(individual) + eps = 1e-5 # Small perturbation for numerical gradient + + for i in range(self.dim): + perturbed_individual = np.copy(individual) + perturbed_individual[i] += eps + perturbed_value = func(perturbed_individual) + grad[i] = (perturbed_value - base_value) / eps + + return grad diff --git a/nevergrad/optimization/lama/DualPhaseAdaptiveHybridOptimizerV3.py b/nevergrad/optimization/lama/DualPhaseAdaptiveHybridOptimizerV3.py new file mode 100644 index 000000000..1daa4bd76 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseAdaptiveHybridOptimizerV3.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize + + +class DualPhaseAdaptiveHybridOptimizerV3: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.5 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 10 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + phase_one_budget = int(self.budget * 0.4) + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(-1, 1, self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (phase_one_budget - eval_count) / phase_one_budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + if neighbor_fitness < fitness[i]: + new_population[i] = neighbor + fitness[i] = neighbor_fitness + if neighbor_fitness < best_fitness: + best_individual = neighbor + best_fitness = neighbor_fitness + + if eval_count >= phase_one_budget: + break + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..d58de9533 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,167 @@ +import numpy as np + + +class DualPhaseAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.5 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(10): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolutionV2.py b/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolutionV2.py new file mode 100644 index 000000000..cb80d060a --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseAdaptiveMemeticDifferentialEvolutionV2.py @@ -0,0 +1,168 @@ +import numpy as np + + +class DualPhaseAdaptiveMemeticDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.5 + self.local_search_iters = 5 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced.py b/nevergrad/optimization/lama/DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced.py new file mode 100644 index 000000000..be95033d7 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced.py @@ -0,0 +1,139 @@ +import numpy as np + + +class DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.4 # Increased local search rate + self.memory_size = 5 + self.w = 0.5 # Reduced inertia weight for better exploration + self.c1 = 1.5 # Reduced cognitive component + self.c2 = 2.0 # Increased social component + self.phase_switch_ratio = 0.3 # Earlier phase switch + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/DualPhaseDifferentialEvolution.py b/nevergrad/optimization/lama/DualPhaseDifferentialEvolution.py new file mode 100644 index 000000000..4f87f503f --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseDifferentialEvolution.py @@ -0,0 +1,73 @@ +import numpy as np + + +class DualPhaseDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + eval_count = 0 + phase_switch_threshold = self.budget // 2 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = initial_F * (1 - eval_count / budget) + adaptive_CR = initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if eval_count < phase_switch_threshold: + candidate = trial # Exploration phase, use the trial vector + else: + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + population = new_population + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = DualPhaseDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/DualPhaseOptimizationStrategy.py b/nevergrad/optimization/lama/DualPhaseOptimizationStrategy.py new file mode 100644 index 000000000..912acd40d --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseOptimizationStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class DualPhaseOptimizationStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: # Phase 2 uses a different strategy for mutation + mutant = population[a] + self.F * ( + population[b] - population[c] + population[best_idx] - population[index] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamically adjusting F and CR based on iteration progression + scale = iteration / total_iterations + self.F = np.clip(0.5 * np.sin(np.pi * scale) + 0.5, 0.1, 1) # Oscillating F + self.CR = np.clip(0.5 * np.cos(np.pi * scale) + 0.5, 0.1, 1) # Oscillating CR + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DualPhaseQuantumMemeticSearch.py b/nevergrad/optimization/lama/DualPhaseQuantumMemeticSearch.py new file mode 100644 index 000000000..d1f20d1f7 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseQuantumMemeticSearch.py @@ -0,0 +1,133 @@ +import numpy as np + + +class DualPhaseQuantumMemeticSearch: + def __init__( + self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.6, alpha=0.2, learning_rate=0.01 + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def dual_phase_search(self, func, pop, scores, global_best): + phase1_pop = np.copy(pop) + phase1_scores = np.copy(scores) + + for i in range(self.population_size): + phase1_pop[i] = self.quantum_walk(phase1_pop[i], global_best) + phase1_scores[i] = func(phase1_pop[i]) + + best_idx = np.argmin(phase1_scores) + if phase1_scores[best_idx] < scores[best_idx]: + pop[best_idx] = phase1_pop[best_idx] + scores[best_idx] = phase1_scores[best_idx] + + return pop, scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform dual phase search + pop, scores = self.dual_phase_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPhaseRefinedQuantumLocalSearchOptimizer.py b/nevergrad/optimization/lama/DualPhaseRefinedQuantumLocalSearchOptimizer.py new file mode 100644 index 000000000..fc4040644 --- /dev/null +++ b/nevergrad/optimization/lama/DualPhaseRefinedQuantumLocalSearchOptimizer.py @@ -0,0 +1,92 @@ +import numpy as np + + +class DualPhaseRefinedQuantumLocalSearchOptimizer: + def __init__( + self, + budget, + dim=5, + population_size=60, + elite_size=10, + mutation_intensity=0.05, + local_search_phase1=0.05, + local_search_phase2=0.01, + ): + self.budget = budget + self.dim = dim + self.population_size = population_size + self.elite_size = elite_size + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.mutation_intensity = mutation_intensity + self.local_search_phase1 = local_search_phase1 + self.local_search_phase2 = local_search_phase2 + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(x) for x in population]) + + def select_elites(self, population, fitnesses): + elite_indices = np.argsort(fitnesses)[: self.elite_size] + return population[elite_indices], fitnesses[elite_indices] + + def crossover(self, parent1, parent2): + mask = np.random.rand(self.dim) < 0.5 + offspring = np.where(mask, parent1, parent2) + return offspring + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dim) + mutated = individual + mutation + return np.clip(mutated, self.lower_bound, self.upper_bound) + + def local_search(self, func, candidate, intensity): + for _ in range(10): # perform 10 local search steps + perturbation = np.random.uniform(-intensity, intensity, self.dim) + new_candidate = candidate + perturbation + new_candidate = np.clip(new_candidate, self.lower_bound, self.upper_bound) + if func(new_candidate) < func(candidate): + candidate = new_candidate + return candidate + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations < self.budget / 2: + local_search_intensity = self.local_search_phase1 + else: + local_search_intensity = self.local_search_phase2 + + elites, elite_fitness = self.select_elites(population, fitness) + new_population = elites.copy() # start new population with elites + for _ in range(self.population_size - self.elite_size): + parents = np.random.choice(elites.shape[0], 2, replace=False) + parent1, parent2 = elites[parents[0]], elites[parents[1]] + offspring = self.crossover(parent1, parent2) + offspring = self.mutate(offspring) + offspring = self.local_search( + func, offspring, local_search_intensity + ) # Perform local search on offspring + new_population = np.vstack((new_population, offspring)) + + new_fitness = self.evaluate_population(func, new_population) + + if np.min(new_fitness) < best_fitness: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + + population = new_population + fitness = new_fitness + + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DualPopulationADE.py b/nevergrad/optimization/lama/DualPopulationADE.py new file mode 100644 index 000000000..3df52946e --- /dev/null +++ b/nevergrad/optimization/lama/DualPopulationADE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class DualPopulationADE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.mutation_factor = 0.5 + self.crossover_prob = 0.7 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + while self.budget > 0: + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + self.mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + if self.budget % 100 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Update archive + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + pop = np.array(new_pop) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPopulationAdaptiveSearch.py b/nevergrad/optimization/lama/DualPopulationAdaptiveSearch.py new file mode 100644 index 000000000..3a4e3e752 --- /dev/null +++ b/nevergrad/optimization/lama/DualPopulationAdaptiveSearch.py @@ -0,0 +1,109 @@ +import numpy as np + + +class DualPopulationAdaptiveSearch: + def __init__(self, budget, population_size=30, mutation_rate=0.1, crossover_rate=0.7, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize two populations + pop_exploratory = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + pop_exploitative = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + + scores_exploratory = np.array([func(ind) for ind in pop_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + best_idx_exploratory = np.argmin(scores_exploratory) + best_idx_exploitative = np.argmin(scores_exploitative) + + global_best_position = pop_exploratory[best_idx_exploratory] + global_best_score = scores_exploratory[best_idx_exploratory] + + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + evaluations = 2 * self.population_size + + while evaluations < self.budget: + # Exploratory Population: Tournament selection and blend crossover (BLX-α) + selected_exploratory = [] + for _ in range(self.population_size): + i, j = np.random.randint(0, self.population_size, 2) + if scores_exploratory[i] < scores_exploitative[j]: + selected_exploratory.append(pop_exploratory[i]) + else: + selected_exploratory.append(pop_exploitative[j]) + selected_exploratory = np.array(selected_exploratory) + + offspring_exploratory = [] + for i in range(0, self.population_size, 2): + if i + 1 >= self.population_size: + break + parent1, parent2 = selected_exploratory[i], selected_exploratory[i + 1] + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-0.5, 1.5, dim) + child1 = alpha * parent1 + (1 - alpha) * parent2 + child2 = alpha * parent2 + (1 - alpha) * parent1 + else: + child1, child2 = parent1, parent2 + offspring_exploratory.extend([child1, child2]) + offspring_exploratory = np.array(offspring_exploratory[: self.population_size]) + + # Mutation for Exploratory Population + for i in range(self.population_size): + if np.random.rand() < self.mutation_rate: + offspring_exploratory[i] += np.random.normal(0, 0.1, dim) + offspring_exploratory[i] = np.clip(offspring_exploratory[i], lower_bound, upper_bound) + + # Exploitative Population: Gradient-based local search + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop_exploitative[i]) + pop_exploitative[i] = np.clip( + pop_exploitative[i] - self.learning_rate * grad, lower_bound, upper_bound + ) + + # Evaluate offspring + scores_offspring_exploratory = np.array([func(ind) for ind in offspring_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + evaluations += self.population_size # Exploratory evaluations + evaluations += self.population_size # Exploitative evaluations + + # Update exploratory population and scores + pop_exploratory, scores_exploratory = offspring_exploratory, scores_offspring_exploratory + + # Update global best from both populations + best_idx_exploratory = np.argmin(scores_exploratory) + if scores_exploratory[best_idx_exploratory] < global_best_score: + global_best_score = scores_exploratory[best_idx_exploratory] + global_best_position = pop_exploratory[best_idx_exploratory] + + best_idx_exploitative = np.argmin(scores_exploitative) + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPopulationCovarianceMatrixGradientSearch.py b/nevergrad/optimization/lama/DualPopulationCovarianceMatrixGradientSearch.py new file mode 100644 index 000000000..d003a7e15 --- /dev/null +++ b/nevergrad/optimization/lama/DualPopulationCovarianceMatrixGradientSearch.py @@ -0,0 +1,179 @@ +import numpy as np + + +class DualPopulationCovarianceMatrixGradientSearch: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + gradient_steps=10, + gradient_search_fraction=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + self.gradient_steps = gradient_steps # number of gradient descent steps + self.gradient_search_fraction = ( + gradient_search_fraction # fraction of budget allocated to gradient search + ) + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x, budget): + eps = 1e-8 + for _ in range(self.gradient_steps): + if budget <= 0: + break + + grad = np.zeros_like(x) + fx = func(x) + budget -= 1 + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + budget -= 1 + if budget <= 0: + break + + x -= self.learning_rate * grad + x = np.clip(x, -5.0, 5.0) + + return x, budget + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop_main = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores_main = np.array([func(ind) for ind in pop_main]) + + pop_grad = np.copy(pop_main) + scores_grad = np.copy(scores_main) + + evaluations = self.population_size * 2 + max_iterations = self.budget // (self.population_size * 2) + + # Initialize global best + global_best_score = np.inf + global_best_position = None + + for pop, scores in [(pop_main, scores_main), (pop_grad, scores_grad)]: + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + for iteration in range(max_iterations): + # Main population update + mean = np.mean(pop_main, axis=0) + C = np.cov(pop_main.T) + sigma = self.initial_sigma + + pop_main, scores_main = self.__adaptive_covariance_matrix_adaptation( + func, pop_main, mean, C, sigma + ) + + best_idx = np.argmin(scores_main) + if scores_main[best_idx] < global_best_score: + global_best_score = scores_main[best_idx] + global_best_position = pop_main[best_idx] + + # Gradient-based local search population update + elite_pop, _ = self.__hierarchical_selection(pop_main, scores_main) + budget_remaining = int(self.gradient_search_fraction * (self.budget - evaluations)) + + for i in range(len(elite_pop)): + elite_pop[i], budget_remaining = self.__gradient_local_search( + func, elite_pop[i], budget_remaining + ) + scores_grad[i] = func(elite_pop[i]) + + best_idx = np.argmin(scores_grad) + if scores_grad[best_idx] < global_best_score: + global_best_score = scores_grad[best_idx] + global_best_position = elite_pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop_main, scores_main) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + if iteration == 0: + dim = elite_pop.shape[1] + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1 - 1 / (4.0 * dim) + 1 / (21.0 * dim**2)) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size * 2 + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualPopulationEnhancedSearch.py b/nevergrad/optimization/lama/DualPopulationEnhancedSearch.py new file mode 100644 index 000000000..dfb029665 --- /dev/null +++ b/nevergrad/optimization/lama/DualPopulationEnhancedSearch.py @@ -0,0 +1,113 @@ +import numpy as np + + +class DualPopulationEnhancedSearch: + def __init__( + self, budget, population_size=30, mutation_rate=0.1, crossover_rate=0.7, learning_rate=0.01, alpha=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.learning_rate = learning_rate + self.alpha = alpha # Weight for adaptive learning + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize two populations + pop_exploratory = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + pop_exploitative = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + + scores_exploratory = np.array([func(ind) for ind in pop_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + best_idx_exploratory = np.argmin(scores_exploratory) + best_idx_exploitative = np.argmin(scores_exploitative) + + global_best_position = pop_exploratory[best_idx_exploratory] + global_best_score = scores_exploratory[best_idx_exploratory] + + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + evaluations = 2 * self.population_size + + while evaluations < self.budget: + # Exploratory Population: Tournament selection and blend crossover (BLX-α) + selected_exploratory = [] + for _ in range(self.population_size): + i, j = np.random.randint(0, self.population_size, 2) + if scores_exploratory[i] < scores_exploitative[j]: + selected_exploratory.append(pop_exploratory[i]) + else: + selected_exploratory.append(pop_exploitative[j]) + selected_exploratory = np.array(selected_exploratory) + + offspring_exploratory = [] + for i in range(0, self.population_size, 2): + if i + 1 >= self.population_size: + break + parent1, parent2 = selected_exploratory[i], selected_exploratory[i + 1] + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-self.alpha, 1 + self.alpha, dim) + child1 = alpha * parent1 + (1 - alpha) * parent2 + child2 = alpha * parent2 + (1 - alpha) * parent1 + else: + child1, child2 = parent1, parent2 + offspring_exploratory.extend([child1, child2]) + offspring_exploratory = np.array(offspring_exploratory[: self.population_size]) + + # Mutation for Exploratory Population + for i in range(self.population_size): + if np.random.rand() < self.mutation_rate: + offspring_exploratory[i] += np.random.normal(0, 0.1, dim) + offspring_exploratory[i] = np.clip(offspring_exploratory[i], lower_bound, upper_bound) + + # Exploitative Population: Gradient-based local search with adaptive learning rate + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop_exploitative[i]) + learning_rate_adaptive = self.learning_rate / (1 + evaluations / self.budget) + pop_exploitative[i] = np.clip( + pop_exploitative[i] - learning_rate_adaptive * grad, lower_bound, upper_bound + ) + + # Evaluate offspring + scores_offspring_exploratory = np.array([func(ind) for ind in offspring_exploratory]) + scores_exploitative = np.array([func(ind) for ind in pop_exploitative]) + + evaluations += self.population_size # Exploratory evaluations + evaluations += self.population_size # Exploitative evaluations + + # Update exploratory population and scores + pop_exploratory, scores_exploratory = offspring_exploratory, scores_offspring_exploratory + + # Update global best from both populations + best_idx_exploratory = np.argmin(scores_exploratory) + if scores_exploratory[best_idx_exploratory] < global_best_score: + global_best_score = scores_exploratory[best_idx_exploratory] + global_best_position = pop_exploratory[best_idx_exploratory] + + best_idx_exploitative = np.argmin(scores_exploitative) + if scores_exploitative[best_idx_exploitative] < global_best_score: + global_best_score = scores_exploitative[best_idx_exploitative] + global_best_position = pop_exploitative[best_idx_exploitative] + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/DualStrategyAdaptiveDE.py new file mode 100644 index 000000000..ee3e73862 --- /dev/null +++ b/nevergrad/optimization/lama/DualStrategyAdaptiveDE.py @@ -0,0 +1,117 @@ +import numpy as np + + +class DualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.7 + self.elitism_rate = 0.2 + self.local_search_prob = 0.1 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.05 * (np.random.rand(self.dim) - 0.5) + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/DualStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/DualStrategyDifferentialEvolution.py new file mode 100644 index 000000000..f87a9dad8 --- /dev/null +++ b/nevergrad/optimization/lama/DualStrategyDifferentialEvolution.py @@ -0,0 +1,57 @@ +import numpy as np + + +class DualStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 200 # Population size optimized for balance + self.F = 0.6 # Differential weight + self.CR = 0.9 # Crossover probability + self.p = 0.1 # Probability of choosing best strategy + + def __call__(self, func): + # Initialize population uniformly within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + # Get the initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Iteration loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + for i in range(self.pop_size): + # Choose the mutation strategy + if np.random.rand() < self.p: + # Best/1/bin strategy + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b = pop[np.random.choice(indices, 2, replace=False)] + mutant = best_ind + self.F * (a - b) + else: + # Rand/1/bin strategy + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + self.F * (b - c) + + # Clip within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Evaluate + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/DualStrategyOptimizer.py b/nevergrad/optimization/lama/DualStrategyOptimizer.py new file mode 100644 index 000000000..4095ebf6f --- /dev/null +++ b/nevergrad/optimization/lama/DualStrategyOptimizer.py @@ -0,0 +1,64 @@ +import numpy as np + + +class DualStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + mutation_factor = 0.8 # Aggressive mutation factor to enhance exploration + crossover_rate = 0.8 # Healthy crossover rate for good exploration and exploitation balance + elite_size = 5 # Number of elite individuals to preserve + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DualStrategyQuantumEvolutionOptimizer.py b/nevergrad/optimization/lama/DualStrategyQuantumEvolutionOptimizer.py new file mode 100644 index 000000000..895c9fb50 --- /dev/null +++ b/nevergrad/optimization/lama/DualStrategyQuantumEvolutionOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class DualStrategyQuantumEvolutionOptimizer: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.5, + cognitive_coefficient=2.1, + social_coefficient=2.1, + adaptive_decay=0.99, + quantum_probability=0.1, + quantum_scale=0.05, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.adaptive_decay = adaptive_decay + self.quantum_probability = quantum_probability + self.quantum_scale = quantum_scale + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_probability: + # Quantum jump strategy for exploration + particles[i] = global_best + np.random.normal(0, self.quantum_scale, self.dim) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Classical PSO movement for exploitation + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Adaptive updates to strategy parameters + self.quantum_probability *= self.adaptive_decay + self.quantum_scale *= self.adaptive_decay + self.inertia_weight *= self.adaptive_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/DynamicAdaptiveClimbingStrategy.py b/nevergrad/optimization/lama/DynamicAdaptiveClimbingStrategy.py new file mode 100644 index 000000000..d97facf44 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveClimbingStrategy.py @@ -0,0 +1,85 @@ +import numpy as np + + +class DynamicAdaptiveClimbingStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 + elite_size = 20 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.1 + adaptive_factor = 0.95 + recombination_prob = 0.7 + + # Initiate feedback mechanism modifications + success_rate = np.inf + last_best_fitness = np.inf + + while evaluations < self.budget: + success_count = 0 + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + parents_indices = np.random.choice(population_size, 3, replace=False) + parent1, parent2, parent3 = population[parents_indices] + child = (parent1 + parent2 + parent3) / 3 + else: + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + distance_to_best = np.linalg.norm(population[best_idx] - child) + individual_mutation_scale = mutation_scale * adaptive_factor ** (distance_to_best) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + success_count += 1 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if fitness[current_best_idx] < last_best_fitness: + last_best_fitness = fitness[current_best_idx] + success_rate = success_count / population_size + adaptive_factor = max(0.8, adaptive_factor - 0.05 * success_rate) + mutation_scale = mutation_scale + 0.02 * (1 - success_rate) + + if evaluations % 250 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in elite_indices: + population[idx] = elite_individuals[np.random.choice(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveCohortOptimization.py b/nevergrad/optimization/lama/DynamicAdaptiveCohortOptimization.py new file mode 100644 index 000000000..80a98006d --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveCohortOptimization.py @@ -0,0 +1,71 @@ +import numpy as np + + +class DynamicAdaptiveCohortOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_base=0.1, + recombination_prob=0.9, + adaptation_factor=0.98, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_base = mutation_base + self.recombination_prob = recombination_prob + self.adaptation_factor = adaptation_factor + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + + for i in range(self.population_size): + if np.random.rand() < self.recombination_prob: + # Select parents from elite group + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices[0]], population[parents_indices[1]] + mask = np.random.rand(self.dimension) < 0.5 + child = np.where(mask, parent1, parent2) + else: + # Inherit directly from an elite member + child = population[np.random.choice(elite_indices)].copy() + + # Dynamic mutation based on how far the process has gone + mutation_scale = self.mutation_base * (1 - evaluations / self.budget) ** 2 + mutation = np.random.normal(scale=mutation_scale, size=self.dimension) + child = np.clip(child + mutation, -5.0, 5.0) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + # Adapt mutation base + self.mutation_base *= self.adaptation_factor + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DynamicAdaptiveEliteHybridOptimizer.py b/nevergrad/optimization/lama/DynamicAdaptiveEliteHybridOptimizer.py new file mode 100644 index 000000000..e4f7ab146 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveEliteHybridOptimizer.py @@ -0,0 +1,136 @@ +import numpy as np + + +class DynamicAdaptiveEliteHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 200 # Increased population size for better diversity + self.initial_F = 0.6 # Adjusted for balanced mutation step + self.initial_CR = 0.9 # High crossover rate to maintain genetic diversity + self.c1 = 1.2 # Cognitive coefficient for personal best attraction + self.c2 = 1.2 # Social coefficient for global best attraction + self.w = 0.7 # Inertia weight for maintaining momentum + self.elite_fraction = 0.1 # Fraction of elite population + self.diversity_threshold = 1e-4 # Higher threshold to reinitialize earlier + self.tau1 = 0.1 # Parameter adaptation probability + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 30, self.budget - evaluations + ) # Increased iterations for better local search + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.01, bounds.lb, bounds.ub + ) # Reduced perturbation for precision + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveEnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicAdaptiveEnhancedDifferentialEvolution.py new file mode 100644 index 000000000..6550e2526 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveEnhancedDifferentialEvolution.py @@ -0,0 +1,112 @@ +import numpy as np + + +class DynamicAdaptiveEnhancedDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicAdaptiveEnhancedDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..e9afed288 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimization.py @@ -0,0 +1,116 @@ +import numpy as np + + +class DynamicAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 500 # Initial population size + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_chance = 0.3 # Probability of performing local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 0.1 # Threshold for population diversity + self.cauchy_step_scale = 0.03 # Scale for Cauchy distribution steps + self.gaussian_step_scale = 0.01 # Scale for Gaussian distribution steps + self.reinitialization_rate = 0.2 # Rate for reinitializing population + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): # Adjusted iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adaptive local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimizer.py b/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimizer.py new file mode 100644 index 000000000..532ff9f41 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveExplorationOptimizer.py @@ -0,0 +1,166 @@ +import numpy as np + + +class DynamicAdaptiveExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 40 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor for better exploration + max_exploration_cycles = 40 # Reduced maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = DynamicAdaptiveExplorationOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/DynamicAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..e999e124f --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveFireworkAlgorithm.py @@ -0,0 +1,99 @@ +import numpy as np + + +class DynamicAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveGradientDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicAdaptiveGradientDifferentialEvolution.py new file mode 100644 index 000000000..86bf0697f --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveGradientDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np +from scipy.stats import qmc + + +class DynamicAdaptiveGradientDifferentialEvolution: + def __init__(self, budget, population_size=30, initial_crossover_rate=0.7, initial_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.mutation_factor = initial_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adaptive mutation and crossover strategies based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + # Additional perturbation to improve exploration + if evaluations % 100 == 0: + for l in range(self.population_size): + population[l] += np.random.randn(self.dim) * self.base_lr * 0.1 + population[l] = np.clip(population[l], self.bounds[0], self.bounds[1]) + fitness[l] = func(population[l]) + evaluations += 1 + if fitness[l] < self.f_opt: + self.f_opt = fitness[l] + self.x_opt = population[l] + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicAdaptiveGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..053c82270 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligence.py @@ -0,0 +1,99 @@ +import numpy as np + + +class DynamicAdaptiveGravitationalSwarmIntelligence: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligenceV2.py b/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligenceV2.py new file mode 100644 index 000000000..c0d388787 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveGravitationalSwarmIntelligenceV2.py @@ -0,0 +1,96 @@ +import numpy as np + + +class DynamicAdaptiveGravitationalSwarmIntelligenceV2: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(500): # Increased the number of optimization runs to 500 for better exploration + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(100): # Increased the number of iterations within each optimization run to 100 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/DynamicAdaptiveHybridAlgorithm.py b/nevergrad/optimization/lama/DynamicAdaptiveHybridAlgorithm.py new file mode 100644 index 000000000..dfce58d75 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveHybridAlgorithm.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicAdaptiveHybridAlgorithm: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.8 + self.F = 0.7 + self.CR = 0.9 + self.memory_size = 50 + self.strategy_switch_threshold = 0.03 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.5 + c1 = 1.7 + c2 = 1.7 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = DynamicAdaptiveHybridAlgorithm(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveHybridDE.py b/nevergrad/optimization/lama/DynamicAdaptiveHybridDE.py new file mode 100644 index 000000000..1730d5f3c --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveHybridDE.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DynamicAdaptiveHybridDE: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.2, + CR=0.8, + top_fraction=0.2, + randomization_factor=0.05, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range for random adjustment of F + self.CR = CR # Crossover probability + self.top_fraction = top_fraction # Top fraction for elite strategy + self.randomization_factor = randomization_factor # Randomization factor for mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + elite_size = max(1, int(self.population_size * self.top_fraction)) + elite_indices = np.argsort(fitness)[:elite_size] + + # Mutation strategy proportionate to fitness + if np.random.rand() < self.randomization_factor * best_fitness: + # More inclined to use global best mutation strategy + base = best_individual + else: + base = population[elite_indices[np.random.randint(elite_size)]] + + # Adjust F dynamically with a slight random factor + F = self.F_base + self.F_range * (2 * np.random.rand() - 1) + + # DE mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, elite_indices[0]]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check budget + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DynamicAdaptiveHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/DynamicAdaptiveHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..424eb28d8 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveHybridDEPSOWithEliteMemory.py @@ -0,0 +1,167 @@ +import numpy as np + + +class DynamicAdaptiveHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into the population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + replace_indices = np.random.choice(range(population_size), elite_size, replace=False) + new_population[replace_indices] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimization.py new file mode 100644 index 000000000..d289a028b --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimization.py @@ -0,0 +1,136 @@ +import numpy as np +from scipy.stats import qmc + + +class DynamicAdaptiveHybridOptimization: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicAdaptiveHybridOptimization(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..dfcc0fc07 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveHybridOptimizer.py @@ -0,0 +1,128 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch.py b/nevergrad/optimization/lama/DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch.py new file mode 100644 index 000000000..16562192b --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch.py @@ -0,0 +1,125 @@ +import numpy as np + + +class DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def smart_local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.normal(0, 0.1, self.dim) # Using Gaussian perturbation + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Adaptive Elitism: Adjust elite size based on convergence rate + elite_size = max( + 1, int(self.elite_fraction * self.pop_size * (1 - self.eval_count / global_search_budget)) + ) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + if self.eval_count >= global_search_budget: + break + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform smart local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.smart_local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/DynamicAdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..65318cb1f --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveMemeticOptimizer.py @@ -0,0 +1,127 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicAdaptiveMemeticOptimizer: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.25 + self.local_search_probability = 0.8 + self.F = 0.9 + self.CR = 0.9 + self.memory_size = 7 + self.strategy_switch_threshold = 0.02 + self.rng = np.random.default_rng() + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = DynamicAdaptiveMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptivePopulationDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicAdaptivePopulationDifferentialEvolution.py new file mode 100644 index 000000000..a02f9d0e5 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptivePopulationDifferentialEvolution.py @@ -0,0 +1,184 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicAdaptivePopulationDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_pop_size = 100 + self.min_pop_size = 20 + self.num_subpopulations = 5 + self.subpop_size = self.initial_pop_size // self.num_subpopulations + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 + self.CR = 0.9 + + def _initialize_population(self, pop_size): + return np.random.uniform(self.lb, self.ub, (pop_size, self.dim)) + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(len(population)) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, len(population) - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + subpopulations = np.array_split(population, self.num_subpopulations) + subfitness = np.array_split(fitness, self.num_subpopulations) + new_population = [] + new_fitness = [] + + for subpop, subfit in zip(subpopulations, subfitness): + for i in range(len(subpop)): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(len(subpop)) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(subfit) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(subpop, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(subpop, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(subpop, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(subpop, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < subfit[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(subpop[i]) + new_fitness.append(subfit[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Perform local search on elite solutions + elite_indices = np.argsort(fitness)[: self.num_subpopulations] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= 5: + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Crowding distance to maintain diversity + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + # Opposition-based learning + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.initial_pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic adjustment of population size + if self.no_improvement_count >= 10: + reduced_pop_size = max(self.min_pop_size, len(population) - 10) + population = population[:reduced_pop_size] + fitness = fitness[:reduced_pop_size] + self.subpop_size = len(population) // self.num_subpopulations + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicAdaptiveQuantumDifferentialEvolution.py new file mode 100644 index 000000000..730815cff --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveQuantumDifferentialEvolution.py @@ -0,0 +1,84 @@ +import numpy as np + + +class DynamicAdaptiveQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.5 + self.F_max = 1.0 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # Quantum Inspired Parameters + self.alpha = 0.75 + self.beta = 0.25 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + evaluations = self.population_size + stagnation_counter = 0 + + while evaluations < self.budget: + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + # Quantum Inspired Adjustment + quantum_perturbation = np.random.normal(0, 1, self.dim) * ( + self.alpha * (self.x_opt - population[i]) + self.beta * (population[i] - self.lb) + ) + trial_vector = np.clip(trial_vector + quantum_perturbation, self.lb, self.ub) + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + stagnation_counter = 0 + else: + stagnation_counter += 1 + else: + stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Adaptive Parameter Adjustment based on Stagnation Counter + if stagnation_counter > self.population_size / 2: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + stagnation_counter = 0 + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveQuantumLevyOptimizer.py b/nevergrad/optimization/lama/DynamicAdaptiveQuantumLevyOptimizer.py new file mode 100644 index 000000000..228195d21 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveQuantumLevyOptimizer.py @@ -0,0 +1,160 @@ +import numpy as np + + +class DynamicAdaptiveQuantumLevyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.4 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 30 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 2 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/DynamicAdaptiveQuantumPSO.py new file mode 100644 index 000000000..fbc84466e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveQuantumPSO.py @@ -0,0 +1,133 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicAdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + self.stagnation_threshold = 10 # No improvement iterations before triggering local search + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 # Annealing factor for inertia weight + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + # Trigger local search after a certain number of iterations without improvement + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 # Reset the counter on improvement + + if eval_count >= self.budget: + break + + # Reset no improvement count after local search + self.no_improvement_count = 0 + + # Anneal inertia weight to enhance exploration-exploitation balance + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = DynamicAdaptiveQuantumPSO(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveQuasiRandomDEGradientAnnealing.py b/nevergrad/optimization/lama/DynamicAdaptiveQuasiRandomDEGradientAnnealing.py new file mode 100644 index 000000000..cf4d6c4b6 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveQuasiRandomDEGradientAnnealing.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.stats import qmc + + +class DynamicAdaptiveQuasiRandomDEGradientAnnealing: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + threshold = 1e-3 + diversity_enhanced = False + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + diversity_enhanced = True + return diversity_enhanced + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + crossover_rate = self.crossover_rate + mutation_factor = self.mutation_factor + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + diversity_enhanced = maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + crossover_rate *= 1.05 + mutation_factor = min(1.0, mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + crossover_rate *= 0.95 + mutation_factor = max(0.5, mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + crossover_rate = np.clip(crossover_rate, 0.1, 0.9) + + if diversity_enhanced: + self.base_lr *= 0.9 + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicAdaptiveQuasiRandomDEGradientAnnealing(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicAdaptiveSwarmOptimization.py b/nevergrad/optimization/lama/DynamicAdaptiveSwarmOptimization.py new file mode 100644 index 000000000..fd591a174 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicAdaptiveSwarmOptimization.py @@ -0,0 +1,146 @@ +import numpy as np + + +class DynamicAdaptiveSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.random.randn(swarm_size, self.dim) + personal_bests = positions.copy() + personal_best_scores = np.full(swarm_size, np.inf) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + w_max = 0.9 + w_min = 0.4 + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + inertia_weights = np.linspace(w_max, w_min, self.budget) + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Dynamic adaptive loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + w = inertia_weights[i] + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = DynamicAdaptiveSwarmOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicBalancingPSO.py b/nevergrad/optimization/lama/DynamicBalancingPSO.py new file mode 100644 index 000000000..2ddd2740a --- /dev/null +++ b/nevergrad/optimization/lama/DynamicBalancingPSO.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DynamicBalancingPSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_start=0.9, + omega_end=0.4, + phi_p=0.14, + phi_g=0.16, + adaptive_diversity=True, + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start + self.omega_end = omega_end + self.phi_p = phi_p + self.phi_g = phi_g + self.adaptive_diversity = adaptive_diversity + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + diversity = np.std(particles) + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_start - ((self.omega_start - self.omega_end) * evaluations / self.budget) + phi_total = self.phi_p + self.phi_g + phi_ratio = self.phi_p / phi_total + + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + + # Update velocities + velocities[i] = ( + omega * velocities[i] + + phi_ratio * self.phi_p * r_p * (personal_best[i] - particles[i]) + + (1 - phi_ratio) * self.phi_g * r_g * (global_best - particles[i]) + ) + + # Update positions + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + if self.adaptive_diversity: + current_diversity = np.std(particles) + if current_diversity < diversity: + phi_ratio += 0.05 # Encourage exploration + else: + phi_ratio -= 0.05 # Encourage exploitation + diversity = current_diversity + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/DynamicClusterHybridOptimization.py b/nevergrad/optimization/lama/DynamicClusterHybridOptimization.py new file mode 100644 index 000000000..49a1f396e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicClusterHybridOptimization.py @@ -0,0 +1,128 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class DynamicClusterHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.num_clusters = 5 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def simulated_annealing(self, current_position, current_fitness, func, temp): + new_position = current_position + np.random.uniform(-0.1, 0.1, self.dim) + new_position = np.clip(new_position, self.lb, self.ub) + new_fitness = func(new_position) + if new_fitness < current_fitness or np.exp((current_fitness - new_fitness) / temp) > np.random.rand(): + return new_position, new_fitness + return current_position, current_fitness + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + temperature = self.adaptive_parameters(evaluations, self.budget, (1.0, 0.01)) + + # Clustering + kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(population) + cluster_labels = kmeans.labels_ + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + cluster_id = cluster_labels[i] + cluster_center = cluster_centers[cluster_id] + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (cluster_center - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + global_best_position = trial_vector + global_best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + population[i], fitness[i] = self.simulated_annealing( + population[i], fitness[i], func, temperature + ) + evaluations += 1 + + if fitness[i] < self.f_opt: + self.f_opt = fitness[i] + self.x_opt = population[i] + global_best_position = population[i] + global_best_fitness = fitness[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicCohortAdaptiveEvolution.py b/nevergrad/optimization/lama/DynamicCohortAdaptiveEvolution.py new file mode 100644 index 000000000..f83734cbb --- /dev/null +++ b/nevergrad/optimization/lama/DynamicCohortAdaptiveEvolution.py @@ -0,0 +1,96 @@ +import numpy as np + + +class DynamicCohortAdaptiveEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 50 + self.cohort_size = 10 # Size of subpopulations + self.F = 0.5 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_chance = 0.2 # Probability to perform local search + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Split population into cohorts + cohorts = [ + population[i : i + self.cohort_size] for i in range(0, len(population), self.cohort_size) + ] + new_population = [] + + for cohort in cohorts: + if len(cohort) < self.cohort_size: + continue # Skip incomplete cohorts + + for i in range(len(cohort)): + # Mutation step + idxs = [idx for idx in range(len(cohort)) if idx != i] + a, b, c = cohort[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + trial = np.where(crossover, mutant, cohort[i]) + + # Local search based on chance + if np.random.rand() < self.local_search_chance: + trial = self.local_search(trial, func) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + new_population.append(trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(cohort[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Adaptive control of parameters + self.adaptive_F_CR(evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + for i in range(self.dim): + x_new = best_x.copy() + step_size = np.random.uniform(-0.1, 0.1) + x_new[i] = np.clip(best_x[i] + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_F_CR(self, evaluations): + # Adaptive parameters adjustment + if evaluations % 100 == 0: + self.F = np.random.uniform(0.4, 0.9) + self.CR = np.random.uniform(0.1, 0.9) + self.local_search_chance = np.random.uniform(0.1, 0.3) diff --git a/nevergrad/optimization/lama/DynamicCohortMemeticAlgorithm.py b/nevergrad/optimization/lama/DynamicCohortMemeticAlgorithm.py new file mode 100644 index 000000000..5d2af142e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicCohortMemeticAlgorithm.py @@ -0,0 +1,110 @@ +import numpy as np + + +class DynamicCohortMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + self.global_mutation_factor = 0.5 + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.diversity_cycle == 0: + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, 0.1, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * (1 + 0.1 * remaining_budget_ratio) + self.mutation_factor = self.mutation_factor * (1 + 0.1 * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/DynamicCohortOptimization.py b/nevergrad/optimization/lama/DynamicCohortOptimization.py new file mode 100644 index 000000000..0e5610c06 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicCohortOptimization.py @@ -0,0 +1,69 @@ +import numpy as np + + +class DynamicCohortOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=50, + elite_fraction=0.2, + mutation_scale=0.2, + learning_rate=0.1, + learning_rate_decay=0.98, + mutation_decay=0.99, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_scale = mutation_scale + self.learning_rate = learning_rate + self.learning_rate_decay = learning_rate_decay + self.mutation_decay = mutation_decay + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + elite_indices = fitness.argsort()[: self.elite_count] + for i in range(self.population_size): + # Tournament selection for parent selection + indices = np.random.choice(elite_indices, 2, replace=False) + if fitness[indices[0]] < fitness[indices[1]]: + parent = population[indices[0]] + else: + parent = population[indices[1]] + + # Mutation based on normal distribution + mutation = np.random.normal(0, self.mutation_scale, self.dimension) + child = np.clip(parent + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update best solution found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adjust learning rate and mutation scale + self.mutation_scale *= self.mutation_decay + self.learning_rate *= self.learning_rate_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/DynamicCrowdedDE.py b/nevergrad/optimization/lama/DynamicCrowdedDE.py new file mode 100644 index 000000000..6f2384dc5 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicCrowdedDE.py @@ -0,0 +1,137 @@ +import numpy as np +from scipy.spatial import distance + + +class DynamicCrowdedDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + init_population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(size): + population = np.random.uniform(bounds[0], bounds[1], (size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(len(F_values)): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(len(population))) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(len(population))) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def crowding_distance_sort(population, fitness): + distances = distance.cdist(population, population, "euclidean") + sorted_indices = np.argsort(fitness) + crowding_distances = np.zeros(len(population)) + crowding_distances[sorted_indices[0]] = np.inf + crowding_distances[sorted_indices[-1]] = np.inf + + for i in range(1, len(population) - 1): + crowding_distances[sorted_indices[i]] = distances[ + sorted_indices[i - 1], sorted_indices[i + 1] + ] + + return np.argsort(crowding_distances) + + def select_mutation_strategy(score): + return mutation_strategy_1 if score < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population(init_population_size) + evaluations = init_population_size + + F_values = np.full(init_population_size, F) + CR_values = np.full(init_population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(len(population), F) + CR_values = np.full(len(population), CR) + last_improvement = evaluations + + sorted_indices = crowding_distance_sort(population, fitness) + new_population = np.zeros_like(population) + new_fitness = np.zeros(len(population)) + new_F_values = np.zeros(len(population)) + new_CR_values = np.zeros(len(population)) + + for idx in range(len(population)): + i = sorted_indices[idx] + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy(fitness[i]) + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + if evaluations - last_improvement > restart_threshold // 2: + population_size = int(len(population) * 0.9) + else: + population_size = int(len(population) * 1.1) + population_size = max(10, min(30, population_size)) + + population, fitness = new_population[:population_size], new_fitness[:population_size] + F_values, CR_values = new_F_values[:population_size], new_CR_values[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicCulturalDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicCulturalDifferentialEvolution.py new file mode 100644 index 000000000..d6326f9e6 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicCulturalDifferentialEvolution.py @@ -0,0 +1,127 @@ +import numpy as np + + +class DynamicCulturalDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.2: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.3 + ( + 0.4 * fitness_std / (np.mean(fitness) + 1e-9) + ) # Adjusted influence factors + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEliteAdaptiveHybridOptimizerV2.py b/nevergrad/optimization/lama/DynamicEliteAdaptiveHybridOptimizerV2.py new file mode 100644 index 000000000..ec1803546 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEliteAdaptiveHybridOptimizerV2.py @@ -0,0 +1,165 @@ +import numpy as np + + +class DynamicEliteAdaptiveHybridOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature for annealing + self.cooling_rate = 0.98 # Cooling rate for simulated annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.05 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEliteAnnealingDE.py b/nevergrad/optimization/lama/DynamicEliteAnnealingDE.py new file mode 100644 index 000000000..970aeca1d --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEliteAnnealingDE.py @@ -0,0 +1,157 @@ +import numpy as np + + +class DynamicEliteAnnealingDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature for annealing + self.cooling_rate = 0.95 # Cooling rate for simulated annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.1 # Scale for quantum jumps + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Retain top individuals to maintain high quality solutions in the population + top_indices = np.argsort(fitness)[: self.pop_size // 2] + for i in top_indices: + new_population[i] = np.copy(new_population[i]) + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEliteCovarianceMemeticSearch.py b/nevergrad/optimization/lama/DynamicEliteCovarianceMemeticSearch.py new file mode 100644 index 000000000..aeea8248c --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEliteCovarianceMemeticSearch.py @@ -0,0 +1,122 @@ +import numpy as np + + +class DynamicEliteCovarianceMemeticSearch: + def __init__( + self, + budget, + population_size=50, + memetic_rate=0.7, + elite_fraction=0.2, + initial_learning_rate=0.01, + initial_sigma=0.5, + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.elite_fraction = elite_fraction + self.initial_learning_rate = initial_learning_rate + self.initial_sigma = initial_sigma + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_covariance_matrix_adaptation(self, func, pop, scores, mean, C, sigma): + n_samples = len(pop) + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, learning rate, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + learning_rate = self.initial_learning_rate + sigma = self.initial_sigma + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.adaptive_covariance_matrix_adaptation(func, pop, scores, mean, C, sigma) + + # Perform memetic local search + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + pop[i], scores[i] = self.local_search(func, pop[i], scores[i], learning_rate) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, learning rate, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + # Adaptive learning rate and sigma + learning_rate = ( + self.initial_learning_rate + * (1 - iteration / max_iterations) + * (0.1 + 0.9 * (global_best_score / (global_best_score + 1e-8))) + ) + sigma = ( + self.initial_sigma + * (1 - iteration / max_iterations) + * (0.1 + 0.9 * (global_best_score / (global_best_score + 1e-8))) + ) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEliteEnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicEliteEnhancedDifferentialEvolution.py new file mode 100644 index 000000000..d434e4829 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEliteEnhancedDifferentialEvolution.py @@ -0,0 +1,97 @@ +import numpy as np + + +class DynamicEliteEnhancedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.8 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.02 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Re-initialize half of the worst individuals to maintain diversity + worst_indices = np.argsort(fitness)[-int(0.5 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 3) == 0 and population_size > 20: + elite_indices = np.argsort(fitness)[: int(0.6 * population_size)] + population = population[elite_indices] + fitness = fitness[elite_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicElitistHybridOptimizer.py b/nevergrad/optimization/lama/DynamicElitistHybridOptimizer.py new file mode 100644 index 000000000..66299208d --- /dev/null +++ b/nevergrad/optimization/lama/DynamicElitistHybridOptimizer.py @@ -0,0 +1,146 @@ +import numpy as np + + +class DynamicElitistHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.min_pop_size = 10 + self.max_pop_size = 100 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 2.0 + self.c2 = 2.0 + self.w = 0.7 + self.elite_fraction = 0.1 + self.restart_threshold = 200 + self.diversity_threshold = 0.1 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + + def cma_update(self, population, mean, cov_matrix): + new_samples = np.random.multivariate_normal(mean, cov_matrix, size=population.shape[0]) + return np.clip(new_samples, -5.0, 5.0) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + no_improvement_counter = 0 + + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elitism + elite_count = max(1, int(self.elite_fraction * current_pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + # Check for diversity + if ( + self.diversity(population) < self.diversity_threshold + or no_improvement_counter >= self.restart_threshold + ): + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + evaluations += self.pop_size + else: + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + population = self.cma_update(population, mean, cov_matrix) + # Re-inject elites + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEnhancedDifferentialFireworkAlgorithm.py b/nevergrad/optimization/lama/DynamicEnhancedDifferentialFireworkAlgorithm.py new file mode 100644 index 000000000..48048a07c --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEnhancedDifferentialFireworkAlgorithm.py @@ -0,0 +1,88 @@ +import numpy as np + + +class DynamicEnhancedDifferentialFireworkAlgorithm: + def __init__( + self, + budget=10000, + n_fireworks=20, + n_sparks=10, + scaling_factor=0.5, + crossover_rate=0.9, + levy_flight_prob=0.3, + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.scaling_factor = scaling_factor + self.crossover_rate = crossover_rate + self.levy_flight_prob = levy_flight_prob + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scaling_factor, firework + self.scaling_factor, (self.n_sparks, self.dim) + ) + return sparks + + def differential_evolution(self, current, target1, target2): + mutant = current + self.scaling_factor * (target1 - target2) + crossover_points = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover_points, mutant, current) + return trial + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + if np.random.rand() < self.levy_flight_prob: + fireworks[i] += self.levy_flight() * (fireworks[i] - self.x_opt) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def adjust_parameters(self, iteration): + self.scaling_factor = 0.5 - 0.4 * (iteration / self.budget) + self.levy_flight_prob = 0.3 - 0.25 * (iteration / self.budget) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + self.adjust_parameters(it) + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = self.differential_evolution(fireworks[i], fireworks[idx1], fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicEnhancedHybridOptimizer.py b/nevergrad/optimization/lama/DynamicEnhancedHybridOptimizer.py new file mode 100644 index 000000000..8d3fdb323 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicEnhancedHybridOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np + + +class DynamicEnhancedHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + # Enhanced exploration using adaptive exploration factor + if i % 100 == 0 and i > 0: # Every 100 iterations, enhance exploration + exploration_factor = min( + 0.5, exploration_factor * 1.1 + ) # Gradually increase exploration factor + for idx in range(swarm_size): + new_position = positions[idx] + exploration_factor * np.random.uniform(-1, 1, self.dim) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = DynamicEnhancedHybridOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicExplorationExploitationAlgorithm.py b/nevergrad/optimization/lama/DynamicExplorationExploitationAlgorithm.py new file mode 100644 index 000000000..bbb4de59e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicExplorationExploitationAlgorithm.py @@ -0,0 +1,105 @@ +import numpy as np + + +class DynamicExplorationExploitationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 50 + self.F = np.random.uniform(0.5, 0.9) # Differential weight + self.CR = np.random.uniform(0.1, 0.9) # Crossover probability + self.local_search_chance = np.random.uniform(0.1, 0.3) # Probability to perform local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-5 # Threshold to switch between exploration and exploitation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_F_CR(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + for i in range(self.dim): + x_new = best_x.copy() + step_size = np.random.uniform(-0.1, 0.1) + x_new[i] = np.clip(best_x[i] + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_F_CR(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Switch to exploitation: increase local search chance + self.local_search_chance = np.random.uniform(0.2, 0.4) + self.F = np.random.uniform(0.4, 0.6) + self.CR = np.random.uniform(0.7, 0.9) + else: + # Switch to exploration: decrease local search chance + self.local_search_chance = np.random.uniform(0.1, 0.2) + self.F = np.random.uniform(0.6, 0.9) + self.CR = np.random.uniform(0.1, 0.3) diff --git a/nevergrad/optimization/lama/DynamicExplorationExploitationDE.py b/nevergrad/optimization/lama/DynamicExplorationExploitationDE.py new file mode 100644 index 000000000..7cb963d56 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicExplorationExploitationDE.py @@ -0,0 +1,88 @@ +import numpy as np + + +class DynamicExplorationExploitationDE: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Mutation and crossover + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive parameter control based on success rates + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(1.0, F * 1.2) + Cr = max(0.1, Cr * 0.9) + else: + F = max(0.4, F * 0.8) + Cr = min(1.0, Cr * 1.1) + + # Enhanced restart mechanism with diversity consideration + if evaluations > 0.5 * self.budget and np.std(fitness) < 1e-6: + # Re-initialize population if stuck + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicExplorationExploitationMemeticAlgorithm.py b/nevergrad/optimization/lama/DynamicExplorationExploitationMemeticAlgorithm.py new file mode 100644 index 000000000..8f0ec5024 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicExplorationExploitationMemeticAlgorithm.py @@ -0,0 +1,141 @@ +import numpy as np + + +class DynamicExplorationExploitationMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + self.global_mutation_factor = 0.5 + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + self.global_search_intensity = 10 + + # New parameters + self.local_search_radius = 0.1 + self.global_search_radius = 0.5 + self.reduction_factor = 0.98 # To reduce the mutation factor over time + self.mutation_scale = 0.1 # To scale the random mutations + self.adaptive_crossover_rate = 0.5 # To adjust crossover probability based on diversity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.diversity_cycle == 0: + self.adaptive_population_control(population, fitness, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, self.local_search_radius, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_control(self, population, fitness, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * (1 + 0.1 * remaining_budget_ratio) + self.mutation_factor = self.mutation_factor * (1 + 0.1 * remaining_budget_ratio) + + # New adaptation strategies + self.crossover_probability *= self.adaptive_crossover_rate + self.mutation_factor *= self.reduction_factor + self.global_mutation_factor *= self.reduction_factor + self.local_search_radius *= self.reduction_factor + + if diversity < self.diversity_threshold / 2 and remaining_budget_ratio > 0.5: + self.global_search_reset(population, fitness, evaluations) + + def global_search_reset(self, population, fitness, evaluations): + global_search_population = np.random.uniform( + self.lb, self.ub, (self.global_search_intensity, self.dim) + ) + + for ind in global_search_population: + f_ind = func(ind) + evaluations += 1 + if f_ind < self.f_opt: + self.f_opt = f_ind + self.x_opt = ind + + population[: self.global_search_intensity] = global_search_population diff --git a/nevergrad/optimization/lama/DynamicExplorationOptimization.py b/nevergrad/optimization/lama/DynamicExplorationOptimization.py new file mode 100644 index 000000000..c37761756 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicExplorationOptimization.py @@ -0,0 +1,141 @@ +import numpy as np + + +class DynamicExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 100 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = DynamicExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/DynamicFireworkAlgorithm.py new file mode 100644 index 000000000..89bc3d0f5 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicFireworkAlgorithm.py @@ -0,0 +1,63 @@ +import numpy as np + + +class DynamicFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=10, n_sparks=5, alpha=0.5, beta=2.0, mutation_rate=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.mutation_rate = mutation_rate + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self, func): + self.fireworks = np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + self.firework_fitness = np.array([func(x) for x in self.fireworks]) + + def explode_firework(self, firework, func): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + sparks_fitness = np.array([func(x) for x in sparks]) + return sparks, sparks_fitness + + def apply_mutation(self, sparks): + mutated_sparks = sparks + np.random.normal(0, self.mutation_rate, sparks.shape) + return np.clip(mutated_sparks, self.bounds[0], self.bounds[1]) + + def update_fireworks(self, sparks, sparks_fitness): + for i in range(self.n_fireworks): + if i < len(sparks) and sparks_fitness[i] < self.firework_fitness[i]: + self.fireworks[i] = sparks[i] + self.firework_fitness[i] = sparks_fitness[i] + + def adapt_alpha(self, func): + best_idx = np.argmin(self.firework_fitness) + worst_idx = np.argmax(self.firework_fitness) + self.alpha = self.alpha * ( + self.firework_fitness[best_idx] / (self.firework_fitness[worst_idx] + 1e-8) + ) + + def adapt_beta(self): + self.beta = self.beta * 0.9 + + def __call__(self, func): + self.initialize_fireworks(func) + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks, sparks_fitness = self.explode_firework(self.fireworks[i], func) + mutated_sparks = self.apply_mutation(sparks) + self.update_fireworks(mutated_sparks, sparks_fitness) + + self.adapt_alpha(func) + self.adapt_beta() + + best_idx = np.argmin(self.firework_fitness) + if self.firework_fitness[best_idx] < self.f_opt: + self.f_opt = self.firework_fitness[best_idx] + self.x_opt = self.fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicFireworksSwarmOptimization.py b/nevergrad/optimization/lama/DynamicFireworksSwarmOptimization.py new file mode 100644 index 000000000..7c918e00c --- /dev/null +++ b/nevergrad/optimization/lama/DynamicFireworksSwarmOptimization.py @@ -0,0 +1,89 @@ +import numpy as np + + +class DynamicFireworksSwarmOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func, i): + p_diversify = 0.1 + 0.4 * np.exp(-5 * i / self.budget) # Adaptive probability for diversification + for i in range(self.n_fireworks): + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def enhance_convergence(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + best_firework = fireworks[best_idx] + for i in range(self.n_fireworks): + if i != best_idx: + fireworks[i] = 0.9 * fireworks[i] + 0.1 * best_firework # Attraction towards the global best + return fireworks + + def adaptive_sparks(self, budget): + return 5 + int(45 * np.exp(-5 * budget / self.budget)) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + n_sparks = self.adaptive_sparks(i) + self.n_sparks = n_sparks + fireworks = self.diversify_fireworks(fireworks, func, i) + fireworks = self.enhance_convergence(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicFractionalClusterOptimization.py b/nevergrad/optimization/lama/DynamicFractionalClusterOptimization.py new file mode 100644 index 000000000..9d39298da --- /dev/null +++ b/nevergrad/optimization/lama/DynamicFractionalClusterOptimization.py @@ -0,0 +1,144 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class DynamicFractionalClusterOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def fractional_order_velocity_update(self, velocity, order=0.5): + return np.sign(velocity) * (np.abs(velocity) ** order) + + def local_search(self, position, func, step_size=0.1): + best_position = position + best_fitness = func(position) + for i in range(self.dim): + for direction in [-1, 1]: + new_position = np.copy(position) + new_position[i] += direction * step_size + new_position = np.clip(new_position, self.lb, self.ub) + new_fitness = func(new_position) + if new_fitness < best_fitness: + best_fitness = new_fitness + best_position = new_position + return best_position, best_fitness + + def __call__(self, func): + population_size = 80 + + # Enhanced Initialization using Sobol Sequence + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size) + population = qmc.scale(sample, self.lb, self.ub) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + last_improvement = 0 + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations - last_improvement > self.budget // 10: + strategy = "DE" + else: + strategy = "PSO" + + if strategy == "PSO": + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * self.fractional_order_velocity_update(velocity[i]) + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy with Adaptive Crossover Mechanism + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + scaling_factor = 0.5 + np.random.rand() * 0.5 + mutant_vector = np.clip(a + scaling_factor * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + last_improvement = evaluations + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + # Apply local search for exploitation + for i in top_individuals: + new_position, new_fitness = self.local_search(population[i], func) + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..63a8b1381 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class DynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Balanced memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement more frequently + if evaluations % (self.budget // 8) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.007): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.12): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealingV2.py b/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealingV2.py new file mode 100644 index 000000000..10406c59d --- /dev/null +++ b/nevergrad/optimization/lama/DynamicGradientBoostedMemorySimulatedAnnealingV2.py @@ -0,0 +1,141 @@ +import numpy as np + + +class DynamicGradientBoostedMemorySimulatedAnnealingV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement more frequently + if evaluations % (self.budget // 12) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 6) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/DynamicGradientBoostedRefinementAnnealing.py b/nevergrad/optimization/lama/DynamicGradientBoostedRefinementAnnealing.py new file mode 100644 index 000000000..b9db000fc --- /dev/null +++ b/nevergrad/optimization/lama/DynamicGradientBoostedRefinementAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class DynamicGradientBoostedRefinementAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Intensive localized search as refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/DynamicGradientEnhancedAnnealing.py b/nevergrad/optimization/lama/DynamicGradientEnhancedAnnealing.py new file mode 100644 index 000000000..aa9dcc070 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicGradientEnhancedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class DynamicGradientEnhancedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Periodic intensive localized search as refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/DynamicHybridAnnealing.py b/nevergrad/optimization/lama/DynamicHybridAnnealing.py new file mode 100644 index 000000000..d8b890d8e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicHybridAnnealing.py @@ -0,0 +1,109 @@ +import numpy as np + + +class DynamicHybridAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Balanced memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution periodically + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory, local_search_iters) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/DynamicHybridOptimizer.py b/nevergrad/optimization/lama/DynamicHybridOptimizer.py new file mode 100644 index 000000000..cc3ec3953 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicHybridOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class DynamicHybridOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.5 + self.global_coeff = 0.8 + self.local_coeff = 0.8 + self.inertia_max = 1.4 + self.inertia_min = 0.3 + self.exploration_phase_ratio = 0.4 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + phase_cutoff = int(self.budget * self.exploration_phase_ratio) + + while evaluations < self.budget: + if evaluations < phase_cutoff: + inertia = self.inertia_min + (self.inertia_max - self.inertia_min) * ( + evaluations / phase_cutoff + ) + else: + inertia = self.inertia_min + + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + inertia * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/DynamicHybridQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicHybridQuantumDifferentialEvolution.py new file mode 100644 index 000000000..79e994847 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicHybridQuantumDifferentialEvolution.py @@ -0,0 +1,177 @@ +import numpy as np + + +class DynamicHybridQuantumDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/DynamicHybridSelfAdaptiveDE.py b/nevergrad/optimization/lama/DynamicHybridSelfAdaptiveDE.py new file mode 100644 index 000000000..c5c970707 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicHybridSelfAdaptiveDE.py @@ -0,0 +1,136 @@ +import numpy as np + + +class DynamicHybridSelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + adaptive_interval = 50 # Adapt parameters every 50 evaluations + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F, fitness): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F, fitness): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def crowding_distance_selection(new_population, new_fitness, old_population, old_fitness): + combined_population = np.vstack((new_population, old_population)) + combined_fitness = np.hstack((new_fitness, old_fitness)) + + sorted_indices = np.argsort(combined_fitness) + combined_population = combined_population[sorted_indices] + combined_fitness = combined_fitness[sorted_indices] + + distance = np.zeros(len(combined_population)) + for i in range(self.dim): + sorted_indices = np.argsort(combined_population[:, i]) + sorted_population = combined_population[sorted_indices] + distance[sorted_indices[0]] = distance[sorted_indices[-1]] = np.inf + for j in range(1, len(combined_population) - 1): + distance[sorted_indices[j]] += sorted_population[j + 1, i] - sorted_population[j - 1, i] + + selected_indices = np.argsort(distance)[-population_size:] + return combined_population[selected_indices], combined_fitness[selected_indices] + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + if evaluations % adaptive_interval == 0: + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i], fitness) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Crowding Distance Selection + population, fitness = crowding_distance_selection( + new_population, new_fitness, population, fitness + ) + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicLevyHarmonySearch.py b/nevergrad/optimization/lama/DynamicLevyHarmonySearch.py new file mode 100644 index 000000000..c15fbea80 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicLevyHarmonySearch.py @@ -0,0 +1,79 @@ +import numpy as np + + +class DynamicLevyHarmonySearch: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + levy_step_size=0.3, + global_best_rate=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/DynamicLocalSearchFireworkAlgorithm.py b/nevergrad/optimization/lama/DynamicLocalSearchFireworkAlgorithm.py new file mode 100644 index 000000000..b42ad79ba --- /dev/null +++ b/nevergrad/optimization/lama/DynamicLocalSearchFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicLocalSearchFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicMemeticDifferentialEvolutionWithAdaptiveElitism.py b/nevergrad/optimization/lama/DynamicMemeticDifferentialEvolutionWithAdaptiveElitism.py new file mode 100644 index 000000000..6951d1ea2 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMemeticDifferentialEvolutionWithAdaptiveElitism.py @@ -0,0 +1,128 @@ +import numpy as np + + +class DynamicMemeticDifferentialEvolutionWithAdaptiveElitism: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + # Adaptive Elitism parameter + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Adaptive Elitism: Adjust elite size based on convergence rate + elite_size = max( + 1, int(self.elite_fraction * self.pop_size * (1 - self.eval_count / global_search_budget)) + ) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + if self.eval_count >= global_search_budget: + break + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicMemoryAdaptiveConvergenceStrategyV76.py b/nevergrad/optimization/lama/DynamicMemoryAdaptiveConvergenceStrategyV76.py new file mode 100644 index 000000000..6e95aedea --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMemoryAdaptiveConvergenceStrategyV76.py @@ -0,0 +1,84 @@ +import numpy as np + + +class DynamicMemoryAdaptiveConvergenceStrategyV76: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx): + size = len(population) + a, b, c, d = np.random.choice(size, 4, replace=False) + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = ( + population[a] + self.F * (population[b] - population[c]) + 0.1 * memory_effect + ) # Memory-influenced mutation + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target or np.random.rand() < 0.05: # Probabilistic acceptance + if len(self.memory) < 10: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(len(self.memory))] = trial - target + return trial, f_trial + return target, f_target + + def adapt_parameters(self, success_rate): + if success_rate < 0.1: + self.F = max(0.1, self.F - 0.1) + self.CR = min(0.9, self.CR + 0.1) + elif success_rate > 0.2: + self.F = min(1.0, self.F + 0.1) + self.CR = max(0.1, self.CR - 0.1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + last_best = fitnesses[best_idx] + successes = 0 + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + successes += 1 + + if evaluations >= self.budget: + break + + current_best = fitnesses[best_idx] + if current_best < last_best: + success_rate = successes / self.pop_size + self.adapt_parameters(success_rate) + last_best = current_best + successes = 0 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DynamicMemoryEnhancedDualPhaseStrategyV66.py b/nevergrad/optimization/lama/DynamicMemoryEnhancedDualPhaseStrategyV66.py new file mode 100644 index 000000000..686324bed --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMemoryEnhancedDualPhaseStrategyV66.py @@ -0,0 +1,93 @@ +import numpy as np + + +class DynamicMemoryEnhancedDualPhaseStrategyV66: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + memory_size=10, + switch_ratio=0.7, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover rate + self.memory_size = memory_size + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[b] - population[c]) + else: + memory_effect = ( + np.sum(self.memory, axis=0) / len(self.memory) if self.memory else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = (iteration / total_iterations) * np.pi + self.F = np.clip(0.5 + 0.5 * np.sin(scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + switch_point = int(self.switch_ratio * total_iterations) + + for iteration in range(total_iterations): + phase = 1 if iteration < switch_point else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DynamicMemoryHybridSearch.py b/nevergrad/optimization/lama/DynamicMemoryHybridSearch.py new file mode 100644 index 000000000..94d449a6b --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMemoryHybridSearch.py @@ -0,0 +1,162 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.cluster import KMeans + + +class DynamicMemoryHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.num_sub_populations = 5 + self.F = 0.8 + self.CR = 0.9 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Memory Mechanism + self.memory_size = 20 + self.memory = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + + def _nelder_mead_local_search(self, x, func): + res = minimize(func, x, method="nelder-mead", options={"xtol": 1e-8, "disp": False}) + return res.x, res.fun + + def _bfgs_local_search(self, x, func): + res = minimize(func, x, method="BFGS", options={"gtol": 1e-8, "disp": False}) + return res.x, res.fun + + def _adaptive_parameter_adjustment(self): + self.F = np.random.uniform(0.4, 1.0) + self.CR = np.random.uniform(0.1, 1.0) + self.inertia_weight = np.random.uniform(0.4, 0.9) + + def _cluster_based_search(self, population, fitness, func): + if len(population) > 10: + kmeans = KMeans(n_clusters=10).fit(population) + cluster_centers = kmeans.cluster_centers_ + for center in cluster_centers: + local_candidate, f_local_candidate = self._nelder_mead_local_search(center, func) + self.evaluations += 1 + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + + def _memory_based_search(self, func): + if len(self.memory) > 1: + for mem in self.memory: + local_candidate, f_local_candidate = self._bfgs_local_search(mem, func) + self.evaluations += 1 + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + + def __call__(self, func): + # Initialize population + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + self.evaluations = self.population_size + + while self.evaluations < self.budget: + # Divide population into subpopulations + sub_pop_size = self.population_size // self.num_sub_populations + sub_populations = [ + population[i * sub_pop_size : (i + 1) * sub_pop_size] for i in range(self.num_sub_populations) + ] + sub_fitness = [ + fitness[i * sub_pop_size : (i + 1) * sub_pop_size] for i in range(self.num_sub_populations) + ] + + # Perform DE in subpopulations + for sub_pop, sub_fit in zip(sub_populations, sub_fitness): + for i in range(len(sub_pop)): + # Select three random vectors a, b, c from subpopulation + indices = [idx for idx in range(len(sub_pop)) if idx != i] + a, b, c = sub_pop[np.random.choice(indices, 3, replace=False)] + + # Mutation and Crossover + mutant_vector = np.clip(a + self.F * (b - c), self.lb, self.ub) + trial_vector = np.copy(sub_pop[i]) + for j in range(self.dim): + if np.random.rand() < self.CR: + trial_vector[j] = mutant_vector[j] + + f_candidate = func(trial_vector) + self.evaluations += 1 + + if f_candidate < sub_fit[i]: + sub_pop[i] = trial_vector + sub_fit[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + + if self.evaluations >= self.budget: + break + + # Recombine subpopulations + population = np.vstack(sub_populations) + fitness = np.hstack(sub_fitness) + + # PSO component + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + self.evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + personal_best_positions[i] = population[i] + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + + if self.evaluations >= self.budget: + break + + # Memory mechanism + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + # Adaptive Parameter Adjustment + self._adaptive_parameter_adjustment() + + # Cluster-Based Enhanced Local Search + self._cluster_based_search(population, fitness, func) + + # Memory-Based Search + self._memory_based_search(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicMultiPhaseAnnealingPlus.py b/nevergrad/optimization/lama/DynamicMultiPhaseAnnealingPlus.py new file mode 100644 index 000000000..9ad47aac8 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMultiPhaseAnnealingPlus.py @@ -0,0 +1,125 @@ +import numpy as np + + +class DynamicMultiPhaseAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for greater diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/DynamicMultiStrategyOptimizer.py b/nevergrad/optimization/lama/DynamicMultiStrategyOptimizer.py new file mode 100644 index 000000000..20862cd0a --- /dev/null +++ b/nevergrad/optimization/lama/DynamicMultiStrategyOptimizer.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicMultiStrategyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.8 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 50 + self.strategy_switch_threshold = 0.1 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = DynamicMultiStrategyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicNichePSO_DE_LS.py b/nevergrad/optimization/lama/DynamicNichePSO_DE_LS.py new file mode 100644 index 000000000..b6a430c3f --- /dev/null +++ b/nevergrad/optimization/lama/DynamicNichePSO_DE_LS.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicNichePSO_DE_LS: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.init_num_niches = 5 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < 0.3 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicNichingDEPSOWithRestart.py b/nevergrad/optimization/lama/DynamicNichingDEPSOWithRestart.py new file mode 100644 index 000000000..06afe0fac --- /dev/null +++ b/nevergrad/optimization/lama/DynamicNichingDEPSOWithRestart.py @@ -0,0 +1,148 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicNichingDEPSOWithRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.init_num_niches = 5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + best_niche_idx = np.argmin(local_best_fits) + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (local_bests[best_niche_idx] - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.5 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicPopulationAdaptiveGradientEvolution.py b/nevergrad/optimization/lama/DynamicPopulationAdaptiveGradientEvolution.py new file mode 100644 index 000000000..fb7d848fc --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPopulationAdaptiveGradientEvolution.py @@ -0,0 +1,112 @@ +import numpy as np + + +class DynamicPopulationAdaptiveGradientEvolution: + def __init__(self, budget, initial_population_size=10, max_population_size=50): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.initial_population_size = initial_population_size + self.max_population_size = max_population_size + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + self.diversity_threshold = 1e-3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + def dynamic_population_adjustment(population, fitness, iteration): + if iteration % 10 == 0 and len(population) < self.max_population_size: + population.append(random_vector()) + fitness.append(func(population[-1])) + + # Initialize population + population = [random_vector() for _ in range(self.initial_population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + iteration = 0 + success_count = 0 + while iteration < self.budget: + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(len(population)), size=2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, iteration) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, iteration, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + iteration += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worse parent with the new child + worse_parent_idx = ( + parents_idx[0] if fitness[parents_idx[0]] > fitness[parents_idx[1]] else parents_idx[1] + ) + population[worse_parent_idx] = new_x + fitness[worse_parent_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + # Dynamically adjust population size + dynamic_population_adjustment(population, fitness, iteration) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicPopulationAdaptiveGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicPopulationMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicPopulationMemeticDifferentialEvolution.py new file mode 100644 index 000000000..ce6de27b3 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPopulationMemeticDifferentialEvolution.py @@ -0,0 +1,184 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicPopulationMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_pop_size = 100 + self.min_pop_size = 20 + self.num_subpopulations = 5 + self.subpop_size = self.initial_pop_size // self.num_subpopulations + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 + self.CR = 0.9 + self.local_search_prob = 0.1 + self.restart_threshold = 50 + self.history = [] + + def _initialize_population(self, pop_size): + return np.random.uniform(self.lb, self.ub, (pop_size, self.dim)) + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(len(population)) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, len(population) - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + subpopulations = np.array_split(population, self.num_subpopulations) + subfitness = np.array_split(fitness, self.num_subpopulations) + new_population = [] + new_fitness = [] + + for subpop, subfit in zip(subpopulations, subfitness): + for i in range(len(subpop)): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(len(subpop)) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(subfit) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(subpop, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(subpop, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(subpop, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(subpop, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < subfit[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(subpop[i]) + new_fitness.append(subfit[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: self.num_subpopulations] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.initial_pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + if self.no_improvement_count >= 10: + reduced_pop_size = max(self.min_pop_size, len(population) - 10) + population = population[:reduced_pop_size] + fitness = fitness[:reduced_pop_size] + self.subpop_size = len(population) // self.num_subpopulations + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicPrecisionBalancedEvolution.py b/nevergrad/optimization/lama/DynamicPrecisionBalancedEvolution.py new file mode 100644 index 000000000..5f3f134a9 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPrecisionBalancedEvolution.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DynamicPrecisionBalancedEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 80 + elite_size = 10 + mutation_factor = 0.9 + crossover_probability = 0.85 + recombination_weight = 0.1 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Differential mutation + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = a + mutation_factor * (b - c) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Dynamic recombination towards best + mutant_vector = (1 - recombination_weight) * mutant_vector + recombination_weight * self.x_opt + + # Binomial crossover + trial_vector = np.array( + [ + ( + mutant_vector[j] + if np.random.rand() < crossover_probability or j == np.random.randint(self.dim) + else population[i, j] + ) + for j in range(self.dim) + ] + ) + + # Fitness evaluation and selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Update mutation factor dynamically based on performance + mutation_factor = np.clip( + mutation_factor + 0.02 * (self.f_opt / np.median(fitness) - 1), 0.5, 1.0 + ) + + # Elite replacement to maintain diversity + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in np.random.choice(range(population_size), elite_size, replace=False): + population[idx] = elite_individuals[np.random.randint(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicPrecisionCosineDifferentialSwarm.py b/nevergrad/optimization/lama/DynamicPrecisionCosineDifferentialSwarm.py new file mode 100644 index 000000000..ab46ffde3 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPrecisionCosineDifferentialSwarm.py @@ -0,0 +1,55 @@ +import numpy as np + + +class DynamicPrecisionCosineDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Population size further reduced for higher precision + self.F_base = 0.5 # Lower base mutation factor for very fine adjustments + self.CR_base = 0.8 # Slightly lower base crossover probability + self.adaptive_F_amplitude = 0.25 # Amplitude for the mutation factor oscillation + self.adaptive_CR_amplitude = 0.15 # Amplitude for the crossover rate oscillation + self.epsilon = 1e-10 # To avoid division by zero + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors using cosine modulation + iteration_ratio = i / (self.budget / self.pop_size + self.epsilon) + F = self.F_base + self.adaptive_F_amplitude * np.cos(2 * np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.sin(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Mutation strategy: DE/rand/1/bin with dynamic F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/DynamicPrecisionExplorationOptimizer.py b/nevergrad/optimization/lama/DynamicPrecisionExplorationOptimizer.py new file mode 100644 index 000000000..00cf209c0 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPrecisionExplorationOptimizer.py @@ -0,0 +1,56 @@ +import numpy as np + + +class DynamicPrecisionExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension fixed as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 50 # Adjusted population for broader exploration + mutation_rate = 0.1 # Initial mutation rate + exploration_depth = 0.05 # Depth of exploration around the current best + crossover_probability = 0.7 # Probability of crossover + + # Initialize population within the bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + indices = np.arange(population_size) + np.random.shuffle(indices) + for i in indices: + if np.random.rand() < crossover_probability: + # Crossover operation between two random individuals and the best solution + idx_a, idx_b = np.random.choice(population_size, 2, replace=False) + a, b = population[idx_a], population[idx_b] + mutant = a + mutation_rate * (best_solution - b) + else: + # Mutation only operation for more extensive exploration + mutant = population[i] + np.random.normal(0, exploration_depth, self.dim) + + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + trial_fitness = func(mutant) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = mutant + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = mutant + + # Dynamic adjustments based on the progress + if evaluations % (self.budget // 20) == 0: # Adjustments at finer intervals + mutation_rate *= 0.95 # Decrease mutation rate for finer search as time progresses + exploration_depth *= 0.9 # Reduce exploration depth to focus on local optima + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DynamicPrecisionOptimizer.py b/nevergrad/optimization/lama/DynamicPrecisionOptimizer.py new file mode 100644 index 000000000..59acecd2f --- /dev/null +++ b/nevergrad/optimization/lama/DynamicPrecisionOptimizer.py @@ -0,0 +1,81 @@ +import numpy as np + + +class DynamicPrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Defined dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial strategic setup + current_budget = 0 + population_size = 100 + num_elites = 10 + mutation_rate = 0.8 + crossing_probability = 0.7 + + # Initialize population randomly + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + while current_budget < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elite preservation strategy + elite_indices = np.argsort(fitness)[:num_elites] + new_population[:num_elites] = population[elite_indices] + new_fitness[:num_elites] = fitness[elite_indices] + + # Generate new solutions via mutation and crossover + for i in range(num_elites, population_size): + if current_budget >= self.budget: + break + + # Selection of parents for breeding based on fitness + parents_indices = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[parents_indices] + + # Mutation: differential evolution mutation strategy + mutant = x1 + mutation_rate * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < crossing_probability + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + current_budget += 1 + + # Greedy selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Update population and fitness + population = new_population + fitness = new_fitness + + # Dynamically adjust mutation rate and crossing probability based on progress + progress = current_budget / self.budget + mutation_rate = max(0.5, 1 - progress) # Decrease mutation rate over time + crossing_probability = min(1.0, 0.5 + progress * 0.5) # Increase crossover probability + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/DynamicQuantumAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/DynamicQuantumAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..70fb21058 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumAdaptiveEvolutionStrategy.py @@ -0,0 +1,184 @@ +import numpy as np + + +class DynamicQuantumAdaptiveEvolutionStrategy: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolution.py new file mode 100644 index 000000000..a18df174b --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class DynamicQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.7 # Initial Differential weight + self.initial_CR = 0.7 # Initial Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.amplitude = 0.1 # Quantum amplitude + self.eval_count = 0 + + def __call__(self, func): + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-self.amplitude, self.amplitude, position.shape) * ( + best_position - position + ) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with random components + adaptive_F = self.initial_F + (0.1 * np.random.rand() - 0.05) + adaptive_CR = self.initial_CR + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if self.eval_count % 2 == 0: # Apply quantum every second step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = DynamicQuantumDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch.py b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch.py new file mode 100644 index 000000000..691c96efd --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + # Standard DE mutation and crossover + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart.py b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart.py new file mode 100644 index 000000000..b2a39b19c --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart.py @@ -0,0 +1,137 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.initial_num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + num_elites = self.initial_num_elites + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % (self.population_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + num_elites = max(2, min(self.initial_num_elites, int(self.population_size / 10))) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicQuantumEvolution.py b/nevergrad/optimization/lama/DynamicQuantumEvolution.py new file mode 100644 index 000000000..823649800 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumEvolution.py @@ -0,0 +1,186 @@ +import numpy as np + + +class DynamicQuantumEvolution: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/DynamicQuantumGuidedHybridSearchV7.py b/nevergrad/optimization/lama/DynamicQuantumGuidedHybridSearchV7.py new file mode 100644 index 000000000..d45b6ee29 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumGuidedHybridSearchV7.py @@ -0,0 +1,88 @@ +import numpy as np + + +class DynamicQuantumGuidedHybridSearchV7: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_ratio=0.15, + mutation_scale=0.4, + mutation_decay=0.01, + crossover_prob=0.85, + quantum_boost=True, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_boost = quantum_boost + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Generate a new population + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + # Perform Crossover from elite individuals + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + # Directly copy an elite + child = population[np.random.choice(elite_indices)] + + # Apply quantum boost dynamically + if self.quantum_boost and np.random.random() < 0.95: + child = self.quantum_tuning(child, best_individual) + + # Mutation with dynamic scale adjustment + mutation_scale = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Select the best from the new population + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + # Combine and sort populations based on fitness + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_tuning(self, individual, best_individual): + perturbation = np.random.normal(-0.1, 0.1, self.dimension) + return individual + perturbation * (best_individual - individual) diff --git a/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialHybridSearch.py b/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialHybridSearch.py new file mode 100644 index 000000000..ac99f2941 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialHybridSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class DynamicQuantumLevyDifferentialHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.9 + 0.1 * progress + crossover_rate = 0.8 - 0.3 * progress + quantum_factor = 0.6 - 0.2 * progress + levy_factor = 0.1 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 100 # Increased population size for better diversity + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum and Levy Search + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 20 # Increased local search iterations + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialSwarmOptimization.py b/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialSwarmOptimization.py new file mode 100644 index 000000000..8ea919107 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumLevyDifferentialSwarmOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class DynamicQuantumLevyDifferentialSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 # Adjusted population size for better exploration and exploitation balance + inertia_weight_max = 0.9 # Increased max inertia weight for better exploration in early stages + inertia_weight_min = 0.4 + cognitive_coefficient = 1.2 # Tuned for better search performance + social_coefficient = 1.4 # Tuned for better search performance + differential_weight = 0.8 # Increased differential weight for stronger mutation + crossover_rate = 0.7 # Adjusted for balanced recombination + quantum_factor = 0.08 # Slightly increased for better exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Enhanced Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.3: # Adjusted local search probability for aggressive refinement + local_search_iters = 10 # Adjusted iterations for thorough local search + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicQuantumLevySwarmOptimization.py b/nevergrad/optimization/lama/DynamicQuantumLevySwarmOptimization.py new file mode 100644 index 000000000..9692e3cb5 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumLevySwarmOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class DynamicQuantumLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 80 # Slightly increased population size for better diversity + inertia_weight_max = 0.7 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.4 # Experimentation on coefficients + social_coefficient = 1.6 # Experimentation on coefficients + differential_weight = 0.7 # Adjusted for balanced DE mutation + crossover_rate = 0.85 # Adjusted for balanced recombination + quantum_factor = 0.06 # Adjusted for slightly more exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Enhanced Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.4: # Increased local search probability for aggressive refinement + local_search_iters = 15 # Increased iterations for thorough local search + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicQuantumMemeticOptimizer.py b/nevergrad/optimization/lama/DynamicQuantumMemeticOptimizer.py new file mode 100644 index 000000000..a27c519eb --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumMemeticOptimizer.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicQuantumMemeticOptimizer: + def __init__(self, budget=10000, population_size=60): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.3 + self.elite_fraction = 0.5 + self.memory_size = 30 + self.local_search_probability = 0.7 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = DynamicQuantumMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/DynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..0ddeb291a --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumSwarmOptimization.py @@ -0,0 +1,69 @@ +import numpy as np + + +class DynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.6, + cognitive_weight=1.7, + social_weight=2.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically + self.inertia_weight = 0.9 - 0.5 * (iteration / self.budget) + self.cognitive_weight = 2.5 - 1.5 * (iteration / self.budget) + self.social_weight = 1.5 + 0.7 * (iteration / self.budget) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/DynamicQuantumSwarmOptimizationRefined.py b/nevergrad/optimization/lama/DynamicQuantumSwarmOptimizationRefined.py new file mode 100644 index 000000000..96abe7ce8 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuantumSwarmOptimizationRefined.py @@ -0,0 +1,76 @@ +import numpy as np + + +class DynamicQuantumSwarmOptimizationRefined: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically + delta = 0.5 / self.budget + self.inertia_weight = self.max_inertia_weight - delta * iteration + self.cognitive_weight = self.max_cognitive_weight - delta * iteration + self.social_weight = self.min_social_weight + delta * iteration + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/DynamicQuasiRandomAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/DynamicQuasiRandomAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..998d832ce --- /dev/null +++ b/nevergrad/optimization/lama/DynamicQuasiRandomAdaptiveDifferentialEvolution.py @@ -0,0 +1,158 @@ +import numpy as np +from scipy.stats import qmc + + +class DynamicQuasiRandomAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.local_search_probability = 0.2 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + threshold = 1e-3 + diversity_enhanced = False + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + diversity_enhanced = True + return diversity_enhanced + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + crossover_rate = self.crossover_rate + mutation_factor = self.mutation_factor + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + if np.random.rand() < self.local_search_probability: + local_step = np.random.randn(self.dim) * self.base_lr + local_x = new_x + local_step + local_x = np.clip(local_x, self.bounds[0], self.bounds[1]) + local_f = func(local_x) + evaluations += 1 + if local_f < new_f: + new_population[-1] = local_x + new_fitness[-1] = local_f + if local_f < self.f_opt: + self.f_opt = local_f + self.x_opt = local_x + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + diversity_enhanced = maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + crossover_rate *= 1.05 + mutation_factor = min(1.0, mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + crossover_rate *= 0.95 + mutation_factor = max(0.5, mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + crossover_rate = np.clip(crossover_rate, 0.1, 0.9) + + if diversity_enhanced: + self.base_lr *= 0.9 + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = DynamicQuasiRandomAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/DynamicRefinedGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/DynamicRefinedGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..24ae9e267 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicRefinedGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,181 @@ +import numpy as np + + +class DynamicRefinedGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Initial parameters + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive beta and alpha adjustments based on phases + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Introducing crossover mechanism to create new candidates + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + # Introducing mutation mechanism to create new candidates + if evaluations % (self.budget // 3) == 0: + for i in range(memory_size // 3): + x_mut = memory[np.random.randint(memory_size)] + x_mut += np.random.normal(0, 0.1, self.dim) + x_mut = np.clip(x_mut, func.bounds.lb, func.bounds.ub) + f_mut = func(x_mut) + evaluations += 1 + if f_mut < self.f_opt: + self.f_opt = f_mut + self.x_opt = x_mut + + worst_idx = np.argmax(memory_scores) + if f_mut < memory_scores[worst_idx]: + memory[worst_idx] = x_mut + memory_scores[worst_idx] = f_mut + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/DynamicRefinementGradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/DynamicRefinementGradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..7f097a613 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicRefinementGradientBoostedMemoryAnnealing.py @@ -0,0 +1,167 @@ +import numpy as np + + +class DynamicRefinementGradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Additional: Boosted Diversification Phase + if evaluations % (self.budget // 8) == 0: + for _ in range(memory_size): + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Extra phase for intensive local search near the best solution found + if evaluations > 3 * self.budget // 4: + x_candidate = self._local_refinement(func, self.x_opt) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/DynamicScaleSearch.py b/nevergrad/optimization/lama/DynamicScaleSearch.py new file mode 100644 index 000000000..b9c89e942 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicScaleSearch.py @@ -0,0 +1,62 @@ +import numpy as np + + +class DynamicScaleSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + # Initialize variables + self.f_opt = np.inf + self.x_opt = None + # Start with a random point in the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update optimal solution if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale of the Gaussian perturbations and memory + init_scale = 0.5 + scale = init_scale + memory = [] + adapt_rate = 0.1 # Adaptive rate for scale adjustment + + # Main optimization loop + for i in range(self.budget - 1): + # Dynamic adjustment of the scale based on progress + if i % 100 == 0 and i > 0: + scale = max(scale * 0.9, 0.01) # Reduce scale to fine-tune search + + # Generate a new candidate by perturbing the current point + candidate = current_point + np.random.normal(0, scale, self.dim) + candidate = np.clip(candidate, -5.0, 5.0) + candidate_f = func(candidate) + + # If the candidate is better, move there + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + memory.append(candidate) # Add to memory if successful + + # Update optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Increase scale when making progress + scale += adapt_rate + # Limit memory size + if len(memory) > 20: + memory.pop(0) + else: + # Occasionally jump to a remembered good solution + if memory and np.random.rand() < 0.05: + current_point = memory[np.random.randint(len(memory))] + # Decrease scale when no progress + scale = max(scale * 0.95, 0.01) # Avoid scale becoming too small + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/DynamicSelfAdaptiveOptimizer.py b/nevergrad/optimization/lama/DynamicSelfAdaptiveOptimizer.py new file mode 100644 index 000000000..efe14b005 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicSelfAdaptiveOptimizer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class DynamicSelfAdaptiveOptimizer: + def __init__(self, budget=10000, population_size=40): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.4 + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + # Adaptive Parameter Refinement + diversity = np.std(population, axis=0).mean() + self.inertia_weight = max(0.4, self.inertia_weight * 0.98) + self.mutation_factor = np.random.uniform(0.6, 0.9) + self.crossover_probability = np.random.uniform(0.7, 0.95) + self.cognitive_coeff = np.random.uniform(1.3, 1.7) + self.social_coeff = np.random.uniform(1.3, 1.7) + + if eval_count >= self.budget: + break + + # Enhanced Local Search on selected individuals + if eval_count + self.population_size / 2 <= self.budget: + selected_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + for i in selected_indices: + res = self.nelder_mead(func, population[i]) + if res[1] < fitness[i]: + population[i] = res[0] + fitness[i] = res[1] + if res[1] < global_best_fitness: + global_best = res[0] + global_best_fitness = res[1] + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt + + def nelder_mead(self, func, x_start, tol=1e-6, max_iter=100): + from scipy.optimize import minimize + + res = minimize(func, x_start, method="Nelder-Mead", tol=tol, options={"maxiter": max_iter}) + return res.x, res.fun diff --git a/nevergrad/optimization/lama/DynamicStrategyAdaptiveDE.py b/nevergrad/optimization/lama/DynamicStrategyAdaptiveDE.py new file mode 100644 index 000000000..cd2d7f845 --- /dev/null +++ b/nevergrad/optimization/lama/DynamicStrategyAdaptiveDE.py @@ -0,0 +1,169 @@ +import numpy as np +from scipy.optimize import minimize + + +class DynamicStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 10 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Dynamic strategy: switch mutation strategy based on generation + if generation % 3 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + else: + mutant = ( + x1 + + mutation_factor * (x2 - x3) + + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x1) + ) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/DynamicallyAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/DynamicallyAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..9efa0138e --- /dev/null +++ b/nevergrad/optimization/lama/DynamicallyAdaptiveFireworkAlgorithm.py @@ -0,0 +1,108 @@ +import numpy as np + + +class DynamicallyAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EACDE.py b/nevergrad/optimization/lama/EACDE.py new file mode 100644 index 000000000..65bf6508e --- /dev/null +++ b/nevergrad/optimization/lama/EACDE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EACDE: + def __init__( + self, budget, population_size=50, F_base=0.5, CR_base=0.9, cluster_ratio=0.25, adaptation_frequency=50 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.cluster_ratio = cluster_ratio # Ratio of population to consider for clustering + self.adaptation_frequency = adaptation_frequency # Frequency of adaptation for F and CR + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Clustering top-performing individuals + top_cluster_size = int(self.population_size * self.cluster_ratio) + top_indices = np.argsort(fitness)[:top_cluster_size] + top_cluster = population[top_indices] + + # Adaptive strategy parameters based on performance + if num_evals % self.adaptation_frequency == 0: + successful_indices = fitness < np.median(fitness) + if np.any(successful_indices): + self.F_base = np.clip(np.mean(fitness[successful_indices]), 0.1, 1) + self.CR_base = np.clip(np.mean(fitness[successful_indices]), 0.5, 1) + + for i in range(self.population_size): + if np.random.rand() < np.mean(fitness) / best_fitness: + # Higher mutation rate near best individual + F = self.F_base + 0.1 * np.random.randn() + else: + F = self.F_base + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + CR = self.CR_base + 0.1 * np.random.randn() + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial vector + trial_fitness = func(trial) + num_evals += 1 + if num_evals >= self.budget: + break + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADE.py b/nevergrad/optimization/lama/EADE.py new file mode 100644 index 000000000..95ec5f13f --- /dev/null +++ b/nevergrad/optimization/lama/EADE.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EADe: + def __init__(self, budget, population_size=30, F_base=0.6, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + + def __call__(self, func): + # Initialize population within the bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Tracking the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Begin the evolutionary process + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Jitter and Amplify the F and CR according to the stage of optimization + F = self.F_base + 0.1 * np.sin((num_evals / self.budget) * np.pi) + CR = self.CR_base * (0.5 + 0.5 * np.cos((num_evals / self.budget) * np.pi)) + + # Mutation using differential evolution strategy "rand/1/bin" + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial solution + trial_fitness = func(trial) + num_evals += 1 + + # Selection: Greedily select the better vector + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + # Update the population with new generation + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADEA.py b/nevergrad/optimization/lama/EADEA.py new file mode 100644 index 000000000..0b6511e5f --- /dev/null +++ b/nevergrad/optimization/lama/EADEA.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EADEA: + def __init__(self, budget, population_size=30, crossover_rate=0.8, F_min=0.5, F_max=0.9, archive_size=50): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.archive_size = archive_size + + def __call__(self, func): + # Bounds and dimensionality + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize archive + archive = np.empty((0, dimension)) + + # Best solution found + best_idx = np.argmin(fitness) + best_solution = population[best_idx, :] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor + F = np.random.uniform(self.F_min, self.F_max) + + # Mutation with archive incorporation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if archive.size > 0 and np.random.rand() < 0.1: # Introducing archive mutation + arch_idx = np.random.randint(0, archive.shape[0]) + c = archive[arch_idx] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update archive + if archive.shape[0] < self.archive_size: + archive = np.vstack([archive, population[i]]) + else: + archive[np.random.randint(self.archive_size)] = population[i] + + # Update population + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EADEDM.py b/nevergrad/optimization/lama/EADEDM.py new file mode 100644 index 000000000..095687036 --- /dev/null +++ b/nevergrad/optimization/lama/EADEDM.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EADEDM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_size=10): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight adapted linearly + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size # Number of memory slots + self.memory = [] + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Directly incorporate memory vectors into mutation if available + if self.memory: + m_index = np.random.choice(range(len(self.memory))) + memory_vector = self.memory[m_index] + else: + memory_vector = np.zeros(self.dimension) + + # Mutant vector calculation including memory component + best = population[np.argmin(fitness)] + mutant = x1 + F_current * (best - x1 + x2 - x3 + memory_vector) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with the newly successful mutation vector + if len(self.memory) < self.memory_size: + self.memory.append(mutant - population[i]) + else: + self.memory[np.random.randint(len(self.memory))] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EADEDMGM.py b/nevergrad/optimization/lama/EADEDMGM.py new file mode 100644 index 000000000..48aa1a93e --- /dev/null +++ b/nevergrad/optimization/lama/EADEDMGM.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EADEDMGM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_size=10): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size # Number of memory slots + self.memory = [] + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[np.argmin(fitness)] + + # Incorporate memory vectors into mutation, if available + if self.memory: + m_index = np.random.choice(range(len(self.memory))) + memory_vector = self.memory[m_index] + else: + memory_vector = np.zeros(self.dimension) + + # Mutant vector calculation with memory and guided mutation + mutant = x1 + F_current * (best - x1 + x2 - x3 + memory_vector) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with the newly successful mutation vector + if len(self.memory) < self.memory_size: + self.memory.append(mutant - population[i]) + else: + self.memory[np.random.randint(len(self.memory))] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EADEPC.py b/nevergrad/optimization/lama/EADEPC.py new file mode 100644 index 000000000..17bd5b1ab --- /dev/null +++ b/nevergrad/optimization/lama/EADEPC.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EADEPC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=40, + F_init=0.5, + CR_init=0.9, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F = F_init + self.CR = CR_init + + def __call__(self, func): + # Initialize population uniformly + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive control parameters + success_counter = 0 + adapt_frequency = max(1, int(0.1 * self.budget)) + + while evaluations < self.budget: + F_adapted = np.clip( + np.random.normal(self.F, 0.1), 0.1, 1.0 + ) # Normal distribution around F with clipping + CR_adapted = np.clip( + np.random.normal(self.CR, 0.1), 0.1, 1.0 + ) # Normal distribution around CR with clipping + + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation using "rand/1/bin" strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F_adapted * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.array( + [ + mutant[j] if np.random.rand() < CR_adapted else population[i][j] + for j in range(self.dimension) + ] + ) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + success_counter += 1 + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial + + # Adjust F and CR based on performance after a certain number of evaluations + if evaluations % adapt_frequency == 0: + success_rate = success_counter / (self.population_size * adapt_frequency) + self.F = self.F * (0.9 if success_rate < 0.2 else 1.1) + self.CR = self.CR * (0.9 if success_rate > 0.2 else 1.1) + self.F = max(0.1, min(self.F, 0.9)) + self.CR = max(0.1, min(self.CR, 0.9)) + success_counter = 0 # Reset success counter + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADEPM.py b/nevergrad/optimization/lama/EADEPM.py new file mode 100644 index 000000000..26d8b35ca --- /dev/null +++ b/nevergrad/optimization/lama/EADEPM.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EADEPM: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_size=10, memory_factor=0.1 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size # Number of memory slots + self.memory_factor = memory_factor # Proportion of memory influence + self.memory = [] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[np.argmin(fitness)] + + # Use memory in mutation if available + memory_effect = ( + np.sum(self.memory, axis=0) * self.memory_factor + if self.memory + else np.zeros(self.dimension) + ) + + # Mutation strategy incorporating memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + memory_effect) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Successful trials update the population and memory + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with successful mutation vectors + if len(self.memory) < self.memory_size: + self.memory.append(mutant - population[i]) + else: + # Replace a random memory component + self.memory[np.random.randint(len(self.memory))] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EADEPMC.py b/nevergrad/optimization/lama/EADEPMC.py new file mode 100644 index 000000000..ebd32637e --- /dev/null +++ b/nevergrad/optimization/lama/EADEPMC.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EADEPMC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + F_base=0.5, + CR_base=0.5, + learning_rate=0.1, + p=0.1, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive parameters + self.p = p # Probability of choosing best individuals for mutation + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mutation and crossover probabilities + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + if np.random.rand() < self.p: + # Use best solutions for mutation + indices = np.argsort(fitness)[:3] # Select top 3 performers + else: + # Use random solutions for mutation + indices = np.random.choice(self.population_size, 3, replace=False) + + a, b, c = population[indices] + mutant = a + F_adaptive[i] * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection and adaptivity update + if trial_fitness < fitness[i]: + population[i], fitness[i] = trial, trial_fitness + F_adaptive[i] += self.learning_rate * (1.0 - F_adaptive[i]) + CR_adaptive[i] -= self.learning_rate * CR_adaptive[i] + if trial_fitness < best_fitness: + best_fitness, best_individual = trial_fitness, trial.copy() + else: + F_adaptive[i] -= self.learning_rate * F_adaptive[i] + CR_adaptive[i] += self.learning_rate * (1.0 - CR_adaptive[i]) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADES.py b/nevergrad/optimization/lama/EADES.py new file mode 100644 index 000000000..122ebf19b --- /dev/null +++ b/nevergrad/optimization/lama/EADES.py @@ -0,0 +1,58 @@ +import numpy as np + + +class EADES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.cr = 0.9 # Crossover probability + self.f = 0.8 # Differential weight + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs] + mutant = np.clip(x1 + self.f * (x2 - x3), self.bounds[0], self.bounds[1]) + new_population[i] = mutant + return new_population + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + + while evaluations < self.budget: + mutated_population = self.mutate(population) + offspring_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i]: + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution = fitness[i], population[i] + + # Adaptive differential weight adjustment + self.f *= 0.995 # Gradual decrease to focus more on exploration initially and exploitation later + if evaluations % (self.budget // 10) == 0: + self.f = max(self.f, 0.5) # Prevent it from becoming too small + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EADESC.py b/nevergrad/optimization/lama/EADESC.py new file mode 100644 index 000000000..7bfc27c29 --- /dev/null +++ b/nevergrad/optimization/lama/EADESC.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EADESC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.5, + CR_base=0.9, + adaptive=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.adaptive = adaptive + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation using "best" strategy for faster convergence + indices = [idx for idx in range(self.population_size) if idx != i] + chosen_indices = np.random.choice(indices, 2, replace=False) + x1, x2 = population[chosen_indices] + mutant = best_individual + F[i] * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Strategic Crossover + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptive parameter adjustment + if self.adaptive: + F[i] = max(0.1, min(F[i] + (trial_fitness < fitness[i]) * 0.02 - 0.01, 1.0)) + CR[i] = max(0.1, min(CR[i] + (trial_fitness < fitness[i]) * 0.05 - 0.025, 1.0)) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADEWM.py b/nevergrad/optimization/lama/EADEWM.py new file mode 100644 index 000000000..a336d7d4a --- /dev/null +++ b/nevergrad/optimization/lama/EADEWM.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EADEWM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_size=5): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size + self.memory = [] + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Adaptive F scaling based on the linear progression from initial to end value + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Select three random distinct indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Memory-based mutation strategy: DE/current-to-best/1 with memory + best = population[np.argmin(fitness)] + if self.memory and np.random.rand() < 0.5: + memory_vector = self.memory[np.random.randint(len(self.memory))] + mutant = x1 + F_current * (best - x1 + x2 - x3 + memory_vector - x1) + else: + mutant = x1 + F_current * (best - x1 + x2 - x3) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + + # Evaluate the new candidate + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with difference vector if it improves + if len(self.memory) < self.memory_size: + self.memory.append(mutant - population[i]) + else: + self.memory[np.random.randint(self.memory_size)] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EADE_FIDM.py b/nevergrad/optimization/lama/EADE_FIDM.py new file mode 100644 index 000000000..cb7f9a2cd --- /dev/null +++ b/nevergrad/optimization/lama/EADE_FIDM.py @@ -0,0 +1,54 @@ +import numpy as np + + +class EADE_FIDM: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + for i in range(population_size): + if num_evals >= self.budget: + break + + # Enhanced Adaptive parameters based on fitness percentile + fitness_percentile = np.argsort(np.argsort(fitness)) / population_size + F = 0.8 * (1 - fitness_percentile[i]) # Higher mutation for better solutions + CR = 0.9 * fitness_percentile[i] # Higher crossover for worse solutions + + # Enhanced Mutation: Incorporating best individual + indices = np.random.choice(np.delete(np.arange(population_size), i), 2, replace=False) + x1, x2 = population[indices] + mutant = population[best_idx] + F * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADGM.py b/nevergrad/optimization/lama/EADGM.py new file mode 100644 index 000000000..282c094f7 --- /dev/null +++ b/nevergrad/optimization/lama/EADGM.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EADGM: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=200, + F_base=0.6, + CR_base=0.9, + learning_rate=0.05, + p_best=0.2, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base # Initial mutation factor + self.CR_base = CR_base # Initial crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive parameters + self.p_best = p_best # Probability of using best individual updates + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mutation and crossover probabilities + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Select indices for mutation, ensuring all are unique and not the current index + indices = [idx for idx in range(self.population_size) if idx != i] + selected_indices = np.random.choice(indices, 3, replace=False) + a, b, c = population[selected_indices] + + # Mutation with Gaussian addition for enhanced exploration + G = np.random.normal(0, 1, self.dimension) + mutant = np.clip(a + F_adaptive[i] * (b - c) + G, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection and adaptivity update + if trial_fitness < fitness[i]: + population[i], fitness[i] = trial, trial_fitness + if trial_fitness < best_fitness: + best_fitness, best_individual = trial_fitness, trial.copy() + # Adaptive factor update towards successful mutations + F_adaptive[i] += self.learning_rate * (1.0 - F_adaptive[i]) + CR_adaptive[i] -= self.learning_rate * CR_adaptive[i] + else: + # Adaptive factor degradation towards unsuccessful mutations + F_adaptive[i] -= self.learning_rate * F_adaptive[i] + CR_adaptive[i] += self.learning_rate * (1.0 - CR_adaptive[i]) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EADMMMS.py b/nevergrad/optimization/lama/EADMMMS.py new file mode 100644 index 000000000..2474ca6cf --- /dev/null +++ b/nevergrad/optimization/lama/EADMMMS.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EADMMMS: + def __init__( + self, + budget, + population_size=100, + crossover_rate=0.95, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions and elite solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite and memory periodically + if evaluations % (self.budget // 20) == 0: + sorted_indices = np.argsort(fitness) + elite[:] = population[sorted_indices[: self.elite_size]] + elite_fitness[:] = fitness[sorted_indices[: self.elite_size]] + memory[:] = population[sorted_indices[: self.memory_size]] + memory_fitness[:] = fitness[sorted_indices[: self.memory_size]] + + for i in range(self.population_size): + # Adaptive mutation factor using wave pattern + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation strategy incorporating best, memory, and elite solutions + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.85 else elite[np.random.randint(self.elite_size)] + ) + memory_contrib = memory[np.random.randint(self.memory_size)] + mutant = np.clip(a + F * (best_or_elite - a + memory_contrib - b), lb, ub) + + # Crossover using binomial method + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EADSEA.py b/nevergrad/optimization/lama/EADSEA.py new file mode 100644 index 000000000..ed91463a2 --- /dev/null +++ b/nevergrad/optimization/lama/EADSEA.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EADSEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.elite_size = 10 # Number of elites + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, progress): + mutation_factor = 0.5 + (1 - progress) * 0.5 # Adaptive mutation factor + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(np.delete(np.arange(self.population_size), best_index), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = a + mutation_factor * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, progress): + crossover_rate = 0.7 + 0.2 * progress # Adaptive crossover rate + mask = np.random.rand(self.dimension) < crossover_rate + return np.where(mask, mutant, target) + + def local_search(self, individual, func, iterations=10): + step_size = 0.1 + for _ in range(iterations): + candidate = individual + np.random.uniform(-step_size, step_size, self.dimension) + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + if func(candidate) < func(individual): + individual = candidate + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + progress = evaluations / self.budget + mutants = self.mutate(population, best_index, progress) + trials = np.array( + [self.crossover(population[i], mutants[i], progress) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + + new_best_index = np.argmin(fitness) + if fitness[new_best_index] < fitness[best_index]: + best_index = new_best_index + + # Elitism: preserve top performers + elites_indices = np.argsort(fitness)[: self.elite_size] + for i in elites_indices: + population[i] = self.local_search(population[i], func) + + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/EADSM.py b/nevergrad/optimization/lama/EADSM.py new file mode 100644 index 000000000..dc9ecd501 --- /dev/null +++ b/nevergrad/optimization/lama/EADSM.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EADSM: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.6, + F_amp=0.4, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 10) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx].copy() + elite_fitness = fitness[elite_idx].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with slight modification to increase exploration + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget / 0.8) + + # Mutation using best, random from population and elite + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.85 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover: Binomial with adaptive technique + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory by replacing the worst entry + worst_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_idx]: + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EAMDE.py b/nevergrad/optimization/lama/EAMDE.py new file mode 100644 index 000000000..9aeceec44 --- /dev/null +++ b/nevergrad/optimization/lama/EAMDE.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EAMDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite pool initialization + memory = population[: self.memory_size].copy() + memory_fitness = fitness[: self.memory_size].copy() + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adaptive mutation factor with modified sine function for variability + F = self.F_base + self.F_amp * np.sin(4 * np.pi * evaluations / self.budget) + + for i in range(self.population_size): + # Select mutation candidates + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Mutation using best and a combination of elite and memory + use_memory = np.random.rand() < 0.5 + if use_memory: + m = memory[np.random.randint(self.memory_size)] + mutant = np.clip(a + F * (m - b + c - a), lb, ub) + else: + e = elite[np.random.randint(self.elite_size)] + mutant = np.clip(a + F * (e - b + c - a), lb, ub) + + # Crossover: Binomial, ensuring at least one crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + + # Update individual if improvement + if trial_fitness < fitness[i]: + # Update memory and elite + worst_memory_idx = np.argmax(memory_fitness) + worst_elite_idx = np.argmax(elite_fitness) + + if trial_fitness < memory_fitness[worst_memory_idx]: + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + if trial_fitness < elite_fitness[worst_elite_idx]: + elite[worst_elite_idx] = trial + elite_fitness[worst_elite_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EAMES.py b/nevergrad/optimization/lama/EAMES.py new file mode 100644 index 000000000..99a02be4c --- /dev/null +++ b/nevergrad/optimization/lama/EAMES.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EAMES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.initial_population_size = 50 + self.elite_size = 5 + self.mutation_factor = 0.8 + self.crossover_rate = 0.7 + + def initialize_population(self): + return np.random.uniform( + self.bounds[0], self.bounds[1], (self.initial_population_size, self.dimension) + ) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def differential_mutation(self, population, base_idx): + idxs = np.random.choice(self.initial_population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(x1 + self.mutation_factor * (x2 - x3), self.bounds[0], self.bounds[1]) + return mutant + + def crossover(self, mutant, target): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.initial_population_size + iteration = 0 + best_fitness = np.inf + best_solution = None + + while evaluations < self.budget: + elite_population, elite_fitness = self.select_elites(population, fitness) + + for i in range(self.initial_population_size): + mutant = self.differential_mutation(population, i) + child = self.crossover(mutant, population[i]) + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + population[i] = child + fitness[i] = child_fitness + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + if evaluations >= self.budget: + break + + iteration += 1 + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EAMSDiffEvo.py b/nevergrad/optimization/lama/EAMSDiffEvo.py new file mode 100644 index 000000000..1597acc28 --- /dev/null +++ b/nevergrad/optimization/lama/EAMSDiffEvo.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EAMSDiffEvo: + def __init__(self, budget, population_size=100, F_base=0.6, CR_base=0.9, perturbation=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Modified base mutation factor for potentially better exploration + self.CR_base = CR_base # Base crossover probability remains the same + self.perturbation = perturbation # Perturbation for adaptive parameters + + def __call__(self, func): + # Initialize population and fitness evaluations + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Enhanced mutation with strategy modulation based on best performance + strategy_type = np.random.choice( + ["best", "rand", "rand-to-best", "current-to-rand"], p=[0.3, 0.2, 0.3, 0.2] + ) + F = np.clip(self.F_base + self.perturbation * np.random.randn(), 0.1, 1.0) + CR = np.clip(self.CR_base + self.perturbation * np.random.randn(), 0.0, 1.0) + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + if strategy_type == "best": + mutant = best_individual + F * (a - b) + elif strategy_type == "rand": + mutant = a + F * (b - c) + elif strategy_type == "rand-to-best": + mutant = population[i] + F * (best_individual - population[i]) + F * (a - b) + else: # 'current-to-rand' + mutant = population[i] + F * (a - population[i]) + F * (b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EAMSEA.py b/nevergrad/optimization/lama/EAMSEA.py new file mode 100644 index 000000000..55b983f48 --- /dev/null +++ b/nevergrad/optimization/lama/EAMSEA.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EAMSEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.last_best_fitness = np.inf + self.stagnation_counter = 0 + self.adaptation_rate = 0.05 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, learning_rate): + F = 0.5 * learning_rate + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(np.delete(np.arange(self.population_size), best_index), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = a + F * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def adaptive_crossover(self, target, mutant): + CR = 0.1 + 0.5 * np.random.rand() + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def calculate_learning_rate(self, current_best_fitness, population_var): + if current_best_fitness < self.last_best_fitness: + rate_increase = self.adaptation_rate / (1 + population_var) + else: + rate_increase = -self.adaptation_rate * population_var + return max(0.1, min(0.9, rate_increase)) + + def multi_neighborhood_search(self, best_individual, func): + scales = np.linspace(0.1, 1.0, 10) + candidates = [best_individual + scale * np.random.normal(0, 1, self.dimension) for scale in scales] + candidates = np.clip(candidates, self.bounds[0], self.bounds[1]) + fitnesses = self.evaluate(candidates, func) + best_local_idx = np.argmin(fitnesses) + return candidates[best_local_idx] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + current_best_fitness = fitness[best_index] + population_var = np.var(fitness) + learning_rate = self.calculate_learning_rate(current_best_fitness, population_var) + mutants = self.mutate(population, best_index, learning_rate) + trials = np.array( + [self.adaptive_crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + + best_index = np.argmin(fitness) + if fitness[best_index] > self.last_best_fitness: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + self.last_best_fitness = fitness[best_index] + + if self.stagnation_counter > 20: + population[best_index] = self.multi_neighborhood_search(population[best_index], func) + + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/EAPBES.py b/nevergrad/optimization/lama/EAPBES.py new file mode 100644 index 000000000..9721481a8 --- /dev/null +++ b/nevergrad/optimization/lama/EAPBES.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EAPBES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.elite_size = 5 # Top 10% as elite + self.mutation_rate = 0.1 + self.crossover_probability = 0.5 + + def initialize(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation_mask = np.random.rand(self.dimension) < self.mutation_rate + individual[mutation_mask] += np.random.normal(0, 0.1, np.sum(mutation_mask)) + return np.clip(individual, self.bounds[0], self.bounds[1]) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + point = np.random.randint(self.dimension) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + mask = np.random.rand(self.dimension) < 0.5 + child = parent1 * mask + parent2 * (1 - mask) + return child + + def local_search(self, elite): + perturbation = np.random.normal(0, 0.05, self.dimension) # smaller scale perturbation + candidate = elite + perturbation + return np.clip(candidate, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + population = self.initialize() + best_fitness = np.inf + best_individual = None + + evaluations = 0 + generations = 0 + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + if np.min(fitness) < best_fitness: + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)].copy() + + elites, elite_fitness = self.select_elites(population, fitness) + + if generations % 10 == 0 and generations > 0: # Local search every 10 generations + for i in range(len(elites)): + elites[i] = self.local_search(elites[i]) + + new_population = elites.copy() + while len(new_population) < self.population_size: + parent1, parent2 = population[np.random.choice(len(population), 2, replace=False)] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population = np.vstack([new_population, child]) + + population = new_population + generations += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EAPDELS.py b/nevergrad/optimization/lama/EAPDELS.py new file mode 100644 index 000000000..2a2c90a43 --- /dev/null +++ b/nevergrad/optimization/lama/EAPDELS.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EAPDELS: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.6, + CR_base=0.9, + mutation_strategy="rand/1/bin", + adaptive=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.mutation_strategy = mutation_strategy + self.adaptive = adaptive + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive parameters initialization + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation (DE/rand/1/bin strategy) + indices = [idx for idx in range(self.population_size) if idx != i] + chosen_indices = np.random.choice(indices, 3, replace=False) + x0, x1, x2 = population[chosen_indices] + mutant = x0 + F[i] * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover (binomial) + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + # Intensification: Local Search + local_search_step = 0.1 * (self.upper_bound - self.lower_bound) + local_candidate = best_individual + np.random.uniform( + -local_search_step, local_search_step, self.dimension + ) + local_candidate = np.clip(local_candidate, self.lower_bound, self.upper_bound) + local_fitness = func(local_candidate) + evaluations += 1 + if local_fitness < best_fitness: + best_fitness = local_fitness + best_individual = local_candidate + + # Adaptive parameter update + if self.adaptive: + success = trial_fitness < fitness[i] + F[i] += 0.05 * (success - 0.5) + CR[i] += 0.05 * (success - 0.5) + F[i] = np.clip(F[i], 0.1, 0.9) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EARESDM.py b/nevergrad/optimization/lama/EARESDM.py new file mode 100644 index 000000000..11868bb7f --- /dev/null +++ b/nevergrad/optimization/lama/EARESDM.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EARESDM: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track the best solution and its fitness + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Periodic update of elite solutions + if evaluations % (self.budget // 20) == 0: + elite_idxs = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idxs].copy() + elite_fitness = fitness[elite_idxs].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with directional influence + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: differential mutation with directional influence from elite + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + random_elite = elite[np.random.randint(self.elite_size)] + mutant = np.clip(a + F * (random_elite - a + b - c), lb, ub) + + # Adaptive crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + 0.05 * np.cos( + 2 * np.pi * evaluations / self.budget + ) + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate and select + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update: replace the worst entry if the current individual is better + worst_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_idx]: + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EASO.py b/nevergrad/optimization/lama/EASO.py new file mode 100644 index 000000000..b2c4fbcf7 --- /dev/null +++ b/nevergrad/optimization/lama/EASO.py @@ -0,0 +1,56 @@ +import numpy as np + + +class EASO: + def __init__(self, budget, population_size=100, spiral_rate=0.8, beta=0.4): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.spiral_rate = spiral_rate + self.beta = beta # Introduces a non-linear dynamic adjustment factor + + def __call__(self, func): + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + velocities = np.zeros((self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Evolutionary loop + while num_evals < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) # Random coefficients + # Enhanced spiral updating rule with dynamic adjustment + dynamic_adjustment = self.beta * np.tanh(num_evals / self.budget) # Non-linear adjustment + velocities[i] = ( + r1 * velocities[i] + + r2 * self.spiral_rate * (best_individual - population[i]) + + dynamic_adjustment + * (np.random.uniform(self.lb, self.ub, self.dimension) - population[i]) + ) + + # Update position + population[i] += velocities[i] + population[i] = np.clip(population[i], self.lb, self.ub) + + # Evaluate + updated_fitness = func(population[i]) + num_evals += 1 + + # Selection + if updated_fitness < fitness[i]: + fitness[i] = updated_fitness + if updated_fitness < best_fitness: + best_fitness = updated_fitness + best_individual = population[i] + + if num_evals >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EDAEA.py b/nevergrad/optimization/lama/EDAEA.py new file mode 100644 index 000000000..cd7bf0477 --- /dev/null +++ b/nevergrad/optimization/lama/EDAEA.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EDAEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.speciation_threshold = 0.5 # Distance threshold for speciation + self.initial_cr = 0.9 + self.initial_f = 0.8 + self.initial_temp = 1.0 + self.final_temp = 0.01 + self.alpha = 0.95 # Standard cooling rate + + def initialize_population(self): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + # Opposite-based learning initialization + if np.random.rand() < 0.5: + means = np.mean(population, axis=0) + opposite_population = 2 * means - population + population = np.vstack((population, opposite_population)) + population = population[: self.population_size] # Ensure population size consistency + return population + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, f, temperature): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = population[best_idx] + f * temperature * (x1 - x2 + x3 - population[best_idx]) + # Opposite-based mutation + if np.random.rand() < 0.5: + means = np.mean(mutant) + mutant = 2 * means - mutant + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, cr): + crossover_mask = np.random.rand(self.dimension) < cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + temperature = self.initial_temp + cr = self.initial_cr + f = self.initial_f + + while evaluations < self.budget: + mutated_population = self.mutate(population, best_idx, f, temperature) + offspring_population = np.array( + [ + self.crossover(population[i], mutated_population[i], cr) + for i in range(self.population_size) + ] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i] or np.random.rand() < np.exp( + (fitness[i] - offspring_fitness[i]) / temperature + ): + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution, best_idx = fitness[i], population[i], i + + # Adjust temperature dynamically based on performance + if evaluations % 100 == 0: + improvement_rate = (np.min(fitness) - best_fitness) / best_fitness + if improvement_rate < 0.01: + temperature *= self.alpha**2 # Faster cooling if stagnation + else: + temperature *= self.alpha + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EDAG.py b/nevergrad/optimization/lama/EDAG.py new file mode 100644 index 000000000..facf46d87 --- /dev/null +++ b/nevergrad/optimization/lama/EDAG.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EDAG: + def __init__( + self, + budget, + population_size=100, + initial_step=0.5, + step_decay=0.95, + differential_weight=0.6, + crossover_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_step = initial_step + self.step_decay = step_decay + self.differential_weight = differential_weight + self.crossover_prob = crossover_prob + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + step_size = self.initial_step + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.zeros_like(population) + + # Generate new candidates + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + + # Differential mutation + mutant = population[a] + self.differential_weight * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial = np.where(cross_points, mutant, population[i]) + + # Adaptive gradient-based step + gradient_direction = best_individual - population[i] + trial += step_size * gradient_direction + trial = np.clip(trial, self.lb, self.ub) + + trial_fitness = func(trial) + num_evals += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + step_size *= self.step_decay # Adaptive decay of the step size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EDASOGG.py b/nevergrad/optimization/lama/EDASOGG.py new file mode 100644 index 000000000..c5836bbe8 --- /dev/null +++ b/nevergrad/optimization/lama/EDASOGG.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EDASOGG: + def __init__( + self, + budget, + population_size=30, + spiral_rate=0.5, + beta=0.2, + gradient_descent_factor=0.07, + momentum=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.spiral_rate = spiral_rate # Adjusted for more controlled movement + self.beta = beta # Lowered to reduce rapid changes in dynamics + self.gradient_descent_factor = ( + gradient_descent_factor # Slightly increased for better local exploitation + ) + self.momentum = momentum # New parameter to incorporate historical velocity influence + + def __call__(self, func): + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + velocities = np.zeros((self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + pbest = population.copy() + pbest_fitness = fitness.copy() + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Evolutionary loop + while num_evals < self.budget: + for i in range(self.population_size): + r1, r2, r3, r4 = np.random.rand(4) # Random coefficients + + # Enhanced velocity update formula with momentum and dynamic adjustments + velocities[i] = ( + self.momentum * velocities[i] + + r1 * self.gradient_descent_factor * (pbest[i] - population[i]) + + r2 * self.spiral_rate * (best_individual - population[i]) + + r3 * self.beta * (np.random.uniform(self.lb, self.ub, self.dimension) - population[i]) + ) + + # Update position + population[i] += velocities[i] + population[i] = np.clip(population[i], self.lb, self.ub) + + # Evaluate + updated_fitness = func(population[i]) + num_evals += 1 + + # Update personal and global best + if updated_fitness < pbest_fitness[i]: + pbest[i] = population[i] + pbest_fitness[i] = updated_fitness + + if updated_fitness < best_fitness: + best_fitness = updated_fitness + best_individual = population[i] + + if num_evals >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EDDCEA.py b/nevergrad/optimization/lama/EDDCEA.py new file mode 100644 index 000000000..8f0e75e52 --- /dev/null +++ b/nevergrad/optimization/lama/EDDCEA.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EDDCEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.elite_size = 10 # Slightly larger elite group for better exploitation + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_index, diversity_measure): + F = 0.8 * (2 - diversity_measure) # Adjusted mutation factor + new_population = np.empty_like(population) + for i in range(self.population_size): + idxs = np.random.choice(np.delete(np.arange(self.population_size), best_index), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = a + F * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant, diversity_measure): + CR = 0.4 + 0.6 * diversity_measure # Adjusted crossover probability + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def calculate_diversity(self, population): + mean_population = np.mean(population, axis=0) + diversity = np.mean(np.sqrt(np.sum((population - mean_population) ** 2, axis=1))) + return diversity / np.sqrt(self.dimension * (self.bounds[1] - self.bounds[0]) ** 2) + + def local_search(self, best_individual, func): + # Simple local search around the best individual + perturbation = np.random.normal(0, 0.1, self.dimension) + new_individual = np.clip(best_individual + perturbation, self.bounds[0], self.bounds[1]) + if func(new_individual) < func(best_individual): + return new_individual + return best_individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + diversity_measure = self.calculate_diversity(population) + mutants = self.mutate(population, best_index, diversity_measure) + trials = np.array( + [ + self.crossover(population[i], mutants[i], diversity_measure) + for i in range(self.population_size) + ] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + # Selection with elitism incorporating local search + combined_population = np.vstack((population, trials)) + combined_fitness = np.hstack((fitness, fitness_trials)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + best_index = np.argmin(fitness) + population[best_index] = self.local_search( + population[best_index], func + ) # Apply local search on the best + + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/EDEAS.py b/nevergrad/optimization/lama/EDEAS.py new file mode 100644 index 000000000..aea97e483 --- /dev/null +++ b/nevergrad/optimization/lama/EDEAS.py @@ -0,0 +1,53 @@ +import numpy as np + + +class EDEAS: + def __init__(self, budget, population_size=60, F_min=0.5, F_max=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F_min = F_min # Minimum scale factor + self.F_max = F_max # Maximum scale factor + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Adaptive F based on linear scaling + F_scale = np.linspace(self.F_min, self.F_max, self.population_size) + + while evaluations < self.budget: + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Mutant vector creation with adaptive F + F_i = F_scale[np.argsort(fitness)[i]] # Scale F based on fitness rank + mutant = x1 + F_i * (x2 - x3) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EDEPM.py b/nevergrad/optimization/lama/EDEPM.py new file mode 100644 index 000000000..e9a29c5d6 --- /dev/null +++ b/nevergrad/optimization/lama/EDEPM.py @@ -0,0 +1,64 @@ +import numpy as np + + +class EDEPM: + def __init__( + self, budget, population_size=40, F_init=0.8, F_end=0.1, CR=0.9, memory_size=5, memory_decay=0.05 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size # Number of memory slots + self.memory_decay = memory_decay # Rate of decay for memory influence + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + memory = np.zeros((self.memory_size, self.dimension)) + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[np.argmin(fitness)] + + # Memory influence in mutation + memory_effect = np.mean(memory, axis=0) * np.exp(-self.memory_decay * evaluations) + + # Mutation strategy incorporating memory + mutant = x1 + F_current * (best - x1 + x2 - x3) + memory_effect + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with successful mutation vectors + memory = np.roll(memory, -1, axis=0) + memory[-1] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EDGB.py b/nevergrad/optimization/lama/EDGB.py new file mode 100644 index 000000000..7017c59e7 --- /dev/null +++ b/nevergrad/optimization/lama/EDGB.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EDGB: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + + def __call__(self, func): + population_size = 100 + mutation_factor = 0.5 + recombination_crossover = 0.7 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + evaluations = population_size + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Select three random indices different from i + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Introduce a secondary mutation vector for diversity + d = population[np.random.choice(indices)] + mutant = np.clip( + a + mutation_factor * ((b - c) + 0.5 * (best_individual - d)), + self.lower_bound, + self.upper_bound, + ) + trial = np.array( + [ + mutant[j] if np.random.rand() < recombination_crossover else population[i][j] + for j in range(self.dimension) + ] + ) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adapt mutation factor and crossover incrementally towards the end + progress = evaluations / self.budget + mutation_factor = max(0.2, 0.8 - 0.6 * progress) # Decrease linearly + recombination_crossover = min(0.9, 0.7 + 0.2 * progress) # Increase linearly + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EDMDESM.py b/nevergrad/optimization/lama/EDMDESM.py new file mode 100644 index 000000000..3f4792fa4 --- /dev/null +++ b/nevergrad/optimization/lama/EDMDESM.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EDMDESM: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx].copy() + elite_fitness = fitness[elite_idx].copy() + + for i in range(self.population_size): + # Adaptive mutation factor + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation using best, random from population and elite + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.85 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover: Binomial with adaptive technique + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory by replacing the worst entry + worst_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worst_idx]: + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EDMRL.py b/nevergrad/optimization/lama/EDMRL.py new file mode 100644 index 000000000..fa3b297c6 --- /dev/null +++ b/nevergrad/optimization/lama/EDMRL.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EDMRL: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Track elite solutions + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track the best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions more frequently + if evaluations % (self.budget // 25) == 0: + elite_update_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_update_indices].copy() + elite_fitness = fitness[elite_update_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic amplitude adjustment + F = self.F_base + self.F_amp * np.cos(2 * np.pi * evaluations / self.budget) + + # Mutation strategy, incorporating reflective learning from memory + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Choose between the best solution and a memory solution for mutation base + memory_used = False + if np.random.rand() < 0.1 and np.any(memory_fitness < np.inf): + memory_idx = np.argmin(memory_fitness) + base_solution = memory[memory_idx] + memory_used = True + else: + base_solution = ( + best_solution + if np.random.rand() < 0.85 + else elite[np.random.randint(0, self.elite_size)] + ) + + mutant = np.clip(base_solution + F * (b - c), lb, ub) + + # Binomial crossover with adaptive mutation incorporation + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Replace worst memory entry with current if better and was used + if memory_used: + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EDMS.py b/nevergrad/optimization/lama/EDMS.py new file mode 100644 index 000000000..dce51ab28 --- /dev/null +++ b/nevergrad/optimization/lama/EDMS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EDMS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.7, + F_amp=0.3, + memory_size=40, + elite_size=3, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions, initialized with first found solutions + memory = population[np.argsort(fitness)[: self.memory_size]].copy() + memory_fitness = fitness[np.argsort(fitness)[: self.memory_size]].copy() + + # Elite solutions tracking + elite = population[np.argsort(fitness)[: self.elite_size]].copy() + elite_fitness = fitness[np.argsort(fitness)[: self.elite_size]].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Periodic elite and memory updates + if evaluations % (self.budget // 10) == 0: + sorted_indices = np.argsort(fitness) + memory = population[sorted_indices[: self.memory_size]].copy() + memory_fitness = fitness[sorted_indices[: self.memory_size]].copy() + + elite = population[sorted_indices[: self.elite_size]].copy() + elite_fitness = fitness[sorted_indices[: self.elite_size]].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with a decaying amplitude + F = self.F_base + self.F_amp * np.exp(-4 * evaluations / self.budget) + + # Select mutation strategy based on the evolution state + idxs = np.random.choice(self.population_size, 3, replace=False) + a, b, c = population[idxs] + if np.random.rand() < 0.3: # Intermittently use memory in mutation + memory_idx = np.random.randint(0, self.memory_size) + mutant = np.clip(a + F * (memory[memory_idx] - b + c), lb, ub) + else: + mutant = np.clip(a + F * (elite[0] - b + c), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial.copy() + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EDNAS.py b/nevergrad/optimization/lama/EDNAS.py new file mode 100644 index 000000000..884ad18ce --- /dev/null +++ b/nevergrad/optimization/lama/EDNAS.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EDNAS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.learning_rate = 0.1 + self.mutation_scale = 0.8 + self.elite_size = int(self.population_size * 0.1) # 10% of the population + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, F): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + new_population[i] = mutant_vector + return new_population + + def crossover(self, target, mutant, CR): + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + previous_best = best_fitness + + while evaluations < self.budget: + F = np.random.normal(self.mutation_scale, 0.1) # Adaptive mutation factor + CR = np.clip(np.std(fitness) / np.ptp(fitness), 0.1, 0.9) # Adaptive crossover rate + elite_indices = np.argsort(fitness)[: self.elite_size] + mutants = self.mutate(population, elite_indices, F) + trials = np.array( + [self.crossover(population[i], mutants[i], CR) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + if fitness[i] < best_fitness: + best_fitness = fitness[i] + best_index = i + + if best_fitness < previous_best: + self.learning_rate *= 1.1 + previous_best = best_fitness + else: + self.learning_rate *= 0.9 + if np.random.rand() < 0.1: + reinit_indices = np.random.choice( + len(population), size=int(self.population_size * 0.1), replace=False + ) + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dimension) + ) + population[elite_indices] = population[ + np.argsort(fitness)[: self.elite_size] + ] # Preserve elites + + return best_fitness, population[best_index] diff --git a/nevergrad/optimization/lama/EDNAS_SAMRA.py b/nevergrad/optimization/lama/EDNAS_SAMRA.py new file mode 100644 index 000000000..b2f4c1cc5 --- /dev/null +++ b/nevergrad/optimization/lama/EDNAS_SAMRA.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EDNAS_SAMRA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_scale = 0.8 # Initial mutation scale + self.elite_size = int(self.population_size * 0.1) # 10% of the population + self.mutation_adjustment_factor = 0.05 # Rate of mutation scale adjustment + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, F): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + new_population[i] = mutant_vector + return new_population + + def crossover(self, target, mutant, CR): + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + prev_best_fitness = best_fitness + + while evaluations < self.budget: + F = np.random.normal(self.mutation_scale, 0.1) # Use normal distribution for mutation scale + CR = np.clip(np.std(fitness) / np.ptp(fitness), 0.1, 0.9) # Adaptive crossover rate + elite_indices = np.argsort(fitness)[: self.elite_size] + mutants = self.mutate(population, elite_indices, F) + trials = np.array( + [self.crossover(population[i], mutants[i], CR) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + if fitness[i] < best_fitness: + best_fitness = fitness[i] + + if best_fitness < prev_best_fitness: + prev_best_fitness = best_fitness + self.mutation_scale *= 1 - self.mutation_adjustment_factor + else: + self.mutation_scale *= 1 + self.mutation_adjustment_factor + + # Check population diversity and introduce random individuals if needed + if np.std(population.flatten()) < 0.1: + reinit_indices = np.random.choice( + len(population), size=int(self.population_size * 0.1), replace=False + ) + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dimension) + ) + + return best_fitness, population[np.argmin(fitness)] diff --git a/nevergrad/optimization/lama/EDSDiffEvoM.py b/nevergrad/optimization/lama/EDSDiffEvoM.py new file mode 100644 index 000000000..9a85f8598 --- /dev/null +++ b/nevergrad/optimization/lama/EDSDiffEvoM.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EDSDiffEvoM: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.8, memory_size=5): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + self.memory = {"F": np.full(memory_size, F_base), "CR": np.full(memory_size, CR_base)} + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + # Track the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + # Update and use the memory for F and CR + current_memory_idx = (num_evals // self.population_size) % len(self.memory["F"]) + F = np.clip(self.memory["F"][current_memory_idx] + 0.1 * np.random.randn(), 0.1, 1.0) + CR = np.clip(self.memory["CR"][current_memory_idx] + 0.05 * np.random.randn(), 0.1, 1.0) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Select individuals for mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 3, replace=False) + a, b, c = population[chosen] + + # Mutation: DE/rand/1/bin scheme + mutant = a + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + # Update memory + self.memory["F"][current_memory_idx] = F + self.memory["CR"][current_memory_idx] = CR + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EGBDE.py b/nevergrad/optimization/lama/EGBDE.py new file mode 100644 index 000000000..3718620db --- /dev/null +++ b/nevergrad/optimization/lama/EGBDE.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EGBDE: + def __init__( + self, budget, population_size=30, F_base=0.5, CR_base=0.9, adapt_rate=0.1, gradient_weight=0.2 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base scaling factor for differential evolution + self.CR_base = CR_base # Base crossover rate + self.adapt_rate = adapt_rate # Rate of adaptation for F and CR + self.gradient_weight = gradient_weight # Weighting for gradient influence in mutation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while num_evals < self.budget: + # Adapt F and CR adaptively + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.0, 1.0) + + # Mutation, Crossover, and Selection + for i in range(self.population_size): + if num_evals >= self.budget: + break + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = ( + population[i] + + Fs[i] * (population[a] - population[b]) + + self.gradient_weight * (population[c] - population[i]) + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Boundary correction + trial = np.clip(trial, self.lb, self.ub) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EGGEO.py b/nevergrad/optimization/lama/EGGEO.py new file mode 100644 index 000000000..8c71bfec7 --- /dev/null +++ b/nevergrad/optimization/lama/EGGEO.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EGGEO: + def __init__( + self, + budget, + population_size=50, + gradient_impact=0.1, + random_impact=0.05, + mutation_rate=0.1, + elitism=2, + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.gradient_impact = gradient_impact + self.random_impact = random_impact + self.mutation_rate = mutation_rate + self.elitism = elitism # Number of elites + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Evolutionary loop + while num_evals < self.budget: + # Selection based on fitness + sorted_indices = np.argsort(fitness) + elites_indices = sorted_indices[: self.elitism] + elites = population[elites_indices] + + new_population = np.zeros_like(population) + new_population[: self.elitism] = elites.copy() # Elitism + + for i in range(self.elitism, self.population_size): + parent1_idx, parent2_idx = np.random.choice(elites_indices, 2, replace=False) + crossover_point = np.random.randint(self.dimension) + + # Crossover + child = np.concatenate( + [population[parent1_idx][:crossover_point], population[parent2_idx][crossover_point:]] + ) + + # Mutation + mutation_mask = np.random.rand(self.dimension) < self.mutation_rate + mutation_values = np.random.uniform(-1, 1, self.dimension) + child += mutation_mask * mutation_values + + # Gradient guidance + grad_direction = best_individual - child + child += self.gradient_impact * grad_direction + self.random_impact * np.random.uniform( + -1, 1, self.dimension + ) + child = np.clip(child, self.lb, self.ub) + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + num_evals += self.population_size - self.elitism + + # Update best + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[current_best_idx].copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EHADEEM.py b/nevergrad/optimization/lama/EHADEEM.py new file mode 100644 index 000000000..f7daf9c4a --- /dev/null +++ b/nevergrad/optimization/lama/EHADEEM.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EHADEEM: + def __init__(self, budget, population_size=60, F_base=0.6, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Initial base for differential weight + self.CR_base = CR_base # Initial base for crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Initialize adaptive parameters + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + memory = np.zeros(self.population_size) # Memory for adaptive adjustments + success_count = np.zeros(self.population_size) # Count successful generations for each individual + + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation indices + idxs = np.random.choice( + [idx for idx in range(self.population_size) if idx != i], 3, replace=False + ) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + + # Mutation: DE/best/1/bin scheme using best individual for faster convergence + best = population[np.argmin(fitness)] + mutant = np.clip(best + F[i] * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory[i] += 1 # Increment success memory + success_count[i] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + memory[i] -= 1 # Decay memory on failure + + # Adaptive parameter tuning based on memory + if memory[i] > 3: + F[i] = min(F[i] * 1.1, 1) + CR[i] = min(CR[i] * 1.1, 1) + elif memory[i] < -3: + F[i] = max(F[i] * 0.9, 0.1) + CR[i] = max(CR[i] * 0.8, 0.1) + + # Reset memory if extremes are achieved + if memory[i] > 8 or memory[i] < -8: + memory[i] = 0 + F[i] = self.F_base + CR[i] = self.CR_base + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EHADEMI.py b/nevergrad/optimization/lama/EHADEMI.py new file mode 100644 index 000000000..550da92dd --- /dev/null +++ b/nevergrad/optimization/lama/EHADEMI.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EHADEMI: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.92, + F_base=0.58, + F_amp=0.42, + memory_size=120, + elite_size=12, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor that changes dynamically + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/rand-to-best/1/b strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + random_indices = np.random.choice(idxs, 3, replace=False) + a, b, c = population[random_indices] + + best_or_elite = ( + best_solution if np.random.rand() < 0.8 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover: Binomial with adaptive rate + cross_points = np.random.rand(dimension) < ( + self.crossover_rate * np.sin(2 * np.pi * evaluations / self.budget) + ) + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory by replacing the worst solution + mem_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[mem_idx]: + memory[mem_idx] = population[i] + memory_fitness[mem_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EHDAM.py b/nevergrad/optimization/lama/EHDAM.py new file mode 100644 index 000000000..720e821e2 --- /dev/null +++ b/nevergrad/optimization/lama/EHDAM.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EHDAM: + def __init__( + self, + budget, + population_size=80, + crossover_rate=0.8, + F_base=0.5, + F_amp=0.5, + memory_size=150, + elite_size=20, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((0, dimension)) + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 30) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx] + + for i in range(self.population_size): + # Adaptive mutation factor that changes dynamically + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/rand-to-best-with-memory/1 strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mem_contrib = ( + memory[np.random.randint(0, np.clip(memory.shape[0], 1, self.memory_size))] + if memory.size > 0 + else 0 + ) + best = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best - a + b - c + mem_contrib), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with the old good solutions more aggressively + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + elif np.random.rand() < 0.3: # More frequent replacement of memory entries + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EHDE.py b/nevergrad/optimization/lama/EHDE.py new file mode 100644 index 000000000..ad02f6716 --- /dev/null +++ b/nevergrad/optimization/lama/EHDE.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EHDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.local_search_rate = 0.1 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx): + mutants = np.empty_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.mutation_factor * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Hybrid mutation: mix with best if random chance hits + if np.random.rand() < self.local_search_rate: + mutant = mutant + 0.5 * (population[best_idx] - mutant) + + mutants[i] = mutant + return mutants + + def crossover(self, target, mutant): + mask = np.random.rand(self.dimension) < self.crossover_probability + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + for i in range(self.population_size): + trial = self.crossover(population[i], mutants[i]) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + return new_population, new_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + + while evaluations < self.budget: + mutants = self.mutate(population, best_idx) + population, fitness = self.select(population, fitness, mutants, func) + evaluations += self.population_size + best_idx = np.argmin(fitness) + + # Adapt mutation factor dynamically + diversity = np.std(fitness) + self.mutation_factor = np.clip(0.5 + 0.5 * diversity / np.ptp(fitness), 0.1, 1.0) + + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/EIADEA.py b/nevergrad/optimization/lama/EIADEA.py new file mode 100644 index 000000000..1c3cab59f --- /dev/null +++ b/nevergrad/optimization/lama/EIADEA.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EIADEA: + def __init__( + self, budget, population_size=40, crossover_rate=0.85, F_min=0.5, F_max=0.8, archive_size=40 + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.archive_size = archive_size + + def __call__(self, func): + # Define the bounds and the dimensionality of the problem + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population with random values within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize an archive to store potential solutions + archive = np.empty((0, dimension)) + + # Track the best solution found so far + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor decreases as iterations progress + F = np.random.uniform(self.F_min, self.F_max) * (1 - evaluations / self.budget) + + # Selection of mutation vectors: Ensure diversity by picking distinct indices + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Mutation using DE/rand/1/bin strategy with archive incorporation for diversity + if archive.size > 0 and np.random.rand() < 0.15: + arch_idx = np.random.randint(0, archive.shape[0]) + a = archive[arch_idx] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover: Combine mutant with target vector + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the new candidate solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update archive with the replaced solution if the archive is not full + if archive.shape[0] < self.archive_size: + archive = np.vstack([archive, population[i]]) + else: + # Replace a randomly selected entry in the archive + archive[np.random.randint(0, self.archive_size)] = population[i] + + # Update the population with the new better solution + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution if the new solution is better + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Exit if the budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EMIDE.py b/nevergrad/optimization/lama/EMIDE.py new file mode 100644 index 000000000..700600838 --- /dev/null +++ b/nevergrad/optimization/lama/EMIDE.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EMIDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=100, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for storing historically good solutions + memory = np.empty((0, dimension)) + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % (self.budget // 20) == 0: + # Update elite solutions periodically + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx] + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic oscillation + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/best/1 with elite influence + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(best + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with replaced good solutions + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + elif np.random.rand() < 0.1: + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EMSADE.py b/nevergrad/optimization/lama/EMSADE.py new file mode 100644 index 000000000..ea691f583 --- /dev/null +++ b/nevergrad/optimization/lama/EMSADE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EMSADE: + def __init__( + self, budget, population_size=100, F_base=0.5, CR_base=0.9, strategy_ratio=0.25, perturbation=0.1 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.strategy_ratio = strategy_ratio # Proportion of population applying each strategy + self.perturbation = perturbation # Perturbation for adaptive parameters + + def __call__(self, func): + # Initialize population and fitness assessments + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + strategy_use = np.random.rand() + F = self.F_base + self.perturbation * np.random.randn() + CR = self.CR_base + self.perturbation * np.random.randn() + + if strategy_use < self.strategy_ratio: + # DE/current-to-best/1/bin + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = population[i] + F * (best_individual - population[i]) + F * (a - b) + else: + # DE/rand/2/bin + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = population[np.random.choice(idxs, 4, replace=False)] + mutant = a + F * (b - c) + F * (c - d) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EMSEAS.py b/nevergrad/optimization/lama/EMSEAS.py new file mode 100644 index 000000000..643e1aca3 --- /dev/null +++ b/nevergrad/optimization/lama/EMSEAS.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EMSEAS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.num_subpopulations = 5 + self.subpopulation_size = int(self.population_size / self.num_subpopulations) + self.mutation_factor = 0.9 + self.crossover_rate = 0.7 + self.elite_size = 2 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def adaptive_parameters(self, progress): + # Non-linear mutation factor decay + self.mutation_factor = 0.9 * (1 - progress**2) + self.crossover_rate = 0.7 + 0.3 * progress + + def mutate_and_crossover(self, population, func, progress): + new_population = np.copy(population) + for i in range(self.population_size): + if np.random.rand() < 0.05: # 5% chance of random mutation + mutation = np.random.normal(0, 1, self.dimension) + else: + mutation = self.differential_mutation(population, i) + mutant = np.clip(population[i] + mutation, self.bounds[0], self.bounds[1]) + child = self.crossover(mutant, population[i]) + new_population[i] = child + new_fitness = self.evaluate(new_population, func) + return new_population, new_fitness + + def differential_mutation(self, population, base_idx): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + return self.mutation_factor * (x1 + (x2 - x3)) + + def crossover(self, mutant, target): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + + while evaluations < self.budget: + progress = evaluations / self.budget + self.adaptive_parameters(progress) + population, fitness = self.mutate_and_crossover(population, func, progress) + evaluations += self.population_size + + if evaluations % (self.budget * 0.1) == 0: # Reinitialize 10% of population every 10% of budget + reinit_indices = np.random.choice( + self.population_size, self.population_size // 10, replace=False + ) + population[reinit_indices] = self.initialize_population()[reinit_indices] + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_solution = population[np.argmin(fitness)] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EORAMED.py b/nevergrad/optimization/lama/EORAMED.py new file mode 100644 index 000000000..7a17b125e --- /dev/null +++ b/nevergrad/optimization/lama/EORAMED.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EORAMED: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate # Lowered to encourage more diversity + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size # Reduced to concentrate on fewer, high-quality elites + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population randomly + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite pool + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with linear decay + F = self.F_max - (evaluations / self.budget) * (self.F_max - self.F_min) + + # Mutation with higher emphasis on best and elite + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate and possibly update population + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EPADE.py b/nevergrad/optimization/lama/EPADE.py new file mode 100644 index 000000000..6270b42a2 --- /dev/null +++ b/nevergrad/optimization/lama/EPADE.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EPADE: + def __init__(self, budget, population_size=50, F_base=0.8, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Enhanced base differential weight + self.CR_base = CR_base # Enhanced base crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Adaptive parameters initialization + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + # Introduction of an intensive search in early phase + intensive_search_phase = True + + while evaluations < self.budget: + # Gradual transition from intensive to conservative strategy based on the budget usage + if evaluations > self.budget * 0.5: + intensive_search_phase = False + + for i in range(self.population_size): + # Adaptive mutation factor based on individual performance + if fitness[i] < np.median(fitness): + F[i] = min(F[i] * 1.2, 1) if intensive_search_phase else min(F[i] * 1.1, 1) + else: + F[i] = max(F[i] * 0.8, 0.1) if intensive_search_phase else max(F[i] * 0.9, 0.1) + + # Mutation and crossover using a mixed strategy + idxs = np.random.choice([j for j in range(self.population_size) if j != i], 3, replace=False) + best_idx = np.argmin(fitness) + mutant = population[i] + F[i] * ( + population[best_idx] - population[i] + population[idxs[0]] - population[idxs[1]] + ) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Trial solution evaluation + f_trial = func(trial) + evaluations += 1 + + # Selection and adaptive CR update based on success + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + CR[i] = min(CR[i] * 1.1, 1) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + CR[i] = max(CR[i] * 0.85, 0.1) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EPDE.py b/nevergrad/optimization/lama/EPDE.py new file mode 100644 index 000000000..d5801e3bf --- /dev/null +++ b/nevergrad/optimization/lama/EPDE.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EPDE: + def __init__(self, budget, population_size=50, f_min=0.5, f_max=0.8, cr_min=0.2, cr_max=0.5): + self.budget = budget + self.population_size = population_size + self.f_min = f_min # Minimum scaling factor + self.f_max = f_max # Maximum scaling factor + self.cr_min = cr_min # Minimum crossover probability + self.cr_max = cr_max # Maximum crossover probability + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + num_evals = self.population_size + + while num_evals < self.budget: + for i in range(self.population_size): + # Adjust control parameters based on a non-linear scale (quadratic adjustments) + remaining_budget = self.budget - num_evals + cr = self.cr_min + (self.cr_max - self.cr_min) * ((remaining_budget / self.budget) ** 2) + f = self.f_min + (self.f_max - self.f_min) * ((remaining_budget / self.budget) ** 2) + + # Mutation: DE/rand-to-best/2/bin strategy + indices = np.random.choice(self.population_size, 4, replace=False) + x1, x2, x3, x4 = population[indices] + mutant = np.clip(best_individual + f * (x2 - x3 + x4 - x1), self.lb, self.ub) + + # Crossover + crossover_mask = np.random.rand(self.dimension) < cr + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(crossover_mask, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of EPDE: +# optimizer = EPDE(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/EPWDEM.py b/nevergrad/optimization/lama/EPWDEM.py new file mode 100644 index 000000000..660b49ca6 --- /dev/null +++ b/nevergrad/optimization/lama/EPWDEM.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EPWDEM: + def __init__( + self, budget, population_size=50, crossover_rate=0.85, F_base=0.6, F_amp=0.3, memory_factor=0.1 + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_factor = memory_factor + self.memory_size = int(memory_factor * population_size) + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for elite solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Maintain a memory of best solutions + sorted_indices = np.argsort(fitness) + memory[: self.memory_size] = population[sorted_indices[: self.memory_size]] + memory_fitness[: self.memory_size] = fitness[sorted_indices[: self.memory_size]] + + for i in range(self.population_size): + # Adaptive mutation factor with progressive wave pattern + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: Differential mutation with memory and best solution + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + memory_idx = np.argmin(memory_fitness) + mutant = np.clip(memory[memory_idx] + F * (a - b), lb, ub) + + # Crossover: Exponential + j = np.random.randint(dimension) + trial = np.array(population[i]) # copy current to trial + for k in range(dimension): + if np.random.rand() < self.crossover_rate or k == dimension - 1: + trial[j] = mutant[j] + j = (j + 1) % dimension # modulo increment + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ERADE.py b/nevergrad/optimization/lama/ERADE.py new file mode 100644 index 000000000..3fe06382b --- /dev/null +++ b/nevergrad/optimization/lama/ERADE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ERADE: + def __init__( + self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50, F=0.8, CR=0.9 + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F = F # Mutation factor + self.CR = CR # Crossover probability + + def __call__(self, func): + # Initialization of population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + evaluations = self.population_size + + # Enhanced strategy adjustments + F_min, F_max = 0.5, 1.2 # Mutation factor range + CR_min, CR_max = 0.6, 1.0 # Crossover probability range + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation: DE/rand/1 strategy with adaptive factor + indices = [index for index in range(self.population_size) if index != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + self.F * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover: Binomial + trial = np.array( + [ + mutant[j] if np.random.rand() < self.CR else population[i][j] + for j in range(self.dimension) + ] + ) + trial_fitness = func(trial) + evaluations += 1 + + # Selection: Greedy + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptation of F and CR + mean_fitness = np.mean(fitness) + if best_fitness < mean_fitness: + self.F = min(F_max, self.F + 0.03 * (mean_fitness - best_fitness)) + self.CR = max(CR_min, self.CR - 0.01 * (mean_fitness - best_fitness)) + else: + self.F = max(F_min, self.F - 0.01) + self.CR = min(CR_max, self.CR + 0.01) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ERADS.py b/nevergrad/optimization/lama/ERADS.py new file mode 100644 index 000000000..cf782e33b --- /dev/null +++ b/nevergrad/optimization/lama/ERADS.py @@ -0,0 +1,75 @@ +import numpy as np + + +class ERADS: + def __init__( + self, + budget, + population_size=50, + F_init=0.5, + F_end=0.8, + CR=0.9, + memory_factor=0.2, + mutation_strategy="best", + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Weight given to memory in mutation + self.mutation_strategy = mutation_strategy # Mutation strategy: 'best' or 'rand' + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # A single vector holds the cumulative memory + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + if self.mutation_strategy == "best": + x_base = population[best_index] + else: + x_base = population[np.random.randint(0, self.population_size)] + + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 2, replace=False + ) + x1, x2 = population[indices] + + # Mutant vector incorporating memory + mutant = x_base + F_current * (x1 - x2 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update best index + + # Update memory with the successful mutation direction scaled by F + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdaptiveDynamic.py b/nevergrad/optimization/lama/ERADS_AdaptiveDynamic.py new file mode 100644 index 000000000..6d747db25 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptiveDynamic.py @@ -0,0 +1,62 @@ +import numpy as np + + +class ERADS_AdaptiveDynamic: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.15): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, dynamically adjusting + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamically adapt scaling factor using a more aggressive nonlinear adaptation + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t**2 # Square law for rapid changes + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector calculation using updated memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover to create the trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Memory update prioritizing recent successful directions + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + trial - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdaptiveDynamicPlus.py b/nevergrad/optimization/lama/ERADS_AdaptiveDynamicPlus.py new file mode 100644 index 000000000..9793fa72b --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptiveDynamicPlus.py @@ -0,0 +1,64 @@ +import numpy as np + + +class ERADS_AdaptiveDynamicPlus: + def __init__(self, budget, population_size=75, F_init=0.5, F_end=0.85, CR=0.92, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Adapt the scaling factor F using a cosine annealing schedule for a smoother transition + progress = evaluations / self.budget + F_current = self.F_end + 0.5 * (self.F_init - self.F_end) * (1 + np.cos(np.pi * progress)) + + for i in range(self.population_size): + # Select three distinct random population indices different from the current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector creation that integrates memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Keep within bounds + + # Crossover to produce the trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection step + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the best solution index + + # Update the memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdaptiveHybrid.py b/nevergrad/optimization/lama/ERADS_AdaptiveHybrid.py new file mode 100644 index 000000000..fad495c98 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptiveHybrid.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ERADS_AdaptiveHybrid: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.95, CR_init=0.6, CR_end=0.95, memory_factor=0.1 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial Crossover probability + self.CR_end = CR_end # Final Crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of scaling factor and crossover probability + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdaptivePlus.py b/nevergrad/optimization/lama/ERADS_AdaptivePlus.py new file mode 100644 index 000000000..4afc3ce84 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptivePlus.py @@ -0,0 +1,60 @@ +import numpy as np + + +class ERADS_AdaptivePlus: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR_init=0.9, CR_end=0.7, memory_factor=0.3 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, dynamically adjusting + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + memory = np.zeros((self.population_size, self.dimension)) # Initialize memory for each individual + + while evaluations < self.budget: + # Adaptive scaling factor and crossover probability + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + # Mutation using differential evolution strategy + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Differential mutation incorporating adaptive memory + mutant = x1 + F_current * (x2 - x3 + self.memory_factor * memory[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Enforcing bounds + + # Crossover operation + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory[i] = (1 - self.memory_factor) * memory[i] + self.memory_factor * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + # Obtain the best result + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ERADS_AdaptiveProgressive.py b/nevergrad/optimization/lama/ERADS_AdaptiveProgressive.py new file mode 100644 index 000000000..7e6f081ad --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptiveProgressive.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ERADS_AdaptiveProgressive: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR_init=0.8, CR_end=0.95, memory_factor=0.15 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial Crossover probability + self.CR_end = CR_end # Final Crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory for successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of scaling factor and crossover probability based on evaluation budget ratio + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector creation incorporating memory and dynamic F scaling + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover with dynamic CR + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdaptiveRefinement.py b/nevergrad/optimization/lama/ERADS_AdaptiveRefinement.py new file mode 100644 index 000000000..c3ffd24d0 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdaptiveRefinement.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ERADS_AdaptiveRefinement: + def __init__(self, budget, population_size=60, F_init=0.5, F_end=0.75, CR=0.9, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting based on a nonlinear schedule + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Nonlinear adaptation of the scaling factor using a sigmoidal approach + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * (1 - np.exp(-10 * t + 5)) / ( + 1 + np.exp(-10 * t + 5) + ) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Advanced.py b/nevergrad/optimization/lama/ERADS_Advanced.py new file mode 100644 index 000000000..0ecc0dc1a --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Advanced.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ERADS_Advanced: + def __init__( + self, budget, population_size=80, F_init=0.7, F_end=0.5, CR_init=0.9, CR_end=0.6, memory_factor=0.25 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Calculate the progression ratio + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Create the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdvancedDynamic.py b/nevergrad/optimization/lama/ERADS_AdvancedDynamic.py new file mode 100644 index 000000000..98a40180f --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdvancedDynamic.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_AdvancedDynamic: + def __init__( + self, budget, population_size=50, F_min=0.5, F_max=0.8, CR=0.9, memory_factor=0.25, adaptive=True + ): + self.budget = budget + self.population_size = population_size + self.F_min = F_min # Minimum scaling factor for mutation + self.F_max = F_max # Maximum scaling factor for mutation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Factor to integrate memory into mutation + self.adaptive = adaptive # Flag to enable/disable dynamic adaptation of F + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Memory for successful mutation directions + + while evaluations < self.budget: + F_current = self.F_min + (self.F_max - self.F_min) * np.sin( + np.pi * evaluations / self.budget + ) # Sinusoidal adaptation of F + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector creation with memory influence + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover to create trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection step + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_AdvancedRefined.py b/nevergrad/optimization/lama/ERADS_AdvancedRefined.py new file mode 100644 index 000000000..f8bdc49ba --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_AdvancedRefined.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_AdvancedRefined: + def __init__(self, budget, population_size=100, F_init=0.6, F_end=0.85, CR=0.95, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_DynamicPrecision.py b/nevergrad/optimization/lama/ERADS_DynamicPrecision.py new file mode 100644 index 000000000..9b67a13b7 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_DynamicPrecision.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_DynamicPrecision: + def __init__(self, budget, population_size=60, F_init=0.48, F_end=0.82, CR=0.92, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * np.sqrt(evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Enhanced.py b/nevergrad/optimization/lama/ERADS_Enhanced.py new file mode 100644 index 000000000..af02ad4c4 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Enhanced.py @@ -0,0 +1,83 @@ +import numpy as np + + +class ERADS_Enhanced: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.1, adaptive_CR=False + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.adaptive_CR = adaptive_CR # Adaptive crossover probability + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + CR_values = np.full(self.population_size, self.CR) # Initialize CR values if adaptive CR is enabled + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Adaptively updating the crossover probability + CR_i = CR_values[i] if self.adaptive_CR else self.CR + + # Selection of distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_i, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + # Adaptively adjust the CR based on success + if self.adaptive_CR: + CR_values[i] = min( + 1.0, CR_values[i] + 0.1 * (1 - self.CR) + ) # Increase CR slightly upon success + else: + if self.adaptive_CR: + CR_values[i] = max( + 0.5, CR_values[i] - 0.1 * self.CR + ) # Decrease CR slightly upon failure + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_EnhancedPrecision.py b/nevergrad/optimization/lama/ERADS_EnhancedPrecision.py new file mode 100644 index 000000000..86fcf99d3 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_EnhancedPrecision.py @@ -0,0 +1,68 @@ +import numpy as np + + +class ERADS_EnhancedPrecision: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR_init=0.9, CR_end=0.7, memory_factor=0.4 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, dynamically adjusting + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + f_opt = fitness[best_index] + x_opt = population[best_index] + evaluations = self.population_size + global_memory = np.zeros( + self.dimension + ) # Initialize global memory to store successful mutation directions + + while evaluations < self.budget: + # Adaptive scaling factor and crossover probability + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + # Mutation using differential evolution strategy + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Differential mutation incorporating global memory + mutant = x1 + F_current * (x2 - x3 + self.memory_factor * global_memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Enforcing bounds + + # Crossover operation + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and global memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < f_opt: + f_opt = f_trial + x_opt = trial + best_index = i # Update the best solution index + # Update memory with the successful mutation direction + global_memory = (1 - self.memory_factor) * global_memory + self.memory_factor * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/ERADS_HyperOptimized.py b/nevergrad/optimization/lama/ERADS_HyperOptimized.py new file mode 100644 index 000000000..6a5e2f288 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_HyperOptimized.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_HyperOptimized: + def __init__(self, budget, population_size=60, F_init=0.5, F_end=0.9, CR=0.95, memory_factor=0.15): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_NextGen.py b/nevergrad/optimization/lama/ERADS_NextGen.py new file mode 100644 index 000000000..2ea91a36e --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_NextGen.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_NextGen: + def __init__(self, budget, population_size=50, F_init=0.5, F_final=0.8, CR=0.9, memory_factor=0.2): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_final = F_final # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + f_opt = fitness[best_index] + x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_final - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < f_opt: + f_opt = f_trial + x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/ERADS_Optimized.py b/nevergrad/optimization/lama/ERADS_Optimized.py new file mode 100644 index 000000000..cd550d571 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Optimized.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_Optimized: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.2): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Precision.py b/nevergrad/optimization/lama/ERADS_Precision.py new file mode 100644 index 000000000..ed8c30864 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Precision.py @@ -0,0 +1,80 @@ +import numpy as np + + +class ERADS_Precision: + def __init__( + self, + budget, + population_size=40, + F_init=0.55, + F_end=0.9, + CR_init=0.95, + CR_end=0.7, + memory_factor=0.3, + elite_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.elite_factor = elite_factor # Percentage of population considered elite + + def __call__(self, func): + # Initialize population uniformly within the bounds and calculate their fitness + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + elite_size = int(self.population_size * self.elite_factor) + + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory + + while evaluations < self.budget: + # Update scaling factor and crossover probability based on progress + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + # Sort population based on fitness and define elite members + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 2, replace=False + ) + x1, x2 = population[indices] + elite_member = elite_population[np.random.randint(0, elite_size)] + + # Differential mutation with elite member influence and memory factor + mutant = x1 + F_current * ( + elite_member - x1 + x2 - population[i] + self.memory_factor * memory + ) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Ensure within bounds + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection step + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + if f_trial < fitness[np.argmin(fitness)]: + fitness[np.argmin(fitness)] = f_trial + population[np.argmin(fitness)] = trial + + if evaluations >= self.budget: + break + + # Return the best solution found + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ERADS_ProgressiveAdaptive.py b/nevergrad/optimization/lama/ERADS_ProgressiveAdaptive.py new file mode 100644 index 000000000..c8a5af3a0 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressiveAdaptive.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_ProgressiveAdaptive: + def __init__(self, budget, population_size=60, F_init=0.5, F_end=0.75, CR=0.95, memory_factor=0.15): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_ProgressiveAdaptivePlus.py b/nevergrad/optimization/lama/ERADS_ProgressiveAdaptivePlus.py new file mode 100644 index 000000000..b277e9c49 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressiveAdaptivePlus.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_ProgressiveAdaptivePlus: + def __init__(self, budget, population_size=60, F_init=0.5, F_end=0.9, CR=0.98, memory_factor=0.1): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_ProgressiveDynamic.py b/nevergrad/optimization/lama/ERADS_ProgressiveDynamic.py new file mode 100644 index 000000000..9a67ebf4c --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressiveDynamic.py @@ -0,0 +1,66 @@ +import numpy as np + + +class ERADS_ProgressiveDynamic: + def __init__(self, budget, population_size=100, F_init=0.5, F_end=0.9, CR=0.9, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Quadratic adaptation of the scaling factor over the course of optimization + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress**2 + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_ProgressiveOptimized.py b/nevergrad/optimization/lama/ERADS_ProgressiveOptimized.py new file mode 100644 index 000000000..d3f959f2f --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressiveOptimized.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ERADS_ProgressiveOptimized: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR_init=0.9, CR_end=0.95, memory_factor=0.2 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory for successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of scaling factor and crossover probability based on the ratio of evaluations to budget + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector creation incorporating memory and dynamic F scaling + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover with dynamically adjusted CR + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_ProgressivePrecision.py b/nevergrad/optimization/lama/ERADS_ProgressivePrecision.py new file mode 100644 index 000000000..57f529af3 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressivePrecision.py @@ -0,0 +1,81 @@ +import numpy as np + + +class ERADS_ProgressivePrecision: + def __init__( + self, + budget, + population_size=50, + F_start=0.5, + F_peak=0.85, + CR=0.9, + memory_factor=0.2, + adaptive_CR=False, + ): + self.budget = budget + self.population_size = population_size + self.F_start = F_start # Initial scaling factor for mutation + self.F_peak = F_peak # Mid-point peak scaling factor, for aggressive exploration + self.CR = CR # Initial Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.adaptive_CR = adaptive_CR # Option to adapt crossover probability based on success rate + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + successful_CR = [] + + while evaluations < self.budget: + # Non-linear adaptation of the scaling factor + t = evaluations / self.budget + if t < 0.5: + F_current = self.F_start + (self.F_peak - self.F_start) * np.sin( + np.pi * t + ) # Sinusoidal increase to peak + else: + F_current = self.F_peak - (self.F_peak - self.F_start) * np.sin( + np.pi * (t - 0.5) + ) # Symmetric decrease + + # Adaptive CR based on past successes + if self.adaptive_CR and successful_CR: + self.CR = np.clip(np.mean(successful_CR), 0.1, 0.9) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + successful_CR.append(self.CR) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_ProgressiveRefinement.py b/nevergrad/optimization/lama/ERADS_ProgressiveRefinement.py new file mode 100644 index 000000000..a2451b5c6 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_ProgressiveRefinement.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_ProgressiveRefinement: + def __init__( + self, budget, population_size=50, F_init=0.5, F_mid=0.7, F_end=0.9, CR=0.9, memory_factor=0.25 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_mid = F_mid # Mid-course scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Non-linear adaptation of the scaling factor using a piecewise function + t = evaluations / self.budget + if t < 0.5: + F_current = self.F_init + (self.F_mid - self.F_init) * (2 * t) # Accelerate quickly initially + else: + F_current = self.F_mid + (self.F_end - self.F_mid) * (2 * (t - 0.5)) # Then adapt more gently + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumFlux.py b/nevergrad/optimization/lama/ERADS_QuantumFlux.py new file mode 100644 index 000000000..a66a24b57 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumFlux.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_QuantumFlux: + def __init__(self, budget, population_size=55, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumFluxPro.py b/nevergrad/optimization/lama/ERADS_QuantumFluxPro.py new file mode 100644 index 000000000..53033cbb1 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumFluxPro.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_QuantumFluxPro: + def __init__(self, budget, population_size=50, F_init=0.6, F_end=0.9, CR=0.92, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumFluxUltra.py b/nevergrad/optimization/lama/ERADS_QuantumFluxUltra.py new file mode 100644 index 000000000..46ca5b771 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumFluxUltra.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_QuantumFluxUltra: + def __init__(self, budget, population_size=40, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.5): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefined.py b/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefined.py new file mode 100644 index 000000000..4e09ebdf5 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefined.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_QuantumFluxUltraRefined: + def __init__(self, budget, population_size=50, F_init=0.55, F_end=0.85, CR=0.95, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefinedPlus.py b/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefinedPlus.py new file mode 100644 index 000000000..b56860aff --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumFluxUltraRefinedPlus.py @@ -0,0 +1,76 @@ +import numpy as np + + +class ERADS_QuantumFluxUltraRefinedPlus: + def __init__( + self, + budget, + population_size=100, + F_init=0.5, + F_end=0.9, + CR=0.9, + memory_factor=0.2, + adaptation_rate=0.05, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.adaptation_rate = adaptation_rate # Rate at which F and CR adapt based on the success rate + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + # Adaptation of F and CR based on the success of mutations and crossovers + successful_mutation_count = 0 + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + successful_mutation_count += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + # Dynamically adjust mutation and crossover strategy based on success rate + success_rate = successful_mutation_count / evaluations + F_current = F_current + self.adaptation_rate * (success_rate - 0.5) * (self.F_end - self.F_init) + self.CR = self.CR + self.adaptation_rate * (success_rate - 0.5) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_QuantumLeap.py b/nevergrad/optimization/lama/ERADS_QuantumLeap.py new file mode 100644 index 000000000..e69cdb727 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_QuantumLeap.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_QuantumLeap: + def __init__(self, budget, population_size=60, F_init=0.6, F_end=0.9, CR=0.85, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Refined.py b/nevergrad/optimization/lama/ERADS_Refined.py new file mode 100644 index 000000000..6792b982a --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Refined.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_Refined: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, gradually adapting + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection process and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Memory update mechanism using successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + trial - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Superior.py b/nevergrad/optimization/lama/ERADS_Superior.py new file mode 100644 index 000000000..7a2b945cb --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Superior.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ERADS_Superior: + def __init__( + self, budget, population_size=50, F_init=0.8, F_end=0.5, CR_init=0.95, CR_end=0.5, memory_factor=0.2 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Calculate the progression ratio + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Create the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_Ultra.py b/nevergrad/optimization/lama/ERADS_Ultra.py new file mode 100644 index 000000000..97190befb --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_Ultra.py @@ -0,0 +1,72 @@ +import numpy as np + + +class ERADS_Ultra: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.15, adapt_factor=0.1 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.adapt_factor = ( + adapt_factor # Factor to adjust mutation and crossover based on evolutionary progress + ) + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Adjust variables based on the progress + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + adaptive_CR = self.CR + (0.1 - self.CR) * np.sin( + np.pi * progress + ) # Oscillating CR for enhanced exploration and exploitation + + for i in range(self.population_size): + # Selection of distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < adaptive_CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * (mutant - population[i]) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamic.py b/nevergrad/optimization/lama/ERADS_UltraDynamic.py new file mode 100644 index 000000000..71a0d0b76 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamic.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamic: + def __init__(self, budget, population_size=55, F_init=0.55, F_end=0.85, CR=0.92, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMax.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMax.py new file mode 100644 index 000000000..9a23016f0 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMax.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMax: + def __init__(self, budget, population_size=55, F_init=0.52, F_end=0.85, CR=0.95, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxEnhanced.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxEnhanced.py new file mode 100644 index 000000000..ee8f10136 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxEnhanced.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxEnhanced: + def __init__(self, budget, population_size=100, F_init=0.5, F_end=0.9, CR=0.9, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHybrid.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHybrid.py new file mode 100644 index 000000000..b6b0eaad0 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHybrid.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHybrid: + def __init__( + self, budget, population_size=100, F_init=0.5, F_end=0.8, CR_init=0.9, CR_end=0.6, memory_factor=0.4 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear interpolation for F and CR over the course of evaluations + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector calculation includes best solution and memory factor + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover to create a trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection based on fitness improvement + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * (mutant - population[i]) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyper.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyper.py new file mode 100644 index 000000000..95623d123 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyper.py @@ -0,0 +1,76 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyper: + def __init__( + self, + budget, + population_size=50, + F_base=0.55, + F_amp=0.25, + CR=0.98, + memory_factor=0.3, + adaptation_factor=0.02, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base scaling factor for mutation + self.F_amp = F_amp # Amplitude factor for dynamic adaptability of mutation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory to guide mutation based on past successful steps + self.adaptation_factor = ( + adaptation_factor # Factor to enhance adaptation during mutation and crossover + ) + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of the scaling factor using a sinusoidal function for robust exploration + t = evaluations / self.budget + F_current = self.F_base + self.F_amp * np.sin(np.pi * t) # Sinusoidal function for F + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + # Adapt CR and memory factor based on periodicity of the search space exploration + if (evaluations / self.budget) % 0.1 == 0: + self.CR = min(1, self.CR + self.adaptation_factor * (0.5 - np.random.random())) + self.memory_factor = max(0, self.memory_factor - self.adaptation_factor) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimized.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimized.py new file mode 100644 index 000000000..8e57b3053 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimized.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperOptimized: + def __init__( + self, budget, population_size=100, F_init=0.5, F_end=0.9, CR_init=0.9, CR_end=0.6, memory_factor=0.2 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros( + (self.population_size, self.dimension) + ) # Initialize memory to store successful mutation directions for each individual + + while evaluations < self.budget: + # Linear adaptation of the scaling factor and crossover probability + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_init + (self.CR_end - self.CR_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating adaptive memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory[i]) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory[i] = (1 - self.memory_factor) * memory[i] + self.memory_factor * ( + mutant - population[i] + ) + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimizedV4.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimizedV4.py new file mode 100644 index 000000000..ebc168862 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperOptimizedV4.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperOptimizedV4: + def __init__(self, budget, population_size=70, F_init=0.5, F_end=0.75, CR=0.97, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Non-linear adaptation of the scaling factor, emphasizing an aggressive start and gentle finish + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) ** 2 + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperPlus.py new file mode 100644 index 000000000..1f402b57f --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperPlus.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperPlus: + def __init__( + self, + budget, + population_size=50, + F_init=0.55, + F_end=0.85, + CR_init=0.95, + CR_end=0.85, + memory_factor=0.25, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor and crossover probability + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefined.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefined.py new file mode 100644 index 000000000..08eadc811 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefined.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperRefined: + def __init__(self, budget, population_size=50, F_init=0.55, F_end=0.85, CR=0.95, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimized.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimized.py new file mode 100644 index 000000000..38b13abe3 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimized.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperRefinedOptimized: + def __init__(self, budget, population_size=50, F_init=0.55, F_end=0.95, CR=0.92, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV2.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV2.py new file mode 100644 index 000000000..0e0b02825 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV2.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperRefinedOptimizedV2: + def __init__(self, budget, population_size=100, F_init=0.5, F_end=0.9, CR=0.9, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV3.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV3.py new file mode 100644 index 000000000..7fb265d6b --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedOptimizedV3.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperRefinedOptimizedV3: + def __init__(self, budget, population_size=60, F_init=0.6, F_end=0.9, CR=0.88, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Gradual adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedPlus.py new file mode 100644 index 000000000..8f2685a4e --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxHyperRefinedPlus.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxHyperRefinedPlus: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.9, CR=0.9, memory_factor=0.5): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimal.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimal.py new file mode 100644 index 000000000..764f7d773 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimal.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxOptimal: + def __init__(self, budget, population_size=60, F_init=0.55, F_end=0.9, CR=0.9, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusts over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor and crossover probability over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimized.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimized.py new file mode 100644 index 000000000..e0d873391 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimized.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxOptimized: + def __init__(self, budget, population_size=55, F_init=0.60, F_end=0.90, CR=0.92, memory_factor=0.38): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimizedPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimizedPlus.py new file mode 100644 index 000000000..77eb123bc --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxOptimizedPlus.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxOptimizedPlus: + def __init__(self, budget, population_size=60, F_init=0.58, F_end=0.88, CR=0.93, memory_factor=0.38): + self.budget = budget + self.population_size = ( + population_size # Slightly increased for better exploration-exploitation balance + ) + self.F_init = F_init # More aggressive initial mutation factor + self.F_end = F_end # Higher final mutation factor for late-stage intensification + self.CR = CR # Adjusted crossover probability for more robust exploration + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space boundaries + self.memory_factor = ( + memory_factor # Enhanced memory factor for leveraging successful mutation patterns + ) + + def __call__(self, func): + # Initialize population within defined bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros( + self.dimension + ) # Memory initialization for storing direction of successful mutations + + while evaluations < self.budget: + # Adapt mutation factor over the optimization period + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector generation incorporating memory of successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover to create the trial solution + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory with successful mutation direction + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPlus.py new file mode 100644 index 000000000..18588f873 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPlus.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxPlus: + def __init__(self, budget, population_size=50, F_init=0.55, F_end=0.85, CR=0.95, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPrecision.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPrecision.py new file mode 100644 index 000000000..ae33ea37c --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxPrecision.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxPrecision: + def __init__(self, budget, population_size=60, F_init=0.6, F_end=0.9, CR=0.92, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefined.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefined.py new file mode 100644 index 000000000..431966a58 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefined.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxRefined: + def __init__( + self, budget, population_size=70, F_init=0.5, F_end=0.9, CR_init=0.8, CR_end=0.5, memory_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusts over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusts over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor and crossover probability over the course of optimization + t = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * t + CR_current = self.CR_init + (self.CR_end - self.CR_init) * t + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefinedPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefinedPlus.py new file mode 100644 index 000000000..37955a93b --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxRefinedPlus.py @@ -0,0 +1,66 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxRefinedPlus: + def __init__(self, budget, population_size=55, F_init=0.55, F_end=0.88, CR=0.92, memory_factor=0.35): + self.budget = budget + self.population_size = ( + population_size # Adjusted for better balance between exploration and exploitation + ) + self.F_init = F_init # Starting mutation factor + self.F_end = ( + F_end # Ending mutation factor, heightened to extend aggressive search later into the runtime + ) + self.CR = CR # Crossover probability, slightly reduced for finer-grained exploration + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Defined bounds for the problem + self.memory_factor = memory_factor # Increased to enhance influence of successful directions + + def __call__(self, func): + # Initialize population within the search space + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Memory for storing efficacious mutations + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * ( + evaluations / self.budget + ) # Mutation factor adaptation + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Differential mutation considering memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover to generate the trial solution + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory with the successful mutation + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxSupreme.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxSupreme.py new file mode 100644 index 000000000..fc12c275f --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxSupreme.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxSupreme: + def __init__(self, budget, population_size=55, F_init=0.5, F_end=0.95, CR=0.9, memory_factor=0.35): + self.budget = budget + self.population_size = population_size # Adjusted for optimal exploration-exploitation + self.F_init = F_init # More conservative initial mutation factor + self.F_end = F_end # Higher final mutation factor for forceful late exploration + self.CR = CR # Adapted crossover probability for increased diversity + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space limits + self.memory_factor = ( + memory_factor # Adapted memory factor for enhanced exploitation of successful directions + ) + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linearly adapt the mutation factor throughout optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Prevent selection of the current index + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Generate mutant vector with dynamic memory usage + mutant = x1 + F_current * (best - x1 + x2 - x3) + self.memory_factor * memory + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover to create the trial solution + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Evaluate and potentially replace the current individual + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + # Update memory based on successful mutation + memory = (1 - self.memory_factor) * memory + self.memory_factor * (mutant - population[i]) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltra.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltra.py new file mode 100644 index 000000000..6bc4e8e1c --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltra.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltra: + def __init__(self, budget, population_size=45, F_init=0.58, F_end=0.88, CR=0.93, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraPlus.py new file mode 100644 index 000000000..420abe0ef --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraPlus.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraPlus: + def __init__(self, budget, population_size=45, F_init=0.5, F_end=0.9, CR=0.88, memory_factor=0.25): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutations + self.F_end = ( + F_end # Final scaling factor for mutations, increasing over time for aggressive late exploration + ) + self.CR = CR # Crossover probability, slightly reduced to maintain good solutions + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to enhance mutation strategy based on past success + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory for successful mutation directions + + while evaluations < self.budget: + # Adapt mutation factor linearly over the course of the optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Select three different random indices, excluding the current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Generate mutant vector using current population, best solution, and memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Ensure mutant is within bounds + + # Perform crossover to create the trial solution + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Update if the trial solution is better + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update best index + + # Update memory with successful mutation scaled by mutation factor + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefined.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefined.py new file mode 100644 index 000000000..4702ff2d0 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefined.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefined: + def __init__(self, budget, population_size=50, F_init=0.6, F_end=0.9, CR=0.92, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutations + self.F_end = F_end # Final scaling factor for mutations, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = ( + memory_factor # Memory factor to guide mutation based on past successful directions + ) + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store directions of successful mutations + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV2.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV2.py new file mode 100644 index 000000000..7760d3295 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV2.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV2: + def __init__(self, budget, population_size=50, F_init=0.6, F_end=0.95, CR=0.90, memory_factor=0.4): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV3.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV3.py new file mode 100644 index 000000000..ea22eccb6 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV3.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV3: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.9, CR=0.92, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV4.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV4.py new file mode 100644 index 000000000..d7c261e37 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV4.py @@ -0,0 +1,71 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV4: + def __init__( + self, + budget, + population_size=50, + F_init=0.55, + F_end=0.9, + CR_start=0.95, + CR_end=0.85, + memory_factor=0.4, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_start = CR_start # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Adjusting parameters dynamically + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_start + (self.CR_end - self.CR_start) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector generation incorporating the adaptive memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover operation with dynamic CR + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection with memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV5.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV5.py new file mode 100644 index 000000000..b29e4c28d --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV5.py @@ -0,0 +1,73 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV5: + def __init__( + self, + budget, + population_size=50, + F_init=0.55, + F_end=0.95, + CR_start=0.9, + CR_end=0.75, + memory_factor=0.35, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = ( + F_end # Final scaling factor for mutation, adjusting to encourage deeper exploration at the end + ) + self.CR_start = CR_start # Crossover probability starts high for exploration + self.CR_end = CR_end # Crossover probability ends lower for exploitation + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamically adjust scaling factor F and crossover probability CR + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_start + (self.CR_end - self.CR_start) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector generation incorporating adaptive memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover operation with dynamic CR + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection with memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV6.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV6.py new file mode 100644 index 000000000..debe708dd --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV6.py @@ -0,0 +1,64 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV6: + def __init__( + self, budget, population_size=40, F_init=0.5, F_end=0.9, CR_start=0.85, CR_end=0.6, memory_factor=0.25 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation + self.CR_start = CR_start # Starting crossover probability + self.CR_end = CR_end # Ending crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Adapt scaling factor F and crossover probability CR dynamically + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_start + (self.CR_end - self.CR_start) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector generation incorporating memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover operation with dynamic CR + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV7.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV7.py new file mode 100644 index 000000000..3b0b58320 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV7.py @@ -0,0 +1,64 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV7: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.95, CR_init=0.9, CR_end=0.7, memory_factor=0.35 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Dynamic adaptation of the scaling factor F and crossover probability CR + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_init + (self.CR_end - self.CR_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Mutant vector generation with memory influence + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover operation + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV8.py b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV8.py new file mode 100644 index 000000000..ed0a39180 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicMaxUltraRefinedV8.py @@ -0,0 +1,80 @@ +import numpy as np + + +class ERADS_UltraDynamicMaxUltraRefinedV8: + def __init__( + self, + budget, + population_size=50, + F_init=0.55, + F_end=0.95, + CR_init=0.95, + CR_end=0.8, + memory_factor=0.35, + adaptive_memory=True, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.adaptive_memory = adaptive_memory # Toggle to adapt memory factor + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor and crossover probability + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_init + (self.CR_end - self.CR_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating adaptive memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Adaptive memory update mechanism + if self.adaptive_memory: + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + else: + memory += mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicPlus.py b/nevergrad/optimization/lama/ERADS_UltraDynamicPlus.py new file mode 100644 index 000000000..d61626aa9 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicPlus.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicPlus: + def __init__(self, budget, population_size=60, F_init=0.48, F_end=0.82, CR=0.94, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionEnhanced.py b/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionEnhanced.py new file mode 100644 index 000000000..f44f1a279 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionEnhanced.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ERADS_UltraDynamicPrecisionEnhanced: + def __init__(self, budget, population_size=55, F_init=0.53, F_end=0.87, CR=0.93, memory_factor=0.32): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Linear adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionOptimized.py b/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionOptimized.py new file mode 100644 index 000000000..4d08a338f --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraDynamicPrecisionOptimized.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ERADS_UltraDynamicPrecisionOptimized: + def __init__(self, budget, population_size=60, F_init=0.58, F_end=0.88, CR=0.92, memory_factor=0.35): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Exponential decay adaptation of the scaling factor over the course of optimization + F_current = self.F_init + (self.F_end - self.F_init) * np.exp( + -3 * (1 - evaluations / self.budget) + ) + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Creation of the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraEnhanced.py b/nevergrad/optimization/lama/ERADS_UltraEnhanced.py new file mode 100644 index 000000000..3503d3f22 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraEnhanced.py @@ -0,0 +1,71 @@ +import numpy as np + + +class ERADS_UltraEnhanced: + def __init__( + self, + budget, + population_size=50, + F_init=0.45, + F_end=0.85, + CR_init=0.95, + CR_end=0.65, + memory_factor=0.25, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraMax.py b/nevergrad/optimization/lama/ERADS_UltraMax.py new file mode 100644 index 000000000..06d9c5f07 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraMax.py @@ -0,0 +1,59 @@ +import numpy as np + + +class ERADS_UltraMax: + def __init__(self, budget, population_size=100, F_init=0.8, F_end=0.2, CR=0.9, memory_factor=0.3): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting dynamically + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + # Inverse adaptation of the scaling factor (high exploration in early phase, high exploitation in late phase) + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Enhanced mutant vector generation incorporating more memory influence + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection process with memory update on successful mutation + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i + + memory += self.memory_factor * F_current * (trial - population[i]) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraOptimized.py b/nevergrad/optimization/lama/ERADS_UltraOptimized.py new file mode 100644 index 000000000..89311a276 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraOptimized.py @@ -0,0 +1,69 @@ +import numpy as np + + +class ERADS_UltraOptimized: + def __init__( + self, budget, population_size=100, F_init=0.6, F_end=0.3, CR_init=0.9, CR_end=0.6, memory_factor=0.3 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + self.f_opt = fitness[best_index] + self.x_opt = population[best_index] + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory to store successful mutation directions + + while evaluations < self.budget: + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + for i in range(self.population_size): + # Selection of three distinct random population indices different from current index i + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[best_index] + + # Create the mutant vector incorporating memory of past successful mutations + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip( + mutant, self.bounds[0], self.bounds[1] + ) # Ensure mutant remains within bounds + + # Crossover operation to generate trial vector + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection: Replace the old vector if the trial vector has better fitness + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + best_index = i # Update the index of the best solution found + + # Update memory with the successful mutation direction scaled by F_current + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ERADS_UltraPrecise.py b/nevergrad/optimization/lama/ERADS_UltraPrecise.py new file mode 100644 index 000000000..760009bf6 --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraPrecise.py @@ -0,0 +1,77 @@ +import numpy as np + + +class ERADS_UltraPrecise: + def __init__( + self, + budget, + population_size=100, + F_init=0.5, + F_end=0.8, + CR_init=0.9, + CR_end=0.7, + memory_factor=0.3, + elite_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = ( + memory_factor # Memory factor to guide mutation based on past successful directions + ) + self.elite_factor = elite_factor # Proportion of the elite population + + def __call__(self, func): + # Initialize population uniformly within the bounds and evaluate fitness + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + memory = np.zeros((self.population_size, self.dimension)) + + while evaluations < self.budget: + # Update scaling factor and crossover probability based on progress + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + # Identify elite population + elite_size = int(self.population_size * self.elite_factor) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(self.population_size): + # Select indices for mutation, exclude current index and elite indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + elite_member = elite_population[np.random.randint(elite_size)] + + # Differential mutation incorporating elite member influence and adaptive memory + mutant = x1 + F_current * (elite_member - x1 + x2 - x3 + self.memory_factor * memory[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Enforcing bounds + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection step with memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory[i] = (1 - self.memory_factor) * memory[i] + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + # Return the best solution found + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ERADS_UltraRefined.py b/nevergrad/optimization/lama/ERADS_UltraRefined.py new file mode 100644 index 000000000..7a280658a --- /dev/null +++ b/nevergrad/optimization/lama/ERADS_UltraRefined.py @@ -0,0 +1,78 @@ +import numpy as np + + +class ERADS_UltraRefined: + def __init__( + self, + budget, + population_size=50, + F_init=0.5, + F_end=0.85, + CR_init=0.95, + CR_end=0.75, + memory_factor=0.25, + elite_factor=0.15, + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial scaling factor for mutation + self.F_end = F_end # Final scaling factor for mutation, adjusting over time + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability, adjusting over time + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Memory factor to guide mutation based on past successful steps + self.elite_factor = elite_factor # Percentage of population considered elite + + def __call__(self, func): + # Initialize population uniformly within the bounds and calculate their fitness + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + elite_size = int(self.population_size * self.elite_factor) + + evaluations = self.population_size + memory = np.zeros(self.dimension) # Initialize memory + + while evaluations < self.budget: + # Update scaling factor and crossover probability based on progress + progress = evaluations / self.budget + F_current = self.F_init + (self.F_end - self.F_init) * progress + CR_current = self.CR_init + (self.CR_end - self.CR_init) * progress + + # Sort population based on fitness and define elite members + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + elite_member = elite_population[np.random.randint(0, elite_size)] + + # Differential mutation with elite member influence and memory factor + mutant = x1 + F_current * (elite_member - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Ensure within bounds + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection step + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + if f_trial < fitness[np.argmin(fitness)]: + fitness[np.argmin(fitness)] = f_trial + population[np.argmin(fitness)] = trial + + if evaluations >= self.budget: + break + + # Return the best solution found + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/ERAMEDS.py b/nevergrad/optimization/lama/ERAMEDS.py new file mode 100644 index 000000000..adad3e7ac --- /dev/null +++ b/nevergrad/optimization/lama/ERAMEDS.py @@ -0,0 +1,100 @@ +import numpy as np + + +class ERAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + memory_fade_interval=100, + ): + self.budget = budget + self.population_size = population_size + self.initial_crossover_rate = initial_crossover_rate + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.memory_fade_interval = memory_fade_interval + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + memory_age = np.zeros(self.memory_size, dtype=int) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Dynamically adjust crossover rate + self.crossover_rate = self.initial_crossover_rate - evaluations / self.budget * ( + self.initial_crossover_rate - 0.6 + ) + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic modulation + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 with enhanced selection + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + use_elite = np.random.rand() < 0.6 + 0.4 * (best_fitness / (elite_fitness.mean() + 1e-6)) + best_or_elite = elite[np.random.randint(0, self.elite_size)] if use_elite else best_solution + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory with fading mechanism + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update with fading mechanism + worst_idx = np.argmax(memory_fitness) + if ( + memory_fitness[worst_idx] > fitness[i] + or memory_age[worst_idx] > self.memory_fade_interval + ): + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + memory_age[worst_idx] = 0 + else: + memory_age[worst_idx] += 1 + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ESADE.py b/nevergrad/optimization/lama/ESADE.py new file mode 100644 index 000000000..4f806a8a0 --- /dev/null +++ b/nevergrad/optimization/lama/ESADE.py @@ -0,0 +1,84 @@ +import numpy as np + + +class ESADE: + def __init__(self, budget, population_size=50, F_base=0.5, CR_base=0.8): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # As problem specification + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Initialize adaptive parameters + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + # Introduce memory for F and CR to enhance strategy adaptation + F_memory = np.zeros(self.population_size) + CR_memory = np.zeros(self.population_size) + + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation strategy based on fitness and memory + if fitness[i] < np.median(fitness): + F[i] = min(F[i] * 1.1, 1) # Limit F to a maximum of 1 + F_memory[i] += 1 + else: + F[i] = max(F[i] * 0.9, 0.1) # Ensure F does not fall below 0.1 + F_memory[i] /= 1.1 + + # Mutation and crossover + idxs = [idx for idx in range(self.population_size) if idx != i] + best_idx = np.argmin(fitness[idxs]) + a, b, c = ( + population[idxs[best_idx]], + population[np.random.choice(idxs)], + population[np.random.choice(idxs)], + ) + mutant = np.clip(a + F[i] * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + f_trial = func(trial) + evaluations += 1 + + # Selection: Accept the trial if it is better + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + CR[i] = min(CR[i] * 1.05, 1) # Limit CR to a maximum of 1 + CR_memory[i] += 1 + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + CR[i] = max(CR[i] * 0.95, 0.1) # Ensure CR does not drop below 0.1 + CR_memory[i] /= 1.1 + + # Dynamically adjust strategy based on memory + if F_memory[i] > 3 or CR_memory[i] > 3: + F[i] = self.F_base + CR[i] = self.CR_base + F_memory[i] = 0 + CR_memory[i] = 0 + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ESADEPFLLP.py b/nevergrad/optimization/lama/ESADEPFLLP.py new file mode 100644 index 000000000..59a437111 --- /dev/null +++ b/nevergrad/optimization/lama/ESADEPFLLP.py @@ -0,0 +1,79 @@ +import numpy as np + + +class ESADEPFLLP: + def __init__(self, budget, population_size=50, F_init=0.5, CR_init=0.9, local_search_steps=10): + self.budget = budget + self.CR_init = CR_init + self.F_init = F_init + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.local_search_steps = local_search_steps + + def __call__(self, func): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + F = self.F_init + CR = self.CR_init + success_memory = [] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Differential evolution operations + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + success_memory.append(1) + else: + success_memory.append(0) + + # Local Learning Phase + if trial_fitness < best_fitness: + local_best = trial + local_best_fitness = trial_fitness + for _ in range(self.local_search_steps): + local_candidate = local_best + np.random.normal(0, 0.1, self.dimension) + local_candidate = np.clip(local_candidate, self.lower_bound, self.upper_bound) + local_fitness = func(local_candidate) + evaluations += 1 + if local_fitness < local_best_fitness: + local_best = local_candidate + local_best_fitness = local_fitness + if local_best_fitness < best_fitness: + best_solution = local_best + best_fitness = local_best_fitness + + if evaluations >= self.budget: + break + + # Adaptive Feedback Mechanism + if len(success_memory) > 20: # Using the last 20 steps to calculate success rate + success_rate = np.mean(success_memory[-20:]) + F = np.clip(F + 0.1 * (success_rate - 0.5), 0.1, 1.0) + CR = np.clip(CR + 0.1 * (success_rate - 0.5), 0.1, 1.0) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ESBASM.py b/nevergrad/optimization/lama/ESBASM.py new file mode 100644 index 000000000..d37f36041 --- /dev/null +++ b/nevergrad/optimization/lama/ESBASM.py @@ -0,0 +1,64 @@ +import numpy as np + + +class ESBASM: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.memory_size = 10 + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def update_memory(self, best_individual, func): + if len(self.memory) < self.memory_size: + self.memory.append(best_individual) + else: + worst_idx = np.argmax([func(m) for m in self.memory]) + if func(best_individual) < func(self.memory[worst_idx]): + self.memory[worst_idx] = best_individual + + def memory_guided_mutation(self, individual, func): + if not self.memory: + return individual # No memory yet, return individual unchanged + # Select a random memory element and apply mutation + memory_individual = self.memory[np.random.randint(len(self.memory))] + mutation_strength = np.random.rand(self.dimension) + # Apply mutation based on difference with a memory individual + mutated = individual + mutation_strength * (memory_individual - individual) + return np.clip(mutated, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + mutant = self.memory_guided_mutation(population[i], func) + mutant_fitness = func(mutant) + evaluations += 1 + + if mutant_fitness < fitness[i]: + population[i] = mutant + fitness[i] = mutant_fitness + + if mutant_fitness < fitness[best_idx]: + best_idx = i + best_individual = mutant + + if evaluations >= self.budget: + break + + # Update the memory with the current best individual + self.update_memory(best_individual, func) + + return fitness[best_idx], best_individual diff --git a/nevergrad/optimization/lama/EliteAdaptiveCrowdingHybridOptimizer.py b/nevergrad/optimization/lama/EliteAdaptiveCrowdingHybridOptimizer.py new file mode 100644 index 000000000..edceff012 --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveCrowdingHybridOptimizer.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteAdaptiveCrowdingHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + dist[i] += np.linalg.norm(population[i] - population[j]) + return dist + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Maintain diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individuals = np.random.uniform(self.bounds[0], self.bounds[1], (1, self.dim)) + distances = self.crowding_distance(new_individuals) + if np.min(distances) > np.min(dist): + population = np.vstack([population, new_individuals]) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, (1, self.dim))]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individuals]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/EliteAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..7639f0b44 --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveHybridDEPSO.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EliteAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + elite_size = 5 + w = 0.5 # Inertia weight for PSO + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/EliteAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..ba8049f29 --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EliteAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 80 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.25 and evaluations + 2 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=4 + ) + evaluations += 2 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations + int(0.10 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.10 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + elite_size = int(0.10 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.01, 0.01, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteAdaptiveMemoryDynamicCrowdingOptimizerV2.py b/nevergrad/optimization/lama/EliteAdaptiveMemoryDynamicCrowdingOptimizerV2.py new file mode 100644 index 000000000..2f22a4c04 --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveMemoryDynamicCrowdingOptimizerV2.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteAdaptiveMemoryDynamicCrowdingOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + adaptive_memory=True, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + self.adaptive_memory = adaptive_memory + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..017a5e49b --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,168 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch.py b/nevergrad/optimization/lama/EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch.py new file mode 100644 index 000000000..a7c49d040 --- /dev/null +++ b/nevergrad/optimization/lama/EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + self.adaptive_memory_rate = 0.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def hybrid_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.hybrid_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(personal_bests, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < global_best_fit: + global_best_fit = learned_fitness[i] + global_best = learned_population[i] + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteCovarianceMatrixAdaptationMemeticSearch.py b/nevergrad/optimization/lama/EliteCovarianceMatrixAdaptationMemeticSearch.py new file mode 100644 index 000000000..c6ba6a4ac --- /dev/null +++ b/nevergrad/optimization/lama/EliteCovarianceMatrixAdaptationMemeticSearch.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EliteCovarianceMatrixAdaptationMemeticSearch: + def __init__( + self, budget, population_size=50, memetic_rate=0.5, elite_fraction=0.2, learning_rate=0.01, sigma=0.3 + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.elite_fraction = elite_fraction + self.learning_rate = learning_rate + self.sigma = sigma + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def covariance_matrix_adaptation(self, func, pop, scores, mean, C): + n_samples = len(pop) + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + self.sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean and covariance matrix + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + + for iteration in range(max_iterations): + # Perform covariance matrix adaptation step + pop, scores = self.covariance_matrix_adaptation(func, pop, scores, mean, C) + + # Perform memetic local search + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + pop[i], scores[i] = self.local_search(func, pop[i], scores[i]) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean and covariance matrix + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteDynamicHybridOptimizer.py b/nevergrad/optimization/lama/EliteDynamicHybridOptimizer.py new file mode 100644 index 000000000..e1ffea673 --- /dev/null +++ b/nevergrad/optimization/lama/EliteDynamicHybridOptimizer.py @@ -0,0 +1,132 @@ +import numpy as np + + +class EliteDynamicHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 80 # Adjusted population size for a balance between speed and exploration + self.initial_F = 0.6 # Adjusted to promote diversity + self.initial_CR = 0.9 # Increased to maintain crossover rate + self.c1 = 1.0 # Slightly reduced for better control over exploration + self.c2 = 1.0 + self.w = 0.5 # Increased inertia weight for keeping momentum + self.elite_fraction = 0.3 # Increased to give more weight to elite solutions + self.diversity_threshold = 1e-6 # Reduced threshold for a more responsive reinitialization + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 20, self.budget - evaluations + ) # Increased iterations for better local search + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.02, bounds.lb, bounds.ub + ) # Reduced perturbation for precision + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteDynamicMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EliteDynamicMemoryHybridOptimizer.py new file mode 100644 index 000000000..aae416794 --- /dev/null +++ b/nevergrad/optimization/lama/EliteDynamicMemoryHybridOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteDynamicMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteDynamicMultiStrategyHybridDEPSO.py b/nevergrad/optimization/lama/EliteDynamicMultiStrategyHybridDEPSO.py new file mode 100644 index 000000000..a2fcceb08 --- /dev/null +++ b/nevergrad/optimization/lama/EliteDynamicMultiStrategyHybridDEPSO.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EliteDynamicMultiStrategyHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.6 # Increased inertia weight for PSO + c1 = 1.1 # Increased cognitive coefficient for PSO + c2 = 1.3 # Increased social coefficient for PSO + initial_F = 0.7 # Initial differential weight for DE + initial_CR = 0.8 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedAdaptiveRestartDE.py b/nevergrad/optimization/lama/EliteGuidedAdaptiveRestartDE.py new file mode 100644 index 000000000..0e7bbfe5b --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedAdaptiveRestartDE.py @@ -0,0 +1,112 @@ +import numpy as np + + +class EliteGuidedAdaptiveRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.restart_threshold = 0.01 # Threshold to trigger a restart + + def __call__(self, func): + def initialize_population(): + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + return pop, fitness + + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + pop, fitness = initialize_population() + self.budget -= self.pop_size + + generation = 0 + best_fitness_history = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Track the best fitness value over generations + best_fitness_history.append(np.min(fitness)) + + # Check if a restart is needed + if len(best_fitness_history) > 10: + recent_improvement = np.abs(best_fitness_history[-10] - best_fitness_history[-1]) + if recent_improvement < self.restart_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + continue + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedDualStrategyDE.py b/nevergrad/optimization/lama/EliteGuidedDualStrategyDE.py new file mode 100644 index 000000000..7866cf3fc --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedDualStrategyDE.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EliteGuidedDualStrategyDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 # Increased population size + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 # Increased crossover probability + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.stagnation_threshold = 30 # Restart after 30 stagnant generations + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Check for stagnation and restart if needed + if best_fitness == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + best_fitness = self.f_opt + + if stagnation_counter >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedHybridAdaptiveDE.py b/nevergrad/optimization/lama/EliteGuidedHybridAdaptiveDE.py new file mode 100644 index 000000000..71d02454c --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedHybridAdaptiveDE.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EliteGuidedHybridAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Adaptive reset based on population diversity + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on diversity + diversity = np.mean(np.std(population, axis=0)) + if diversity < self.epsilon: + # If diversity is too low, reinitialize half the population + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedHybridDE.py b/nevergrad/optimization/lama/EliteGuidedHybridDE.py new file mode 100644 index 000000000..d6b723744 --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedHybridDE.py @@ -0,0 +1,112 @@ +import numpy as np + + +class EliteGuidedHybridDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.stagnation_threshold = 20 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Check for stagnation and restart if needed + if best_fitness == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + best_fitness = self.f_opt + + if stagnation_counter >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedMutationDE.py b/nevergrad/optimization/lama/EliteGuidedMutationDE.py new file mode 100644 index 000000000..11b7658d9 --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedMutationDE.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedMutationDE_v2.py b/nevergrad/optimization/lama/EliteGuidedMutationDE_v2.py new file mode 100644 index 000000000..aa65d56d7 --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedMutationDE_v2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EliteGuidedMutationDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.stagnation_threshold = 20 + self.archive = [] + self.stagnation_counter = 0 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + new_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in new_pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteGuidedQuantumAdaptiveDE.py b/nevergrad/optimization/lama/EliteGuidedQuantumAdaptiveDE.py new file mode 100644 index 000000000..521e57a40 --- /dev/null +++ b/nevergrad/optimization/lama/EliteGuidedQuantumAdaptiveDE.py @@ -0,0 +1,138 @@ +import numpy as np + + +class EliteGuidedQuantumAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, local_search_steps=100): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Replace worst individuals with elite-guided recombination + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, bounds[0], bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteHybridAdaptiveOptimizer.py b/nevergrad/optimization/lama/EliteHybridAdaptiveOptimizer.py new file mode 100644 index 000000000..0c3a080ee --- /dev/null +++ b/nevergrad/optimization/lama/EliteHybridAdaptiveOptimizer.py @@ -0,0 +1,155 @@ +import numpy as np + + +class EliteHybridAdaptiveOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + + def gradient_descent(self, x, func, budget, step_size=0.01): + best_x = x.copy() + best_f = func(x) + grad = np.zeros(self.dim) + for _ in range(budget): + for i in range(self.dim): + x_plus = x.copy() + x_plus[i] += step_size + f_plus = func(x_plus) + grad[i] = (f_plus - best_f) / step_size + + x = np.clip(x - step_size * grad, self.bounds[0], self.bounds[1]) + f = func(x) + if f < best_f: + best_x = x + best_f = f + + return best_x, best_f + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + elitism_ratio = 0.1 + num_elite = max(1, int(self.pop_size * elitism_ratio)) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + progress = self.eval_count / global_search_budget + self.w = 0.4 + 0.5 * (1 - progress) + self.c1 = 1.5 - 0.5 * progress + self.c2 = 1.5 + 0.5 * progress + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + if np.random.rand() < 0.3: + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + elite_indices = np.argsort(fitness)[:num_elite] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + for i in range(num_elite, self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + if np.random.rand() < 0.5: + new_x, new_f = self.gradient_descent(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + else: + new_x, new_f = self.local_search(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteMemoryEnhancedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/EliteMemoryEnhancedDynamicHybridOptimizer.py new file mode 100644 index 000000000..b477b5555 --- /dev/null +++ b/nevergrad/optimization/lama/EliteMemoryEnhancedDynamicHybridOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats import levy_stable + + +class EliteMemoryEnhancedDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + levy_alpha=1.5, + restart_threshold=0.001, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.levy_alpha = levy_alpha + self.restart_threshold = restart_threshold + self.global_best_history = [] + + def levy_flight(self, size): + return levy_stable.rvs(self.levy_alpha, 0, size=size) + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + if len(idxs) < 3: + continue # Skip mutation if less than 3 distinct individuals + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Restart mechanism + if len(self.global_best_history) > 10: + if np.std(self.global_best_history[-10:]) < self.restart_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteMultiStrategySelfAdaptiveDE.py b/nevergrad/optimization/lama/EliteMultiStrategySelfAdaptiveDE.py new file mode 100644 index 000000000..dc1e8632c --- /dev/null +++ b/nevergrad/optimization/lama/EliteMultiStrategySelfAdaptiveDE.py @@ -0,0 +1,128 @@ +import numpy as np + + +class EliteMultiStrategySelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + elite_fraction = 0.1 # Fraction of elite solutions to preserve + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + # Elitism: Preserve the top elite_fraction of the population + elite_count = max(1, int(elite_fraction * population_size)) + elite_indices = fitness.argsort()[:elite_count] + elites = population[elite_indices] + elite_fitness = fitness[elite_indices] + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elites back into the population + if elite_count > 0: + new_population[:elite_count] = elites + new_fitness[:elite_count] = elite_fitness + new_F_values[:elite_count] = F_values[elite_indices] + new_CR_values[:elite_count] = CR_values[elite_indices] + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ElitePreservingDifferentialEvolution.py b/nevergrad/optimization/lama/ElitePreservingDifferentialEvolution.py new file mode 100644 index 000000000..589534d6f --- /dev/null +++ b/nevergrad/optimization/lama/ElitePreservingDifferentialEvolution.py @@ -0,0 +1,97 @@ +import numpy as np + + +class ElitePreservingDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.8 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.02 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Re-initialize only the worst individuals to maintain diversity + worst_indices = np.argsort(fitness)[-int(0.1 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 3) == 0 and population_size > 20: + elite_indices = np.argsort(fitness)[: int(0.6 * population_size)] + population = population[elite_indices] + fitness = fitness[elite_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteQuantumAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/EliteQuantumAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..bf8474078 --- /dev/null +++ b/nevergrad/optimization/lama/EliteQuantumAdaptiveExplorationOptimization.py @@ -0,0 +1,235 @@ +import numpy as np + + +class EliteQuantumAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # PSO constants + c1 = 2.0 + c2 = 2.0 + w = 0.5 + + # Learning rate adaptation parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Differential Evolution parameters + F_min = 0.4 + F_max = 0.9 + CR = 0.9 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 + + # Exploration improvement parameters + exploration_factor = 0.2 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + # Elite pool + elite_pool_size = 5 + elite_pool = [] + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.9 + + prev_f = f + + # Update elite pool + if len(elite_pool) < elite_pool_size: + elite_pool.append(global_best_position.copy()) + else: + worst_idx = np.argmax([func(e) for e in elite_pool]) + if global_best_score < func(elite_pool[worst_idx]): + elite_pool[worst_idx] = global_best_position.copy() + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Adaptive Quantum Rotation + if i % 50 == 0 and i > 0: + rotation_angle = np.pi / 4 * (1 - i / self.budget) + rotation_matrix = np.array( + [ + [np.cos(rotation_angle), -np.sin(rotation_angle)], + [np.sin(rotation_angle), np.cos(rotation_angle)], + ] + ) + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EliteQuantumAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py new file mode 100644 index 000000000..01484305b --- /dev/null +++ b/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class EliteQuantumDifferentialMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.c1 = 1.5 + self.c2 = 1.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < 1e-3 or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + self.c1 * r1 * (personal_bests[i] - particles[i]) + + self.c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + self.c1 = np.random.uniform(1.0, 2.5) + self.c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EliteRefinedAdaptivePrecisionOptimizer.py b/nevergrad/optimization/lama/EliteRefinedAdaptivePrecisionOptimizer.py new file mode 100644 index 000000000..d0877c571 --- /dev/null +++ b/nevergrad/optimization/lama/EliteRefinedAdaptivePrecisionOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EliteRefinedAdaptivePrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Initial higher temperature for more aggressive global exploration + T_min = 0.0008 # Lower minimum temperature for deep exploitation + alpha = 0.95 # Slower cooling rate to extend the exploration phase + + # Optimized mutation and crossover parameters for a balance between diversity and convergence + F_base = 0.85 # Higher base mutation factor to encourage diverse mutations + CR_base = 0.88 # Adjusted crossover probability for optimal diversity in offspring + + population_size = 90 # Increased population size for enhanced search capabilities + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Evolution loop with dynamic mutation and crossover influenced by temperature and solution quality + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor adjusted by an exponential decay based on temperature and search progress + dynamic_F = ( + F_base + * np.exp(-0.15 * T) + * (0.7 + 0.3 * np.cos(1.5 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR_dynamic = CR_base - 0.12 * np.sin(2 * np.pi * evaluation_count / self.budget) + cross_points = np.random.rand(self.dim) < CR_dynamic + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced adaptive acceptance criterion based on delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adjusted cooling strategy with a progressive rate based on performance and remaining budget + adaptive_cooling = alpha - 0.02 * np.cos(1.8 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EliteTranscendentalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/EliteTranscendentalEvolutionaryOptimizer.py new file mode 100644 index 000000000..fd766671a --- /dev/null +++ b/nevergrad/optimization/lama/EliteTranscendentalEvolutionaryOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class EliteTranscendentalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initiation of advanced temperature management settings + T = 1.5 # Higher initial temperature to allow more exploration initially + T_min = 0.0005 # Lower threshold to extend the search phase + alpha = 0.95 # Slower cooling rate to explore more thoroughly + + # Enhanced mutation strategy parameters + F_initial = 0.8 # Initial mutation factor + F_final = 0.5 # Final mutation factor + CR = 0.8 # Adjusted crossover probability + + population_size = 100 # Increased population size for better sampling + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Incorporating a mutation factor schedule and an adaptive acceptance mechanism + while evaluation_count < self.budget and T > T_min: + F = F_initial + (F_final - F_initial) * ( + evaluation_count / self.budget + ) # Linear schedule for mutation factor + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + if trial_fitness < fitness[i] or np.random.rand() < np.exp(-(trial_fitness - fitness[i]) / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + T *= alpha # Cooling happens after each generation + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/ElitistAdaptiveDE.py b/nevergrad/optimization/lama/ElitistAdaptiveDE.py new file mode 100644 index 000000000..b2886b06c --- /dev/null +++ b/nevergrad/optimization/lama/ElitistAdaptiveDE.py @@ -0,0 +1,113 @@ +import numpy as np + + +class ElitistAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.1, 0.9 + CR_min, CR_max = 0.1, 0.9 + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = F_min + (F_max - F_min) * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = CR_min + (CR_max - CR_min) * np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def elitism(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[:population_size] + return combined_population[elite_indices], combined_fitness[elite_indices] + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + if evaluations % 50 == 0: + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + + for i in range(population_size): + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if evaluations >= self.budget: + break + + population, fitness = elitism(population, fitness, new_population, new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW.py b/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW.py new file mode 100644 index 000000000..72782b23b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAQAPSOHR_LSDIW: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + initial_velocity_clamp=0.5, + local_search_radius=0.05, + local_search_samples=20, + inertia_weight=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.initial_velocity_clamp = initial_velocity_clamp + self.local_search_radius = local_search_radius + self.local_search_samples = local_search_samples + self.inertia_weight = inertia_weight + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_samples): + x_new = x + np.random.uniform(-self.local_search_radius, self.local_search_radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_velocity_clamp(self, t): + return max(0.1, self.initial_velocity_clamp - 0.3 * t / self.budget) + + def update_inertia_weight(self, t): + return self.inertia_weight + 0.5 * (1 - t / self.budget) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + velocity_clamp = self.update_velocity_clamp(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -velocity_clamp, velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW_AP.py b/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW_AP.py new file mode 100644 index 000000000..0535ed222 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSOHR_LSDIW_AP.py @@ -0,0 +1,117 @@ +import numpy as np + + +class EnhancedAQAPSOHR_LSDIW_AP: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + initial_velocity_clamp=0.5, + local_search_radius=0.05, + local_search_samples=20, + inertia_weight=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.initial_velocity_clamp = initial_velocity_clamp + self.local_search_radius = local_search_radius + self.local_search_samples = local_search_samples + self.inertia_weight = inertia_weight + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_samples): + x_new = x + np.random.uniform(-self.local_search_radius, self.local_search_radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_velocity_clamp(self, t): + return max(0.1, self.initial_velocity_clamp - 0.3 * t / self.budget) + + def update_inertia_weight(self, t): + return self.inertia_weight + 0.5 * (1 - t / self.budget) + + def update_params(self, t): + self.cognitive_weight = 1.5 - 1.2 * t / self.budget + self.social_weight = 2.0 + 0.8 * t / self.budget + self.acceleration_coeff = 1.1 + 0.4 * t / self.budget + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + velocity_clamp = self.update_velocity_clamp(t) + self.update_params(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -velocity_clamp, velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP.py new file mode 100644 index 000000000..58d1fe756 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(20): # Increased the local search iterations further for better refinement + x_new = x + np.random.uniform(-0.2, 0.2, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget # Further improved the inertia weight update + + def update_parameters(self, t): + return ( + 1.8 - 0.7 * t / self.budget, + 2.2 - 0.7 * t / self.budget, + ) # Further adaptive cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.2 * r3 * (global_best_pos - particles_pos[i]) # Increased acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Final.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Final.py new file mode 100644 index 000000000..6d796e1c3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Final.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Final: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(50): # Increased the local search iterations for better refinement + x_new = x + np.random.uniform(-0.5, 0.5, size=self.dim) # Enhanced the exploration range + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.8 * t / self.budget + + def update_parameters(self, t): + return 1.9 - 0.8 * t / self.budget, 2.3 - 0.8 * t / self.budget + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = ( + 2.0 * r3 * (global_best_pos - particles_pos[i]) + ) # Increased acceleration coefficient further + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined.py new file mode 100644 index 000000000..2dfe586d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Refined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(30): # Increased the local search iterations further for better refinement + x_new = x + np.random.uniform(-0.3, 0.3, size=self.dim) # Enhanced the exploration + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.8 * t / self.budget # Further refined the inertia weight update + + def update_parameters(self, t): + return ( + 1.9 - 0.8 * t / self.budget, + 2.3 - 0.8 * t / self.budget, + ) # Further refined cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Increased acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined_Final.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined_Final.py new file mode 100644 index 000000000..46b211e7e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Refined_Final.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Refined_Final: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(100): # Increased the local search iterations further for better refinement + x_new = x + np.random.uniform(-0.7, 0.7, size=self.dim) # Enhanced the exploration range further + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.6 * t / self.budget # Adjusted inertia weight update for better balance + + def update_parameters(self, t): + return ( + 2.0 - 1.6 * t / self.budget, + 2.5 - 1.6 * t / self.budget, + ) # Fine-tuned cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = ( + 2.5 * r3 * (global_best_pos - particles_pos[i]) + ) # Further increased acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate.py new file mode 100644 index 000000000..f9ed04455 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Ultimate: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(200): # Increased local search iterations for thorough exploration + x_new = x + np.random.uniform(-1.0, 1.0, size=self.dim) # Expanded exploration range further + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget # Adjusted inertia weight update for better balance + + def update_parameters(self, t): + return ( + 2.2 - 1.7 * t / self.budget, + 2.7 - 1.7 * t / self.budget, + ) # Fine-tuned cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = ( + 3.0 * r3 * (global_best_pos - particles_pos[i]) + ) # Increased acceleration coefficient for faster convergence + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py new file mode 100644 index 000000000..b34ba2bd7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): # Increased local search iterations for deeper exploration + x_new = x + np.random.uniform(-1.0, 1.0, size=self.dim) # Adjusted the local search neighborhood + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.8 * t / self.budget # Further adjusted inertia weight update for stability + + def update_parameters(self, t): + return ( + 1.8 - 1.2 * t / self.budget, + 2.2 - 1.2 * t / self.budget, + ) # Fine-tuned cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = ( + 2.0 * r3 * (global_best_pos - particles_pos[i]) + ) # Further adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined.py new file mode 100644 index 000000000..5cc83adc1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): # Keep the same local search iterations + x_new = x + 0.1 * np.random.randn(self.dim) # Adjusted the local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.8 * t / self.budget # Keep the same inertia weight update + + def update_parameters(self, t): + return ( + 1.8 - 1.2 * t / self.budget, + 2.2 - 1.2 * t / self.budget, + ) # Keep the same cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.8 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined.py new file mode 100644 index 000000000..695098497 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(300): # Increased local search iterations for deeper exploration + x_new = x + np.random.uniform(-1.5, 1.5, size=self.dim) # Expanded the local search neighborhood + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget # Adjusted inertia weight update for stability + + def update_parameters(self, t): + return ( + 2.0 - 1.5 * t / self.budget, + 2.5 - 1.5 * t / self.budget, + ) # Fine-tuned cognitive and social weights + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 2.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined.py b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined.py new file mode 100644 index 000000000..f4ddeb34d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): # Keep the same local search iterations + x_new = x + 0.05 * np.random.randn(self.dim) # Adjusted the local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.6 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 1.6 - t / (2 * self.budget), 2.0 - t / ( + 2 * self.budget + ) # Adjusted cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.6 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v2.py b/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v2.py new file mode 100644 index 000000000..e1ac32349 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v2.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedAdaptiveChaoticFireworksOptimization_v2: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func, i): + p_diversify = 0.1 + 0.4 * np.exp(-5 * i / self.budget) # Adaptive probability for diversification + for i in range(self.n_fireworks): + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def enhance_convergence(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + best_firework = fireworks[best_idx] + for i in range(self.n_fireworks): + if i != best_idx: + fireworks[i] = 0.9 * fireworks[i] + 0.1 * best_firework # Attraction towards the global best + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + fireworks = self.diversify_fireworks(fireworks, func, i) + fireworks = self.enhance_convergence(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v3.py b/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v3.py new file mode 100644 index 000000000..b2b2844ff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveChaoticFireworksOptimization_v3.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdaptiveChaoticFireworksOptimization_v3: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func, i): + p_diversify = 0.1 + 0.4 * np.exp(-5 * i / self.budget) # Adaptive probability for diversification + for i in range(self.n_fireworks): + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def enhance_convergence(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + best_firework = fireworks[best_idx] + for i in range(self.n_fireworks): + if i != best_idx: + fireworks[i] = 0.9 * fireworks[i] + 0.1 * best_firework # Attraction towards the global best + return fireworks + + def adaptive_sparks(self, budget): + return 5 + int(45 * np.exp(-5 * budget / self.budget)) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + n_sparks = self.adaptive_sparks(i) + self.n_sparks = n_sparks + fireworks = self.diversify_fireworks(fireworks, func, i) + fireworks = self.enhance_convergence(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveCohortMemeticAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveCohortMemeticAlgorithm.py new file mode 100644 index 000000000..9843d910e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveCohortMemeticAlgorithm.py @@ -0,0 +1,140 @@ +import numpy as np + + +class EnhancedAdaptiveCohortMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.elite_ratio = 0.1 + self.local_search_chance = 0.2 + self.crossover_probability = 0.9 + self.mutation_factor = 0.8 + self.global_mutation_factor = 0.5 + self.diversity_threshold = 0.2 + self.reinitialization_rate = 0.1 + self.diversity_cycle = 50 + self.local_search_intensity = 5 + self.global_search_intensity = 10 + + # New parameters + self.local_search_radius = 0.1 + self.global_search_radius = 0.5 + self.reduction_factor = 0.98 # To reduce the mutation factor over time + self.mutation_scale = 0.1 # To scale the random mutations + self.adaptive_crossover_rate = 0.5 # To adjust crossover probability based on diversity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + diversity_counter = 0 + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.crossover_probability + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + diversity_counter += 1 + if diversity_counter % self.diversity_cycle == 0: + self.adaptive_population_control(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.local_search_intensity): + step_size = np.random.normal(0, self.local_search_radius, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_control(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) + self.crossover_probability = self.crossover_probability * (1 + 0.1 * remaining_budget_ratio) + self.mutation_factor = self.mutation_factor * (1 + 0.1 * remaining_budget_ratio) + + # New adaptation strategies + self.crossover_probability *= self.adaptive_crossover_rate + self.mutation_factor *= self.reduction_factor + self.global_mutation_factor *= self.reduction_factor + self.local_search_radius *= self.reduction_factor + + if diversity < self.diversity_threshold / 2 and remaining_budget_ratio > 0.5: + self.global_search_reset(population, func) + + def global_search_reset(self, population, func): + global_search_population = np.random.uniform( + self.lb, self.ub, (self.global_search_intensity, self.dim) + ) + + for ind in global_search_population: + f_ind = func(ind) + if f_ind < self.f_opt: + self.f_opt = f_ind + self.x_opt = ind + + population[: self.global_search_intensity] = global_search_population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveControlledMemoryAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveControlledMemoryAnnealing.py new file mode 100644 index 000000000..031b0dfdd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveControlledMemoryAnnealing.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedAdaptiveControlledMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.95 # Cooling rate, more aggressive cooling + beta = 2.0 # Higher control parameter for better acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Larger memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + # Dynamically adjust control parameter beta + if evaluations < self.budget / 3: + beta = 1.5 # Initial phase: higher exploration + elif evaluations < 2 * self.budget / 3: + beta = 1.0 # Middle phase: balanced exploration and exploitation + else: + beta = 2.5 # Final phase: higher acceptance for local search refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4.py new file mode 100644 index 000000000..b12572a0a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4.py @@ -0,0 +1,122 @@ +import numpy as np + + +class EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 120 # Increased population size + self.sigma = 0.2 + self.c1 = 0.05 + self.cmu = 0.03 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.015 + self.elitism_rate = 0.1 # Reduced elitism rate to allow more exploration + self.eval_count = 0 + self.F = 0.8 # Tuned differential weight + self.CR = 0.9 # Tuned crossover probability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixEvolution.py new file mode 100644 index 000000000..793415f66 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveCovarianceMatrixEvolution.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveCovarianceMatrixEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Adjust population size for more efficient exploration + self.sigma = 0.5 # Initial step size + self.c1 = 0.02 # Learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) # Damping factor for step size + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) # Number of parents for recombination + self.adaptive_learning_rate = 0.15 # Learning rate for adaptive self-adaptive mutation + self.eval_count = 0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def dynamic_crossover(parent1, parent2): + alpha = np.random.uniform(0, 1, self.dim) + return alpha * parent1 + (1 - alpha) * parent2 + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + for i in range(self.population_size // 2): + parent1, parent2 = offspring[i], offspring[self.population_size // 2 + i] + offspring[i] = dynamic_crossover(parent1, parent2) + + new_fitness = np.array([func(ind) for ind in offspring]) + self.eval_count += self.population_size + + population = offspring + fitness = new_fitness + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveCovarianceMatrixEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDEPSOOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveDEPSOOptimizer.py new file mode 100644 index 000000000..5961d2501 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDEPSOOptimizer.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveDEPSOOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.crossover_probability: + mutant = self.differential_mutation(population, i) + trial = self.differential_crossover(population[i], mutant) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: self.population_size // 4] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def differential_mutation(self, population, current_idx): + indices = [idx for idx in range(self.population_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def differential_crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + trial = np.where(crossover_mask, mutant, target) + return trial + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiffEvolutionGradientDescent.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiffEvolutionGradientDescent.py new file mode 100644 index 000000000..b449e3bb3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiffEvolutionGradientDescent.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveDiffEvolutionGradientDescent: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.9 - 0.8 * (iteration / max_iterations) + self.crossover_rate = 0.9 - 0.5 * (iteration / max_iterations) + self.learning_rate = 0.02 * np.exp(-iteration / (0.5 * max_iterations)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Perform local search only on the global best position + global_best_position, global_best_score = self.local_search( + func, global_best_position, global_best_score + ) + evaluations += 1 + if evaluations >= self.budget: + break + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..6714011a9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolution: + def __init__(self, budget, dimension=5, population_size=50, F=0.8, CR=0.9, adaptive=True): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F + self.CR = CR + self.adaptive = adaptive + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self): + self.F = np.clip(np.random.normal(self.F, 0.1), 0.5, 1.0) + self.CR = np.clip(np.random.normal(self.CR, 0.1), 0.8, 1.0) + + def __call__(self, func): + population = self.initialize_population() + self.fitness = np.array([func(ind) for ind in population]) + evaluations = len(population) + + while evaluations < self.budget: + if self.adaptive: + self.adjust_parameters() + + for i in range(self.pop_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + population[i], self.fitness[i] = self.select(population[i], trial, func) + evaluations += 1 + if evaluations >= self.budget: + break + + f_opt = np.min(self.fitness) + x_opt = population[np.argmin(self.fitness)] + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamic.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamic.py new file mode 100644 index 000000000..25973f8a6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamic.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionDynamic: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamicImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamicImproved.py new file mode 100644 index 000000000..f97b7047d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionDynamicImproved.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionDynamicImproved: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionEnhanced.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionEnhanced.py new file mode 100644 index 000000000..fb17534d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionEnhanced.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionEnhanced: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.4, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.7, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefined.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefined.py new file mode 100644 index 000000000..0ea0c6a9c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefined.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionRefined: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedImproved.py new file mode 100644 index 000000000..c53e5b43b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedImproved.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionRefinedImproved: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.4, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.7, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV2.py new file mode 100644 index 000000000..bf07c4f7d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV2.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionRefinedV2: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.4, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.7, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV3.py new file mode 100644 index 000000000..922bb1ce6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV3.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionRefinedV3: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.4, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.7, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV4.py new file mode 100644 index 000000000..905929f1c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionRefinedV4.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionRefinedV4: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.4, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.7, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV10.py new file mode 100644 index 000000000..268f20098 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV10.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV10: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV11.py new file mode 100644 index 000000000..c5915274e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV11.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV11: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + self.mutation_factor = self.adapt_mutation_factor(self.mutation_factor, fitness_values) + self.crossover_rate = self.adapt_crossover_rate(self.crossover_rate, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_mutation_factor(self, mutation_factor, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (mutation_factor[0] + np.random.rand() * 0.1, mutation_factor[1] + np.random.rand() * 0.1) + else: + return (mutation_factor[0] - np.random.rand() * 0.1, mutation_factor[1] - np.random.rand() * 0.1) + + def adapt_crossover_rate(self, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (crossover_rate[0] + np.random.rand() * 0.1, crossover_rate[1] + np.random.rand() * 0.1) + else: + return (crossover_rate[0] - np.random.rand() * 0.1, crossover_rate[1] - np.random.rand() * 0.1) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV12.py new file mode 100644 index 000000000..3160d5fb1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV12.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV12: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + if np.random.rand() < self.scaling_factor: + self.mutation_factor = self.adapt_mutation_factor(self.mutation_factor, fitness_values) + self.crossover_rate = self.adapt_crossover_rate(self.crossover_rate, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_mutation_factor(self, mutation_factor, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (mutation_factor[0] + np.random.rand() * 0.1, mutation_factor[1] + np.random.rand() * 0.1) + else: + return (mutation_factor[0] - np.random.rand() * 0.1, mutation_factor[1] - np.random.rand() * 0.1) + + def adapt_crossover_rate(self, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (crossover_rate[0] + np.random.rand() * 0.1, crossover_rate[1] + np.random.rand() * 0.1) + else: + return (crossover_rate[0] - np.random.rand() * 0.1, crossover_rate[1] - np.random.rand() * 0.1) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV13.py new file mode 100644 index 000000000..768a8ccd9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV13.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV13: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + if np.random.rand() < self.scaling_factor: + self.mutation_factor = self.adapt_mutation_factor(self.mutation_factor, fitness_values) + self.crossover_rate = self.adapt_crossover_rate(self.crossover_rate, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_mutation_factor(self, mutation_factor, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return ( + mutation_factor[0] + np.random.rand() * 0.05, + mutation_factor[1] + np.random.rand() * 0.05, + ) + else: + return ( + mutation_factor[0] - np.random.rand() * 0.05, + mutation_factor[1] - np.random.rand() * 0.05, + ) + + def adapt_crossover_rate(self, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (crossover_rate[0] + np.random.rand() * 0.05, crossover_rate[1] + np.random.rand() * 0.05) + else: + return (crossover_rate[0] - np.random.rand() * 0.05, crossover_rate[1] - np.random.rand() * 0.05) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV14.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV14.py new file mode 100644 index 000000000..7dcfe1f9f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV14.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV14: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + if np.random.rand() < self.scaling_factor: + self.mutation_factor = self.adapt_mutation_factor(self.mutation_factor, fitness_values) + self.crossover_rate = self.adapt_crossover_rate(self.crossover_rate, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_mutation_factor(self, mutation_factor, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return ( + mutation_factor[0] + np.random.rand() * 0.02, + mutation_factor[1] + np.random.rand() * 0.02, + ) + else: + return ( + mutation_factor[0] - np.random.rand() * 0.02, + mutation_factor[1] - np.random.rand() * 0.02, + ) + + def adapt_crossover_rate(self, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (crossover_rate[0] + np.random.rand() * 0.02, crossover_rate[1] + np.random.rand() * 0.02) + else: + return (crossover_rate[0] - np.random.rand() * 0.02, crossover_rate[1] - np.random.rand() * 0.02) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV15.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV15.py new file mode 100644 index 000000000..091389a81 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV15.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV15: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.normal(np.mean(self.mutation_factor), 0.1) + mutation_factor = np.clip(mutation_factor, self.mutation_factor[0], self.mutation_factor[1]) + + crossover_rate = np.random.normal(np.mean(self.crossover_rate), 0.1) + crossover_rate = np.clip(crossover_rate, self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + if np.random.rand() < self.scaling_factor: + self.mutation_factor = self.adapt_mutation_factor(self.mutation_factor, fitness_values) + self.crossover_rate = self.adapt_crossover_rate(self.crossover_rate, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_mutation_factor(self, mutation_factor, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return ( + mutation_factor[0] + np.random.rand() * 0.02, + mutation_factor[1] + np.random.rand() * 0.02, + ) + else: + return ( + mutation_factor[0] - np.random.rand() * 0.02, + mutation_factor[1] - np.random.rand() * 0.02, + ) + + def adapt_crossover_rate(self, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + if mean_fitness < np.min(fitness_values): + return (crossover_rate[0] + np.random.rand() * 0.02, crossover_rate[1] + np.random.rand() * 0.02) + else: + return (crossover_rate[0] - np.random.rand() * 0.02, crossover_rate[1] - np.random.rand() * 0.02) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV16.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV16.py new file mode 100644 index 000000000..297ea85a8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV16.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV16: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.normal(np.mean(self.mutation_factor), 0.1) + mutation_factor = np.clip(mutation_factor, self.mutation_factor[0], self.mutation_factor[1]) + + crossover_rate = np.random.normal(np.mean(self.crossover_rate), 0.1) + crossover_rate = np.clip(crossover_rate, self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Adaptive control of mutation factor and crossover rate + self.mutation_factor, self.crossover_rate = self.adapt_parameters( + self.mutation_factor, self.crossover_rate, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_parameters(self, mutation_factor, crossover_rate, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + if mean_fitness < np.min(fitness_values): + mutation_factor = np.clip( + np.array(mutation_factor) + np.random.normal(0, std_fitness * 0.1, 2), 0.1, 2.0 + ) + crossover_rate = np.clip( + np.array(crossover_rate) + np.random.normal(0, std_fitness * 0.1, 2), 0.1, 1.0 + ) + else: + mutation_factor = np.clip( + np.array(mutation_factor) - np.random.normal(0, std_fitness * 0.1, 2), 0.1, 2.0 + ) + crossover_rate = np.clip( + np.array(crossover_rate) - np.random.normal(0, std_fitness * 0.1, 2), 0.1, 1.0 + ) + + return mutation_factor, crossover_rate diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV17.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV17.py new file mode 100644 index 000000000..986c64521 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV17.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV17: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = self.adapt_parameter(self.mutation_factor) + crossover_rate = self.adapt_parameter(self.crossover_rate) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_parameter(self, parameter_range): + return np.random.uniform(parameter_range[0], parameter_range[1]) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV18.py new file mode 100644 index 000000000..57a1db10c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV18.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV18: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = self.adapt_parameter(self.mutation_factor) + crossover_rate = self.adapt_parameter(self.crossover_rate) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_parameter(self, parameter_range): + return np.clip( + np.random.normal(np.mean(parameter_range), np.std(parameter_range)), + parameter_range[0], + parameter_range[1], + ) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV19.py new file mode 100644 index 000000000..089a44062 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV19.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV19: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + mutation_strength = np.mean(np.abs(scaling_factors - np.mean(scaling_factors))) + + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + crossover_rate_range = np.array(self.crossover_rate_range) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV20.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV20.py new file mode 100644 index 000000000..3a6c63961 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV20.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV20: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + mutation_strength = np.mean(np.abs(scaling_factors - np.mean(scaling_factors))) + + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV21.py new file mode 100644 index 000000000..4b5209a38 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV21.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV21: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + mutation_strength = np.mean(np.abs(scaling_factors - np.mean(scaling_factors))) + + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV22.py new file mode 100644 index 000000000..c04d68d70 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV22.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV22: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + mutation_strength = np.mean(np.abs(scaling_factors - np.mean(scaling_factors))) + + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.02 * mutation_strength), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.02 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV23.py new file mode 100644 index 000000000..010c8698d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV23.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV23: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.02 * np.mean(scaling_factors)), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.02 * np.mean(scaling_factors)), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV24.py new file mode 100644 index 000000000..e78a4ff0d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV24.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV24: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.02 * np.mean(scaling_factors)), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.02 * np.mean(scaling_factors)), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV25.py new file mode 100644 index 000000000..8c2123936 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV25.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV25: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.15 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.03 * np.mean(scaling_factors)), 0.1, 1.0 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.03 * np.mean(scaling_factors)), 0.1, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV26.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV26.py new file mode 100644 index 000000000..bbd86b065 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV26.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV26: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.2, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.5, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV27.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV27.py new file mode 100644 index 000000000..d152a0f0b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV27.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV27: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.4, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + scaling_factor_adaptation = np.zeros(self.population_size) + crossover_rate_adaptation = np.zeros(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + scaling_factor_adaptation[i] + crossover_rate = crossover_rates[i] + crossover_rate_adaptation[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factor_adaptation, crossover_rate_adaptation = self.update_adaptations( + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ) + scaling_factors, crossover_rates = self.update_parameters(scaling_factors, crossover_rates) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_adaptations( + self, + scaling_factors, + crossover_rates, + fitness_values, + scaling_factor_adaptation, + crossover_rate_adaptation, + ): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + return scaling_factor_adaptation, crossover_rate_adaptation + + def update_parameters(self, scaling_factors, crossover_rates): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.2, 0.8 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.01 * np.mean(scaling_factors)), 0.5, 0.9 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV28.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV28.py new file mode 100644 index 000000000..fbc78a23f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV28.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV28: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV4.py new file mode 100644 index 000000000..f630e756b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV4.py @@ -0,0 +1,50 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV4: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, p_best=0.2): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + self.mutation_factor * (b - c) + self.mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + current_fitness = func(population[i]) + + if trial_fitness < current_fitness: + population[i] = trial_individual + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV5.py new file mode 100644 index 000000000..286feb3ad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV5.py @@ -0,0 +1,50 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV5: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, p_best=0.2): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + self.mutation_factor * (b - c) + self.mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + current_fitness = func(population[i]) + + if trial_fitness < current_fitness: + population[i] = trial_individual + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV6.py new file mode 100644 index 000000000..d33e121f8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV6.py @@ -0,0 +1,50 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV6: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, p_best=0.2): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + self.mutation_factor * (b - c) + self.mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + current_fitness = func(population[i]) + + if trial_fitness < current_fitness: + population[i] = trial_individual + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV7.py new file mode 100644 index 000000000..48d5a04cc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV7.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV7: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + current_fitness = func(population[i]) + + if trial_fitness < current_fitness: + population[i] = trial_individual + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV8.py new file mode 100644 index 000000000..a22404fee --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV8.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV8: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + current_fitness = func(population[i]) + + if trial_fitness <= current_fitness: # Include equality to explore more + population[i] = trial_individual + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV9.py new file mode 100644 index 000000000..16ec6f5de --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionV9.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionV9: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = np.random.uniform(self.mutation_factor[0], self.mutation_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness < fitness_values[i]: # Improvement in fitness + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch.py new file mode 100644 index 000000000..1b8cb2666 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch.py @@ -0,0 +1,123 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import Matern + + +class EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, gp, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + else: + # Use the GP model for local search guidance + new_f = gp.predict([new_x])[0] + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def surrogate_model(self, X, y): + kernel = Matern(nu=2.5) + gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gp.fit(X, y) + return gp + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply Bayesian guided local search + if np.random.rand() < 0.5: + gp = self.surrogate_model(population, fitness) + local_best_x, local_best_f = self.local_search( + population[i], func, gp, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Reinitialize worst individuals more frequently + if evaluations + int(0.20 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.20 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Elite Preservation with larger perturbations + elite_size = int(0.2 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.05, 0.05, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation.py new file mode 100644 index 000000000..0c947dab1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation.py @@ -0,0 +1,58 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation: + def __init__(self, budget=1000, population_size=50, cr_range=(0.1, 0.9), f_range=(0.1, 0.9)): + self.budget = budget + self.population_size = population_size + self.cr_range = cr_range + self.f_range = f_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + cr = np.random.uniform(*self.cr_range, size=self.population_size) + f = np.random.uniform(*self.f_range, size=self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + trial_individual = self.generate_trial_individual(population[i], a, b, c, f[i], cr[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + cr, f = self.adapt_parameters(cr, f, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, f, cr): + dimension = len(current) + mutant = np.clip(a + f * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < cr + return np.where(crossover_points, mutant, current) + + def adapt_parameters(self, cr, f, fitness_values): + mean_fitness = np.mean(fitness_values) + cr = cr + 0.1 * np.mean(cr * (1 + 0.1 * (mean_fitness - fitness_values))) + f = f + 0.1 * np.mean(f * (1 + 0.1 * (mean_fitness - fitness_values))) + return np.clip(cr, *self.cr_range), np.clip(f, *self.f_range) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved.py new file mode 100644 index 000000000..914f8bcbc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved.py @@ -0,0 +1,64 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved: + def __init__(self, budget=1000, population_size=50, cr_range=(0.1, 0.9), f_range=(0.1, 0.9)): + self.budget = budget + self.population_size = population_size + self.cr_range = cr_range + self.f_range = f_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + cr = np.random.uniform(*self.cr_range, size=self.population_size) + f = np.random.uniform(*self.f_range, size=self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + trial_individual = self.generate_trial_individual(population[i], a, b, c, f[i], cr[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + cr, f = self.adapt_parameters(cr, f, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, f, cr): + dimension = len(current) + mutant = np.clip(a + f * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < cr + return np.where(crossover_points, mutant, current) + + def adapt_parameters(self, cr, f, fitness_values): + mean_fitness = np.mean(fitness_values) + cr = cr + 0.1 * np.mean(cr * (1 + 0.1 * (mean_fitness - fitness_values))) + f = f + 0.1 * np.mean(f * (1 + 0.1 * (mean_fitness - fitness_values))) + + # Adjust the adaptivity rates based on the best fitness value + if self.f_opt < mean_fitness: + cr = np.maximum(cr, 0.5) + f = np.maximum(f, 0.5) + + return np.clip(cr, *self.cr_range), np.clip(f, *self.f_range) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters.py new file mode 100644 index 000000000..57f7cf0a1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor = scaling_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = np.random.uniform(self.scaling_factor[0], self.scaling_factor[1]) + crossover_rate = np.random.uniform(self.crossover_rate[0], self.crossover_rate[1]) + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2.py new file mode 100644 index 000000000..446533899 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2: + def __init__(self, budget=1000, population_size=50): + self.budget = budget + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.ones(self.population_size) + crossover_rates = np.ones(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = self.update_parameter(scaling_factors, fitness_values, i) + crossover_rate = self.update_parameter(crossover_rates, fitness_values, i) + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors = self.update_parameters(scaling_factors, fitness_values) + crossover_rates = self.update_parameters(crossover_rates, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameter(self, parameter_values, fitness_values, idx): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameter = parameter_values[idx] * np.exp( + 0.1 * (mean_fitness - fitness_values[idx]) / (std_fitness + 1e-6) + ) + return np.clip(new_parameter, 0.1, 1.0) + + def update_parameters(self, parameters, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameters = parameters * np.exp(0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6)) + return np.clip(new_parameters, 0.1, 1.0) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3.py new file mode 100644 index 000000000..118121ef6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3: + def __init__(self, budget=1000, population_size=50): + self.budget = budget + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.ones(self.population_size) + crossover_rates = np.ones(self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = self.update_parameter(scaling_factors, fitness_values, i) + crossover_rate = self.update_parameter(crossover_rates, fitness_values, i) + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors = self.update_parameters(scaling_factors, fitness_values) + crossover_rates = self.update_parameters(crossover_rates, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameter(self, parameter_values, fitness_values, idx): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameter = parameter_values[idx] * np.exp( + 0.1 * (mean_fitness - fitness_values[idx]) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values[idx]) + ) + return np.clip(new_parameter, 0.1, 1.0) + + def update_parameters(self, parameters, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameters = parameters * np.exp( + 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values) + ) + return np.clip(new_parameters, 0.1, 1.0) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4.py new file mode 100644 index 000000000..b4c4e84d6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform(*self.scaling_factor_range, size=self.population_size) + crossover_rates = np.random.uniform(*self.crossover_rate_range, size=self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = self.update_parameter(scaling_factors, fitness_values, i) + crossover_rate = self.update_parameter(crossover_rates, fitness_values, i) + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors = self.update_parameters(scaling_factors, fitness_values) + crossover_rates = self.update_parameters(crossover_rates, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameter(self, parameter_values, fitness_values, idx): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameter = parameter_values[idx] * np.exp( + 0.1 * (mean_fitness - fitness_values[idx]) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values[idx]) + ) + return np.clip(new_parameter, *self.scaling_factor_range) + + def update_parameters(self, parameters, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameters = parameters * np.exp( + 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values) + ) + return np.clip(new_parameters, *self.crossover_rate_range) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5.py new file mode 100644 index 000000000..304be7c45 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors = self.update_parameters(scaling_factors, fitness_values) + crossover_rates = self.update_parameters(crossover_rates, fitness_values) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, parameters, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_parameters = parameters * np.exp( + 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values) + ) + return np.clip(new_parameters, *self.scaling_factor_range) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation.py new file mode 100644 index 000000000..aad531855 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation: + def __init__( + self, + budget=1000, + init_population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.init_population_size = init_population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population_size = self.init_population_size + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=population_size + ) + + for _ in range(self.budget): + for i in range(population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + if np.random.rand() < 0.1: + if population_size > 2: + population_size -= 1 + idx = np.argmin(fitness_values) + population = np.delete(population, idx, axis=0) + fitness_values = np.delete(fitness_values, idx) + scaling_factors = np.delete(scaling_factors, idx) + crossover_rates = np.delete(crossover_rates, idx) + elif population_size < self.init_population_size: + population_size += 1 + new_individual = np.random.uniform(func.bounds.lb, func.bounds.ub, size=dimension) + population = np.vstack([population, new_individual]) + fitness_values = np.append(fitness_values, func(new_individual)) + scaling_factors = np.append( + scaling_factors, np.random.uniform(*self.scaling_factor_range) + ) + crossover_rates = np.append( + crossover_rates, np.random.uniform(*self.crossover_rate_range) + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined.py new file mode 100644 index 000000000..f483b51e7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined: + def __init__( + self, + budget=1000, + init_population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.init_population_size = init_population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population_size = self.init_population_size + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=population_size + ) + + for _ in range(self.budget): + for i in range(population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + if np.random.rand() < 0.1: + if population_size > 2: + population_size -= 1 + idx = np.argmin(fitness_values) + population = np.delete(population, idx, axis=0) + fitness_values = np.delete(fitness_values, idx) + scaling_factors = np.delete(scaling_factors, idx) + crossover_rates = np.delete(crossover_rates, idx) + elif population_size < self.init_population_size: + population_size += 1 + new_individual = np.random.uniform(func.bounds.lb, func.bounds.ub, size=dimension) + population = np.vstack([population, new_individual]) + fitness_values = np.append(fitness_values, func(new_individual)) + scaling_factors = np.append( + scaling_factors, np.random.uniform(*self.scaling_factor_range) + ) + crossover_rates = np.append( + crossover_rates, np.random.uniform(*self.crossover_rate_range) + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2.py new file mode 100644 index 000000000..1e4df3d3d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2: + def __init__( + self, + budget=1000, + init_population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.init_population_size = init_population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population_size = self.init_population_size + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=population_size + ) + + for _ in range(self.budget): + for i in range(population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + if np.random.rand() < 0.1: + if population_size > 2: + population_size -= 1 + idx = np.argmin(fitness_values) + population = np.delete(population, idx, axis=0) + fitness_values = np.delete(fitness_values, idx) + scaling_factors = np.delete(scaling_factors, idx) + crossover_rates = np.delete(crossover_rates, idx) + elif population_size < self.init_population_size: + population_size += 1 + new_individual = np.random.uniform(func.bounds.lb, func.bounds.ub, size=dimension) + population = np.vstack([population, new_individual]) + fitness_values = np.append(fitness_values, func(new_individual)) + scaling_factors = np.append( + scaling_factors, np.random.uniform(*self.scaling_factor_range) + ) + crossover_rates = np.append( + crossover_rates, np.random.uniform(*self.crossover_rate_range) + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize.py new file mode 100644 index 000000000..fae3f3839 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize.py @@ -0,0 +1,132 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step_range=(0.01, 0.1), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step_range = dynamic_step_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + dynamic_steps = np.full(self.population_size, np.mean(self.dynamic_step_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + dynamic_step = dynamic_steps[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates, dynamic_steps = self.update_parameters( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates, dynamic_steps = self.dynamic_adjustment( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + 0.1 * np.mean(fitness_values)), 0.01, 0.1 + ) + + return ( + np.clip(scaling_factors, *scaling_factor_range), + np.clip(crossover_rates, *crossover_rate_range), + np.clip(dynamic_steps, *dynamic_step_range), + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.6, + 1.0, + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.01, + 0.1, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + dynamic_steps = np.clip(dynamic_steps, *dynamic_step_range) + + return scaling_factors, crossover_rates, dynamic_steps diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined.py new file mode 100644 index 000000000..654965572 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined.py @@ -0,0 +1,132 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step_range=(0.01, 0.1), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step_range = dynamic_step_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + dynamic_steps = np.full(self.population_size, np.mean(self.dynamic_step_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + dynamic_step = dynamic_steps[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates, dynamic_steps = self.update_parameters( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates, dynamic_steps = self.dynamic_adjustment( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + 0.1 * np.mean(fitness_values)), 0.01, 0.1 + ) + + return ( + np.clip(scaling_factors, *scaling_factor_range), + np.clip(crossover_rates, *crossover_rate_range), + np.clip(dynamic_steps, *dynamic_step_range), + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.6, + 1.0, + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.01, + 0.1, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + dynamic_steps = np.clip(dynamic_steps, *dynamic_step_range) + + return scaling_factors, crossover_rates, dynamic_steps diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters.py new file mode 100644 index 000000000..4e8e24905 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters: + def __init__(self, budget=1000, population_size=50, p_best=0.2): + self.budget = budget + self.population_size = population_size + self.p_best = p_best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + scaling_factor_range = (0.5, 2.0) + crossover_rate_range = (0.1, 1.0) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + scaling_factor_range[0], scaling_factor_range[1], size=self.population_size + ) + crossover_rates = np.random.uniform( + crossover_rate_range[0], crossover_rate_range[1], size=self.population_size + ) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + # Update scaling factors and crossover rates with self-adaptation + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + # Adapt scaling factors and crossover rates based on individuals' performance + scaling_factors *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + crossover_rates *= np.exp( + 0.1 * (np.mean(fitness_values) - fitness_values) / (np.std(fitness_values) + 1e-6) + ) + + # Clip values to predefined ranges + scaling_factors = np.clip(scaling_factors, 0.5, 2.0) + crossover_rates = np.clip(crossover_rates, 0.1, 1.0) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialMemeticAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialMemeticAlgorithm.py new file mode 100644 index 000000000..c4a18eab0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDifferentialMemeticAlgorithm.py @@ -0,0 +1,126 @@ +import numpy as np + + +class EnhancedAdaptiveDifferentialMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 100 + self.F_min = 0.5 + self.F_max = 0.9 + self.CR_min = 0.3 + self.CR_max = 0.8 + self.local_search_chance = 0.2 + self.elite_ratio = 0.1 + self.diversity_threshold = 0.05 + self.cauchy_step_scale = 0.001 + self.gaussian_step_scale = 0.001 + self.reinitialization_rate = 0.1 + self.hyper_heuristic_probability = 0.7 + + # Adaptive parameters + self.F = self.F_min + self.CR = self.CR_min + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + self.adaptive_parameters_adjustment(evaluations) + + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(5): + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_parameters_adjustment(self, evaluations): + progress_ratio = evaluations / self.budget + self.F = self.F_min + (self.F_max - self.F_min) * progress_ratio + self.CR = self.CR_min + (self.CR_max - self.CR_min) * progress_ratio + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDirectionalBiasQuorumOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveDirectionalBiasQuorumOptimization.py new file mode 100644 index 000000000..0e2cd352f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDirectionalBiasQuorumOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDirectionalBiasQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_scale=0.2, + momentum=0.8, + learning_rate=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(max(1, population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.learning_rate = learning_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Initialize best solution tracking + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + # Enhanced optimization loop + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select elite indices including the best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Determine the local best + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Mutation and update strategy + direction = best_individual - local_best + random_noise = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = direction * random_noise + self.momentum * velocity + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution and velocity + if child_fitness < best_fitness: + velocity = self.learning_rate * (child - best_individual) + self.momentum * velocity + best_fitness = child_fitness + best_individual = child + + new_population[i, :] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adapt mutation scale and elite count dynamically based on performance + adaptive_ratio = np.random.uniform(-0.1, 0.1) + self.mutation_scale *= 1 + self.learning_rate * adaptive_ratio + self.elite_count = int(max(1, self.elite_count * (1 + self.learning_rate * adaptive_ratio))) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedEvolutionStrategy.py new file mode 100644 index 000000000..d0a4b3623 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedEvolutionStrategy.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedEvolutionStrategy: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=30): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.mutation_scale_base = 0.1 # Base scale for mutation + self.crossover_probability = 0.7 # Probability of crossover + + def mutate(self, individual): + """Apply Gaussian mutation with adaptive scaling.""" + scale = self.mutation_scale_base * np.random.rand() + mutation = np.random.normal(0, scale, self.dimension) + mutant = individual + mutation + return np.clip(mutant, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent1, parent2): + """Uniform crossover between two parents.""" + mask = np.random.rand(self.dimension) < 0.5 + offspring = np.where(mask, parent1, parent2) + return offspring + + def select(self, population, fitness, offspring, offspring_fitness): + """Tournament selection to decide the next generation.""" + better_mask = offspring_fitness < fitness + population[better_mask] = offspring[better_mask] + fitness[better_mask] = offspring_fitness[better_mask] + return population, fitness + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + f_opt = np.min(fitness) + x_opt = population[np.argmin(fitness)] + + # Evolutionary loop + iterations = self.budget // self.population_size + for _ in range(iterations): + offspring = [] + offspring_fitness = [] + + # Generate offspring + for idx in range(self.population_size): + # Mutation + mutant = self.mutate(population[idx]) + + # Crossover + if np.random.rand() < self.crossover_probability: + partner_idx = np.random.randint(self.population_size) + child = self.crossover(mutant, population[partner_idx]) + else: + child = mutant + + # Evaluate + child_fitness = func(child) + offspring.append(child) + offspring_fitness.append(child_fitness) + + # Selection + offspring = np.array(offspring) + offspring_fitness = np.array(offspring_fitness) + population, fitness = self.select(population, fitness, offspring, offspring_fitness) + + # Update best found solution + min_idx = np.argmin(fitness) + if fitness[min_idx] < f_opt: + f_opt = fitness[min_idx] + x_opt = population[min_idx] + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization.py new file mode 100644 index 000000000..9006e7622 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(500): # Increased the number of optimization runs to 500 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Keep the number of iterations within each optimization run as 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2.py new file mode 100644 index 000000000..9639c7630 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(1000): # Increase the number of optimization runs to 1000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Keep the number of iterations within each optimization run as 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3.py new file mode 100644 index 000000000..b2931a80f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(2000): # Increase the number of optimization runs to 2000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Keep the number of iterations within each optimization run as 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4.py new file mode 100644 index 000000000..2158f9441 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4: + def __init__( + self, + budget=5000, + G0=200.0, + alpha=0.3, + delta=0.1, + gamma=0.3, + population_size=250, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(3000): # Increase the number of optimization runs to 3000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Keep the number of iterations within each optimization run as 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearch.py new file mode 100644 index 000000000..a2304ce11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearch.py @@ -0,0 +1,93 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveDiversifiedHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizer.py new file mode 100644 index 000000000..92be30c4a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizer.py @@ -0,0 +1,115 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedHarmonySearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 # Initial diversification rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return min(1.5, bandwidth * 1.1) + else: + return max(0.5, bandwidth * 0.9) + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.01, self.exploration_rate * 0.95) + else: + return min(0.3, self.exploration_rate * 1.05) + + def adapt_diversification_rate(self, aocc): + if aocc < 0.1: + return 0.3 + elif aocc < 0.5: + return 0.15 + else: + return 0.05 + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + self.diversification_rate = self.adapt_diversification_rate(best_fitness) + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + if abs(best_fitness - prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2.py new file mode 100644 index 000000000..e956774f2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 # Initial diversification rate + self.prev_best_fitness = np.Inf + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.bandwidth * 1.1 + else: + return self.bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.exploration_rate * 0.95 + else: + return self.exploration_rate * 1.05 + + def adapt_diversification_rate(self, aocc): + if aocc < 0.1: + return 0.3 + elif aocc < 0.5: + return 0.15 + else: + return 0.05 + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + self.prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + self.diversification_rate = self.adapt_diversification_rate(best_fitness) + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness) + self.prev_best_fitness = best_fitness + + if abs(best_fitness - self.prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3.py new file mode 100644 index 000000000..688d7c927 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 # Initial diversification rate + self.prev_best_fitness = np.Inf + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.bandwidth * 1.1 + else: + return self.bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.02) # Adjusted memory update rate + + def adaptive_exploration_rate(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.exploration_rate * 0.95 + else: + return self.exploration_rate * 1.03 # Adjusted exploration rate + + def adapt_diversification_rate(self, aocc): + if aocc < 0.1: + return 0.15 # Adjusted diversification rate for better exploration + elif aocc < 0.5: + return 0.1 + else: + return 0.05 + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + self.prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + self.diversification_rate = self.adapt_diversification_rate(best_fitness) + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness) + self.prev_best_fitness = best_fitness + + if abs(best_fitness - self.prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4.py new file mode 100644 index 000000000..b0a8cd8b3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 # Initial diversification rate + self.prev_best_fitness = np.Inf + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.bandwidth * 1.1 + else: + return self.bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.02) # Adjusted memory update rate + + def adaptive_exploration_rate(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.exploration_rate * 0.95 + else: + return self.exploration_rate * 1.05 # Increase exploration rate + + def adapt_diversification_rate(self, aocc): + if aocc < 0.1: + return 0.15 # Adjusted diversification rate for better exploration + elif aocc < 0.5: + return 0.1 + else: + return 0.05 + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + self.prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + self.diversification_rate = self.adapt_diversification_rate(best_fitness) + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness) + self.prev_best_fitness = best_fitness + + if abs(best_fitness - self.prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5.py new file mode 100644 index 000000000..0d0cfd2c5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 + self.prev_best_fitness = np.Inf + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.bandwidth * 1.1 + else: + return self.bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.02) + + def adaptive_exploration_rate(self, best_fitness): + if best_fitness < self.prev_best_fitness: + return self.exploration_rate * 0.95 + else: + return self.exploration_rate * 1.1 # Increased exploration rate + + def adapt_diversification_rate(self, aocc): + if aocc < 0.1: + return 0.15 + elif aocc < 0.5: + return 0.1 + else: + return 0.05 + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < self.diversification_rate: + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + self.prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + self.diversification_rate = self.adapt_diversification_rate(best_fitness) + population = self.diversify_population(population) + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness) + self.prev_best_fitness = best_fitness + + if abs(best_fitness - self.prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV2.py new file mode 100644 index 000000000..d44e0ee43 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV2.py @@ -0,0 +1,93 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveDiversifiedHarmonySearchV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.98): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV3.py new file mode 100644 index 000000000..95cecd4fe --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV3.py @@ -0,0 +1,93 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveDiversifiedHarmonySearchV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.99): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV4.py new file mode 100644 index 000000000..7c44318e4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedHarmonySearchV4.py @@ -0,0 +1,93 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveDiversifiedHarmonySearchV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.98): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds, max_iter=10): + current_solution = solution.copy() + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm.py new file mode 100644 index 000000000..703ed9206 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm: + def __init__( + self, budget=10000, population_size=50, num_iterations=100, step_size=0.1, diversity_rate=0.2 + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + beta = 1.5 + sigma1 = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2.py new file mode 100644 index 000000000..2054450d4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.3, + decay_rate=0.9, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.decay_rate = decay_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + beta = 1.5 + sigma1 = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + self.step_size *= self.decay_rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedSearch.py new file mode 100644 index 000000000..8b871d411 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDiversifiedSearch.py @@ -0,0 +1,64 @@ +import numpy as np + + +class EnhancedAdaptiveDiversifiedSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialize solution and function value tracking + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Main optimization loop + num_iterations = int(self.budget / population_size) + for iteration in range(num_iterations): + for i in range(population_size): + # Mutation strategy: Adaptive perturbation with occasional large jumps + if np.random.rand() < 0.1: # 10% chance for a larger mutation + perturbation_scale = 1.0 - ( + iteration / num_iterations + ) # Larger mutation at the beginning + else: + perturbation_scale = 0.1 * (1 - iteration / num_iterations) # Standard mutation scale + + perturbation = np.random.normal(0, perturbation_scale, self.dim) + candidate = population[i] + perturbation + + # Ensure candidate stays within bounds + candidate = np.clip(candidate, self.lb, self.ub) + + # Evaluate candidate + candidate_fitness = func(candidate) + + # Acceptance condition: Greedy selection with elitism + if ( + candidate_fitness < fitness[i] or np.random.rand() < 0.05 + ): # 5% chance to accept worse solutions + population[i] = candidate + fitness[i] = candidate_fitness + + # Update the global best solution + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate.copy() + + return self.f_opt, self.x_opt + + +# Example of use (requires a function `func` and bounds to run): +# optimizer = EnhancedAdaptiveDiversifiedSearch(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDolphinPodOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveDolphinPodOptimization.py new file mode 100644 index 000000000..5bcb48254 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDolphinPodOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDolphinPodOptimization: + def __init__( + self, budget=1000, num_dolphins=20, num_dimensions=5, alpha=0.1, beta=0.5, gamma=0.1, delta=0.2 + ): + self.budget = budget + self.num_dolphins = num_dolphins + self.num_dimensions = num_dimensions + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_dolphins, self.num_dimensions)) + + def levy_flight(self): + sigma = 1.0 + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1.5) + return step + + def move_dolphin(self, current_position, best_position, previous_best_position, bounds): + step = ( + self.alpha * (best_position - current_position) + + self.beta * (previous_best_position - current_position) + + self.gamma * self.levy_flight() + ) + new_position = current_position + step + new_position = np.clip(new_position, bounds.lb, bounds.ub) + return new_position + + def update_parameters(self, iteration): + self.alpha = max(0.01, self.alpha * (1 - 0.9 * iteration / self.budget)) + self.beta = min(0.9, self.beta + 0.1 * iteration / self.budget) + self.gamma = max(0.01, self.gamma * (1 - 0.8 * iteration / self.budget)) + + def adaptive_delta(self, f_new, f_current): + delta = self.delta + if f_new < f_current: + delta *= 1.1 + else: + delta *= 0.9 + return delta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + positions = self.initialize_positions(bounds) + best_position = positions[0].copy() + previous_best_position = best_position.copy() + + for i in range(self.budget): + self.update_parameters(i) + for j in range(self.num_dolphins): + new_position = self.move_dolphin(positions[j], best_position, previous_best_position, bounds) + f_new = func(new_position) + f_current = func(positions[j]) + + if f_new < f_current: + positions[j] = new_position + if f_new < func(best_position): + best_position = new_position.copy() + self.delta = self.adaptive_delta(f_new, f_current) + + previous_best_position = best_position + + self.f_opt = func(best_position) + self.x_opt = best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization.py new file mode 100644 index 000000000..57f52cba8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization.py @@ -0,0 +1,146 @@ +import numpy as np + + +class EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Reduced population size for faster convergence + self.initial_F = 0.8 # Tuned for effective mutation + self.initial_CR = 0.9 # Tuned for effective crossover + self.elite_rate = 0.2 # Increased elite rate + self.local_search_rate = 0.3 # Local search probability + self.memory_size = 20 # Memory size for adaptive parameters + self.w = 0.5 # Reduced inertia weight for faster convergence + self.c1 = 1.5 # Cognitive component + self.c2 = 1.5 # Social component + self.adaptive_phase_ratio = 0.6 # More budget for evolutionary phase + self.alpha = 0.6 # Differential weight for local search + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Increased for effective local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl.py b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl.py new file mode 100644 index 000000000..15568e565 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl.py @@ -0,0 +1,150 @@ +import numpy as np + + +class EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 # Increased for more exploration + self.initial_F = 0.7 # Reduced for more controlled mutation + self.initial_CR = 0.9 # Increased for more crossover + self.elite_rate = 0.1 + self.local_search_rate = 0.4 # Increased for better local exploration + self.memory_size = 25 # Increased for better parameter adaptation + self.w = 0.6 # Reduced for more stable convergence + self.c1 = 1.4 # Reduced slightly for more balanced exploration + self.c2 = 1.8 # Increased for stronger social influence + self.adaptive_phase_ratio = 0.6 # More emphasis on evolutionary phase for diversity + self.alpha = 0.5 # Reduced for finer tuning + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Reduced for finer local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + ( + 0.05 * np.random.randn() + ) # Reduced noise for more controlled adaptation + adaptive_CR = memory_CR[idx] + ( + 0.05 * np.random.randn() + ) # Reduced noise for more controlled adaptation + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV2.py new file mode 100644 index 000000000..5c35fb035 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV2.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedAdaptiveDualPhaseStrategyV2: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Phase 1 uses current best for direction emphasizing exploitation + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Phase 2 uses more exploration with mixed influence from best and random individuals + d = np.random.choice(idxs, 1)[0] + mutant = population[best_idx] + self.F * ( + population[a] - population[b] + population[c] - population[d] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR with modulation based on a sigmoid function for smoother transitions + scale = iteration / total_iterations + self.F = np.clip(0.6 / (1 + np.exp(-10 * (scale - 0.5))) + 0.4, 0.1, 1) + self.CR = np.clip(0.6 / (1 + np.exp(10 * (scale - 0.5))) + 0.4, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV5.py new file mode 100644 index 000000000..6f105945d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDualPhaseStrategyV5.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveDualPhaseStrategyV5: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using additional differential vectors + d = np.random.choice(idxs, 1, replace=False)[0] + e = np.random.choice(idxs, 1, replace=False)[0] + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR with more aggressive changes based on a sine-cosine model + scale = iteration / total_iterations + self.F = np.clip(0.9 * np.sin(2 * np.pi * scale) + 0.5, 0.1, 1) + self.CR = np.clip(0.9 * np.cos(2 * np.pi * scale) + 0.5, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDualStrategyOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveDualStrategyOptimizer.py new file mode 100644 index 000000000..cbd131d72 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDualStrategyOptimizer.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedAdaptiveDualStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + mutation_factor = 0.5 # Constant mutation factor + crossover_rate = 0.9 # Increased crossover for better exploration + elite_size = 5 # Same number of elite individuals to preserve + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Adaptive mutation based on relative fitness differences + adaptive_mutation = mutation_factor * (1 + (fitness[i] - best_fitness) / best_fitness) + + mutant = a + adaptive_mutation * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDE.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDE.py new file mode 100644 index 000000000..6b2c4e89a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDE.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicDE: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 100 # Number of generations to consider for stagnation + stagnation_counter = 0 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Mutation strategy + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Adaptive parameter control based on success rates + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(1.0, F * 1.2) + Cr = max(0.1, Cr * 0.9) + else: + F = max(0.4, F * 0.8) + Cr = min(1.0, Cr * 1.1) + + # Enhanced restart mechanism with diversity consideration + if stagnation_counter > stagnation_threshold: + # Re-initialize population if stuck + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDifferentialEvolution.py new file mode 100644 index 000000000..4f2a340a1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDifferentialEvolution.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicDifferentialEvolution: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates = self.dynamic_adjustment( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV19.py new file mode 100644 index 000000000..884789e4a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV19.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicDualPhaseStrategyV19: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e, f, g = np.random.choice(idxs, 4, replace=False) + mutant = population[a] + self.F * ( + population[b] + - population[c] + + 0.5 * (population[d] - population[e]) + + 0.2 * (population[f] - population[g]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * scale), 0.1, 1) # More aggressive adaptation + self.CR = np.clip(0.9 - 0.4 * scale, 0.1, 0.9) # Linear decrease + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV22.py new file mode 100644 index 000000000..848808839 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicDualPhaseStrategyV22.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicDualPhaseStrategyV22: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Simple differential mutation for initial exploration + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Complex differential mutation for exploitation phase + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adjustment of parameters using scaled sigmoid for non-linear control + scale = iteration / total_iterations + sigmoid = 1 / (1 + np.exp(-10 * (scale - 0.5))) + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..4b82d8dab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced.py new file mode 100644 index 000000000..e68d88603 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmImproved.py new file mode 100644 index 000000000..5c467e089 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmImproved.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkAlgorithmImproved: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmRefined.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmRefined.py new file mode 100644 index 000000000..8a9d82110 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkAlgorithmRefined.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkAlgorithmRefined: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5.py new file mode 100644 index 000000000..1c0feb382 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5: + def __init__(self, budget=10000, n_fireworks=50, f_init=0.5, f_final=0.2, cr_init=0.9, cr_final=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_fireworks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_fireworks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) ** 0.5 + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) ** 0.5 + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6.py new file mode 100644 index 000000000..1bd131b6f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6: + def __init__( + self, budget=10000, n_fireworks=30, n_sparks=10, f_init=0.5, f_final=0.2, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) ** 0.5 + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) ** 0.5 + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7.py new file mode 100644 index 000000000..ca0684e16 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, f_init=0.8, f_final=0.2, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) ** 0.5 + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) ** 0.5 + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearch.py new file mode 100644 index 000000000..53497a483 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearch.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV2.py new file mode 100644 index 000000000..ad04899be --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicHarmonySearchV2: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV3.py new file mode 100644 index 000000000..83499c036 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicHarmonySearchV3.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicHarmonySearchV3: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..89198a202 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=50): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate, F): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.5 * ((iteration / max_iterations) ** 0.5) + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + F = 0.5 + 0.5 * (1 - (iteration / max_iterations) ** 0.5) + return crossover_rate, learning_rate, memetic_probability, F + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability, F): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate, F) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability, F = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability, F + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..e606bd203 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 50 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 20 + self.dynamic_parameters_adjustment_threshold = 30 + self.pop_shrink_factor = 0.1 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 4) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self._dynamic_parameters() + + # Dynamic population resizing based on performance + if self.no_improvement_count >= self.dynamic_adjustment_period: + new_pop_size = max(20, int(self.pop_size * (1 - self.pop_shrink_factor))) + population = population[:new_pop_size] + fitness = fitness[:new_pop_size] + self.pop_size = new_pop_size + self.no_improvement_count = 0 + + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..77e3b8e98 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveDynamicQuantumSwarmOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedAdaptiveDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.6, + cognitive_weight=1.7, + social_weight=2.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters adaptively based on performance + if iteration % 1000 == 0 and iteration > 0: + best_value_avg = np.mean(self.personal_best_values) + global_improvement = abs(self.global_best_value - best_value_avg) / self.global_best_value + if global_improvement < 0.01: + self.inertia_weight *= 0.9 + self.cognitive_weight *= 1.1 + self.social_weight *= 1.1 + + self.inertia_weight = max(0.4, min(0.9, self.inertia_weight)) + self.cognitive_weight = max(1.5, min(2.5, self.cognitive_weight)) + self.social_weight = max(1.2, min(1.8, self.social_weight)) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveEliteDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveEliteDifferentialEvolution.py new file mode 100644 index 000000000..1db61709e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveEliteDifferentialEvolution.py @@ -0,0 +1,122 @@ +import numpy as np + + +class EnhancedAdaptiveEliteDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 150 # Increased initial population size + self.F_min = 0.4 + self.F_max = 0.8 + self.CR_min = 0.6 + self.CR_max = 0.8 + self.local_search_chance = 0.3 + self.elite_ratio = 0.1 + self.diversity_threshold = 0.03 # Reduced threshold to maintain higher diversity + self.cauchy_step_scale = 0.01 + self.gaussian_step_scale = 0.003 + self.reinitialization_rate = 0.2 # Increased reinitialization rate + self.hyper_heuristic_probability = 0.6 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + self.adaptive_parameters_adjustment(evaluations) + + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): # Increased local search iterations + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_parameters_adjustment(self, evaluations): + progress_ratio = evaluations / self.budget + self.F = self.F_min + (self.F_max - self.F_min) * progress_ratio + self.CR = self.CR_min + (self.CR_max - self.CR_min) * progress_ratio + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveEliteGuidedMutationDE_v2.py b/nevergrad/optimization/lama/EnhancedAdaptiveEliteGuidedMutationDE_v2.py new file mode 100644 index 000000000..7307637ff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveEliteGuidedMutationDE_v2.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedAdaptiveEliteGuidedMutationDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 # Slightly increased crossover probability + self.elitism_rate = 0.25 # Increased elitism rate + self.archive_rate = 0.1 # Archive usage rate + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation with enhanced crossover strategy + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Mutation + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Enhanced crossover with additional elitist guidance + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + if np.random.rand() < 0.5: + trial = trial + np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * self.archive_rate) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..2dcece2ce --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution.py @@ -0,0 +1,164 @@ +import numpy as np +import math + + +class EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution: + def __init__( + self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7, reinit_frequency=100 + ): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.reinit_frequency = reinit_frequency + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + distances = np.array( + [ + [np.linalg.norm(population[i] - population[j]) for j in range(len(population))] + for i in range(len(population)) + ] + ) + too_close = (distances < 1e-3) & (distances > 0) + for i in range(len(population)): + if np.any(too_close[i]): + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + generation = 0 + + while evaluations < self.budget: + success_count = 0 + + elite_idx = np.argmin(fitness) + elite = population[elite_idx] + elite_fitness = fitness[elite_idx] + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + self.mutation_factor = np.clip( + self.mutation_factor * (1.1 if success_rate > 0.2 else 0.9), 0.4, 1.0 + ) + self.crossover_rate = np.clip( + self.crossover_rate * (1.05 if success_rate > 0.2 else 0.95), 0.6, 1.0 + ) + + if elite_fitness < min(fitness): + weakest_idx = np.argmax(fitness) + population[weakest_idx] = elite + fitness[weakest_idx] = elite_fitness + + generation += 1 + if generation % self.reinit_frequency == 0: + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + evaluations += self.population_size + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveEnvironmentalStrategyV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveEnvironmentalStrategyV24.py new file mode 100644 index 000000000..7b689c0cf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveEnvironmentalStrategyV24.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedAdaptiveEnvironmentalStrategyV24: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase, adaptive_factors): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutation_factor = adaptive_factors["mutation"] + if phase == 1: + mutant = population[best_idx] + mutation_factor * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + mutation_factor * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, adaptive_factors): + CR_val = adaptive_factors["crossover"] + crossover_mask = np.random.rand(self.dimension) < CR_val + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adapt_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + sigmoid_adjustment = 1 / (1 + np.exp(-10 * (scale - 0.5))) # Sigmoid for smooth transition + return { + "mutation": np.clip(0.5 + 0.5 * np.sin(2 * np.pi * sigmoid_adjustment), 0.1, 1), + "crossover": np.clip(0.5 + 0.5 * np.cos(2 * np.pi * sigmoid_adjustment), 0.1, 1), + } + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + adaptive_factors = self.adapt_parameters( + iteration, switch_point if phase == 1 else self.budget - switch_point + ) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase, adaptive_factors) + trial = self.crossover(population[i], mutant, adaptive_factors) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy.py b/nevergrad/optimization/lama/EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy.py new file mode 100644 index 000000000..beb59241c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 2.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.random.uniform( + self.scaling_factor_range[0], self.scaling_factor_range[1], size=self.population_size + ) + crossover_rates = np.random.uniform( + self.crossover_rate_range[0], self.crossover_rate_range[1], size=self.population_size + ) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutant = np.clip( + a + scaling_factors[i] * (b - c) + scaling_factors[i] * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rates[i] + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values, population + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values, population): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + scaling_factors *= np.exp(0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6)) + crossover_rates *= np.exp(0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6)) + + scaling_factors = np.clip(scaling_factors, self.scaling_factor_range[0], self.scaling_factor_range[1]) + crossover_rates = np.clip(crossover_rates, self.crossover_rate_range[0], self.crossover_rate_range[1]) + + # Adaptive parameter adjustment based on population diversity + diversity = np.mean(np.std(population, axis=0)) + scaling_factors *= np.exp(0.01 * diversity) + crossover_rates *= np.exp(0.01 * diversity) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveExplorationExploitationAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveExplorationExploitationAlgorithm.py new file mode 100644 index 000000000..a65ac560b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveExplorationExploitationAlgorithm.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedAdaptiveExplorationExploitationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 300 # Increased initial population size for better diversity + self.F = 0.7 # Differential weight for exploration + self.CR = 0.4 # Crossover probability for controlled exploitation + self.local_search_chance_initial = 0.4 # Initial local search probability + self.elite_ratio = 0.15 # Ratio of elite members to retain + self.diversity_threshold = 1e-4 # Increased threshold to switch between exploration and exploitation + self.reinit_percentage = 0.4 # Higher reinitialization percentage for better diversity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(15): # Increased local search iterations + step_size = np.random.uniform(-0.05, 0.05, size=self.dim) # Smaller step size for finer search + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max(0.1, self.local_search_chance_initial * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveExplorationOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveExplorationOptimizer.py new file mode 100644 index 000000000..9915bc8a8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveExplorationOptimizer.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Search space dimension + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 250 + mutation_factor = 0.8 + crossover_rate = 0.9 + elite_size = 25 + + # Initialize population and evaluate + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mechanisms + success_tracker = np.zeros(population_size) + mutation_factors = np.full(population_size, mutation_factor) + crossover_rates = np.full(population_size, crossover_rate) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Elite retention + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + # Parents selection + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with adaptive mutation rate + mutant = a + mutation_factors[i] * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover with adaptive crossover rate + trial = np.where(np.random.rand(self.dim) < crossover_rates[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + success_tracker[i] += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Adapt mutation and crossover rates based on success + if success_tracker[i] >= 5: + mutation_factors[i] = min(1.0, mutation_factors[i] + 0.02) + crossover_rates[i] = min(1.0, crossover_rates[i] + 0.05) + elif success_tracker[i] == 0: + mutation_factors[i] = max(0.1, mutation_factors[i] - 0.02) + crossover_rates[i] = max(0.1, crossover_rates[i] - 0.05) + + # Update best solution + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + # Refresh success tracker periodically to avoid stagnation + if evaluations % 1000 == 0: + success_tracker = np.zeros(population_size) + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..786e11e53 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveFireworksAlgorithm.py new file mode 100644 index 000000000..d6c86e450 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveFireworksAlgorithm.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveFireworksAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma *= 0.99 # Adjusted sigma update rule for slower decrease + return max(0.1, self.sigma) + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGaussianSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveGaussianSearch.py new file mode 100644 index 000000000..b29efd92c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGaussianSearch.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedAdaptiveGaussianSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + # Initialize variables + self.f_opt = np.inf + self.x_opt = None + # Start with a random point in the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update optimal solution if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale of the Gaussian perturbations + scale = 1.0 + # Introduce memory to remember past successful steps + memory = [] + + # Main optimization loop + for i in range(self.budget - 1): + # Generate a new candidate by perturbing the current point + if np.random.rand() < 0.2 and memory: + # With a 20% chance, jump to a historically good point + candidate = memory[np.random.randint(len(memory))] + np.random.normal( + 0, scale * 0.5, self.dim + ) + else: + candidate = current_point + np.random.normal(0, scale, self.dim) + + # Ensure the candidate stays within bounds + candidate = np.clip(candidate, -5.0, 5.0) + candidate_f = func(candidate) + + # If the candidate is better, move there and adjust the perturbation scale + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + scale *= 1.2 # Increase scale to explore further + memory.append(candidate) # Remember successful step + + # Update the optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + # Limit memory size to avoid excessive growth + if len(memory) > 10: + memory.pop(0) + + # If not better, decrease the perturbation scale to refine search + else: + scale *= 0.85 # Encourage more localized search + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGradientBalancedCrossoverPSO.py b/nevergrad/optimization/lama/EnhancedAdaptiveGradientBalancedCrossoverPSO.py new file mode 100644 index 000000000..5f20bf37e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGradientBalancedCrossoverPSO.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveGradientBalancedCrossoverPSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.1, + social_weight=2.0, + crossover_rate=0.15, + mutation_rate=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.crossover_rate = crossover_rate + self.mutation_rate = mutation_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = self.inertia_weight * velocities[i] + personal_component + social_component + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + # Crossover mechanism + if np.random.rand() < self.crossover_rate: + j = np.random.choice([x for x in range(self.population_size) if x != i]) + crossover_point = np.random.randint(self.dim) + particles[i][:crossover_point], particles[j][:crossover_point] = ( + particles[j][:crossover_point].copy(), + particles[i][:crossover_point].copy(), + ) + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.dim, size=int(np.ceil(self.dim * 0.2)), replace=False + ) + particles[i][mutation_indices] += np.random.normal(0, 1, size=len(mutation_indices)) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..844598b3c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 25 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=200, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGranularStrategyV26.py b/nevergrad/optimization/lama/EnhancedAdaptiveGranularStrategyV26.py new file mode 100644 index 000000000..ea0922746 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGranularStrategyV26.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveGranularStrategyV26: + def __init__( + self, budget, dimension=5, population_size=100, F_min=0.4, F_max=0.9, CR_min=0.5, CR_max=0.9 + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_min = F_min + self.F_max = F_max + self.CR_min = CR_min + self.CR_max = CR_max + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[best_idx] - population[c]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adapt_parameters(self, iteration, total_iterations): + progress = iteration / total_iterations + self.F = np.clip( + self.F_min + (self.F_max - self.F_min) * np.sin(np.pi * progress), self.F_min, self.F_max + ) + self.CR = np.clip( + self.CR_min + (self.CR_max - self.CR_min) * np.cos(np.pi * progress), self.CR_min, self.CR_max + ) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adapt_parameters(evaluations, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV10.py new file mode 100644 index 000000000..34aefe50e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV10.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV10: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + if np.random.rand() < self.epsilon: # Introduce random perturbation + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV11.py new file mode 100644 index 000000000..7bf6677b7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV11.py @@ -0,0 +1,112 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV11: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + mu=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.mu = mu + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + def perturb_population(self, func, population, f_vals): + for i in range(self.population_size): + if np.random.rand() < self.mu: + population[i] = np.random.uniform(low=func.bounds.lb, high=func.bounds.ub) + f_vals[i] = func(population[i]) + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + population, f_vals = self.perturb_population(func, population, f_vals) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV12.py new file mode 100644 index 000000000..cb28e27ae --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV12.py @@ -0,0 +1,115 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV12: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + mu=0.1, + sigma=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.mu = mu + self.sigma = sigma + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + def perturb_population(self, func, population, f_vals): + for i in range(self.population_size): + if np.random.rand() < self.mu: + population[i] = np.random.normal(population[i], self.sigma) + population[i] = np.clip(population[i], func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + population, f_vals = self.perturb_population(func, population, f_vals) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV19.py new file mode 100644 index 000000000..84e86b591 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV19.py @@ -0,0 +1,115 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV19: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def check_premature_convergence(self, f_vals): + sorted_vals = np.sort(f_vals) + diff = np.diff(sorted_vals) + quartile = np.percentile(diff, 75) # 75th percentile of the differences + return quartile < self.epsilon + + def adaptive_population_size(self, func, t): + return min(max(int(20 + 0.1 * t), 10), 50) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population_size = self.population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + if t % 100 == 0: + population_size = self.adaptive_population_size(func, t) + + if population_size != self.population_size: + self.population_size = population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + if self.check_premature_convergence(f_vals): + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV20.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV20.py new file mode 100644 index 000000000..436c345d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV20.py @@ -0,0 +1,119 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV20: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def check_premature_convergence(self, f_vals): + sorted_vals = np.sort(f_vals) + diff = np.diff(sorted_vals) + quartile = np.percentile(diff, 75) # 75th percentile of the differences + return quartile < self.epsilon + + def adaptive_population_size(self, func, t): + return min(max(int(20 + 0.1 * t), 10), 50) + + def adaptive_alpha_min(self, t): + return max(self.alpha_min * np.exp(-self.gamma * t), 0.05) # Ensure alpha_min doesn't drop too low + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population_size = self.population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + if t % 100 == 0: + population_size = self.adaptive_population_size(func, t) + self.alpha_min = self.adaptive_alpha_min(t) + + if population_size != self.population_size: + self.population_size = population_size + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + if self.check_premature_convergence(f_vals): + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV21.py new file mode 100644 index 000000000..95d264f19 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV21.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV21: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def check_premature_convergence(self, f_vals): + sorted_vals = np.sort(f_vals) + diff = np.diff(sorted_vals) + quartile = np.percentile(diff, 75) # 75th percentile of the differences + return quartile < self.epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(len(population)): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + if self.check_premature_convergence(f_vals): + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV27.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV27.py new file mode 100644 index 000000000..743b1e2e0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV27.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV27: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV28.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV28.py new file mode 100644 index 000000000..f20bf763a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV28.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV28: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV3.py new file mode 100644 index 000000000..ef7a3fb03 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV3.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV3: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-self.alpha * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha * (1.0 - self.delta) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.beta_max = self.update_beta(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV4.py new file mode 100644 index 000000000..e8f4e00c5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV4.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV4: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-self.alpha * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha * (1.0 - self.delta) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.beta_max = self.update_beta(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV5.py new file mode 100644 index 000000000..b18de28f0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV5.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV5: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV6.py new file mode 100644 index 000000000..82dc39154 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV6.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV6: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV7.py new file mode 100644 index 000000000..01822d084 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV7.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV7: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV8.py new file mode 100644 index 000000000..1cf5764ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV8.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV8: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV9.py new file mode 100644 index 000000000..c683f03d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmIntelligenceV9.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmIntelligenceV9: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py new file mode 100644 index 000000000..f1a794145 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(100): # Increase the number of optimization runs to 100 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(2000): # Increase the number of iterations within each optimization run to 2000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGuidedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveGuidedDifferentialEvolution.py new file mode 100644 index 000000000..3b252f673 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGuidedDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedAdaptiveGuidedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 120 # Increased population size for better diversity + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.5: # Increased probability of local search + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.3 + (0.4 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveGuidedMutationOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveGuidedMutationOptimizer.py new file mode 100644 index 000000000..d90a08682 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveGuidedMutationOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveGuidedMutationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 300 # Slightly increased population size for better exploration + mutation_factor = ( + 0.8 # Adjusted initial mutation factor to balance exploration and robust convergence + ) + crossover_prob = 0.7 # Adjusted initial crossover probability for more frequent trials + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Enhance with Local Search Initialization + local_search_frequency = 200 # frequency of localized search + local_search_radius = 0.1 # radius of the local search + + # Main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Mutation and crossover phases + indices = np.arange(population_size) + indices = np.delete(indices, i) + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + + # Hybrid mutation strategy with local search tweak + if current_budget % local_search_frequency == 0: + # Conduct a local search around the current best + local_mutant = best_solution + local_search_radius * np.random.randn(self.dim) + local_mutant = np.clip(local_mutant, self.lower_bound, self.upper_bound) + local_fitness = func(local_mutant) + current_budget += 1 + + if local_fitness < best_fitness: + best_solution = local_mutant + best_fitness = local_fitness + + mutant = population[i] + mutation_factor * ( + best_solution - population[i] + x1 - (x2 + x3) / 2 + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + best_index = np.argmin(fitness) + + # Adaptively adjust mutation and crossover parameters + mutation_factor = max(0.5, mutation_factor - 0.005) # Slower decrease + crossover_prob = min(0.9, crossover_prob + 0.005) # Slower increase + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearch.py new file mode 100644 index 000000000..d85c8ba96 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearch.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicFireworksTabuSearch: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 + self.bandwidth *= 0.95 + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def enhance_search(self, harmony_memory, best_solution, func, bounds): + self.diversify_search(harmony_memory, bounds) + self.local_search(harmony_memory, best_solution, func, bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.adaptive_tabu_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.enhance_search(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearchV2.py new file mode 100644 index 000000000..d680da607 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicFireworksTabuSearchV2.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicFireworksTabuSearchV2: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Fine-tune tabu ratio update + self.bandwidth *= 0.95 # Refine bandwidth adjustment + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def enhance_search(self, harmony_memory, best_solution, func, bounds): + self.diversify_search(harmony_memory, bounds) + self.local_search(harmony_memory, best_solution, func, bounds) + + def hybrid_search(self, harmony_memory, best_solution, func, bounds): + self.enhance_search(harmony_memory, best_solution, func, bounds) + self.adaptive_tabu_search(harmony_memory, best_solution, func, bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.hybrid_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicOptimizationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicOptimizationV2.py new file mode 100644 index 000000000..959e0de19 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicOptimizationV2.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicOptimizationV2: + def __init__( + self, + budget=1000, + num_particles=50, + num_dimensions=5, + harmony_memory_rate=0.8, + pitch_adjust_rate=0.6, + local_search_prob=0.3, + step_size_factor=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size_factor = step_size_factor + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + indexes = np.random.choice(range(self.num_particles), size=2, replace=False) + new_solution[i] = np.mean(memory_matrix[indexes, i]) + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) * self.step_size_factor + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV10.py new file mode 100644 index 000000000..a2857c24d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV10.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV10: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.95 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.05 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV18.py new file mode 100644 index 000000000..31a03d58e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV18.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV18: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, 0.1, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def adapt_pitch_rate(self): + return 0.1 + 0.4 * (self.budget - self.iteration) / self.budget + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.iteration = i + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + self.pitch_adjustment_rate = self.adapt_pitch_rate() # Adapt pitch adjustment rate + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV21.py new file mode 100644 index 000000000..02dad8e62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV21.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV21: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.05 # Increase the tabu ratio for more exploration + self.bandwidth *= 0.95 # Decrease the bandwidth for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV22.py new file mode 100644 index 000000000..808e38897 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV22.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV22: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.01 # Increase the tabu ratio for more exploration + self.bandwidth *= 0.95 # Decrease the bandwidth for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV23.py new file mode 100644 index 000000000..20615e32b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV23.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV23: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.05 # Increase the tabu ratio for more exploration + self.bandwidth *= 0.9 # Decrease the bandwidth for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV25.py new file mode 100644 index 000000000..f6d23a163 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV25.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV25: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.05 # Increase the tabu ratio slightly for more exploration + self.bandwidth *= 0.95 # Decrease the bandwidth slightly for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV26.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV26.py new file mode 100644 index 000000000..08e6f4ef5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV26.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV26: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.98 # Adjust the bandwidth for better exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV27.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV27.py new file mode 100644 index 000000000..549805fad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV27.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV27: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.98 # Adjust the bandwidth for better exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV29.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV29.py new file mode 100644 index 000000000..470959cad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV29.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV29: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.98 # Adjust the bandwidth for better exploitation + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.adaptive_tabu_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV30.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV30.py new file mode 100644 index 000000000..35e8273a4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV30.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV30: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.01 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.99 # Adjust the bandwidth for better exploitation + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.adaptive_tabu_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV31.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV31.py new file mode 100644 index 000000000..a406d7255 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV31.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV31: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.95 # Adjust the bandwidth for better exploitation + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.adaptive_tabu_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV9.py new file mode 100644 index 000000000..05cea229f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonicTabuSearchV9.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonicTabuSearchV9: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyFireworksAlgorithm.py new file mode 100644 index 000000000..8c8fea342 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyFireworksAlgorithm.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyFireworksAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=20, + pitch_adjust_rate=0.7, + mutation_rate=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + if np.random.rand() < 0.5: + new_solution[i] = best_solution[i] + else: + new_solution[i] = np.random.uniform(-5.0, 5.0) + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + + return np.clip(new_solution, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithm.py new file mode 100644 index 000000000..05ad46d2b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithm.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithm: + def __init__( + self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Adaptive adjustment of HMCR + self.par = min(self.par + 0.0001, 0.5) # Adaptive adjustment of PAR + self.bw = max(self.bw - 0.00001, 0.01) # Adaptive adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Adaptive adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV10.py new file mode 100644 index 000000000..07d723697 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV10.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV10: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Adjust HMCR with a smaller step + self.par = min(self.par + 0.0001, 0.3) # Adjust PAR with a smaller step + self.bw = max(self.bw - 0.0001, 0.02) # Adjust BW with a smaller step + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Adjust Memetic Probability with a smaller step + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV11.py new file mode 100644 index 000000000..b5402bb34 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV11.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV11: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV12.py new file mode 100644 index 000000000..ce8f96e10 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV12.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV12: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV13.py new file mode 100644 index 000000000..d2f5b2bd5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV13.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV13: + def __init__( + self, budget=10000, hmcr=0.7, par=0.3, bw=0.05, memetic_iter=150, memetic_prob=0.9, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV14.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV14.py new file mode 100644 index 000000000..07793125d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV14.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV14: + def __init__( + self, budget=10000, hmcr=0.7, par=0.3, bw=0.05, memetic_iter=150, memetic_prob=0.9, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV16.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV16.py new file mode 100644 index 000000000..6951860f1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV16.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV16: + def __init__( + self, budget=10000, hmcr=0.8, par=0.4, bw=0.1, memetic_iter=300, memetic_prob=0.95, memetic_step=0.02 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV18.py new file mode 100644 index 000000000..daaec5c7e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV18.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV18: + def __init__( + self, + budget=10000, + hmcr=0.95, + par=0.7, + bw=0.02, + memetic_iter=1000, + memetic_prob=0.99, + memetic_step=0.005, + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV19.py new file mode 100644 index 000000000..68a324dec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV19.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV19: + def __init__( + self, + budget=10000, + hmcr=0.95, + par=0.7, + bw=0.02, + memetic_iter=1000, + memetic_prob=0.99, + memetic_step=0.005, + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV2.py new file mode 100644 index 000000000..1165f9e4e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV2: + def __init__( + self, budget=10000, hmcr=0.8, par=0.3, bw=0.1, memetic_iter=100, memetic_prob=0.8, memetic_step=0.05 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Adaptive adjustment of HMCR + self.par = min(self.par + 0.0001, 0.4) # Adaptive adjustment of PAR + self.bw = max(self.bw - 0.00001, 0.02) # Adaptive adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Adaptive adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV20.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV20.py new file mode 100644 index 000000000..87f42bc11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV20.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV20: + def __init__( + self, budget=10000, hmcr=0.9, par=0.7, bw=0.1, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.01 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV21.py new file mode 100644 index 000000000..b873299d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV21.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV21: + def __init__( + self, budget=10000, hmcr=0.9, par=0.7, bw=0.1, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.01 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV22.py new file mode 100644 index 000000000..60bea58de --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV22.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV22: + def __init__( + self, budget=10000, hmcr=0.9, par=0.7, bw=0.1, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.01 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV23.py new file mode 100644 index 000000000..ffcad6678 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV23.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV23: + def __init__( + self, budget=10000, hmcr=0.8, par=0.6, bw=0.2, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.05 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV24.py new file mode 100644 index 000000000..335c9f9c7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV24.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV24: + def __init__( + self, budget=10000, hmcr=0.9, par=0.7, bw=0.3, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV25.py new file mode 100644 index 000000000..546699f80 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV25.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV25: + def __init__( + self, budget=10000, hmcr=0.9, par=0.7, bw=0.3, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + min_idx = np.argmin(harmony_memory_costs) + if new_cost < harmony_memory_costs[min_idx]: + harmony_memory[min_idx] = new_harmony + harmony_memory_costs[min_idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV3.py new file mode 100644 index 000000000..fbfe26b27 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV3.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV3: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.00005, 0.6) # Adaptive adjustment of HMCR + self.par = min(self.par + 0.00005, 0.3) # Adaptive adjustment of PAR + self.bw = max(self.bw - 0.000005, 0.02) # Adaptive adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.00005, 1.0 + ) # Adaptive adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV4.py new file mode 100644 index 000000000..856596627 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV4: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Enhanced adjustment of HMCR + self.par = min(self.par + 0.0001, 0.3) # Enhanced adjustment of PAR + self.bw = max(self.bw - 0.0001, 0.02) # Enhanced adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Enhanced adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV5.py new file mode 100644 index 000000000..3f947f264 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV5.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV5: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0005, 0.6) # Enhanced adjustment of HMCR + self.par = min(self.par + 0.0005, 0.3) # Enhanced adjustment of PAR + self.bw = max(self.bw - 0.0005, 0.02) # Enhanced adjustment of BW + self.memetic_prob = min( + self.memetic_prob + 0.0005, 1.0 + ) # Enhanced adjustment of Memetic Probability + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV6.py new file mode 100644 index 000000000..36cac9d62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV6.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV6: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.001, 0.6) # Enhanced adjustment of HMCR with smaller step + self.par = min(self.par + 0.001, 0.3) # Enhanced adjustment of PAR with smaller step + self.bw = max(self.bw - 0.001, 0.02) # Enhanced adjustment of BW with smaller step + self.memetic_prob = min( + self.memetic_prob + 0.001, 1.0 + ) # Enhanced adjustment of Memetic Probability with smaller step + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV7.py new file mode 100644 index 000000000..1dce387c3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV7.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV7: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0005, 0.6) # Enhanced adjustment of HMCR with smaller step + self.par = min(self.par + 0.0005, 0.3) # Enhanced adjustment of PAR with smaller step + self.bw = max(self.bw - 0.0005, 0.02) # Enhanced adjustment of BW with smaller step + self.memetic_prob = min( + self.memetic_prob + 0.0005, 1.0 + ) # Enhanced adjustment of Memetic Probability with smaller step + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV8.py new file mode 100644 index 000000000..0fb646b45 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV8.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV8: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.0001, 0.6) # Adjust HMCR with smaller step + self.par = min(self.par + 0.0001, 0.3) # Adjust PAR with smaller step + self.bw = max(self.bw - 0.0001, 0.02) # Adjust BW with smaller step + self.memetic_prob = min( + self.memetic_prob + 0.0001, 1.0 + ) # Adjust Memetic Probability with smaller step + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV9.py new file mode 100644 index 000000000..6d17de87e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticAlgorithmV9.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticAlgorithmV9: + def __init__( + self, budget=10000, hmcr=0.7, par=0.2, bw=0.05, memetic_iter=100, memetic_prob=0.8, memetic_step=0.03 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + self.hmcr = max(self.hmcr - 0.00005, 0.6) # Adjust HMCR with a smaller step + self.par = min(self.par + 0.00005, 0.3) # Adjust PAR with a smaller step + self.bw = max(self.bw - 0.00005, 0.02) # Adjust BW with a smaller step + self.memetic_prob = min( + self.memetic_prob + 0.00005, 1.0 + ) # Adjust Memetic Probability with a smaller step + + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV28.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV28.py new file mode 100644 index 000000000..05b9dd0b9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV28.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV28: + def __init__(self, budget=10000, memory_size=50, pitch_adjustment_rate=0.9, pitch_bandwidth=0.5): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.uniform(-0.1, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + # Enhanced pitch adjustment rate adaptation based on success rate and convergence + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + if success_rate > 0.5: + self.pitch_adjustment_rate *= 1.1 + else: + self.pitch_adjustment_rate *= 0.9 + + if i % 50 == 0 and i != 0: + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len( + harmony_memory_costs + ) + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.1 + else: + self.pitch_bandwidth *= 0.9 + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV29.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV29.py new file mode 100644 index 000000000..521263942 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV29.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV29: + def __init__(self, budget=10000, memory_size=50, pitch_adjustment_rate=0.9, pitch_bandwidth=0.5): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + # Enhanced pitch adjustment rate adaptation based on success rate and convergence + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if i % 50 == 0 and i != 0: + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len( + harmony_memory_costs + ) + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.05 + else: + self.pitch_bandwidth *= 0.95 + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV3.py new file mode 100644 index 000000000..517b68f8f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV3.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV3: + def __init__(self, budget=10000, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.2, memory_size=100): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, self.memory_size + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV30.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV30.py new file mode 100644 index 000000000..9766de0f9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV30.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV30: + def __init__(self, budget=10000, memory_size=50, pitch_adjustment_rate=0.9, pitch_bandwidth=0.5): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + # Enhanced pitch adjustment rate adaptation based on success rate and convergence + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if i % 50 == 0 and i != 0: + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len( + harmony_memory_costs + ) + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.05 + else: + self.pitch_bandwidth *= 0.95 + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV31.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV31.py new file mode 100644 index 000000000..ba68fc417 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV31.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV31: + def __init__(self, budget=10000, memory_size=50, pitch_adjustment_rate=0.9, pitch_bandwidth=0.5): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_pitch_parameters(self, success_rate, convergence_rate): + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.05 + else: + self.pitch_bandwidth *= 0.95 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + + self._adapt_pitch_parameters(success_rate, convergence_rate) + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV32.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV32.py new file mode 100644 index 000000000..a7b4ee671 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV32.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV32: + def __init__( + self, + budget=10000, + memory_size=50, + pitch_adjustment_rate=0.9, + pitch_bandwidth=0.5, + local_search_prob=0.8, + ): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + self.local_search_prob = local_search_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_pitch_parameters(self, success_rate, convergence_rate): + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.05 + else: + self.pitch_bandwidth *= 0.95 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + + self._adapt_pitch_parameters(success_rate, convergence_rate) + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV33.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV33.py new file mode 100644 index 000000000..e60a4e3ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV33.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV33: + def __init__( + self, + budget=10000, + memory_size=50, + pitch_adjustment_rate=0.8, + pitch_bandwidth=0.4, + local_search_prob=0.9, + ): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + self.local_search_prob = local_search_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(100): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_pitch_parameters(self, success_rate, convergence_rate): + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.1 + else: + self.pitch_bandwidth *= 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + + self._adapt_pitch_parameters(success_rate, convergence_rate) + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV4.py new file mode 100644 index 000000000..cef85bb8d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV4.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV4: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV5.py new file mode 100644 index 000000000..3b3136ebf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV5.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV5: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.6, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV6.py new file mode 100644 index 000000000..5e36668f3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV6.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV6: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.7, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV7.py new file mode 100644 index 000000000..bdf10147c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV7.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV7: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.8, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV8.py new file mode 100644 index 000000000..076dea44d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticOptimizationV8.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticOptimizationV8: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.85, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearch.py new file mode 100644 index 000000000..1f9d258b5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearch.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticSearch: + def __init__( + self, budget=10000, hmcr=0.7, par=0.4, bw=0.6, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.explore_prob = 0.1 # Probability of exploration + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < self.explore_prob: # Introduce exploration + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearchV2.py new file mode 100644 index 000000000..7d6a60142 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyMemeticSearchV2.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyMemeticSearchV2: + def __init__( + self, budget=10000, hmcr=0.7, par=0.4, bw=0.6, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.explore_prob = 0.1 # Probability of exploration + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + convergence_curve = [] + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < self.explore_prob: # Introduce exploration + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + convergence_curve.append(self.f_opt) + + mean_aocc = np.mean(np.array(convergence_curve)) + std_dev = np.std(np.array(convergence_curve)) + + return mean_aocc, std_dev diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization.py new file mode 100644 index 000000000..72e5bdaee --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(10): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization.py new file mode 100644 index 000000000..99caaf20b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(10): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizer.py new file mode 100644 index 000000000..f6d00b6be --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchOptimizer: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.1 + 0.4 * (1 - iteration / self.budget) # Adaptive Pitch Adjustment Rate + hmcr = 0.6 + 0.2 * (1 - iteration / self.budget) # Adaptive Harmony Memory Consideration Rate + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.01: # Fixed value for pitch adjustment + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Exploring rate fixed to 0.1 + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizerV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizerV2.py new file mode 100644 index 000000000..24c685efa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchOptimizerV2.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchOptimizerV2: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return min(1.5, bandwidth * 1.1) + else: + return max(0.5, bandwidth * 0.9) + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.01, self.exploration_rate * 0.95) + else: + return min(0.3, self.exploration_rate * 1.05) + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + if abs(best_fitness - prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV10.py new file mode 100644 index 000000000..def878b61 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV10.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV10: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV11.py new file mode 100644 index 000000000..ad5420271 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV11.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV11: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV12.py new file mode 100644 index 000000000..79444d5ee --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV12.py @@ -0,0 +1,95 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV12: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: # Intensification step to focus exploration + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV13.py new file mode 100644 index 000000000..0cdb0df47 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV13.py @@ -0,0 +1,98 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV13: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV14.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV14.py new file mode 100644 index 000000000..476c90af1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV14.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV14: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV15.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV15.py new file mode 100644 index 000000000..a7c539c1d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV15.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV15: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV16.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV16.py new file mode 100644 index 000000000..7ad68e62a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV16.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV16: + def __init__(self, budget=1000, hmcr=0.7, par=0.4, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV17.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV17.py new file mode 100644 index 000000000..375a4cd5f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV17.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV17: + def __init__(self, budget=1000, hmcr=0.7, par=0.5, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.3: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV18.py new file mode 100644 index 000000000..0fed9e986 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV18.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV18: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.4: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV19.py new file mode 100644 index 000000000..9663ba177 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV19.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV19: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.2: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.4: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV20.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV20.py new file mode 100644 index 000000000..4ddd7fac0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV20.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV20: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.4: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 100 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV21.py new file mode 100644 index 000000000..c839d2c81 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV21.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV21: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.5: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 40 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 80 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV22.py new file mode 100644 index 000000000..6d16cdaf3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV22.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV22: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 30 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 60 == 0: + self.intensify_exploration(harmony_memory, func) + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV23.py new file mode 100644 index 000000000..88c0fd3e5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV23.py @@ -0,0 +1,101 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV23: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 30 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 40 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 80 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV24.py new file mode 100644 index 000000000..f3da1d6cd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV24.py @@ -0,0 +1,101 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV24: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 25 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV25.py new file mode 100644 index 000000000..5b6b6b6b4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV25.py @@ -0,0 +1,101 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV25: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV3.py new file mode 100644 index 000000000..040693a3b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV3.py @@ -0,0 +1,81 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV4.py new file mode 100644 index 000000000..5b7ce26c1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV4.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def enhanced_local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.03): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def enhanced_diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.uniform(-scale, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def enhanced_global_best_update(self, harmony_memory, func): + sorted_harmony = sorted(harmony_memory, key=lambda x: func(x)) + return sorted_harmony[0] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.enhanced_global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.enhanced_global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 10 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.enhanced_diversification_search(harmony_memory, func.bounds, scale=0.1) + + if i % 200 == 0: + harmony_memory = self.enhanced_local_search( + harmony_memory, func, func.bounds, iterations=5, bandwidth=0.03 + ) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV5.py new file mode 100644 index 000000000..456e13e76 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV5.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV5: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 100 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV6.py new file mode 100644 index 000000000..e8a277360 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV6.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV6: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 100 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV7.py new file mode 100644 index 000000000..45bb4a0fc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV7.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV7: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 100 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV8.py new file mode 100644 index 000000000..30b944035 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV8.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV8: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 100 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV9.py new file mode 100644 index 000000000..274c41295 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchV9.py @@ -0,0 +1,88 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchV9: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 50 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration.py new file mode 100644 index 000000000..e50e09f9c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_adaptive_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) + sigma = np.random.uniform(0.1, 1.0) # Adaptive scaling factor + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / np.abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.05 + sigma *= 0.95 # Adaptive decay factor + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10.py new file mode 100644 index 000000000..69b510f02 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3.py new file mode 100644 index 000000000..ea59075f7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4.py new file mode 100644 index 000000000..811329a77 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5.py new file mode 100644 index 000000000..f11594a2c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6.py new file mode 100644 index 000000000..f50461b5d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7.py new file mode 100644 index 000000000..02ed3e313 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds): + best_solution = solution + best_cost = func(solution) + for _ in range(5): + new_solution = self.exploit( + [solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8.py new file mode 100644 index 000000000..b22b04258 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(5): + new_harmony = self.exploit( + [new_harmony], func, func.bounds, bandwidth=0.01 + ) # Fixed bandwidth for local search + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds): + best_solution = solution + best_cost = func(solution) + for _ in range(5): + new_solution = self.exploit( + [solution], func, func.bounds, bandwidth=0.01 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9.py new file mode 100644 index 000000000..d8d6a932c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.01): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=5, bandwidth=0.01): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement.py new file mode 100644 index 000000000..700c93dc9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10.py new file mode 100644 index 000000000..3de25af51 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.02): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11.py new file mode 100644 index 000000000..5a26dd8d9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.01): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2.py new file mode 100644 index 000000000..b8c66eac7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3.py new file mode 100644 index 000000000..acf2ba124 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4.py new file mode 100644 index 000000000..cac8a2ffa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.2 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5.py new file mode 100644 index 000000000..216c9ba52 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.2 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6.py new file mode 100644 index 000000000..0c6b313b0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.2): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7.py new file mode 100644 index 000000000..4f78069bd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8.py new file mode 100644 index 000000000..90ca92c6d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.05): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9.py new file mode 100644 index 000000000..a41af1090 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.03): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17.py new file mode 100644 index 000000000..e1e4fcc84 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + # Enhanced Hybrid inspiration using Gaussian distribution with variable standard deviation + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18.py new file mode 100644 index 000000000..ab0d2d087 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.2: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + # Enhanced Hybrid inspiration using Gaussian distribution with variable standard deviation + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6.py new file mode 100644 index 000000000..1369d2db5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_enhanced_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_enhanced_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12.py new file mode 100644 index 000000000..7f39475e4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.001): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13.py new file mode 100644 index 000000000..d4021d223 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, levy_step_size=0.0005): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb), self.levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.5) * np.sin(np.pi * 1.5 / 2) / (np.math.gamma(1.75) * 1.5 * 2**0.25) + ) ** 0.6667 + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** 0.6667 + epsilon) + levy[:, i] = 1.5 * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2.py new file mode 100644 index 000000000..e008e574a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2.py @@ -0,0 +1,132 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def enhanced_local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.03): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 10 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.05) + + if i % 200 == 0: + harmony_memory = self.enhanced_local_search( + harmony_memory, func, func.bounds, iterations=5, bandwidth=0.03 + ) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3.py new file mode 100644 index 000000000..51317998f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def enhanced_local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.03): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def enhanced_diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.uniform(-scale, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def enhanced_global_best_update(self, harmony_memory, func): + sorted_harmony = sorted(harmony_memory, key=lambda x: func(x)) + return sorted_harmony[0] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.enhanced_global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.enhanced_global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 10 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.enhanced_diversification_search(harmony_memory, func.bounds, scale=0.1) + + if i % 200 == 0: + harmony_memory = self.enhanced_local_search( + harmony_memory, func, func.bounds, iterations=5, bandwidth=0.03 + ) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithHybridInspirationV16.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithHybridInspirationV16.py new file mode 100644 index 000000000..b9ff5464b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithHybridInspirationV16.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithHybridInspirationV16: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.1: + # Hybrid inspiration using Gaussian distribution + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, 0.1, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight.py new file mode 100644 index 000000000..4aafbbe46 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, leviness=1.5, alpha=0.6): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.leviness = leviness + self.alpha = alpha + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(size=self.harmony_memory_size, dimension=len(func.bounds.lb)) + new_harmony[:, i] += self.alpha * levy[:, i] + + new_harmony = np.clip(new_harmony, func.bounds.lb, func.bounds.ub) + + return new_harmony + + def generate_levy_flight(self, size, dimension): + levy = np.zeros((size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.0 + self.leviness) + * np.sin(np.pi * self.leviness / 2) + / (np.math.gamma(1.0 + 2 * self.leviness) * (self.leviness**0.5)) + ) ** (1.0 / self.leviness) + + for i in range(size): + for j in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1.0 / self.leviness) + epsilon) + levy[i, j] = step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10.py new file mode 100644 index 000000000..445a7bc10 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11.py new file mode 100644 index 000000000..30abb28e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12.py new file mode 100644 index 000000000..bba1b0ce3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13.py new file mode 100644 index 000000000..6860ddc92 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14.py new file mode 100644 index 000000000..0f12d1163 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15.py new file mode 100644 index 000000000..47188406f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4.py new file mode 100644 index 000000000..2e15ebc0c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros(dimension) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7.py new file mode 100644 index 000000000..7050d9c67 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8.py new file mode 100644 index 000000000..1b3506e4e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9.py new file mode 100644 index 000000000..f42b7a626 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_alpha=1.5, + levy_beta=1.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlight.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlight.py new file mode 100644 index 000000000..56c4a2736 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlight.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithLevyFlight: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, leviness=1.5, alpha=0.6): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.leviness = leviness + self.alpha = alpha + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(size=self.harmony_memory_size, dimension=len(func.bounds.lb)) + new_harmony[:, i] += self.alpha * levy[:, i] + + new_harmony = np.clip(new_harmony, func.bounds.lb, func.bounds.ub) + + return new_harmony + + def generate_levy_flight(self, size, dimension): + levy = np.zeros((size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.0 + self.leviness) + * np.sin(np.pi * self.leviness / 2) + / (np.math.gamma(1.0 + 2 * self.leviness) * (self.leviness**0.5)) + ) ** (1.0 / self.leviness) + + for i in range(size): + for j in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1.0 / self.leviness) + epsilon) + levy[i, j] = step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2.py new file mode 100644 index 000000000..5590029a6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, 1, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / np.abs(v) ** (1 / beta) + levy += step * self.levy_alpha + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimization.py new file mode 100644 index 000000000..a58b6fff5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimization.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimization: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(10): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2.py new file mode 100644 index 000000000..49d468f68 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.01): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=5, bandwidth=0.01): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3.py new file mode 100644 index 000000000..597218dd5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.01): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=5, bandwidth=0.01): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4.py new file mode 100644 index 000000000..d16681a90 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=5, bandwidth=0.01): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=5, bandwidth=0.01): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=3, bandwidth=0.05 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5.py new file mode 100644 index 000000000..ff7bf5098 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=3, bandwidth=0.05 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6.py new file mode 100644 index 000000000..0cfb6a979 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 15 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.08) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3.py new file mode 100644 index 000000000..4495e3898 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=5, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4.py new file mode 100644 index 000000000..87f6a1565 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=5, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5.py new file mode 100644 index 000000000..4b350695f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=5, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6.py new file mode 100644 index 000000000..7f2d67276 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=5, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7.py new file mode 100644 index 000000000..55ed3e247 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=5, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8.py new file mode 100644 index 000000000..3ce833ce8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = self.simulated_annealing(h, func, func.bounds, max_iter=10, initial_temp=5.0) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def diversity_search(self, harmony_memory, func, func_bounds): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, 0.1, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func, func.bounds) + + if i % 100 == 0: + harmony_memory = self.diversity_search(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight.py new file mode 100644 index 000000000..347d8e91a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, leviness=1.5, alpha=0.6): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.leviness = leviness + self.alpha = alpha + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(size=self.harmony_memory_size, dimension=len(func.bounds.lb)) + new_harmony[:, i] += self.alpha * levy[:, i] + + new_harmony = np.clip(new_harmony, func.bounds.lb, func.bounds.ub) + + return new_harmony + + def generate_levy_flight(self, size, dimension): + levy = np.zeros((size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.0 + self.leviness) + * np.sin(np.pi * self.leviness / 2) + / (np.math.gamma(1.0 + 2 * self.leviness) * (self.leviness**0.5)) + ) ** (1.0 / self.leviness) + + for i in range(size): + for j in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1.0 / self.leviness) + epsilon) + levy[i, j] = step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration.py new file mode 100644 index 000000000..7388ec847 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + levy_sigma_scaling=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + self.levy_sigma_scaling = levy_sigma_scaling + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: + levy = self.generate_refined_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_refined_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) + sigma = self.levy_sigma_scaling # Use a fixed scaling factor + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / np.abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.05 + sigma = self.levy_sigma_scaling # Retain the fixed scaling factor + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing.py new file mode 100644 index 000000000..3a06160e9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing.py @@ -0,0 +1,98 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.98): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2.py new file mode 100644 index 000000000..afd0dc541 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2.py @@ -0,0 +1,98 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.98): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3.py new file mode 100644 index 000000000..b25ec1a62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4.py new file mode 100644 index 000000000..27984d4da --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5.py new file mode 100644 index 000000000..1fab9d54a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6.py new file mode 100644 index 000000000..60456b3bb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6.py @@ -0,0 +1,106 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.2: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def simulated_annealing(self, solution, func, func_bounds, max_iter=10, initial_temp=10.0): + current_solution = solution.copy() + current_cost = func(current_solution) + T = initial_temp + for _ in range(max_iter): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + new_cost = func(new_solution) + if new_cost < current_cost or np.random.rand() < np.exp((current_cost - new_cost) / T): + current_solution = new_solution + current_cost = new_cost + T *= 0.95 # Cooling schedule + return current_solution + + def local_search(self, harmony_memory, func): + for i in range(len(harmony_memory)): + harmony_memory[i] = self.simulated_annealing( + harmony_memory[i], func, func.bounds, max_iter=5, initial_temp=5.0 + ) + return harmony_memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.simulated_annealing(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuOptimization.py new file mode 100644 index 000000000..8463cb113 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyTabuOptimization: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list, iteration): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list, iteration) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.2 * iteration / self.budget) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list, i) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV2.py new file mode 100644 index 000000000..9dd54a345 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV2.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyTabuSearchV2: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 # Initial success ratio + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_ratio > self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 # Increase pitch adjustment rate + elif self.success_ratio < self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 # Decrease pitch adjustment rate + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + self.adjust_parameters() # Adjust parameters periodically + self.iteration += 1 + + # Update success ratio based on the improvement in the best score + if best_score < self.f_opt: + self.success_ratio = 1.0 + else: + self.success_ratio *= 0.95 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV3.py new file mode 100644 index 000000000..d972e9531 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV3.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyTabuSearchV3: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 # Initial success ratio + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count == 0: + self.pitch_adjustment_rate *= 0.9 # Decrease pitch adjustment rate + elif self.success_count > 0.8 * self.num_harmonies: + self.pitch_adjustment_rate *= 1.1 # Increase pitch adjustment rate + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() # Adjust parameters periodically + self.iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV4.py new file mode 100644 index 000000000..d649d1e94 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyTabuSearchV4: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV5.py new file mode 100644 index 000000000..13f5db799 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHarmonyTabuSearchV5.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveHarmonyTabuSearchV5: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory.py new file mode 100644 index 000000000..253e4f022 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory.py @@ -0,0 +1,135 @@ +import numpy as np + + +class EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions with variable size + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Adjust memory size dynamically based on progress + if evaluations % (self.budget // 4) == 0: + memory_size = min(20, memory_size + 5) + new_memory = np.zeros((memory_size, self.dim)) + new_memory_scores = np.full(memory_size, np.Inf) + new_memory[: len(memory)] = memory + new_memory_scores[: len(memory_scores)] = memory_scores + memory = new_memory + memory_scores = new_memory_scores + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV22.py new file mode 100644 index 000000000..b965fae5b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV22.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV22: + def __init__( + self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, gaussian_std=0.1, levy_rate=0.3 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV23.py new file mode 100644 index 000000000..9c7ef03a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV23.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV23: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + levy_rate=0.3, + levy_step_size=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV24.py new file mode 100644 index 000000000..f5d6a6c18 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV24.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV24: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + levy_rate=0.3, + levy_step_size=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV25.py new file mode 100644 index 000000000..b6770dd7c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV25.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV25: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + levy_rate=0.3, + levy_step_size=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV26.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV26.py new file mode 100644 index 000000000..147360b48 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV26.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV26: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + levy_rate=0.3, + levy_step_size=0.6, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV27.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV27.py new file mode 100644 index 000000000..8938dcfae --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridHarmonySearchV27.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveHybridHarmonySearchV27: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + levy_rate=0.3, + levy_step_size=0.7, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.levy_rate = levy_rate + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.levy_rate: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridMetaOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridMetaOptimizer.py new file mode 100644 index 000000000..fce0c627a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridMetaOptimizer.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveHybridMetaOptimizer: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.3 + self.local_search_probability = 0.75 + self.F = 0.9 + self.CR = 0.7 + self.memory_size = 10 + self.strategy_switch_threshold = 0.01 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.9 + c1 = 1.5 + c2 = 1.5 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = EnhancedAdaptiveHybridMetaOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..2337d92c0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridOptimizer.py @@ -0,0 +1,148 @@ +import numpy as np + + +class EnhancedAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # Dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def gradient_descent(self, x, func, budget, step_size=0.01): + best_x = x.copy() + best_f = func(x) + grad = np.zeros(self.dim) + for _ in range(budget): + for i in range(self.dim): + x_plus = x.copy() + x_plus[i] += step_size + f_plus = func(x_plus) + grad[i] = (f_plus - best_f) / step_size + + x = np.clip(x - step_size * grad, self.bounds[0], self.bounds[1]) + f = func(x) + if f < best_f: + best_x = x + best_f = f + + return best_x, best_f + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + progress = self.eval_count / global_search_budget + self.w = 0.4 + 0.5 * (1 - progress) + self.c1 = 1.5 - 0.5 * progress + self.c2 = 1.5 + 0.5 * progress + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + if np.random.rand() < 0.3: + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + if np.random.rand() < 0.5: + new_x, new_f = self.gradient_descent(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + else: + new_x, new_f = self.local_search(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..967c9663d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np + + +class EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Differential weight + self.initial_CR = 0.9 # Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.local_search_rate = 0.2 # Probability for local search + self.memory_size = 5 # Memory size for self-adaptation + self.w = 0.5 # Inertia weight for PSO + self.c1 = 1.5 # Cognitive coefficient for PSO + self.c2 = 2.0 # Social coefficient for PSO + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal bests + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + # Simple local search strategy + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with memory + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.6, 1.0), np.clip(adaptive_CR, 0.6, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + # Update memory + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # PSO update for non-elite particles + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + # Update personal bests + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + # Update global best + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + # Update population and fitness + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py new file mode 100644 index 000000000..0896d6356 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 60 + self.initial_F = 0.7 + self.initial_CR = 0.9 + self.elite_rate = 0.15 + self.local_search_rate = 0.25 + self.memory_size = 5 + self.w = 0.6 + self.c1 = 1.5 + self.c2 = 1.7 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveInertiaHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveInertiaHybridOptimizer.py new file mode 100644 index 000000000..6806f5470 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveInertiaHybridOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EnhancedAdaptiveInertiaHybridOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.7 + self.global_coeff = np.linspace(0.9, 0.5, self.budget) # Dynamic adaptation of global coefficient + self.local_coeff = np.linspace(0.5, 0.9, self.budget) # Dynamic adaptation of local coefficient + self.inertia_base = 1.1 + self.inertia_reduction = 0.3 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + inertia_weight = self.inertia_base - (self.inertia_reduction * (evaluations / self.budget)) + + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + inertia_weight * velocities[i] + + self.local_coeff[evaluations] * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff[evaluations] * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py new file mode 100644 index 000000000..5cdd90a4f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2.py new file mode 100644 index 000000000..d1f1dd180 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def update_step_size(self, iter_count): + return self.step_size / np.sqrt(iter_count + 1) + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for i in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + self.step_size = self.update_step_size(i) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3.py new file mode 100644 index 000000000..622a9db22 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + self.alpha = alpha + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def update_step_size(self, iter_count): + return self.step_size / (1 + iter_count) ** self.alpha + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for i in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + population = self.update_diversity_mutation(population) + self.step_size = self.update_step_size(i) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4.py new file mode 100644 index 000000000..18de11d58 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + self.alpha = alpha + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def update_step_size(self, iter_count): + return self.step_size / (1 + iter_count) ** self.alpha + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + population = self.update_diversity_mutation(population) + self.step_size = self.update_step_size(i) + + return np.mean(best_fitnesses), best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearch.py new file mode 100644 index 000000000..fa8154601 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearch.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveLevyHarmonySearch: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + levy_step_size=0.3, + global_best_rate=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV2.py new file mode 100644 index 000000000..562fc9ea9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV2.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveLevyHarmonySearchV2: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + levy_step_size=0.3, + global_best_rate=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV3.py new file mode 100644 index 000000000..e37c9608b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLevyHarmonySearchV3.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveLevyHarmonySearchV3: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + levy_step_size=0.3, + global_best_rate=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..787461f5b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_range=0.1 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(10): # Adaptive local search + perturb_range = search_range * np.exp(-_ / 10) # Reduce perturbation range over iterations + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2.py new file mode 100644 index 000000000..88baea1c3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3.py new file mode 100644 index 000000000..4951b6daf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4.py new file mode 100644 index 000000000..f3a5dcd1a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func, search_range=0.1) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5.py new file mode 100644 index 000000000..bbabf4ad3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step( + candidate_x, func, search_range=self.perturb_range + ) # Use perturb_range directly + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..7f2005d0b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,115 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.cos(np.pi * iteration / max_iterations) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def reset_population(self, population, fitness, func): + threshold = np.percentile(fitness, 75) + for i in range(len(population)): + if fitness[i] > threshold: + population[i] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[i] = func(population[i]) + return population, fitness + + def elitist_selection(self, population, fitness): + elite_size = max(1, len(population) // 10) + elite_indices = np.argsort(fitness)[:elite_size] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with refined strategies + if np.random.rand() < 0.5: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Elitist selection to preserve the best individuals + elites, elite_fitness = self.elitist_selection(population, fitness) + + # Reset part of the population if stagnation is detected + if evaluations + population_size <= self.budget and iteration % 5 == 0: + population, fitness = self.reset_population(population, fitness, func) + + # Inject elite individuals back into the population + elite_size = len(elites) + population[:elite_size] = elites + fitness[:elite_size] = elite_fitness + + iteration += 1 + + return self.f_opt, self.x_opt + + +# Example usage: +# optimizer = EnhancedAdaptiveMemeticDifferentialEvolution(budget=10000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizer.py new file mode 100644 index 000000000..1100754d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizer.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveMemeticDiverseOptimizer: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=15): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.6 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV2.py new file mode 100644 index 000000000..74e1d99e7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV2.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveMemeticDiverseOptimizerV2: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Elite update using archive members + if self.rng.random() < self.elite_update_rate: + archive_index = self.rng.choice(len(archive)) + elite_index = self.rng.choice(elite_count) + population[elite_index] = archive[archive_index] + fitness[elite_index] = evaluate(population[elite_index]) + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV3.py new file mode 100644 index 000000000..d6eec48e0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticDiverseOptimizerV3.py @@ -0,0 +1,186 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveMemeticDiverseOptimizerV3: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + self.diversity_check_interval = 50 + self.diversity_threshold = 0.1 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + def calculate_diversity(population): + pairwise_distances = np.sum((population[:, None] - population[None, :]) ** 2, axis=-1) + mean_distance = np.sum(pairwise_distances) / (self.population_size * (self.population_size - 1)) + return mean_distance + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Check and adapt diversity + if eval_count % self.diversity_check_interval == 0: + diversity = calculate_diversity(population) + if diversity < self.diversity_threshold: + new_population = self.rng.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + new_fitness = np.array([evaluate(ind) for ind in new_population]) + eval_count += self.population_size + population = np.vstack((population, new_population)) + fitness = np.hstack((fitness, new_fitness)) + top_indices = np.argsort(fitness)[: self.population_size] + population = population[top_indices] + fitness = fitness[top_indices] + + # Elite update using archive members + if self.rng.random() < self.elite_update_rate: + archive_index = self.rng.choice(len(archive)) + elite_index = self.rng.choice(elite_count) + population[elite_index] = archive[archive_index] + fitness[elite_index] = evaluate(population[elite_index]) + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2.py new file mode 100644 index 000000000..81a933b48 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2: + def __init__(self, budget, population_size=50): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.5 + np.random.rand() * 0.3 + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.5 * ((iteration / max_iterations) ** 0.5) + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + return crossover_rate, learning_rate, memetic_probability + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimization.py new file mode 100644 index 000000000..6afdbffa6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimization.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHarmonyOptimization: + def __init__(self, budget=10000, memetic_iter=1000, memetic_prob=0.8, memetic_step=0.1): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV2.py new file mode 100644 index 000000000..354715cc2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV2.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHarmonyOptimizationV2: + def __init__(self, budget=10000, memetic_iter=500, memetic_prob=0.6, memetic_step=0.05): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV3.py new file mode 100644 index 000000000..09d032a09 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV3.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHarmonyOptimizationV3: + def __init__(self, budget=10000, memetic_iter=500, memetic_prob=0.6, memetic_step=0.05): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV4.py new file mode 100644 index 000000000..8354529e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV4.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHarmonyOptimizationV4: + def __init__(self, budget=10000, memetic_iter=500, memetic_prob=0.6, memetic_step=0.05): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV6.py new file mode 100644 index 000000000..d10c5421c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHarmonyOptimizationV6.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHarmonyOptimizationV6: + def __init__(self, budget=10000, memetic_iter=500, memetic_prob=0.6, memetic_step=0.1, memory_size=50): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < 0.5: # Increase the probability of using existing values + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.7: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, self.memory_size + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHybridOptimizer.py new file mode 100644 index 000000000..e776dd448 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticHybridOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.7 + self.elite_fraction = 0.2 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-0.1, 0.1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func, budget): + for _ in range(budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + if budget <= 0: + break + return individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(20, self.budget - evaluations) + elite_population[idx] = self.local_search( + elite_population[idx], bounds, func, local_search_budget + ) + evaluations += local_search_budget + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + # Additional mechanism for maintaining diversity + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemeticOptimizerV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticOptimizerV7.py new file mode 100644 index 000000000..779ee0bc6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemeticOptimizerV7.py @@ -0,0 +1,144 @@ +import numpy as np + + +class EnhancedAdaptiveMemeticOptimizerV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.memory_size = 30 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 30 # Adjusted local search iterations for efficiency + self.elitism_rate = 0.1 # Adjusted elitism rate for better exploration + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.3 # Adjusted local search probability for better balance + self.alpha = 0.01 + self.beta = 0.01 # Learning rate for adaptive parameters + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = (1 - self.beta) * self.memory_F[ + self.memory_index + ] + self.beta * F + self.memory_CR[self.memory_index] = (1 - self.beta) * self.memory_CR[ + self.memory_index + ] + self.beta * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryControlStrategyV49.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryControlStrategyV49.py new file mode 100644 index 000000000..528e50cb0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryControlStrategyV49.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryControlStrategyV49: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=10, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + 0.1 * memory_effect + else: + mutant = population[a] + self.F * (population[b] - population[c]) + 0.3 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + adaptive_rate = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * adaptive_rate), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(np.pi * adaptive_rate), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + iteration = 0 + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryDualPhaseStrategyV46.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryDualPhaseStrategyV46.py new file mode 100644 index 000000000..44458b0ec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryDualPhaseStrategyV46.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryDualPhaseStrategyV46: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover probability + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.long_term_memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Use both short-term and long-term memory to guide mutation + memory_effect = (np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension)) + ( + np.mean(self.long_term_memory, axis=0) if self.long_term_memory else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + self.long_term_memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + iteration = 0 + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost.py new file mode 100644 index 000000000..4c68ded4d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost.py @@ -0,0 +1,141 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Annealing properties + T_initial = 1.0 + T_min = 1e-5 + alpha_initial = 0.97 + beta_initial = 1.5 + + # Initial solution + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Larger memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + # Initial settings + T = T_initial + alpha = alpha_initial + beta = beta_initial + + # Define dynamic phases + phase1 = self.budget // 3 # Exploration phase + phase2 = 2 * self.budget // 3 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.5 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + else: + beta = 3.0 # Higher acceptance for local search refinement + alpha = 0.90 # Faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 8) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 6) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridAnnealing.py new file mode 100644 index 000000000..b19c7a4ce --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridAnnealing.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryHybridAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, slightly less aggressive cooling + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + # Adaptive memory factor + memory_factor = 0.2 # Increased memory influence + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < memory_factor: + x_candidate = memory[i] + T * np.random.randn(self.dim) + else: + x_candidate = x_current + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + + # Hybrid component: local search around the candidate + local_search_range = 0.1 * T + x_local_candidate = x_candidate + local_search_range * np.random.randn(self.dim) + x_local_candidate = np.clip(x_local_candidate, func.bounds.lb, func.bounds.ub) + + f_candidate = func(x_candidate) + f_local_candidate = func(x_local_candidate) + evaluations += 2 + + # Use the better of the candidate and local candidate + if f_local_candidate < f_candidate: + x_candidate = x_local_candidate + f_candidate = f_local_candidate + + if f_candidate < f_current or np.exp((f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + # Adapt memory factor based on temperature + memory_factor = max(0.1, memory_factor * (1 - T / T_initial)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridDEPSO.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridDEPSO.py new file mode 100644 index 000000000..ab90f2d0b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryHybridDEPSO.py @@ -0,0 +1,174 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Lower to 10% of budget to encourage exploration + archive_size = 10 # Increase memory archive size for better exploration + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def mutation_strategy_3(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c, d = population[np.random.choice(indices, 4, replace=False)] + return np.clip(a + F * (b - c) + F * (d - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + rand = np.random.rand() + if rand < 0.33: + return mutation_strategy_1 + elif rand < 0.66: + return mutation_strategy_2 + else: + return mutation_strategy_3 + + def update_archive(archive, new_solution, new_fitness): + if len(archive) < archive_size: + archive.append((new_solution, new_fitness)) + else: + archive.sort(key=lambda x: x[1]) + if new_fitness < archive[-1][1]: + archive[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + archive = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + + update_archive(archive, trial, f_trial) + + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV54.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV54.py new file mode 100644 index 000000000..b221ebebc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV54.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryStrategyV54: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + mutant = population[a] + self.F * (population[b] - population[c]) + + if self.memory: + memory_effect = np.mean(self.memory, axis=0) + mutant += 0.1 * memory_effect # Scaled memory influence + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV79.py b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV79.py new file mode 100644 index 000000000..d6f2ae619 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMemoryStrategyV79.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptiveMemoryStrategyV79: + def __init__(self, budget, dimension=5, population_size=50, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, phase): + size = len(population) + a, b, c = np.random.choice(size, 3, replace=False) + F = 0.5 + 0.4 * np.sin(np.pi * phase) # Dynamic mutation factor based on phase + if phase < 0.5: + # Exploration phase + mutant = population[a] + F * (population[b] - population[c]) + else: + # Exploitation phase using memory + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[best_idx] + F * (population[b] - population[c]) + 0.1 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + CR = 0.9 - 0.4 * np.cos(np.pi * len(self.memory) / self.memory_size) # Dynamic crossover rate + crossover_mask = np.random.rand(self.dimension) < CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(self.memory_size)] = trial - target + return trial, f_trial + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + phase = 0 + + while evaluations < self.budget: + phase_progress = evaluations / self.budget + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, phase_progress) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + evaluations += 1 + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSO.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSO.py new file mode 100644 index 000000000..8a0e5b8ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv12.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv12.py new file mode 100644 index 000000000..59a4c084a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv12.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetAQAPSOv12: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.25 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv14.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv14.py new file mode 100644 index 000000000..a6bdb8c7c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv14.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetAQAPSOv14: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.3 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv15.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv15.py new file mode 100644 index 000000000..96fea6ab6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv15.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetAQAPSOv15: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.5 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv16.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv16.py new file mode 100644 index 000000000..3b054d231 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv16.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetAQAPSOv16: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv2.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv2.py new file mode 100644 index 000000000..46cb129a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv2.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveMetaNetAQAPSOv2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 3000 + self.meta_net_lr = 0.6 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv3.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv3.py new file mode 100644 index 000000000..f5fcdef33 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetAQAPSOv3.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdaptiveMetaNetAQAPSOv3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 2.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 5000 + self.meta_net_lr = 0.8 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO.py new file mode 100644 index 000000000..a3f0fa180 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 2.0 + self.social_weight = 2.5 + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v2.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v2.py new file mode 100644 index 000000000..b677332e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v2.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetPSO_v2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 1.5 # Adjusted cognitive weight + self.social_weight = 2.0 # Adjusted social weight + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v3.py b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v3.py new file mode 100644 index 000000000..7260c0b3d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMetaNetPSO_v3.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedAdaptiveMetaNetPSO_v3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 1.0 # Adjusted cognitive weight + self.social_weight = 2.0 # Adjusted social weight + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiMemorySimulatedAnnealing.py new file mode 100644 index 000000000..a84bb3bd5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiMemorySimulatedAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class EnhancedAdaptiveMultiMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive beta and alpha values + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i]) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..a4c4086d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiOperatorSearch.py @@ -0,0 +1,141 @@ +import numpy as np + + +class EnhancedAdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedAdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealing.py new file mode 100644 index 000000000..986616b7d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealing.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveMultiPhaseAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Adaptive Memory Enhancement + if evaluations % (memory_size * 5) == 0: + self._enhance_memory(func, memory, memory_scores) + + return self.f_opt, self.x_opt + + def _enhance_memory(self, func, memory, memory_scores): + # Enhancing memory by local optimization around best memory points + for i in range(len(memory)): + local_T = 0.1 # Low disturbance for local search + x_local = memory[i] + f_local = memory_scores[i] + for _ in range(5): # Local search iterations + x_candidate = x_local + local_T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + if f_candidate < f_local: + x_local = x_candidate + f_local = f_candidate + + memory[i] = x_local + memory_scores[i] = f_local diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealingWithGradient.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealingWithGradient.py new file mode 100644 index 000000000..8a6979923 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPhaseAnnealingWithGradient.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedAdaptiveMultiPhaseAnnealingWithGradient: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiPopulationDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPopulationDifferentialEvolution.py new file mode 100644 index 000000000..159fb70e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiPopulationDifferentialEvolution.py @@ -0,0 +1,175 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveMultiPopulationDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.num_subpopulations = 5 + self.subpop_size = self.pop_size // self.num_subpopulations + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 + self.CR = 0.9 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(self.pop_size) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, self.pop_size - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + subpopulations = np.array_split(population, self.num_subpopulations) + subfitness = np.array_split(fitness, self.num_subpopulations) + new_population = [] + new_fitness = [] + + for subpop, subfit in zip(subpopulations, subfitness): + for i in range(self.subpop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.subpop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(subfit) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(subpop, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(subpop, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(subpop, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(subpop, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < subfit[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(subpop[i]) + new_fitness.append(subfit[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Perform local search on elite solutions + elite_indices = np.argsort(fitness)[: self.num_subpopulations] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= 5: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Crowding distance to maintain diversity + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + # Opposition-based learning + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategicOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategicOptimizer.py new file mode 100644 index 000000000..26f2b8567 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategicOptimizer.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedAdaptiveMultiStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Parameters and initial conditions + population_size = 200 + mutation_rate = 0.8 + recombination_rate = 0.2 + sigma = 0.2 # Initial mutation step size + elite_size = int(0.05 * population_size) # Reduced elite proportion + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + while evaluations < self.budget: + new_population = [] + indices = np.arange(population_size) + + # Elitism + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + new_population.append(population[idx]) + + # Main evolutionary process + while len(new_population) < population_size: + # Mutation and recombination strategy based on adaptive thresholds + if np.random.rand() < mutation_rate: + # Mutation strategy + idx = np.random.choice(indices) + individual = population[idx] + mutant = individual + sigma * np.random.randn(self.dim) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + mutant_fitness = func(mutant) + evaluations += 1 + + # Acceptance of new mutant + if mutant_fitness < fitness[idx]: + new_population.append(mutant) + if mutant_fitness < best_fitness: + best_solution = mutant + best_fitness = mutant_fitness + else: + new_population.append(individual) + else: + # Recombination + parents = np.random.choice(indices, 2, replace=False) + alpha = np.random.rand() + offspring = alpha * population[parents[0]] + (1 - alpha) * population[parents[1]] + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + offspring_fitness = func(offspring) + evaluations += 1 + + # Acceptance of new offspring + if offspring_fitness < fitness[parents[0]] and offspring_fitness < fitness[parents[1]]: + new_population.append(offspring) + if offspring_fitness < best_fitness: + best_solution = offspring + best_fitness = offspring_fitness + else: + new_population.append(population[parents[0]]) + + population = np.array(new_population) + fitness = np.array([func(x) for x in population]) + + # Adaptive mutation rate and sigma adjustment + mutation_rate = min(1.0, mutation_rate + np.random.uniform(-0.05, 0.05)) # Smoother adjustment + sigma = max( + 0.001, sigma * np.exp(0.05 * (np.mean(fitness) - best_fitness) / best_fitness) + ) # Smoother sigma adjustment + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDE.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDE.py new file mode 100644 index 000000000..b789bc7be --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDE.py @@ -0,0 +1,128 @@ +import numpy as np + + +class EnhancedAdaptiveMultiStrategyDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.5, 0.9 + CR_min, CR_max = 0.1, 1.0 + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + memory_size = 5 # Memory size for adaptive parameters + memory_F = np.full(memory_size, 0.5) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), F_min, F_max) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), CR_min, CR_max) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..0fcab93b3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyDifferentialEvolution.py @@ -0,0 +1,135 @@ +import numpy as np + + +class EnhancedAdaptiveMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedAdaptiveMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyOptimizer.py new file mode 100644 index 000000000..223732c94 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveMultiStrategyOptimizer.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveMultiStrategyOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.5 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.adaptive_crossover_prob = [0.9, 0.8, 0.7, 0.6, 0.5] + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 5 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Crossover and mutation + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.min_local_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..dc8ff2aef --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 80 # Increased swarm size for better exploration + self.init_num_niches = 8 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + self.local_search_prob = 0.2 # Adjusted probability for performing local search + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adjust local search probability based on progress + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..ad86f7d93 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedAdaptiveOppositionBasedDifferentialEvolution: + def __init__(self, budget=10000, pop_size=40, f_init=0.8, cr_init=0.9, scaling_factor=0.1): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + success_rate = success / self.pop_size + f_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + cr_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def enhance_search(self, solution, best_solution, scaling_factor): + return solution + scaling_factor * (best_solution - solution) + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + success_count = 0 + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + enhanced_solution = self.enhance_search(current_solution, best_solution, self.scaling_factor) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + enhanced_fitness = func(enhanced_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + success_count += 1 + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + success_count += 1 + + if enhanced_fitness < self.pop_fitness[j]: + self.population[j] = enhanced_solution + self.pop_fitness[j] = enhanced_fitness + success_count += 1 + + f_current, cr_current = self.adaptive_parameter_update( + success_count, f_current, cr_current, self.scaling_factor + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], enhanced_fitness, self.population[j], enhanced_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2.py b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2.py new file mode 100644 index 000000000..7e93646e0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2: + def __init__(self, budget=10000, pop_size=40, f_init=0.8, cr_init=0.9, scaling_factor=0.1): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + success_rate = success / self.pop_size + f_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + cr_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def enhance_search(self, solution, best_solution, scaling_factor): + return solution + scaling_factor * (best_solution - solution) + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + success_count = 0 + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + success_count += 1 + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + success_count += 1 + + f_current, cr_current = self.adaptive_parameter_update( + success_count, f_current, cr_current, self.scaling_factor + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE.py b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE.py new file mode 100644 index 000000000..88e2ad820 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE.py @@ -0,0 +1,118 @@ +import numpy as np + + +class EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + de_scale=0.5, + de_sf_min=0.5, + de_sf_max=1.0, + de_sf_decay=0.99, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr + self.par = par + self.bw = bw + self.bw_min = bw_min + self.bw_decay = bw_decay + self.bw_range = bw_range + self.de_scale = de_scale + self.de_sf_min = de_sf_min + self.de_sf_max = de_sf_max + self.de_sf_decay = de_sf_decay + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func, bandwidth): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += bandwidth * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return max(self.bw_range / (1 + iteration), self.bw_min) + + def adapt_de_scale_factor(self): + return max(self.de_sf_min, self.de_sf_max * self.de_sf_decay) + + def differential_evolution(self, func, current_harmony, best_harmony, scale_factor): + mutant_harmony = current_harmony + scale_factor * (best_harmony - current_harmony) + return np.clip(mutant_harmony, func.bounds.lb, func.bounds.ub) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = self.adjust_bandwidth(i) + + new_harmony = self.harmony_search(func, self.bw) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + best_harmony = self.harmony_memory[np.argmin(self.harmony_memory_fitness)] + scale_factor = self.adapt_de_scale_factor() + trial_harmony = self.differential_evolution(func, new_harmony, best_harmony, scale_factor) + trial_fitness = func(trial_harmony) + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_harmony + + idx_worst_trial = np.argmax(self.harmony_memory_fitness) + if trial_fitness < self.harmony_memory_fitness[idx_worst_trial]: + self.harmony_memory[idx_worst_trial] = trial_harmony + self.harmony_memory_fitness[idx_worst_trial] = trial_fitness + + self.bw = self.bw * self.bw_decay + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveOrthogonalDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveOrthogonalDifferentialEvolution.py new file mode 100644 index 000000000..2c0f1a21e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveOrthogonalDifferentialEvolution.py @@ -0,0 +1,63 @@ +import numpy as np + + +class EnhancedAdaptiveOrthogonalDifferentialEvolution: + def __init__( + self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, orthogonal_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + self.orthogonal_factor_min = 0.1 + self.orthogonal_factor_max = 0.9 + self.orthogonal_factor_decay = 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + orthogonal_factor = self.orthogonal_factor + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + orthogonal_vector = np.random.normal(0, orthogonal_factor, size=dimension) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + orthogonal_factor = max( + orthogonal_factor * self.orthogonal_factor_decay, self.orthogonal_factor_min + ) + + if np.random.rand() < 0.1: # Introduce random restart + population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension) + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.delete(np.arange(len(population)), current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py b/nevergrad/optimization/lama/EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py new file mode 100644 index 000000000..2855fbccd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py @@ -0,0 +1,157 @@ +import numpy as np + + +class EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.1 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-5 + self.learning_rate = 0.1 + + # For adaptive population sizing + self.min_pop_size = 20 + self.max_pop_size = 70 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + # Use memory to adapt parameters F and CR + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(5): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self, current_pop_size): + new_pop_size = np.random.randint(self.min_pop_size, self.max_pop_size + 1) + return new_pop_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size( + self.initial_pop_size + ) # Adapt population size here + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size # Update population size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptivePrecisionCohortOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdaptivePrecisionCohortOptimizationV5.py new file mode 100644 index 000000000..bea477d90 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptivePrecisionCohortOptimizationV5.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedAdaptivePrecisionCohortOptimizationV5: + def __init__(self, budget, dimension=5, population_size=250, elite_fraction=0.1, mutation_intensity=0.9): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial intensity for mutation + + def __call__(self, func): + # Initialize the population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites based on current fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new candidates + for i in range(self.population_size): + if np.random.rand() < self.adaptive_mutation_rate(evaluations): + # Mutation: random elite perturbation + parent_idx = np.random.choice(elite_indices) + mutation = np.random.normal(0, self.adaptive_mutation_scale(evaluations), self.dimension) + child = np.clip(population[parent_idx] + mutation, -5.0, 5.0) + else: + # Crossover: combine features of two elites + parents = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + (population[parents[0], :crossover_point], population[parents[1], crossover_point:]) + ) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + # Update the population + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def adaptive_mutation_rate(self, evaluations): + # Gradually reduce mutation rate for balanced exploration and exploitation + return max(0.05, 1 - (evaluations / self.budget)) + + def adaptive_mutation_scale(self, evaluations): + # Gradual decay of mutation scale to refine search as evaluations progress + return self.mutation_intensity * (1 / (1 + np.log(1 + evaluations))) diff --git a/nevergrad/optimization/lama/EnhancedAdaptivePrecisionFocalStrategy.py b/nevergrad/optimization/lama/EnhancedAdaptivePrecisionFocalStrategy.py new file mode 100644 index 000000000..40972d6c0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptivePrecisionFocalStrategy.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedAdaptivePrecisionFocalStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + focal_ratio=0.1, + elite_ratio=0.05, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.focal_population_size = int(population_size * focal_ratio) + self.elite_population_size = int(population_size * elite_ratio) + self.sigma = 0.3 # Initial standard deviation for mutations + self.learning_rate = 0.1 # Learning rate for self-adaptation of sigma + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation with adaptive sigma + return np.clip( + individual + np.random.normal(0, self.sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def select_focal_group(self, population, fitness): + # Select a smaller focal group based on the best fitness values + sorted_indices = np.argsort(fitness) + return population[sorted_indices[: self.focal_population_size]] + + def select_elite_group(self, population, fitness): + # Select the elite group for intense exploitation + sorted_indices = np.argsort(fitness) + return population[sorted_indices[: self.elite_population_size]] + + def recombine(self, focal_group): + # Global intermediate recombination from a focal group + return np.mean(focal_group, axis=0) + + def adapt_sigma(self, success_rate): + # Dynamically adjust sigma based on observed mutation success + if success_rate > 0.2: + self.sigma /= self.learning_rate + elif success_rate < 0.2: + self.sigma *= self.learning_rate + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + successful_mutations = 0 + + while evaluations < self.budget: + focal_group = self.select_focal_group(population, fitness) + elite_group = self.select_elite_group(population, fitness) + recombined_individual = self.recombine(focal_group) + + for i in range(self.population_size): + if i < self.elite_population_size: + mutant = self.mutate(elite_group[i % self.elite_population_size]) + else: + mutant = self.mutate(recombined_individual) + + mutant_fitness = func(mutant) + + if mutant_fitness < fitness[i]: + population[i] = mutant + fitness[i] = mutant_fitness + successful_mutations += 1 + + if mutant_fitness < best_fitness: + best_individual = mutant + best_fitness = mutant_fitness + + evaluations += 1 + if evaluations >= self.budget: + break + + # Adjust mutation strategy based on success + success_rate = successful_mutations / self.population_size + self.adapt_sigma(success_rate) + successful_mutations = 0 # Reset for next generation + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA.py new file mode 100644 index 000000000..dc5538b44 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v10.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v10.py new file mode 100644 index 000000000..0c5f6696d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v10.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v10: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = self._calculate_gravitational_force(agents[i], masses[i], best_agent) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v11.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v11.py new file mode 100644 index 000000000..db42c3d87 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v11.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v11: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = self._calculate_gravitational_force(agents[i], masses[i], best_agent) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v12.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v12.py new file mode 100644 index 000000000..1437bc0ba --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v12.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v12: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = self._calculate_gravitational_force(agents[i], masses[i], best_agent) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v13.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v13.py new file mode 100644 index 000000000..5bd678185 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v13.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v13: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = self._calculate_gravitational_force(agents[i], masses[i], best_agent) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v14.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v14.py new file mode 100644 index 000000000..055c0ed08 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v14.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v14: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v15.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v15.py new file mode 100644 index 000000000..e9a6ce61d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v15.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v15: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v16.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v16.py new file mode 100644 index 000000000..14cbbfc10 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v16.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v16: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v17.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v17.py new file mode 100644 index 000000000..a4249115f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v17.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v17: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.98 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) + else: + self.delta = max(0.01, self.delta * 0.95) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v18.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v18.py new file mode 100644 index 000000000..c5ccddedc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v18.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v18: + def __init__( + self, budget=1000, num_agents=15, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.93 + self.alpha *= 0.97 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.15, self.delta * 1.03) + else: + self.delta = max(0.03, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v19.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v19.py new file mode 100644 index 000000000..660727137 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v19.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v19: + def __init__( + self, budget=1000, num_agents=20, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v2.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v2.py new file mode 100644 index 000000000..dc5899fac --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v2.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v2: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v20.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v20.py new file mode 100644 index 000000000..f48334393 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v20.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v20: + def __init__( + self, budget=1000, num_agents=25, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v21.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v21.py new file mode 100644 index 000000000..5885d9854 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v21.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v21: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v22.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v22.py new file mode 100644 index 000000000..60b7a07b1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v22.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v22: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v23.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v23.py new file mode 100644 index 000000000..d0560f520 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v23.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v23: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v24.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v24.py new file mode 100644 index 000000000..929b4cdc7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v24.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v24: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v25.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v25.py new file mode 100644 index 000000000..ebf5450a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v25.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v25: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.96 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v26.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v26.py new file mode 100644 index 000000000..6f9a88c75 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v26.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v26: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v27.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v27.py new file mode 100644 index 000000000..6bb53d9d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v27.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v27: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v28.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v28.py new file mode 100644 index 000000000..52dd4b2d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v28.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v28: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v29.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v29.py new file mode 100644 index 000000000..8c0b11ae6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v29.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v29: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent, dimension): + r = np.random.uniform(0, 1, size=dimension) + return agent + r * (best_agent - agent) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent, self.dimension) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v3.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v3.py new file mode 100644 index 000000000..d0233610f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v3.py @@ -0,0 +1,66 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v3: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v30.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v30.py new file mode 100644 index 000000000..10c7b9947 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v30.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v30: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v31.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v31.py new file mode 100644 index 000000000..e78dfbe35 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v31.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v31: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=3): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v32.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v32.py new file mode 100644 index 000000000..347aa97e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v32.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v32: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=3): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v33.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v33.py new file mode 100644 index 000000000..b9b6a474a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v33.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v33: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=3): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v34.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v34.py new file mode 100644 index 000000000..29849c6de --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v34.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v34: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=3): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v35.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v35.py new file mode 100644 index 000000000..d337898dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v35.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v35: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=2): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v36.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v36.py new file mode 100644 index 000000000..8006fa4aa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v36.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v36: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agents = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_agents(self, agents, fitness_values, num_best_agents=2): + best_agents_idx = np.argsort(fitness_values)[:num_best_agents] + best_agents = agents[best_agents_idx] + return best_agents + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.best_agents.append(self._update_best_agents(agents, fitness_values)) + + best_agents = self._update_best_agents(agents, fitness_values, num_best_agents=1) + self.x_opt = best_agents[0] + self.f_opt = self._objective_function(func, self.x_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v38.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v38.py new file mode 100644 index 000000000..bdae54d28 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v38.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v38: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v39.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v39.py new file mode 100644 index 000000000..768b0d1d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v39.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v39: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agent = None + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + self.best_agent = agents[best_agent_idx] + return self.best_agent, best_agent_idx + + def _adjust_agent_position(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (self.best_agent - agent), self.lb, self.ub) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v4.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v4.py new file mode 100644 index 000000000..28481307f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v4.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v4: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.1) # Increase perturbation factor if improvement + else: + self.delta = max(0.01, self.delta * 0.9) # Decrease perturbation factor if no improvement + self.prev_best_fitness = self.f_opt + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v40.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v40.py new file mode 100644 index 000000000..cfd6ca3a0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v40.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v40: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_agent = None + self.collapse_factor = 0.8 # New parameter to control agent convergence + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + self.best_agent = agents[best_agent_idx] + return self.best_agent, best_agent_idx + + def _adjust_agent_position(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (self.best_agent - agent), self.lb, self.ub) + + def _update_agents_collapse(self, agents, best_agent): + return best_agent + self.collapse_factor * (agents - best_agent) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent) + new_agent = self._update_agents_collapse( + new_agent, best_agent + ) # Introduce agent collapse towards the best agent + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v41.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v41.py new file mode 100644 index 000000000..82fc6e247 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v41.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v41: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self, current_fitness): + self.G0 *= 0.95 + self.alpha *= 0.95 + if current_fitness < self.f_opt: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.f_opt = current_fitness + + def _update_agents(self, agents, new_agents, fitness_values, new_fitness_values): + for i in range(self.num_agents): + if new_fitness_values[i] < fitness_values[i]: + agents[i] = new_agents[i] + fitness_values[i] = new_fitness_values[i] + if new_fitness_values[i] < self.f_opt: + self.f_opt = new_fitness_values[i] + self.x_opt = new_agents[i] + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + masses = self._calculate_masses(fitness_values) + + new_agents = np.copy(agents) + new_fitness_values = np.copy(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agents[i] = self._update_agent_position(agents[i], force) + new_fitness_values[i] = self._objective_function(func, new_agents[i]) + + self._update_agents(agents, new_agents, fitness_values, new_fitness_values) + self._adaptive_parameters(np.min(fitness_values)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v42.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v42.py new file mode 100644 index 000000000..408b70c98 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v42.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v42: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: # Include equality for better exploration + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v43.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v43.py new file mode 100644 index 000000000..193ceda8c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v43.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v43: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.iter_count = 0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: # Include equality for better exploration + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self.iter_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v44.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v44.py new file mode 100644 index 000000000..dc9be8490 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v44.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v44: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.iter_count = 0 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: # Include equality for better exploration + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + self.iter_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v47.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v47.py new file mode 100644 index 000000000..a14030735 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v47.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v47: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + noise = np.random.normal(0, self.step_size, size=self.dimension) + return np.clip(agent + noise + self.delta * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v5.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v5.py new file mode 100644 index 000000000..6faf01040 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v5.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v5: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v6.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v6.py new file mode 100644 index 000000000..6a715c025 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v6.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v6: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.delta = 0.1 # Perturbation factor + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v8.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v8.py new file mode 100644 index 000000000..f66df917f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v8.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v8: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v9.py b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v9.py new file mode 100644 index 000000000..568432e0b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQGSA_v9.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedAdaptiveQGSA_v9: + def __init__( + self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.iteration = 0 + self.prev_best_fitness = np.Inf + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + self.delta * np.random.uniform(-1, 1, size=agent.shape) + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 # Adjust gravitational constant reduction rate + self.alpha *= 0.98 # Adjust step size reduction rate + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.1, self.delta * 1.05) # Increase perturbation factor slightly if improvement + else: + self.delta = max( + 0.01, self.delta * 0.95 + ) # Decrease perturbation factor slightly if no improvement + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = self._calculate_gravitational_force(agents[i], masses[i], best_agent) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + self._adaptive_parameters() # Update algorithm parameters + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDEWithDynamicElitistLearning.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDEWithDynamicElitistLearning.py new file mode 100644 index 000000000..ded491c4f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDEWithDynamicElitistLearning.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumDEWithDynamicElitistLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def hybrid_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.hybrid_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + # Memory update + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = self.memory_rate * memory[i] + (1 - self.memory_rate) * population[i] + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Elitist learning phase + learned_population = self.elitist_learning(personal_bests, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < global_best_fit: + global_best_fit = learned_fitness[i] + global_best = learned_population[i] + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolution.py new file mode 100644 index 000000000..a9d48edc5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolution.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + eval_count = population_size + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = ( + initial_F * (1 - eval_count / budget) + 0.2 * np.random.rand() + ) # Random component to escape local minima + adaptive_CR = ( + initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + 0.1 * np.random.rand() + ) # Random component to escape local minima + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if eval_count % 2 == 0: # Apply quantum every alternate step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdaptiveQuantumDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch.py new file mode 100644 index 000000000..f8a656b4f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + # Standard DE mutation and crossover + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDynamicLevyOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDynamicLevyOptimization.py new file mode 100644 index 000000000..888f1d26b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumDynamicLevyOptimization.py @@ -0,0 +1,160 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumDynamicLevyOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.6 * progress + cognitive_coefficient = 1.5 + 0.4 * progress + social_coefficient = 1.5 - 0.4 * progress + differential_weight = 0.8 - 0.5 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.4 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 # Increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.6: # Increased probability for local search + local_search_iters = 5 # Increased number of local search iterations + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumGradientMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumGradientMemeticOptimizer.py new file mode 100644 index 000000000..e27ce7724 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumGradientMemeticOptimizer.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumGradientMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedAdaptiveQuantumGradientMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGB.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGB.py new file mode 100644 index 000000000..e8be9f571 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGB.py @@ -0,0 +1,62 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchDBGB: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = harmony_memory[np.argmin([func(h) for h in harmony_memory])] + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if f < func(global_best): + global_best = new_harmony + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinal.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinal.py new file mode 100644 index 000000000..d107e0bf7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinal.py @@ -0,0 +1,67 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchDBGBFinal: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII.py new file mode 100644 index 000000000..96a81b693 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII.py @@ -0,0 +1,74 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def explore(self, func, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII.py new file mode 100644 index 000000000..9a2cab618 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII.py @@ -0,0 +1,74 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBImproved.py new file mode 100644 index 000000000..183f97cfa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchDBGBImproved.py @@ -0,0 +1,65 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchDBGBImproved: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = harmony_memory[np.argmin([func(h) for h in harmony_memory])] + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if f < func(global_best): + global_best = new_harmony + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: # Introduce occasional global best updating + global_best = harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchFinal.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchFinal.py new file mode 100644 index 000000000..260c0841f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchFinal.py @@ -0,0 +1,74 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchFinal: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImproved.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImproved.py new file mode 100644 index 000000000..b9a9d2604 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImproved.py @@ -0,0 +1,74 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchImproved: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImprovedRefined.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImprovedRefined.py new file mode 100644 index 000000000..be63a99ab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumHarmonySearchImprovedRefined.py @@ -0,0 +1,74 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedAdaptiveQuantumHarmonySearchImprovedRefined: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevyMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevyMemeticOptimizer.py new file mode 100644 index 000000000..f16485ff2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevyMemeticOptimizer.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumLevyMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedAdaptiveQuantumLevyMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevySwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevySwarmOptimization.py new file mode 100644 index 000000000..3829f7496 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLevySwarmOptimization.py @@ -0,0 +1,157 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.7 + 0.3 * progress + social_coefficient = 1.7 - 0.3 * progress + differential_weight = 0.6 + 0.4 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.1 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) + + def __call__(self, func): + population_size = 70 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 20 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLocalSearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLocalSearch.py new file mode 100644 index 000000000..15e1c9e24 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumLocalSearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumLocalSearch: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + adaptive_local_search=True, + local_search_range=0.1, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + self.adaptive_local_search = adaptive_local_search + self.local_search_range = local_search_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = ( + self.local_search_range * np.exp(-_ / self.local_search_iters) + if self.adaptive_local_search + else self.local_search_range + ) + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumMemeticOptimizerV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumMemeticOptimizerV4.py new file mode 100644 index 000000000..9483811eb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumMemeticOptimizerV4.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumMemeticOptimizerV4: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.4 # Increased quantum influence + self.elite_fraction = 0.2 # Increased elite fraction + self.memory_size = 20 # Further increased memory size + self.local_search_probability = 0.5 # Higher probability for local search + self.stagnation_threshold = 3 # Further reduced threshold + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 # Slightly faster annealing for quicker adaptation + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedAdaptiveQuantumMemeticOptimizerV4(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSO.py new file mode 100644 index 000000000..fe812211a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSO.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + self.stagnation_threshold = 10 # No improvement iterations before triggering local search + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + no_improvement_count = 0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + no_improvement_count = 0 + else: + no_improvement_count += 1 + else: + no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * adaptive_factor) + else: + adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * adaptive_factor) + + # Trigger local search after a certain number of iterations without improvement + if no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + no_improvement_count = 0 # Reset the counter on improvement + + if eval_count >= self.budget: + break + + # Reset no improvement count after local search + no_improvement_count = 0 + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedAdaptiveQuantumPSO(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSOv2.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSOv2.py new file mode 100644 index 000000000..10f9f94e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumPSOv2.py @@ -0,0 +1,133 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdaptiveQuantumPSOv2: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + self.stagnation_threshold = 10 # No improvement iterations before triggering local search + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 # Annealing factor for inertia weight + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + # Trigger local search after a certain number of iterations without improvement + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 # Reset the counter on improvement + + if eval_count >= self.budget: + break + + # Reset no improvement count after local search + self.no_improvement_count = 0 + + # Anneal inertia weight to enhance exploration-exploitation balance + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedAdaptiveQuantumPSOv2(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumParticleSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumParticleSwarmOptimization.py new file mode 100644 index 000000000..858659aa4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumParticleSwarmOptimization.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumParticleSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.5, + cognitive_weight=1.5, + social_weight=1.5, + quantum_param=0.5, + adapt_param=0.1, + explore_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_param = quantum_param + self.adapt_param = adapt_param + self.explore_rate = explore_rate + + def initialize_particles(self, func): + self.particles = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_best_positions = self.particles.copy() + self.personal_best_values = np.array([func(p) for p in self.particles]) + self.global_best_position = self.personal_best_positions[np.argmin(self.personal_best_values)] + self.global_best_value = np.min(self.personal_best_values) + self.adaptive_quantum_param = np.full(self.num_particles, self.quantum_param) + + def update_particles(self, func): + for i in range(self.num_particles): + r1, r2 = np.random.uniform(0, 1, 2) + self.velocities[i] = ( + self.inertia_weight * self.velocities[i] + + self.cognitive_weight * r1 * (self.personal_best_positions[i] - self.particles[i]) + + self.social_weight * r2 * (self.global_best_position - self.particles[i]) + ) + + # Exploration mechanism + exploration = np.random.uniform(-self.explore_rate, self.explore_rate, self.dim) + self.particles[i] = np.clip(self.particles[i] + self.velocities[i] + exploration, -5.0, 5.0) + + new_value = func(self.particles[i]) + + if new_value < self.personal_best_values[i]: + self.personal_best_values[i] = new_value + self.personal_best_positions[i] = self.particles[i] + + if new_value < self.global_best_value: + self.global_best_value = new_value + self.global_best_position = self.particles[i] + + # Adaptive Quantum Parameter update + self.adaptive_quantum_param[i] = max(self.adaptive_quantum_param[i] - self.adapt_param, 0.1) + + # Quantum-inspired velocity update + self.velocities[i] = self.adaptive_quantum_param[i] * self.velocities[i] + + def __call__(self, func): + self.initialize_particles(func) + + for _ in range(self.budget // self.num_particles): + self.update_particles(func) + + return self.global_best_value, self.global_best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..2e09a02a1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealing.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSimulatedAnnealing: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + damp_ratio=0.9, + perturb_factor=0.01, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.damp_ratio = damp_ratio + self.perturb_factor = perturb_factor + self.success_count = 0 + self.failure_count = 0 + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def _perturb_solution(self, x): + return x + np.random.normal(0, self.perturb_factor, size=self.dim) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + step = 0 + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + self.success_count += 1 + else: + self.failure_count += 1 + + current_x = self._perturb_solution(current_x) + current_x = np.clip(current_x, -5.0, 5.0) + current_f = func(current_x) + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + self.explore_ratio *= self.damp_ratio + + if step % 100 == 0: + success_rate = self.success_count / (self.success_count + self.failure_count) + if success_rate < 0.2: + self.perturb_factor *= 1.2 + elif success_rate > 0.6: + self.perturb_factor *= 0.8 + + step += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealingOptimized.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealingOptimized.py new file mode 100644 index 000000000..baace7e1a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSimulatedAnnealingOptimized.py @@ -0,0 +1,52 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSimulatedAnnealingOptimized: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_factor=0.01 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_factor = perturb_factor + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def _perturb_solution(self, x): + return x + np.random.normal(0, self.perturb_factor, size=self.dim) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + current_x = self._perturb_solution(current_x) + current_x = np.clip(current_x, -5.0, 5.0) + current_f = func(current_x) + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimization.py new file mode 100644 index 000000000..248cc5b22 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimization.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.5, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_damping(self): + self.damping *= 0.99 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + self.adapt_weights() + self.adapt_damping() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV10.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV10.py new file mode 100644 index 000000000..a65a5a67d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV10.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV10: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV11.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV11.py new file mode 100644 index 000000000..6387d5b95 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV11.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV11: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.25, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV12.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV12.py new file mode 100644 index 000000000..bbceb7a8f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV12.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV12: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.25, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV13.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV13.py new file mode 100644 index 000000000..adca82e5d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV13.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV13: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.25, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.98 + + def adapt_damping(self): + self.damping *= 0.98 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV14.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV14.py new file mode 100644 index 000000000..f2c0266f9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV14.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV14: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.9, + cognitive_weight=1.5, + social_weight=1.5, + step_size=0.2, + damping=0.8, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.98 + + def adapt_damping(self): + self.damping *= 0.98 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV15.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV15.py new file mode 100644 index 000000000..9ae1a4a75 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV15.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV15: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.9, + cognitive_weight=1.5, + social_weight=1.5, + step_size=0.2, + damping=0.8, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.98 + + def adapt_damping(self): + self.damping *= 0.98 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV16.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV16.py new file mode 100644 index 000000000..c99b20529 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV16.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV16: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.9, + cognitive_weight=1.5, + social_weight=1.5, + step_size=0.2, + damping=0.8, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.98 + + def adapt_damping(self): + self.damping *= 0.98 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV17.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV17.py new file mode 100644 index 000000000..e6b57169f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV17.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV17: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + inertia_term + self.step_size * (cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV18.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV18.py new file mode 100644 index 000000000..9c5c02e4e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV18.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV18: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + inertia_term + self.step_size * (cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.05 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.05 * np.random.randn(), 0.7, 0.95) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV19.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV19.py new file mode 100644 index 000000000..fec43c624 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV19.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV19: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + inertia_term + self.step_size * (cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.05 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.05 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.1 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..e48221d2a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV2.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV20.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV20.py new file mode 100644 index 000000000..e649466ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV20.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV20: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.05 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.05 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.1 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV21.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV21.py new file mode 100644 index 000000000..482d70369 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV21.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV21: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=0.5, + social_weight=0.5, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.05 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.05 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.1 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.1 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV22.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV22.py new file mode 100644 index 000000000..f1a98651f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV22.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV22: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=0.5, + social_weight=0.5, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.03 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.03 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.05 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.05 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.05 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV23.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV23.py new file mode 100644 index 000000000..0e0b3fc4d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV23.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV23: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=0.5, + social_weight=0.5, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.01 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.01 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.02 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV24.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV24.py new file mode 100644 index 000000000..ffccda0d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV24.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV24: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=0.5, + social_weight=0.5, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.01 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.01 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.02 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV25.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV25.py new file mode 100644 index 000000000..6c6608c0b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV25.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV25: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=0.5, + social_weight=0.5, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self, func): + if np.random.rand() < 0.1: + self.step_size = np.clip(self.step_size + 0.01 * np.random.randn(), 0.1, 0.5) + if np.random.rand() < 0.1: + self.damping = np.clip(self.damping - 0.01 * np.random.randn(), 0.7, 0.95) + if np.random.rand() < 0.1: + self.inertia_weight = np.clip(self.inertia_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.cognitive_weight = np.clip(self.cognitive_weight + 0.02 * np.random.randn(), 0.1, 1.0) + if np.random.rand() < 0.1: + self.social_weight = np.clip(self.social_weight + 0.02 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV26.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV26.py new file mode 100644 index 000000000..9e00d4c0e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV26.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV26: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.01 * np.random.randn(), 0.1, 0.5) + self.damping = np.clip(self.damping - 0.01 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.02 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.02 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.02 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV27.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV27.py new file mode 100644 index 000000000..c66e9b750 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV27.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV27: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.1, 0.5) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV28.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV28.py new file mode 100644 index 000000000..965429314 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV28.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV28: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.1, 0.5) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV29.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV29.py new file mode 100644 index 000000000..2807ccc7a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV29.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV29: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.2, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles_position = np.random.uniform(-boundary, boundary, (num_particles, self.dim)) + self.particles_velocity = np.zeros((num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.boundary = boundary + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = (self.damping * self.particles_velocity[i]) + self.step_size * ( + inertia_term + cognitive_term + social_term + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.1, 0.5) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV3.py new file mode 100644 index 000000000..0e7855437 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV3.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV3: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV30.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV30.py new file mode 100644 index 000000000..6b2f5265b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV30.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV30: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV31.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV31.py new file mode 100644 index 000000000..2e74b343e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV31.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV31: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV4.py new file mode 100644 index 000000000..42da2fe6b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV4.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV4: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV5.py new file mode 100644 index 000000000..ddcb48776 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV5.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV5: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV6.py new file mode 100644 index 000000000..a34e40348 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV6.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV6: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV7.py new file mode 100644 index 000000000..76e1f08b8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV7.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV7: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV8.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV8.py new file mode 100644 index 000000000..c087d776a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV8.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV8: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV9.py b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV9.py new file mode 100644 index 000000000..64ebffcfb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveQuantumSwarmOptimizationV9.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdaptiveQuantumSwarmOptimizationV9: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.2, + damping=0.9, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + self.boundary = boundary + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-self.boundary, self.boundary, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + particle["position"] = np.clip(particle["position"], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size *= 0.95 + self.damping *= 0.98 + + def adapt_weights(self): + self.inertia_weight *= 0.95 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def adapt_num_particles(self, func): + mean_fitness = np.mean([func(particle["position"]) for particle in self.particles]) + if mean_fitness > 0.8 * self.best_fitness: + self.num_particles += 1 + elif mean_fitness < 0.2 * self.best_fitness and self.num_particles > 2: + self.num_particles -= 1 + + def adapt_step_size(self): + self.step_size *= 0.99 + + def adapt_damping(self): + self.damping *= 0.99 + + def adapt_parameters_adaptive(self, func): + func_values = [ + func(particle["position"]) for particle in self.particles if particle["best_position"] is not None + ] + if func_values: + best_func_value = min(func_values) + for i, particle in enumerate(self.particles): + if particle["best_position"] is not None and func_values[i] > best_func_value: + self.step_size *= 0.95 + self.damping *= 0.98 + else: + self.step_size *= 1.05 + self.damping *= 1.02 + + def adaptive_update(self, func, particle): + prev_position = particle["position"].copy() + self.update_particle(particle, func) + new_position = particle["position"] + + if func(new_position) >= func(prev_position): + particle["position"] = prev_position + particle["velocity"] *= -0.5 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.num_particles = 30 + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.adaptive_update(func, particle) + + self.adapt_parameters() + self.adapt_weights() + self.adapt_num_particles(func) + self.adapt_step_size() + self.adapt_damping() + self.adapt_parameters_adaptive(func) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSinusoidalDifferentialSwarm.py b/nevergrad/optimization/lama/EnhancedAdaptiveSinusoidalDifferentialSwarm.py new file mode 100644 index 000000000..6a5a1c3d3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSinusoidalDifferentialSwarm.py @@ -0,0 +1,54 @@ +import numpy as np + + +class EnhancedAdaptiveSinusoidalDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Increased population size for enhanced exploration + self.F_base = 0.5 # Base mutation factor + self.CR_base = 0.95 # Increased base crossover probability for better trial acceptance + self.adaptive_F_amplitude = 0.45 # Increased mutation factor amplitude for enhanced adaptive range + self.adaptive_CR_amplitude = ( + 0.25 # Increased crossover rate amplitude for robust exploration/exploitation balance + ) + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors using enhanced sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.adaptive_F_amplitude * np.sin(np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.cos(np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin with adaptive F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure boundaries are respected + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12.py new file mode 100644 index 000000000..8f3b7e203 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13.py new file mode 100644 index 000000000..283c073ef --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15.py new file mode 100644 index 000000000..c2d540d13 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16.py new file mode 100644 index 000000000..ff4c5f326 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + self.early_stopping = budget // 2 # Introduce early stopping + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + # Implement early stopping + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17.py new file mode 100644 index 000000000..eae43a456 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 # Probability for exploration phase + self.early_stopping = budget // 2 # Introduce early stopping + self.vel_limit = 0.5 # Velocity limit for better convergence + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: # Introduce exploration phase + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + # Implement early stopping + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18.py new file mode 100644 index 000000000..7853d845d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 0.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19.py new file mode 100644 index 000000000..6742e3ef7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 0.7 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.05 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, adaptive_vals): + if t < self.adaptive_iters: + return adaptive_vals[0], adaptive_vals[1] + else: + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / (2.0 * self.budget) + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, [cognitive_weight, social_weight]) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 40 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20.py new file mode 100644 index 000000000..52de374e0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 0.7 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.05 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.4 - 0.35 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight, social_weight + else: + return cognitive_weight - 0.01, social_weight - 0.01 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 40 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21.py new file mode 100644 index 000000000..6f8777d11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 100 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 0.7 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.05 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight, social_weight + else: + return cognitive_weight - 0.02, social_weight - 0.02 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 40 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22.py new file mode 100644 index 000000000..5392b0a55 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 150 + self.explore_prob = 0.15 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.4 - 0.35 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight, social_weight + else: + return cognitive_weight - 0.03, social_weight - 0.03 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 40 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23.py new file mode 100644 index 000000000..a16bfd789 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.2 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 40 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24.py new file mode 100644 index 000000000..c3924e667 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.05 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.5 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.1, social_weight - 0.1 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25.py new file mode 100644 index 000000000..fbec19676 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.5 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.1, social_weight - 0.1 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26.py b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26.py new file mode 100644 index 000000000..d5b717873 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.05 * np.random.randn(self.dim) # Adjusted local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.6 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 # Adjusted parameters update + else: + return cognitive_weight - 0.08, social_weight - 0.08 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.5 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveSwarmHarmonicOptimization.py b/nevergrad/optimization/lama/EnhancedAdaptiveSwarmHarmonicOptimization.py new file mode 100644 index 000000000..6546f1952 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveSwarmHarmonicOptimization.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedAdaptiveSwarmHarmonicOptimization: + def __init__( + self, + budget=1000, + num_particles=30, + num_dimensions=5, + harmony_memory_rate=0.7, + pitch_adjust_rate=0.6, + local_search_prob=0.5, + step_size_factor=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size_factor = step_size_factor + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + indexes = np.random.choice(range(self.num_particles), size=2, replace=False) + new_solution[i] = np.mean(memory_matrix[indexes, i]) + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + step_size = (bounds.ub[i] - bounds.lb[i]) * self.step_size_factor + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearch.py b/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearch.py new file mode 100644 index 000000000..f0dff087c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearch.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveTabuHarmonySearch: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.2 * iteration / self.budget) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearchV2.py b/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearchV2.py new file mode 100644 index 000000000..2ce9c5306 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdaptiveTabuHarmonySearchV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedAdaptiveTabuHarmonySearchV2: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.1 * iteration / self.budget) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdvancedAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedAdvancedAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..af146ad9a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedAdaptiveFireworkAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedAdvancedAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + exploration_range=0.5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdvancedDifferentialEvolutionLocalSearch_v56.py b/nevergrad/optimization/lama/EnhancedAdvancedDifferentialEvolutionLocalSearch_v56.py new file mode 100644 index 000000000..0119de99c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedDifferentialEvolutionLocalSearch_v56.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedAdvancedDifferentialEvolutionLocalSearch_v56: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + population_size=10, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(100): # Increased the number of runs for better results + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedAdvancedHybridDifferentialEvolutionV4.py b/nevergrad/optimization/lama/EnhancedAdvancedHybridDifferentialEvolutionV4.py new file mode 100644 index 000000000..223dcf596 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedHybridDifferentialEvolutionV4.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedAdvancedHybridDifferentialEvolutionV4: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Reduced population size for faster convergence + self.sigma = 0.1 + self.c1 = 0.02 + self.cmu = 0.03 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.5 # Adjusted for improved performance + self.CR = 0.9 # Adjusted for improved performance + self.elitism_rate = 0.2 + self.eval_count = 0 + self.alpha_levy = 0.1 + self.levy_prob = 0.2 # Increased Levy flight probability + self.adaptive_learning_rate = 0.1 + self.strategy_switches = [0.25, 0.5, 0.75] + self.local_opt_prob = 0.3 # Increased local optimization probability + self.learning_rate_decay = 0.95 + self.pbest_rate = 0.2 # Introduced p-best rate for DE/current-to-pbest/1 strategy + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, recombined): + z = (selected_population - recombined) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 5, replace=False) + x1, x2, x3, x4, x5 = ( + population[indices[0]], + population[indices[1]], + population[indices[2]], + population[indices[3]], + population[indices[4]], + ) + p_best_idx = np.random.randint(0, int(self.pbest_rate * self.population_size)) + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3 + x4 - x5 + p_best_idx - population[i])) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.1 + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + if np.std(fitness) < 1e-5: + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + def self_adaptive_differential_evolution_parameters(): + if np.random.rand() < 0.1: + self.F = np.random.uniform(0.5, 1) + self.CR = np.random.uniform(0.1, 1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + if strategy == "balanced": + population = hybridization(population, cov_matrix) + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + adapt_parameters_based_on_performance() + self_adaptive_differential_evolution_parameters() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedAdvancedHybridDifferentialEvolutionV4(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV17.py b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV17.py new file mode 100644 index 000000000..2c0ac4e1c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV17.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedAdvancedHybridMetaHeuristicOptimizerV17: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=5, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV18.py b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV18.py new file mode 100644 index 000000000..c7ba11c16 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV18.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedAdvancedHybridMetaHeuristicOptimizerV18: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=10, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV19.py b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV19.py new file mode 100644 index 000000000..683cc1a4a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedHybridMetaHeuristicOptimizerV19.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedAdvancedHybridMetaHeuristicOptimizerV19: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=20, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer.py new file mode 100644 index 000000000..cfbe97a05 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.5, + mutation_rate=0.03, + num_generations=500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + improvement_counter = 0 # Track the number of consecutive non-improvements + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + improvement_counter = 0 + else: + improvement_counter += 1 + if ( + improvement_counter >= 20 + ): # Reinitialize the particle if no improvement after 20 iterations + swarm[i] = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + personal_best[i] = np.copy(swarm[i]) + improvement_counter = 0 + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV1.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV1.py new file mode 100644 index 000000000..9669ccd9a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV1.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV1: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.05, 0.3) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV10.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV10.py new file mode 100644 index 000000000..c5eb8fcfb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV10.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV10: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.15) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.6, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV11.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV11.py new file mode 100644 index 000000000..36deda779 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV11.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV11: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.15) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.6, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV12.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV12.py new file mode 100644 index 000000000..b2250b43b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV12.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV12: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.75, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.95, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.15) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.9, 0.98) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.7, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV13.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV13.py new file mode 100644 index 000000000..3498b3b39 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV13.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV13: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.75, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.95, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.9, 0.98) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.7, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV14.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV14.py new file mode 100644 index 000000000..d0b017c56 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV14.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV14: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.9, 0.98) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.7, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..21e5f1766 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV2.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.005 * np.random.randn(), 0.05, 0.3) + self.damping = np.clip(self.damping - 0.005 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.01 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.01 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV3.py new file mode 100644 index 000000000..bb08abff5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV3.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV3: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.003 * np.random.randn(), 0.05, 0.3) + self.damping = np.clip(self.damping - 0.003 * np.random.randn(), 0.7, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.005 * np.random.randn(), 0.1, 1.0) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.005 * np.random.randn(), 0.1, 1.0) + self.social_weight = np.clip(self.social_weight + 0.005 * np.random.randn(), 0.1, 1.0) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV4.py new file mode 100644 index 000000000..7ec436e8c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV4.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV4: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.002 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.002 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.003 * np.random.randn(), 0.5, 0.9) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.003 * np.random.randn(), 0.5, 0.9) + self.social_weight = np.clip(self.social_weight + 0.003 * np.random.randn(), 0.5, 0.9) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV5.py new file mode 100644 index 000000000..e0c9c3238 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV5.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV5: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.6, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.002 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.002 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.003 * np.random.randn(), 0.5, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.003 * np.random.randn(), 0.5, 0.8) + self.social_weight = np.clip(self.social_weight + 0.003 * np.random.randn(), 0.5, 0.8) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV6.py new file mode 100644 index 000000000..799b7b6a6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV6.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV6: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.002 * np.random.randn(), 0.5, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.002 * np.random.randn(), 0.5, 0.8) + self.social_weight = np.clip(self.social_weight + 0.002 * np.random.randn(), 0.5, 0.8) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV7.py new file mode 100644 index 000000000..60f6b8d08 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV7.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV7: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.002 * np.random.randn(), 0.5, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.002 * np.random.randn(), 0.5, 0.8) + self.social_weight = np.clip(self.social_weight + 0.002 * np.random.randn(), 0.5, 0.8) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV8.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV8.py new file mode 100644 index 000000000..6b43e3e05 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV8.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV8: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.6, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.4, 0.7) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.8, 1.2) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.8, 1.2) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV9.py b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV9.py new file mode 100644 index 000000000..f81bc075e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedQuantumSwarmOptimizationV9.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedAdvancedQuantumSwarmOptimizationV9: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.6, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.05, 0.15) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.85, 0.95) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.4, 0.7) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.8, 1.2) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.8, 1.2) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): # Start from 1 to avoid index out of bounds + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78.py b/nevergrad/optimization/lama/EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78.py new file mode 100644 index 000000000..54d4ac004 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_advanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_advanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedAdvancedUltimateGuidedMassQGSA_v79.py b/nevergrad/optimization/lama/EnhancedAdvancedUltimateGuidedMassQGSA_v79.py new file mode 100644 index 000000000..836440511 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedAdvancedUltimateGuidedMassQGSA_v79.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedAdvancedUltimateGuidedMassQGSA_v79: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_advanced_ultimate_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_advanced_ultimate_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedArchiveDE.py b/nevergrad/optimization/lama/EnhancedArchiveDE.py new file mode 100644 index 000000000..3ed7664d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedArchiveDE.py @@ -0,0 +1,150 @@ +import numpy as np + + +class EnhancedArchiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + archive_size = 20 + F = 0.5 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def archive_management(population, archive, fitness): + combined = np.vstack((population, archive)) + if len(combined) > archive_size: + combined_fitness = np.array([func(ind) for ind in combined]) + sorted_indices = np.argsort(combined_fitness) + archive = combined[sorted_indices[:archive_size]] + return archive + + def multi_modal_local_search(best_ind, step_size=0.1): + new_positions = np.clip( + best_ind + step_size * np.random.randn(10, self.dim), bounds[0], bounds[1] + ) + best_local = best_ind + f_best_local = func(best_ind) + for new_pos in new_positions: + f_new_pos = func(new_pos) + if f_new_pos < f_best_local: + best_local = new_pos + f_best_local = f_new_pos + return best_local, f_best_local + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + archive = np.empty((0, self.dim)) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(np.vstack((population, archive)), i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Multi-modal local search on the best solution + best_ind = new_population[np.argmin(new_fitness)] + best_local, f_best_local = multi_modal_local_search(best_ind) + evaluations += 10 # Assuming 10 local search evaluations + + if f_best_local < self.f_opt: + self.f_opt = f_best_local + self.x_opt = best_local + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Archive management + archive = archive_management(population, archive, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedBalancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedBalancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..50ea908ef --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedBalancedDualStrategyAdaptiveDE.py @@ -0,0 +1,135 @@ +import numpy as np + + +class EnhancedBalancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.9 + self.elitism_rate = 0.3 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.mutation_factor_schedule = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + if len(self.mutation_factor_schedule) < generation + 1: + self.mutation_factor_schedule.append( + self.initial_mutation_factor + - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + ) + mutation_factor = self.mutation_factor_schedule[generation] + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage with balanced influence + trial = trial + 0.5 * np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.05 * ( + np.random.rand(self.dim) - 0.5 + ) # Slightly larger perturbation for balanced exploration + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/EnhancedCMAES.py b/nevergrad/optimization/lama/EnhancedCMAES.py new file mode 100644 index 000000000..8f89847b5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCMAES.py @@ -0,0 +1,44 @@ +import numpy as np + + +class EnhancedCMAES: + def __init__(self, budget=10000, mu=None, sigma=1): + self.budget = budget + self.mu = mu if mu is not None else 20 + self.dim = 5 + self.sigma = sigma + self.population = np.random.uniform(-5.0, 5.0, size=(self.mu, self.dim)) + self.weights = np.log(self.mu + 1 / 2) - np.log(np.arange(1, self.mu + 1)) + self.weights /= np.sum(self.weights) + self.mu_eff = 1 / np.sum(self.weights**2) + self.cov_matrix = np.eye(self.dim) + self.best_fitness = np.Inf + self.best_solution = None + + def sample_population(self): + return np.random.multivariate_normal(np.zeros(self.dim), self.cov_matrix, self.mu) + + def evaluate_population(self, func, population): + fitness = np.array([func(sol) for sol in population]) + min_idx = np.argmin(fitness) + if fitness[min_idx] < self.best_fitness: + self.best_fitness = fitness[min_idx] + self.best_solution = population[min_idx].copy() + return fitness + + def update_parameters(self, population, fitness): + z = (population - np.mean(population, axis=0)) / np.sqrt(self.sigma**2) + rank = np.argsort(fitness) + y = np.sum(self.weights[:, None] * z[rank[: self.mu]], axis=0) + self.cov_matrix = np.cov(population, rowvar=False, aweights=self.weights, bias=True) + + self.sigma *= np.exp((np.linalg.norm(y) - self.mu_eff) / (2 * np.sqrt(self.dim))) + + def __call__(self, func): + for _ in range(self.budget): + children = self.sample_population() + fitness = self.evaluate_population(func, children) + self.update_parameters(children, fitness) + + aocc = 1 - np.std(fitness) / np.mean(fitness) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedCMAESv2.py b/nevergrad/optimization/lama/EnhancedCMAESv2.py new file mode 100644 index 000000000..906d80025 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCMAESv2.py @@ -0,0 +1,55 @@ +import numpy as np + + +class EnhancedCMAESv2: + def __init__(self, budget=10000, mu=None, sigma=1, damp=1, cc=-1, ccov=-1): + self.budget = budget + self.mu = mu if mu is not None else 20 + self.dim = 5 + self.sigma = sigma + self.population = np.random.uniform(-5.0, 5.0, size=(self.mu, self.dim)) + self.weights = np.log(self.mu + 1 / 2) - np.log(np.arange(1, self.mu + 1)) + self.weights /= np.sum(self.weights) + self.mu_eff = 1 / np.sum(self.weights**2) + self.cov_matrix = np.eye(self.dim) + self.best_fitness = np.Inf + self.best_solution = None + self.damp = damp + self.cc = ( + cc if cc != -1 else (4 + self.mu_eff / self.dim) / (self.dim + 4 + 2 * self.mu_eff / self.dim) + ) + self.ccov = ccov if ccov != -1 else 2 / ((self.dim + 1.3) ** 2 + self.mu_eff) + + def sample_population(self): + return np.random.multivariate_normal(np.zeros(self.dim), self.cov_matrix, self.mu) + + def evaluate_population(self, func, population): + fitness = np.array([func(sol) for sol in population]) + min_idx = np.argmin(fitness) + if fitness[min_idx] < self.best_fitness: + self.best_fitness = fitness[min_idx] + self.best_solution = population[min_idx].copy() + return fitness + + def update_parameters(self, population, fitness): + z = (population - np.mean(population, axis=0)) / np.sqrt(self.sigma**2) + rank = np.argsort(fitness) + y = np.sum(self.weights[:, None] * z[rank[: self.mu]], axis=0) + self.cov_matrix = (1 - self.ccov) * self.cov_matrix + self.ccov * np.outer(y, y) + + self.sigma *= np.exp((np.linalg.norm(y) - self.mu_eff) / (2 * np.sqrt(self.dim))) + + def update_evolution_path(self, z): + c = np.sqrt(self.cc * (2 - self.cc) * self.mu_eff) + self.weights = (1 - self.cc) * self.weights + c * np.sum(z, axis=0) + + def __call__(self, func): + for _ in range(self.budget): + children = self.sample_population() + fitness = self.evaluate_population(func, children) + self.update_parameters(children, fitness) + z = self.weights / np.linalg.norm(self.weights) + self.update_evolution_path(z) + + aocc = 1 - np.std(fitness) / np.mean(fitness) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedChaoticFireworksOptimization.py b/nevergrad/optimization/lama/EnhancedChaoticFireworksOptimization.py new file mode 100644 index 000000000..43e3a6937 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedChaoticFireworksOptimization.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedChaoticFireworksOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + p_diversify = 0.1 + 0.4 * np.exp(-5 * i / self.budget) # Adaptive probability for diversification + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def enhance_convergence(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + best_firework = fireworks[best_idx] + for i in range(self.n_fireworks): + if i != best_idx: + fireworks[i] = 0.9 * fireworks[i] + 0.1 * best_firework # Attraction towards the global best + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + fireworks = self.diversify_fireworks(fireworks, func) + fireworks = self.enhance_convergence(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedClusterDifferentialCrossover.py b/nevergrad/optimization/lama/EnhancedClusterDifferentialCrossover.py new file mode 100644 index 000000000..e6aeda5cc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedClusterDifferentialCrossover.py @@ -0,0 +1,153 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class EnhancedClusterDifferentialCrossover: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 + elite_size = 5 + cluster_count = 5 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + kmeans = KMeans(n_clusters=cluster_count) + clusters = kmeans.fit_predict(population) + cluster_centers = kmeans.cluster_centers_ + + for cluster_center in cluster_centers: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(cluster_center + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedClusteredDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedClusteredDifferentialEvolution.py new file mode 100644 index 000000000..d5c82c60c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedClusteredDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats.qmc import Sobol +from sklearn.cluster import KMeans + + +class EnhancedClusteredDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem statement + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.num_clusters = 10 + self.F = 0.8 + self.CR = 0.9 + self.memory = [] + self.memory_size = 20 + self.elite_size = 5 + self.elite = [] + + def _initialize_population(self): + sobol_engine = Sobol(d=self.dim, scramble=False) + sobol_samples = sobol_engine.random_base2(m=int(np.log2(self.pop_size // 2))) + sobol_samples = self.lb + (self.ub - self.lb) * sobol_samples + + random_samples = np.random.uniform(self.lb, self.ub, (self.pop_size - len(sobol_samples), self.dim)) + return np.vstack((sobol_samples, random_samples)) + + def _local_search(self, x, func): + res = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim, options={"disp": False} + ) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.random.uniform(0.4, 1.0) + self.CR = np.random.uniform(0.1, 1.0) + + def _cluster_search(self, population, func): + kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for center in cluster_centers: + if self.evaluations >= self.budget: + break + local_opt, f_local_opt = self._local_search(center, func) + self.evaluations += 1 + if f_local_opt < self.f_opt: + self.f_opt = f_local_opt + self.x_opt = local_opt + + def _memory_local_search(self, func): + for mem in self.memory: + if self.evaluations >= self.budget: + break + local_opt, f_local_opt = self._local_search(mem, func) + self.evaluations += 1 + if f_local_opt < self.f_opt: + self.f_opt = f_local_opt + self.x_opt = local_opt + + def _adaptive_restart(self, population, fitness, func): + mean_fitness = np.mean(fitness) + std_fitness = np.std(fitness) + if std_fitness < 1e-6 and self.evaluations < self.budget * 0.9: + new_pop_size = min(self.pop_size * 2, self.budget - self.evaluations) + new_population = np.random.uniform(self.lb, self.ub, (new_pop_size, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + self.evaluations += new_pop_size + return new_population, new_fitness + return population, fitness + + def _crossover(self, a, b, c): + rand_idx = np.random.randint(self.dim) + mutant_vector = np.copy(a) + for j in range(self.dim): + if np.random.rand() < self.CR or j == rand_idx: + mutant_vector[j] = a[j] + self.F * (b[j] - c[j]) + else: + mutant_vector[j] = a[j] + return np.clip(mutant_vector, self.lb, self.ub) + + def _mutation_strategies(self, population): + strategies = [ + lambda a, b, c: a + self.F * (b - c), + lambda a, b, c: a + self.F * (b - np.mean(population, axis=0)), + lambda a, b, c: a + + self.F * (b - c) + + self.F * (np.random.uniform(self.lb, self.ub, self.dim) - a), + ] + return strategies[np.random.randint(len(strategies))] + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + self.evaluations = len(population) + + while self.evaluations < self.budget: + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutation_strategy = self._mutation_strategies(population) + trial_vector = np.clip(a + mutation_strategy(a, b, c), self.lb, self.ub) + f_candidate = func(trial_vector) + self.evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + self.elite = [population[idx] for idx in elite_indices] + + if self.evaluations < self.budget: + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + self._dynamic_parameters() + self._cluster_search(population, func) + self._memory_local_search(func) + population, fitness = self._adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedConvergenceAcceleratedSpiralSearch.py b/nevergrad/optimization/lama/EnhancedConvergenceAcceleratedSpiralSearch.py new file mode 100644 index 000000000..11d22c26d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedConvergenceAcceleratedSpiralSearch.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedConvergenceAcceleratedSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial setup + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Start with a full range + angle_increment = np.pi / 8 # More precise initial angle for exploration + + # Adaptive parameters + radius_decay = 0.92 # More gradual radius decay to maintain broader search longer + angle_refinement = 0.88 # More gradual angle refinement for more thorough search + evaluations_left = self.budget + min_radius = 0.0001 # Even finer minimum radius for very detailed exploration + + # Dynamic angle adjustment based on feedback loop + optimal_change_factor = 1.9 # Slightly less aggressive dynamic adjustment + no_improvement_count = 0 + last_best_f = np.inf + + # Improved escape mechanism parameters + escape_momentum = 0 # To track when to increase radius temporarily + escape_trigger = 15 # Number of cycles without improvement to trigger escape + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max(int(2 * np.pi / angle_increment), 6) # Ensure at least 6 points + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + displacement = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Determine if there has been an improvement + if self.f_opt < last_best_f: + last_best_f = self.f_opt + no_improvement_count = 0 + radius_decay = min(radius_decay * optimal_change_factor, 0.94) + angle_refinement = min(angle_refinement * optimal_change_factor, 0.94) + else: + no_improvement_count += 1 + + # Update centroid to new best point + if points: + best_index = np.argmin(function_values) + centroid = points[best_index] + + # Adjust search parameters when stuck + if no_improvement_count > escape_trigger: + radius = min(radius / radius_decay, 5.0) # Increase radius to escape + angle_increment = np.pi / 4 # Reset angle increment to improve exploration + no_improvement_count = 0 + escape_momentum += 1 + + # Gradual refinement of search + else: + radius *= radius_decay # Tighten search + radius = max(radius, min_radius) # Ensure not too small + angle_increment *= angle_refinement # Refine search + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolution.py new file mode 100644 index 000000000..19dabfbad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolution.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedConvergentDifferentialEvolution: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + mutation_strength = np.mean(np.abs(new_scaling_factors - scaling_factors)) + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(new_scaling_factors, *scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV2.py b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV2.py new file mode 100644 index 000000000..e11d97ac6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV2.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedConvergentDifferentialEvolutionV2: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + mutation_strength = np.mean(np.abs(new_scaling_factors - scaling_factors)) + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(new_scaling_factors, *scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV3.py b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV3.py new file mode 100644 index 000000000..e020d5391 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV3.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedConvergentDifferentialEvolutionV3: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + mutation_strength = np.mean(np.abs(new_scaling_factors - scaling_factors)) + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(new_scaling_factors, *scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV4.py b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV4.py new file mode 100644 index 000000000..9a8a0863d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedConvergentDifferentialEvolutionV4.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedConvergentDifferentialEvolutionV4: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 1.0), + crossover_rate_range=(0.5, 1.0), + diversification_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + mutation_strength = np.mean(np.abs(new_scaling_factors - scaling_factors)) + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.01 * mutation_strength), 0.1, 1.0 + ) + + return np.clip(new_scaling_factors, *scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedCooperativeCulturalDifferentialSearch.py b/nevergrad/optimization/lama/EnhancedCooperativeCulturalDifferentialSearch.py new file mode 100644 index 000000000..956d4e3d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCooperativeCulturalDifferentialSearch.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedCooperativeCulturalDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 # Reduced for refined convergence + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.3: # Reduced probability to balance exploration and exploitation + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.2 + ( + 0.5 * fitness_std / (np.mean(fitness) + 1e-9) + ) # Adjusted influence factors + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarm.py b/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarm.py new file mode 100644 index 000000000..1acf4693c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarm.py @@ -0,0 +1,54 @@ +import numpy as np + + +class EnhancedCosineAdaptiveDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 250 # Adjusted population size for better convergence + self.F_base = 0.8 # Increased base mutation factor for more aggressive exploration + self.CR = 0.9 # Increased crossover probability to promote diversity + self.adapt_rate = 0.2 # Increased adaptation rate for dynamic mutation adjustment + self.top_percentile = 0.2 # Using top 20% of individuals for mutation + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Cosine adaptive mutation factor + F_adaptive = self.F_base + self.adapt_rate * np.cos(2 * np.pi * i / (self.budget / self.pop_size)) + + for j in range(self.pop_size): + # Mutation strategy: DE/current-to-best/1 with cosine adaptive F + idxs = np.argsort(fitness)[: int(self.top_percentile * self.pop_size)] # top individuals + best_local = pop[np.random.choice(idxs)] + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adaptive * (best_local - pop[j]) + F_adaptive * (a - b) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarmV2.py b/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarmV2.py new file mode 100644 index 000000000..9af7ab96f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCosineAdaptiveDifferentialSwarmV2.py @@ -0,0 +1,58 @@ +import numpy as np + + +class EnhancedCosineAdaptiveDifferentialSwarmV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Smaller population for more focused search + self.F_base = 0.75 # Slightly lower base mutation factor for finer adjustments + self.CR = 0.9 # Increased crossover probability for more thorough exploration + self.adaptive_F_adjustment = 0.2 # Increased change rate for mutation factor + self.top_percentile = 0.05 # Top 5% to focus on elite solutions + self.epsilon = 1e-10 # Small constant to avoid division by zero in cosine computation + self.adaptive_CR_adjustment = 0.1 # Adaptive adjustment for the crossover rate + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Cosine adaptive mutation and crossover factors with precision control + iteration_ratio = i / (self.budget / self.pop_size + self.epsilon) + F_adaptive = self.F_base + self.adaptive_F_adjustment * np.cos(2 * np.pi * iteration_ratio) + CR_adaptive = self.CR - self.adaptive_CR_adjustment * np.cos(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Mutation strategy: DE/current-to-best/1 with cosine adaptive F + idxs = np.argsort(fitness)[: int(self.top_percentile * self.pop_size)] # top individuals + best_local = pop[np.random.choice(idxs)] + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adaptive * (best_local - pop[j]) + F_adaptive * (a - b) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_adaptive + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedCovarianceGradientSearchV2.py b/nevergrad/optimization/lama/EnhancedCovarianceGradientSearchV2.py new file mode 100644 index 000000000..573bbc91a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCovarianceGradientSearchV2.py @@ -0,0 +1,189 @@ +import numpy as np + + +class EnhancedCovarianceGradientSearchV2: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + gradient_steps=10, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c + self.c_s = c_s + self.c_1 = c_1 + self.c_mu = c_mu + self.damps = damps + self.learning_rate = learning_rate + self.gradient_steps = gradient_steps + + def __adaptive_covariance_matrix_adaptation(self, func, mean, C, sigma): + n_samples = self.population_size + dim = mean.shape[0] + + new_pop = np.zeros((n_samples, dim)) + new_scores = np.zeros(n_samples) + cholesky_success = False + + for _ in range(10): # Retry up to 10 times if Cholesky fails + try: + chol_decomp = np.linalg.cholesky(C + 1e-10 * np.eye(dim)) + cholesky_success = True + break + except np.linalg.LinAlgError: + C += 1e-6 * np.eye(dim) + + if not cholesky_success: + chol_decomp = np.eye(dim) + + for i in range(n_samples): + if self.budget_remaining <= 0: + break + z = np.random.randn(dim) + y = np.dot(chol_decomp, z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + self.budget_remaining -= 1 + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + for _ in range(self.gradient_steps): + if self.budget_remaining <= 0: + break + + grad = np.zeros_like(x) + fx = func(x) + self.budget_remaining -= 1 + + for i in range(len(x)): + if self.budget_remaining <= 0: + break + + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + self.budget_remaining -= 1 + + x -= self.learning_rate * grad + x = np.clip(x, -5.0, 5.0) + + return x + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.random.choice(np.arange(len(pop)), size=diverse_count, replace=False) + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + self.budget_remaining = self.budget + + # Initialize populations + pop_main = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores_main = np.array([func(ind) for ind in pop_main]) + self.budget_remaining -= self.population_size + + # Initialize global best + global_best_score = np.min(scores_main) + global_best_position = pop_main[np.argmin(scores_main)] + + # Initialize CMA-ES parameters + mean = np.mean(pop_main, axis=0) + C = np.eye(dim) + sigma = self.initial_sigma + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1 - 1 / (4.0 * dim) + 1 / (21.0 * dim**2)) + + while self.budget_remaining > 0: + if self.budget_remaining <= 0: + break + + # Main population update + new_pop, new_scores = self.__adaptive_covariance_matrix_adaptation(func, mean, C, sigma) + + # Update population and scores + pop_main = np.vstack((pop_main, new_pop)) + scores_main = np.hstack((scores_main, new_scores)) + + best_idx = np.argmin(scores_main) + if scores_main[best_idx] < global_best_score: + global_best_score = scores_main[best_idx] + global_best_position = pop_main[best_idx] + + # Gradient-based local search on elitist solutions + elite_pop, _ = self.__hierarchical_selection(pop_main, scores_main) + for i in range(len(elite_pop)): + if self.budget_remaining <= 0: + break + + elite_pop[i] = self.__gradient_local_search(func, elite_pop[i]) + new_score = func(elite_pop[i]) + self.budget_remaining -= 1 + + # Update if new solution is better + idx = np.where((pop_main == elite_pop[i]).all(axis=1))[0] + if len(idx) > 0 and new_score < scores_main[idx[0]]: + scores_main[idx[0]] = new_score + + best_idx = np.argmin(scores_main) + if scores_main[best_idx] < global_best_score: + global_best_score = scores_main[best_idx] + global_best_position = pop_main[best_idx] + + # Hierarchical selection for diversity + elite_pop, diverse_pop = self.__hierarchical_selection(pop_main, scores_main) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) + / np.sqrt( + 1 - (1 - self.c_s) ** (2 * (self.budget - self.budget_remaining) / self.population_size) + ) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + # Ensure numerical stability + C = np.nan_to_num(C, nan=1e-10, posinf=1e-10, neginf=1e-10) + sigma = max(1e-10, sigma) + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/EnhancedCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..e098d6c90 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCovarianceMatrixAdaptation.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedCovarianceMatrixAdaptation: + def __init__(self, budget, population_size=50, elite_fraction=0.2, initial_sigma=0.3): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + # Adaptive sigma adjustment based on elite score improvements + if iteration > 0 and np.mean(scores) < np.mean(prev_scores): + sigma *= 1.2 # Increase sigma if improvement + else: + sigma *= 0.8 # Decrease sigma otherwise + + prev_scores = scores + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolution.py new file mode 100644 index 000000000..19358cb24 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolution.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedCovarianceMatrixEvolution: + def __init__(self, budget=10000, population_size=20): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = (-5.0, 5.0) + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Evolution parameters + mu = self.population_size // 2 + weights = np.log(mu + 0.5) - np.log(np.arange(1, mu + 1)) + weights /= np.sum(weights) + mueff = np.sum(weights) ** 2 / np.sum(weights**2) + sigma = 0.3 + cs = (mueff + 2) / (self.dim + mueff + 5) + ds = 1 + 2 * max(0, np.sqrt((mueff - 1) / (self.dim + 1)) - 1) + cs + enn = np.sqrt(self.dim) * (1 - 1 / (4 * self.dim) + 1 / (21 * self.dim**2)) + cc = (4 + mueff / self.dim) / (self.dim + 4 + 2 * mueff / self.dim) + c1 = 2 / ((self.dim + 1.3) ** 2 + mueff) + cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((self.dim + 2) ** 2 + mueff)) + hthresh = (1.4 + 2 / (self.dim + 1)) * enn + + # Evolution strategy state variables + pc = np.zeros(self.dim) + ps = np.zeros(self.dim) + B = np.eye(self.dim) + D = np.ones(self.dim) + C = np.eye(self.dim) + invsqrtC = np.eye(self.dim) + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = 0 + + while evaluations < self.budget: + # Sample new population + arz = np.random.randn(self.population_size, self.dim) + arx = self.x_opt + sigma * np.dot(arz, B * D) + + # Boundary handling + arx = np.clip(arx, self.bounds[0], self.bounds[1]) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in arx]) + evaluations += self.population_size + + # Sort by fitness + sorted_indices = np.argsort(new_fitness) + arx = arx[sorted_indices] + arz = arz[sorted_indices] + new_fitness = new_fitness[sorted_indices] + + # Update best solution found + if new_fitness[0] < self.f_opt: + self.f_opt = new_fitness[0] + self.x_opt = arx[0] + + # Update evolution strategy state variables + xmean = np.dot(weights, arx[:mu]) + zmean = np.dot(weights, arz[:mu]) + + ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(invsqrtC, zmean) + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - cs) ** (2 * evaluations / self.population_size)) / enn + < hthresh + ) + pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * np.dot(B, D * zmean) + + artmp = (arx[:mu] - self.x_opt) / sigma + C = ( + (1 - c1 - cmu) * C + + c1 * (np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + + cmu * np.dot((weights * artmp.T), artmp) + ) + + sigma *= np.exp((np.linalg.norm(ps) / enn - 1) * cs / ds) + + if eigenval_update_counter <= 0: + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = eigenval_update_freq + C = np.triu(C) + np.triu(C, 1).T + D, B = np.linalg.eigh(C) + D = np.sqrt(D) + invsqrtC = np.dot(B, np.dot(np.diag(D**-1), B.T)) + else: + eigenval_update_counter -= 1 + + self.x_opt = xmean + + # Integrate elitism strategy + elite_ratio = 0.1 + elite_size = int(self.population_size * elite_ratio) + elite_indices = sorted_indices[:elite_size] + elite_solutions = arx[elite_indices] + elite_fitness = new_fitness[elite_indices] + + # Replace worst individuals with elite solutions + replace_indices = sorted_indices[-elite_size:] + arx[replace_indices] = elite_solutions + new_fitness[replace_indices] = elite_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolutionV2.py b/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolutionV2.py new file mode 100644 index 000000000..1467b28d7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCovarianceMatrixEvolutionV2.py @@ -0,0 +1,135 @@ +import numpy as np + + +class EnhancedCovarianceMatrixEvolutionV2: + def __init__(self, budget=10000, population_size=20): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = (-5.0, 5.0) + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Evolution parameters + mu = self.population_size // 2 + weights = np.log(mu + 0.5) - np.log(np.arange(1, mu + 1)) + weights /= np.sum(weights) + mueff = np.sum(weights) ** 2 / np.sum(weights**2) + sigma = 0.3 + cs = (mueff + 2) / (self.dim + mueff + 5) + ds = 1 + 2 * max(0, np.sqrt((mueff - 1) / (self.dim + 1)) - 1) + cs + enn = np.sqrt(self.dim) * (1 - 1 / (4 * self.dim) + 1 / (21 * self.dim**2)) + cc = (4 + mueff / self.dim) / (self.dim + 4 + 2 * mueff / self.dim) + c1 = 2 / ((self.dim + 1.3) ** 2 + mueff) + cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((self.dim + 2) ** 2 + mueff)) + hthresh = (1.4 + 2 / (self.dim + 1)) * enn + + # Evolution strategy state variables + pc = np.zeros(self.dim) + ps = np.zeros(self.dim) + B = np.eye(self.dim) + D = np.ones(self.dim) + C = np.eye(self.dim) + invsqrtC = np.eye(self.dim) + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = 0 + + while evaluations < self.budget: + # Sample new population + arz = np.random.randn(self.population_size, self.dim) + arx = self.x_opt + sigma * np.dot(arz, B * D) + + # Boundary handling + arx = np.clip(arx, self.bounds[0], self.bounds[1]) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in arx]) + evaluations += self.population_size + + # Sort by fitness + sorted_indices = np.argsort(new_fitness) + arx = arx[sorted_indices] + arz = arz[sorted_indices] + new_fitness = new_fitness[sorted_indices] + + # Update best solution found + if new_fitness[0] < self.f_opt: + self.f_opt = new_fitness[0] + self.x_opt = arx[0] + + # Update evolution strategy state variables + xmean = np.dot(weights, arx[:mu]) + zmean = np.dot(weights, arz[:mu]) + + ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(invsqrtC, zmean) + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - cs) ** (2 * evaluations / self.population_size)) / enn + < hthresh + ) + pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * np.dot(B, D * zmean) + + artmp = (arx[:mu] - self.x_opt) / sigma + C = ( + (1 - c1 - cmu) * C + + c1 * (np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + + cmu * np.dot((weights * artmp.T), artmp) + ) + + sigma *= np.exp((np.linalg.norm(ps) / enn - 1) * cs / ds) + + if eigenval_update_counter <= 0: + eigenval_update_freq = self.population_size / (c1 + cmu) / self.dim / 10 + eigenval_update_counter = eigenval_update_freq + C = np.triu(C) + np.triu(C, 1).T + D, B = np.linalg.eigh(C) + D = np.sqrt(D) + invsqrtC = np.dot(B, np.dot(np.diag(D**-1), B.T)) + else: + eigenval_update_counter -= 1 + + self.x_opt = xmean + + # Integrate elitism strategy + elite_ratio = 0.1 + elite_size = int(self.population_size * elite_ratio) + elite_indices = sorted_indices[:elite_size] + elite_solutions = arx[elite_indices] + elite_fitness = new_fitness[elite_indices] + + # Replace worst individuals with elite solutions + replace_indices = sorted_indices[-elite_size:] + arx[replace_indices] = elite_solutions + new_fitness[replace_indices] = elite_fitness + + # Adaptive restart mechanism + if evaluations > 0.75 * self.budget and (self.f_opt - new_fitness[0] < 1e-6): + # Re-initialize population and parameters if stuck + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + ps = np.zeros(self.dim) + pc = np.zeros(self.dim) + B = np.eye(self.dim) + D = np.ones(self.dim) + C = np.eye(self.dim) + invsqrtC = np.eye(self.dim) + sigma = 0.3 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCrossoverElitistStrategyV9.py b/nevergrad/optimization/lama/EnhancedCrossoverElitistStrategyV9.py new file mode 100644 index 000000000..2881ec2cd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCrossoverElitistStrategyV9.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedCrossoverElitistStrategyV9: + def __init__( + self, + budget, + dimension=5, + population_size=250, + elite_fraction=0.08, + mutation_intensity=0.025, + crossover_rate=0.9, + adaptive_crossover_depth=0.85, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_crossover_depth = adaptive_crossover_depth + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform adaptive crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation intensity + scale = self.mutation_intensity * np.exp(-evaluations / self.budget * 10) + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2, evaluations): + # Adaptive recombination based on the stage of optimization + alpha = np.random.uniform(0.3, 0.7) + if evaluations < self.budget * self.adaptive_crossover_depth: + alpha *= np.exp(-evaluations / (self.budget * self.adaptive_crossover_depth) * 2) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/EnhancedCrowdingMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedCrowdingMemoryHybridOptimizer.py new file mode 100644 index 000000000..dacd10863 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCrowdingMemoryHybridOptimizer.py @@ -0,0 +1,196 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedCrowdingMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + crowding_factor=0.5, + restart_threshold=1e-5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.crowding_factor = crowding_factor + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.restart_threshold = restart_threshold + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_select(self, population, trial, fitness, f_trial): + distances = np.linalg.norm(population - trial, axis=1) + idx = np.argmin(distances) + if f_trial < fitness[idx]: + return idx + else: + return None + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + memory = [] + last_best_fitness = g_best_fitness + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + r = np.random.choice(3) + if r == 0: + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + elif r == 1: + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(a + F * (b - population[i]), self.bounds[0], self.bounds[1]) + else: + a = population[np.random.choice(idxs)] + mutant = np.clip(a + F * (g_best - population[i]), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection with crowding mechanism + f_trial = func(trial) + self.eval_count += 1 + selected_idx = self.crowding_select(population, trial, fitness, f_trial) + if selected_idx is not None and selected_idx < current_pop_size: + fitness[selected_idx] = f_trial + population[selected_idx] = trial + successful_steps.append((F, CR)) + memory.append(trial) + if len(successful_steps) > 50: + successful_steps.pop(0) + if len(memory) > 100: + memory.pop(0) + # Self-adapting parameters + F_values[selected_idx] = min(F * 1.1, 1.0) + CR_values[selected_idx] = min(CR * 1.1, 1.0) + + # Update personal best + if f_trial < p_best_fitness[selected_idx]: + p_best[selected_idx] = trial + p_best_fitness[selected_idx] = f_trial + + # Update global best + if f_trial < g_best_fitness: + g_best = trial + g_best_fitness = f_trial + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Restart mechanism based on convergence criterion + if ( + abs(last_best_fitness - g_best_fitness) < self.restart_threshold + and self.eval_count > 0.2 * global_search_budget + ): + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + successful_steps = [] + last_best_fitness = g_best_fitness + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCulturalAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedCulturalAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..f32f0c1d9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCulturalAdaptiveDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedCulturalAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.1 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.3: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Assuming guided search uses 5 evaluations + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCulturalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/EnhancedCulturalEvolutionaryOptimizer.py new file mode 100644 index 000000000..77aa8e8d7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCulturalEvolutionaryOptimizer.py @@ -0,0 +1,117 @@ +import numpy as np + + +class EnhancedCulturalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedCulturalMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedCulturalMemeticDifferentialEvolution.py new file mode 100644 index 000000000..c721a33ed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedCulturalMemeticDifferentialEvolution.py @@ -0,0 +1,130 @@ +import numpy as np + + +class EnhancedCulturalMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def update_knowledge_base(self, knowledge_base, population, fitness): + best_individual = population[np.argmin(fitness)] + mean_position = np.mean(population, axis=0) + knowledge_base["best_solution"] = best_individual + knowledge_base["best_fitness"] = np.min(fitness) + knowledge_base["mean_position"] = mean_position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.2: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.3 + (0.1 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + self.update_knowledge_base(knowledge_base, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolution.py new file mode 100644 index 000000000..9eb1cd1ec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolution.py @@ -0,0 +1,56 @@ +import numpy as np + + +class EnhancedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Increased population size for better diversity + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + + def __call__(self, func): + # Initialize population randomly + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolutionary loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + for i in range(self.pop_size): + # Adaptive mutation strategy: DE/current-to-best/1 + # Select indices for mutation (excluding current index i) + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + best = pop[best_idx] + + # Mutation: Including information from the current best + mutant = np.clip(pop[i] + self.F * (best - pop[i]) + self.F * (a - b), -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + # Update best solution if found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + best_idx = i # Update index of the best individual + + # Dynamic adaptation of parameters + self.F = 0.5 + 0.3 * np.log1p(iteration) / np.log1p(n_iterations) + self.CR = 0.5 + 0.4 * iteration / n_iterations + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptivePSO.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptivePSO.py new file mode 100644 index 000000000..607ee2198 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptivePSO.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionAdaptivePSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for PSO + self.population_size = 100 + self.w_min = 0.4 + self.w_max = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.velocity_limit = 0.2 + + # Parameters for DE + self.F = 0.8 + self.CR = 0.9 + + # Parameters for adaptive DE + self.F_l = 0.5 + self.F_u = 1.0 + self.CR_l = 0.1 + self.CR_u = 0.9 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocity = np.random.uniform( + -self.velocity_limit, self.velocity_limit, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_position = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + + while evaluations < self.budget: + w = self.w_max - ((self.w_max - self.w_min) * (evaluations / self.budget)) + + for i in range(self.population_size): + # PSO Update + r1, r2 = np.random.rand(2) + velocity[i] = ( + w * velocity[i] + + self.c1 * r1 * (personal_best_position[i] - population[i]) + + self.c2 * r2 * (self.x_opt - population[i]) + ) + + # Adaptive velocity clamping + velocity_magnitude = np.linalg.norm(velocity[i]) + if velocity_magnitude > self.velocity_limit: + velocity[i] = (velocity[i] / velocity_magnitude) * self.velocity_limit + + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + + # Adaptive DE Update + if np.random.rand() < 0.5: + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F_adaptive = self.F_l + np.random.rand() * (self.F_u - self.F_l) + CR_adaptive = self.CR_l + np.random.rand() * (self.CR_u - self.CR_l) + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + new_position = trial_vector + + f_candidate = func(new_position) + evaluations += 1 + + if f_candidate < personal_best_fitness[i]: + personal_best_position[i] = new_position.copy() + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = new_position.copy() + + population[i] = new_position + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptiveStrategy.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptiveStrategy.py new file mode 100644 index 000000000..c601338e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionAdaptiveStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionAdaptiveStrategy: + def __init__(self, budget=10000, population_size=50, F=0.8, initial_CR=0.5, strategy="rand/1/bin"): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight + self.CR = initial_CR # Initial Crossover probability + self.strategy = strategy # Mutation strategy + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize population and fitness + pop = np.random.uniform(lb, ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = pop[best_idx] + + # Evolutionary loop + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation and Crossover + idxs = [idx for idx in range(self.population_size) if idx != i] + if self.strategy == "rand/1/bin": + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), lb, ub) + elif self.strategy == "best/1/bin": + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(self.x_opt + self.F * (a - b), lb, ub) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + pop[i] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive strategy change + if evaluations % (self.budget // 10) == 0: + if self.strategy == "rand/1/bin": + self.strategy = "best/1/bin" + self.F = min(self.F + 0.1, 1.0) + else: + self.strategy = "rand/1/bin" + self.F = max(self.F - 0.1, 0.5) + + # Adaptive Crossover Rate Adjustment + global_improvement = np.min(fitness) < self.f_opt + if global_improvement: + self.CR = min(self.CR + 0.05, 1.0) + else: + self.CR = max(self.CR - 0.05, 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionFireworkAlgorithm.py new file mode 100644 index 000000000..fcf2e8c07 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionFireworkAlgorithm.py @@ -0,0 +1,55 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=15, f=0.5, cr=0.9, alpha=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f = f + self.cr = cr + self.alpha = alpha + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + self.f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < self.cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + fireworks = self.evolve_fireworks(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v15.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v15.py new file mode 100644 index 000000000..99ab2bf15 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v15.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLSRefinement_v15: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.1, f_max=0.9, cr_min=0.1, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < trial_val: + population[idx] = new_trial + if trial_val < target_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v16.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v16.py new file mode 100644 index 000000000..42072b2d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v16.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLSRefinement_v16: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.1, f_max=0.9, cr_min=0.1, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v17.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v17.py new file mode 100644 index 000000000..a46568888 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v17.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLSRefinement_v17: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.1, f_max=0.9, cr_min=0.1, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v18.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v18.py new file mode 100644 index 000000000..873345491 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v18.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLSRefinement_v18: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.1, f_max=0.9, cr_min=0.1, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v19.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v19.py new file mode 100644 index 000000000..2b1a1662a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLSRefinement_v19.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLSRefinement_v19: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.2, f_max=0.8, cr_min=0.2, cr_max=0.8, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v21.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v21.py new file mode 100644 index 000000000..4a0032320 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v21.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v21: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.4, f_max=0.9, cr_min=0.2, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v22.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v22.py new file mode 100644 index 000000000..6f410ece2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v22.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v22: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.4, f_max=0.9, cr_min=0.2, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v23.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v23.py new file mode 100644 index 000000000..6649f43b7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v23.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v23: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.3, f_max=0.8, cr_min=0.3, cr_max=0.8, local_search_iters=20 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v24.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v24.py new file mode 100644 index 000000000..823c7ca84 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v24.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v24: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=20 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v25.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v25.py new file mode 100644 index 000000000..a3d1fa95e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v25.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v25: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=20 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v26.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v26.py new file mode 100644 index 000000000..7c9e11c53 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v26.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v26: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.4, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=30 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v27.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v27.py new file mode 100644 index 000000000..800c1a2b7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v27.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v27: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=50 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.2 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v28.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v28.py new file mode 100644 index 000000000..3d680f813 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v28.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v28: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=100 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v29.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v29.py new file mode 100644 index 000000000..e66c0e94d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v29.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v29: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=150 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v30.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v30.py new file mode 100644 index 000000000..15809ab35 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v30.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v30: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=200 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v31.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v31.py new file mode 100644 index 000000000..3ecd425b2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v31.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v31: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=250 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v32.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v32.py new file mode 100644 index 000000000..469526ed4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v32.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v32: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=500 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v33.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v33.py new file mode 100644 index 000000000..5f903c318 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v33.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v33: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.5, cr_max=0.9, local_search_iters=1000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v34.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v34.py new file mode 100644 index 000000000..047fbac73 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v34.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v34: + def __init__( + self, budget=10000, p_best=0.25, f_min=0.4, f_max=0.8, cr_min=0.4, cr_max=0.8, local_search_iters=1500 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v35.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v35.py new file mode 100644 index 000000000..7b7eaedb0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v35.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v35: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.5, f_max=1.0, cr_min=0.3, cr_max=0.9, local_search_iters=2000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v36.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v36.py new file mode 100644 index 000000000..e28ec0cde --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v36.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v36: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.4, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=3000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v37.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v37.py new file mode 100644 index 000000000..9a091f3d2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v37.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v37: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=5000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v38.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v38.py new file mode 100644 index 000000000..db29dff47 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v38.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v38: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=5000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v39.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v39.py new file mode 100644 index 000000000..05fcba7e4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v39.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v39: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=5000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v40.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v40.py new file mode 100644 index 000000000..398289eb6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v40.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v40: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=5000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v41.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v41.py new file mode 100644 index 000000000..82ec785be --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v41.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v41: + def __init__( + self, budget=10000, p_best=0.25, f_min=0.4, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=5000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v43.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v43.py new file mode 100644 index 000000000..d58570a58 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v43.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v43: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=1000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v44.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v44.py new file mode 100644 index 000000000..37805bbd2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v44.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v44: + def __init__( + self, budget=10000, p_best=0.25, f_min=0.4, f_max=0.8, cr_min=0.3, cr_max=0.7, local_search_iters=500 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v45.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v45.py new file mode 100644 index 000000000..2c615b01a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v45.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v45: + def __init__( + self, budget=10000, p_best=0.25, f_min=0.4, f_max=0.8, cr_min=0.2, cr_max=0.9, local_search_iters=500 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.05 * np.random.normal(0, 1, self.dim) # Adjusted perturbation + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v46.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v46.py new file mode 100644 index 000000000..b4d040561 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v46.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v46: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.4, f_max=0.8, cr_min=0.2, cr_max=0.9, local_search_iters=500 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.03 * np.random.normal(0, 1, self.dim) # Adjusted perturbation + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v47.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v47.py new file mode 100644 index 000000000..5a8fcbc0c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v47.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v47: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.4, f_max=0.8, cr_min=0.2, cr_max=0.9, local_search_iters=1000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.01 * np.random.normal( + 0, 1, self.dim + ) # Reduced perturbation factor + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v48.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v48.py new file mode 100644 index 000000000..a44969cff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v48.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v48: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v49.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v49.py new file mode 100644 index 000000000..87d9d65ff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v49.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v49: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v50.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v50.py new file mode 100644 index 000000000..297cca0cb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v50.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v50: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v51.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v51.py new file mode 100644 index 000000000..b852b7c43 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v51.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v51: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v52.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v52.py new file mode 100644 index 000000000..6b72dfc2d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v52.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v52: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(10): # Run the algorithm multiple times to find the best result + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, func( + np.random.uniform(-5.0, 5.0, self.dim) + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v53.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v53.py new file mode 100644 index 000000000..74aebb0da --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v53.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v53: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(20): # Increased the number of runs to find the best result + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, func( + np.random.uniform(-5.0, 5.0, self.dim) + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v59.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v59.py new file mode 100644 index 000000000..a6c06811d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v59.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v59: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.01, + population_size=30, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(500): # Increased the number of runs for enhanced optimization + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v60.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v60.py new file mode 100644 index 000000000..0ee17a3f3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v60.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v60: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.01, + population_size=30, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(1000): # Increased the number of runs to 1000 for enhanced optimization + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v62.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v62.py new file mode 100644 index 000000000..da8cfb6cd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v62.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v62: + def __init__( + self, + budget=10000, + p_best=0.3, + f_min=0.3, + f_max=0.8, + cr_min=0.3, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.02, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(1000): # Increased the number of runs to 1000 for enhanced optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v63.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v63.py new file mode 100644 index 000000000..010852138 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v63.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v63: + def __init__( + self, + budget=10000, + p_best=0.25, + f_min=0.4, + f_max=0.9, + cr_min=0.4, + cr_max=0.9, + local_search_iters=500, + perturbation_factor=0.02, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(1000): # Increased the number of runs to 1000 for enhanced optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v64.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v64.py new file mode 100644 index 000000000..e48e52166 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v64.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v64: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.9, + cr_min=0.4, + cr_max=0.9, + local_search_iters=500, + perturbation_factor=0.03, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(1500): # Increased the number of runs to 1500 for enhanced optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v66.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v66.py new file mode 100644 index 000000000..49ee840b0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v66.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v66: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.7, + cr_max=0.9, + local_search_iters=2000, + perturbation_factor=0.05, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(3000): # Increased the number of runs to 3000 for further optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v67.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v67.py new file mode 100644 index 000000000..8a65e1f33 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v67.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v67: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.7, + cr_max=0.9, + local_search_iters=2000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(5000): # Increased the number of runs to 5000 for further optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v68.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v68.py new file mode 100644 index 000000000..0d28d46d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v68.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v68: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.7, + cr_max=0.9, + local_search_iters=2000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(10000): # Increased the number of runs to 10000 for further optimization + best_fitness = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v69.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v69.py new file mode 100644 index 000000000..b463a72fc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v69.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v69: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.7, + cr_max=0.9, + local_search_iters=2000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v70.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v70.py new file mode 100644 index 000000000..ed0313598 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v70.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v70: + def __init__( + self, + budget=10000, + p_best=0.15, + f_min=0.5, + f_max=0.8, + cr_min=0.6, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.05, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v71.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v71.py new file mode 100644 index 000000000..e8d073c7c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v71.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v71: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v72.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v72.py new file mode 100644 index 000000000..09080b8a1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v72.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v72: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.7, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v73.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v73.py new file mode 100644 index 000000000..36c3b0ca6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v73.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v73: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.7, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v74.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v74.py new file mode 100644 index 000000000..01d756554 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v74.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v74: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v75.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v75.py new file mode 100644 index 000000000..44b0e5924 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v75.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v75: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v76.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v76.py new file mode 100644 index 000000000..570edc4f7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v76.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v76: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v77.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v77.py new file mode 100644 index 000000000..e58c2111d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v77.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v77: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.9, + cr_max=0.95, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v78.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v78.py new file mode 100644 index 000000000..e60597588 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v78.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v78: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=0.9, + cr_min=0.8, + cr_max=0.95, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v79.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v79.py new file mode 100644 index 000000000..c97cd4fa2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v79.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v79: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.6, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v80.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v80.py new file mode 100644 index 000000000..a5cd11c42 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionLocalSearch_v80.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionLocalSearch_v80: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.7, + f_max=0.9, + cr_min=0.6, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.1, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial = np.where(np.random.rand(self.dim) < cr, trial, target) + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_fitness = self.enhanced_de_local_search(func) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..e793b97a9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.dim = 5 # Dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize archive to store successful mutation vectors + archive = [] + + while self.eval_count < self.budget: + new_population = [] + new_fitness = [] + for i in range(self.pop_size): + # Mutation with archive usage + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if archive: + d = archive[np.random.randint(len(archive))] + mutant = np.clip( + a + F_values[i] * (b - c) + F_values[i] * (a - d), self.bounds[0], self.bounds[1] + ) + else: + mutant = np.clip(a + F_values[i] * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + archive.append(population[i]) + # Limit archive size + if len(archive) > self.pop_size: + archive.pop(np.random.randint(len(archive))) + # Self-adapting parameters + F_values[i] = F_values[i] * 1.1 if F_values[i] < 1 else F_values[i] + CR_values[i] = CR_values[i] * 1.1 if CR_values[i] < 1 else CR_values[i] + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + F_values[i] = F_values[i] * 0.9 if F_values[i] > 0 else F_values[i] + CR_values[i] = CR_values[i] * 0.9 if CR_values[i] > 0 else CR_values[i] + + if self.eval_count >= self.budget: + break + + # Replace the old population with the new one + population = np.array(new_population) + fitness = np.array(new_fitness) + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizer.py new file mode 100644 index 000000000..3aafc0625 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizer.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionParticleSwarmOptimizer: + def __init__( + self, + budget, + swarm_size=20, + differential_weight=0.5, + crossover_rate=0.7, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV2.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV2.py new file mode 100644 index 000000000..3b3d0c4c8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV2.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionParticleSwarmOptimizerV2: + def __init__( + self, + budget, + swarm_size=20, + differential_weight=0.5, + crossover_rate=0.7, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV3.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV3.py new file mode 100644 index 000000000..18fe58b1a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV3.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionParticleSwarmOptimizerV3: + def __init__( + self, + budget, + swarm_size=20, + differential_weight=0.6, + crossover_rate=0.8, + inertia_weight=0.6, + cognitive_weight=1.3, + social_weight=1.3, + max_velocity=0.3, + mutation_rate=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV4.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV4.py new file mode 100644 index 000000000..7e82987c1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionParticleSwarmOptimizerV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionParticleSwarmOptimizerV4: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDifferentialEvolutionWithAdaptiveMutationControl.py b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionWithAdaptiveMutationControl.py new file mode 100644 index 000000000..66b379837 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialEvolutionWithAdaptiveMutationControl.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedDifferentialEvolutionWithAdaptiveMutationControl: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + mutation_control_factor=0.01, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.mutation_control_factor = mutation_control_factor + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate, func + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate, func): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), func.bounds.lb, func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + # Adaptive mutation control + mutation_strength = np.mean(np.abs(new_scaling_factors - scaling_factors)) + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + self.mutation_control_factor * mutation_strength), + 0.1, + 1.0, + ) + + return np.clip(new_scaling_factors, *scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) diff --git a/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm.py new file mode 100644 index 000000000..3e2d494c0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedDifferentialFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, scaling_factor=0.5, crossover_rate=0.9): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.scaling_factor = scaling_factor + self.crossover_rate = crossover_rate + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scaling_factor, firework + self.scaling_factor, (self.n_sparks, self.dim) + ) + return sparks + + def differential_evolution(self, current, target1, target2): + mutant = current + self.scaling_factor * (target1 - target2) + crossover_points = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover_points, mutant, current) + return trial + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * (fireworks[i] - self.x_opt) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = self.differential_evolution(fireworks[i], fireworks[idx1], fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm_v2.py b/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm_v2.py new file mode 100644 index 000000000..168682b06 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialFireworkAlgorithm_v2.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedDifferentialFireworkAlgorithm_v2: + def __init__( + self, + budget=10000, + n_fireworks=20, + n_sparks=10, + scaling_factor=0.5, + crossover_rate=0.9, + levy_flight_prob=0.3, + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.scaling_factor = scaling_factor + self.crossover_rate = crossover_rate + self.levy_flight_prob = levy_flight_prob + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scaling_factor, firework + self.scaling_factor, (self.n_sparks, self.dim) + ) + return sparks + + def differential_evolution(self, current, target1, target2): + mutant = current + self.scaling_factor * (target1 - target2) + crossover_points = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover_points, mutant, current) + return trial + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + if np.random.rand() < self.levy_flight_prob: + fireworks[i] += self.levy_flight() * (fireworks[i] - self.x_opt) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = self.differential_evolution(fireworks[i], fireworks[idx1], fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentialSimulatedAnnealingOptimizer.py b/nevergrad/optimization/lama/EnhancedDifferentialSimulatedAnnealingOptimizer.py new file mode 100644 index 000000000..90a44c330 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentialSimulatedAnnealingOptimizer.py @@ -0,0 +1,53 @@ +import numpy as np + + +class EnhancedDifferentialSimulatedAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize parameters + T = 1.0 # Initial temperature for simulated annealing + T_min = 0.001 # Minimum temperature to stop annealing + alpha = 0.95 # Cooling rate for annealing, increased for slower cooling + mutation_factor = 0.85 # Enhanced mutation factor for better exploration + crossover_probability = 0.7 # Increased crossover probability + + # Initialize the population + population_size = 20 # Increased population size for better diversity + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + + # Evaluate the initial population + fitness = np.array([func(ind) for ind in population]) + f_opt = np.min(fitness) + x_opt = population[np.argmin(fitness)] + + # Main optimization loop + evaluation_count = population_size + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + # Differential mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + # Simulated annealing acceptance criterion + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cool down the temperature + T *= alpha + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedDifferentiatedAdaptiveEvolution.py b/nevergrad/optimization/lama/EnhancedDifferentiatedAdaptiveEvolution.py new file mode 100644 index 000000000..b3493684d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDifferentiatedAdaptiveEvolution.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedDifferentiatedAdaptiveEvolution: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=(0.5, 2.0), + crossover_rate=(0.1, 1.0), + p_best=0.2, + scaling_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.p_best = p_best + self.scaling_factor = scaling_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + mutation_factor = self.adapt_parameter(self.mutation_factor, fitness_values, i) + crossover_rate = self.adapt_parameter(self.crossover_rate, fitness_values, i) + + mutant = np.clip( + a + mutation_factor * (b - c) + mutation_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: # Include equal fitness for diversity + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def adapt_parameter(self, parameter_range, fitness_values, idx): + sorted_fitness_idxs = np.argsort(fitness_values) + best_idx = sorted_fitness_idxs[0] + + if idx == best_idx: + return parameter_range[1] + + worst_idx = sorted_fitness_idxs[-1] + diff = np.abs(fitness_values[best_idx] - fitness_values[worst_idx]) + + if diff == 0: + return parameter_range[0] + + return np.clip( + parameter_range[0] + + (parameter_range[1] - parameter_range[0]) + * (fitness_values[idx] - fitness_values[worst_idx]) + / diff, + parameter_range[0], + parameter_range[1], + ) diff --git a/nevergrad/optimization/lama/EnhancedDimensionalFeedbackEvolverV3.py b/nevergrad/optimization/lama/EnhancedDimensionalFeedbackEvolverV3.py new file mode 100644 index 000000000..4e6fcb704 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDimensionalFeedbackEvolverV3.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedDimensionalFeedbackEvolverV3: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=350, + elite_fraction=0.1, + mutation_intensity=0.025, + crossover_probability=0.65, + feedback_factor=0.6, + exploration_increment=0.05, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.feedback_factor = feedback_factor + self.exploration_increment = exploration_increment + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + feedback_vector = self.feedback_factor * (previous_best - previous_population[i]) + child = np.clip(child + feedback_vector, self.lower_bound, self.upper_bound) + new_population[i] = child + + # Ensuring the best previous individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + # Incrementally increase mutation intensity to enhance exploration over time + self.mutation_intensity += self.exploration_increment * (evaluations / self.budget) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedDiverseMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedDiverseMemoryHybridOptimizer.py new file mode 100644 index 000000000..5cd13c325 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiverseMemoryHybridOptimizer.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDiverseMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.diversity_threshold = diversity_threshold + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def crowding_distance(self, population): + distances = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + distances[i] += np.linalg.norm(population[i] - population[j]) + return distances + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def calculate_diversity(self, population): + pairwise_distances = np.sqrt(((population[:, np.newaxis] - population) ** 2).sum(axis=2)) + diversity = np.mean(pairwise_distances) + return diversity + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Diversity preservation mechanism + diversity = self.calculate_diversity(population) + if diversity < self.diversity_threshold: + new_population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim) + ) + population = np.concatenate((population, new_population), axis=0) + fitness = np.concatenate((fitness, [func(ind) for ind in new_population])) + self.eval_count += self.init_pop_size + velocities = np.concatenate( + (velocities, np.random.uniform(-1, 1, (self.init_pop_size, self.dim))), axis=0 + ) + F_values = np.concatenate((F_values, np.full(self.init_pop_size, self.init_F)), axis=0) + CR_values = np.concatenate((CR_values, np.full(self.init_pop_size, self.init_CR)), axis=0) + p_best = np.concatenate((p_best, new_population), axis=0) + p_best_fitness = np.concatenate( + (p_best_fitness, [func(ind) for ind in new_population]), axis=0 + ) + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedAdaptiveHarmonySearch.py b/nevergrad/optimization/lama/EnhancedDiversifiedAdaptiveHarmonySearch.py new file mode 100644 index 000000000..c7a004627 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedAdaptiveHarmonySearch.py @@ -0,0 +1,103 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedDiversifiedAdaptiveHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def intensify_exploration(self, harmony_memory, func): + for j in range(len(harmony_memory)): + rand_idx = np.random.choice([k for k in range(len(harmony_memory)) if k != j]) + trial_harmony = (harmony_memory[j] + harmony_memory[rand_idx]) / 2 + if func(trial_harmony) < func(harmony_memory[j]): + harmony_memory[j] = trial_harmony + + def diversify_population(self, harmony_memory, func, func_bounds): + for i in range(len(harmony_memory)): + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func_bounds.lb, func_bounds.ub) + if func(new_harmony) < func(harmony_memory[i]): + harmony_memory[i] = new_harmony + + def local_optimization(self, solution, func, func_bounds): + current_solution = solution.copy() + for _ in range(10): + new_solution = self.exploit( + [current_solution], func, func.bounds, bandwidth=0.05 + ) # Fixed bandwidth for local search + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.6: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 20 == 0: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + self.intensify_exploration(harmony_memory, func) + + if i % 100 == 0: + self.diversify_population(harmony_memory, func, func.bounds) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithm.py new file mode 100644 index 000000000..bcbe6bd62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithm.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedDiversifiedCuckooFireworksAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + self.alpha = alpha + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def update_step_size(self, iter_count): + return self.step_size / (1 + iter_count) ** self.alpha + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + population = self.update_diversity_mutation(population) + self.step_size = self.update_step_size(i) + + return np.mean(best_fitnesses), best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithmV2.py new file mode 100644 index 000000000..9f583a3a6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedCuckooFireworksAlgorithmV2.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDiversifiedCuckooFireworksAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + step_size=0.1, + diversity_rate=0.2, + levy_beta=1.8, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.step_size = step_size + self.diversity_rate = diversity_rate + self.levy_beta = levy_beta + self.alpha = alpha + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / self.levy_beta)) + return levy + + def update_diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def update_step_size(self, iter_count): + return self.step_size / (1 + iter_count) ** self.alpha + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + population = self.update_diversity_mutation(population) + self.step_size = self.update_step_size(i) + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimization.py new file mode 100644 index 000000000..2c36b427f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimization.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimization: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(200): # Increase the number of optimization runs to 200 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Increase the number of iterations within each optimization run to 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOOC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV2.py new file mode 100644 index 000000000..471c8d86d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV2.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV2: + def __init__( + self, + budget=5000, + G0=100.0, + alpha=0.1, + delta=0.1, + gamma=0.3, + population_size=200, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(20): # Perform multiple runs and take the best result + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(15): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV3.py new file mode 100644 index 000000000..d89118f26 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV3.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV3: + def __init__( + self, + budget=5000, + G0=100.0, + alpha=0.1, + delta=0.1, + gamma=0.3, + population_size=200, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(30): # Increase the number of runs + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV4.py new file mode 100644 index 000000000..56a64730e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV4.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV4: + def __init__( + self, + budget=5000, + G0=50.0, + alpha=0.1, + delta=0.1, + gamma=0.3, + population_size=200, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(50): # Increase the number of runs + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(30): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV5.py new file mode 100644 index 000000000..cf966ca37 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV5.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV5: + def __init__( + self, + budget=5000, + G0=100.0, + alpha=0.1, + delta=0.1, + gamma=0.3, + population_size=300, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(100): # Increase the number of runs further + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(50): # Increase the number of iterations within each run further + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV6.py new file mode 100644 index 000000000..d715cad23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV6.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV6: + def __init__( + self, + budget=5000, + G0=50.0, + alpha=0.05, + delta=0.01, + gamma=0.1, + population_size=200, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(200): # Increase the number of runs + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(100): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV7.py new file mode 100644 index 000000000..17eb6bb6c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedGravitationalSwarmOptimizationV7.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDiversifiedGravitationalSwarmOptimizationV7: + def __init__( + self, + budget=5000, + G0=10.0, + alpha=0.02, + delta=0.005, + gamma=0.1, + population_size=300, + rho_min=0.1, + rho_max=0.35, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + + for _ in range(300): # Increase the number of runs + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(150): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer.py new file mode 100644 index 000000000..d8e0afd92 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonicHarmonyOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + harmony_memory_size=10, + bandwidth=3.0, + exploration_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution): + exploration = np.random.uniform( + -self.exploration_rate, self.exploration_rate, (self.population_size, self.dim) + ) + new_population = population + exploration + return new_population + + def diversify_population(self, population, best_solution): + population_mean = np.mean(population, axis=0) + exploration = np.random.uniform( + -self.exploration_rate, self.exploration_rate, (self.population_size, self.dim) + ) + diversified_population = population_mean + exploration + return diversified_population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution) + diversified_population = self.diversify_population(population, best_solution) + population = np.vstack((population, new_population, diversified_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.update_bandwidth(i) + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V2.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V2.py new file mode 100644 index 000000000..c217cb5c4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V2.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonicHarmonyOptimizer_V2: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + harmony_memory_size=5, + bandwidth=2.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution): + exploration = np.random.uniform( + -self.exploration_rate, self.exploration_rate, (self.population_size, self.dim) + ) + new_population = population + exploration + return new_population + + def diversify_population(self, population, best_solution): + population_mean = np.mean(population, axis=0) + exploration = np.random.uniform( + -self.exploration_rate, self.exploration_rate, (self.population_size, self.dim) + ) + diversified_population = population_mean + exploration + return diversified_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution) + diversified_population = self.diversify_population(population, best_solution) + population = np.vstack((population, new_population, diversified_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.update_bandwidth(i) + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V3.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V3.py new file mode 100644 index 000000000..41c781bd4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonicHarmonyOptimizer_V3.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonicHarmonyOptimizer_V3: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + harmony_memory_size=5, + bandwidth=2.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution): + exploration = np.random.normal(0, self.bandwidth, (self.population_size, self.dim)) + new_population = population + exploration + return new_population + + def diversify_population(self, population, best_solution): + population_mean = np.mean(population, axis=0) + exploration = np.random.normal(0, self.bandwidth, (self.population_size, self.dim)) + diversified_population = population_mean + exploration + return diversified_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution) + diversified_population = self.diversify_population(population, best_solution) + population = np.vstack((population, new_population, diversified_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.update_bandwidth(i) + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyAlgorithm.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyAlgorithm.py new file mode 100644 index 000000000..ae5d40c57 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyAlgorithm.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonyAlgorithm: + def __init__(self, budget=10000, population_size=20, dim=5, pa=0.25, beta=1.5, gamma=0.01, alpha=0.95): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another cuckoo + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + # Further exploration by random perturbation to improve diversity + new_harmony[i] += np.random.normal(0, 0.1, self.dim) + + self.population = new_harmony + + def __call__(self, func): + for _ in range(self.budget): + self.update_population(func) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithm.py new file mode 100644 index 000000000..ead5d0b27 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithm.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonyFireworksAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=20, + pitch_adjust_rate=0.7, + mutation_rate=0.2, + diversity_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + if np.random.rand() < 0.5: + new_solution[i] = best_solution[i] + else: + new_solution[i] = np.random.uniform(-5.0, 5.0) + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + + return np.clip(new_solution, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + if np.random.rand() < self.diversity_rate: + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV2.py new file mode 100644 index 000000000..c56b34d1c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonyFireworksAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=20, + pitch_adjust_rate=0.7, + mutation_rate=0.2, + diversity_rate=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + if np.random.rand() < 0.5: + new_solution[i] = best_solution[i] + else: + new_solution[i] = np.random.uniform(-5.0, 5.0) + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + + return np.clip(new_solution, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + if np.random.rand() < self.diversity_rate: + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV3.py new file mode 100644 index 000000000..28556a4f7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonyFireworksAlgorithmV3.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonyFireworksAlgorithmV3: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=20, + pitch_adjust_rate=0.7, + mutation_rate=0.2, + diversity_rate=0.3, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + if np.random.rand() < 0.5: + new_solution[i] = best_solution[i] + else: + new_solution[i] = np.random.uniform(-5.0, 5.0) + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + + return np.clip(new_solution, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + if np.random.rand() < self.diversity_rate: + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonySearchOptimizer.py new file mode 100644 index 000000000..86ceb9fff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedHarmonySearchOptimizer.py @@ -0,0 +1,105 @@ +import numpy as np + + +class EnhancedDiversifiedHarmonySearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return min(1.5, bandwidth * 1.1) + else: + return max(0.5, bandwidth * 0.9) + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.01, self.exploration_rate * 0.95) + else: + return min(0.3, self.exploration_rate * 1.05) + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < 0.1: # 10% chance of diversification + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + population = self.diversify_population(population) # Diversify the population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + if abs(best_fitness - prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV3.py new file mode 100644 index 000000000..5f470f217 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV3.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDiversifiedMetaHeuristicAlgorithmV3: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + mutation_rate=0.1, + step_size=0.1, + diversity_rate=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + self.step_size = step_size + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + beta = 1.5 + sigma1 = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / beta)) + return levy + + def adaptive_mutation_rate(self, success_counts, trial_counts): + return self.mutation_rate * (1 - success_counts / (trial_counts + 1)) + + def update_trial_counts(self, success_mask, trial_counts): + trial_counts += ~success_mask + trial_counts[success_mask] = 0 + return trial_counts + + def diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + success_counts = np.zeros(self.population_size) + trial_counts = np.zeros(self.population_size) + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + success_counts += 1 + + success_mask = fitness < best_fitness + trial_counts = self.update_trial_counts(success_mask, trial_counts) + mutation_rates = self.adaptive_mutation_rate(success_counts, trial_counts) + + population = self.diversity_mutation(population) + self.step_size = np.clip(self.step_size * np.exp(np.mean(mutation_rates)), 0.01, 0.5) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV4.py b/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV4.py new file mode 100644 index 000000000..1a6409df0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDiversifiedMetaHeuristicAlgorithmV4.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedDiversifiedMetaHeuristicAlgorithmV4: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + mutation_rate=0.1, + step_size=0.1, + diversity_rate=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + self.step_size = step_size + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + beta = 1.5 + sigma1 = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / beta)) + return levy + + def adaptive_mutation_rate(self, success_counts, trial_counts): + return self.mutation_rate * (1 - success_counts / (trial_counts + 1)) + + def update_trial_counts(self, success_mask, trial_counts): + trial_counts += ~success_mask + trial_counts[success_mask] = 0 + return trial_counts + + def diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + success_counts = np.zeros(self.population_size) + trial_counts = np.zeros(self.population_size) + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + success_counts += 1 + + success_mask = fitness < best_fitness + trial_counts = self.update_trial_counts(success_mask, trial_counts) + mutation_rates = self.adaptive_mutation_rate(success_counts, trial_counts) + + population = self.diversity_mutation(population) + self.step_size = np.clip(self.step_size * np.exp(np.mean(mutation_rates)), 0.01, 0.5) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization.py b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization.py new file mode 100644 index 000000000..f536710bd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization.py @@ -0,0 +1,146 @@ +import numpy as np + + +class EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 # Increased for better exploration + self.initial_F = 0.8 # Tweaked mutation factor + self.initial_CR = 0.6 # Tweaked crossover rate + self.elite_rate = 0.20 # Adjusted elite preservation rate + self.local_search_rate = 0.5 # Increased local search rate + self.memory_size = 30 # Increased memory size for better parameter adaptation + self.w = 0.8 # Tweaked inertia weight + self.c1 = 2.0 # Increased cognitive component + self.c2 = 2.0 # Increased social component + self.adaptive_phase_ratio = 0.7 # Increased DE phase ratio for better initial exploration + self.alpha = 0.5 # Increased differential weight for faster convergence + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Increased local search step size for better local exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizationV3.py b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizationV3.py new file mode 100644 index 000000000..ead2a9a40 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizationV3.py @@ -0,0 +1,146 @@ +import numpy as np + + +class EnhancedDualPhaseAdaptiveHybridOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 250 # Increased population size for better diversity + self.initial_F = 0.8 # Adjusted mutation factor + self.initial_CR = 0.9 # Adjusted crossover rate + self.elite_rate = 0.05 # Reduced elite rate to maintain diversity + self.local_search_rate = 0.3 # Balanced local search rate + self.memory_size = 20 # Reduced memory size for quicker adaptation + self.w = 0.5 # Lowered inertia weight for improved convergence speed + self.c1 = 1.5 # Adjusted cognitive component + self.c2 = 1.7 # Adjusted social component + self.adaptive_phase_ratio = 0.7 # Allocate more budget to the DE phase + self.alpha = 0.1 # Differential weight for exploration-exploitation balance + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Fine-tuned local search step + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedDualPhaseAdaptiveHybridOptimizationV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizerV3.py b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizerV3.py new file mode 100644 index 000000000..c4d31ef63 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveHybridOptimizerV3.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDualPhaseAdaptiveHybridOptimizerV3: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.7 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 10 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + phase_one_budget = int(self.budget * 0.5) # Increase exploration phase budget + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(-1, 1, self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (phase_one_budget - eval_count) / phase_one_budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + if neighbor_fitness < fitness[i]: + new_population[i] = neighbor + fitness[i] = neighbor_fitness + if neighbor_fitness < best_fitness: + best_individual = neighbor + best_fitness = neighbor_fitness + + if eval_count >= phase_one_budget: + break + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..0991471dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,170 @@ +import numpy as np + + +class EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.5 + self.local_search_iters = 5 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) # Changed mutation strategy before phase switch + else: + mutant = self.mutate_rand_1( + parent1, parent2, parent3, F + ) # Changed mutation strategy after phase switch + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDualPhaseDifferentialEvolution.py new file mode 100644 index 000000000..0c682e45a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseDifferentialEvolution.py @@ -0,0 +1,159 @@ +import numpy as np + + +class EnhancedDualPhaseDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.memory_size = 30 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 20 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.3 + self.alpha = 0.01 + self.beta = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + phase_two_trigger = False + phase_two_budget = self.budget // 2 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_count = int(self.elitism_rate * self.pop_size) + elite_population = new_population[sorted_indices[:elite_count]] + elite_fitness = new_fitness[sorted_indices[:elite_count]] + + for idx in range(elite_count): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices[:elite_count]] = elite_population + new_fitness[sorted_indices[:elite_count]] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = (1 - self.beta) * self.memory_F[ + self.memory_index + ] + self.beta * F + self.memory_CR[self.memory_index] = (1 - self.beta) * self.memory_CR[ + self.memory_index + ] + self.beta * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + # Switching to Phase Two + if not phase_two_trigger and evaluations >= phase_two_budget: + additional_pop_size = self.pop_size // 2 + additional_population = self.initialize_population(bounds)[:additional_pop_size] + additional_fitness = np.array([func(ind) for ind in additional_population]) + population = np.concatenate((population, additional_population), axis=0) + fitness = np.concatenate((fitness, additional_fitness), axis=0) + evaluations += additional_pop_size + self.pop_size += additional_pop_size + phase_two_trigger = True + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimization.py b/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimization.py new file mode 100644 index 000000000..1343e94b4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class EnhancedDualPhaseHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 250 # Further increased population size for even better exploration + self.initial_F = 0.8 # Slightly higher mutation factor for broader search + self.initial_CR = 0.85 # Balanced crossover rate + self.elite_rate = 0.1 # Adjusted elite rate for better balance + self.local_search_rate = 0.3 # Further increased local search rate for enhanced exploitation + self.memory_size = 30 # Increased memory size for better parameter adaptation + self.w = 0.5 # Adjusted inertia weight + self.c1 = 1.5 # Balanced cognitive component + self.c2 = 1.5 # Balanced social component + self.phase_switch_ratio = 0.6 # More budget allocated to DE phase for better search + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Further larger local search step + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedDualPhaseHybridOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimizationV2.py b/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimizationV2.py new file mode 100644 index 000000000..8b3fc8a8f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualPhaseHybridOptimizationV2.py @@ -0,0 +1,145 @@ +import numpy as np + + +class EnhancedDualPhaseHybridOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 400 # Increased population size for better exploration + self.initial_F = 0.8 # Balanced mutation factor + self.initial_CR = 0.9 # Slightly higher crossover rate + self.elite_rate = 0.2 # Increased elite rate for more robust performance + self.local_search_rate = 0.3 # Balanced local search rate + self.memory_size = 50 # Further increased memory size for better parameter adaptation + self.w = 0.7 # Increased inertia weight for better exploration + self.c1 = 1.3 # Slightly reduced cognitive component + self.c2 = 1.8 # Increased social component + self.adaptive_phase_ratio = 0.4 # Allocate more budget to the DE phase initially + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Balanced local search step for better local exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedDualPhaseHybridOptimizationV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedDualStrategyAdaptiveDE_v2.py b/nevergrad/optimization/lama/EnhancedDualStrategyAdaptiveDE_v2.py new file mode 100644 index 000000000..f0d8f2bb4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualStrategyAdaptiveDE_v2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedDualStrategyAdaptiveDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.3 + self.local_search_prob = 0.25 + self.archive = [] + self.tol = 1e-6 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.05 * (np.random.rand(self.dim) - 0.5) # Adjusted perturbation for broader search + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/EnhancedDualStrategyHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedDualStrategyHybridOptimizer.py new file mode 100644 index 000000000..c0313664b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDualStrategyHybridOptimizer.py @@ -0,0 +1,154 @@ +import numpy as np + + +class EnhancedDualStrategyHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.5 + self.elite_fraction = 0.1 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(20, self.budget - evaluations) + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.01, bounds.lb, bounds.ub + ) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + # Additional mechanism for maintaining diversity + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveClimbingStrategy.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveClimbingStrategy.py new file mode 100644 index 000000000..9b129a324 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveClimbingStrategy.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveClimbingStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 250 + elite_size = 30 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.1 + adaptive_factor = 0.95 + recombination_prob = 0.85 # Increased probability for recombination + + # Enhancing exploration and exploitation + last_best_fitness = np.inf + + while evaluations < self.budget: + success_count = 0 + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + parents_indices = np.random.choice(population_size, 4, replace=False) # Use 4 parents + parent1, parent2, parent3, parent4 = population[parents_indices] + child = (parent1 + parent2 + parent3 + parent4) / 4 # Average of 4 parents + else: + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + distance_to_best = np.linalg.norm(population[best_idx] - child) + individual_mutation_scale = mutation_scale * adaptive_factor ** (distance_to_best) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + success_count += 1 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if fitness[current_best_idx] < last_best_fitness: + last_best_fitness = fitness[current_best_idx] + success_rate = success_count / population_size + adaptive_factor = max(0.8, adaptive_factor - 0.05 * success_rate) + mutation_scale = mutation_scale + 0.02 * (1 - success_rate) + + # Elite reinforcement for better global optima stabilization + if evaluations % 200 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size): + if idx not in elite_indices: + population[idx] = elite_individuals[np.random.choice(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDE.py new file mode 100644 index 000000000..fab728517 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDE.py @@ -0,0 +1,162 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicAdaptiveDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 20 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation and crossover factors + success_rate = max(0, (self.budget - self.pop_size * generation) / self.budget) + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) * success_rate + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) * success_rate + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Enhanced selection strategy + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Hybrid mutation strategy based on success rate + if success_rate < 0.3: + mutant = x1 + mutation_factor * (x2 - x3) + elif success_rate < 0.6: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + if np.random.rand() < 0.5: + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + else: + # Gradient-based adjustment + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + result = minimize(func, best_x + perturbation, method="BFGS", options={"maxiter": 10}) + + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..ebbc0128d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolution.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveDifferentialEvolution: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_scaling_factors = scaling_factors * np.exp( + 0.05 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.05 * (fitness_values.min() - fitness_values) + ) + new_crossover_rates = crossover_rates * np.exp( + 0.05 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.05 * (fitness_values.min() - fitness_values) + ) + + return np.clip(new_scaling_factors, *self.scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation.py new file mode 100644 index 000000000..a658f6b69 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + hypermutation_probability=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.hypermutation_probability = hypermutation_probability + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, p_best, scaling_factor, crossover_rate, func + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + if np.random.rand() < self.hypermutation_probability: + population = self.hypermutate_population(population, func) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, p_best, scaling_factor, crossover_rate, func): + dimension = len(current) + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - current), func.bounds.lb, func.bounds.ub + ) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + 0.05 * ( + fitness_values.min() - fitness_values + ) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + 0.05 * ( + fitness_values.min() - fitness_values + ) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + return np.clip(new_scaling_factors, *self.scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) + + def hypermutate_population(self, population, func): + dimension = len(population[0]) + mutated_population = population + np.random.normal(0, 0.1, size=(self.population_size, dimension)) + mutated_population = np.clip(mutated_population, func.bounds.lb, func.bounds.ub) + return mutated_population diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionRefined.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionRefined.py new file mode 100644 index 000000000..f1cd34336 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionRefined.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveDifferentialEvolutionRefined: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, p_best, scaling_factor, crossover_rate, func + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, p_best, scaling_factor, crossover_rate, func): + dimension = len(current) + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - current), func.bounds.lb, func.bounds.ub + ) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + scaling_factor_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + 0.05 * ( + fitness_values.min() - fitness_values + ) + crossover_rate_adaptation = 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + 0.05 * ( + fitness_values.min() - fitness_values + ) + + new_scaling_factors = scaling_factors * np.exp(scaling_factor_adaptation) + new_crossover_rates = crossover_rates * np.exp(crossover_rate_adaptation) + + return np.clip(new_scaling_factors, *self.scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionV2.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionV2.py new file mode 100644 index 000000000..82d69f2b5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveDifferentialEvolutionV2.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveDifferentialEvolutionV2: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, p_best, scaling_factor, crossover_rate, func + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, p_best, scaling_factor, crossover_rate, func): + dimension = len(current) + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - current), func.bounds.lb, func.bounds.ub + ) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_scaling_factors = scaling_factors * np.exp( + 0.05 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.05 * (fitness_values.min() - fitness_values) + ) + new_crossover_rates = crossover_rates * np.exp( + 0.05 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.05 * (fitness_values.min() - fitness_values) + ) + + return np.clip(new_scaling_factors, *self.scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..1d952ffbd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveExplorationOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 + c2 = 1.5 + w = 0.6 + + # Learning rate adaptation parameters + alpha = 0.1 + beta = 0.85 + epsilon = 1e-8 + + # Differential Evolution parameters + F = 0.9 + CR = 0.8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 + + # Exploration improvement parameters + exploration_factor = 0.2 + max_exploration_cycles = 40 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedDynamicAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..259aaf59b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveFireworkAlgorithm.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..9fa05cf18 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveGravitationalSwarmIntelligence: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(1000): # Increased the number of optimization runs to 1000 for better exploration + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(200): # Increased the number of iterations within each optimization run to 200 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2.py new file mode 100644 index 000000000..2613949bc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(self.population_size): + for j in range(self.population_size): + if np.random.rand() < self.beta_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.beta_max = self.update_beta(t) + self.alpha_min = self.update_alpha(t) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + self.update_parameters(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizer.py new file mode 100644 index 000000000..0ad02aec0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizer.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizer: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.1 + 0.4 * (1 - iteration / self.budget) # Adaptive Pitch Adjustment Rate + hmcr = 0.6 + 0.2 * (1 - iteration / self.budget) # Adaptive Harmony Memory Consideration Rate + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.01: # Fixed value for pitch adjustment + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Exploring rate fixed to 0.1 + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) # Dynamic step size adaptation + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV2.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV2.py new file mode 100644 index 000000000..74f78d0cf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV2.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizerV2: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.1 + 0.4 * (1 - iteration / self.budget) # Adaptive Pitch Adjustment Rate + hmcr = 0.6 + 0.2 * (1 - iteration / self.budget) # Adaptive Harmony Memory Consideration Rate + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.01: # Fixed value for pitch adjustment + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Exploring rate fixed to 0.1 + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) # Dynamic step size adaptation + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV3.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV3.py new file mode 100644 index 000000000..5e6af9c66 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV3.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizerV3: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.2 + 0.3 * (1 - iteration / self.budget) # Adjusted Pitch Adjustment Rate range + hmcr = 0.6 + 0.2 * (1 - iteration / self.budget) + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.05: # Increased pitch adjustment probability + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.15: # Enhanced exploring rate + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV4.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV4.py new file mode 100644 index 000000000..4fcb47812 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV4.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizerV4: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.2 + 0.3 * (1 - iteration / self.budget) # Adjusted Pitch Adjustment Rate range + hmcr = 0.6 + 0.3 * (1 - iteration / self.budget) # Increased HMCR range + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.15: # Adjusted pitch adjustment probability + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Enhanced exploring rate + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV5.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV5.py new file mode 100644 index 000000000..b133f313c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV5.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizerV5: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.2 + 0.3 * (1 - iteration / self.budget) # Adjusted Pitch Adjustment Rate range + hmcr = 0.6 + 0.4 * (1 - iteration / self.budget) # Increased HMCR range + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.2: # Adjusted pitch adjustment probability + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Enhanced exploring rate + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV6.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV6.py new file mode 100644 index 000000000..d700d3b82 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHarmonySearchOptimizerV6.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHarmonySearchOptimizerV6: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.2 + 0.3 * (1 - iteration / self.budget) # Adjusted Pitch Adjustment Rate range + hmcr = 0.5 + 0.5 * (1 - iteration / self.budget) # Increased HMCR range + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.25: # Adjusted pitch adjustment probability + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.15: # Enhanced exploring rate + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..0e8ee9b69 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridDEPSO.py @@ -0,0 +1,151 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: # Increase probability of adapting F and CR + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind, std_dev_factor=0.5): + std_dev = np.std(population, axis=0) * std_dev_factor + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart( + best_ind, std_dev_factor=0.5 + ) # Restart with smaller std dev + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimization.py new file mode 100644 index 000000000..14efc78db --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimization.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.stats import qmc + + +class EnhancedDynamicAdaptiveHybridOptimization: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + self.diversity_factor = 0.1 # Introduce a factor to control diversity perturbation + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + perturbation = np.random.uniform( + -self.diversity_factor, self.diversity_factor, self.dim + ) + if fitness[i] > fitness[j]: + population[i] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[i] = func(population[i]) + else: + population[j] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedDynamicAdaptiveHybridOptimization(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..1291d3b13 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveHybridOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 300 + initial_mutation_factor = 0.6 # Adjusted initial mutation factor + initial_crossover_prob = 0.8 # Adjusted initial crossover probability + adaptive_factor_mut = 0.002 # Adjusted finer adaptive change for mutation factor + adaptive_factor_cross = 0.002 # Adjusted finer adaptive change for crossover probability + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + num_iterations = self.budget // population_size + mutation_factor = initial_mutation_factor + crossover_prob = initial_crossover_prob + + for iteration in range(num_iterations): + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # More frequent dynamic adjustment based on performance + if iteration % 3 == 0: + current_mean_fitness = np.mean(fitness) + if best_value < current_mean_fitness: + mutation_factor = max(0.1, mutation_factor + adaptive_factor_mut) + crossover_prob = max(0.1, crossover_prob - adaptive_factor_cross) + else: + mutation_factor = max(0.1, mutation_factor - adaptive_factor_mut) + crossover_prob = min(1.0, crossover_prob + adaptive_factor_cross) + + mutant = np.clip(a + mutation_factor * (b - c), self.lower_bound, self.upper_bound) + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + trial_fitness = func(trial) + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryAnnealing.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryAnnealing.py new file mode 100644 index 000000000..2745417fd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryAnnealing.py @@ -0,0 +1,135 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions with variable size + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.5 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.2 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 3.0 # Higher acceptance for local search refinement + alpha = 0.90 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Adjust memory size dynamically based on progress + if evaluations % (self.budget // 4) == 0: + memory_size = min(20, memory_size + 5) + new_memory = np.zeros((memory_size, self.dim)) + new_memory_scores = np.full(memory_size, np.Inf) + new_memory[: len(memory)] = memory + new_memory_scores[: len(memory_scores)] = memory_scores + memory = new_memory + memory_scores = new_memory_scores + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryStrategyV59.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryStrategyV59.py new file mode 100644 index 000000000..01343bb56 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveMemoryStrategyV59.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveMemoryStrategyV59: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=10, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) >= self.memory_size: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adapt_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR based on sigmoid functions + self.F = 0.5 + 0.5 * np.sin(np.pi * iteration / total_iterations) + self.CR = 0.5 + 0.5 * np.cos(np.pi * iteration / total_iterations) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adapt_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveOptimizerV8.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveOptimizerV8.py new file mode 100644 index 000000000..f11e68da9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveOptimizerV8.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedDynamicAdaptiveOptimizerV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Optimized starting temperature for better initial exploration + T_min = 0.0005 # Fine-tuned minimum temperature for sustained exploration at later stages + alpha = 0.91 # Optimized cooling rate to maintain effective search duration + + # Mutation and crossover parameters are finely tuned for balance + F = 0.76 # Fine-tuned mutation factor to optimize explorative capabilities + CR = 0.86 # Optimized crossover probability to ensure diversity within population + + population_size = 82 # Optimally adjusted population size for effective search space sampling + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation approach with sigmoid-based control for adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptively tuning the mutation factor with a sigmoid function for refined control + dynamic_F = ( + F + * np.exp(-0.08 * T) + * (0.7 + 0.3 * np.tanh(3.3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Incorporating a more sensitive acceptance criterion that takes the current temperature into account + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with sinusoidal modulation for precise temperature control + adaptive_cooling = alpha - 0.007 * np.cos(2.7 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptivePopulationDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptivePopulationDifferentialEvolution.py new file mode 100644 index 000000000..ef87e3683 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptivePopulationDifferentialEvolution.py @@ -0,0 +1,184 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicAdaptivePopulationDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.initial_pop_size = 100 + self.min_pop_size = 20 + self.num_subpopulations = 5 + self.subpop_size = self.initial_pop_size // self.num_subpopulations + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 + self.CR = 0.9 + self.local_search_prob = 0.1 # Probability of performing local search + self.restart_threshold = 50 # Number of iterations to trigger restart if no improvement + self.history = [] + + def _initialize_population(self, pop_size): + return np.random.uniform(self.lb, self.ub, (pop_size, self.dim)) + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(len(population)) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, len(population) - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + subpopulations = np.array_split(population, self.num_subpopulations) + subfitness = np.array_split(fitness, self.num_subpopulations) + new_population = [] + new_fitness = [] + + for subpop, subfit in zip(subpopulations, subfitness): + for i in range(len(subpop)): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(len(subpop)) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(subfit) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(subpop, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(subpop, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(subpop, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(subpop, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < subfit[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(subpop[i]) + new_fitness.append(subfit[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: self.num_subpopulations] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population(self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.initial_pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + if self.no_improvement_count >= 10: + reduced_pop_size = max(self.min_pop_size, len(population) - 10) + population = population[:reduced_pop_size] + fitness = fitness[:reduced_pop_size] + self.subpop_size = len(population) // self.num_subpopulations + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveQuantumPSO.py new file mode 100644 index 000000000..21760b6c2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicAdaptiveQuantumPSO.py @@ -0,0 +1,133 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicAdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + self.convergence_threshold = 1e-6 # Convergence threshold for local search + self.stagnation_threshold = 10 # No improvement iterations before triggering local search + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 # Annealing factor for inertia weight + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + # Trigger local search after a certain number of iterations without improvement + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 # Reset the counter on improvement + + if eval_count >= self.budget: + break + + # Reset no improvement count after local search + self.no_improvement_count = 0 + + # Anneal inertia weight to enhance exploration-exploitation balance + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter, "ftol": self.convergence_threshold}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedDynamicAdaptiveQuantumPSO(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicBalancingPSO.py b/nevergrad/optimization/lama/EnhancedDynamicBalancingPSO.py new file mode 100644 index 000000000..57cac162a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicBalancingPSO.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDynamicBalancingPSO: + def __init__( + self, + budget=10000, + population_size=150, + omega_start=0.9, + omega_end=0.2, + phi_p=0.1, + phi_g=0.2, + adaptive_diversity_threshold=0.1, + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start + self.omega_end = omega_end + self.phi_p = phi_p + self.phi_g = phi_g + self.adaptive_diversity_threshold = adaptive_diversity_threshold + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + diversity = np.std(particles) + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_start - ((self.omega_start - self.omega_end) * evaluations / self.budget) + phi_total = self.phi_p + self.phi_g + phi_ratio = self.phi_p / phi_total + + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + + # Update velocities + velocities[i] = ( + omega * velocities[i] + + phi_ratio * self.phi_p * r_p * (personal_best[i] - particles[i]) + + (1 - phi_ratio) * self.phi_g * r_g * (global_best - particles[i]) + ) + + # Update positions + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + if np.std(particles) < diversity - self.adaptive_diversity_threshold: + phi_ratio += 0.02 # Encourage exploration + elif np.std(particles) > diversity + self.adaptive_diversity_threshold: + phi_ratio -= 0.02 # Encourage exploitation + diversity = np.std(particles) + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicClusterOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicClusterOptimization.py new file mode 100644 index 000000000..65e31ae31 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicClusterOptimization.py @@ -0,0 +1,151 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class EnhancedDynamicClusterOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def fractional_order_velocity_update(self, velocity, order=0.5): + return np.sign(velocity) * (np.abs(velocity) ** order) + + def local_search(self, position, func, step_size=0.1): + best_position = position + best_fitness = func(position) + for i in range(self.dim): + for direction in [-1, 1]: + new_position = np.copy(position) + new_position[i] += direction * step_size + new_position = np.clip(new_position, self.lb, self.ub) + new_fitness = func(new_position) + if new_fitness < best_fitness: + best_fitness = new_fitness + best_position = new_position + return best_position, best_fitness + + def __call__(self, func): + population_size = 80 + + # Hybrid Initialization using Sobol Sequence and Random Initialization + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size // 2) + population = qmc.scale(sample, self.lb, self.ub) + random_population = np.random.uniform(self.lb, self.ub, (population_size // 2, self.dim)) + population = np.vstack((population, random_population)) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + last_improvement = 0 + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations - last_improvement > self.budget // 10: + strategy = "DE" + else: + strategy = "PSO" + + if strategy == "PSO": + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * self.fractional_order_velocity_update(velocity[i]) + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy with Adaptive Crossover Mechanism + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + scaling_factor = 0.5 + np.random.rand() * 0.5 + mutant_vector = np.clip(a + scaling_factor * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + last_improvement = evaluations + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + # Apply local search for exploitation within clusters + for cluster_center in cluster_centers: + cluster_mask = np.all(np.isclose(population, cluster_center, atol=1.0), axis=1) + cluster_population = population[cluster_mask] + if len(cluster_population) > 0: + best_idx = np.argmin(fitness[cluster_mask]) + best_position = cluster_population[best_idx] + new_position, new_fitness = self.local_search(best_position, func) + if new_fitness < fitness[cluster_mask][best_idx]: + population[cluster_mask][best_idx] = new_position + fitness[cluster_mask][best_idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicClusterSearch.py b/nevergrad/optimization/lama/EnhancedDynamicClusterSearch.py new file mode 100644 index 000000000..5d6305b97 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicClusterSearch.py @@ -0,0 +1,164 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class EnhancedDynamicClusterSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 + elite_size = 5 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cluster_count = int(self.adaptive_parameters(evaluations, self.budget, 2, 10)) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + kmeans = KMeans(n_clusters=cluster_count) + clusters = kmeans.fit_predict(population) + cluster_centers = kmeans.cluster_centers_ + + for cluster_center in cluster_centers: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(cluster_center + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + # Intensify local search around best solutions found + for elite in elites: + local_search_radius = levy_factor * self.levy_flight(self.dim) + local_candidate = np.clip(elite + local_search_radius, self.lb, self.ub) + local_fitness = func(local_candidate) + evaluations += 1 + + if local_fitness < self.f_opt: + self.f_opt = local_fitness + self.x_opt = local_candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicCohortOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicCohortOptimization.py new file mode 100644 index 000000000..f19805301 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicCohortOptimization.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedDynamicCohortOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_scale=0.3, + crossover_rate=0.9, + learning_rate=0.05, + learning_rate_decay=0.95, + mutation_decay=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_scale = mutation_scale + self.learning_rate = learning_rate + self.learning_rate_decay = learning_rate_decay + self.mutation_decay = mutation_decay + self.crossover_rate = crossover_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + elite_indices = fitness.argsort()[: self.elite_count] + for i in range(self.population_size): + # Tournament selection for parent selection + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.rand() < self.crossover_rate: + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + child = parent1.copy() # no crossover, child is a copy of parent1 + + # Mutation based on normal distribution + mutation = np.random.normal(0, self.mutation_scale, self.dimension) + child = np.clip(child + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update best solution found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adjust learning rate and mutation scale + self.mutation_scale *= self.mutation_decay + self.learning_rate *= self.learning_rate_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedDynamicCrossoverRAMEDS.py b/nevergrad/optimization/lama/EnhancedDynamicCrossoverRAMEDS.py new file mode 100644 index 000000000..6fc2b4314 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicCrossoverRAMEDS.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedDynamicCrossoverRAMEDS: + def __init__( + self, + budget, + population_size=50, + init_crossover=0.8, + F_min=0.4, + F_max=0.8, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.init_crossover = init_crossover + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + progress = evaluations / self.budget + # Dynamically adjust mutation and crossover rates + F = self.F_min + (self.F_max - self.F_min) * np.sin(np.pi * progress) + crossover_rate = self.init_crossover * (1 - np.exp(-4 * progress)) + + # Mutation and crossover + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicCuckooHarmonyAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicCuckooHarmonyAlgorithm.py new file mode 100644 index 000000000..0af77758a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicCuckooHarmonyAlgorithm.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedDynamicCuckooHarmonyAlgorithm: + def __init__(self, budget=10000, population_size=20, dim=5, pa=0.25, beta=1.5, gamma=0.01, alpha=0.95): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another cuckoo + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + self.population = new_harmony + + def __call__(self, func): + for _ in range(self.budget): + self.update_population(func) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolution.py new file mode 100644 index 000000000..7e844c227 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np +from scipy.stats import qmc + + +class EnhancedDynamicDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedDynamicDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionImproved.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionImproved.py new file mode 100644 index 000000000..3bea6e95d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionImproved.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionImproved: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionRefined.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionRefined.py new file mode 100644 index 000000000..27dfcd859 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionRefined.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionRefined: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates = self.dynamic_adjustment( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV2.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV2.py new file mode 100644 index 000000000..33d26f0a1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV2.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionV2: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.2, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV3.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV3.py new file mode 100644 index 000000000..c0c4ebd38 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionV3.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionV3: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover.py new file mode 100644 index 000000000..955a40b02 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates = self.dynamic_adjustment( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation.py new file mode 100644 index 000000000..bbffe6f36 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation.py @@ -0,0 +1,132 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step_range=(0.01, 0.1), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step_range = dynamic_step_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + dynamic_steps = np.full(self.population_size, np.mean(self.dynamic_step_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + dynamic_step = dynamic_steps[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates, dynamic_steps = self.update_parameters( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates, dynamic_steps = self.dynamic_adjustment( + scaling_factors, crossover_rates, dynamic_steps, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + 0.1 * np.mean(fitness_values)), 0.01, 0.1 + ) + + return ( + np.clip(scaling_factors, *scaling_factor_range), + np.clip(crossover_rates, *crossover_rate_range), + np.clip(dynamic_steps, *dynamic_step_range), + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, dynamic_steps, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.6, + 1.0, + ) + dynamic_step_range = np.clip( + np.array(self.dynamic_step_range) * (1 + np.mean(fitness_values) - np.min(fitness_values)), + 0.01, + 0.1, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + dynamic_steps = np.clip(dynamic_steps, *dynamic_step_range) + + return scaling_factors, crossover_rates, dynamic_steps diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined.py new file mode 100644 index 000000000..1ca1ee3ff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined.py @@ -0,0 +1,121 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.05, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + scaling_factors, crossover_rates = self.dynamic_adjustment( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population + + def dynamic_adjustment(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return scaling_factors, crossover_rates diff --git a/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover.py b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover.py new file mode 100644 index 000000000..a160dbcf4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover: + def __init__( + self, + budget=1000, + population_size=50, + diversification_factor=0.1, + cr_range=(0.2, 0.9), + f_range=(0.2, 0.8), + ): + self.budget = budget + self.population_size = population_size + self.diversification_factor = diversification_factor + self.cr_range = cr_range + self.f_range = f_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + cr = np.random.uniform(*self.cr_range, size=self.population_size) + f = np.random.uniform(*self.f_range, size=self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + trial_individual = self.generate_trial_individual(population[i], a, b, c, f[i], cr[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + cr, f = self.adapt_parameters(cr, f, fitness_values) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, f, cr): + dimension = len(current) + mutant = np.clip(a + f * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < cr + return np.where(crossover_points, mutant, current) + + def adapt_parameters(self, cr, f, fitness_values): + mean_fitness = np.mean(fitness_values) + cr = cr * (1 + 0.1 * (mean_fitness - fitness_values)) + f = f * (1 + 0.1 * (mean_fitness - fitness_values)) + return np.clip(cr, *self.cr_range), np.clip(f, *self.f_range) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedDynamicDiversifiedHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicDiversifiedHarmonySearchOptimizer.py new file mode 100644 index 000000000..2536c0e1a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDiversifiedHarmonySearchOptimizer.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedDynamicDiversifiedHarmonySearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + self.diversification_rate = 0.2 + self.prev_best_fitness = np.Inf + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness): + return self.bandwidth * np.exp(-0.1 * best_fitness) + + def adaptive_memory_update(self, best_fitness): + return 1.0 / (1.0 + np.exp(-0.05 * best_fitness)) + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + + population = np.vstack([h[0] for h in harmony_memory]) + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + self.bandwidth = self.adaptive_bandwidth(best_fitness) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness) + + if abs(best_fitness - self.prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(fitness) / np.mean(fitness) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicDualPhaseStrategyV12.py b/nevergrad/optimization/lama/EnhancedDynamicDualPhaseStrategyV12.py new file mode 100644 index 000000000..d8c0197a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicDualPhaseStrategyV12.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedDynamicDualPhaseStrategyV12: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Modified mutation strategy for phase 1 with weighted best solution influence + gamma = 0.5 + (np.random.rand() * (1.0 - 0.5)) + mutant = population[best_idx] + self.F * gamma * (population[a] - population[b]) + else: + # Advanced mutation strategy for phase 2 using additional differential vectors + d, e, f, g = np.random.choice(idxs, 4, replace=False) + mutant = population[a] + self.F * ( + population[b] + - population[c] + + 0.3 * (population[d] - population[e]) + + 0.2 * (population[f] - population[g]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Further refined dynamic parameter adjustment + scale = iteration / total_iterations + self.F = np.clip(0.5 * np.sin(2 * np.pi * scale) + 0.5, 0.1, 1) # Adjusting range and function + self.CR = np.clip( + 0.5 * np.cos(2 * np.pi * scale) + 0.5, 0.1, 1 + ) # Cosine for exploration and exploitation balance + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicEliteAnnealingDE.py b/nevergrad/optimization/lama/EnhancedDynamicEliteAnnealingDE.py new file mode 100644 index 000000000..1ccc1b953 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicEliteAnnealingDE.py @@ -0,0 +1,165 @@ +import numpy as np + + +class EnhancedDynamicEliteAnnealingDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature for annealing + self.cooling_rate = 0.98 # Cooling rate for simulated annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.05 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicEscapeStrategyV32.py b/nevergrad/optimization/lama/EnhancedDynamicEscapeStrategyV32.py new file mode 100644 index 000000000..8fbfc0e9b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicEscapeStrategyV32.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedDynamicEscapeStrategyV32: + def __init__( + self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, escape_threshold=0.1 + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover rate + self.escape_threshold = escape_threshold # Threshold to trigger an escape mechanism + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, iteration, func): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + # Dynamic mutation factor adjustment based on iteration progress + F_dynamic = self.F * (1 + 0.5 * np.sin(2 * np.pi * iteration / self.budget)) + mutant = population[best_idx] + F_dynamic * ( + population[a] - population[b] + population[c] - population[best_idx] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, iteration): + # Dynamic crossover rate adjustment based on iteration progress + CR_dynamic = self.CR * (1 + 0.5 * np.cos(2 * np.pi * iteration / self.budget)) + crossover_mask = np.random.rand(self.dimension) < CR_dynamic + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def escape_mechanism(self, population, fitnesses, func): + # Trigger an escape mechanism if population converges + if np.std(fitnesses) < self.escape_threshold: + # Reinitialize a portion of the population + num_escape = self.pop_size // 5 + escape_indices = np.random.choice(range(self.pop_size), num_escape, replace=False) + for index in escape_indices: + population[index] = np.random.uniform(self.lower_bounds, self.upper_bounds, self.dimension) + fitnesses[index] = func(population[index]) + return population, fitnesses + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, evaluations, func) + trial = self.crossover(population[i], mutant, evaluations) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i], fitnesses[i] = trial, trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + population, fitnesses = self.escape_mechanism(population, fitnesses, func) + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedDynamicEvolutionStrategy.py new file mode 100644 index 000000000..3f2b0d4f8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicEvolutionStrategy.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 200 + mutation_rate = 0.08 + mutation_scale = 0.2 + crossover_rate = 0.7 + elite_size = 20 + + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + # Safe roulette wheel selection using max fitness scaling + max_fitness = np.max(fitness) + adjusted_fitness = max_fitness - fitness + 1e-9 # Adding small constant to avoid zero probability + probabilities = adjusted_fitness / adjusted_fitness.sum() + + chosen_parents = np.random.choice( + population_size, size=population_size - elite_size, p=probabilities + ) + parents = population[chosen_parents] + + # Crossover and mutation + np.random.shuffle(parents) + for i in range(0, len(parents) - 1, 2): + parent1, parent2 = parents[i], parents[i + 1] + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child1 = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + child2 = np.concatenate([parent2[:cross_point], parent1[cross_point:]]) + else: + child1, child2 = parent1.copy(), parent2.copy() + + new_population.extend([child1, child2]) + + # Mutation in the new population + new_population = np.array(new_population) + mutation_masks = np.random.rand(len(new_population), self.dim) < mutation_rate + mutations = np.random.normal(0, mutation_scale, (len(new_population), self.dim)) + new_population = np.clip(new_population + mutation_masks * mutations, self.lb, self.ub) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += len(new_population) + + # Replace the worst with new individuals + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + # Update best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicExplorationOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicExplorationOptimizer.py new file mode 100644 index 000000000..b1ce563cd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicExplorationOptimizer.py @@ -0,0 +1,166 @@ +import numpy as np + + +class EnhancedDynamicExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 40 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor for better exploration + max_exploration_cycles = 40 # Reduced maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedDynamicExplorationOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..9ddd5529c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmFinal.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmFinal.py new file mode 100644 index 000000000..45b6fea34 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmFinal.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithmFinal: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmImproved.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmImproved.py new file mode 100644 index 000000000..b8b0ec918 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmImproved.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithmImproved: + def __init__( + self, + population_size=50, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + exploration_range=0.4, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRedesigned.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRedesigned.py new file mode 100644 index 000000000..3711acdb4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRedesigned.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithmRedesigned: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + else: + self.alpha[k] *= 1.1 # Increase alpha + self.beta[k] *= 0.9 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRefined.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRefined.py new file mode 100644 index 000000000..31c4e3f22 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmRefined.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithmRefined: + def __init__( + self, + population_size=50, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + exploration_range=0.3, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmV2.py new file mode 100644 index 000000000..e8aaea5b9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmV2.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDynamicFireworkAlgorithmV2: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py new file mode 100644 index 000000000..e34f95991 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation.py new file mode 100644 index 000000000..449821e09 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10.py new file mode 100644 index 000000000..5131686f6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.9, self.fireworks[k][3] * 1.1) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return x + np.random.uniform(-0.1, 0.1, size=self.dim) # Refine mutation to add variation + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11.py new file mode 100644 index 000000000..5576e213d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return x + np.random.uniform(-0.05, 0.05, size=self.dim) # Refine mutation to add less variation + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12.py new file mode 100644 index 000000000..099559989 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return x + np.random.uniform( + -0.03, 0.03, size=self.dim + ) # Adjust mutation rate for more controlled exploration + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13.py new file mode 100644 index 000000000..3f650016e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return x + np.random.normal(0, 0.03, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2.py new file mode 100644 index 000000000..68ee1ca27 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3.py new file mode 100644 index 000000000..cdba75cd8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4.py new file mode 100644 index 000000000..6f4a65472 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5.py new file mode 100644 index 000000000..5681811ec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6.py new file mode 100644 index 000000000..6fd5241d7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7.py new file mode 100644 index 000000000..fc39ef6aa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8.py new file mode 100644 index 000000000..4432ea4da --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.95, self.fireworks[k][3] * 1.05) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9.py new file mode 100644 index 000000000..f1defcf11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=10, + mutation_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, self.initial_alpha, self.initial_beta) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.fireworks[k] = self.fireworks[k][:2] + (self.fireworks[k][2] * 0.9, self.fireworks[k][3] * 1.1) + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def mutate(self, x, func): + if np.random.rand() < self.mutation_rate: + return x + np.random.uniform(-0.1, 0.1, size=self.dim) # Refine mutation to add variation + return x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + new_spark = self.mutate(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0, alpha, beta) + self.update_parameters(i) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization.py new file mode 100644 index 000000000..16362d0c8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization.py new file mode 100644 index 000000000..bb15be989 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization.py @@ -0,0 +1,118 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + colony_size=15, + max_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.colony_size = colony_size + self.max_trials = max_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.max_trials}, + ) + return res.x + + def bee_colony_optimization(self, x, func): + best_x = np.copy(x) + best_fitness = func(x) + + for _ in range(self.colony_size): + new_x = self.explosion_operator(x, func, np.random.uniform(0.1, 0.5)) + new_x = self.local_search(new_x, func) + new_fitness = func(new_x) + + if new_fitness < best_fitness: + best_x = np.copy(new_x) + best_fitness = new_fitness + + return best_x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + else: + new_spark = self.bee_colony_optimization(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization.py new file mode 100644 index 000000000..9b4d8d227 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.95 # Fine-tune alpha update + self.beta[k] *= 1.05 # Fine-tune beta update + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithHybridSearch.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithHybridSearch.py new file mode 100644 index 000000000..931cacaa9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithHybridSearch.py @@ -0,0 +1,118 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithHybridSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + colony_size=15, + max_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.colony_size = colony_size + self.max_trials = max_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.max_trials}, + ) + return res.x + + def bee_colony_optimization(self, x, func): + best_x = np.copy(x) + best_fitness = func(x) + + for _ in range(self.colony_size): + new_x = self.explosion_operator(x, func, np.random.uniform(0.1, 0.5)) + new_x = self.local_search(new_x, func) + new_fitness = func(new_x) + + if new_fitness < best_fitness: + best_x = np.copy(new_x) + best_fitness = new_fitness + + return best_x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + else: + new_spark = self.bee_colony_optimization(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization.py new file mode 100644 index 000000000..db80de646 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolution.py new file mode 100644 index 000000000..c525dde58 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolution.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedDynamicFireworkDifferentialEvolution: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, f_init=0.7, f_final=0.4, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV2.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV2.py new file mode 100644 index 000000000..dea91360b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV2.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedDynamicFireworkDifferentialEvolutionV2: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, f_init=0.8, f_final=0.2, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV3.py b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV3.py new file mode 100644 index 000000000..961cc2330 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicFireworkDifferentialEvolutionV3.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedDynamicFireworkDifferentialEvolutionV3: + def __init__( + self, budget=10000, n_fireworks=50, n_sparks=15, f_init=0.5, f_final=0.2, cr_init=0.9, cr_final=0.1 + ): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.f_init = f_init + self.f_final = f_final + self.cr_init = cr_init + self.cr_final = cr_final + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func, f, cr): + for i in range(self.n_fireworks): + alpha = 0.5 * (1 - i / self.n_fireworks) # Dynamic alpha based on iteration + sparks = self.explode_firework(fireworks[i], alpha) + + for j in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds(fireworks[idx1] + f * (fireworks[idx2] - fireworks[idx3])) + + trial = np.where(np.random.rand(self.dim) < cr, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adapt_params(self, iteration): + f = self.f_init + (self.f_final - self.f_init) * (iteration / self.budget) ** 0.5 + cr = self.cr_init + (self.cr_final - self.cr_init) * (iteration / self.budget) ** 0.5 + return f, cr + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + f, cr = self.adapt_params(i) + fireworks = self.evolve_fireworks(fireworks, func, f, cr) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..8f742c4b9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedDynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + memory_size = 20 # Increased memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Smart Memory Reinforcement + if evaluations % (self.budget // 10) == 0: + best_idx = np.argmin(memory_scores) + for _ in range(memory_size // 4): + x_candidate = memory[best_idx] + np.random.normal(0, T, self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.008): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py b/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py new file mode 100644 index 000000000..c68427fc3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py @@ -0,0 +1,140 @@ +import numpy as np + + +class EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.98 # Cooling rate for initial phase + beta_initial = 2.0 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.5 + alpha = 0.99 + elif evaluations < phase2: + beta = 2.0 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.5 + alpha = 0.95 + else: + beta = 1.0 + alpha = 0.93 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithm.py new file mode 100644 index 000000000..98b3cadfd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithm.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedDynamicHarmonyAlgorithm: + def __init__( + self, + budget=10000, + population_size=20, + dim=5, + pa=0.25, + beta=1.5, + gamma=0.01, + alpha=0.95, + exploring_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.exploring_rate = exploring_rate + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another member + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + # Dynamic exploration rate adjustment + exploring_rate = self.exploring_rate * np.exp(-iteration / self.budget) + + # Further exploration by random perturbation to improve diversity + new_harmony[i] += np.random.normal(0, exploring_rate, self.dim) + + self.population = new_harmony + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithmV2.py new file mode 100644 index 000000000..d8db15c75 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonyAlgorithmV2.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedDynamicHarmonyAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=20, + dim=5, + pa=0.25, + beta=1.5, + gamma=0.01, + alpha=0.95, + exploring_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.exploring_rate = exploring_rate + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another member + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + # Dynamic exploration rate adjustment + exploring_rate = self.exploring_rate * np.exp(-iteration / self.budget) + + # Further exploration by random perturbation to improve diversity + new_harmony[i] += np.random.normal(0, exploring_rate, self.dim) + + # Clamp new solutions within the search space bounds + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + self.population = new_harmony + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonyFireworksSearch.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonyFireworksSearch.py new file mode 100644 index 000000000..fb366879a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonyFireworksSearch.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedDynamicHarmonyFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5, gamma=1.0, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizer.py new file mode 100644 index 000000000..f41409b80 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchOptimizer: + def __init__( + self, budget=10000, population_size=20, dim=5, pa=0.1, hmcr=0.7, bw=0.01, exploring_rate=0.1 + ): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.hmcr = hmcr + self.bw = bw + self.exploring_rate = exploring_rate + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Harmony Memory Consideration Rate (HMCR) + if np.random.rand() < self.hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + # Perform random selection from harmony memory + new_solution = harmony_pool[np.random.randint(self.population_size)] + + # Pitch Adjustment Rate (PAR) + for k in range(self.dim): + if np.random.rand() < self.bw: + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + if np.random.rand() < self.exploring_rate: + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizerV7.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizerV7.py new file mode 100644 index 000000000..e6f073133 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchOptimizerV7.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchOptimizerV7: + def __init__(self, budget=10000, population_size=20, dim=5): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + self.steps = 1 + + def levy_flight(self): + sigma1 = 1.0 + sigma2 = 1.0 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + pa = 0.2 + 0.3 * (1 - iteration / self.budget) # Adjusted Pitch Adjustment Rate range + hmcr = 0.5 + 0.5 * (1 - iteration / self.budget) # Increased HMCR range + + if np.random.rand() < pa: + if np.random.rand() < hmcr: + j = np.random.randint(self.population_size) + new_solution = harmony_pool[j] + else: + new_solution = harmony_pool[np.random.randint(self.population_size)] + + for k in range(self.dim): + if np.random.rand() < 0.2: # Adjusted pitch adjustment probability + new_solution[k] = new_solution[k] + self.levy_flight()[k] + + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + for i in range(self.population_size): + if np.random.rand() < 0.1: # Enhanced exploring rate + new_harmony[i] = np.random.uniform(-5.0, 5.0, self.dim) + else: + new_harmony[i] = harmony_pool[np.random.randint(self.population_size)] + + self.population = new_harmony + + def adaptive_step_size(self, iteration): + self.steps = 1 + 0.1 * np.log(1 + iteration) + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + self.adaptive_step_size(itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV5.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV5.py new file mode 100644 index 000000000..a15bd8dc0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV5.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchV5: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV6.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV6.py new file mode 100644 index 000000000..197cfb627 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV6.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchV6: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV7.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV7.py new file mode 100644 index 000000000..42f83b11a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV7.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchV7: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV8.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV8.py new file mode 100644 index 000000000..d53bea1bf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonySearchV8.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicHarmonySearchV8: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicHarmonyTabuSearch.py b/nevergrad/optimization/lama/EnhancedDynamicHarmonyTabuSearch.py new file mode 100644 index 000000000..507ec6ebe --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHarmonyTabuSearch.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedDynamicHarmonyTabuSearch: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + self.pitch_adjustment_rate *= 0.9 # Decrease pitch adjustment rate every 100 iterations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + self.adjust_parameters() # Adjust parameters periodically + self.iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/EnhancedDynamicHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..07a486e46 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHybridDEPSOWithEliteMemory.py @@ -0,0 +1,166 @@ +import numpy as np + + +class EnhancedDynamicHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = np.random.choice(np.delete(np.arange(population_size), i), 3, replace=False) + a, b, c = population[indices] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = np.random.choice(np.delete(np.arange(population_size), i), 2, replace=False) + a, b = population[indices] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + if elite_solutions.shape[0] > elite_size: + elite_solutions = elite_solutions[:elite_size] + new_population[:elite_size] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21.py b/nevergrad/optimization/lama/EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21.py new file mode 100644 index 000000000..1bae54519 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, gaussian_std=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + mutation_rate = self.compute_mutation_rate() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, mutation_rate + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < 0.3: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy + + def compute_mutation_rate(self): + return np.exp(-1.0 * len(self.convergence_curve) / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedDynamicHybridOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicHybridOptimization.py new file mode 100644 index 000000000..b3eed720c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHybridOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class EnhancedDynamicHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 15 # Reduced maximum exploration cycles for quicker reaction + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.85 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedDynamicHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicHybridOptimizer.py new file mode 100644 index 000000000..2266441f2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicHybridOptimizer.py @@ -0,0 +1,171 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def simulated_annealing(self, x, func, budget): + T = 1.0 + T_min = 0.0001 + alpha = 0.9 + best = x + best_score = func(x) + self.eval_count += 1 + while self.eval_count < budget and T > T_min: + i = 1 + while i <= 100: + candidate = x + np.random.normal(0, 1, self.dim) + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_score = func(candidate) + self.eval_count += 1 + if candidate_score < best_score: + best = candidate + best_score = candidate_score + else: + ap = np.exp((best_score - candidate_score) / T) + if np.random.rand() < ap: + best = candidate + best_score = candidate_score + i += 1 + T = T * alpha + return best, best_score + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + best_fitness_history = [g_best_fitness] + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + best_fitness_history.append(g_best_fitness) + + # Population resizing based on convergence rate + if len(best_fitness_history) > 10 and best_fitness_history[-10] == g_best_fitness: + self.init_pop_size = max(5, self.init_pop_size // 2) + population = population[: self.init_pop_size] + fitness = fitness[: self.init_pop_size] + velocities = velocities[: self.init_pop_size] + F_values = F_values[: self.init_pop_size] + CR_values = CR_values[: self.init_pop_size] + p_best = p_best[: self.init_pop_size] + p_best_fitness = p_best_fitness[: self.init_pop_size] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + # Hybridization with Simulated Annealing + if self.eval_count < self.budget: + remaining_budget = self.budget - self.eval_count + g_best, g_best_fitness = self.simulated_annealing(g_best, func, remaining_budget) + + self.f_opt = g_best_fitness + self.x_opt = g_best + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearch.py b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearch.py new file mode 100644 index 000000000..bf914bab9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearch.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedDynamicLevyHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV2.py b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV2.py new file mode 100644 index 000000000..459c150ba --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV2.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedDynamicLevyHarmonySearchV2: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV3.py b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV3.py new file mode 100644 index 000000000..ffb704881 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLevyHarmonySearchV3.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedDynamicLevyHarmonySearchV3: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithm.py new file mode 100644 index 000000000..f57b00f19 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicLocalSearchFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV2.py new file mode 100644 index 000000000..46ef9da46 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV2.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicLocalSearchFireworkAlgorithmV2: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV3.py new file mode 100644 index 000000000..14bbe7cf4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicLocalSearchFireworkAlgorithmV3.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicLocalSearchFireworkAlgorithmV3: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicMemoryStrategyV51.py b/nevergrad/optimization/lama/EnhancedDynamicMemoryStrategyV51.py new file mode 100644 index 000000000..16cdfa183 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicMemoryStrategyV51.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedDynamicMemoryStrategyV51: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + 0.1 * memory_effect + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + linear_progress = iteration / total_iterations + self.F = np.clip(0.5 + 0.4 * np.sin(np.pi * linear_progress), 0.1, 1) + self.CR = np.clip(0.9 - 0.8 * linear_progress, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicMultiPhaseAnnealingPlus.py b/nevergrad/optimization/lama/EnhancedDynamicMultiPhaseAnnealingPlus.py new file mode 100644 index 000000000..20acf930d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicMultiPhaseAnnealingPlus.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedDynamicMultiPhaseAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Expanded memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedDynamicMutationSearch.py b/nevergrad/optimization/lama/EnhancedDynamicMutationSearch.py new file mode 100644 index 000000000..c0c174ea0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicMutationSearch.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedDynamicMutationSearch: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.5, + F_max=1.2, + memory_size=30, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite tracking + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track the best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Incorporate feedback into crossover rate adaptation + self.crossover_rate = 0.5 + 0.5 * np.sin(np.pi * evaluations / self.budget) + + # Update elite and memory + sorted_indices = np.argsort(fitness) + elite = population[sorted_indices[: self.elite_size]] + elite_fitness = fitness[sorted_indices[: self.elite_size]] + for i in range(self.elite_size): + if elite_fitness[i] < np.max(memory_fitness): + worst_idx = np.argmax(memory_fitness) + memory[worst_idx] = elite[i] + memory_fitness[worst_idx] = elite_fitness[i] + + for i in range(self.population_size): + F = self.F_min + (self.F_max - self.F_min) * (1 - np.std(fitness) / np.ptp(fitness)) + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(population[i] + F * (best_solution - population[i] + a - b), lb, ub) + + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedDynamicNichePSO_DE_LS.py b/nevergrad/optimization/lama/EnhancedDynamicNichePSO_DE_LS.py new file mode 100644 index 000000000..08a502e96 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicNichePSO_DE_LS.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicNichePSO_DE_LS: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 30 # Increased swarm size for better exploration + self.init_num_niches = 5 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < 0.2 and evaluations < self.budget: # Lower local search probability + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicNichingDEPSO.py b/nevergrad/optimization/lama/EnhancedDynamicNichingDEPSO.py new file mode 100644 index 000000000..c7487ce5c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicNichingDEPSO.py @@ -0,0 +1,137 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicNichingDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.init_num_niches = 5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + best_niche_idx = np.argmin(local_best_fits) + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (local_bests[best_niche_idx] - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.5 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicPrecisionBalancedEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicPrecisionBalancedEvolution.py new file mode 100644 index 000000000..8933ad31d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicPrecisionBalancedEvolution.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedDynamicPrecisionBalancedEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + elite_size = 15 + mutation_factor = 0.8 + crossover_probability = 0.8 + recombination_weight = 0.15 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Differential mutation with dynamic scaling + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = a + mutation_factor * np.random.normal() * (b - c) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Dynamic recombination towards best and local search + mutant_vector = (1 - recombination_weight) * mutant_vector + recombination_weight * self.x_opt + trial_vector = population[i] + 0.5 * np.random.normal(size=self.dim) * ( + self.x_opt - population[i] + ) + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Binomial crossover with adaptive crossover probability + trial_vector = np.array( + [ + ( + mutant_vector[j] + if np.random.rand() < crossover_probability or j == np.random.randint(self.dim) + else population[i, j] + ) + for j in range(self.dim) + ] + ) + + # Fitness evaluation and selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Adjust mutation and crossover parameters dynamically + mutation_factor = np.clip( + mutation_factor + 0.015 * (self.f_opt / np.median(fitness) - 1), 0.5, 1.0 + ) + crossover_probability = np.clip( + crossover_probability - 0.02 * (self.f_opt / np.median(fitness) - 1), 0.7, 0.95 + ) + + # Elite preservation strategy + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in np.random.choice(range(population_size), elite_size, replace=False): + population[idx] = elite_individuals[np.random.randint(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicPrecisionOptimizer.py b/nevergrad/optimization/lama/EnhancedDynamicPrecisionOptimizer.py new file mode 100644 index 000000000..7ecb0c14e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicPrecisionOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedDynamicPrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature, slightly higher for more initial exploration + T_min = 0.0003 # Lower minimum temperature threshold for longer fine-tuning + alpha = 0.95 # Slower cooling rate, providing more opportunity for search + + # Mutation and crossover parameters further refined + F = 0.8 # Increased mutation factor for more aggressive explorative moves + CR = 0.87 # Adjusted crossover probability for balanced exploration and exploitation + + population_size = 90 # Increased population size for more diverse solutions + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Incorporating more dynamic mutation and adaptive temperature schedule + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # More dynamic mutation adapting with both temperature and normalized progress + dynamic_F = F * ( + np.exp(-0.2 * T) + (0.5 - 0.5 * np.cos(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion considering the annealing process and dynamic fitness improvement + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.03 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Gradual and adaptive cooling with an additional modulation to accommodate search stages + adaptive_cooling = alpha - 0.02 * np.sin(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolution.py new file mode 100644 index 000000000..40df05328 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolution.py @@ -0,0 +1,177 @@ +import numpy as np + + +class EnhancedDynamicQuantumDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory.py new file mode 100644 index 000000000..2b04bb2dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + # Standard DE mutation and crossover + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart.py new file mode 100644 index 000000000..a7cbc06ad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart.py @@ -0,0 +1,145 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.initial_num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % (self.population_size * 10) == 0: + if diversity < self.diversity_threshold: + for j in range(num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..2d9e4e53f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimization.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=50, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.0, + min_cognitive_weight=1.0, + max_social_weight=1.5, + min_social_weight=0.5, + boundary_handling=True, + alpha=0.6, + delta=0.2, + decay_rate=0.98, + max_step=0.3, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationFinal.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationFinal.py new file mode 100644 index 000000000..277e0a50b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationFinal.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationFinal: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + beta=0.9, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.beta = beta + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters with an enhanced strategy + self.inertia_weight = self.max_inertia_weight - self.beta * (iteration / self.budget) * ( + self.max_inertia_weight - self.min_inertia_weight + ) + self.cognitive_weight = self.max_cognitive_weight - self.beta * (iteration / self.budget) * ( + self.max_cognitive_weight - self.min_cognitive_weight + ) + self.social_weight = self.min_social_weight + self.beta * (iteration / self.budget) * ( + self.max_social_weight - self.min_social_weight + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationImproved.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationImproved.py new file mode 100644 index 000000000..60ea70b0a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationImproved.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationImproved: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + beta=0.9, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.beta = beta + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters with an enhanced strategy + self.inertia_weight = self.max_inertia_weight - self.beta * (iteration / self.budget) * ( + self.max_inertia_weight - self.min_inertia_weight + ) + self.cognitive_weight = self.max_cognitive_weight - self.beta * (iteration / self.budget) * ( + self.max_cognitive_weight - self.min_cognitive_weight + ) + self.social_weight = self.min_social_weight + self.beta * (iteration / self.budget) * ( + self.max_social_weight - self.min_social_weight + ) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV10.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV10.py new file mode 100644 index 000000000..943847309 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV10.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV10: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV11.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV11.py new file mode 100644 index 000000000..d7ed2e2a2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV11.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV11: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV12.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV12.py new file mode 100644 index 000000000..aa9e7cbd1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV12.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV12: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV13.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV13.py new file mode 100644 index 000000000..d76292069 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV13.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV13: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.1, + max_cognitive_weight=1.5, + min_cognitive_weight=0.5, + max_social_weight=1.5, + min_social_weight=0.5, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + gamma=0.1, + mutation_rate=0.2, + mutation_strength=0.2, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV14.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV14.py new file mode 100644 index 000000000..89ae68b23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV14.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV14: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV15.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV15.py new file mode 100644 index 000000000..46f659ecb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV15.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV15: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV16.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV16.py new file mode 100644 index 000000000..520cb9c0a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV16.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV16: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.3, + max_cognitive_weight=1.0, + min_cognitive_weight=0.3, + max_social_weight=1.0, + min_social_weight=0.3, + boundary_handling=True, + alpha=0.5, + decay_rate=0.95, + exploration_rate=0.2, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-0.1, 0.1, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.exploration_rate = exploration_rate + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = ( + self.max_inertia_weight + - (self.max_inertia_weight - self.min_inertia_weight) * iteration / self.budget + ) + self.cognitive_weight = ( + self.max_cognitive_weight + - (self.max_cognitive_weight - self.min_cognitive_weight) * iteration / self.budget + ) + self.social_weight = ( + self.max_social_weight + - (self.max_social_weight - self.min_social_weight) * iteration / self.budget + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV17.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV17.py new file mode 100644 index 000000000..14adbfeed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV17.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV17: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV18.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV18.py new file mode 100644 index 000000000..438b6f666 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV18.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV18: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + self.delta *= self.decay_rate + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV19.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV19.py new file mode 100644 index 000000000..b0482c89c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV19.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV19: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..34dd55c0f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV2.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV20.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV20.py new file mode 100644 index 000000000..ceb005b87 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV20.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV20: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV21.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV21.py new file mode 100644 index 000000000..6c67a97ab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV21.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV21: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + mutation_strength=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV22.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV22.py new file mode 100644 index 000000000..68b018308 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV22.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV22: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.2, + max_cognitive_weight=1.5, + min_cognitive_weight=0.5, + max_social_weight=1.2, + min_social_weight=0.3, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.4, + exploration_rate=0.3, + gamma=0.1, + mutation_rate=0.2, + mutation_strength=0.2, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.random.uniform(-max_step, max_step, (self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + self.mutation_strength = mutation_strength + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + self.alpha * new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity with adjustable strength + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_strength, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV23.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV23.py new file mode 100644 index 000000000..ef9c99d16 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV23.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV23: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV24.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV24.py new file mode 100644 index 000000000..053fdccf2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV24.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV24: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV25.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV25.py new file mode 100644 index 000000000..755cd1c58 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV25.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV25: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV26.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV26.py new file mode 100644 index 000000000..563bfc963 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV26.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV26: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV27.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV27.py new file mode 100644 index 000000000..a4ed2c463 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV27.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV27: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV28.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV28.py new file mode 100644 index 000000000..8794e9363 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV28.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV28: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV3.py new file mode 100644 index 000000000..3d7a2aa0e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV3.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV3: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + new_position += np.random.normal(0, 0.1, self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV4.py new file mode 100644 index 000000000..4cc3f79f2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV4.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV4: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV5.py new file mode 100644 index 000000000..b44dae43f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV5.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV5: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV6.py new file mode 100644 index 000000000..9b78cd476 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV6.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV6: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV7.py new file mode 100644 index 000000000..34f8fd4ab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV7.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV7: + def __init__( + self, + budget=10000, + num_particles=100, + inertia_weight=0.5, + cognitive_weight=1.5, + social_weight=1.0, + boundary_handling=True, + step_size=0.1, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.step_size = step_size + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + pass + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV8.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV8.py new file mode 100644 index 000000000..48cb8ebd5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV8.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV8: + def __init__( + self, + budget=10000, + num_particles=100, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + boundary_handling=True, + step_size=0.1, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.step_size = step_size + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + pass + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV9.py b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV9.py new file mode 100644 index 000000000..d76ea7125 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicQuantumSwarmOptimizationV9.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedDynamicQuantumSwarmOptimizationV9: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + mutation_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + self.mutation_rate = mutation_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + # Mutation step to introduce diversity + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, 0.1, self.dim) + new_position = np.clip(new_position + mutation_vector, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..d4c5f9ef2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,180 @@ +import numpy as np + + +class EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive beta and alpha adjustments based on phases + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Introducing crossover mechanism to create new candidates + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + # Introducing mutation mechanism to create new candidates + if evaluations % (self.budget // 3) == 0: + for i in range(memory_size // 3): + x_mut = memory[np.random.randint(memory_size)] + x_mut += np.random.normal(0, 0.1, self.dim) + x_mut = np.clip(x_mut, func.bounds.lb, func.bounds.ub) + f_mut = func(x_mut) + evaluations += 1 + if f_mut < self.f_opt: + self.f_opt = f_mut + self.x_opt = x_mut + + worst_idx = np.argmax(memory_scores) + if f_mut < memory_scores[worst_idx]: + memory[worst_idx] = x_mut + memory_scores[worst_idx] = f_mut + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/EnhancedDynamicRefinementGradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/EnhancedDynamicRefinementGradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..bfeb4eaad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicRefinementGradientBoostedMemoryAnnealing.py @@ -0,0 +1,150 @@ +import numpy as np + + +class EnhancedDynamicRefinementGradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Extra phase for intensive local search near the best solution found + if evaluations > 3 * self.budget // 4: + x_candidate = self._local_refinement(func, self.x_opt) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedDynamicRestartAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedDynamicRestartAdaptiveDE.py new file mode 100644 index 000000000..6047cc0eb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicRestartAdaptiveDE.py @@ -0,0 +1,150 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicRestartAdaptiveDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.85 + self.elitism_rate = 0.3 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 50 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + if np.random.rand() < 0.5: + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + else: + # Gradient-based adjustment + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + result = minimize(func, best_x + perturbation, method="BFGS", options={"maxiter": 10}) + + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/EnhancedDynamicStrategyAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedDynamicStrategyAdaptiveDE.py new file mode 100644 index 000000000..de3e921c9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicStrategyAdaptiveDE.py @@ -0,0 +1,169 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedDynamicStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 10 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Dynamic strategy: switch mutation strategy based on generation + if generation % 3 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + else: + mutant = ( + x1 + + mutation_factor * (x2 - x3) + + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x1) + ) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithm.py new file mode 100644 index 000000000..c5632378d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithm.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedDynamicallyAdaptiveFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved.py b/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved.py new file mode 100644 index 000000000..1f5a0875b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/EnhancedEliteAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..0f342c42d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteAdaptiveHybridDEPSO.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EnhancedEliteAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + elite_size = 5 + w = 0.6 # Increased inertia weight for PSO + c1 = 1.0 # Cognitive coefficient for PSO + c2 = 1.0 # Social coefficient for PSO + initial_F = 0.7 # Slightly reduced differential weight for DE + initial_CR = 0.8 # Slightly reduced crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with elite handling + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..37ff245c0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,206 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.neighbors import NearestNeighbors + + +class EnhancedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.diversity_threshold = diversity_threshold + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def measure_diversity(self, population): + if len(population) < 2: + return np.inf # Maximum diversity when population size is very small + nbrs = NearestNeighbors(n_neighbors=2, algorithm="ball_tree").fit(population) + distances, _ = nbrs.kneighbors(population) + avg_distance = np.mean(distances[:, 1]) # Average distance to the nearest neighbor + return avg_distance + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enforce population diversity if it falls below a threshold + avg_distance = self.measure_diversity(population) + if avg_distance < self.diversity_threshold: + # Introduce new individuals to increase diversity + for _ in range(self.init_pop_size - current_pop_size): + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV2.py b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV2.py new file mode 100644 index 000000000..e211f7022 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV2.py @@ -0,0 +1,206 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.neighbors import NearestNeighbors + + +class EnhancedEliteAdaptiveMemoryHybridOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.diversity_threshold = diversity_threshold + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def measure_diversity(self, population): + if len(population) < 2: + return np.inf # Maximum diversity when population size is very small + nbrs = NearestNeighbors(n_neighbors=2, algorithm="ball_tree").fit(population) + distances, _ = nbrs.kneighbors(population) + avg_distance = np.mean(distances[:, 1]) # Average distance to the nearest neighbor + return avg_distance + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enforce population diversity if it falls below a threshold + avg_distance = self.measure_diversity(population) + if avg_distance < self.diversity_threshold: + # Introduce new individuals to increase diversity + for _ in range(self.init_pop_size - current_pop_size): + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV6.py b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV6.py new file mode 100644 index 000000000..a400d04ad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV6.py @@ -0,0 +1,206 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.neighbors import NearestNeighbors + + +class EnhancedEliteAdaptiveMemoryHybridOptimizerV6: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.diversity_threshold = diversity_threshold + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def measure_diversity(self, population): + if len(population) < 2: + return np.inf # Maximum diversity when population size is very small + nbrs = NearestNeighbors(n_neighbors=2, algorithm="ball_tree").fit(population) + distances, _ = nbrs.kneighbors(population) + avg_distance = np.mean(distances[:, 1]) # Average distance to the nearest neighbor + return avg_distance + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 2, self.min_pop_size) # faster reduction + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enforce population diversity if it falls below a threshold + avg_distance = self.measure_diversity(population) + if avg_distance < self.diversity_threshold: + # Introduce new individuals to increase diversity + for _ in range(self.init_pop_size - current_pop_size): + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + new_fitness = func(new_individual) + self.eval_count += 1 + population = np.vstack([population, new_individual]) + fitness = np.append(fitness, new_fitness) + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV7.py b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV7.py new file mode 100644 index 000000000..9266c2fe6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteAdaptiveMemoryHybridOptimizerV7.py @@ -0,0 +1,174 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedEliteAdaptiveMemoryHybridOptimizerV7: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + blend_crossover_prob=0.3, + max_no_improvement=100, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.blend_crossover_prob = blend_crossover_prob + self.max_no_improvement = max_no_improvement + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= self.max_no_improvement: + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteCrowdingMemoryHybridOptimizerV3.py b/nevergrad/optimization/lama/EnhancedEliteCrowdingMemoryHybridOptimizerV3.py new file mode 100644 index 000000000..b1f114ba7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteCrowdingMemoryHybridOptimizerV3.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedEliteCrowdingMemoryHybridOptimizerV3: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + mem_size=50, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.mem_size = mem_size + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + dist[i] += np.linalg.norm(population[i] - population[j]) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.mem_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Maintain diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individuals = np.random.uniform(self.bounds[0], self.bounds[1], (1, self.dim)) + distances = self.crowding_distance(new_individuals) + if np.min(distances) > np.min(dist): + population = np.vstack([population, new_individuals]) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, (1, self.dim))]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individuals]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveDE.py new file mode 100644 index 000000000..dd6c7769e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveDE.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedEliteGuidedAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive_max_size = 100 + self.learning_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + archive = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + if archive and np.random.rand() < 0.5: + archive_idx = np.random.choice(len(archive), 3, replace=False) + x1, x2, x3 = np.array(archive)[archive_idx] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + archive.extend(new_pop) + if len(archive) > self.archive_max_size: + archive = archive[-self.archive_max_size :] + + if self.budget % 50 == 0 and archive: + archive_idx = np.random.choice(len(archive)) + archive_ind = archive[archive_idx] + f_archive = func(archive_ind) + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + pop = np.vstack((elite_pop, new_pop[: self.pop_size - elite_count])) + fitness = np.hstack((elite_fitness, fitness[: self.pop_size - elite_count])) + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveRestartDE.py b/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveRestartDE.py new file mode 100644 index 000000000..c0353aad2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedAdaptiveRestartDE.py @@ -0,0 +1,115 @@ +import numpy as np + + +class EnhancedEliteGuidedAdaptiveRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.restart_threshold = 0.01 + self.max_generations = int(self.budget / self.pop_size) + self.diversity_threshold = 0.1 + + def __call__(self, func): + def initialize_population(): + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + return pop, fitness + + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + pop, fitness = initialize_population() + self.budget -= self.pop_size + + generation = 0 + best_fitness_history = [] + + while self.budget > 0: + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / self.max_generations) + ) + + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + best_fitness_history.append(np.min(fitness)) + + if len(best_fitness_history) > 10: + recent_improvement = np.abs(best_fitness_history[-10] - best_fitness_history[-1]) + if recent_improvement < self.restart_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + continue + + diversity = np.mean(np.std(pop, axis=0)) + if diversity < self.diversity_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedDualMutationDE.py b/nevergrad/optimization/lama/EnhancedEliteGuidedDualMutationDE.py new file mode 100644 index 000000000..cccb8d306 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedDualMutationDE.py @@ -0,0 +1,118 @@ +import numpy as np + + +class EnhancedEliteGuidedDualMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.archive = [] + self.mutation_prob = 0.5 + self.diversity_threshold = 1e-5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided dual mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < self.mutation_prob: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local Search mechanism + for idx in elite_indices: + if np.random.rand() < self.local_search_prob: + local_search_ind = pop[idx] + np.random.normal(0, 0.1, self.dim) + local_search_ind = np.clip(local_search_ind, lower_bound, upper_bound) + f_local = func(local_search_ind) + self.budget -= 1 + if f_local < fitness[idx]: + pop[idx] = local_search_ind + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = local_search_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + # Diversity preservation mechanism + diversity = np.mean(np.std(combined_pop, axis=0)) + if diversity < self.diversity_threshold: + combined_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + combined_fitness = np.array([func(ind) for ind in combined_pop]) + self.budget -= self.pop_size + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v81.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v81.py new file mode 100644 index 000000000..fb5cb19a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v81.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMassQGSA_v81: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v82.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v82.py new file mode 100644 index 000000000..f443d4f60 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v82.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMassQGSA_v82: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.6 # Adjusted crossover rate + self.explore_rate = 0.4 # Adjusted explore rate + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v83.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v83.py new file mode 100644 index 000000000..e8d42207a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v83.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMassQGSA_v83: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) * 0.1 + r2 = np.random.rand(self.dimension) * 0.1 + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v85.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v85.py new file mode 100644 index 000000000..34b364543 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v85.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMassQGSA_v85: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) * 0.1 + r2 = np.random.rand(self.dimension) * 0.1 + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = (self.crossover_rate + 0.1) * agents[elite_agent_idx] + ( + 1 - self.crossover_rate - 0.1 + ) * agents[i] + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v86.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v86.py new file mode 100644 index 000000000..43383d762 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMassQGSA_v86.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMassQGSA_v86: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) * 0.1 + r2 = np.random.rand(self.dimension) * 0.1 + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = (self.crossover_rate + 0.1) * agents[elite_agent_idx] + ( + 1 - self.crossover_rate - 0.1 + ) * agents[i] + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteGuidedMutationDE_v2.py b/nevergrad/optimization/lama/EnhancedEliteGuidedMutationDE_v2.py new file mode 100644 index 000000000..5939e9e6d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteGuidedMutationDE_v2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedEliteGuidedMutationDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.stagnation_threshold = 20 + self.local_search_prob = 0.3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + new_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in new_pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedEliteHybridOptimizer.py new file mode 100644 index 000000000..cb06e1e1f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteHybridOptimizer.py @@ -0,0 +1,163 @@ +import numpy as np + + +class EnhancedEliteHybridOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def simulated_annealing(self, x, func, budget, initial_temp=100, cooling_rate=0.95): + best_x = x.copy() + best_f = func(x) + current_x = x.copy() + current_f = best_f + temp = initial_temp + + for _ in range(budget): + if temp <= 0: + break + candidate_x = np.clip( + current_x + np.random.uniform(-0.5, 0.5, self.dim), self.bounds[0], self.bounds[1] + ) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < np.exp((current_f - candidate_f) / temp): + current_x = candidate_x + current_f = candidate_f + if candidate_f < best_f: + best_x = candidate_x + best_f = candidate_f + + temp *= cooling_rate + + return best_x, best_f + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + stagnant_iterations = 0 + max_stagnant_iterations = 100 + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + stagnant_iterations = 0 + else: + stagnant_iterations += 1 + + if self.eval_count >= global_search_budget: + break + + # Check for stagnation and reinitialize part of the population + if stagnant_iterations >= max_stagnant_iterations: + reinit_indices = np.random.choice(self.pop_size, self.pop_size // 2, replace=False) + for idx in reinit_indices: + population[idx] = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + fitness[idx] = func(population[idx]) + F_values[idx] = self.init_F + CR_values[idx] = self.init_CR + stagnant_iterations = 0 + self.eval_count += len(reinit_indices) + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.simulated_annealing(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEliteQuantumAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedEliteQuantumAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..78af53fb3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEliteQuantumAdaptiveExplorationOptimization.py @@ -0,0 +1,235 @@ +import numpy as np + + +class EnhancedEliteQuantumAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # PSO constants + c1 = 2.0 + c2 = 2.0 + w = 0.5 + + # Learning rate adaptation parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Differential Evolution parameters + F_min = 0.4 + F_max = 0.9 + CR = 0.9 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 + + # Exploration improvement parameters + exploration_factor = 0.2 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + # Elite pool + elite_pool_size = 5 + elite_pool = [] + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.9 + + prev_f = f + + # Update elite pool + if len(elite_pool) < elite_pool_size: + elite_pool.append(global_best_position.copy()) + else: + worst_idx = np.argmax([func(e) for e in elite_pool]) + if global_best_score < func(elite_pool[worst_idx]): + elite_pool[worst_idx] = global_best_position.copy() + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Adaptive Quantum Rotation + if i % 50 == 0 and i > 0: + rotation_angle = np.pi / 4 * (1 - i / self.budget) + rotation_matrix = np.array( + [ + [np.cos(rotation_angle), -np.sin(rotation_angle)], + [np.sin(rotation_angle), np.cos(rotation_angle)], + ] + ) + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedEliteQuantumAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonicTabuSearchV24.py b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonicTabuSearchV24.py new file mode 100644 index 000000000..7fc2180e8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonicTabuSearchV24.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedEnhancedAdaptiveHarmonicTabuSearchV24: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.2, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.1 # Increase the tabu ratio slightly for more exploration + self.bandwidth *= 0.95 # Decrease the bandwidth slightly for more exploitation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + if i % 200 == 0: # Adaptive parameter update every 200 iterations + self.update_parameters() + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7.py b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7.py new file mode 100644 index 000000000..837078859 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 10 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8.py b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8.py new file mode 100644 index 000000000..cc5fc093e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8.py @@ -0,0 +1,117 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8: + def __init__(self, budget=1000, hmcr=0.7, par=0.6, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def local_search(self, harmony_memory, func, func_bounds, iterations=3, bandwidth=0.05): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + for _ in range(iterations): + new_harmony = self.exploit([new_harmony], func, func.bounds, bandwidth) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def diversification_search(self, harmony_memory, func_bounds, scale=0.1): + new_harmony_memory = [] + for h in harmony_memory: + new_harmony = h + np.random.normal(0, scale, size=len(func_bounds.lb)) + new_harmony = np.clip(new_harmony, func_bounds.lb, func_bounds.ub) + new_harmony_memory.append(new_harmony) + return new_harmony_memory + + def generate_new_solution(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def local_optimization(self, solution, func, func_bounds, iterations=3, bandwidth=0.05): + best_solution = solution + best_cost = func(solution) + for _ in range(iterations): + new_solution = self.exploit([solution], func, func.bounds, bandwidth) + new_cost = func(new_solution) + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + return best_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.7: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + if i % 10 == 0: + new_harmony = self.generate_new_solution(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.5: + new_harmony = self.local_optimization(new_harmony, func, func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if i % 50 == 0: + harmony_memory = self.local_search( + harmony_memory, func, func.bounds, iterations=2, bandwidth=0.03 + ) + + if i % 100 == 0: + harmony_memory = self.diversification_search(harmony_memory, func.bounds, scale=0.05) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..390f506f3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution: + def __init__(self, budget=10000, pop_size=40, f_init=0.8, cr_init=0.9, scaling_factor=0.1): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + success_rate = success / self.pop_size + f_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + cr_scale = scaling_factor * (1 - 2 * np.random.rand()) * (1 - success_rate) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def enhance_search(self, solution, best_solution, scaling_factor): + return solution + scaling_factor * (best_solution - solution) + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + success_count = 0 + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + success_count += 1 + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + success_count += 1 + + f_current, cr_current = self.adaptive_parameter_update( + success_count, f_current, cr_current, self.scaling_factor + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + # Enhanced step - Further enhance the promising solutions + self.population[j] = self.enhance_search( + self.population[j], best_solution, self.scaling_factor + ) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57.py b/nevergrad/optimization/lama/EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57.py new file mode 100644 index 000000000..f72dbafc3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + population_size=20, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(200): # Increased the number of runs to enhance the optimization + best_fitness, _ = self.enhanced_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..f32d2ebe3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=100, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(2000): # Increased the number of optimization runs to 2000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(400): # Increased the number of iterations within each optimization run to 400 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedEnhancedDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedEnhancedDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..af9e82885 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedDynamicQuantumSwarmOptimization.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedEnhancedDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically with enhancements + self.max_inertia_weight = 0.9 + self.min_inertia_weight = 0.4 + self.max_cognitive_weight = 2.5 + self.min_cognitive_weight = 1.5 + self.max_social_weight = 1.8 + self.min_social_weight = 1.2 + + delta = 0.9 / self.budget + self.inertia_weight = self.max_inertia_weight - delta * iteration + self.cognitive_weight = self.min_cognitive_weight + delta * iteration + self.social_weight = self.max_social_weight - delta * iteration + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + cognitive_component = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + social_component = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + cognitive_component + social_component + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10.py b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10.py new file mode 100644 index 000000000..6aaf1f40f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6.py b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6.py new file mode 100644 index 000000000..40884a4c9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=50, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7.py b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7.py new file mode 100644 index 000000000..03018dab8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8.py b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8.py new file mode 100644 index 000000000..a96df96d4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=200, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9.py b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9.py new file mode 100644 index 000000000..49b027960 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=300, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization.py new file mode 100644 index 000000000..d3282b6a2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedEnhancedFireworkSwarmOptimization: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v2.py b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v2.py new file mode 100644 index 000000000..1b3f0f259 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v2.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedEnhancedFireworkSwarmOptimization_v2: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v3.py b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v3.py new file mode 100644 index 000000000..926baa249 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v3.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedEnhancedFireworkSwarmOptimization_v3: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v4.py b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v4.py new file mode 100644 index 000000000..b4ae6b452 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedFireworkSwarmOptimization_v4.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedEnhancedFireworkSwarmOptimization_v4: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + # Randomly reset some fireworks to encourage exploration + reset_idx = np.random.choice(self.n_fireworks, int(0.1 * self.n_fireworks), replace=False) + fireworks[reset_idx] = self.initialize_fireworks()[reset_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v63.py b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v63.py new file mode 100644 index 000000000..725699271 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v63.py @@ -0,0 +1,105 @@ +import numpy as np + + +class EnhancedEnhancedGuidedMassQGSA_v63: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v64.py b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v64.py new file mode 100644 index 000000000..72c235189 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v64.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedEnhancedGuidedMassQGSA_v64: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def _calculate_area_over_convergence_curve(self): + if len(self.best_fitness_history) <= 1: + return 1.0 + aocc = np.trapz(self.best_fitness_history, dx=1) / (len(self.best_fitness_history) - 1) + return 1.0 / (1.0 + aocc) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + self.best_fitness_history.append(self.f_opt) + + # Calculate AOCC + aocc = self._calculate_area_over_convergence_curve() + + return aocc, self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v68.py b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v68.py new file mode 100644 index 000000000..22daf752f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedGuidedMassQGSA_v68.py @@ -0,0 +1,105 @@ +import numpy as np + + +class EnhancedEnhancedGuidedMassQGSA_v68: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration.py b/nevergrad/optimization/lama/EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration.py new file mode 100644 index 000000000..0eacfac9e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=15, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: # Introduce Improved Adaptive Levy Flight + levy = self.generate_improved_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_improved_adaptive_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) # Randomly select beta in range + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.1 # Adjust beta for next iteration with smaller increment + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + return levy diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizer.py new file mode 100644 index 000000000..1df07a863 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.75, + crossover_rate=0.85, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.55, + mutation_rate=0.12, + num_generations=80, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV10.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV10.py new file mode 100644 index 000000000..42ef60232 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV10.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV10: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.6, + mutation_rate=0.08, + num_generations=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV11.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV11.py new file mode 100644 index 000000000..4e79b1996 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV11.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV11: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.1, + num_generations=120, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2, r3 = np.random.choice(len(swarm), 3, replace=False) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV12.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV12.py new file mode 100644 index 000000000..f2fd5aead --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV12.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV12: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.1, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2, r3 = np.random.choice(len(swarm), 3, replace=False) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV13.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV13.py new file mode 100644 index 000000000..d3b7d6e26 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV13.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV13: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.1, + num_generations=200, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2, r3 = np.random.choice(len(swarm), 3, replace=False) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV14.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV14.py new file mode 100644 index 000000000..ef2eccd41 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV14.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV14: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.1, + num_generations=250, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2, r3 = np.random.choice(len(swarm), 3, replace=False) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV2.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV2.py new file mode 100644 index 000000000..cc31c3191 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV2: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=1.3, + social_weight=1.3, + max_velocity=0.5, + mutation_rate=0.15, + num_generations=80, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV3.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV3.py new file mode 100644 index 000000000..dbdb79e9c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV3.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV3: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.95, + inertia_weight=0.7, + cognitive_weight=1.7, + social_weight=1.7, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV4.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV4.py new file mode 100644 index 000000000..1e7d7b73d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV4: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.75, + crossover_rate=0.8, + inertia_weight=0.65, + cognitive_weight=1.6, + social_weight=1.6, + max_velocity=0.55, + mutation_rate=0.15, + num_generations=80, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV5.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV5.py new file mode 100644 index 000000000..2de1d680f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV5.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV5: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.7, + crossover_rate=0.85, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.6, + mutation_rate=0.15, + num_generations=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV6.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV6.py new file mode 100644 index 000000000..e5913036b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV6.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV6: + def __init__( + self, + budget, + swarm_size=35, + differential_weight=0.75, + crossover_rate=0.95, + inertia_weight=0.65, + cognitive_weight=1.3, + social_weight=1.3, + max_velocity=0.55, + mutation_rate=0.12, + num_generations=80, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV7.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV7.py new file mode 100644 index 000000000..d6c4c7716 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV7.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV7: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.6, + mutation_rate=0.08, + num_generations=90, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV8.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV8.py new file mode 100644 index 000000000..b5aaad649 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV8.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV8: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.7, + mutation_rate=0.1, + num_generations=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV9.py b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV9.py new file mode 100644 index 000000000..10cccbd53 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedHybridMetaHeuristicOptimizerV9.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedHybridMetaHeuristicOptimizerV9: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=100, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedMetaHeuristicOptimizerV3.py b/nevergrad/optimization/lama/EnhancedEnhancedMetaHeuristicOptimizerV3.py new file mode 100644 index 000000000..eb9d8a571 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedMetaHeuristicOptimizerV3.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedMetaHeuristicOptimizerV3: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.7, + social_weight=1.7, + max_velocity=0.9, + mutation_rate=0.05, + num_generations=300, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py b/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py new file mode 100644 index 000000000..27bf8cfb7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.7 - 0.5 * t / self.budget # Improved inertia weight update + + def update_parameters(self, t): + return 1.5 - 1.0 * t / (1.5 * self.budget), 2.0 - 1.2 * t / ( + 1.5 * self.budget + ) # Adjusted cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t % 100 == 0: + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Explore the search space more intensively + for i in range(self.num_particles): + r4 = np.random.rand(self.dim) + particles_pos[i] = (1 - r4) * particles_pos[i] + r4 * global_best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4.py b/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4.py new file mode 100644 index 000000000..0fd6c0fdc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget # Adjusted inertia weight update for better convergence + + def update_parameters(self, t): + return 1.6 - 1.4 * t / (1.6 * self.budget), 2.1 - 1.6 * t / ( + 1.6 * self.budget + ) # Refined cognitive and social weights update for improved exploration + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.6 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV1.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV1.py new file mode 100644 index 000000000..9adc8632e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV1.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV1: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=20, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV12.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV12.py new file mode 100644 index 000000000..e6ed44500 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV12.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV12: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=1000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV13.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV13.py new file mode 100644 index 000000000..580493b91 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV13.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV13: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=1000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV14.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV14.py new file mode 100644 index 000000000..27bacec52 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV14.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV14: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=2.0, + social_weight=2.0, + max_velocity=0.5, + mutation_rate=0.1, + num_generations=500, + num_local_searches=1000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV15.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV15.py new file mode 100644 index 000000000..3a8e19472 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV15.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV15: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.8, + social_weight=1.8, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=600, + num_local_searches=1500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV16.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV16.py new file mode 100644 index 000000000..80e93ead0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV16.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV16: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=800, + num_local_searches=2000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV17.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV17.py new file mode 100644 index 000000000..c1a117c4a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV17.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV17: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.5, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.5, + mutation_rate=0.1, + num_generations=1000, + num_local_searches=3000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV18.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV18.py new file mode 100644 index 000000000..eef0d4b9f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV18.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV18: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.3, + mutation_rate=0.1, + num_generations=1500, + num_local_searches=5000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV19.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV19.py new file mode 100644 index 000000000..c6458f163 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV19.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV19: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.7, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.4, + mutation_rate=0.05, + num_generations=2000, + num_local_searches=10000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV2.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV2.py new file mode 100644 index 000000000..42dc06b3c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV2.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV2: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=500, + num_local_searches=25, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV20.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV20.py new file mode 100644 index 000000000..4f7ef3c23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV20.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV20: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.4, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.6, + mutation_rate=0.03, + num_generations=5000, + num_local_searches=15000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV21.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV21.py new file mode 100644 index 000000000..17597fa08 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV21.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV21: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=20000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV22.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV22.py new file mode 100644 index 000000000..73f2e85e8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV22.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV22: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.2, + mutation_rate=0.1, + num_generations=500, + num_local_searches=30000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV23.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV23.py new file mode 100644 index 000000000..32596fa84 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV23.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV23: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.5, + mutation_rate=0.03, + num_generations=600, + num_local_searches=50000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV24.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV24.py new file mode 100644 index 000000000..a09c47862 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV24.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV24: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.7, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.3, + mutation_rate=0.01, + num_generations=800, + num_local_searches=100000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV25.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV25.py new file mode 100644 index 000000000..53d4bf45f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV25.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV25: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.4, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.25, + mutation_rate=0.01, + num_generations=1000, + num_local_searches=10000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV26.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV26.py new file mode 100644 index 000000000..52494e636 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV26.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV26: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.5, + cognitive_weight=1.8, + social_weight=1.8, + max_velocity=0.5, + mutation_rate=0.02, + num_generations=500, + num_local_searches=20000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV27.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV27.py new file mode 100644 index 000000000..9b1f5a180 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV27.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV27: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.3, + mutation_rate=0.02, + num_generations=500, + num_local_searches=30000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV28.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV28.py new file mode 100644 index 000000000..e08559d23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV28.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV28: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.5, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.3, + mutation_rate=0.02, + num_generations=500, + num_local_searches=30000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV29.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV29.py new file mode 100644 index 000000000..0937e522d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV29.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV29: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.4, + mutation_rate=0.03, + num_generations=600, + num_local_searches=50000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV3.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV3.py new file mode 100644 index 000000000..0b8b7f14d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV3.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV3: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=500, + num_local_searches=25, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV30.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV30.py new file mode 100644 index 000000000..0a10e9670 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV30.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV30: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.75, + crossover_rate=0.85, + inertia_weight=0.65, + cognitive_weight=1.3, + social_weight=1.3, + max_velocity=0.5, + mutation_rate=0.02, + num_generations=800, + num_local_searches=100000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(best_solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV4.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV4.py new file mode 100644 index 000000000..f86d8c293 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV4.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV4: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.6, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=500, + num_local_searches=25, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV5.py b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV5.py new file mode 100644 index 000000000..8cea385b7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryDifferentialSwarmOptimizerV5.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedEvolutionaryDifferentialSwarmOptimizerV5: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=30, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch.py new file mode 100644 index 000000000..0448976d7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5, gamma=1.0, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v2.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v2.py new file mode 100644 index 000000000..a4c064f71 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v2.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch_v2: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = 1.0 + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * ( + fireworks[np.random.randint(len(fireworks))] + - fireworks[np.random.randint(len(fireworks))] + ) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.995) # Adapt sigma parameter + return self.sigma + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v3.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v3.py new file mode 100644 index 000000000..e5409cb7f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v3.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch_v3: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5, gamma=1.0, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.alpha = max(0.05, self.alpha * 0.995) + self.beta = min(2.0, self.beta * 1.005) + return self.alpha, self.beta + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.alpha, self.beta = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v4.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v4.py new file mode 100644 index 000000000..ae15b43b1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v4.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch_v4: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=1.5, gamma=1.0, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.beta = max(1.0, self.beta * 0.995) + return self.beta + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.beta = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v5.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v5.py new file mode 100644 index 000000000..40b263f81 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v5.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch_v5: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = 1.0 + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.99) # Adjusted sigma update rule for better exploration + return self.sigma + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v6.py b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v6.py new file mode 100644 index 000000000..ed31b6f06 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryFireworksSearch_v6.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedEvolutionaryFireworksSearch_v6: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = 1.0 + self.best_firework = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.995) # Adjusted sigma update rule for better exploration + return self.sigma + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + self.best_firework = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryGradientSearch.py b/nevergrad/optimization/lama/EnhancedEvolutionaryGradientSearch.py new file mode 100644 index 000000000..a29f28df0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryGradientSearch.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedEvolutionaryGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(len(x)): + x_step = np.array(x) + x_step[i] += epsilon + grad[i] = (func(x_step) - fx) / epsilon + return grad + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + + population_size = 20 + mutation_rate = 0.1 + selection_rate = 0.5 + elite_fraction = 0.1 + grad_step = 0.01 + + # Initialize population + population = np.random.uniform(func.bounds.lb, func.bounds.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + while evaluations < self.budget: + # Select elites and breed new generation + num_elites = int(elite_fraction * population_size) + elite_indices = np.argsort(fitness)[:num_elites] + elites = population[elite_indices] + + # Create new population + new_population = [] + for _ in range(population_size): + if np.random.rand() < selection_rate: + parents = np.random.choice(num_elites, 2, replace=False) + parent1, parent2 = elites[parents[0]], elites[parents[1]] + offspring = (parent1 + parent2) / 2 + else: + offspring = np.random.uniform(func.bounds.lb, func.bounds.ub, self.dim) + + # Mutation + if np.random.rand() < mutation_rate: + mutation_vector = np.random.randn(self.dim) * 0.1 + offspring = offspring + mutation_vector + + offspring = np.clip(offspring, func.bounds.lb, func.bounds.ub) + new_population.append(offspring) + + new_population = np.array(new_population) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += population_size + + # Gradient-based local search for elites + for i in range(num_elites): + elite = elites[i] + grad = self.approximate_gradient(func, elite) + elite_new = elite - grad_step * grad + elite_new = np.clip(elite_new, func.bounds.lb, func.bounds.ub) + elite_fitness = func(elite_new) + evaluations += 1 + + if elite_fitness < fitness[elite_indices[i]]: + new_population[elite_indices[i]] = elite_new + new_fitness[elite_indices[i]] = elite_fitness + + # Update population and fitness + population = new_population + fitness = new_fitness + + # Update best solution found + min_fitness_idx = np.argmin(fitness) + if fitness[min_fitness_idx] < self.f_opt: + self.f_opt = fitness[min_fitness_idx] + self.x_opt = population[min_fitness_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizer.py new file mode 100644 index 000000000..1abbdc81e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedEvolutionaryParticleSwarmOptimizer: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.1, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV2.py b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV2.py new file mode 100644 index 000000000..f25dc4601 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV2.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedEvolutionaryParticleSwarmOptimizerV2: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV3.py b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV3.py new file mode 100644 index 000000000..d1327b612 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryParticleSwarmOptimizerV3.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedEvolutionaryParticleSwarmOptimizerV3: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedEvolutionaryStrategy.py b/nevergrad/optimization/lama/EnhancedEvolutionaryStrategy.py new file mode 100644 index 000000000..a7941464b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedEvolutionaryStrategy.py @@ -0,0 +1,46 @@ +import numpy as np + + +class EnhancedEvolutionaryStrategy: + def __init__(self, budget=10000, mu=10, lambda_=20, tau=1 / np.sqrt(2), tau_prime=1 / np.sqrt(2)): + self.budget = budget + self.mu = mu + self.lambda_ = lambda_ + self.tau = tau + self.tau_prime = tau_prime + self.dim = 5 + self.population = np.random.uniform(-5.0, 5.0, size=(self.mu, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def mutate_population(self, population, tau, tau_prime): + mutated_population = population + np.random.normal(0, tau, size=population.shape) + mutated_population += np.random.normal(0, tau_prime, size=population.shape) + return np.clip(mutated_population, -5.0, 5.0) + + def evaluate_population(self, func, population): + fitness = np.array([func(sol) for sol in population]) + min_idx = np.argmin(fitness) + if fitness[min_idx] < self.best_fitness: + self.best_fitness = fitness[min_idx] + self.best_solution = population[min_idx].copy() + return fitness + + def selection(self, population, fitness, mu): + idx = np.argsort(fitness)[:mu] + return population[idx].copy() + + def __call__(self, func): + tau = self.tau + tau_prime = self.tau_prime + for _ in range(self.budget): + children = self.mutate_population(self.population, tau, tau_prime) + fitness = self.evaluate_population(func, children) + self.population = self.selection(children, fitness, self.mu) + + # Adapt strategy parameters + tau *= np.exp((1 / np.sqrt(2 * self.dim)) * np.random.normal(0, 1)) + tau_prime *= np.exp((1 / np.sqrt(2 * np.sqrt(self.dim))) * np.random.normal(0, 1)) + + aocc = 1 - np.std(fitness) / np.mean(fitness) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimization.py new file mode 100644 index 000000000..345313e06 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedExplorationGravitationalSwarmOptimization: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(500): # Increased the number of optimization runs to 500 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5000): # Increased the number of iterations within each optimization run to 5000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV2.py new file mode 100644 index 000000000..f101ca4de --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV2.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedExplorationGravitationalSwarmOptimizationV2: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(1000): # Increasing the number of optimization runs to 1000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range( + 10000 + ): # Increasing the number of iterations within each optimization run to 10000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV3.py new file mode 100644 index 000000000..c503d92af --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV3.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedExplorationGravitationalSwarmOptimizationV3: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(2000): # Increasing the number of optimization runs to 2000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range( + 15000 + ): # Increasing the number of iterations within each optimization run to 15000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV4.py new file mode 100644 index 000000000..dae24c897 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV4.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedExplorationGravitationalSwarmOptimizationV4: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(3000): # Increase the number of optimization runs to 3000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20000): # Increase the number of iterations within each optimization run to 20000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV5.py new file mode 100644 index 000000000..70cce24a6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorationGravitationalSwarmOptimizationV5.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedExplorationGravitationalSwarmOptimizationV5: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(5000): # Increase the number of optimization runs to 5000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(30000): # Increase the number of iterations within each optimization run to 30000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedExplorativeHarmonicSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedExplorativeHarmonicSwarmOptimizer.py new file mode 100644 index 000000000..fe04f9064 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedExplorativeHarmonicSwarmOptimizer.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedExplorativeHarmonicSwarmOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + harmony_memory_size=10, + bandwidth=3.0, + exploration_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution): + exploration = np.random.uniform( + -self.exploration_rate, self.exploration_rate, (self.population_size, self.dim) + ) + new_population = population + exploration + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + best_fitnesses = [best_fitness] + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution) + population = np.vstack((population, new_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.update_bandwidth(i) + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithm.py new file mode 100644 index 000000000..9d257e01e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithm.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedFireworkAlgorithm: + def __init__( + self, population_size=30, max_sparks=5, max_generations=1000, alpha=0.1, beta=0.2, p_ex=0.8, p_dt=0.1 + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.alpha = alpha + self.beta = beta + self.p_ex = p_ex + self.p_dt = p_dt + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * self.beta + + def attraction_operator(self, x, y): + return x + self.alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0]) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + for i, (x, _) in enumerate(self.fireworks): + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization.py new file mode 100644 index 000000000..869a3681f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedFireworkAlgorithmOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) # Adaptive alpha + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + + for _ in range(self.n_fireworks // 5): # Introduce chaotic search for diversity + idx = np.random.randint(self.n_fireworks) + fireworks[idx] = self.chaotic_search(func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization_v2.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization_v2.py new file mode 100644 index 000000000..433326bba --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmOptimization_v2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedFireworkAlgorithmOptimization_v2: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + if np.random.rand() < 0.1: # Introduce probability for diversification + fireworks[i] = self.chaotic_search(func) + return fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + fireworks = self.diversify_fireworks(fireworks, func) # Integrate diversification + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py new file mode 100644 index 000000000..0d5ce52cc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py @@ -0,0 +1,115 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithAdaptiveLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + if func(x) < self.best_fitness: + self.best_individual = np.copy(x) + self.best_fitness = func(x) + + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + def adaptive_local_search(self, func): + improved = False + for i in range(self.population_size): + current_fitness = func(self.fireworks[i][0]) + new_individual = self.local_search(self.fireworks[i][0], func) + new_fitness = func(new_individual) + if new_fitness < current_fitness: + self.fireworks[i] = (np.copy(new_individual), 0) + improved = True + + return improved + + def __call__(self, func): + self.run_firework_algorithm(func) + + improved = self.adaptive_local_search(func) + if improved: + self.run_firework_algorithm(func) + + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined.py new file mode 100644 index 000000000..1c8589e43 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined.py @@ -0,0 +1,94 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.best_individual = x + self.best_fitness = func(self.best_individual) + + def __call__(self, func): + self.run_firework_algorithm(func) + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveMutation.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveMutation.py new file mode 100644 index 000000000..6edf5a807 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithAdaptiveMutation.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedFireworkAlgorithmWithAdaptiveMutation: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + initial_mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.initial_mutation_rate = initial_mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + self.mutation_rate = np.full(self.population_size, self.initial_mutation_rate) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def adapt_mutation_rate(self, fitness_diff, k): + if fitness_diff < 0: + self.mutation_rate[k] *= 0.95 # Decrease mutation rate + else: + self.mutation_rate[k] *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate[i], size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i) + self.adapt_mutation_rate(fitness_diff, i) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithDynamicMutation.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithDynamicMutation.py new file mode 100644 index 000000000..034db8d4a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithDynamicMutation.py @@ -0,0 +1,114 @@ +import numpy as np + + +class EnhancedFireworkAlgorithmWithDynamicMutation: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate_range=(0.01, 0.1), + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate_range = mutation_rate_range + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [ + ( + np.copy(x), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + for x in self.population + ] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, alpha, beta): + alpha *= 0.9 # Decrease alpha + beta *= 1.1 # Increase beta + return alpha, beta + + def adapt_mutation_rate(self, fitness_diff, mutation_rate): + if fitness_diff < 0: + mutation_rate *= 0.9 # Decrease mutation rate + else: + mutation_rate *= 1.1 # Increase mutation rate + return np.clip(mutation_rate, *self.mutation_rate_range) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta, mutation_rate) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + new_spark += np.random.normal(0, mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = ( + np.copy(new_spark), + 0, + *self.update_parameters(i, alpha, beta), + mutation_rate, + ) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + self.adapt_mutation_rate(fitness_diff, mutation_rate), + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithHybridLocalSearch.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithHybridLocalSearch.py new file mode 100644 index 000000000..7e9356207 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithHybridLocalSearch.py @@ -0,0 +1,94 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithHybridLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.best_individual = x + self.best_fitness = func(self.best_individual) + + def __call__(self, func): + self.run_firework_algorithm(func) + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithImprovedMutation.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithImprovedMutation.py new file mode 100644 index 000000000..182b1f9af --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithImprovedMutation.py @@ -0,0 +1,114 @@ +import numpy as np + + +class EnhancedFireworkAlgorithmWithImprovedMutation: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate_range=(0.01, 0.1), + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate_range = mutation_rate_range + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [ + ( + np.copy(x), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + for x in self.population + ] + self.best_individual = None + self.best_fitness = np.Inf + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, alpha, beta): + alpha *= 0.9 # Decrease alpha + beta *= 1.1 # Increase beta + return alpha, beta + + def adapt_mutation_rate(self, fitness_diff, mutation_rate): + if fitness_diff < 0: + mutation_rate *= 0.95 # Decrease mutation rate + else: + mutation_rate *= 1.05 # Increase mutation rate + return np.clip(mutation_rate, *self.mutation_rate_range) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, alpha, beta, mutation_rate) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, beta) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], alpha) + + new_spark += np.random.normal(0, mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = ( + np.copy(new_spark), + 0, + *self.update_parameters(i, alpha, beta), + mutation_rate, + ) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + alpha, + beta, + self.adapt_mutation_rate(fitness_diff, mutation_rate), + ) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + self.initial_alpha, + self.initial_beta, + np.random.uniform(*self.mutation_rate_range), + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearch.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearch.py new file mode 100644 index 000000000..0cefc8a79 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearch.py @@ -0,0 +1,91 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize(func, x, bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)]) + return res.x + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinal.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinal.py new file mode 100644 index 000000000..4416fbbbf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinal.py @@ -0,0 +1,95 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchFinal: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized.py new file mode 100644 index 000000000..e3a5de0aa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized.py @@ -0,0 +1,98 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def get_bounding_box(self, lb, ub): + return np.maximum(lb, np.minimum(ub, np.random.normal((lb + ub) / 2, (ub - lb) / 3))) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (self.get_bounding_box(func.bounds.lb, func.bounds.ub), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalRefined.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalRefined.py new file mode 100644 index 000000000..620e1f889 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchFinalRefined.py @@ -0,0 +1,95 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchFinalRefined: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchImproved.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchImproved.py new file mode 100644 index 000000000..fbc4bbce5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchImproved.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchImproved: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + if func(x) < self.best_fitness: + self.best_individual = np.copy(x) + self.best_fitness = func(x) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchOptimized.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchOptimized.py new file mode 100644 index 000000000..0ce8bd6ff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchOptimized.py @@ -0,0 +1,95 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchOptimized: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchRefined.py b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchRefined.py new file mode 100644 index 000000000..3a8516853 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkAlgorithmWithLocalSearchRefined.py @@ -0,0 +1,95 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedFireworkAlgorithmWithLocalSearchRefined: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworkSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedFireworkSwarmOptimization.py new file mode 100644 index 000000000..be358453d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworkSwarmOptimization.py @@ -0,0 +1,55 @@ +import numpy as np + + +class EnhancedFireworkSwarmOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) # Adaptive alpha + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + fireworks = self.evolve_fireworks(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedFireworksAlgorithm.py new file mode 100644 index 000000000..aa266be7b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworksAlgorithm.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedFireworksAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma *= 0.99 # Adjusted sigma update rule for slower decrease + return max(0.1, self.sigma) + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFireworksSwarmOptimization_v4.py b/nevergrad/optimization/lama/EnhancedFireworksSwarmOptimization_v4.py new file mode 100644 index 000000000..f20cca4de --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFireworksSwarmOptimization_v4.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedFireworksSwarmOptimization_v4: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def diversify_fireworks(self, fireworks, func, i): + p_diversify = 0.1 + 0.4 * np.exp(-5 * i / self.budget) # Adaptive probability for diversification + for i in range(self.n_fireworks): + if np.random.rand() < p_diversify: + fireworks[i] = self.chaotic_search(func) + return fireworks + + def enhance_convergence(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + best_firework = fireworks[best_idx] + for i in range(self.n_fireworks): + if i != best_idx: + fireworks[i] = 0.9 * fireworks[i] + 0.1 * best_firework # Attraction towards the global best + return fireworks + + def adaptive_sparks(self, budget): + return 5 + int(45 * np.exp(-5 * budget / self.budget)) + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + n_sparks = self.adaptive_sparks(i) + self.n_sparks = n_sparks + fireworks = self.diversify_fireworks(fireworks, func, i) + fireworks = self.enhance_convergence(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedFocusedBalancedAdaptivePSO.py b/nevergrad/optimization/lama/EnhancedFocusedBalancedAdaptivePSO.py new file mode 100644 index 000000000..80a52c47e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedFocusedBalancedAdaptivePSO.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedFocusedBalancedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_initial=0.9, + omega_final=0.4, + phi_p=0.2, + phi_g=0.6, + adaptive_depth=5, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal preference influence + self.phi_g = phi_g # Global preference influence + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.adaptive_depth = adaptive_depth # Depth of performance evaluation for adaptive inertia + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.adaptive_depth :] + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + # More aggressive inertia adaptation + if len(scores) > 1 and np.std(scores) < 0.01: + return max(self.omega_final, self.omega_initial - (evaluation_counter / self.budget) * 2.0) + else: + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) diff --git a/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizer.py b/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizer.py new file mode 100644 index 000000000..06df6f197 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizer.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedGlobalClimbingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 # Adjusted population size for a balance between exploration and convergence + elite_size = 50 # Adjusted elite size to maintain quality without hindering diversity + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.1 # Fine-tuned mutation scale + adaptive_factor = 0.90 # Enhanced adaptiveness to fitness landscape changes + recombination_prob = 0.8 # Tuned recombination probability for stability + + # Evolution loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + # Differential evolution inspired crossover strategy + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) # Differential mutation + child = np.clip(child, self.lb, self.ub) + else: + # Mutation and selection + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Adaptive mutation scaling and replacement strategy + if evaluations % 400 == 0: + mutation_scale *= adaptive_factor # Decrement to stabilize as optimization progresses + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size - elite_size): + if np.random.rand() < 0.2: # Introducing fresh blood + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizerV3.py b/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizerV3.py new file mode 100644 index 000000000..af4ecc79e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGlobalClimbingOptimizerV3.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedGlobalClimbingOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 150 # More focused population size + elite_size = 20 # Smaller elite size + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.1 # Slightly increased mutation scale for broader exploration + adaptive_factor = 0.98 # Slightly slower reduction in mutation scale + recombination_prob = 0.6 # Slightly increased recombination to introduce more diversity + + # Evolution loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if evaluations % 200 == 0: + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size - elite_size): + if np.random.rand() < 0.1: + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGlobalStructureAdaptiveEvolver.py b/nevergrad/optimization/lama/EnhancedGlobalStructureAdaptiveEvolver.py new file mode 100644 index 000000000..a83bbd552 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGlobalStructureAdaptiveEvolver.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedGlobalStructureAdaptiveEvolver: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 250 + elite_size = 80 + evaluations = 0 + mutation_scale = 0.1 + adaptive_factor = 0.95 + recombination_prob = 0.75 + innovators_factor = 0.1 # Proportion of population for extensive exploration + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Recombination and mutation within elites + new_population = elite_individuals.copy() + new_fitness = elite_fitness.copy() + for _ in range(population_size - elite_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(elite_size, 3, replace=False) + x0, x1, x2 = elite_individuals[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + idx = np.random.choice(elite_size) + child = elite_individuals[idx] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population = np.append(new_population, [child], axis=0) + new_fitness = np.append(new_fitness, child_fitness) + + # Introduce innovators for extensive exploration + innovators = np.random.uniform( + self.lb, self.ub, (int(population_size * innovators_factor), self.dim) + ) + innovator_fitness = np.array([func(ind) for ind in innovators]) + evaluations += len(innovators) + + combined_population = np.concatenate((new_population, innovators), axis=0) + combined_fitness = np.concatenate((new_fitness, innovator_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + mutation_scale *= adaptive_factor # Gradually reduce mutation scale to fine-tune exploration + if mutation_scale < 0.01: + mutation_scale = 0.1 # Reset mutation scale if it becomes too small + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGlobalStructureAwareOptimizer.py b/nevergrad/optimization/lama/EnhancedGlobalStructureAwareOptimizer.py new file mode 100644 index 000000000..7892a765d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGlobalStructureAwareOptimizer.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedGlobalStructureAwareOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 80 + elite_size = 15 + evaluations = 0 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.1 + adaptive_factor = 0.98 + recombination_prob = 0.85 + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + for idx in range(population_size - elite_size): + if np.random.rand() < 0.2: + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Introduce global structure-aware mutation for diversity enhancement + if evaluations % 500 == 0: + structure_scale = 0.5 + structure_population = np.random.normal(0, structure_scale, (population_size // 5, self.dim)) + structure_population = np.clip( + structure_population + + population[np.random.choice(population_size, population_size // 5)], + self.lb, + self.ub, + ) + structure_fitness = np.array([func(ind) for ind in structure_population]) + evaluations += population_size // 5 + + combined_population = np.concatenate((population, structure_population), axis=0) + combined_fitness = np.concatenate((fitness, structure_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGlobalStructureOptimizer.py b/nevergrad/optimization/lama/EnhancedGlobalStructureOptimizer.py new file mode 100644 index 000000000..4ec2716f9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGlobalStructureOptimizer.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedGlobalStructureOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + mutation_factor = 0.9 + crossover_rate = 0.9 + elite_size = 5 + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main loop + while evaluations < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elitism: carry forward best solutions + sorted_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[sorted_indices] + new_fitness[:elite_size] = fitness[sorted_indices] + + # Generate new candidates for the rest of the population + for i in range(elite_size, population_size): + # Differential Evolution Strategy: "rand/1/bin" + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + + # Mutation + mutant = x0 + mutation_factor * (x1 - x2) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial = np.where(crossover_mask, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + population = new_population + fitness = new_fitness + + # Update the best solution found + current_best_index = np.argmin(fitness) + current_best_fitness = fitness[current_best_index] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_solution = population[current_best_index] + + # Adaptive mutation factor and crossover rate + if evaluations % (self.budget // 10) == 0: + mutation_factor = max(0.5, mutation_factor * 0.98) + crossover_rate = max(0.5, crossover_rate * 0.98) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedGradientBoostedAnnealingWithAdaptiveMemory.py b/nevergrad/optimization/lama/EnhancedGradientBoostedAnnealingWithAdaptiveMemory.py new file mode 100644 index 000000000..ecdd296e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGradientBoostedAnnealingWithAdaptiveMemory.py @@ -0,0 +1,170 @@ +import numpy as np + + +class EnhancedGradientBoostedAnnealingWithAdaptiveMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 + T_min = 1e-6 + alpha_initial = 0.96 + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta_initial = 1.5 + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + x_candidate = memory[np.random.randint(memory_size)] + np.random.uniform( + -0.5, 0.5, self.dim + ) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + if evaluations % (self.budget // 7) == 0: + for _ in range(memory_size): + x_candidate = memory[np.random.randint(memory_size)] + np.random.uniform( + -0.2, 0.2, self.dim + ) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedGradientGuidedClusterSearch.py b/nevergrad/optimization/lama/EnhancedGradientGuidedClusterSearch.py new file mode 100644 index 000000000..ef875fb71 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGradientGuidedClusterSearch.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedGradientGuidedClusterSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial population + population_size = 20 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + + # Parameters for adaptive mechanisms + mutation_rate = 0.1 + mutation_scale = 1.0 + + iteration = 0 + while iteration < self.budget: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = best_individual + + # Estimating gradients with central difference method for more accuracy + gradients = [] + for i in range(population_size): + grad = np.zeros(self.dimension) + for d in range(self.dimension): + perturb = np.zeros(self.dimension) + epsilon = 0.01 + perturb[d] = epsilon + forward = np.clip(population[i] + perturb, self.lower_bound, self.upper_bound) + backward = np.clip(population[i] - perturb, self.lower_bound, self.upper_bound) + grad[d] = (func(forward) - func(backward)) / (2 * epsilon) + gradients.append(grad) + + # Adaptive gradient step and mutation + new_population = [] + for i in range(population_size): + learning_rate = mutation_scale / (1 + iteration / self.budget * 10) # Decreases over time + new_individual = population[i] - learning_rate * gradients[i] + new_individual = np.clip(new_individual, self.lower_bound, self.upper_bound) + new_fitness = func(new_individual) + + # Mutation with adaptive rate + if np.random.rand() < mutation_rate: + new_individual += np.random.normal(0, mutation_scale, self.dimension) + new_individual = np.clip(new_individual, self.lower_bound, self.upper_bound) + new_fitness = func(new_individual) + + if new_fitness < fitness[i]: + new_population.append(new_individual) + fitness[i] = new_fitness + else: + new_population.append(population[i]) + + population = np.array(new_population) + iteration += population_size + + # Adjust mutation parameters based on progress + if iteration % 100 == 0: + mutation_rate *= 0.95 # Gradual decrease + mutation_scale *= 0.99 # Reduce mutation impact over time + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGradientGuidedEvolution.py b/nevergrad/optimization/lama/EnhancedGradientGuidedEvolution.py new file mode 100644 index 000000000..653135fa4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGradientGuidedEvolution.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedGradientGuidedEvolution: + def __init__( + self, + budget, + dimension=5, + population_size=30, + mutation_factor=0.7, + crossover_prob=0.8, + local_search_prob=0.15, + gradient_step_size=0.005, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_prob = crossover_prob + self.local_search_prob = local_search_prob + self.gradient_step_size = gradient_step_size # Step size for gradient approximation + + def __call__(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + best_idx = np.argmin(fitness) + f_opt, x_opt = fitness[best_idx], population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Mutation and crossover using differential evolution strategy + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.crossover_prob, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt, x_opt = trial_fitness, trial + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + + # Intermittent gradient-based local search + if np.random.rand() < self.local_search_prob and evaluations + self.dimension <= self.budget: + for _ in range(int(0.05 * self.budget)): # Limited local search steps + gradient = np.zeros(self.dimension) + for d in range(self.dimension): + perturb = np.zeros(self.dimension) + perturb[d] = self.gradient_step_size + f_plus = func(x_opt + perturb) + f_minus = func(x_opt - perturb) + gradient[d] = (f_plus - f_minus) / (2 * self.gradient_step_size) + evaluations += 2 + + if evaluations >= self.budget: + break + + # Update the best solution based on the gradient information + x_opt = np.clip(x_opt - 0.01 * gradient, -5.0, 5.0) + f_opt = func(x_opt) + evaluations += 1 + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedGradientGuidedHybridPSO.py b/nevergrad/optimization/lama/EnhancedGradientGuidedHybridPSO.py new file mode 100644 index 000000000..0dd6b4aa3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGradientGuidedHybridPSO.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedGradientGuidedHybridPSO: + def __init__( + self, + budget=10000, + population_size=50, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=1.4, + social_weight=1.2, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Decaying inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + # Improved gradient-guided component + gradient_guided_component = ( + 0.1 + * (global_best_position - personal_best_positions[i]) + / (1 + np.linalg.norm(global_best_position - personal_best_positions[i])) + ) + personal_component = r1 * (personal_best_positions[i] - particles[i]) + social_component = r2 * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * personal_component + + self.social_weight * social_component + + gradient_guided_component + ) # Enhanced hybridization with gradient direction + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedGradualAdaptiveRAMEDS.py b/nevergrad/optimization/lama/EnhancedGradualAdaptiveRAMEDS.py new file mode 100644 index 000000000..8cf810bba --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGradualAdaptiveRAMEDS.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedGradualAdaptiveRAMEDS: + def __init__( + self, + budget, + population_size=100, + crossover_rate=0.9, + F_start=0.9, + F_end=0.1, + memory_size=100, + elite_size=20, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_start = F_start + self.F_end = F_end + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor using linear interpolation + F = self.F_start + (self.F_end - self.F_start) * (evaluations / self.budget) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimization.py new file mode 100644 index 000000000..cd351ece9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimization.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedGravitationSwarmOptimization: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=100, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(5000): # Increased the number of optimization runs to 5000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(800): # Increased the number of iterations within each optimization run to 800 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimizationV2.py new file mode 100644 index 000000000..f441bfea6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationSwarmOptimizationV2.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedGravitationSwarmOptimizationV2: + def __init__( + self, + budget=1500, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=150, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(7500): # Increased the number of optimization runs to 7500 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(1000): # Increased the number of iterations within each optimization run to 1000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV10.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV10.py new file mode 100644 index 000000000..c37f8f812 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV10.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV10: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + reinitialize_percent=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.reinitialize_percent = reinitialize_percent + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + # Reinitialize worst % of the population + sorted_indices = np.argsort(f_vals) + reinitialize_count = int(self.reinitialize_percent * len(population)) + for idx in sorted_indices[-reinitialize_count:]: + population[idx] = np.random.uniform(low=func.bounds.lb, high=func.bounds.ub) + f_vals[idx] = func(population[idx]) + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV11.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV11.py new file mode 100644 index 000000000..8a1309627 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV11.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV11: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV12.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV12.py new file mode 100644 index 000000000..3a2e46549 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV12.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV12: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV13.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV13.py new file mode 100644 index 000000000..023a0411b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV13.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV13: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(3): # Perform optimization 3 times + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + if self.f_opt < np.inf: # If a valid solution is found, break + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV14.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV14.py new file mode 100644 index 000000000..b0abf5eb7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV14.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV14: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(5): # Increase the number of optimization runs to 5 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + if self.f_opt < np.inf: # If a valid solution is found, break + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV15.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV15.py new file mode 100644 index 000000000..1348214f4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV15.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV15: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.k = 5 # Number of individuals in the population to replace + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(self.k): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(self.k, self.population_size): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(10): # Increase the number of optimization runs to 10 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + if self.f_opt < np.inf: # If a valid solution is found, break + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV16.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV16.py new file mode 100644 index 000000000..d7b7debea --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV16.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV16: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.k = 10 # Number of individuals in the population to replace + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(self.k): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(self.k, self.population_size): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20): # Increase the number of optimization runs to 20 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + if self.f_opt < np.inf: # If a valid solution is found, break + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV17.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV17.py new file mode 100644 index 000000000..3856cf1e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV17.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV17: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20): # Increase the number of optimization runs to 20 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + if self.f_opt < np.inf: # If a valid solution is found, break + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV18.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV18.py new file mode 100644 index 000000000..f9ec9820b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV18.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV18: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + best_vals = [] + for _ in range(30): # Increase the number of optimization runs to 30 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + best_vals.append(self.f_opt) + + best_idx = np.argmin(best_vals) + self.f_opt = best_vals[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV19.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV19.py new file mode 100644 index 000000000..542dccced --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV19.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV19: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + best_vals = [] + for _ in range(50): # Increase the number of optimization runs to 50 + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + best_vals.append(self.f_opt) + + best_idx = np.argmin(best_vals) + self.f_opt = best_vals[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV2.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV2.py new file mode 100644 index 000000000..d982cc5e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV2.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV2: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV20.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV20.py new file mode 100644 index 000000000..74cfacd67 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV20.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV20: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + best_aooc = 1.0 + for _ in range(50): # Increase the number of optimization runs to 50 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(10): # Increase the number of iterations within each optimization run + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + # Update the best AOCC and optimal solution + if self.f_opt < best_aooc: + best_aooc = self.f_opt + best_x_opt = self.x_opt + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV21.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV21.py new file mode 100644 index 000000000..b6b5e9526 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV21.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV21: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = 1.0 + best_x_opt = None + + for _ in range(50): # Increase the number of optimization runs to 50 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20): # Increase the number of iterations within each optimization run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC and optimal solution + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + + return best_aooc, best_x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV22.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV22.py new file mode 100644 index 000000000..ae8e82aff --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV22.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV22: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def check_premature_convergence(self, f_vals): + sorted_vals = np.sort(f_vals) + diff = np.diff(sorted_vals) + quartile = np.percentile(diff, 75) # 75th percentile of the differences + return quartile < self.epsilon + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(len(population)): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + self.update_parameters(t) + + if self.check_premature_convergence(f_vals): + break + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV23.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV23.py new file mode 100644 index 000000000..4cac14bc2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV23.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV23: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = 1.0 + best_x_opt = None + best_std = np.Inf + + for _ in range(100): # Increased the number of optimization runs to 100 for better convergence + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range( + 40 + ): # Increased the number of iterations within each optimization run for better exploration + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV24.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV24.py new file mode 100644 index 000000000..32cf1a301 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV24.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV24: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(len(population)): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + self.update_parameters(t) + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV25.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV25.py new file mode 100644 index 000000000..358fb4f90 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV25.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV25: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t): + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + + for i in range(len(population)): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < f_vals[best_idx]: + best_pos = population[best_idx] + + self.update_parameters(t) + + return population, f_vals, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + population, f_vals, best_pos = self.evolve_population(population, f_vals, func) + + self.f_opt = np.min(f_vals) + self.x_opt = best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV3.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV3.py new file mode 100644 index 000000000..ef5b2c363 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV3.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV3: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV30.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV30.py new file mode 100644 index 000000000..d23750470 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV30.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV30: + def __init__( + self, + budget=1500, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=200, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(10000): # Increased the number of optimization runs to 10000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(1500): # Increased the number of iterations within each optimization run to 1500 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV31.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV31.py new file mode 100644 index 000000000..0971a2fc6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV31.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV31: + def __init__( + self, + budget=2000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=250, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(150): # Increased the number of optimization runs to 150 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(2000): # Increased the number of iterations within each optimization run to 2000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV32.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV32.py new file mode 100644 index 000000000..2c3231cea --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV32.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV32: + def __init__( + self, + budget=3000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=300, + elite_percentage=0.1, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + self.elite_size = int(elite_percentage * population_size) + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(200): # Increased the number of optimization runs to 200 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(3000): # Increased the number of iterations within each optimization run to 3000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV4.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV4.py new file mode 100644 index 000000000..a8375559a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV4.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV4: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos): + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < self.alpha_max: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV6.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV6.py new file mode 100644 index 000000000..3e209e46e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV6.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV6: + def __init__( + self, budget=1000, G0=100.0, alpha_min=0.1, alpha_max=0.9, delta=0.1, gamma=0.1, eta=0.01, epsilon=0.1 + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + + def initialize_population(self, population_size, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population_size = 20 + population = self.initialize_population(population_size, func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV7.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV7.py new file mode 100644 index 000000000..711a57fc3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV7.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV7: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=20, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV8.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV8.py new file mode 100644 index 000000000..bdf7135bf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV8.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV8: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.2, + alpha_max=0.8, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=30, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV9.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV9.py new file mode 100644 index 000000000..dfa035d51 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmIntelligenceV9.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmIntelligenceV9: + def __init__( + self, + budget=1000, + G0=100.0, + alpha_min=0.1, + alpha_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.01, + epsilon=0.1, + population_size=50, + ): + self.budget = budget + self.G0 = G0 + self.alpha_min = alpha_min + self.alpha_max = alpha_max + self.delta = delta + self.gamma = gamma + self.eta = eta + self.epsilon = epsilon + self.population_size = population_size + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.eta * t) + + def update_alpha(self, t): + return self.alpha_min + (self.alpha_max - self.alpha_min) * np.exp(-self.delta * t) + + def update_population(self, population, f_vals, func, G, best_pos, t): + alpha_t = self.update_alpha(t) + for i in range(len(population)): + for j in range(len(population)): + if np.random.rand() < alpha_t: + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha_max = self.update_alpha(t) + self.delta = 1 / (1 + np.exp(-self.gamma * (avg_f - np.min(f_vals) + self.epsilon))) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G, best_pos, t) + self.update_parameters(t, f_vals) + + for i in range(len(population)): + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + return best_val, best_pos + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + self.f_opt, self.x_opt = self.evolve_population(population, f_vals, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDiversityPreservation.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDiversityPreservation.py new file mode 100644 index 000000000..ab307b90d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDiversityPreservation.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedGravitationalSwarmOptimizationWithDiversityPreservation: + def __init__(self, budget=5000, G0=100.0, alpha=0.2, delta=0.1, gamma=0.2, population_size=200, rho=0.1): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho = rho + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < self.rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(10000): # Increase the number of optimization runs to 10000 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(20000): # Increase the number of iterations within each optimization run to 20000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2.py new file mode 100644 index 000000000..b27bc7a19 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2: + def __init__( + self, + budget=5000, + G0=100.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(10): # Increase the number of optimization runs to 10 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(1000): # Decrease the number of iterations within each optimization run to 1000 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3.py b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3.py new file mode 100644 index 000000000..e866f654e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3: + def __init__( + self, + budget=5000, + G0=150.0, + alpha=0.2, + delta=0.1, + gamma=0.2, + population_size=200, + rho_min=0.1, + rho_max=0.5, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + t) + + def update_parameters(self, t, f_vals): + avg_f = np.mean(f_vals) + self.G0 = self.update_G(t) + self.alpha = self.alpha * np.exp(-self.delta * t) + self.gamma = self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * ( + 1 - t / self.budget + ) # Dynamic diversity preservation + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + self.update_parameters(t, f_vals) + + # Diversity preservation + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(20): # Increase the number of optimization runs to 20 for more robustness + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(750): # Adjust the number of iterations within each optimization run to 750 + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + # Update the best AOCC, optimal solution, and standard deviation + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v62.py b/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v62.py new file mode 100644 index 000000000..8568e8acc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v62.py @@ -0,0 +1,105 @@ +import numpy as np + + +class EnhancedGuidedMassQGSA_v62: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v94.py b/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v94.py new file mode 100644 index 000000000..140581fa2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedGuidedMassQGSA_v94.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedGuidedMassQGSA_v94: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedHarmonicFireworkAlgorithm.py new file mode 100644 index 000000000..b995efd6b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicFireworkAlgorithm.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedHarmonicFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.5, beta=2.0, mutation_rate=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.mutation_rate = mutation_rate + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self, func): + self.fireworks = np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + self.firework_fitness = np.array([func(x) for x in self.fireworks]) + + def explode_firework(self, firework, func): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + sparks_fitness = np.array([func(x) for x in sparks]) + return sparks, sparks_fitness + + def apply_mutation(self, sparks): + mutated_sparks = sparks + np.random.normal(0, self.mutation_rate, sparks.shape) + return np.clip(mutated_sparks, self.bounds[0], self.bounds[1]) + + def update_fireworks(self, sparks, sparks_fitness): + for i in range(self.n_fireworks): + if i < len(sparks) and sparks_fitness[i] < self.firework_fitness[i]: + self.fireworks[i] = sparks[i] + self.firework_fitness[i] = sparks_fitness[i] + + def adapt_parameters(self): + self.alpha = self.alpha * 0.95 + self.beta = self.beta * 0.9 + self.mutation_rate = max(self.mutation_rate * 0.9, 0.01) + + def local_search(self, func): + for i in range(self.n_fireworks): + best_firework = self.fireworks[i].copy() + best_fitness = self.firework_fitness[i] + + for _ in range(3): + new_firework = self.fireworks[i] + np.random.normal(0, 0.1, self.dim) + new_fitness = func(new_firework) + + if new_fitness < best_fitness: + best_firework = new_firework + best_fitness = new_fitness + + self.fireworks[i] = best_firework + self.firework_fitness[i] = best_fitness + + def __call__(self, func): + self.initialize_fireworks(func) + + for _ in range(int(self.budget / self.n_fireworks)): + for i in range(self.n_fireworks): + sparks, sparks_fitness = self.explode_firework(self.fireworks[i], func) + mutated_sparks = self.apply_mutation(sparks) + self.update_fireworks(mutated_sparks, sparks_fitness) + + self.adapt_parameters() + self.local_search(func) + + best_idx = np.argmin(self.firework_fitness) + if self.firework_fitness[best_idx] < self.f_opt: + self.f_opt = self.firework_fitness[best_idx] + self.x_opt = self.fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicLevyDolphinOptimization.py b/nevergrad/optimization/lama/EnhancedHarmonicLevyDolphinOptimization.py new file mode 100644 index 000000000..4362c8712 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicLevyDolphinOptimization.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedHarmonicLevyDolphinOptimization: + def __init__( + self, budget=1000, num_dolphins=20, num_dimensions=5, alpha=0.1, beta=0.5, gamma=0.1, delta=0.2 + ): + self.budget = budget + self.num_dolphins = num_dolphins + self.num_dimensions = num_dimensions + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_dolphins, self.num_dimensions)) + + def levy_flight(self): + sigma = 1.0 + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1.5) + return step + + def move_dolphin(self, current_position, best_position, previous_best_position, bounds): + step = ( + self.alpha * (best_position - current_position) + + self.beta * (previous_best_position - current_position) + + self.gamma * self.levy_flight() + ) + new_position = current_position + step + new_position = np.clip(new_position, bounds.lb, bounds.ub) + return new_position + + def update_parameters(self, iteration): + self.alpha = max(0.01, self.alpha * (1 - 0.9 * iteration / self.budget)) + self.beta = min(0.9, self.beta + 0.1 * iteration / self.budget) + self.gamma = max(0.01, self.gamma * (1 - 0.8 * iteration / self.budget)) + + def levy_harmonic_search(self, func, position): + new_position = position + self.levy_flight() + new_position = np.clip(new_position, func.bounds.lb, func.bounds.ub) + if func(new_position) < func(position): + return new_position + else: + return position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + positions = self.initialize_positions(bounds) + best_position = positions[0].copy() + previous_best_position = best_position.copy() + + for i in range(self.budget): + self.update_parameters(i) + for j in range(self.num_dolphins): + new_position = self.move_dolphin(positions[j], best_position, previous_best_position, bounds) + new_position = self.levy_harmonic_search(func, new_position) + + if func(new_position) < func(positions[j]): + positions[j] = new_position + if func(new_position) < func(best_position): + best_position = new_position.copy() + + previous_best_position = best_position + + self.f_opt = func(best_position) + self.x_opt = best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizer.py b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizer.py new file mode 100644 index 000000000..8f05c0c39 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedHarmonicSearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=2.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.1 + else: + return bandwidth * 0.9 + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV2.py b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV2.py new file mode 100644 index 000000000..3010a9528 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedHarmonicSearchOptimizerV2: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.05 + else: + return bandwidth * 0.95 + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV3.py b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV3.py new file mode 100644 index 000000000..3bf381fe6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV3.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedHarmonicSearchOptimizerV3: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / (1 + iter_count) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.05 + else: + return bandwidth * 0.95 + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV4.py b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV4.py new file mode 100644 index 000000000..1d606fbb7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV4.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedHarmonicSearchOptimizerV4: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.1 + else: + return bandwidth * 0.9 + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV5.py b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV5.py new file mode 100644 index 000000000..0a4efc39b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSearchOptimizerV5.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedHarmonicSearchOptimizerV5: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return bandwidth * 1.1 + else: + return bandwidth * 0.9 + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return 1.0 - self.memory_update_rate + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimization.py new file mode 100644 index 000000000..8eaff97d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimization.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedHarmonicSwarmOptimization: + def __init__( + self, budget=1000, num_particles=20, num_dimensions=5, harmony_memory_rate=0.6, pitch_adjust_rate=0.5 + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + new_solution[i] = np.clip( + new_solution[i] + np.random.uniform(-0.1, 0.1), bounds.lb[i], bounds.ub[i] + ) + + if func(new_solution) < func(solution): + return new_solution + else: + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV2.py new file mode 100644 index 000000000..3842a0550 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV2.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedHarmonicSwarmOptimizationV2: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + if np.random.rand() < self.local_search_prob: + for i in range(self.num_dimensions): + new_solution[i] = np.clip( + new_solution[i] + np.random.uniform(-0.1, 0.1), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV3.py new file mode 100644 index 000000000..e27f403bb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV3.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedHarmonicSwarmOptimizationV3: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, 0.5), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV4.py new file mode 100644 index 000000000..d12d56378 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicSwarmOptimizationV4.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedHarmonicSwarmOptimizationV4: + def __init__( + self, + budget=1000, + num_particles=20, + num_dimensions=5, + harmony_memory_rate=0.6, + pitch_adjust_rate=0.5, + local_search_prob=0.5, + step_size=0.2, + ): + self.budget = budget + self.num_particles = num_particles + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.local_search_prob = local_search_prob + self.step_size = step_size + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, self.num_dimensions)) + + def generate_new_solution(self, memory_matrix, pitch_matrix, bounds): + new_solution = np.zeros_like(memory_matrix[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjust_rate: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + index = np.random.randint(self.num_particles) + new_solution[i] = memory_matrix[index, i] + + return new_solution + + def local_search(self, solution, func, bounds): + new_solution = solution.copy() + for i in range(self.num_dimensions): + if np.random.rand() < self.local_search_prob: + new_solution[i] = np.clip( + new_solution[i] + np.random.normal(0, self.step_size), bounds.lb[i], bounds.ub[i] + ) + if func(new_solution) < func(solution): + return new_solution + return solution + + def update_memory_matrix(self, memory_matrix, new_solution, func): + worst_index = np.argmax([func(solution) for solution in memory_matrix]) + if func(new_solution) < func(memory_matrix[worst_index]): + memory_matrix[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + memory_matrix = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(memory_matrix, memory_matrix, bounds) + new_solution = self.local_search(new_solution, func, bounds) + self.update_memory_matrix(memory_matrix, new_solution, func) + + if func(new_solution) < self.f_opt: + self.f_opt = func(new_solution) + self.x_opt = new_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV11.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV11.py new file mode 100644 index 000000000..7ae380318 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV11.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV11: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + self.bandwidth_decay = 0.95 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.95 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.05 + self.success_count = 0 + self.bandwidth *= self.bandwidth_decay + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV13.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV13.py new file mode 100644 index 000000000..e37259b77 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV13.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV13: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV14.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV14.py new file mode 100644 index 000000000..a4a385e3f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV14.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV14: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV15.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV15.py new file mode 100644 index 000000000..90607682a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV15.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV15: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV16.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV16.py new file mode 100644 index 000000000..a6248aa11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV16.py @@ -0,0 +1,105 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV16: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, 0.1, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV19.py b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV19.py new file mode 100644 index 000000000..f0f649ae2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonicTabuSearchV19.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHarmonicTabuSearchV19: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=5, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, 0.1, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if i % 50 == 0: # Diversify the search every 50 iterations + self.diversify_search(harmony_memory, bounds) + if i % 100 == 0: # Perform local search every 100 iterations + self.local_search(harmony_memory, best_harmony, func, bounds) + if i % 75 == 0: # Perturb solutions every 75 iterations + for j in range(self.num_harmonies): + harmony_memory[j] = self.perturb_solution(harmony_memory[j], bounds) + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyDiversifiedCuckooAlgorithm.py b/nevergrad/optimization/lama/EnhancedHarmonyDiversifiedCuckooAlgorithm.py new file mode 100644 index 000000000..08c17203e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyDiversifiedCuckooAlgorithm.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedHarmonyDiversifiedCuckooAlgorithm: + def __init__(self, budget=10000, population_size=20, dim=5, pa=0.25, beta=1.5, gamma=0.01, alpha=0.95): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another cuckoo + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + # Further exploration by random perturbation + new_harmony[i] += np.random.uniform(-0.1, 0.1, self.dim) + + self.population = new_harmony + + def __call__(self, func): + for _ in range(self.budget): + self.update_population(func) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonyFireworkOptimizer.py b/nevergrad/optimization/lama/EnhancedHarmonyFireworkOptimizer.py new file mode 100644 index 000000000..4acdc2808 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyFireworkOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedHarmonyFireworkOptimizer: + def __init__( + self, + budget=10000, + population_size=20, + dim=5, + bw=0.1, + sr=0.3, + amp_min=0.5, + amp_max=2.0, + memory_rate=0.4, + ): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.bw = bw # bandwidth for mutation + self.sr = sr # success rate of mutation + self.amp_min = amp_min # minimum explosion amplitude + self.amp_max = amp_max # maximum explosion amplitude + self.memory_rate = memory_rate # rate of retaining memory + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.inf + self.best_solution = None + + def calculate_fitness(self, func, solution): + return func(solution) + + def mutate_solution(self, solution): + mutated_solution = np.clip(solution + np.random.normal(0, self.bw, self.dim), -5.0, 5.0) + return mutated_solution + + def firework_explosion(self, solution): + explosion_amp = np.random.uniform(self.amp_min, self.amp_max) + new_solution = solution + np.random.uniform(-1, 1, self.dim) * explosion_amp + return new_solution + + def update_population(self, func, memory): + for i in range(self.population_size): + mutated_solution = self.mutate_solution(self.population[i]) + if np.random.rand() < self.sr: + new_solution = mutated_solution + else: + new_solution = self.firework_explosion(self.population[i]) + + new_fitness = self.calculate_fitness(func, new_solution) + if new_fitness < self.calculate_fitness(func, self.population[i]): + self.population[i] = new_solution + + if new_fitness < self.best_fitness: + self.best_fitness = new_fitness + self.best_solution = new_solution + + if new_fitness < memory[i]: + memory[i] = new_fitness + + def __call__(self, func): + memory = np.full(self.population_size, np.inf) + for itr in range(1, self.budget + 1): + self.update_population(func, memory) + + aocc = 1 - np.std(memory) / np.mean(memory) if np.mean(memory) != 0 else 0 + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV2.py new file mode 100644 index 000000000..28487f4cb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV2.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedHarmonyMemeticAlgorithmV2: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=30, memetic_prob=0.5): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV3.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV3.py new file mode 100644 index 000000000..80beb3bb9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV3.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedHarmonyMemeticAlgorithmV3: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=50, memetic_prob=0.7): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV4.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV4.py new file mode 100644 index 000000000..e2172dca4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticAlgorithmV4.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedHarmonyMemeticAlgorithmV4: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=100, memetic_prob=0.8): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV10.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV10.py new file mode 100644 index 000000000..c60bd9cba --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV10.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV10: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV11.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV11.py new file mode 100644 index 000000000..df4327af5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV11.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV11: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV12.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV12.py new file mode 100644 index 000000000..8d0ca23b9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV12.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV12: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV13.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV13.py new file mode 100644 index 000000000..64c0d6386 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV13.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV13: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV14.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV14.py new file mode 100644 index 000000000..2676787dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV14.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV14: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + pitch_bandwidth=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.uniform( + -self.memetic_step, self.memetic_step, size=self.dim + ) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV15.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV15.py new file mode 100644 index 000000000..18f852957 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV15.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV15: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.95, + pitch_bandwidth=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.uniform( + -self.memetic_step, self.memetic_step, size=self.dim + ) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV16.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV16.py new file mode 100644 index 000000000..864ddbf13 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV16.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV16: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.9, + pitch_bandwidth=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.uniform( + -self.memetic_step, self.memetic_step, size=self.dim + ) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV17.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV17.py new file mode 100644 index 000000000..379943220 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV17.py @@ -0,0 +1,99 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV17: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.8, + pitch_bandwidth=0.5, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.uniform(-self.pitch_bandwidth, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.uniform( + -self.memetic_step, self.memetic_step, size=self.dim + ) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV34.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV34.py new file mode 100644 index 000000000..d2c6f4f1f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticOptimizationV34.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedHarmonyMemeticOptimizationV34: + def __init__( + self, + budget=10000, + memory_size=50, + pitch_adjustment_rate=0.7, + pitch_bandwidth=0.3, + local_search_prob=0.8, + ): + self.budget = budget + self.dim = 5 + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.pitch_bandwidth = pitch_bandwidth + self.local_search_prob = local_search_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, self.pitch_bandwidth) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(50): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_pitch_parameters(self, success_rate, convergence_rate): + if success_rate > 0.5: + self.pitch_adjustment_rate += 0.05 + else: + self.pitch_adjustment_rate -= 0.05 + + if convergence_rate > 0.8: + self.pitch_bandwidth *= 1.1 + else: + self.pitch_bandwidth *= 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + success_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + convergence_rate = np.sum(np.array(harmony_memory_costs) < new_cost) / len(harmony_memory_costs) + + self._adapt_pitch_parameters(success_rate, convergence_rate) + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearch.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearch.py new file mode 100644 index 000000000..d66bb0d79 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearch.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedHarmonyMemeticSearch: + def __init__( + self, budget=10000, hmcr=0.9, par=0.3, bw=0.5, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + min_idx = np.argmin(harmony_memory_costs) + if new_cost < harmony_memory_costs[min_idx]: + harmony_memory[min_idx] = new_harmony + harmony_memory_costs[min_idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV2.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV2.py new file mode 100644 index 000000000..0ea22d29a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV2.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedHarmonyMemeticSearchV2: + def __init__( + self, budget=10000, hmcr=0.9, par=0.3, bw=0.5, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + min_idx = np.argmin(harmony_memory_costs) + if new_cost < harmony_memory_costs[min_idx]: + harmony_memory[min_idx] = new_harmony + harmony_memory_costs[min_idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV3.py b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV3.py new file mode 100644 index 000000000..215e7937b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyMemeticSearchV3.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedHarmonyMemeticSearchV3: + def __init__( + self, budget=10000, hmcr=0.9, par=0.3, bw=0.5, memetic_iter=1000, memetic_prob=0.9, memetic_step=0.1 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonySearchOB.py b/nevergrad/optimization/lama/EnhancedHarmonySearchOB.py new file mode 100644 index 000000000..558321346 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonySearchOB.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedHarmonySearchOB: + def __init__(self, budget=10000, harmony_memory_size=20, hmcr=0.9, par=0.4, bw=0.5, bw_decay=0.95): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + self.bw *= self.bw_decay # Decay the bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py b/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py new file mode 100644 index 000000000..7e3453733 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=10, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: # Introduce Adaptive Levy Flight + levy = self.generate_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_adaptive_levy_flight(self, dimension): + beta = 1.5 # Initial beta value + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.2 # Adjust beta for next iteration + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + return levy diff --git a/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightV2.py b/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightV2.py new file mode 100644 index 000000000..208db5f75 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonySearchWithAdaptiveLevyFlightV2.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedHarmonySearchWithAdaptiveLevyFlightV2: + def __init__(self, budget, harmony_memory_size=20, global_best_rate=0.1, leviness=1.5): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.global_best_rate = global_best_rate + self.leviness = leviness + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(size=self.harmony_memory_size, dimension=len(func.bounds.lb)) + new_harmony += levy + + new_harmony = np.clip(new_harmony, func.bounds.lb, func.bounds.ub) + + return new_harmony + + def generate_levy_flight(self, size, dimension): + levy = np.zeros((size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1.0 + self.leviness) + * np.sin(np.pi * self.leviness / 2) + / (np.math.gamma(1.0 + 2 * self.leviness) * (self.leviness**0.5)) + ) ** (1.0 / self.leviness) + + for i in range(size): + for j in range(dimension): + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / (np.abs(v) ** (1.0 / self.leviness) + epsilon) + levy[i, j] = step + + return levy diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimization.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimization.py new file mode 100644 index 000000000..81953b182 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimization.py @@ -0,0 +1,64 @@ +import numpy as np + + +class EnhancedHarmonyTabuOptimization: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + + for _ in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV2.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV2.py new file mode 100644 index 000000000..447ba05d8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV2.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedHarmonyTabuOptimizationV2: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - iteration * 0.0001) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV3.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV3.py new file mode 100644 index 000000000..8876595f9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuOptimizationV3.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedHarmonyTabuOptimizationV3: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.2 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearch.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearch.py new file mode 100644 index 000000000..2b4393c76 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearch.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearch: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, best_solution, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration, num_improvements): + if num_improvements == 0: + self.pitch_adjustment_rate = min(0.9, self.pitch_adjustment_rate + 0.1) + else: + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.1) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution( + harmony_memory, self.x_opt, bounds, tabu_list + ) + if new_solution_str not in tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i, num_improvements) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV2.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV2.py new file mode 100644 index 000000000..143479500 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV2.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearchV2: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, best_solution, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration, num_improvements): + if num_improvements == 0: + self.pitch_adjustment_rate = min(0.9, self.pitch_adjustment_rate + 0.05) + else: + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.05) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution( + harmony_memory, self.x_opt, bounds, tabu_list + ) + if new_solution_str not in tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i, num_improvements) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV3.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV3.py new file mode 100644 index 000000000..ff3cdd53c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV3.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearchV3: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, best_solution, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration, num_improvements): + if num_improvements == 0 or (iteration > 0 and iteration % (self.budget // 10) == 0): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.05) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution( + harmony_memory, self.x_opt, bounds, tabu_list + ) + if new_solution_str not in tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i, num_improvements) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV4.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV4.py new file mode 100644 index 000000000..d992d9c4b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV4.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearchV4: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV6.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV6.py new file mode 100644 index 000000000..85ad1e53b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV6.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearchV6: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV7.py b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV7.py new file mode 100644 index 000000000..6f453466f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHarmonyTabuSearchV7.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedHarmonyTabuSearchV7: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + tabu_ratio=0.1, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.success_ratio = 0.5 + self.min_success_ratio = 0.3 + self.max_success_ratio = 0.7 + self.success_count = 0 + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > int(self.tabu_ratio * self.budget): # Update tabu list size + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def adjust_parameters(self): + if self.iteration % 100 == 0: + if self.success_count < self.num_harmonies * self.min_success_ratio: + self.pitch_adjustment_rate *= 0.9 + elif self.success_count > self.num_harmonies * self.max_success_ratio: + self.pitch_adjustment_rate *= 1.1 + self.success_count = 0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + new_solution = self.generate_new_solution(harmony_memory, self.x_opt, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + + if best_score < func(self.x_opt): + self.success_count += 1 + + self.adjust_parameters() + self.iteration += 1 + + return 1.0 - (self.f_opt - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHierarchicalCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/EnhancedHierarchicalCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..b9a9b7f80 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHierarchicalCovarianceMatrixAdaptation.py @@ -0,0 +1,149 @@ +import numpy as np + + +class EnhancedHierarchicalCovarianceMatrixAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + return x - self.learning_rate * grad + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to elite individuals in the population + for i in range(len(pop)): + pop[i] = self.__gradient_local_search(func, pop[i]) + if func(pop[i]) < scores[i]: + scores[i] = func(pop[i]) + + # Update global best after local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..426b16557 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveDifferentialEvolution.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedHybridAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + eval_count = population_size + phase_switch_threshold = self.budget // 2 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = initial_F * (1 - eval_count / budget) + adaptive_CR = initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if eval_count >= phase_switch_threshold: + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Combine exploration and exploitation phases + if eval_count >= phase_switch_threshold: + # Combine with Quantum-inspired with a probability + for i in range(population_size): + if np.random.rand() < 0.5: + new_population[i] = quantum_position_update(new_population[i], best_position) + new_population[i] = np.clip(new_population[i], self.lower_bound, self.upper_bound) + new_fitness[i] = func(new_population[i]) + eval_count += 1 + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedHybridAdaptiveDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveExplorationOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveExplorationOptimizer.py new file mode 100644 index 000000000..1e1d3e102 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveExplorationOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np + + +class EnhancedHybridAdaptiveExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + # Enhanced exploration using adaptive exploration factor + if i % 100 == 0 and i > 0: # Every 100 iterations, enhance exploration + exploration_factor = min( + 0.5, exploration_factor * 1.1 + ) # Gradually increase exploration factor + for idx in range(swarm_size): + new_position = positions[idx] + exploration_factor * np.random.uniform(-1, 1, self.dim) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedHybridAdaptiveExplorationOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveGeneticSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveGeneticSwarmOptimizer.py new file mode 100644 index 000000000..ab5be28a8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveGeneticSwarmOptimizer.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedHybridAdaptiveGeneticSwarmOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.85 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.8 + self.social_coeff = 1.8 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.cr = 0.9 + self.min_std_dev = 1e-5 # Minimum standard deviation for convergence check + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (best_individual - population[i]) + + self.social_coeff * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + if np.std(fitness) < self.min_std_dev: + break + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveHarmonicFireworksTabuSearch.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveHarmonicFireworksTabuSearch.py new file mode 100644 index 000000000..ebe7c58d8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveHarmonicFireworksTabuSearch.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedHybridAdaptiveHarmonicFireworksTabuSearch: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.03 + self.bandwidth *= 0.93 + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def enhance_search(self, harmony_memory, best_solution, func, bounds): + self.diversify_search(harmony_memory, bounds) + self.local_search(harmony_memory, best_solution, func, bounds) + + def hybrid_search(self, harmony_memory, best_solution, func, bounds): + self.enhance_search(harmony_memory, best_solution, func, bounds) + self.adaptive_tabu_search(harmony_memory, best_solution, func, bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.hybrid_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveMemoryAnnealing.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMemoryAnnealing.py new file mode 100644 index 000000000..23a5888a2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMemoryAnnealing.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedHybridAdaptiveMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.95 # Faster cooling rate for more aggressive convergence + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta for better exploration-exploitation balance + if evaluations < self.budget / 4: + beta = 2.0 # Higher exploration phase + elif evaluations < self.budget / 2: + beta = 1.5 # Balanced phase + elif evaluations < 3 * self.budget / 4: + beta = 1.0 # Transition to exploitation + else: + beta = 2.5 # Higher acceptance for local search refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiPhaseEvolution.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiPhaseEvolution.py new file mode 100644 index 000000000..64ce1d805 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiPhaseEvolution.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedHybridAdaptiveMultiPhaseEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..ab60307b7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class EnhancedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.7 + self.initial_CR = 0.9 + self.elite_rate = 0.25 + self.local_search_rate = 0.4 + self.memory_size = 30 + self.w = 0.7 + self.c1 = 1.5 + self.c2 = 1.5 + self.phase_switch_ratio = 0.4 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveQuantumOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveQuantumOptimizer.py new file mode 100644 index 000000000..620a55c50 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveQuantumOptimizer.py @@ -0,0 +1,116 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedHybridAdaptiveQuantumOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * adaptive_factor) + else: + adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * adaptive_factor) + + if eval_count < self.budget and np.random.rand() < self.local_search_probability: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedHybridAdaptiveQuantumOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveSearch.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveSearch.py new file mode 100644 index 000000000..b58a254dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveSearch.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedHybridAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is explicitly set + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + population_size = 200 + elite_size = int(0.1 * population_size) + mutation_rate = 0.2 + mutation_scale = lambda t: 0.1 * np.exp(-0.0005 * t) # Slower decay to provide exploration longer + crossover_rate = 0.95 + + # Adapt local search probability based on remaining budget + local_search_base_prob = 0.05 + local_search_decay_rate = 0.0001 + + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + # Track best solution found + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + new_population = [] + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + # Generation loop + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + # Crossover + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + # Mutation + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + # Local search with adaptive probability + local_search_prob = local_search_base_prob * np.exp(-local_search_decay_rate * evaluations) + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) # Random direction + step_size = 0.05 # Smaller step size for finer local search + local_candidate = child + step_size * direction + local_candidate = np.clip(local_candidate, self.lb, self.ub) + if func(local_candidate) < func(child): + child = local_candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + # Combine elites with new generation + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + # Update best solution if found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..cb3e7184d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased population size for better diversity + self.initial_F = 0.5 # Standard DE mutation factor + self.initial_CR = 0.9 # Standard DE crossover rate + self.self_adaptive_rate = 0.1 # Rate of change for F and CR + self.elite_rate = 0.1 # Elite retention rate + self.memory_size = 20 # Memory size for adaptive parameters + self.adaptive_phase_ratio = 0.7 # More budget for DE-based phase + self.local_search_rate = 0.2 # Local search probability + self.alpha = 0.6 # Differential weight for local search + self.ring_topology_radius = 3 # Neighborhood radius for ring topology + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 # Step size for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (self.self_adaptive_rate * np.random.randn()) + adaptive_CR = memory_CR[idx] + (self.self_adaptive_rate * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def ring_topology_neighbors(index): + neighbors = [ + (index + i) % self.population_size + for i in range(-self.ring_topology_radius, self.ring_topology_radius + 1) + if i != 0 + ] + return neighbors + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + neighbors = ring_topology_neighbors(i) + a, b, c = population[np.random.choice(neighbors, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + 0.5 * velocities[i] + + 1.5 * r1 * (personal_best_positions[i] - population[i]) + + 1.5 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedHybridCMAESDE.py b/nevergrad/optimization/lama/EnhancedHybridCMAESDE.py new file mode 100644 index 000000000..61b9a2e8b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridCMAESDE.py @@ -0,0 +1,183 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedHybridCMAESDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 50 + self.strategy_weights = np.ones(3) + self.strategy_success = np.zeros(3) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 20 + self.dynamic_parameters_adjustment_threshold = 30 + self.pop_shrink_factor = 0.1 + self.diversification_period = 50 + self.sigma = 0.3 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_cmaes(self, population, cma_es): + z = np.random.randn(self.dim) + return cma_es.mean + self.sigma * cma_es.cov.dot(z) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _diversify_population(self, population, fitness, func): + num_new_individuals = int(self.pop_size * 0.1) # 10% of the population + new_individuals = np.random.uniform(self.lb, self.ub, (num_new_individuals, self.dim)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + self.evaluations += num_new_individuals + + combined_population = np.vstack((population, new_individuals)) + combined_fitness = np.hstack((fitness, new_fitness)) + + best_indices = np.argsort(combined_fitness)[: self.pop_size] + return combined_population[best_indices], combined_fitness[best_indices] + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + cma_es = CMAES(self.dim, self.lb, self.ub) + + iteration = 0 + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3 = np.random.choice(indices, 3, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + else: # strategy == self._mutation_cmaes + donor = self._mutation_cmaes(population, cma_es) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes].index( + strategy + ) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + cma_es.update(population, fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 3) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self._dynamic_parameters() + + if self.no_improvement_count >= self.dynamic_adjustment_period: + new_pop_size = max(20, int(self.pop_size * (1 - self.pop_shrink_factor))) + population = population[:new_pop_size] + fitness = fitness[:new_pop_size] + self.pop_size = new_pop_size + self.no_improvement_count = 0 + + if iteration % self.diversification_period == 0 and self.evaluations < self.budget: + population, fitness = self._diversify_population(population, fitness, func) + + iteration += 1 + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt + + +class CMAES: + def __init__(self, dim, lb, ub): + self.dim = dim + self.lb = lb + self.ub = ub + self.mean = np.random.uniform(self.lb, self.ub, self.dim) + self.cov = np.eye(self.dim) + self.sigma = 0.5 + + def update(self, population, fitness): + best_idx = np.argmin(fitness) + self.mean = population[best_idx] + cov_update = np.cov(population.T) + self.cov = 0.9 * self.cov + 0.1 * cov_update diff --git a/nevergrad/optimization/lama/EnhancedHybridCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedHybridCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..e9d291210 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedHybridCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + self.alpha_levy = 0.01 # Levy flight parameter + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(population, fitness): + """Switch strategy based on current performance.""" + strategy = "default" + if self.eval_count < self.budget * 0.33: + strategy = "explorative" + self.F = 0.9 + self.CR = 0.9 + elif self.eval_count < self.budget * 0.66: + strategy = "balanced" + self.F = 0.7 + self.CR = 0.85 + else: + strategy = "exploitative" + self.F = 0.5 + self.CR = 0.75 + return strategy + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < 0.2: + population[i] = levy_flight_step(population[i]) + return population + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching(population, fitness) + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedHybridCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedHybridDEPSOWithDynamicAdaptationV4.py b/nevergrad/optimization/lama/EnhancedHybridDEPSOWithDynamicAdaptationV4.py new file mode 100644 index 000000000..a31daf249 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridDEPSOWithDynamicAdaptationV4.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EnhancedHybridDEPSOWithDynamicAdaptationV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.6 # Adaptive inertia weight for PSO + c1 = 1.5 # Cognitive coefficient for PSO + c2 = 1.5 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridDEPSOWithQuantumLevyFlight.py b/nevergrad/optimization/lama/EnhancedHybridDEPSOWithQuantumLevyFlight.py new file mode 100644 index 000000000..c583461fa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridDEPSOWithQuantumLevyFlight.py @@ -0,0 +1,173 @@ +import numpy as np +import scipy.stats as st + + +class EnhancedHybridDEPSOWithQuantumLevyFlight: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 + w = 0.7 # Inertia weight for PSO + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.3 # Social coefficient for PSO + initial_F = 0.7 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def levy_flight(Lambda=1.5): + # Corrected the Levy flight calculation, ensuring that the step size is calculated correctly + sigma = ( + st.gamma(1 + Lambda).mean() + * np.sin(np.pi * Lambda / 2) + / (st.gamma((1 + Lambda) / 2).mean() * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.normal(0, sigma, self.dim) + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / Lambda) + return step + + def quantum_behavior(population, global_best, alpha=0.25, beta=0.75): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior and Levy flight + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity + levy_flight(), bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridDEPSOwithAdaptiveRestart.py b/nevergrad/optimization/lama/EnhancedHybridDEPSOwithAdaptiveRestart.py new file mode 100644 index 000000000..c9c454a76 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridDEPSOwithAdaptiveRestart.py @@ -0,0 +1,149 @@ +import numpy as np + + +class EnhancedHybridDEPSOwithAdaptiveRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.6 # Adjusted inertia weight for PSO + c1 = 1.2 # Adjusted cognitive coefficient for PSO + c2 = 1.2 # Adjusted social coefficient for PSO + initial_F = 0.7 # Adjusted initial differential weight for DE + initial_CR = 0.8 # Adjusted initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.4 + 0.6 * np.random.rand() # Smaller range for F to avoid too large jumps + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridDifferentialEvolutionMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridDifferentialEvolutionMemeticOptimizer.py new file mode 100644 index 000000000..ed3b5dec6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridDifferentialEvolutionMemeticOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedHybridDifferentialEvolutionMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.4 # Increased local search probability + self.F = 0.9 # Differential weight, increased for more aggressive mutations + self.CR = 0.8 # Crossover probability, slightly decreased for higher diversity + self.memory_size = 50 # Increased memory size for more stable performance tracking + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = EnhancedHybridDifferentialEvolutionMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridDynamicAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedHybridDynamicAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..0c2e0b045 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridDynamicAdaptiveExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class EnhancedHybridDynamicAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.7 # Cognitive constant + c2 = 1.7 # Social constant + w = 0.6 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.2 # Initial learning rate + beta = 0.8 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.9 # Differential weight + CR = 0.85 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.15 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.25 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Reduced Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedHybridDynamicAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedHybridExplorationOptimization.py new file mode 100644 index 000000000..db40f8de5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridExplorationOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class EnhancedHybridExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedHybridExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridGradientAnnealingWithMemory.py b/nevergrad/optimization/lama/EnhancedHybridGradientAnnealingWithMemory.py new file mode 100644 index 000000000..ef4acba35 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridGradientAnnealingWithMemory.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedHybridGradientAnnealingWithMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedHybridGradientBasedStrategyV8.py b/nevergrad/optimization/lama/EnhancedHybridGradientBasedStrategyV8.py new file mode 100644 index 000000000..2291f2959 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridGradientBasedStrategyV8.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedHybridGradientBasedStrategyV8: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjust parameters based on the tangent modulation for a more dynamic and responsive control + scale = (iteration / total_iterations) * np.pi + self.F = np.clip(0.5 + 0.5 * np.tan(scale - np.pi / 2), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.tan(np.pi / 2 - scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridGradientPSO.py b/nevergrad/optimization/lama/EnhancedHybridGradientPSO.py new file mode 100644 index 000000000..8e023681b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridGradientPSO.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedHybridGradientPSO: + def __init__( + self, + budget=10000, + population_size=50, + omega_start=0.9, + omega_end=0.4, + phi_p=0.5, + phi_g=0.5, + learning_rate=0.05, + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start + self.omega_end = omega_end + self.phi_p = phi_p + self.phi_g = phi_g + self.learning_rate = learning_rate + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_start - ((self.omega_start - self.omega_end) * evaluations / self.budget) + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + + # Update velocities + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + ) + + # Update positions + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + return global_best_score, global_best + + def estimate_gradient(self, x, objective, epsilon=1e-5): + gradient = np.zeros(self.dim) + for j in range(self.dim): + x1 = np.array(x) + x2 = np.array(x) + x1[j] += epsilon + x2[j] -= epsilon + gradient[j] = (objective(x1) - objective(x2)) / (2 * epsilon) + return gradient diff --git a/nevergrad/optimization/lama/EnhancedHybridHarmonySearchWithAdaptiveMutationV20.py b/nevergrad/optimization/lama/EnhancedHybridHarmonySearchWithAdaptiveMutationV20.py new file mode 100644 index 000000000..a8cb52c81 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridHarmonySearchWithAdaptiveMutationV20.py @@ -0,0 +1,103 @@ +import numpy as np + + +class EnhancedHybridHarmonySearchWithAdaptiveMutationV20: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.3, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.3: + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + new_harmony[:, i] = np.clip( + new_harmony[:, i] + + np.random.normal(0, self.adaptive_gaussian_std(i), self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy + + def adaptive_gaussian_std(self, iteration): + return np.exp(-1.0 * iteration / self.budget) * self.gaussian_std diff --git a/nevergrad/optimization/lama/EnhancedHybridMemoryAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedHybridMemoryAdaptiveDE.py new file mode 100644 index 000000000..288d0e302 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMemoryAdaptiveDE.py @@ -0,0 +1,136 @@ +import numpy as np + + +class EnhancedHybridMemoryAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def crowding_distance_selection(new_population, new_fitness, old_population, old_fitness): + combined_population = np.vstack((new_population, old_population)) + combined_fitness = np.hstack((new_fitness, old_fitness)) + + sorted_indices = np.argsort(combined_fitness) + combined_population = combined_population[sorted_indices] + combined_fitness = combined_fitness[sorted_indices] + + distance = np.zeros(len(combined_population)) + for i in range(self.dim): + sorted_indices = np.argsort(combined_population[:, i]) + sorted_population = combined_population[sorted_indices] + distance[sorted_indices[0]] = distance[sorted_indices[-1]] = np.inf + for j in range(1, len(combined_population) - 1): + distance[sorted_indices[j]] += sorted_population[j + 1, i] - sorted_population[j - 1, i] + + selected_indices = np.argsort(distance)[-population_size:] + return combined_population[selected_indices], combined_fitness[selected_indices] + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Crowding Distance Selection + population, fitness = crowding_distance_selection( + new_population, new_fitness, population, fitness + ) + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridMemoryPSO.py b/nevergrad/optimization/lama/EnhancedHybridMemoryPSO.py new file mode 100644 index 000000000..046a38de4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMemoryPSO.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedHybridMemoryPSO: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.min_std_dev = 1e-5 # Minimum standard deviation for convergence check + self.archive_size = 50 # Archive size for memory-based learning + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + archive_fitness = np.array([evaluate(ind) for ind in archive]) + eval_count += len(archive) + if best_fitness not in archive_fitness: + worst_index = np.argmax(archive_fitness) + if best_fitness < archive_fitness[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + if np.std(fitness) < self.min_std_dev: + break + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizer.py new file mode 100644 index 000000000..c9c04318b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=20, + differential_weight=0.6, + crossover_rate=0.8, + inertia_weight=0.6, + cognitive_weight=1.3, + social_weight=1.3, + max_velocity=0.3, + mutation_rate=0.2, + num_generations=5, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV10.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV10.py new file mode 100644 index 000000000..f7824c80a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV10.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV10: + def __init__( + self, + budget, + swarm_size=60, + differential_weight=0.6, + crossover_rate=0.85, + inertia_weight=0.75, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV11.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV11.py new file mode 100644 index 000000000..4df9731ed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV11.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV11: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.85, + inertia_weight=0.8, + cognitive_weight=1.6, + social_weight=1.6, + max_velocity=0.7, + mutation_rate=0.09, + num_generations=120, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV12.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV12.py new file mode 100644 index 000000000..bf7a09789 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV12.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV12: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.85, + inertia_weight=0.75, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.7, + mutation_rate=0.1, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV15.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV15.py new file mode 100644 index 000000000..c15b86ab7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV15.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV15: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV2.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV2.py new file mode 100644 index 000000000..1a6fda500 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV2: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.7, + mutation_rate=0.05, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV3.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV3.py new file mode 100644 index 000000000..4379e3564 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV3.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV3: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.5, + mutation_rate=0.1, + num_generations=200, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV4.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV4.py new file mode 100644 index 000000000..e79a8d4e3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV4: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.2, + social_weight=1.2, + max_velocity=0.4, + mutation_rate=0.1, + num_generations=250, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV5.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV5.py new file mode 100644 index 000000000..591373785 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV5.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV5: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.6, + mutation_rate=0.1, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV6.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV6.py new file mode 100644 index 000000000..55bef48b3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV6.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV6: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.6, + mutation_rate=0.08, + num_generations=120, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV7.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV7.py new file mode 100644 index 000000000..af6f6c785 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV7.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV7: + def __init__( + self, + budget, + swarm_size=40, + differential_weight=0.7, + crossover_rate=0.8, + inertia_weight=0.7, + cognitive_weight=1.4, + social_weight=1.4, + max_velocity=0.6, + mutation_rate=0.08, + num_generations=120, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV8.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV8.py new file mode 100644 index 000000000..1b2621bc3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV8.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV8: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.9, + inertia_weight=0.9, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.7, + mutation_rate=0.1, + num_generations=150, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV9.py b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV9.py new file mode 100644 index 000000000..1240af28c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaHeuristicOptimizerV9.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHybridMetaHeuristicOptimizerV9: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.85, + inertia_weight=0.8, + cognitive_weight=1.6, + social_weight=1.6, + max_velocity=0.8, + mutation_rate=0.12, + num_generations=200, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithm.py b/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithm.py new file mode 100644 index 000000000..38cccb033 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithm.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedHybridMetaOptimizationAlgorithm: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=20, + pitch_adjust_rate=0.7, + mutation_rate=0.2, + diversity_rate=0.3, + num_cuckoos=10, + step_size=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.pitch_adjust_rate = pitch_adjust_rate + self.mutation_rate = mutation_rate + self.diversity_rate = diversity_rate + self.num_cuckoos = num_cuckoos + self.step_size = step_size + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def pitch_adjustment(self, solution, best_solution): + new_solution = solution.copy() + for i in range(self.dim): + if np.random.rand() < self.pitch_adjust_rate: + if np.random.rand() < 0.5: + new_solution[i] = best_solution[i] + else: + new_solution[i] = np.random.uniform(-5.0, 5.0) + + return new_solution + + def fireworks_mutation(self, solution): + new_solution = solution + self.mutation_rate * np.random.normal(0, 1, self.dim) + return np.clip(new_solution, -5.0, 5.0) + + def cuckoo_search(self, solution): + cuckoo = solution + self.step_size * np.random.normal(0, 1, self.dim) + return np.clip(cuckoo, -5.0, 5.0) + + def __call__(self, func): + population = self.initialize_population() + memory = population[ + np.random.choice(range(self.population_size), self.harmony_memory_size, replace=False) + ] + fitness = [func(sol) for sol in population] + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + for _ in range(self.budget // self.population_size): + new_solution = self.pitch_adjustment( + population[np.random.randint(self.population_size)], best_solution + ) + new_solution = self.fireworks_mutation(new_solution) + population = np.vstack((population, new_solution)) + + for _ in range(self.num_cuckoos): + cuckoo_solution = self.cuckoo_search(population[np.random.randint(len(population))]) + population = np.vstack((population, cuckoo_solution)) + + fitness = [func(sol) for sol in population] + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = [func(sol) for sol in population] + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + memory = np.vstack((memory, population[: self.harmony_memory_size])) + memory_fitness = [func(sol) for sol in memory] + memory_sorted_indices = np.argsort(memory_fitness)[: self.harmony_memory_size] + memory = memory[memory_sorted_indices] + + if np.random.rand() < self.diversity_rate: + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithmV2.py new file mode 100644 index 000000000..d8120f75d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridMetaOptimizationAlgorithmV2.py @@ -0,0 +1,54 @@ +import numpy as np + + +class EnhancedHybridMetaOptimizationAlgorithmV2: + def __init__(self, budget=10000, num_pop=10, num_children=5, mutation_rate=0.1): + self.budget = budget + self.num_pop = num_pop + self.num_children = num_children + self.mutation_rate = mutation_rate + self.dim = 5 + self.population = np.random.uniform(-5.0, 5.0, size=(self.num_pop, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def mutate_solution(self, solution): + mutated_solution = solution + np.random.normal(0, self.mutation_rate, size=self.dim) + return np.clip(mutated_solution, -5.0, 5.0) + + def evaluate_population(self, func): + fitness = np.array([func(sol) for sol in self.population]) + min_idx = np.argmin(fitness) + if fitness[min_idx] < self.best_fitness: + self.best_fitness = fitness[min_idx] + self.best_solution = self.population[min_idx].copy() + return fitness + + def selection(self, fitness): + idx = np.argsort(fitness)[: self.num_pop] + self.population = self.population[idx].copy() + + def recombine(self): + children = [] + for _ in range(self.num_children): + idx1, idx2 = np.random.choice(self.num_pop, 2, replace=False) + child = 0.5 * (self.population[idx1] + self.population[idx2]) + children.append(child) + return np.array(children) + + def __call__(self, func): + for _ in range(self.budget): + fitness = self.evaluate_population(func) + self.selection(fitness) + children = self.recombine() + + for i in range(self.num_children): + mutated_child = self.mutate_solution(children[i]) + fitness_child = func(mutated_child) + if fitness_child < np.max(fitness): + idx = np.argmax(fitness) + self.population[idx] = mutated_child + fitness[idx] = fitness_child + + aocc = 1 - np.std(fitness) / np.mean(fitness) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/EnhancedHybridOptimization.py b/nevergrad/optimization/lama/EnhancedHybridOptimization.py new file mode 100644 index 000000000..38f7bf473 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridOptimization.py @@ -0,0 +1,157 @@ +import numpy as np + + +class EnhancedHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.4 + self.F_max = 0.9 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + # Elitism + self.elite_fraction = 0.1 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while evaluations < self.budget: + # Elitism Preservation + elite_count = int(self.elite_fraction * self.population_size) + elites = population[np.argsort(fitness)[:elite_count]].copy() + elite_fitness = np.sort(fitness)[:elite_count].copy() + + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Update personal best + if f_candidate < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = f_candidate + + # Integrate Elitism + population[np.argsort(fitness)[-elite_count:]] = elites + fitness[np.argsort(fitness)[-elite_count:]] = elite_fitness + + # Update velocities and positions (PSO component) + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment based on Stagnation Counter + if self.stagnation_counter > self.stagnation_threshold: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + self.stagnation_counter = 0 + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + # Adjust inertia weight dynamically + self.inertia_weight = 0.4 + 0.5 * (self.budget - evaluations) / self.budget + + # Adjust population size dynamically based on performance + if self.stagnation_counter > self.stagnation_threshold * 2: + new_population_size = min(self.population_size + 10, 200) + if new_population_size > self.population_size: + new_individuals = np.random.uniform( + self.lb, self.ub, (new_population_size - self.population_size, self.dim) + ) + population = np.vstack((population, new_individuals)) + new_velocities = np.random.uniform( + -1, 1, (new_population_size - self.population_size, self.dim) + ) + velocities = np.vstack((velocities, new_velocities)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.hstack((fitness, new_fitness)) + personal_best_positions = np.vstack((personal_best_positions, new_individuals)) + personal_best_fitness = np.hstack((personal_best_fitness, new_fitness)) + self.population_size = new_population_size + evaluations += new_population_size - self.population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedHybridOptimizer.py new file mode 100644 index 000000000..9867bd1b9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridOptimizer.py @@ -0,0 +1,170 @@ +import numpy as np + + +class EnhancedHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def gradient_descent(self, x, func, budget, step_size=0.01): + best_x = x.copy() + best_f = func(x) + grad = np.zeros(self.dim) + for _ in range(budget): + for i in range(self.dim): + x_plus = x.copy() + x_plus[i] += step_size + f_plus = func(x_plus) + grad[i] = (f_plus - best_f) / step_size + + x = np.clip(x - step_size * grad, self.bounds[0], self.bounds[1]) + f = func(x) + if f < best_f: + best_x = x + best_f = f + + return best_x, best_f + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + self.eval_count = self.init_pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # Adaptive parameter adjustment based on progress + progress = self.eval_count / global_search_budget + self.w = 0.4 + 0.5 * (1 - progress) # Decrease inertia weight over time + self.c1 = 1.5 - 0.5 * progress # Decrease cognitive component + self.c2 = 1.5 + 0.5 * progress # Increase social component + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.init_pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Elitism: Ensure the best individual is carried forward + if self.eval_count < global_search_budget: + elitist_idx = np.argmin(fitness) + elitist = population[elitist_idx].copy() + elitist_fitness = fitness[elitist_idx] + population = np.concatenate(([elitist], population[:-1])) + fitness = np.concatenate(([elitist_fitness], fitness[:-1])) + + # Perform a combined local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.gradient_descent(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + new_x, new_f = self.local_search(population[i], func, local_budget // 2) + self.eval_count += local_budget // 2 + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridQuantumDifferentialPSO.py b/nevergrad/optimization/lama/EnhancedHybridQuantumDifferentialPSO.py new file mode 100644 index 000000000..c9cb9fcaa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridQuantumDifferentialPSO.py @@ -0,0 +1,154 @@ +import numpy as np + + +class EnhancedHybridQuantumDifferentialPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.5 + self.F_max = 1.0 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # Quantum Inspired Parameters + self.alpha = 0.75 + self.beta = 0.25 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while evaluations < self.budget: + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + # Quantum Inspired Adjustment + quantum_perturbation = np.random.normal(0, 1, self.dim) * ( + self.alpha * (self.x_opt - population[i]) + self.beta * (population[i] - self.lb) + ) + trial_vector = np.clip(trial_vector + quantum_perturbation, self.lb, self.ub) + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Update personal best + if f_candidate < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = f_candidate + + # Update velocities and positions (PSO component) + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment based on Stagnation Counter and Dynamic Population Size adjustment + if self.stagnation_counter > self.stagnation_threshold: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + self.stagnation_counter = 0 + + # Increase population size if stagnation persists + new_population_size = min(self.population_size + 10, 200) + if new_population_size > self.population_size: + new_individuals = np.random.uniform( + self.lb, self.ub, (new_population_size - self.population_size, self.dim) + ) + population = np.vstack((population, new_individuals)) + new_velocities = np.random.uniform( + -1, 1, (new_population_size - self.population_size, self.dim) + ) + velocities = np.vstack((velocities, new_velocities)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.hstack((fitness, new_fitness)) + personal_best_positions = np.vstack((personal_best_positions, new_individuals)) + personal_best_fitness = np.hstack((personal_best_fitness, new_fitness)) + self.population_size = new_population_size + evaluations += new_population_size - self.population_size + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + # Adjust inertia weight dynamically + self.inertia_weight = 0.4 + 0.5 * (self.budget - evaluations) / self.budget + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridQuasiRandomGradientDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedHybridQuasiRandomGradientDifferentialEvolution.py new file mode 100644 index 000000000..9aeec5bf5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridQuasiRandomGradientDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np +from scipy.stats import qmc + + +class EnhancedHybridQuasiRandomGradientDifferentialEvolution: + def __init__(self, budget, population_size=30, crossover_rate=0.7, initial_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = initial_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedHybridQuasiRandomGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedHybridSearch.py b/nevergrad/optimization/lama/EnhancedHybridSearch.py new file mode 100644 index 000000000..cf3b65728 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridSearch.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Cognitive constant + c2 = 2.0 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.01 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Local search parameters + local_search_radius = 0.1 + local_search_steps = 5 + + # Hybrid loop (combining PSO and Local Gradient-based search) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Local search with gradient approximation + for _ in range(local_search_steps): + grad = np.zeros_like(x) + perturbation = local_search_radius * (np.random.random(self.dim) - 0.5) + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation[j] + grad[j] = (func(x_perturb) - f) / (perturbation[j] + epsilon) + + # Update position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Evaluate the new position + new_f = func(positions[idx]) + if new_f < f: + f = new_f + x = positions[idx] + + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHybridSimulatedAnnealingOptimization.py b/nevergrad/optimization/lama/EnhancedHybridSimulatedAnnealingOptimization.py new file mode 100644 index 000000000..3b2dd03e5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHybridSimulatedAnnealingOptimization.py @@ -0,0 +1,214 @@ +import numpy as np +from scipy.spatial.distance import cdist + + +class EnhancedHybridSimulatedAnnealingOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.4 + self.F_max = 0.9 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + # Elitism + self.elite_fraction = 0.1 + + # Memory Mechanism + self.memory_size = 10 + self.memory = [] + + def _latin_hypercube_sampling(self, n_samples, n_dim): + l = np.arange(n_samples) + np.random.shuffle(l) + sample = l / n_samples + np.random.rand(n_samples) / n_samples + return sample + + def _initialize_population(self): + lhs_samples = np.zeros((self.population_size, self.dim)) + for i in range(self.dim): + lhs_samples[:, i] = self._latin_hypercube_sampling(self.population_size, self.dim) + lhs_samples = self.lb + lhs_samples * (self.ub - self.lb) + random_samples = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + population = np.vstack((lhs_samples, random_samples))[: self.population_size] + return population + + def _simulated_annealing(self, x, func): + temp = 1.0 + cooling_rate = 0.99 + best_x = x.copy() + best_f = func(x) + while temp > 1e-3: + new_x = np.clip(x + np.random.normal(0, 1, self.dim), self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f or np.random.rand() < np.exp((best_f - new_f) / temp): + best_x = new_x + best_f = new_f + temp *= cooling_rate + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while evaluations < self.budget: + # Elitism Preservation + elite_count = int(self.elite_fraction * self.population_size) + elites = population[np.argsort(fitness)[:elite_count]].copy() + elite_fitness = np.sort(fitness)[:elite_count].copy() + + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Update personal best + if f_candidate < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = f_candidate + + # Integrate Elitism + population[np.argsort(fitness)[-elite_count:]] = elites + fitness[np.argsort(fitness)[-elite_count:]] = elite_fitness + + # Update velocities and positions (PSO component) + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment based on Stagnation Counter + if self.stagnation_counter > self.stagnation_threshold: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + self.stagnation_counter = 0 + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + # Adjust inertia weight dynamically + self.inertia_weight = 0.4 + 0.5 * (self.budget - evaluations) / self.budget + + # Adjust population size dynamically based on performance + if self.stagnation_counter > self.stagnation_threshold * 2: + new_population_size = min(self.population_size + 10, 200) + if new_population_size > self.population_size: + new_individuals = np.random.uniform( + self.lb, self.ub, (new_population_size - self.population_size, self.dim) + ) + population = np.vstack((population, new_individuals)) + new_velocities = np.random.uniform( + -1, 1, (new_population_size - self.population_size, self.dim) + ) + velocities = np.vstack((velocities, new_velocities)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.hstack((fitness, new_fitness)) + personal_best_positions = np.vstack((personal_best_positions, new_individuals)) + personal_best_fitness = np.hstack((personal_best_fitness, new_fitness)) + self.population_size = new_population_size + evaluations += new_population_size - self.population_size + + # Memory mechanism + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmin([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + # Enhanced Local Search using Simulated Annealing + if self.stagnation_counter > self.stagnation_threshold * 3: + for i, mem in enumerate(self.memory): + local_candidate, f_local_candidate = self._simulated_annealing(mem, func) + evaluations += 1 + + if f_local_candidate < func(mem): + self.memory[i] = local_candidate + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + self.stagnation_counter = 0 + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHyperAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/EnhancedHyperAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..1bac33366 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperAdaptiveHybridDEPSO.py @@ -0,0 +1,149 @@ +import numpy as np + + +class EnhancedHyperAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Slightly increased population size for more exploration + w = 0.5 # Inertia weight for PSO + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.8 # Differential weight for DE + initial_CR = 0.9 # Crossover probability for DE + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59.py b/nevergrad/optimization/lama/EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59.py new file mode 100644 index 000000000..c0f010032 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.12, + mutation_strategy="robust", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Refined base mutation factor + self.F_range = F_range # Adjusted mutation range for improved adaptive behavior + self.CR = CR # Crossover probability fine-tuned for robustness + self.elite_fraction = elite_fraction # Incremented elite fraction for better elite selection + self.mutation_strategy = ( + mutation_strategy # Enhanced mutation strategy for more robust adaptive response + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Dynamic base selection influencing mutation strategy + if self.mutation_strategy == "robust": + if np.random.rand() < 0.80: # Higher probability to select the best individual for base + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjusted dynamic F adaptation + F = ( + self.F_base + (np.random.rand() * 2 - 1) * self.F_range + ) # Using a uniformly distributed adjustment + + # Mutation strategy (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover (binomial) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62.py b/nevergrad/optimization/lama/EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62.py new file mode 100644 index 000000000..e4ac06bbe --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62: + def __init__( + self, + budget=10000, + population_size=160, + F_base=0.5, + F_range=0.4, + CR=0.97, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor fine-tuned for even exploration + self.F_range = F_range # Controlled range for mutation factor to enhance mutation stability + self.CR = CR # Slightly higher crossover probability for improved diversity + self.elite_fraction = ( + elite_fraction # Slightly increased elite fraction to focus more on the best candidates + ) + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy to dynamically adapt to fitness landscape + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Select base individual from elite with a flexible strategy for dynamic adaptation + if np.random.rand() < 0.8: # Adjusted probability to prefer current best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Use random elite as base + base = population[np.random.choice(elite_indices)] + + # Mutation factor F dynamically adjusted + F = self.F_base + (np.random.rand() * 2 - 1) * self.F_range + + # Mutation (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover (binomial) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] is True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedHyperOptimizedMultiStrategicOptimizerV49.py b/nevergrad/optimization/lama/EnhancedHyperOptimizedMultiStrategicOptimizerV49.py new file mode 100644 index 000000000..9eecd8909 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperOptimizedMultiStrategicOptimizerV49.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedHyperOptimizedMultiStrategicOptimizerV49: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.5, + F_range=0.4, + CR=0.9, + elite_fraction=0.05, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor, slightly reduced for more stable exploration + self.F_range = F_range # Adjusted mutation factor range for controlled exploration + self.CR = CR # Crossover probability, slightly reduced to promote more exploitation + self.elite_fraction = elite_fraction # Reduced elite fraction to increase competitive pressure + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Enhanced adaptive strategy: prefer current best slightly more often + if np.random.rand() < 0.85: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjust mutation factor for stronger exploitation + F = self.F_base + np.random.normal(0, self.F_range / 2) + + # DE/rand/1/bin mutation strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Break if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedHyperParameterTunedMetaHeuristicOptimizerV4.py b/nevergrad/optimization/lama/EnhancedHyperParameterTunedMetaHeuristicOptimizerV4.py new file mode 100644 index 000000000..0360e49bb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperParameterTunedMetaHeuristicOptimizerV4.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedHyperParameterTunedMetaHeuristicOptimizerV4: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedHyperStrategicOptimizerV56.py b/nevergrad/optimization/lama/EnhancedHyperStrategicOptimizerV56.py new file mode 100644 index 000000000..84b2ef94f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedHyperStrategicOptimizerV56.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedHyperStrategicOptimizerV56: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.57, + F_range=0.43, + CR=0.94, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly adjusted mutation factor for balance + self.F_range = F_range # Slightly narrower mutation range for stability + self.CR = CR # Optimized crossover probability for improved gene mixing + self.elite_fraction = elite_fraction # Increased elite fraction to enhance exploitation + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy for dynamic problem adaptation + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Use adaptive mutation strategy with a modified selection probability + if self.mutation_strategy == "adaptive": + if ( + np.random.rand() < 0.85 + ): # Increased probability to select the current best, focusing search intensity + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of mutation factor F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme with tweaks + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using a slightly altered CR to improve solution mixing + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedImprovedDifferentialEvolutionLocalSearch_v58.py b/nevergrad/optimization/lama/EnhancedImprovedDifferentialEvolutionLocalSearch_v58.py new file mode 100644 index 000000000..c981830ad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedImprovedDifferentialEvolutionLocalSearch_v58.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedImprovedDifferentialEvolutionLocalSearch_v58: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.01, + population_size=30, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def improved_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(300): # Increased the number of runs further for better optimization + best_fitness, _ = self.improved_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer.py new file mode 100644 index 000000000..c67779839 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77.py b/nevergrad/optimization/lama/EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77.py new file mode 100644 index 000000000..5d8a2534f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7.py new file mode 100644 index 000000000..d1a31dd32 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.5, + delta=0.1, + decay_rate=0.95, + max_step=0.2, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategy.py new file mode 100644 index 000000000..d9760b6dc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategy.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedIslandEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=5, + population_per_island=20, + migration_rate=0.15, + mutation_intensity=0.5, + mutation_decay=0.95, + elite_ratio=0.2, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + # Fill the rest of the island population + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + # Introduce new genetic material by shuffling some individuals between islands + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) # Shuffle the migration indices to mix individuals + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV10.py b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV10.py new file mode 100644 index 000000000..64e181abd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV10.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedIslandEvolutionStrategyV10: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=25, + population_per_island=120, + migration_rate=0.2, + mutation_intensity=0.9, + mutation_decay=0.92, + elite_ratio=0.25, + crossover_probability=0.9, + tournament_size=2, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.uniform(0.3, 0.7, self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV3.py b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV3.py new file mode 100644 index 000000000..cacb3a7b8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV3.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedIslandEvolutionStrategyV3: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=8, + population_per_island=30, + migration_rate=0.25, + mutation_intensity=0.9, + mutation_decay=0.95, + elite_ratio=0.30, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + # Introduce new genetic material by shuffling some individuals between islands + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) # Shuffle the migration indices to mix individuals + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV7.py b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV7.py new file mode 100644 index 000000000..25889d617 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV7.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedIslandEvolutionStrategyV7: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=20, + population_per_island=70, + migration_rate=0.15, + mutation_intensity=1.0, + mutation_decay=0.95, + elite_ratio=0.05, + crossover_probability=0.85, + tournament_size=4, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV8.py b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV8.py new file mode 100644 index 000000000..ac7d5cfb4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedIslandEvolutionStrategyV8.py @@ -0,0 +1,109 @@ +import numpy as np + + +class EnhancedIslandEvolutionStrategyV8: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=25, + population_per_island=80, + migration_rate=0.1, + mutation_intensity=0.9, + mutation_decay=0.98, + elite_ratio=0.15, + crossover_probability=0.9, + tournament_size=5, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.uniform(0.3, 0.7, self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedLocalSearchAdaptiveStrategyV29.py b/nevergrad/optimization/lama/EnhancedLocalSearchAdaptiveStrategyV29.py new file mode 100644 index 000000000..b2d3f790f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedLocalSearchAdaptiveStrategyV29.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedLocalSearchAdaptiveStrategyV29: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.8, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover rate + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def local_search(self, best_individual): + perturbation = np.random.normal(0, 0.1, self.dimension) + candidate = best_individual + perturbation + candidate = np.clip(candidate, self.lower_bounds, self.upper_bounds) + return candidate + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + if np.random.rand() < 0.8: # Majority of time perform mutation and crossover + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + else: # Occasionally perform local search + trial = self.local_search(population[best_idx]) + + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedLocalSearchQuantumSimulatedAnnealingV6.py b/nevergrad/optimization/lama/EnhancedLocalSearchQuantumSimulatedAnnealingV6.py new file mode 100644 index 000000000..59e67c605 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedLocalSearchQuantumSimulatedAnnealingV6.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedLocalSearchQuantumSimulatedAnnealingV6: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step( + candidate_x, func, search_range=self.perturb_range + ) # Use perturb_range directly + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedMemeticDifferentialEvolution.py new file mode 100644 index 000000000..e6add31d3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemeticDifferentialEvolution.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnhancedMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5, step_size=0.01): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMemeticEvolutionarySearch.py b/nevergrad/optimization/lama/EnhancedMemeticEvolutionarySearch.py new file mode 100644 index 000000000..6886d2d57 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemeticEvolutionarySearch.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedMemeticEvolutionarySearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.5 + np.random.rand() * 0.3 + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < 0.9 + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - 0.01 * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + self.crossover_rate = 0.9 - 0.3 * (iteration / max_iterations) + self.learning_rate = 0.01 * np.exp(-iteration / (0.5 * max_iterations)) + + def hybrid_step(self, func, pop, scores): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < 0.5: # 50% probability to apply local search + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform hybrid step + pop, scores = self.hybrid_step(func, pop, scores) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMemeticHarmonyOptimization.py b/nevergrad/optimization/lama/EnhancedMemeticHarmonyOptimization.py new file mode 100644 index 000000000..7c96a9be9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemeticHarmonyOptimization.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedMemeticHarmonyOptimization: + def __init__( + self, + budget=10000, + hmcr=0.7, + par=0.4, + bw=0.6, + memetic_iter=1000, + memetic_prob=0.8, + memetic_step=0.1, + explore_prob=0.1, + local_search_prob=0.7, + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.explore_prob = explore_prob + self.local_search_prob = local_search_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + convergence_curve = [] + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < self.explore_prob: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + convergence_curve.append(self.f_opt) + + mean_aocc = np.mean(np.array(convergence_curve)) + std_dev = np.std(np.array(convergence_curve)) + + return mean_aocc, std_dev diff --git a/nevergrad/optimization/lama/EnhancedMemoryAdaptiveDynamicHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedMemoryAdaptiveDynamicHybridOptimizer.py new file mode 100644 index 000000000..8df555609 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemoryAdaptiveDynamicHybridOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import pdist + + +class EnhancedMemoryAdaptiveDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + stagnation_threshold=10, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.diversity_threshold = diversity_threshold + self.stagnation_threshold = stagnation_threshold + self.global_best_history = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def population_diversity(self, population): + if len(population) < 2: + return 0.0 + distances = pdist(population) + return np.mean(distances) + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Check population diversity and restart if necessary + if self.population_diversity(population) < self.diversity_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Restart mechanism based on stagnation + if len(self.global_best_history) > self.stagnation_threshold: + if np.std(self.global_best_history[-self.stagnation_threshold :]) < 1e-5: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77.py b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77.py new file mode 100644 index 000000000..7befd925b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_init = F_init + self.CR_init = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx): + size = len(population) + idxs = np.random.choice(size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + if self.memory: + memory_effect = np.mean(self.memory, axis=0) + mutant = a + self.F_init * (best_idx - b) + memory_effect + else: + mutant = a + self.F_init * (b - c) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR_init + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.update_memory(trial - target) + return trial, f_trial + return target, f_target + + def update_memory(self, diff): + if len(self.memory) < self.memory_size: + self.memory.append(diff) + else: + # Replace an old memory with new one probabilistically + replace_idx = np.random.randint(0, self.memory_size) + self.memory[replace_idx] = diff + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV41.py b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV41.py new file mode 100644 index 000000000..ce16cca11 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV41.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedMemoryGuidedAdaptiveStrategyV41: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[best_idx] + # Adaptive mutation strategy + if self.memory and np.random.rand() < self.get_memory_usage_rate(): + memory_effect = np.mean(self.memory, axis=0) + mutant += self.F * (memory_effect + population[a] - population[b]) + else: + mutant += self.F * (population[a] - population[b]) + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.update_memory(trial - target) + return trial, f_trial + else: + return target, f_target + + def update_memory(self, successful_mutation): + self.memory.append(successful_mutation) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + + def get_memory_usage_rate(self): + """Dynamically adjusts the rate at which memory is used in mutations""" + return np.clip(len(self.memory) / self.memory_size, 0.1, 1) + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV69.py b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV69.py new file mode 100644 index 000000000..29a639c00 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMemoryGuidedAdaptiveStrategyV69.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedMemoryGuidedAdaptiveStrategyV69: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover factor + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation based on sigmoid function for smooth transition + scale = iteration / total_iterations + self.F = 0.5 + 0.4 * np.sin(np.pi * scale) + self.CR = 0.5 + 0.4 * np.cos(np.pi * scale) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + + for iteration in range(total_iterations): + phase = 1 if iteration < total_iterations * self.switch_ratio else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedMetaDynamicPrecisionOptimizerV1.py b/nevergrad/optimization/lama/EnhancedMetaDynamicPrecisionOptimizerV1.py new file mode 100644 index 000000000..3802661a5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaDynamicPrecisionOptimizerV1.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedMetaDynamicPrecisionOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and annealing parameters + T = 1.2 # Optimized initial temperature to encourage exploration + T_min = 0.0001 # Very low minimum temperature for deep exploration in late stages + alpha = 0.93 # Optimal cooling rate to maintain a balance between exploration and exploitation + + # Mutation and crossover parameters adjusted for optimal search dynamics + F = 0.78 # Mutation factor adjusted for aggressive exploration + CR = 0.88 # High crossover probability to ensure diversity in solutions + + population_size = 85 # Optimized population size for effective coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Integrate adaptive mutation strategy with enhanced dynamic control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor adapts based on the sigmoid function for refined control + dynamic_F = ( + F * np.exp(-0.07 * T) * (0.8 + 0.2 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criteria incorporating a dynamic temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy enhanced with sinusoidal modulation for longer effective search + adaptive_cooling = alpha - 0.005 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaHeuristicOptimizerV2.py b/nevergrad/optimization/lama/EnhancedMetaHeuristicOptimizerV2.py new file mode 100644 index 000000000..168809dcd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaHeuristicOptimizerV2.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedMetaHeuristicOptimizerV2: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.6, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.7, + social_weight=1.7, + max_velocity=0.9, + mutation_rate=0.05, + num_generations=250, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V1.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V1.py new file mode 100644 index 000000000..5edcf0344 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V1.py @@ -0,0 +1,116 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V1: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Some custom meta-learning process to guide the search + # Implement your meta-learning strategy here + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V2.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V2.py new file mode 100644 index 000000000..935855bf5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V2.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 100 + self.meta_net_lr = 0.01 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V3.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V3.py new file mode 100644 index 000000000..1a3358906 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V3.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 150 + self.meta_net_lr = 0.05 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V4.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V4.py new file mode 100644 index 000000000..05792f98d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V4.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V4: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 2.0 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 200 + self.meta_net_lr = 0.1 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.5 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V5.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V5.py new file mode 100644 index 000000000..3f39865af --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V5.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V5: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.15 + self.max_local_search_attempts = 5 + self.meta_net_iters = 300 + self.meta_net_lr = 0.05 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.5 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V6.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V6.py new file mode 100644 index 000000000..22d6deea0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V6.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V6: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 400 + self.meta_net_lr = 0.1 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V7.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V7.py new file mode 100644 index 000000000..8832dcdab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSO_LS_DIW_AP_V7.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSO_LS_DIW_AP_V7: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.05 + self.max_local_search_attempts = 5 + self.meta_net_iters = 500 + self.meta_net_lr = 0.05 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + # Gradient descent for meta-learning + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Apply meta-learning guidance + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv2.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv2.py new file mode 100644 index 000000000..e76ce839d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv2.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSOv2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 5000 + self.meta_net_lr = 0.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv3.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv3.py new file mode 100644 index 000000000..5b2cd5dd6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv3.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSOv3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 5000 + self.meta_net_lr = 0.1 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv4.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv4.py new file mode 100644 index 000000000..b7b3d9f0d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv4.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSOv4: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 10000 + self.meta_net_lr = 0.01 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(10): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv5.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv5.py new file mode 100644 index 000000000..1ea49ec8d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv5.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSOv5: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 5000 + self.meta_net_lr = 0.05 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(10): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv6.py b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv6.py new file mode 100644 index 000000000..1fccf63bd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetAQAPSOv6.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedMetaNetAQAPSOv6: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.1 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(10): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetPSO.py b/nevergrad/optimization/lama/EnhancedMetaNetPSO.py new file mode 100644 index 000000000..fc01aeb4e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetPSO.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedMetaNetPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaNetPSOv2.py b/nevergrad/optimization/lama/EnhancedMetaNetPSOv2.py new file mode 100644 index 000000000..d8abd84ad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaNetPSOv2.py @@ -0,0 +1,130 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedMetaNetPSOv2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMetaPopulationAdaptiveGradientSearch.py b/nevergrad/optimization/lama/EnhancedMetaPopulationAdaptiveGradientSearch.py new file mode 100644 index 000000000..a82ae9ac9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMetaPopulationAdaptiveGradientSearch.py @@ -0,0 +1,174 @@ +import numpy as np + + +class EnhancedMetaPopulationAdaptiveGradientSearch: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + gradient_steps=10, + meta_population_size=3, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + self.gradient_steps = gradient_steps # number of gradient descent steps + self.meta_population_size = meta_population_size # number of meta-populations + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + for _ in range(self.gradient_steps): + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + x -= self.learning_rate * grad + x = np.clip(x, -5.0, 5.0) + + return x + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize meta-populations + meta_populations = [] + for _ in range(self.meta_population_size): + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + meta_populations.append((pop, scores)) + + evaluations = self.population_size * self.meta_population_size + max_iterations = self.budget // (self.population_size * self.meta_population_size) + + # Initialize global best + global_best_score = np.inf + global_best_position = None + + for pop, scores in meta_populations: + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + for iteration in range(max_iterations): + for idx, (pop, scores) in enumerate(meta_populations): + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to elite individuals in the population + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + for i in range(len(elite_pop)): + elite_pop[i] = self.__gradient_local_search(func, elite_pop[i]) + scores[i] = func(elite_pop[i]) + + # Update global best after local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + if iteration == 0: + dim = elite_pop.shape[1] + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1 - 1 / (4.0 * dim) + 1 / (21.0 * dim**2)) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) + / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * ( + mean_new - mean + ) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + + if evaluations >= self.budget: + break + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMultiFocalAdaptiveOptimizer.py b/nevergrad/optimization/lama/EnhancedMultiFocalAdaptiveOptimizer.py new file mode 100644 index 000000000..161c15d10 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiFocalAdaptiveOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EnhancedMultiFocalAdaptiveOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=100): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_decay = 0.99 + self.local_decay = 0.95 + self.initial_velocity_scale = 0.1 + self.learning_rate = 0.3 # Added learning rate for adapting velocity updates + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.random.randn(self.particles, self.dimension) * self.initial_velocity_scale + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + self.global_decay * velocities[i] + + self.learning_rate * r1 * (personal_best_positions[i] - positions[i]) + + self.learning_rate * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < fitness[i]: + fitness[i] = new_fitness + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/EnhancedMultiModalAdaptiveOptimizer.py b/nevergrad/optimization/lama/EnhancedMultiModalAdaptiveOptimizer.py new file mode 100644 index 000000000..9094dec7a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiModalAdaptiveOptimizer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedMultiModalAdaptiveOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.15, + mutation_intensity=0.25, + crossover_probability=0.85, + gradient_step=0.15, + mutation_decay=0.98, + gradient_enhancement=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement = gradient_enhancement + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def adaptive_gradient(self, individual, func, best_individual): + if self.gradient_enhancement: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.linalg.norm(gradient_direction)) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = [] + for i in range(self.population_size): + if i < len(elites): + new_population.append(self.adaptive_gradient(elites[i], func, best_individual)) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population.append(child) + + population = np.array(new_population) + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + + evaluations += self.population_size + + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedMultiModalConvergenceOptimizer.py b/nevergrad/optimization/lama/EnhancedMultiModalConvergenceOptimizer.py new file mode 100644 index 000000000..4f4f9622d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiModalConvergenceOptimizer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedMultiModalConvergenceOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=250, + elite_fraction=0.05, + mutation_intensity=0.3, + crossover_probability=0.8, + gradient_step=0.1, + mutation_decay=0.95, + gradient_enhancement_cycle=4, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement_cycle = gradient_enhancement_cycle + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def adaptive_gradient(self, individual, func, best_individual, iteration): + if iteration % self.gradient_enhancement_cycle == 0: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.sqrt(np.dot(gradient_direction, gradient_direction))) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + iteration = 0 + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = [] + for i in range(self.population_size): + if i < len(elites): + new_population.append(self.adaptive_gradient(elites[i], func, best_individual, iteration)) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population.append(child) + + population = np.array(new_population) + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + + evaluations += self.population_size + iteration += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedMultiModalExplorationStrategy.py b/nevergrad/optimization/lama/EnhancedMultiModalExplorationStrategy.py new file mode 100644 index 000000000..f84de59ec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiModalExplorationStrategy.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedMultiModalExplorationStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + elite_fraction=0.2, + mutation_intensity=1.0, + crossover_rate=0.8, + ): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity # Intensity of mutation + self.crossover_rate = crossover_rate # Crossover probability + self.sigma = 0.3 # Standard deviation of Gaussian noise for mutation + + def mutate(self, individual): + """Hybrid mutation incorporating both global and local search tendencies""" + global_mutation = individual + self.mutation_intensity * np.random.randn(self.dimension) + local_mutation = individual + np.random.normal(0, self.sigma, self.dimension) + # Choose between global or local mutation based on a random choice + if np.random.rand() < 0.5: + return np.clip(global_mutation, self.bounds["lb"], self.bounds["ub"]) + else: + return np.clip(local_mutation, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent, donor): + """Uniform crossover with adjustable rate""" + mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(mask, donor, parent) + + def __call__(self, func): + # Initialize the population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Elite selection + num_elites = int(self.population_size * self.elite_fraction) + elites_indices = np.argsort(fitness)[:num_elites] + elites = population[elites_indices] + + # Generate offspring using mutation and crossover + new_population = np.empty_like(population) + for i in range(self.population_size): + if i < num_elites: + new_population[i] = elites[i] + else: + # select random elite for crossover + elite = elites[np.random.randint(num_elites)] + mutated = self.mutate(population[i]) + new_population[i] = self.crossover(elite, mutated) + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Select the best solutions to form the new population + combined_population = np.vstack((population, new_population)) + combined_fitness = np.concatenate((fitness, new_fitness)) + indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[current_best_idx] + + # Adapt mutation parameters + successful_ratio = np.mean(new_fitness < fitness) + self.sigma *= np.exp(0.1 * (successful_ratio - 0.2)) + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedMultiModalMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedMultiModalMemoryHybridOptimizer.py new file mode 100644 index 000000000..2e93993ec --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiModalMemoryHybridOptimizer.py @@ -0,0 +1,201 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import pdist, squareform + + +class EnhancedMultiModalMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + stagnation_threshold=10, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.diversity_threshold = diversity_threshold + self.stagnation_threshold = stagnation_threshold + self.global_best_history = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def population_diversity(self, population): + if len(population) < 2: + return 0.0 + distances = pdist(population) + return np.mean(distances) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + if len(idxs) < 3: + continue # Skip mutation if less than 3 distinct individuals + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Check population diversity and restart if necessary + if self.population_diversity(population) < self.diversity_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Restart mechanism based on stagnation + if len(self.global_best_history) > self.stagnation_threshold: + if np.std(self.global_best_history[-self.stagnation_threshold :]) < 1e-5: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMultiOperatorSearch.py b/nevergrad/optimization/lama/EnhancedMultiOperatorSearch.py new file mode 100644 index 000000000..041a0707f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiOperatorSearch.py @@ -0,0 +1,117 @@ +import numpy as np + + +class EnhancedMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMultiOperatorSearch2.py b/nevergrad/optimization/lama/EnhancedMultiOperatorSearch2.py new file mode 100644 index 000000000..4616b2d3f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiOperatorSearch2.py @@ -0,0 +1,122 @@ +import numpy as np + + +class EnhancedMultiOperatorSearch2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and "prev_f" in locals() and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedMultiOperatorSearch2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedMultiPhaseAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedMultiPhaseAdaptiveDE.py new file mode 100644 index 000000000..290c95a62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiPhaseAdaptiveDE.py @@ -0,0 +1,138 @@ +import numpy as np + + +class EnhancedMultiPhaseAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMultiPhaseOptimizationAlgorithm.py b/nevergrad/optimization/lama/EnhancedMultiPhaseOptimizationAlgorithm.py new file mode 100644 index 000000000..458d28c29 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiPhaseOptimizationAlgorithm.py @@ -0,0 +1,118 @@ +import numpy as np + + +class EnhancedMultiPhaseOptimizationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 400 # Improved population size for initial exploration + self.F = 0.9 # Differential weight for exploration + self.CR = 0.8 # Crossover probability for exploitation + self.local_search_chance_initial = 0.4 # Initial local search probability + self.elite_ratio = 0.15 # Ratio of elite members to retain + self.diversity_threshold = 1e-4 # Threshold to switch between exploration and exploitation + self.reinit_percentage = 0.2 # Reinitialization percentage for diversity + self.cauchy_step_scale = 0.02 # Scale for Cauchy distribution steps + self.gaussian_step_scale = 0.002 # Scale for Gaussian distribution steps + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance_initial: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): # Increased iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adapt local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance_initial = max( + 0.15, self.local_search_chance_initial * remaining_budget_ratio + ) diff --git a/nevergrad/optimization/lama/EnhancedMultiStageGradientBoostedAnnealing.py b/nevergrad/optimization/lama/EnhancedMultiStageGradientBoostedAnnealing.py new file mode 100644 index 000000000..c66c9db58 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiStageGradientBoostedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class EnhancedMultiStageGradientBoostedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Frequent intensive localized search as refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/EnhancedMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..9a0587999 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiStrategyDifferentialEvolution.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.2 + self.restart_threshold = 30 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.05 + self.no_improvement_count = 0 + self.elite_fraction = 0.1 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + x_local, f_local = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim + ).x, func(x) + return x_local, f_local + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.4, 1.2) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedMultiStrategyQuantumLevyOptimizer.py b/nevergrad/optimization/lama/EnhancedMultiStrategyQuantumLevyOptimizer.py new file mode 100644 index 000000000..46265f979 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedMultiStrategyQuantumLevyOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedMultiStrategyQuantumLevyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.5 + self.social_weight = 1.5 + self.quantum_weight = 0.4 + self.elite_fraction = 0.2 + self.memory_size = 30 + self.local_search_probability = 0.7 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + self.strategy_rewards = [0, 0, 0, 0] + self.strategy_uses = [0, 0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2, 3], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 3: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = EnhancedMultiStrategyQuantumLevyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedNicheDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedNicheDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..3c95b19da --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedNicheDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,149 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedNicheDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.init_num_niches = 6 + self.alpha = 0.5 + self.beta = 0.5 + self.local_search_prob = 0.1 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..435f000d0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOppositionBasedDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedOppositionBasedDifferentialEvolution: + def __init__(self, budget=10000, pop_size=25, f_init=0.8, cr_init=0.9, scaling_factor=0.1): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + success_rate = 1.0 * success / self.pop_size + f_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) * (1.0 - success_rate) + cr_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) * (1.0 - success_rate) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + success_count = 0 + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + success_count += 1 + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + success_count += 1 + + f_current, cr_current = self.adaptive_parameter_update( + success_count, f_current, cr_current, self.scaling_factor + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearch.py b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearch.py new file mode 100644 index 000000000..29f039a0e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedOppositionBasedHarmonySearch: + def __init__( + self, budget=10000, harmony_memory_size=20, hmcr=0.7, par=0.4, bw=0.5, bw_min=0.01, bw_decay=0.995 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_min = bw_min # Minimum Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + self.bw = max(self.bw * self.bw_decay, self.bw_min) # Decay the bandwidth with a minimum value + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidth.py b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidth.py new file mode 100644 index 000000000..dc57072ed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidth.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedOppositionBasedHarmonySearchDynamicBandwidth: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Initial Bandwidth + self.bw_min = bw_min # Minimum Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + self.bw_range = bw_range # Bandwidth range for dynamic adjustment + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return self.bw_range / (1 + iteration) # Dynamic adjustment of bandwidth + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = max(self.adjust_bandwidth(i), self.bw_min) # Update bandwidth dynamically + + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC.py b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC.py new file mode 100644 index 000000000..378fa82a2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC.py @@ -0,0 +1,100 @@ +import numpy as np + + +class EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Initial Bandwidth + self.bw_min = bw_min # Minimum Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + self.bw_range = bw_range # Bandwidth range for dynamic adjustment + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func, bandwidth): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += bandwidth * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return max(self.bw_range / (1 + iteration), self.bw_min) # Dynamic adjustment of bandwidth + + def adaptive_bandwidth_control(self, iteration, best_fitness, current_fitness): + if best_fitness == np.Inf or current_fitness == np.Inf: + return self.bw + + if current_fitness < best_fitness: + return self.bw * (1 + self.bw_decay) + else: + return self.bw * (1 - self.bw_decay) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = self.adjust_bandwidth(i) # Update bandwidth dynamically + + new_harmony = self.harmony_search(func, self.bw) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + self.bw = self.adaptive_bandwidth_control( + i, self.f_opt, improved_fitness + ) # Adaptive bandwidth control + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py new file mode 100644 index 000000000..421976392 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE: + def __init__( + self, + budget=10000, + harmony_memory_size=20, + hmcr=0.7, + par=0.4, + bw=0.5, + bw_min=0.01, + bw_decay=0.995, + bw_range=0.5, + de_sf_min=0.5, + de_sf_max=1.0, + de_sf_decay=0.99, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr + self.par = par + self.bw = bw + self.bw_min = bw_min + self.bw_decay = bw_decay + self.bw_range = bw_range + self.de_sf_min = de_sf_min + self.de_sf_max = de_sf_max + self.de_sf_decay = de_sf_decay + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func, bandwidth): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += bandwidth * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def adjust_bandwidth(self, iteration): + return max(self.bw_range / (1 + iteration), self.bw_min) + + def adapt_de_scale_factor(self): + return max(self.de_sf_min, self.de_sf_max * self.de_sf_decay) + + def differential_evolution(self, func, current_harmony, best_harmony, scale_factor): + mutant_harmony = current_harmony + scale_factor * (best_harmony - current_harmony) + return np.clip(mutant_harmony, func.bounds.lb, func.bounds.ub) + + def self_adaptive_parameter_update(self, success): + if success: + self.hmcr = min(1.0, self.hmcr * 1.05) # Increase HMCR if successful + self.par = max(0.0, self.par * 0.95) # Decrease PAR if successful + else: + self.hmcr = max(0.0, self.hmcr * 0.95) # Decrease HMCR if not successful + self.par = min(1.0, self.par * 1.05) # Increase PAR if not successful + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + self.bw = self.adjust_bandwidth(i) + + new_harmony = self.harmony_search(func, self.bw) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + best_harmony = self.harmony_memory[np.argmin(self.harmony_memory_fitness)] + scale_factor = self.adapt_de_scale_factor() + trial_harmony = self.differential_evolution(func, new_harmony, best_harmony, scale_factor) + trial_fitness = func(trial_harmony) + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_harmony + + idx_worst_trial = np.argmax(self.harmony_memory_fitness) + if trial_fitness < self.harmony_memory_fitness[idx_worst_trial]: + self.harmony_memory[idx_worst_trial] = trial_harmony + self.harmony_memory_fitness[idx_worst_trial] = trial_fitness + self.self_adaptive_parameter_update(True) + else: + self.self_adaptive_parameter_update(False) + + self.bw = self.bw * self.bw_decay + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedOptimalEvolutionaryGradientOptimizerV9.py b/nevergrad/optimization/lama/EnhancedOptimalEvolutionaryGradientOptimizerV9.py new file mode 100644 index 000000000..9ea2c4238 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOptimalEvolutionaryGradientOptimizerV9.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedOptimalEvolutionaryGradientOptimizerV9: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.5, + F_range=0.3, + CR=0.85, + elite_fraction=0.1, + mutation_strategy="adaptive", + amplification_factor=1.2, + contraction_factor=0.8, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.amplification_factor = amplification_factor # Factor for enhancing exploration + self.contraction_factor = contraction_factor # Factor for controlling convergence + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.75: # Slightly increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F with amplification and contraction to adapt to search progress + if np.random.rand() < 0.5: + F = self.F_base + np.random.rand() * self.F_range * self.amplification_factor + else: + F = self.F_base - np.random.rand() * self.F_range * self.contraction_factor + + # DE/rand/1 mutation + idxs = [ + idx + for idx in range(self.population_size) + if idx not in [i, best_idx] + list(elite_indices) + ] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedOptimalPrecisionEvolutionaryThermalOptimizer.py b/nevergrad/optimization/lama/EnhancedOptimalPrecisionEvolutionaryThermalOptimizer.py new file mode 100644 index 000000000..554218d74 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOptimalPrecisionEvolutionaryThermalOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class EnhancedOptimalPrecisionEvolutionaryThermalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Adjust initial temperature and refinement of cooling and exploitation strategies + T = 1.0 # Reduced initial temperature to prevent premature convergence + T_min = 0.001 # Lower minimum temperature for prolonged fine-tuning + alpha = 0.98 # Slower cooling rate to extend the exploration phase significantly + + # Mutation and crossover parameters optimized further + F = 0.75 # Mutation factor adjusted for deeper exploration + CR = 0.88 # Adjusted crossover probability to increase genetic diversity + + population_size = 80 # Adjusted population size for better initial space exploration + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation factor adjustments and more responsive annealing acceptance conditions + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and linear progress + dynamic_F = F * np.exp(-T) * (0.75 + 0.25 * np.cos(np.pi * evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion incorporating a more responsive strategy to temperature and fitness changes + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Progressive and adaptive cooling strategy that adjusts more dynamically based on optimization progress + adaptive_cooling = alpha - 0.01 * np.sin(np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedOptimizedEvolutiveStrategy.py b/nevergrad/optimization/lama/EnhancedOptimizedEvolutiveStrategy.py new file mode 100644 index 000000000..d828c3697 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOptimizedEvolutiveStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedOptimizedEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=20): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate=0.2, mutation_strength=0.8): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dim) + return alpha * parent1 + (1 - alpha) * parent2 + + def __call__(self, func): + # Parameters + population_size = 20 + num_generations = self.budget // population_size + num_best = 4 + mutation_rate = 0.2 + mutation_strength_initial = 0.8 + decay_factor = 0.95 + + # Initialize + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + # Evolution loop + for _ in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + # Generate new population + new_population = [] + while len(new_population) < population_size: + for i in range(num_best): + for j in range(i + 1, num_best): + child = self.crossover(best_population[i], best_population[j]) + new_population.append(child) + if len(new_population) >= population_size: + break + population = np.array(new_population) + population = self.mutate(population, mutation_rate, mutation_strength_initial) + mutation_strength_initial *= decay_factor + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46.py b/nevergrad/optimization/lama/EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46.py new file mode 100644 index 000000000..d4fb62e1a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.07, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Fine-tuned mutation factor + self.F_range = F_range # Adjusted mutation range for better control + self.CR = CR # Adjusted crossover probability to enhance convergence stability + self.elite_fraction = elite_fraction # Adjusted elite fraction to optimize diversity and convergence + self.mutation_strategy = mutation_strategy # Retained adaptive mutation strategy for dynamic response + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Choose the base individual from the elite pool or the best, dynamically + if np.random.rand() < 0.8: # Increased use of best individual to focus search + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Mutation factor adjustment + F = self.F_base + (np.random.rand() * self.F_range - self.F_range / 2) + + # Mutation strategy: DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Termination check + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDE.py b/nevergrad/optimization/lama/EnhancedOrthogonalDE.py new file mode 100644 index 000000000..606477a61 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDE.py @@ -0,0 +1,46 @@ +import numpy as np + + +class EnhancedOrthogonalDE: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + # Introducing Enhanced Orthogonal Crossover + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = np.mean(orthogonal_vectors, axis=0) + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(population_fitness) + if population_fitness[best_idx] < self.f_opt: + self.f_opt = population_fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolution.py new file mode 100644 index 000000000..3c796eb8f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolution.py @@ -0,0 +1,46 @@ +import numpy as np + + +class EnhancedOrthogonalDifferentialEvolution: + def __init__(self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + # Introducing Enhanced Orthogonal Crossover + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = np.mean(orthogonal_vectors, axis=0) + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionImproved.py b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionImproved.py new file mode 100644 index 000000000..47e21ff55 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionImproved.py @@ -0,0 +1,51 @@ +import numpy as np + + +class EnhancedOrthogonalDifferentialEvolutionImproved: + def __init__( + self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, orthogonal_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + # Introducing Enhanced Orthogonal Crossover with improved adjustment + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = ( + np.mean(orthogonal_vectors, axis=0) * self.orthogonal_factor + ) # Adjusting the orthogonal factor + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV2.py b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV2.py new file mode 100644 index 000000000..3c735ff72 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV2.py @@ -0,0 +1,51 @@ +import numpy as np + + +class EnhancedOrthogonalDifferentialEvolutionV2: + def __init__( + self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, orthogonal_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + # Introducing Enhanced Orthogonal Crossover with improved adjustment + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = np.mean(orthogonal_vectors, axis=0) * ( + self.orthogonal_factor / np.sqrt(2 * np.log(dimension)) + ) # Improved scaling factor + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV3.py b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV3.py new file mode 100644 index 000000000..86ddccbac --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV3.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedOrthogonalDifferentialEvolutionV3: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=0.8, + crossover_rate=0.9, + orthogonal_factor=0.5, + adapt_orthogonal=True, + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + self.adapt_orthogonal = adapt_orthogonal + self.orthogonal_factor_min = 0.1 + self.orthogonal_factor_max = 0.9 + self.orthogonal_factor_decay = 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + orthogonal_factor = self.orthogonal_factor + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = np.mean(orthogonal_vectors, axis=0) * ( + orthogonal_factor / np.sqrt(2 * np.log(dimension)) + ) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + if self.adapt_orthogonal: + orthogonal_factor = max( + orthogonal_factor * self.orthogonal_factor_decay, self.orthogonal_factor_min + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV4.py b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV4.py new file mode 100644 index 000000000..625674252 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedOrthogonalDifferentialEvolutionV4.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedOrthogonalDifferentialEvolutionV4: + def __init__( + self, + budget=1000, + population_size=50, + mutation_factor=0.8, + crossover_rate=0.9, + orthogonal_factor=0.5, + adapt_orthogonal=True, + crossover_strategy="rand-to-best", + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + self.adapt_orthogonal = adapt_orthogonal + self.orthogonal_factor_min = 0.1 + self.orthogonal_factor_max = 0.9 + self.orthogonal_factor_decay = 0.9 + self.crossover_strategy = crossover_strategy + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + orthogonal_factor = self.orthogonal_factor + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + if self.crossover_strategy == "rand-to-best": + rand_individual = population[np.random.choice(len(population))] + mutant = ( + population[i] + + self.mutation_factor * (rand_individual - population[i]) + + self.mutation_factor * (a - b) + ) + else: # Default to traditional mutation + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + orthogonal_vectors = np.random.uniform(-1, 1, size=(2, dimension)) + orthogonal_vector = np.mean(orthogonal_vectors, axis=0) * ( + orthogonal_factor / np.sqrt(2 * np.log(dimension)) + ) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + if self.adapt_orthogonal: + orthogonal_factor = max( + orthogonal_factor * self.orthogonal_factor_decay, self.orthogonal_factor_min + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.random.choice(len(population), size=3, replace=False) + return population[idxs[0]], population[idxs[1]], population[idxs[2]] diff --git a/nevergrad/optimization/lama/EnhancedParallelDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedParallelDifferentialEvolution.py new file mode 100644 index 000000000..71651c42f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParallelDifferentialEvolution.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EnhancedParallelDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, F=0.8, CR=0.9, strategy="best"): + self.budget = budget + self.population_size = population_size + self.F = F # Increased Differential weight to encourage more aggressive diversification + self.CR = CR # Increased Crossover probability to allow more mixing + self.strategy = strategy # Strategy for the selection of base vector in mutation + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Main loop + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation based on strategy + if self.strategy == "best": + best_idx = np.argmin(fitness) + base = population[best_idx] + else: # "random" strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + base = population[np.random.choice(idxs)] + + # Generate mutant + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + self.F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Check if budget exhausted + if evaluations >= self.budget: + break + + # Find the best solution + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] + + +# Example usage: +# optimizer = EnhancedParallelDifferentialEvolution(budget=10000) +# best_f, best_x = optimizer(your_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedParticleSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimization.py new file mode 100644 index 000000000..378f2208f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimization.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.population_size = 100 + self.inertia_weight = 0.5 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.velocity_limit = 0.2 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocity = np.random.uniform( + -self.velocity_limit, self.velocity_limit, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_position = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + velocity[i] = ( + self.inertia_weight * velocity[i] + + self.cognitive_coeff * r1 * (personal_best_position[i] - population[i]) + + self.social_coeff * r2 * (self.x_opt - population[i]) + ) + velocity[i] = np.clip(velocity[i], -self.velocity_limit, self.velocity_limit) + population[i] = np.clip(population[i] + velocity[i], self.lb, self.ub) + + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < personal_best_fitness[i]: + personal_best_position[i] = population[i].copy() + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i].copy() + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizer.py new file mode 100644 index 000000000..286700af5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizer.py @@ -0,0 +1,47 @@ +import numpy as np + + +class EnhancedParticleSwarmOptimizer: + def __init__(self, budget, swarm_size=20, inertia_weight=0.5, cognitive_weight=1.5, social_weight=1.5): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best): + velocity = np.random.uniform(-1, 1, size=len(particle)) + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + swarm[i], velocity = self.optimize_particle(particle, func, personal_best[i], global_best) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV4.py b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV4.py new file mode 100644 index 000000000..090b04b45 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV4.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedParticleSwarmOptimizerV4: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.1, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV5.py b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV5.py new file mode 100644 index 000000000..0d9123fe4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV5.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedParticleSwarmOptimizerV5: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.9, + cognitive_weight=2.0, + social_weight=2.0, + max_velocity=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV6.py b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV6.py new file mode 100644 index 000000000..95bc99773 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedParticleSwarmOptimizerV6.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedParticleSwarmOptimizerV6: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/EnhancedPhaseAdaptiveMemoryStrategyV75.py b/nevergrad/optimization/lama/EnhancedPhaseAdaptiveMemoryStrategyV75.py new file mode 100644 index 000000000..4efbc7290 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPhaseAdaptiveMemoryStrategyV75.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedPhaseAdaptiveMemoryStrategyV75: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.8, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx): + size = len(population) + idxs = np.random.choice(size, 4, replace=False) + a, b, c, d = population[idxs] + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = a + self.F * (b - c) + 0.1 * memory_effect # Memory influenced mutation step + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(self.memory_size)] = trial - target + return trial, f_trial + return target, f_target + + def adapt_parameters(self, current_eval): + progress = current_eval / self.budget + self.F = max(0.1, 0.9 - progress) # Decreasing F over time + self.CR = 0.5 + 0.4 * np.sin(np.pi * progress) # Oscillating CR for balance + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adapt_parameters(evaluations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedPhaseTransitionMemoryStrategyV82.py b/nevergrad/optimization/lama/EnhancedPhaseTransitionMemoryStrategyV82.py new file mode 100644 index 000000000..bf9c63794 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPhaseTransitionMemoryStrategyV82.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedPhaseTransitionMemoryStrategyV82: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + memory_size=10, + phase_transition=0.5, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.phase_transition = phase_transition + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, evaluations): + phase = 1 if evaluations < self.budget * self.phase_transition else 2 + idxs = np.random.choice(self.pop_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + if phase == 1: + mutant = x1 + self.F * (x2 - x3) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = x1 + self.F * (x2 - x3) + 0.1 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, evaluations) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveCohortOptimization.py b/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveCohortOptimization.py new file mode 100644 index 000000000..0d9fa50ea --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveCohortOptimization.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedPrecisionAdaptiveCohortOptimization: + def __init__(self, budget, dimension=5, population_size=120, elite_fraction=0.2, mutation_intensity=0.3): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Intensity for mutation + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + for i in range(self.population_size): + if np.random.rand() < self.dynamic_mutation_rate(evaluations, self.budget): + # Mutation occurs + parent_idx = np.random.choice(self.elite_count) + parent = elites[parent_idx] + mutation = self.dynamic_mutation_scale(evaluations, self.budget) * np.random.normal( + 0, 1, self.dimension + ) + child = np.clip(parent + mutation, -5.0, 5.0) # Keeping child within bounds + else: + # Crossover between two elites + parents_indices = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + ( + population[parents_indices[0]][:crossover_point], + population[parents_indices[1]][crossover_point:], + ) + ) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def dynamic_mutation_rate(self, evaluations, budget): + # Adaptively decrease the mutation rate, focusing more on exploitation towards the end. + return max(0.01, 1 - np.sqrt(evaluations / budget)) + + def dynamic_mutation_scale(self, evaluations, budget): + # Gradual exponential decay for mutation scale to allow for refined search in later stages. + return self.mutation_intensity * (0.5 ** (evaluations / budget)) diff --git a/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveGradientClusteringPSO.py b/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveGradientClusteringPSO.py new file mode 100644 index 000000000..2a49b0643 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionAdaptiveGradientClusteringPSO.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedPrecisionAdaptiveGradientClusteringPSO: + def __init__( + self, + budget=10000, + population_size=150, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.2, + social_weight=1.5, + cluster_factor=0.1, + adaptation_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.cluster_factor = cluster_factor + self.adaptation_rate = adaptation_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + cluster_center = np.mean(particles, axis=0) + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + exploration_component = ( + (np.random.rand(self.dim) - 0.5) * 2 * self.cluster_factor * (self.ub - self.lb) + ) # Random exploratory moves + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + cluster_component = self.cluster_factor * (cluster_center - particles[i]) # Cluster force + + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + cluster_component + + exploration_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + # Adaptive clustering adjustment + if evaluation_counter % (self.budget // 10) == 0: + self.cluster_factor *= 1 - self.adaptation_rate # Gradually reduce clustering influence + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedPrecisionBoostedDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedPrecisionBoostedDifferentialEvolution.py new file mode 100644 index 000000000..919609fa0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionBoostedDifferentialEvolution.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedPrecisionBoostedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Maintaining high population for diversity + self.F_base = 0.5 # Reduced mutation factor for better local search + self.CR_base = 0.9 # Increased crossover probability for higher genetic exchange + self.F_min = 0.1 # Lower minimum mutation factor to prevent excessive perturbation + self.CR_min = 0.6 # Higher minimum crossover to ensure continuous variability + self.adaptive_increment = 0.05 # Increment factor for adaptive mutation strategy + self.loss_threshold = 1e-6 # Threshold for loss stabilization + + def __call__(self, func): + # Initialize population uniformly within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify the best individual + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx] + + # Evolutionary process given the budget constraints + n_iterations = int(self.budget / self.pop_size) + F = self.F_base + CR = self.CR_base + prev_best_fitness = best_fitness + + for iteration in range(n_iterations): + # Check if fitness improvements have stabilized + if abs(prev_best_fitness - best_fitness) < self.loss_threshold: + F += self.adaptive_increment # Increase mutation factor adaptively + + prev_best_fitness = best_fitness + + # Mutate and recombine + for i in range(self.pop_size): + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + trial = np.array([mutant[j] if np.random.rand() < CR else pop[i][j] for j in range(self.dim)]) + + # Evaluate and select + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Decay CR to maintain exploitation ability + CR = max(self.CR_min, CR - (self.CR_base - self.CR_min) / n_iterations) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedPrecisionConvergenceOptimizer.py b/nevergrad/optimization/lama/EnhancedPrecisionConvergenceOptimizer.py new file mode 100644 index 000000000..4f4f87623 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionConvergenceOptimizer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedPrecisionConvergenceOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=80, + elite_fraction=0.1, + mutation_intensity=0.5, + crossover_probability=0.8, + elite_boost_factor=1.2, + mutation_decay_factor=0.95, + improvement_threshold=20, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.elite_boost_factor = elite_boost_factor + self.mutation_decay_factor = mutation_decay_factor + self.improvement_threshold = improvement_threshold + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def elite_boosting(self, elite_individual): + perturbation = np.random.normal(0, self.mutation_intensity * self.elite_boost_factor, self.dimension) + return np.clip(elite_individual + perturbation, self.lower_bound, self.upper_bound) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + non_improvement_streak = 0 + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = np.array([self.elite_boosting(elite) for elite in elites]) + + while len(new_population) < self.population_size: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population = np.append(new_population, [child], axis=0) + + population = new_population + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + non_improvement_streak = 0 + else: + non_improvement_streak += 1 + + evaluations += self.population_size + + if non_improvement_streak >= self.improvement_threshold: + self.mutation_intensity *= self.mutation_decay_factor + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV38.py b/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV38.py new file mode 100644 index 000000000..cab64eb59 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV38.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedPrecisionEvolutionaryOptimizerV38: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.525, + F_range=0.35, + CR=0.92, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Fine-tuned mutation factor for more stability + self.F_range = F_range # Narrowed range for more precise mutations + self.CR = CR # Fine-tuned crossover probability to enhance quality of offspring + self.elite_fraction = elite_fraction # Increased elite fraction to emphasize better solutions + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Enhanced adaptive strategy + if ( + np.random.rand() < 0.8 + ): # Increased probability for selecting the best solution for mutation + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjusted F within a narrower range for precision + F = self.F_base + (np.random.rand() - 0.5) * 2 * self.F_range + + # DE/rand/1 mutation strategy + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with adjusted CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Early stopping if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV39.py b/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV39.py new file mode 100644 index 000000000..31eaa6af2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionEvolutionaryOptimizerV39.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedPrecisionEvolutionaryOptimizerV39: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.58, + F_range=0.4, + CR=0.93, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Adjusted base mutation factor for better exploration + self.F_range = F_range # Reduced range for mutation factor to prioritize precision + self.CR = CR # Fine-tuned crossover probability to optimize offspring quality + self.elite_fraction = elite_fraction # Slightly increased elite fraction for better elitism + self.mutation_strategy = mutation_strategy # Enhanced adaptive mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Enhanced adaptive strategy with rebalanced probability for selecting the best solution + if np.random.rand() < 0.85: # Increased use of the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + F = ( + self.F_base + (np.random.rand() - 0.5) * 2 * self.F_range + ) # Dynamically adjusted F for precision + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with maintained crossover probability + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedPrecisionGuidedQuantumStrategy.py b/nevergrad/optimization/lama/EnhancedPrecisionGuidedQuantumStrategy.py new file mode 100644 index 000000000..32aaab2ab --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionGuidedQuantumStrategy.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedPrecisionGuidedQuantumStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 350 # Further increased population size for extensive exploration + self.elite_size = 70 # Increased elite size to mitigate premature convergence + self.crossover_probability = 0.85 # Adjusted for a more balanced exploration-exploitation + self.mutation_scale = 0.008 # Refined mutation scale for even finer adjustments + self.quantum_mutation_scale = 0.03 # Reduced scale for smaller, precise quantum leaps + self.quantum_probability = 0.25 # Increased probability for quantum mutations + self.precision_boost_factor = 0.03 # Reduced boost factor for a smoother precision increase + self.reactivity_factor = 0.015 # Reduced for more stable evolution + self.recombination_rate = 0.25 # Increased rate for recombining elite solutions for diversity + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + # Implement quantum-inspired recombination + for i in range(num_offspring): + if np.random.rand() < self.crossover_probability: + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + offspring[i] = self.quantum_recombination(elite[p1], elite[p2]) + else: + offspring[i] = elite[np.random.choice(elite.shape[0])] + + # Mutation controlled by remaining budget + scale = self.mutation_scale + self.precision_boost_factor * np.log(remaining_budget + 1) + offspring[i] += np.random.normal(0, scale, self.dim) + + # Quantum mutation with optimization-oriented control + if np.random.rand() < self.quantum_probability: + offspring[i] += np.random.normal(0, self.quantum_mutation_scale, self.dim) + + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + + return np.vstack([elite, offspring]) + + def quantum_recombination(self, parent1, parent2): + # Implement a quantum-inspired recombination mechanism + mask = np.random.rand(self.dim) > 0.5 + child = np.where(mask, parent1, parent2) + return child + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/EnhancedPrecisionHybridSearchV2.py b/nevergrad/optimization/lama/EnhancedPrecisionHybridSearchV2.py new file mode 100644 index 000000000..4587bb80f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionHybridSearchV2.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedPrecisionHybridSearchV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + elite_size = int(0.20 * population_size) + mutation_rate = 0.06 + mutation_scale = lambda t: 0.1 * np.exp(-0.0001 * t) # Gradual reduction + crossover_rate = 0.80 + + local_search_prob = 0.25 # Slightly increased local search probability + local_search_step_scale = lambda t: 0.02 * np.exp(-0.00005 * t) # More gradual step decrease + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedPrecisionTunedCrossoverElitistStrategyV14.py b/nevergrad/optimization/lama/EnhancedPrecisionTunedCrossoverElitistStrategyV14.py new file mode 100644 index 000000000..1b4dfe609 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedPrecisionTunedCrossoverElitistStrategyV14.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedPrecisionTunedCrossoverElitistStrategyV14: + def __init__( + self, + budget, + dimension=5, + population_size=600, + elite_fraction=0.3, + mutation_intensity=0.025, + crossover_rate=0.85, + adaptivity_coefficient=0.9, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptivity_coefficient = adaptivity_coefficient + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.adaptive_crossover(parent1, parent2, evaluations) + else: + # Mutation of an elite + child = self.adaptive_mutate(parent1, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutate(self, individual, evaluations): + # Adaptive mutation intensity based on stage of optimization process + normalized_time = evaluations / self.budget + intensity = self.mutation_intensity * (1 - np.exp(-self.adaptivity_coefficient * normalized_time)) + return individual + np.random.normal(0, intensity, self.dimension) + + def adaptive_crossover(self, parent1, parent2, evaluations): + # Adaptive weighted crossover using a dynamic strategy based on evaluations + normalized_time = evaluations / self.budget + weight = 0.5 + 0.5 * np.sin(np.pi * normalized_time) + return weight * parent1 + (1 - weight) * parent2 diff --git a/nevergrad/optimization/lama/EnhancedProgressiveAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedProgressiveAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..e0f45093e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedProgressiveAdaptiveDifferentialEvolution.py @@ -0,0 +1,56 @@ +import numpy as np + + +class EnhancedProgressiveAdaptiveDifferentialEvolution: + def __init__(self, budget, dim=5, pop_size=150, F_init=0.8, F_end=0.2, CR=0.8): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.F_init = F_init # Initial mutation factor + self.F_end = F_end # Final mutation factor during the later stages of optimization + self.CR = CR # Crossover probability + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, population, idx, n_evals): + indices = [i for i in range(self.pop_size) if i != idx] + a, b, c = np.random.choice(indices, 3, replace=False) + # Linear decrease of mutation factor from F_init to F_end + F = self.F_init - (self.F_init - self.F_end) * (n_evals / self.budget) + mutant = np.clip(population[a] + F * (population[b] - population[c]), self.bounds[0], self.bounds[1]) + return mutant + + def crossover(self, target, mutant): + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, f_values, trial, trial_f, trial_idx): + if trial_f < f_values[trial_idx]: + population[trial_idx] = trial + f_values[trial_idx] = trial_f + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + + while n_evals < self.budget: + for idx in range(self.pop_size): + mutant = self.mutate(population, idx, n_evals) + trial = self.crossover(population[idx], mutant) + trial_f = func(trial) + n_evals += 1 + self.select(population, f_values, trial, trial_f, idx) + if n_evals >= self.budget: + break + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHR.py b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHR.py new file mode 100644 index 000000000..59d3f9ec3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHR.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedQAPSOAIRVCHR: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + velocity_clamp=0.5, + hybrid_restart_interval=100, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.velocity_clamp = velocity_clamp + self.hybrid_restart_interval = hybrid_restart_interval + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func, radius=0.1, num_samples=10): + best_x = x + best_f = func(x) + + for _ in range(num_samples): + x_new = x + np.random.uniform(-radius, radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -self.velocity_clamp, self.velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold or t % self.hybrid_restart_interval == 0: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLS.py b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLS.py new file mode 100644 index 000000000..c3950166a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLS.py @@ -0,0 +1,110 @@ +import numpy as np + + +class EnhancedQAPSOAIRVCHRLS: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + velocity_clamp=0.5, + hybrid_restart_interval=100, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.velocity_clamp = velocity_clamp + self.hybrid_restart_interval = hybrid_restart_interval + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func, radius=0.1, num_samples=10): + best_x = x + best_f = func(x) + + for _ in range(num_samples): + x_new = x + np.random.uniform(-radius, radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -self.velocity_clamp, self.velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold or t % self.hybrid_restart_interval == 0: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLSDP.py b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLSDP.py new file mode 100644 index 000000000..e442d6c34 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQAPSOAIRVCHRLSDP.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedQAPSOAIRVCHRLSDP: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + initial_velocity_clamp=0.5, + local_search_radius=0.05, + local_search_samples=20, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.initial_velocity_clamp = initial_velocity_clamp + self.local_search_radius = local_search_radius + self.local_search_samples = local_search_samples + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_samples): + x_new = x + np.random.uniform(-self.local_search_radius, self.local_search_radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_velocity_clamp(self, t): + return max(0.1, self.initial_velocity_clamp - 0.3 * t / self.budget) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + velocity_clamp = self.update_velocity_clamp(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -velocity_clamp, velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveCrossover.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveCrossover.py new file mode 100644 index 000000000..af83dd64e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveCrossover.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveCrossover: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 300 # Increased population size for greater diversity + mutation_factor = 0.8 # Initial mutation factor + crossover_prob = 0.7 # Initial crossover probability + adaptivity_rate = 0.1 # Increased rate at which parameters adapt + + # Initialize population and fitness + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + + # Elite-based reproduction with stronger adaptation + elite_size = int(population_size * 0.2) # Increased elite size to 20% + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Select parents from elite + if np.random.rand() < 0.5: + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices] + else: + parent1, parent2 = population[np.random.choice(range(population_size), 2, replace=False)] + + # Crossover with adaptive probability + mask = np.random.rand(self.dim) < crossover_prob + child = np.where(mask, parent1, parent2) + + # Enhanced Quantum-Inspired mutation + quantum_noise = np.random.randn(self.dim) * mutation_factor + child += quantum_noise * np.abs(np.sin(child)) # Sine modulation to introduce non-linearity + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + # Update the best solution if found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive mechanism for mutation and crossover, quick response to landscape + mutation_factor *= 1 - adaptivity_rate + crossover_prob = np.clip(crossover_prob + adaptivity_rate, 0, 1) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDE.py new file mode 100644 index 000000000..abbea5e5c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDE.py @@ -0,0 +1,149 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-enhanced dual strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt = np.random.uniform(-0.1, 0.1, self.dim) + mutant = mutant + jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py new file mode 100644 index 000000000..4d4c06005 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.dynamic_restart_threshold = 0.01 # Added for more adaptive restarts + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.dynamic_restart_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def dynamic_restart(self, population, fitness, func): + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def differential_memory_update(self, population): + if len(self.memory) >= self.elite_size: + for i in range(self.elite_size): + idx = np.random.randint(len(self.memory)) + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, self.memory[idx][0]) + self.memory[idx] = (trial, np.inf) # Reset fitness as it will be recalculated + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + self.differential_memory_update(population) + + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveEliteGuidedSearch.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveEliteGuidedSearch.py new file mode 100644 index 000000000..2b38c72cc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveEliteGuidedSearch.py @@ -0,0 +1,186 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveEliteGuidedSearch: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveFireworksOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveFireworksOptimizer.py new file mode 100644 index 000000000..9821b7f1f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveFireworksOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveFireworksOptimizer: + def __init__( + self, + budget=1000, + num_sparks=10, + num_iterations=100, + learning_rate=0.1, + momentum=0.9, + explosion_factor=2.0, + mutation_rate=0.1, + ): + self.budget = budget + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.learning_rate = learning_rate + self.momentum = momentum + self.explosion_factor = explosion_factor + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_sparks, dimensions)) + best_firework = fireworks[0] + explosion_sizes = np.ones(self.num_sparks) + velocities = np.zeros_like(fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for i, firework in enumerate(fireworks): + gradient = np.zeros(dimensions) + for _ in range(self.num_sparks): + spark = firework + np.random.normal(0, 1, size=dimensions) * explosion_sizes[i] + spark = np.clip(spark, bounds.lb, bounds.ub) + gradient += (func(spark) - func(firework)) * (spark - firework) + + velocities[i] = self.momentum * velocities[i] + self.learning_rate * gradient + fireworks[i] += velocities[i] + + # Introduce random mutation + if np.random.rand() < self.mutation_rate: + fireworks[i] = np.random.uniform(bounds.lb, bounds.ub, dimensions) + + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + explosion_sizes[i] *= self.explosion_factor + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveGradientDiversityExplorer.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveGradientDiversityExplorer.py new file mode 100644 index 000000000..11da00928 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveGradientDiversityExplorer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveGradientDiversityExplorer: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_intensity=1.5, + crossover_rate=0.8, + quantum_prob=0.9, + gradient_prob=0.2, + gamma=0.8, + beta=0.1, + epsilon=0.01, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gradient_prob = gradient_prob + self.gamma = gamma # Quantum state update influence + self.beta = beta # Mutation decay rate + self.epsilon = epsilon # Minimum mutation factor + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = self.mutate(population[parent_idx], evaluations) + + if np.random.random() < self.gradient_prob: + child = self.gradient_step(child, func) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, self.gamma, self.dimension) * (best_individual - individual) + return individual + perturbation + + def gradient_step(self, individual, func, lr=0.01): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = np.array(individual) + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - lr * grad_est diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridDEPSO_V4.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridDEPSO_V4.py new file mode 100644 index 000000000..7e9ea3720 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridDEPSO_V4.py @@ -0,0 +1,160 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveHybridDEPSO_V4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 + w = 0.7 # Inertia weight for PSO + c1 = 1.0 # Cognitive coefficient for PSO + c2 = 1.5 # Social coefficient for PSO + initial_F = 0.7 # Initial differential weight for DE + initial_CR = 0.8 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.1, beta=0.9): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridSearchV2.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridSearchV2.py new file mode 100644 index 000000000..4d18ebb0d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveHybridSearchV2.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveHybridSearchV2: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_frac=0.3, + mutation_intensity=0.8, + crossover_prob=0.7, + quantum_prob=0.8, + gradient_prob=0.6, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_frac) + self.mutation_intensity = mutation_intensity + self.crossover_prob = crossover_prob + self.quantum_prob = quantum_prob + self.gradient_prob = gradient_prob + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + p1, p2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[p1], population[p2]) + else: + offspring = population[np.random.choice(elite_indices)] + + # Apply quantum state update probabilistically + if np.random.random() < self.quantum_prob: + offspring = self.quantum_state_update(offspring, best_individual) + + # Apply gradient boost probabilistically + if np.random.random() < self.gradient_prob: + offspring = self.gradient_boost(offspring, func) + + # Mutate the offspring + mutation_scale = self.adaptive_mutation_scale(evaluations) + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) + + new_population[i] = offspring + + # Evaluate the new population + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = population[current_best_idx] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + child = alpha * parent1 + (1 - alpha) * parent2 + return child + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, 0.1, self.dimension) + return best_individual + perturbation * (best_individual - individual) + + def gradient_boost(self, individual, func): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = individual.copy() + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - 0.01 * grad_est + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_intensity * np.exp(-0.05 * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveLevySwarmOptimization.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveLevySwarmOptimization.py new file mode 100644 index 000000000..00d35a2c1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveLevySwarmOptimization.py @@ -0,0 +1,160 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.4 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiPhaseDE_v3.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiPhaseDE_v3.py new file mode 100644 index 000000000..8bb8d2550 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiPhaseDE_v3.py @@ -0,0 +1,136 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveMultiPhaseDE_v3: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(15): # increased number of local steps + perturbation = np.random.uniform(-0.03, 0.03, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiStrategyEvolution.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiStrategyEvolution.py new file mode 100644 index 000000000..ebc79b06c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveMultiStrategyEvolution.py @@ -0,0 +1,184 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveMultiStrategyEvolution: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveNesterovStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveNesterovStrategy.py new file mode 100644 index 000000000..4bbb9fb2e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveNesterovStrategy.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveNesterovStrategy: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.9, + quantum_influence_rate=0.05, + adaptive_lr_factor=0.95, + elite_fraction=0.1, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.elite_fraction = elite_fraction + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_fraction), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_fraction), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_fraction), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + self.population[i] = np.copy(self.population[i]) + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + # Quantum influence occasionally gives a random kick + if np.random.rand() < self.quantum_influence_rate: + self.population[i] += ( + np.random.normal(0, 1, self.dim) * 0.1 * (self.upper_bound - self.lower_bound) + ) + + # Nesterov accelerated gradient (simulated with random noise as surrogate gradient) + self.velocities[i] = self.momentum * self.velocities[i] - self.learning_rate * np.random.normal( + 0, 1, self.dim + ) + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + # Adaptive decay of learning rate + self.learning_rate *= self.adaptive_lr_factor + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/EnhancedQuantumAdaptiveOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveOptimizer.py new file mode 100644 index 000000000..bbababa7a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAdaptiveOptimizer.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedQuantumAdaptiveOptimizer: + def __init__( + self, + budget=10000, + population_size=60, + inertia_weight=0.8, + cognitive_coef=1.5, + social_coef=1.7, + quantum_probability=0.15, + damping_factor=0.99, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + + self.social_coef * r2 * (global_best - particles[i]) + ) + + # Quantum Leap Mechanism + if np.random.rand() < self.quantum_probability: + quantum_leap = global_best + np.random.normal(0, 1, self.dim) * ( + global_best - personal_bests[i] + ) + particles[i] = quantum_leap + else: + particles[i] += velocities[i] + + # Boundary handling + particles[i] = np.clip(particles[i], self.lb, self.ub) + + # Evaluate + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + self.inertia_weight *= self.damping_factor + self.quantum_probability *= 1.05 + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumAnnealingOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumAnnealingOptimizer.py new file mode 100644 index 000000000..d697cc3d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumAnnealingOptimizer.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EnhancedQuantumAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature + self.cooling_rate = 0.85 # Cooling rate for annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the elite fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumCognitionFocusedOptimizerV18.py b/nevergrad/optimization/lama/EnhancedQuantumCognitionFocusedOptimizerV18.py new file mode 100644 index 000000000..18a90f536 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCognitionFocusedOptimizerV18.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedQuantumCognitionFocusedOptimizerV18: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coeff=2.5, + social_coeff=2.5, + inertia_decay=0.99, + quantum_jump_rate=0.01, + quantum_scale=0.008, + quantum_decay=0.97, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive Quantum Jump Strategy + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal(0, self.quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + # Adjust decay rates and adapt quantum mechanics dynamically + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumCognitionOptimizerV12.py b/nevergrad/optimization/lama/EnhancedQuantumCognitionOptimizerV12.py new file mode 100644 index 000000000..537ecdc54 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCognitionOptimizerV12.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedQuantumCognitionOptimizerV12: + def __init__( + self, + budget=10000, + population_size=25, + inertia_weight=0.75, + cognitive_coefficient=2.1, + social_coefficient=2.1, + inertia_decay=0.98, + quantum_jump_rate=0.03, + min_quantum_scale=0.05, + max_quantum_scale=0.15, + adaptive_scale_factor=0.6, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with controlled adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Decay inertia weight and adapt quantum jump rate + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= 1 - self.adaptive_scale_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumCooperativeStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumCooperativeStrategy.py new file mode 100644 index 000000000..42fad4457 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCooperativeStrategy.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedQuantumCooperativeStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 100 + self.elite_size = 15 # Increased elite size for better exploitation + self.crossover_fraction = 0.85 # Increased crossover fraction + self.mutation_scale = 0.08 # Slightly reduced for more fine-grained search + self.quantum_mutation_scale = 0.25 # Reduced quantum mutation scale + self.quantum_probability = 0.03 # Reduced quantum probability for more stability + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover_and_mutate(self, parents, num_offspring): + offspring = np.empty((num_offspring, self.dim)) + for i in range(num_offspring): + if np.random.rand() < self.crossover_fraction: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = parents[p1][:cross_point] + offspring[i][cross_point:] = parents[p2][cross_point:] + else: + offspring[i] = parents[np.random.randint(len(parents))] + + # Mutation strategy + if np.random.rand() < self.quantum_probability: + mutation_shift = np.random.normal(0, self.quantum_mutation_scale, self.dim) + else: + mutation_shift = np.random.normal(0, self.mutation_scale, self.dim) + offspring[i] += mutation_shift + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + num_offspring = self.population_size - self.elite_size + offspring = self.crossover_and_mutate(elite_population, num_offspring) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..7123d4f5a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,192 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Reduced population size for increased iterations + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.9 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.25 + self.adaptive_learning_rate = 0.02 + self.strategy_switches = [0.2, 0.5, 0.8] + self.local_opt_prob = 0.1 # Probability of local optimization + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus.py new file mode 100644 index 000000000..3086bfd78 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Increased population size for better exploration + self.sigma = 0.05 # Reduced sigma for more precision + self.c1 = 0.05 # Reduced for more stability + self.cmu = 0.03 # Reduced for more stability + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.5 # Adjusted differential weight for balanced exploration/exploitation + self.CR = 0.9 # Increased crossover rate for more diversity + self.elitism_rate = 0.15 # Adjusted elitism rate to retain more diversity + self.eval_count = 0 + self.alpha_levy = 0.005 # Further reduced Levy flight step size for better precision + self.levy_prob = 0.1 # Increased Levy flight probability for better exploration + self.adaptive_learning_rate = 0.05 # Adjusted adaptive learning rate for stability + self.strategy_switches = [0.25, 0.5, 0.75] + self.local_opt_prob = 0.5 # Increased probability of local optimization + self.learning_rate_decay = 0.95 # Adjusted learning rate decay for stability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.05 # Further reduced hybridization probability for stability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py new file mode 100644 index 000000000..36d58c32d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py @@ -0,0 +1,201 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 + self.sigma = 0.1 # Adjusted sigma + self.c1 = 0.01 # Reduced for more stability + self.cmu = 0.02 # Reduced for more stability + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 # Adjusted differential weight + self.CR = 0.8 # Adjusted crossover rate + self.elitism_rate = 0.1 # Reduced elitism rate + self.eval_count = 0 + self.alpha_levy = 0.1 # Adjusted Levy flight step size + self.levy_prob = 0.05 # Adjusted Levy flight probability + self.adaptive_learning_rate = 0.1 # Adjusted adaptive learning rate + self.strategy_switches = [0.2, 0.5, 0.8] # Adjusted strategy switching points + self.local_opt_prob = 0.3 # Adjusted probability of local optimization + self.learning_rate_decay = 0.9 # Adjusted learning rate decay + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.05 # Further reduced hybridization probability for stability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + def self_adaptive_differential_evolution_parameters(): + """Self-adaptive parameter adjustment for F and CR.""" + if np.random.rand() < 0.1: # 10% chance to adjust parameters + self.F = np.random.uniform(0.5, 1) + self.CR = np.random.uniform(0.1, 1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + self_adaptive_differential_evolution_parameters() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts.py new file mode 100644 index 000000000..9f84ad41d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.dynamic_restart_threshold = 0.01 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.dynamic_restart_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def dynamic_restart(self, population, fitness, func): + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def differential_memory_update(self, population): + if len(self.memory) >= self.elite_size: + for i in range(self.elite_size): + idx = np.random.randint(len(self.memory)) + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, self.memory[idx][0]) + self.memory[idx] = (trial, np.inf) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + self.differential_memory_update(population) + + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolution.py new file mode 100644 index 000000000..094f6b2cf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolution.py @@ -0,0 +1,149 @@ +import numpy as np + + +class EnhancedQuantumDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.uniform(-0.1, 0.1, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, bounds): + """Perform differential mutation""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = [-5.0, 5.0] + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, F, bounds) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart.py new file mode 100644 index 000000000..9984bddf8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart.py @@ -0,0 +1,109 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 50 + self.memory_update_interval = 25 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + memory = self.update_memory(memory, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts.py new file mode 100644 index 000000000..3240b6b23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts.py @@ -0,0 +1,138 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, global_best, global_best_fit, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + return population, fitness, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + population, fitness, global_best, global_best_fit = self.adaptive_restart( + population, fitness, global_best, global_best_fit, func + ) + + if evaluations % (self.population_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory.py new file mode 100644 index 000000000..38b8e6078 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.initial_num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % (self.population_size * 10) == 0: + if diversity < self.diversity_threshold: + for j in range(num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + # Memory update + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = self.memory_rate * memory[i] + (1 - self.memory_rate) * population[i] + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism.py new file mode 100644 index 000000000..f9928d02b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Initial Differential weight + self.initial_CR = 0.9 # Initial Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.amplitude = 0.15 # Quantum amplitude + self.eval_count = 0 + + def __call__(self, func): + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-self.amplitude, self.amplitude, position.shape) * ( + best_position - position + ) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with random components + adaptive_F = self.initial_F * np.random.rand() + adaptive_CR = self.initial_CR * np.random.rand() + return adaptive_F, adaptive_CR + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if self.eval_count % 2 == 0: # Apply quantum every second step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py new file mode 100644 index 000000000..9019763be --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py @@ -0,0 +1,154 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def adaptive_restart(self, particles, fitness, personal_bests, personal_best_fits, func): + best_idx = np.argmin(personal_best_fits) + best_particle = personal_bests[best_idx] + best_fit = personal_best_fits[best_idx] + + if np.std(personal_best_fits) < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = best_particle + global_best_fit = best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + # Refinement step for elite particles + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..f90bc353a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 50 # Reduced swarm size for faster convergence + self.init_num_niches = 5 # Reduced initial number of niches for better focus + self.alpha = 0.7 # Increased alpha for better DE influence + self.beta = 0.3 # Reduced beta for controlled quantum update + self.local_search_prob = 0.3 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, p_best, g_best, beta): + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - g_best) * np.log(1 / u) + return x + Q * v + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Quantum inspired update + quantum_trial = self.quantum_update(trial, local_bests[n], global_best, self.beta) + quantum_trial = np.clip(quantum_trial, self.bounds[0], self.bounds[1]) + f_quantum_trial = func(quantum_trial) + evaluations += 1 + + if f_quantum_trial < f_trial: + trial, f_trial = quantum_trial, f_quantum_trial + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDiversityDE.py b/nevergrad/optimization/lama/EnhancedQuantumDiversityDE.py new file mode 100644 index 000000000..b944c864c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDiversityDE.py @@ -0,0 +1,153 @@ +import numpy as np + + +class EnhancedQuantumDiversityDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumDynamicAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/EnhancedQuantumDynamicAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..5a3ce83f3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDynamicAdaptiveHybridDEPSO.py @@ -0,0 +1,160 @@ +import numpy as np + + +class EnhancedQuantumDynamicAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.5 # Inertia weight for PSO + c1 = 0.5 # Cognitive coefficient for PSO + c2 = 1.5 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # More frequent restarts + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.1, beta=0.9): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumDynamicBalanceOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumDynamicBalanceOptimizer.py new file mode 100644 index 000000000..0acc4bce9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDynamicBalanceOptimizer.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedQuantumDynamicBalanceOptimizer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.95, + quantum_prob=0.15, + elite_rate=0.1, + noise_intensity=0.05, + perturbation_scale=0.05, + balance_factor=0.75, + adaptive_lr=True, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + self.balance_factor = balance_factor # Adjusted for better quantum/classical balance + self.adaptive_lr = adaptive_lr # New feature for adaptive learning rates + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob: + # Quantum jump influenced by dynamic balance factor + quantum_jump = np.random.normal( + 0.0, + self.perturbation_scale + * (1 - self.balance_factor) + * np.linalg.norm(global_best - self.population[i]), + self.dim, + ) + self.population[i] += quantum_jump + else: + # Classic momentum and adaptive learning rate + if self.adaptive_lr: + lr = self.learning_rate / ( + 1 + 0.01 * np.sqrt(np.sum((global_best - self.population[i]) ** 2)) + ) + else: + lr = self.learning_rate + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = ( + self.momentum * self.velocities[i] + lr * (global_best - self.population[i]) + noise + ) + future_position = self.population[i] + self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/EnhancedQuantumDynamicOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumDynamicOptimizer.py new file mode 100644 index 000000000..03f5ed6cf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumDynamicOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedQuantumDynamicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # Increased population for better exploration + inertia_weight = 0.9 # Higher inertia for initial global exploration + cognitive_coefficient = 2.1 # Slightly reduced to prevent premature convergence + social_coefficient = 2.1 # Balanced to enhance information sharing + velocity_limit = 0.2 # Increased for faster coverage of the search space + quantum_momentum = 0.02 # Increased momentum for enhanced quantum jumps + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main loop + while current_budget < self.budget: + w = inertia_weight * ( + 0.99 ** (current_budget / self.budget) + ) # Dynamic weight decay for fine-tuned exploration-to-exploitation shift + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump with dynamically decreasing probability + quantum_probability = 0.05 * np.exp(-10 * (current_budget / self.budget)) + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions using PSO rules + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Update personal and global bests + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumEvolutionStrategy.py new file mode 100644 index 000000000..ebac63f61 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumEvolutionStrategy.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedQuantumEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 250 + elite_size = 30 + evaluations = 0 + mutation_scale = 0.3 # Initial mutation scale for better exploration + recombination_prob = 0.8 + quantum_factor = 0.3 # Proportion of population to regenerate quantumly + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Quantum-inspired solution space exploration + num_quantum_individuals = int(population_size * quantum_factor) + quantum_population = np.random.uniform(self.lb, self.ub, (num_quantum_individuals, self.dim)) + quantum_fitness = np.array([func(ind) for ind in quantum_population]) + evaluations += len(quantum_population) + + combined_population = np.vstack((population, quantum_population)) + combined_fitness = np.hstack((fitness, quantum_fitness)) + + # Elite selection using tournament selection + elite_indices = np.argsort(combined_fitness)[:elite_size] + elite_individuals = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + + # Generate new candidates using differential evolution strategy + new_population = [] + for _ in range(population_size - elite_size): + indices = np.random.choice(elite_size, 3, replace=False) + x1, x2, x3 = elite_individuals[indices] + mutant = x1 + mutation_scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + if np.random.rand() < recombination_prob: + cross_points = np.random.rand(self.dim) < 0.5 + child = np.where(cross_points, mutant, x1) + else: + child = mutant + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + + # Update population and fitness + population = np.vstack((elite_individuals, new_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += len(new_population) + + # Adaptive mutation scale update + mutation_scale *= 0.96 # Adapted slower decay to retain exploration longer + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithm.py b/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithm.py new file mode 100644 index 000000000..04dce7587 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithm.py @@ -0,0 +1,53 @@ +import numpy as np + + +class EnhancedQuantumFireworksAlgorithm: + def __init__(self, budget=1000, num_fireworks=10, num_sparks=5, num_iterations=100, mutation_rate=0.1): + self.budget = budget + self.num_fireworks = num_fireworks + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_fireworks, dimensions)) + best_firework = fireworks[0] + explosion_sizes = np.ones(self.num_fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for _ in range(self.num_sparks): + selected_firework = np.random.choice(range(self.num_fireworks)) + spark = ( + fireworks[selected_firework] + + np.random.normal(0, 1, size=dimensions) * explosion_sizes[selected_firework] + ) + spark = np.clip(spark, bounds.lb, bounds.ub) + f_spark = func(spark) + + if f_spark < f: + fireworks[selected_firework] = spark + f = f_spark + if f < func(best_firework): + best_firework = spark + + # Introduce random mutation + for i in range(self.num_fireworks): + if np.random.rand() < self.mutation_rate: + fireworks[i] = np.random.uniform(bounds.lb, bounds.ub, dimensions) + + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + explosion_sizes[i] *= 2.0 + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithmV2.py b/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithmV2.py new file mode 100644 index 000000000..41e85dacd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumFireworksAlgorithmV2.py @@ -0,0 +1,64 @@ +import numpy as np + + +class EnhancedQuantumFireworksAlgorithmV2: + def __init__( + self, + budget=1000, + num_fireworks=10, + num_sparks=5, + num_iterations=100, + mutation_rate=0.1, + explosion_rate=0.1, + ): + self.budget = budget + self.num_fireworks = num_fireworks + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + self.explosion_rate = explosion_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_fireworks, dimensions)) + best_firework = fireworks[0] + explosion_sizes = np.ones(self.num_fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for _ in range(self.num_sparks): + selected_firework = np.random.choice(range(self.num_fireworks)) + spark = ( + fireworks[selected_firework] + + np.random.normal(0, 1, size=dimensions) * explosion_sizes[selected_firework] + ) + spark = np.clip(spark, bounds.lb, bounds.ub) + f_spark = func(spark) + + if f_spark < f: + fireworks[selected_firework] = spark + f = f_spark + if f < func(best_firework): + best_firework = spark + + # Introduce random mutation with adaptive explosion sizes + for i in range(self.num_fireworks): + if np.random.rand() < self.mutation_rate: + fireworks[i] = np.random.uniform(bounds.lb, bounds.ub, dimensions) + + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + explosion_sizes[i] = np.clip( + explosion_sizes[i] * (1 + self.explosion_rate * np.random.normal()), 1, None + ) + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..a1c74be96 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimization.py @@ -0,0 +1,225 @@ +import numpy as np + + +class EnhancedQuantumGradientAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1_init = 2.0 # Initial cognitive constant + c2_init = 2.0 # Initial social constant + w_init = 0.5 # Initial inertia weight + w_min = 0.1 # Minimum inertia weight + w_max = 0.9 # Maximum inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + + # Differential Evolution parameters + F_init = 0.5 # Initial Differential weight + F = F_init # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1_init * r1 * (personal_bests[idx] - x) + social_component = c2_init * r2 * (global_best_position - x) + velocities[idx] = w_init * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Dynamic adjustment of PSO parameters + w = w_min + (w_max - w_min) * (1 - i / self.budget) + c1 = c1_init * (1 - i / self.budget) + c2 = c2_init * (i / self.budget) + + # Adjust Differential Evolution parameter F dynamically + if i % 50 == 0 and i > 0: + if self.f_opt < global_best_score: + F *= 1.05 # Increase F if the global best is improving + else: + F = F_init # Reset F if no improvement + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedQuantumGradientAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimizationV5.py b/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimizationV5.py new file mode 100644 index 000000000..f436b5f7d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientAdaptiveExplorationOptimizationV5.py @@ -0,0 +1,212 @@ +import numpy as np + + +class EnhancedQuantumGradientAdaptiveExplorationOptimizationV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Further increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (slightly reduced for balance) + c2 = 2.5 # Social constant (slightly increased for better global search) + w = 0.3 # Further reduced inertia weight for better convergence control + + # Learning rate adaptation parameters + alpha = 0.15 # Slightly increased learning rate + beta = 0.85 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.7 # Differential weight (slightly increased for stronger mutation) + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.05 # Reduced threshold for stricter diversity enforcement + stagnation_counter = 0 + max_stagnation = 15 # Reduced max stagnation to trigger diversity enforcement sooner + + # Exploration improvement parameters + exploration_factor = 0.3 # Increased exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.4 # Increased mutation factor + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + new_position = np.concatenate((new_position, positions[idx][2:]), axis=0) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedQuantumGradientAdaptiveExplorationOptimizationV5(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimization.py new file mode 100644 index 000000000..9318b92ee --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimization.py @@ -0,0 +1,216 @@ +import numpy as np + + +class EnhancedQuantumGradientExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 50 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (decreased for more balanced search) + c2 = 1.5 # Social constant (decreased for more balanced search) + w = 0.7 # Inertia weight (balanced for exploration and exploitation) + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.7 # Differential weight (increased for stronger mutations) + CR = 0.8 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.2 + stagnation_counter = 0 + max_stagnation = 15 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 6 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.3 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Apply gradient clipping + grad = np.clip(grad, -1.0, 1.0) + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.2 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = new_position + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Dynamic mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation_factor_adaptive = mutation_factor * (1 - i / self.budget) + mutation = mutation_factor_adaptive * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedQuantumGradientExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimizationV2.py b/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimizationV2.py new file mode 100644 index 000000000..79a4a5476 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientExplorationOptimizationV2.py @@ -0,0 +1,194 @@ +import numpy as np + + +class EnhancedQuantumGradientExplorationOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + global_best_position = None + global_best_score = np.inf + + c1, c2 = 2.0, 2.0 + w_max, w_min = 0.9, 0.4 + + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + F_min, F_max = 0.4, 0.9 + CR = 0.9 + + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 + + theta = np.pi / 4 + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + mutation_factor = 0.2 + + improvement_threshold = 0.005 + + historical_bests = [] + + prev_f = np.inf + + for i in range(self.budget): + w = w_max - (w_max - w_min) * (i / self.budget) + T = 1 - (i / self.budget) + + for idx in range(swarm_size): + x, v = positions[idx], velocities[idx] + + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cog_comp = c1 * r1 * (personal_bests[idx] - x) + soc_comp = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cog_comp + soc_comp + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.9 + + prev_f = f + + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + if len(historical_bests) > 0 and i % 50 == 0: + for idx in range(swarm_size): + new_position = historical_bests[ + np.random.randint(len(historical_bests)) + ] + mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + historical_bests.append(global_best_position) + if len(historical_bests) > 10: + historical_bests.pop(0) + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedQuantumGradientExplorationOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumGradientMemeticOptimizer.py new file mode 100644 index 000000000..5ce96bbbc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientMemeticOptimizer.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumGradientMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.6 + self.cognitive_weight = 1.5 + self.social_weight = 1.5 + self.quantum_weight = 0.3 + self.elite_fraction = 0.2 + self.memory_size = 30 + self.local_search_probability = 0.95 + self.stagnation_threshold = 1 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.9 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedQuantumGradientMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumGradientOptimizerV5.py b/nevergrad/optimization/lama/EnhancedQuantumGradientOptimizerV5.py new file mode 100644 index 000000000..5fd9d5c5a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumGradientOptimizerV5.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedQuantumGradientOptimizerV5: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.8, + cognitive_coef=2.2, + social_coef=2.2, + quantum_probability=0.2, + damping_factor=0.95, + adaptive_quantum_shift=0.005, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.adaptive_quantum_shift = adaptive_quantum_shift + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + # Gradual inertia weight reduction for precision + inertia = self.inertia_weight * (self.damping_factor ** (evaluations / self.budget)) + + velocities[i] = ( + inertia * velocities[i] + + self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + + self.social_coef * r2 * (global_best - particles[i]) + ) + + if np.random.rand() < self.quantum_probability: + # Quantum leap using a Gaussian distribution centered at global best + quantum_leap = global_best + np.random.normal(0, 1.0 / evaluations**0.5, self.dim) + particles[i] = np.clip(quantum_leap, self.lb, self.ub) + else: + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Dynamic adjustment for exploration and exploitation balance + self.quantum_probability += self.adaptive_quantum_shift + self.inertia_weight *= self.damping_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonicAdaptationStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonicAdaptationStrategy.py new file mode 100644 index 000000000..933a5f383 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonicAdaptationStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedQuantumHarmonicAdaptationStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 180 # Slightly larger population size for broader search + self.sigma_initial = 1.0 # Increased initial standard deviation for global exploration + self.learning_rate = 0.07 # Slightly reduced learning rate for more stable adaptation + self.CR_base = 0.6 # Increased base crossover probability for higher diversity in offspring + self.q_impact_initial = 0.4 # Increased initial quantum impact to boost early exploration + self.q_impact_decay = 0.98 # More aggressive decay rate for the quantum impact + self.sigma_decay = 0.98 # More aggressive decay for sigma to focus on local areas faster + self.elitism_factor = 5 # Increased elitism factor to ensure survival of top individuals + self.CR_adaptive_increment = 0.004 # More fine-tuned control over adaptive crossover increment + + def __call__(self, func): + # Initialize population within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Setup for elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + current_CR = self.CR_base + self.CR_adaptive_increment * iteration + + for i in range(self.pop_size): + if i in elites: # Avoid disturbing elite members + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c + quantum_term) + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = current_CR + self.learning_rate * (np.random.rand() - 0.5) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites regularly + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonyMemeticAlgorithm.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonyMemeticAlgorithm.py new file mode 100644 index 000000000..833e49446 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonyMemeticAlgorithm.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedQuantumHarmonyMemeticAlgorithm: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=20, memetic_prob=0.4): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonySearch.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearch.py new file mode 100644 index 000000000..337984571 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearch.py @@ -0,0 +1,55 @@ +import numpy as np + + +class EnhancedQuantumHarmonySearch: + def __init__( + self, budget, harmony_memory_size=10, pitch_adjustment_rate=0.1, bandwidth=0.01, mutation_rate=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + return self.f_opt, self.x_opt + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < self.pitch_adjustment_rate: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + return new_harmony diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchAB.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchAB.py new file mode 100644 index 000000000..81f83eb40 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchAB.py @@ -0,0 +1,58 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedQuantumHarmonySearchAB: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.9): + self.budget = budget + self.hmcr = hmcr # Harmony Memory Considering Rate + self.par = par # Pitch Adjustment Rate + self.init_bw = init_bw # Initial Bandwidth + self.bw_range = bw_range # Range for Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.1: # Introduce occasional random jumps for exploration + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + bandwidth = self.adaptive_bandwidth(i) # Update bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGB.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGB.py new file mode 100644 index 000000000..176fe6c43 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGB.py @@ -0,0 +1,62 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedQuantumHarmonySearchABGB: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.9): + self.budget = budget + self.hmcr = hmcr # Harmony Memory Considering Rate + self.par = par # Pitch Adjustment Rate + self.init_bw = init_bw # Initial Bandwidth + self.bw_range = bw_range # Range for Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = harmony_memory[np.argmin([func(h) for h in harmony_memory])] + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if f < func(global_best): + global_best = new_harmony # Update global best + + if np.random.rand() < 0.1: # Introduce occasional random jumps for exploration + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + bandwidth = self.adaptive_bandwidth(i) # Update bandwidth + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGBRefined.py b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGBRefined.py new file mode 100644 index 000000000..4d7f835e6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHarmonySearchABGBRefined.py @@ -0,0 +1,62 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedQuantumHarmonySearchABGBRefined: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + global_best = harmony_memory[np.argmin([func(h) for h in harmony_memory])] + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=bandwidth + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if f < func(global_best): + global_best = new_harmony + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE.py new file mode 100644 index 000000000..c706eb1ed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE.py @@ -0,0 +1,184 @@ +import numpy as np + + +class EnhancedQuantumHybridAdaptiveDE: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE_v2.py b/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE_v2.py new file mode 100644 index 000000000..065ecbb8a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumHybridAdaptiveDE_v2.py @@ -0,0 +1,184 @@ +import numpy as np + + +class EnhancedQuantumHybridAdaptiveDE_v2: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumInformedGradientOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumInformedGradientOptimizer.py new file mode 100644 index 000000000..226e041f6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumInformedGradientOptimizer.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedQuantumInformedGradientOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimension as per the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 100 # A larger population for better exploration + inertia_weight = 0.7 # More moderate starting inertia + cognitive_coefficient = 1.8 # Personal learning factor + social_coefficient = 1.8 # Social learning factor + quantum_probability = 0.1 # Probability for quantum-inspired jumps + max_velocity = 0.5 # Reduced max velocity for finer control + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + w = inertia_weight * (1 - np.sqrt(current_budget / self.budget)) # More gradual decay + + for i in range(population_size): + if current_budget >= self.budget: + break + + if np.random.rand() < quantum_probability: + # Quantum jump to potentially escape local minima + population[i] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + else: + # Standard PSO update formula with velocity clamping + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia + cognitive_component + social_component, -max_velocity, max_velocity + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumInfusedAdaptiveStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumInfusedAdaptiveStrategy.py new file mode 100644 index 000000000..dd1b09234 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumInfusedAdaptiveStrategy.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedQuantumInfusedAdaptiveStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 200 + elite_size = 20 + evaluations = 0 + mutation_factor = 0.7 + crossover_probability = 0.9 + quantum_probability = 0.15 + adaptive_rate = 0.1 + learning_period = 100 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Adaptive mutation and crossover strategy, enhanced by periodic learning + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Quantum mutation step with dynamic adaptation + if np.random.rand() < quantum_probability: + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Adapt strategy parameters based on recent performance improvements + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-5: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.1) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumInspiredHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumInspiredHybridOptimizer.py new file mode 100644 index 000000000..aa643c0ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumInspiredHybridOptimizer.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedQuantumInspiredHybridOptimizer: + def __init__(self, budget, dim=5, population_size=50, elite_size=10): + self.budget = budget + self.dim = dim + self.population_size = population_size + self.elite_size = elite_size + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.tournament_size = 5 + self.mutation_prob = 0.2 + self.learning_rate = 0.1 + self.alpha = 0.75 # Factor to adjust mutation probability dynamically + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(x) for x in population]) + + def tournament_selection(self, population, fitnesses): + selected_indices = np.random.randint( + 0, self.population_size, (self.population_size, self.tournament_size) + ) + selected_fitnesses = fitnesses[selected_indices] + winners_indices = selected_indices[ + np.arange(self.population_size), np.argmin(selected_fitnesses, axis=1) + ] + return population[winners_indices] + + def mutate(self, population): + mutation_mask = np.random.rand(self.population_size, self.dim) < self.mutation_prob * self.alpha + gaussian_perturbations = np.random.normal(0, self.learning_rate, (self.population_size, self.dim)) + mutated_population = population + mutation_mask * gaussian_perturbations + return np.clip(mutated_population, self.lower_bound, self.upper_bound) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + iterations = self.population_size + while iterations < self.budget: + selected = self.tournament_selection(population, fitness) + mutated = self.mutate(selected) + mutated_fitness = self.evaluate_population(func, mutated) + + combined_population = np.vstack((population, mutated)) + combined_fitness = np.concatenate((fitness, mutated_fitness)) + + top_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[top_indices] + fitness = combined_fitness[top_indices] + + if np.min(fitness) < best_fitness: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + iterations += self.population_size + # Update mutation probability dynamically to balance exploration and exploitation + self.mutation_prob *= 1 - self.alpha + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedQuantumIterativeRefinement.py b/nevergrad/optimization/lama/EnhancedQuantumIterativeRefinement.py new file mode 100644 index 000000000..9c6d6c351 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumIterativeRefinement.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedQuantumIterativeRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 500 # Increased population size for wider exploration + self.sigma_initial = 0.9 # Narrower initial spread to focus on promising regions quickly + self.learning_rate = 0.01 # Reduced learning rate for more gradual adjustments + self.CR_base = 0.9 # Higher initial crossover probability for initial diversity + self.q_impact_initial = 1.0 # Strong initial quantum impact for broad exploration + self.q_impact_decay = 0.95 # Decays slower to maintain quantum effects longer + self.sigma_decay = 0.99 # Maintain sigma value longer for sustained exploration + self.elitism_factor = 20 # Increased percentage of elite individuals + self.convergence_threshold = 1e-8 # Threshold for early stopping if solution converges + + def __call__(self, func): + # Initialize population within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Setup for elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + last_best_fitness = best_fitness + + # Evolutionary loop + for iteration in range(int(self.budget / self.pop_size)): + if abs(last_best_fitness - best_fitness) < self.convergence_threshold: + break # Early stopping if convergence is achieved + last_best_fitness = best_fitness + + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + current_CR = ( + self.CR_base - (iteration / (self.budget / self.pop_size)) * 0.3 + ) # Faster decrease in CR + + for i in range(self.pop_size): + if i in elites: # Keep elite members + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c + quantum_term) + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = np.clip(current_CR + self.learning_rate * np.random.randn(), 0, 1) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites periodically + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedQuantumLeapGradientBoostPSO.py b/nevergrad/optimization/lama/EnhancedQuantumLeapGradientBoostPSO.py new file mode 100644 index 000000000..1c6adbd67 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLeapGradientBoostPSO.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedQuantumLeapGradientBoostPSO: + def __init__( + self, + budget=10000, + population_size=100, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=1.8, + social_weight=2.2, + quantum_prob=0.2, + gradient_boost=0.25, + adaptative_gradient=0.15, + quantum_radius_factor=1.0, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_prob = quantum_prob + self.gradient_boost = gradient_boost + self.adaptative_gradient = adaptative_gradient + self.quantum_radius_factor = quantum_radius_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + inertia_reduction = (self.initial_inertia - self.final_inertia) / self.budget + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.adaptative_gradient + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + + cognitive_component + + social_component + + gradient_component * self.gradient_boost + ) + + if np.random.rand() < self.quantum_prob: + quantum_radius = self.quantum_radius_factor * np.linalg.norm( + global_best - personal_bests[i] + ) + quantum_jump = np.random.normal(0, quantum_radius, self.dim) + particles[i] = global_best + quantum_jump + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumLeapPSO.py b/nevergrad/optimization/lama/EnhancedQuantumLeapPSO.py new file mode 100644 index 000000000..ac779a6c2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLeapPSO.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedQuantumLeapPSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.9, + final_inertia=0.3, + cognitive_weight=1.8, + social_weight=2.1, + quantum_prob=0.3, + quantum_radius=0.25, + adaptative_gradient=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_prob = quantum_prob + self.quantum_radius = quantum_radius + self.adaptative_gradient = adaptative_gradient + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + inertia_reduction = (self.initial_inertia - self.final_inertia) / self.budget + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.adaptative_gradient + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component - gradient_component + ) + + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal(0, self.quantum_radius, self.dim) + particles[i] = global_best + quantum_jump + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialDynamicOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialDynamicOptimizer.py new file mode 100644 index 000000000..0132d8900 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialDynamicOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedQuantumLevyDifferentialDynamicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.5 * progress + social_coefficient = 1.7 + 0.5 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.6 - 0.5 * progress + levy_factor = 0.8 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 200 # Further increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.9: # Further increased probability for Levy flight + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialOptimizer.py new file mode 100644 index 000000000..43bdf0bd9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedQuantumLevyDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.4 - 1.0 * progress + social_coefficient = 1.4 + 0.6 * progress + differential_weight = 0.9 + 0.2 * progress + crossover_rate = 0.8 - 0.5 * progress + quantum_factor = 0.4 - 0.2 * progress + levy_factor = 0.3 + 0.4 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.3, 0.3, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.25: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialSearch.py b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialSearch.py new file mode 100644 index 000000000..816f18223 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLevyDifferentialSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class EnhancedQuantumLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 60 + inertia_weight_max = 0.9 + inertia_weight_min = 0.3 + cognitive_coefficient = 1.5 + social_coefficient = 2.0 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 100 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLevyMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumLevyMemeticOptimizer.py new file mode 100644 index 000000000..ee25826b1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLevyMemeticOptimizer.py @@ -0,0 +1,139 @@ +import numpy as np + + +class EnhancedQuantumLevyMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 10 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.3 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLevyParticleOptimization.py b/nevergrad/optimization/lama/EnhancedQuantumLevyParticleOptimization.py new file mode 100644 index 000000000..1d7b2bd25 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLevyParticleOptimization.py @@ -0,0 +1,160 @@ +import numpy as np + + +class EnhancedQuantumLevyParticleOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.8 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + 0.3 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.05 + 0.35 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.4: + local_search_iters = 7 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLocalSearch.py b/nevergrad/optimization/lama/EnhancedQuantumLocalSearch.py new file mode 100644 index 000000000..f4b95d930 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLocalSearch.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedQuantumLocalSearch: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func, search_range=0.1): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = search_range * np.exp(-_ / self.local_search_iters) # Adaptive perturbation range + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step( + candidate_x, func, search_range=self.perturb_range + ) # Use perturb_range directly + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumLocalSearchImproved.py b/nevergrad/optimization/lama/EnhancedQuantumLocalSearchImproved.py new file mode 100644 index 000000000..7fe153a61 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumLocalSearchImproved.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedQuantumLocalSearchImproved: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + adaptive_local_search=True, + local_search_range=0.1, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + self.adaptive_local_search = adaptive_local_search + self.local_search_range = local_search_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = ( + self.local_search_range * np.exp(-_ / self.local_search_iters) + if self.adaptive_local_search + else self.local_search_range + ) + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizer.py new file mode 100644 index 000000000..d08bb82c8 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizer.py @@ -0,0 +1,128 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumMemeticOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 + self.local_search_probability = 0.3 + self.stagnation_threshold = 10 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.99 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedQuantumMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizerV5.py b/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizerV5.py new file mode 100644 index 000000000..29f27182a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumMemeticOptimizerV5.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumMemeticOptimizerV5: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.5 + self.social_weight = 1.5 + self.quantum_weight = 0.2 + self.elite_fraction = 0.3 + self.memory_size = 20 + self.local_search_probability = 0.85 + self.stagnation_threshold = 3 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = EnhancedQuantumMemeticOptimizerV5(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedQuantumMultiPhaseAdaptiveDE_v10.py b/nevergrad/optimization/lama/EnhancedQuantumMultiPhaseAdaptiveDE_v10.py new file mode 100644 index 000000000..6674d664d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumMultiPhaseAdaptiveDE_v10.py @@ -0,0 +1,175 @@ +import numpy as np + + +class EnhancedQuantumMultiPhaseAdaptiveDE_v10: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Dimension-wise adaptive mutation + mutant = np.zeros(self.dim) + for d in range(self.dim): + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c], d]) + mutant[d] = ( + centroid + + F * (population[a, d] - population[b, d]) + + 0.1 * (elite_ind[d] - population[i, d]) + ) + else: + mutant[d] = population[a, d] + F * (population[b, d] - population[c, d]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + jolt = np.random.uniform(-jolt_intensity, jolt_intensity, self.dim) + mutant += jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Perform local search on elite individuals + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumMultiStrategyOptimization_v2.py b/nevergrad/optimization/lama/EnhancedQuantumMultiStrategyOptimization_v2.py new file mode 100644 index 000000000..88c8936c0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumMultiStrategyOptimization_v2.py @@ -0,0 +1,186 @@ +import numpy as np + + +class EnhancedQuantumMultiStrategyOptimization_v2: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnhancedQuantumPSO.py b/nevergrad/optimization/lama/EnhancedQuantumPSO.py new file mode 100644 index 000000000..2ef0f202d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumPSO.py @@ -0,0 +1,92 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedQuantumPSO: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.3 # parameter for quantum behavior + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + population[i] = best_individual + 0.5 * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: self.population_size // 4] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/EnhancedQuantumReactiveCooperativeStrategy.py b/nevergrad/optimization/lama/EnhancedQuantumReactiveCooperativeStrategy.py new file mode 100644 index 000000000..1cdfa759e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumReactiveCooperativeStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class EnhancedQuantumReactiveCooperativeStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 200 # Further increased population size for deeper exploration + self.elite_size = 30 # Increased elite size for better retention of good solutions + self.crossover_fraction = 0.8 # Slightly decreased to tune genetic diversity + self.mutation_scale = 0.03 # Reduced mutation scale for finer-grained search + self.quantum_mutation_scale = 0.15 # Reduced quantum mutation for controlled exploration + self.quantum_probability = 0.1 # Increased quantum probability for enhanced quantum effects + self.reactivity_factor = 0.05 # Reduced reactivity factor for slower adaptation of mutation scale + self.adaptive_quantum_boost = 0.02 # New: Adaptive boost to quantum mutation scale over time + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover_and_mutate(self, parents, num_offspring, iteration): + offspring = np.empty((num_offspring, self.dim)) + for i in range(num_offspring): + if np.random.rand() < self.crossover_fraction: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = parents[p1][:cross_point] + offspring[i][cross_point:] = parents[p2][cross_point:] + else: + offspring[i] = parents[np.random.randint(len(parents))] + + dynamic_scale = self.mutation_scale / (1 + iteration * self.reactivity_factor) + dynamic_quantum_scale = ( + self.quantum_mutation_scale + iteration * self.adaptive_quantum_boost + ) / (1 + iteration * self.reactivity_factor) + + if np.random.rand() < self.quantum_probability: + mutation_shift = np.random.normal(0, dynamic_quantum_scale, self.dim) + else: + mutation_shift = np.random.normal(0, dynamic_scale, self.dim) + offspring[i] += mutation_shift + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + iteration = 0 + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + num_offspring = self.population_size - self.elite_size + offspring = self.crossover_and_mutate(elite_population, num_offspring, iteration) + + population = np.vstack((elite_population, offspring)) + iteration += 1 + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/EnhancedQuantumReinforcedNesterovAcceleratorV2.py b/nevergrad/optimization/lama/EnhancedQuantumReinforcedNesterovAcceleratorV2.py new file mode 100644 index 000000000..607dfddf0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumReinforcedNesterovAcceleratorV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedQuantumReinforcedNesterovAcceleratorV2: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.95, + quantum_prob=0.25, + adaptive_lr_decay=0.95, + elite_rate=0.5, + noise_intensity=0.05, + perturbation_scale=0.25, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.adaptive_lr_decay = adaptive_lr_decay + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob: + # Enhanced quantum jump dynamics + quantum_jump = np.random.normal( + 0, self.perturbation_scale * np.linalg.norm(global_best - self.population[i]), self.dim + ) + self.population[i] += quantum_jump + else: + # Regular Nesterov momentum update + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = ( + self.momentum * self.velocities[i] + + self.learning_rate * (global_best - self.population[i]) + + noise + ) + future_position = self.population[i] + self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + self.learning_rate *= self.adaptive_lr_decay + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/EnhancedQuantumResilientCrossoverStrategyV2.py b/nevergrad/optimization/lama/EnhancedQuantumResilientCrossoverStrategyV2.py new file mode 100644 index 000000000..867469a4b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumResilientCrossoverStrategyV2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedQuantumResilientCrossoverStrategyV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 100 + elite_size = 20 + evaluations = 0 + mutation_factor = 0.75 + crossover_probability = 0.85 + quantum_probability = 0.05 + adaptive_scaling_factor = 0.1 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + quantum_mutant = ( + population[i] + np.random.normal(0, 1.0, self.dim) * adaptive_scaling_factor + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution operators + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3 + x4 - x1) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..d1c6a8f28 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealing.py @@ -0,0 +1,60 @@ +import numpy as np + + +class EnhancedQuantumSimulatedAnnealing: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + damp_ratio=0.9, + perturb_factor=0.01, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.damp_ratio = damp_ratio + self.perturb_factor = perturb_factor + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def _perturb_solution(self, x): + return x + np.random.normal(0, self.perturb_factor, size=self.dim) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + current_x = self._perturb_solution(current_x) + current_x = np.clip(current_x, -5.0, 5.0) + current_f = func(current_x) + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + self.explore_ratio *= self.damp_ratio + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingImproved.py b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingImproved.py new file mode 100644 index 000000000..f521c2bb0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingImproved.py @@ -0,0 +1,49 @@ +import numpy as np + + +class EnhancedQuantumSimulatedAnnealingImproved: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_range=0.1 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _perturb_step(self, x): + return x + np.random.uniform(-self.perturb_range, self.perturb_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x = self._perturb_step(candidate_x) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingOptimized.py b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingOptimized.py new file mode 100644 index 000000000..c1bbf6643 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingOptimized.py @@ -0,0 +1,42 @@ +import numpy as np + + +class EnhancedQuantumSimulatedAnnealingOptimized: + def __init__(self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingV2.py b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingV2.py new file mode 100644 index 000000000..6bca78b8f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSimulatedAnnealingV2.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedQuantumSimulatedAnnealingV2: + def __init__( + self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, perturb_range=0.1 + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _perturb_step(self, x): + return x + np.random.uniform(-self.perturb_range, self.perturb_range, size=self.dim) + + def _local_search_step(self, x, func): + candidate_x = x + candidate_f = func(candidate_x) + for _ in range(10): # Perform local search for better candidate + new_candidate_x = self._perturb_step(candidate_x) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumStateConvergenceOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumStateConvergenceOptimizer.py new file mode 100644 index 000000000..1a95b63ef --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumStateConvergenceOptimizer.py @@ -0,0 +1,50 @@ +import numpy as np + + +class EnhancedQuantumStateConvergenceOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 50 # Reduced population size to increase refinement per individual + self.F = 0.7 # Slightly reduced differential weight to stabilize convergence + self.CR = 0.85 # Slightly reduced crossover probability to maintain good traits + self.q_influence = 0.15 # Higher quantum influence to enhance exploration + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main optimization loop + for _ in range(int(self.budget / self.pop_size)): + for i in range(self.pop_size): + # Apply quantum mutation with higher frequency and influence + if np.random.rand() < self.q_influence: + mutation = best_ind + np.random.normal(0, 1, self.dim) * 0.15 + else: + # DE/rand/1 mutation strategy with modified random selection logic + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutation = a + self.F * (b - c) # Focused on the difference for mutation + + mutation = np.clip(mutation, -5.0, 5.0) + + # Binomial crossover + trial = np.where(np.random.rand(self.dim) < self.CR, mutation, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + + # Update best if necessary + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimization.py new file mode 100644 index 000000000..a695ba149 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimization.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically with enhancements + self.max_inertia_weight = 0.9 + self.min_inertia_weight = 0.4 + self.max_cognitive_weight = 2.5 + self.min_cognitive_weight = 1.5 + self.max_social_weight = 1.8 + self.min_social_weight = 1.2 + + delta = 0.9 / self.budget + self.inertia_weight = self.max_inertia_weight - delta * iteration + self.cognitive_weight = self.min_cognitive_weight + delta * iteration + self.social_weight = self.max_social_weight - delta * iteration + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + cognitive_component = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + social_component = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + cognitive_component + social_component + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationRefined.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationRefined.py new file mode 100644 index 000000000..aabcfe444 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationRefined.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationRefined: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically with enhancements + self.max_inertia_weight = 0.9 + self.min_inertia_weight = 0.4 + self.max_cognitive_weight = 2.5 + self.min_cognitive_weight = 1.5 + self.max_social_weight = 1.8 + self.min_social_weight = 1.2 + + delta = 0.9 / self.budget + self.inertia_weight = self.max_inertia_weight - delta * iteration + self.cognitive_weight = self.min_cognitive_weight + delta * iteration + self.social_weight = self.max_social_weight - delta * iteration + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + cognitive_component = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + social_component = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + cognitive_component + social_component + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV10.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV10.py new file mode 100644 index 000000000..e1b026bdd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV10.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV10: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + step_size_reduction=0.995, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.step_size_reduction = step_size_reduction + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= self.step_size_reduction + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV11.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV11.py new file mode 100644 index 000000000..be307c93d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV11.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV11: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + step_size_reduction=0.995, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.step_size_reduction = step_size_reduction + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= self.step_size_reduction + + def adapt_weights(self): + self.inertia_weight *= 0.99 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + self.adapt_weights() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV12.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV12.py new file mode 100644 index 000000000..ee1037422 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV12.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV12: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + step_size_reduction=0.995, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.step_size_reduction = step_size_reduction + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= self.step_size_reduction + + def adapt_weights(self): + self.inertia_weight *= 0.99 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + self.adapt_weights() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV13.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV13.py new file mode 100644 index 000000000..47b4508ca --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV13.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV13: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=0.5, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.damping = damping + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.damping * (inertia_term + self.step_size * (cognitive_term + social_term)) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.99 + + def adapt_weights(self): + self.inertia_weight *= 0.99 + self.cognitive_weight += 0.01 + self.social_weight += 0.01 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles() + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + self.adapt_weights() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..1efec8d56 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV2.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.5, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.7, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + cognitive_rand = np.random.rand() + social_rand = np.random.rand() + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * cognitive_rand + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight * social_rand * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.particles_velocity[i] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV3.py new file mode 100644 index 000000000..828d22659 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV3.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV3: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.particles_velocity[i] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV4.py new file mode 100644 index 000000000..79413b449 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV4.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV4: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.step_size = 1.0 + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.step_size * self.particles_velocity[i] + + def adapt_step_size(self): + # Adjust the step size based on the improvement in the global best fitness + self.step_size *= 1.0 / (1.0 + np.exp(-0.1 * (self.global_best_fitness - self.f_opt))) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_step_size() + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV5.py new file mode 100644 index 000000000..d30cf558c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV5.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV5: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.step_size = 1.0 + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.step_size * self.particles_velocity[i] + + def adapt_step_size(self): + self.step_size *= 1.0 / (1.0 + np.exp(-0.1 * (self.global_best_fitness - self.f_opt))) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_step_size() + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV6.py new file mode 100644 index 000000000..b42364cd3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV6.py @@ -0,0 +1,75 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV6: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=1.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.step_size * self.particles_velocity[i] + + def adapt_parameters(self, func): + self.damping *= 0.99 + self.step_size *= 0.99 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_parameters(func) + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV7.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV7.py new file mode 100644 index 000000000..34ba89234 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV7.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV7: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self, func): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.zeros(self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + inertia_term = self.inertia_weight * particle["velocity"] + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] += self.step_size * (inertia_term + cognitive_term + social_term) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= 0.99 + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles(func) + + for _ in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV8.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV8.py new file mode 100644 index 000000000..2de5c9c39 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV8.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV8: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self, func): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.inertia_weight * particle["velocity"] + self.step_size * ( + cognitive_term + social_term + ) + particle["position"] += particle["velocity"] + + def adapt_parameters(self, iteration): + self.step_size *= 0.995**iteration + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles(func) + + for i in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters(i) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV9.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV9.py new file mode 100644 index 000000000..725eadfbe --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizationV9.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizationV9: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + step_size=1.0, + step_size_reduction=0.995, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.step_size = step_size + self.step_size_reduction = step_size_reduction + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + def initialize_particles(self, func): + for _ in range(self.num_particles): + particle = { + "position": np.random.uniform(-5.0, 5.0, self.dim), + "velocity": np.random.uniform(-1.0, 1.0, self.dim), + "best_position": None, + "best_fitness": np.inf, + } + self.particles.append(particle) + + def update_particle(self, particle, func): + fitness = func(particle["position"]) + if fitness < particle["best_fitness"]: + particle["best_fitness"] = fitness + particle["best_position"] = particle["position"].copy() + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = particle["position"].copy() + + cognitive_term = ( + self.cognitive_weight * np.random.rand() * (particle["best_position"] - particle["position"]) + ) + social_term = self.social_weight * np.random.rand() * (self.best_position - particle["position"]) + + particle["velocity"] = self.inertia_weight * particle["velocity"] + self.step_size * ( + cognitive_term + social_term + ) + particle["position"] += particle["velocity"] + + def adapt_parameters(self): + self.step_size *= self.step_size_reduction + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.particles = [] + + self.initialize_particles(func) + + for i in range(self.budget): + for particle in self.particles: + self.update_particle(particle, func) + self.adapt_parameters() + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizerV4.py b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizerV4.py new file mode 100644 index 000000000..5d84858df --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSwarmOptimizerV4.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedQuantumSwarmOptimizerV4: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.8, + cognitive_coefficient=2.5, + social_coefficient=2.5, + inertia_decay=0.99, + quantum_jump_rate=0.25, + quantum_scale=0.25, + adaptive_depth=30, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_depth = ( + adaptive_depth # Depth of historical performance to adapt parameters dynamically + ) + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + performance_history = [] + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhanced Quantum jump with adaptive scaling + q_scale = self.quantum_scale * (1 + np.sin(2 * np.pi * evaluations / self.budget)) + particles[i] = global_best + np.random.normal(0, q_scale, self.dim) * (self.ub - self.lb) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Classical PSO update with dynamic adaptation + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + performance_history.append(global_best_score) + + # Dynamic adaptation of quantum and classical parameters + if len(performance_history) > self.adaptive_depth: + recent_progress = np.mean(np.diff(performance_history[-self.adaptive_depth :])) + if recent_progress > 0: + # Adaptively increase quantum jump rate if improvements are observed + self.quantum_jump_rate = min(self.quantum_jump_rate * 1.05, 1.0) + else: + # Stabilize and focus on exploitation by reducing quantum jump rate + self.quantum_jump_rate = max(self.quantum_jump_rate * 0.95, 0.05) + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/EnhancedQuantumSymbioticStrategyV5.py b/nevergrad/optimization/lama/EnhancedQuantumSymbioticStrategyV5.py new file mode 100644 index 000000000..10ce1dae4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSymbioticStrategyV5.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedQuantumSymbioticStrategyV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given problem dimensionality + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 500 + elite_size = 50 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.7 + quantum_probability = 0.1 + adaptive_scaling_factor = lambda t: 0.2 * np.exp(-0.2 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step with fine-tuned control + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Enhanced symbiotic mutation and crossover with dynamic elite selection + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedQuantumSynergyStrategyV2.py b/nevergrad/optimization/lama/EnhancedQuantumSynergyStrategyV2.py new file mode 100644 index 000000000..c6651f731 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumSynergyStrategyV2.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedQuantumSynergyStrategyV2: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 500 # Increased population size for broader exploration + self.elite_size = 100 # Larger elite pool to maintain more successful individuals + self.crossover_probability = 0.95 # Slightly increased crossover probability + self.mutation_scale = 0.002 # Decreased mutation scale for finer adaptations + self.quantum_mutation_scale = 0.015 # Reduced quantum mutation scale for controlled exploration + self.quantum_probability = 0.35 # Increased probability to enhance quantum mutation effects + self.precision_boost_factor = 0.015 # Reduced boost factor for better precision in later stages + self.reactivity_factor = 0.008 # Further reduced to stabilize the dynamic changes + self.recombination_rate = 0.35 # Enhanced recombination rate among elites + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + for i in range(num_offspring): + if np.random.rand() < self.crossover_probability: + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + offspring[i] = self.optimal_quantum_recombination(elite[p1], elite[p2], remaining_budget) + else: + offspring[i] = elite[np.random.choice(elite.shape[0])] + + scale = self.mutation_scale + self.precision_boost_factor * np.log(remaining_budget + 1) + offspring[i] += np.random.normal(0, scale, self.dim) + + if np.random.rand() < self.quantum_probability: + offspring[i] += np.random.normal(0, self.quantum_mutation_scale, self.dim) + + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + + return np.vstack([elite, offspring]) + + def optimal_quantum_recombination(self, parent1, parent2, remaining_budget): + blend_factor = self.reactivity_factor * np.exp(-remaining_budget / self.budget) + child = blend_factor * parent1 + (1 - blend_factor) * parent2 + return child + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/EnhancedQuantumTunnelingOptimizer.py b/nevergrad/optimization/lama/EnhancedQuantumTunnelingOptimizer.py new file mode 100644 index 000000000..71842516a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedQuantumTunnelingOptimizer.py @@ -0,0 +1,77 @@ +import numpy as np + + +class EnhancedQuantumTunnelingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem statement + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize parameters + population_size = 50 # Increased population size for better coverage + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Quantum parameters + gamma = 0.1 # Initial quantum fluctuation magnitude + gamma_decay = 0.985 # Slightly slower decay to maintain diversity longer + + # Evolutionary parameters + crossover_rate = 0.9 # Increased crossover rate for better mixing + mutation_strength = 0.3 # Reduced mutation strength to refine local search + elite_count = 5 # Number of elites to preserve between generations + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update quantum fluctuation magnitude + gamma *= gamma_decay + + # Select elite individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + + new_population = list(elite_individuals) # Start with elite individuals + new_fitness = list(fitness[elite_indices]) + + for _ in range(population_size - elite_count): + # Selection of parents for crossover + parents_indices = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parents_indices] + + # Crossover + mask = np.random.rand(self.dim) < crossover_rate + offspring = np.where(mask, parent1, parent2) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling effect + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + # Evaluate offspring + f_offspring = func(offspring) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + # Insert offspring into new population + new_population.append(offspring) + new_fitness.append(f_offspring) + + if f_offspring < self.f_opt: + self.f_opt = f_offspring + self.x_opt = offspring + + # Update the population + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRAMEDS.py b/nevergrad/optimization/lama/EnhancedRAMEDS.py new file mode 100644 index 000000000..0c9c308f4 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRAMEDS.py @@ -0,0 +1,92 @@ +import numpy as np + + +class EnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Sine-Cosine Adaptive Mutation Factor + F = ( + self.F_max + - (self.F_max - self.F_min) + * (np.sin(np.pi * evaluations / self.budget) + np.cos(np.pi * evaluations / self.budget)) + / 2 + ) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.6 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and focused memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + improvement_factor = (fitness[i] - trial_fitness) / fitness[i] + if improvement_factor > 0.1: # Only update for significant improvements + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > trial_fitness: + memory[worst_idx] = trial.copy() + memory_fitness[worst_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRAMEDSPro.py b/nevergrad/optimization/lama/EnhancedRAMEDSPro.py new file mode 100644 index 000000000..a047b830e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRAMEDSPro.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedRAMEDSPro: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with dynamic feedback control + F = self.F_max - (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + F = max( + self.F_min, min(self.F_max, F + (0.5 - np.random.rand()) * 0.1) + ) # Random noise addition + + # Mutation strategy: DE/current-to-pbest/1 with optional random selection from memory + pbest = population[np.argmin(fitness)] + random_memory_choice = ( + memory[np.random.randint(0, self.memory_size)] if np.random.rand() < 0.1 else pbest + ) + mutant = np.clip( + population[i] + + F + * (random_memory_choice - population[i] + elite[np.random.randint(0, self.elite_size)]), + lb, + ub, + ) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRAMEDSProV2.py b/nevergrad/optimization/lama/EnhancedRAMEDSProV2.py new file mode 100644 index 000000000..2b50ef832 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRAMEDSProV2.py @@ -0,0 +1,87 @@ +import numpy as np + + +class EnhancedRAMEDSProV2: + def __init__( + self, + budget, + population_size=50, + crossover_rate_initial=0.95, + F_min=0.1, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate_initial = crossover_rate_initial + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = population[np.argsort(fitness)[: self.memory_size]].copy() + memory_fitness = fitness[np.argsort(fitness)[: self.memory_size]].copy() + elite = population[np.argsort(fitness)[: self.elite_size]].copy() + elite_fitness = fitness[np.argsort(fitness)[: self.elite_size]].copy() + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adaptive mutation and crossover rates + progress = evaluations / self.budget + F = self.F_max - (self.F_max - self.F_min) * np.sin(np.pi * progress) + crossover_rate = self.crossover_rate_initial * (1 - progress) + 0.1 * progress + + for i in range(self.population_size): + # Mutation with elite and memory consideration + elites_indices = np.random.choice(self.elite_size, 2, replace=False) + memory_indices = np.random.choice(self.memory_size, 2, replace=False) + x1, x2 = elite[elites_indices] + m1, m2 = memory[memory_indices] + + mutant = np.clip(population[i] + F * (x1 - x2 + m1 - m2), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and memory update + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Memory update + if trial_fitness < np.max(memory_fitness): + worst_idx = np.argmax(memory_fitness) + memory[worst_idx] = trial + memory_fitness[worst_idx] = trial_fitness + + # Elite update + if trial_fitness < np.max(elite_fitness): + worst_elite_idx = np.argmax(elite_fitness) + elite[worst_elite_idx] = trial + elite_fitness[worst_elite_idx] = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRAMEDSv3.py b/nevergrad/optimization/lama/EnhancedRAMEDSv3.py new file mode 100644 index 000000000..b7cc90810 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRAMEDSv3.py @@ -0,0 +1,89 @@ +import numpy as np + + +class EnhancedRAMEDSv3: + def __init__( + self, + budget, + population_size=50, + crossover_base=0.8, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_base = crossover_base + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.sin(2 * np.pi * evaluations / self.budget) + # Adaptive crossover rate + cr = self.crossover_base + 0.2 * (1 - np.sin(2 * np.pi * evaluations / self.budget)) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < cr + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory adaptively + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Adaptive memory update + if evaluations % 10 == 0: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRAMEDSv4.py b/nevergrad/optimization/lama/EnhancedRAMEDSv4.py new file mode 100644 index 000000000..4c3cc1185 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRAMEDSv4.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedRAMEDSv4: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + reinit_cycle=100, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.reinit_cycle = reinit_cycle + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % self.reinit_cycle == 0 and evaluations != 0: + # Periodic reinitialization of a portion of the population + reinit_indices = np.random.choice( + range(self.population_size), size=self.population_size // 5, replace=False + ) + population[reinit_indices] = self.lb + (self.ub - self.lb) * np.random.rand( + len(reinit_indices), self.dimension + ) + fitness[reinit_indices] = np.array( + [func(individual) for individual in population[reinit_indices]] + ) + evaluations += len(reinit_indices) + + for i in range(self.population_size): + # Adaptive mutation factor with decay + F = self.F_max - (self.F_max - self.F_min) * np.cos(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip( + population[i] + F * (best_solution - population[i] + a - b), self.lb, self.ub + ) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolution.py new file mode 100644 index 000000000..45b1dc210 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolution.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveCovarianceMatrixEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Adjusted population size for balance + self.sigma = 0.2 # Fine-tuned initial step size for precision + self.c1 = 0.02 # Learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) # Damping factor for step size + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) # Number of parents for recombination + self.adaptive_learning_rate = 0.05 # Further reduced learning rate for mutation adaptability + self.elitism_rate = 0.05 # Introduce elitism to retain best solutions + self.eval_count = 0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def dynamic_crossover(parent1, parent2): + alpha = np.random.uniform(0.25, 0.75, self.dim) # More balanced crossover + return alpha * parent1 + (1 - alpha) * parent2 + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + for i in range(self.population_size // 2): + parent1, parent2 = offspring[i], offspring[self.population_size // 2 + i] + offspring[i] = dynamic_crossover(parent1, parent2) + + new_fitness = np.array([func(ind) for ind in offspring]) + self.eval_count += self.population_size + + population, fitness = retain_elite(population, fitness, offspring, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedRefinedAdaptiveCovarianceMatrixEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus.py new file mode 100644 index 000000000..550ceea26 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Reduced population size for efficiency + self.sigma = 0.3 # Slightly increased initial step size + self.c1 = 0.02 # Learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) # Damping factor for step size + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) # Number of parents for recombination + self.adaptive_learning_rate = 0.03 # Further fine-tuned learning rate for mutation adaptability + self.elitism_rate = 0.1 # Introduce elitism to retain the best solutions + self.eval_count = 0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def dynamic_crossover(parent1, parent2): + alpha = np.random.uniform(0.3, 0.7, self.dim) # More balanced crossover + return alpha * parent1 + (1 - alpha) * parent2 + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + for i in range(self.population_size // 2): + parent1, parent2 = offspring[i], offspring[self.population_size // 2 + i] + offspring[i] = dynamic_crossover(parent1, parent2) + + new_fitness = np.array([func(ind) for ind in offspring]) + self.eval_count += self.population_size + + population, fitness = retain_elite(population, fitness, offspring, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost.py new file mode 100644 index 000000000..2ce1cbdcc --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + else: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSearch.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSearch.py new file mode 100644 index 000000000..e371bf15e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSearch.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDifferentialSearch: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.8, + F_min=0.4, + F_max=1.0, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + F = self.F_max - (self.F_max - self.F_min) * np.exp(-4 * evaluations / self.budget) + + # Mutation: DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSpiralSearch.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSpiralSearch.py new file mode 100644 index 000000000..45eda154c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDifferentialSpiralSearch.py @@ -0,0 +1,57 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDifferentialSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize a population around the search space + population_size = 150 # Further increased population for better exploration + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Initialize spiral dynamics with adaptive parameters + radius = 5.0 + angle_increment = 2 * np.pi / population_size + evaluations_left = self.budget - population_size + radius_decay = 0.98 # More gradual decay to maintain exploration longer + angle_speed_increase = 1.02 # Lower increase rate for more controlled search + + while evaluations_left > 0: + for i in range(population_size): + # Select random indices for differential mutation + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices[0]], population[indices[1]], population[indices[2]] + + # Perform differential mutation + mutant = a + 0.8 * (b - c) # Adjusted differential weight for stability + mutant = np.clip(mutant, -5.0, 5.0) + + # Adjust the mutant with spiral motion + angle = angle_increment * i + offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + candidate = mutant + offset + candidate = np.clip(candidate, -5.0, 5.0) + + # Evaluate candidate + f_candidate = func(candidate) + evaluations_left -= 1 + + # Greedy selection and update optimal solution + if f_candidate < fitness[i]: + population[i] = candidate + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + + # Adaptive update of spiral parameters + radius *= radius_decay + angle_increment *= angle_speed_increase + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDE.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDE.py new file mode 100644 index 000000000..0094ef7ef --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDynamicDE: + def __init__( + self, budget=10000, population_size=120, F_base=0.6, F_range=0.3, CR=0.85, dynamic_adaptation=True + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity and adaptation + self.CR = CR # Crossover probability + self.dynamic_adaptation = dynamic_adaptation # Enable/disable dynamic F and strategy adaptation + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + if self.dynamic_adaptation: + # Adaptive mutation strategy: focus on global best in later stages + phase_ratio = evaluations / self.budget + if phase_ratio < 0.5: + idxs = np.argsort(fitness) + base = population[idxs[np.random.randint(0, max(1, len(idxs) // 10))]] + else: + base = population[best_idx] + + # Dynamic adjustment of F using a cosine modulation for a smoother transition + F = self.F_base + (np.cos(phase_ratio * np.pi) * self.F_range) + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using DE/rand/1/bin strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15.py new file mode 100644 index 000000000..945aa11e7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[a] + self.F * (population[b] - population[c]) + else: + # Enhanced mutation strategy for phase 2 using more vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic parameter adjustment focusing on a more adaptive approach + scale = iteration / total_iterations + # Enhancing sigmoid transition for more adaptive behavior + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.9 - 0.8 * (iteration / total_iterations), 0.1, 0.9) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicExplorationOptimization.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicExplorationOptimization.py new file mode 100644 index 000000000..6aa778970 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicExplorationOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveDynamicExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 15 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f_opt = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f_opt - f) / abs(prev_f_opt + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f_opt == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedRefinedAdaptiveDynamicExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..1402de1ed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,178 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 50 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 20 + self.dynamic_parameters_adjustment_threshold = 30 + self.pop_shrink_factor = 0.1 + self.diversification_period = 50 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _diversify_population(self, population, fitness, func): + num_new_individuals = int(self.pop_size * 0.1) # 10% of the population + new_individuals = np.random.uniform(self.lb, self.ub, (num_new_individuals, self.dim)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + self.evaluations += num_new_individuals + + combined_population = np.vstack((population, new_individuals)) + combined_fitness = np.hstack((fitness, new_fitness)) + + best_indices = np.argsort(combined_fitness)[: self.pop_size] + return combined_population[best_indices], combined_fitness[best_indices] + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + iteration = 0 + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 4) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self._dynamic_parameters() + + if self.no_improvement_count >= self.dynamic_adjustment_period: + new_pop_size = max(20, int(self.pop_size * (1 - self.pop_shrink_factor))) + population = population[:new_pop_size] + fitness = fitness[:new_pop_size] + self.pop_size = new_pop_size + self.no_improvement_count = 0 + + if iteration % self.diversification_period == 0 and self.evaluations < self.budget: + population, fitness = self._diversify_population(population, fitness, func) + + iteration += 1 + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveFocusedEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveFocusedEvolutionStrategy.py new file mode 100644 index 000000000..dd327885a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveFocusedEvolutionStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveFocusedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial populations and parameters + population_size = 150 + sigma = 0.7 # Initial standard deviation for Gaussian mutation + elite_size = max(1, int(population_size * 0.07)) + learning_rate = 0.1 # Learning rate for adaptive sigma + mutation_probability = 0.9 # Probability of mutating an individual + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + evaluations = population_size + while evaluations < self.budget: + # Elitism: keep the best solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population = population[elite_indices].copy() + new_fitness = fitness[elite_indices].copy() + + # Generate new population based on best solutions + for i in range(elite_size, population_size): + if np.random.rand() < mutation_probability: + # Select parent from elite randomly + parent_index = np.random.choice(elite_indices) + parent = population[parent_index] + + # Apply adaptive Gaussian mutation + offspring = parent + np.random.normal(0, sigma, self.dim) + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + + offspring_fitness = func(offspring) + evaluations += 1 + + # Determine whether to replace parent + if offspring_fitness < fitness[parent_index]: + new_population = np.vstack([new_population, offspring]) + new_fitness = np.append(new_fitness, offspring_fitness) + else: + new_population = np.vstack([new_population, parent]) + new_fitness = np.append(new_fitness, fitness[parent_index]) + else: + # Crossover between two elites + parents = np.random.choice(elite_indices, 2, replace=False) + alpha = np.random.rand() + offspring = alpha * population[parents[0]] + (1 - alpha) * population[parents[1]] + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + + offspring_fitness = func(offspring) + evaluations += 1 + + new_population = np.vstack([new_population, offspring]) + new_fitness = np.append(new_fitness, offspring_fitness) + + # Update population and fitness + population = new_population + fitness = new_fitness + + # Adaptive mutation step size + sigma *= np.exp(learning_rate * (1.0 - np.mean(fitness) / best_fitness)) + + # Update the best solution found + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveHarmonySearch.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveHarmonySearch.py new file mode 100644 index 000000000..55904f2f9 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveHarmonySearch.py @@ -0,0 +1,81 @@ +import numpy as np +from scipy.stats import cauchy + + +class EnhancedRefinedAdaptiveHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMemeticDiverseOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMemeticDiverseOptimizer.py new file mode 100644 index 000000000..c5a4e007d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMemeticDiverseOptimizer.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedRefinedAdaptiveMemeticDiverseOptimizer: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v4.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v4.py new file mode 100644 index 000000000..34fa5bcce --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v4.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedRefinedAdaptiveMetaNetPSO_v4: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 1.5 # Adjusted cognitive weight + self.social_weight = 1.5 # Adjusted social weight + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v5.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v5.py new file mode 100644 index 000000000..1feaa98d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveMetaNetPSO_v5.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.stats import truncnorm + + +class EnhancedRefinedAdaptiveMetaNetPSO_v5: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200, adaptive_lr=True): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.8 + self.adaptive_lr = adaptive_lr + self.cognitive_weight = 1.8 # Adjusted cognitive weight + self.social_weight = 1.8 # Adjusted social weight + + def truncated_normal(self, mean, sd, lower, upper): + return truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs() + + def random_restart(self): + return np.array( + [self.truncated_normal(0, 1, -5.0, 5.0) for _ in range(self.num_particles * self.dim)] + ).reshape(self.num_particles, self.dim) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.7 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if self.adaptive_lr: + cognitive_weight -= 0.001 * t + social_weight += 0.001 * t + + return cognitive_weight, social_weight + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(self.dim): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + self.cognitive_weight, self.social_weight = self.update_parameters( + t, self.cognitive_weight, self.social_weight + ) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v49.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v49.py new file mode 100644 index 000000000..609950951 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v49.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v49: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + self.best_fitness_history = [] + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v52.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v52.py new file mode 100644 index 000000000..f088dad97 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v52.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v52: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v53.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v53.py new file mode 100644 index 000000000..706fc4594 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v53.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v53: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.8 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v54.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v54.py new file mode 100644 index 000000000..f6434d93d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v54.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v54: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v55.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v55.py new file mode 100644 index 000000000..3c9d15fa0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v55.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v55: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v56.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v56.py new file mode 100644 index 000000000..5ac8ba7bd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v56.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v56: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.99 + self.alpha *= 0.99 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v57.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v57.py new file mode 100644 index 000000000..8fa130139 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v57.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v57: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v58.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v58.py new file mode 100644 index 000000000..10120555f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v58.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v58: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_reflection(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v59.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v59.py new file mode 100644 index 000000000..af55539bf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v59.py @@ -0,0 +1,131 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v59: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v60.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v60.py new file mode 100644 index 000000000..3c60ec81f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveQGSA_v60.py @@ -0,0 +1,131 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveQGSA_v60: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSpiralGradientSearch.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSpiralGradientSearch.py new file mode 100644 index 000000000..7eec7bcf6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSpiralGradientSearch.py @@ -0,0 +1,67 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveSpiralGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial setup + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Start with a full range + angle_increment = np.pi / 4 # Initial broad angle for exploration + + # Adaptive parameters + radius_decay = 0.95 # Gradual decrease radius to extend exploration + angle_refinement = 0.95 # Slow angle refinement for detailed exploration + evaluations_left = self.budget + min_radius = 0.001 # Further reduce min radius for finer precision + + # Maintain a history of the previous best centroids to adjust search dynamics + historical_best = np.array([centroid.copy() for _ in range(3)]) + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max( + int(2 * np.pi / angle_increment), 8 + ) # Ensure at least 8 points for thorough coverage + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + displacement = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Update centroid by averaging the current and historical best points + if points: + best_index = np.argmin(function_values) + historical_best = np.roll(historical_best, -1, axis=0) + historical_best[-1] = points[best_index] + + # Move the centroid towards the average of historical best locations + centroid = np.mean(historical_best, axis=0) + + # Dynamically update radius and angle increment + radius *= radius_decay + radius = max(radius, min_radius) + angle_increment *= angle_refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3.py b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3.py new file mode 100644 index 000000000..067925338 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3.py @@ -0,0 +1,108 @@ +import numpy as np + + +class EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedRefinedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..ef46230d1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedDualStrategyAdaptiveDE.py @@ -0,0 +1,125 @@ +import numpy as np + + +class EnhancedRefinedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/EnhancedRefinedDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedRefinedDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..45f90c311 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedDynamicFireworkAlgorithm.py @@ -0,0 +1,96 @@ +import numpy as np + + +class EnhancedRefinedDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.9 # Decrease mutation rate + else: + self.mutation_rate *= 1.1 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..9d1dfa353 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..8b8f90e21 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,198 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=100, + blend_crossover_prob=0.25, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + # Use L-BFGS-B for local search + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Reinforce diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedEliteDynamicMemoryHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedEliteDynamicMemoryHybridOptimizer.py new file mode 100644 index 000000000..1abedc18f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedEliteDynamicMemoryHybridOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedRefinedEliteDynamicMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedEvolutionaryGradientHybridOptimizerV4.py b/nevergrad/optimization/lama/EnhancedRefinedEvolutionaryGradientHybridOptimizerV4.py new file mode 100644 index 000000000..5cff2eb62 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedEvolutionaryGradientHybridOptimizerV4.py @@ -0,0 +1,74 @@ +import numpy as np + + +class EnhancedRefinedEvolutionaryGradientHybridOptimizerV4: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.5, + F_range=0.5, + CR=0.95, + elite_fraction=0.1, + mutation_strategy="best", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'best' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "best": + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedRefinedGradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/EnhancedRefinedGradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..0745fba25 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGradientBoostedMemoryAnnealing.py @@ -0,0 +1,142 @@ +import numpy as np + + +class EnhancedRefinedGradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Initialize parameters + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # More frequent gradient-based local search refinement + if evaluations % (self.budget // 8) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.007): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v88.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v88.py new file mode 100644 index 000000000..8a1b7368e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v88.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v88: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 # Adjusted inertia weight for better exploration + self.social_weight = 1.5 # Increased social weight for better exploitation + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 # Adjusted elite weight for improved convergence + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v89.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v89.py new file mode 100644 index 000000000..c8bfeb117 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v89.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v89: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v90.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v90.py new file mode 100644 index 000000000..2c1522ca3 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v90.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v90: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v91.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v91.py new file mode 100644 index 000000000..cc006f3d5 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v91.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v91: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.inf + self.x_opt = None + self.prev_best_fitness = np.inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v92.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v92.py new file mode 100644 index 000000000..df6c230bb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v92.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v92: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v93.py b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v93.py new file mode 100644 index 000000000..a39acbefb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedGuidedMassQGSA_v93.py @@ -0,0 +1,129 @@ +import numpy as np + + +class EnhancedRefinedGuidedMassQGSA_v93: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.9 + self.social_weight = 1.5 + self.cognitive_weight = 1.3 + self.elite_weight = 0.7 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = np.clip( + new_agent + np.random.normal(0, 0.1, size=self.dimension), self.lb, self.ub + ) # Introducing randomness for better exploration + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..445dce8ce --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,167 @@ +import numpy as np + + +class EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 + self.elitism_rate = 0.20 # Elitism rate + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + self.alpha_levy = 0.01 # Levy flight parameter + self.k = 0.3 # Parameter for hybridization phase + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(population, fitness): + """Switch strategy based on current performance.""" + strategy = "default" + if self.eval_count < self.budget * 0.33: + strategy = "explorative" + self.F = 0.9 + self.CR = 0.9 + elif self.eval_count < self.budget * 0.66: + strategy = "balanced" + self.F = 0.7 + self.CR = 0.85 + else: + strategy = "exploitative" + self.F = 0.5 + self.CR = 0.75 + return strategy + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < 0.2: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + for i in range(self.population_size): + if np.random.rand() < self.k: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching(population, fitness) + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + population = hybridization(population, cov_matrix) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedRefinedHybridDEPSOWithDynamicAdaptation.py b/nevergrad/optimization/lama/EnhancedRefinedHybridDEPSOWithDynamicAdaptation.py new file mode 100644 index 000000000..e184e6faf --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHybridDEPSOWithDynamicAdaptation.py @@ -0,0 +1,152 @@ +import numpy as np + + +class EnhancedRefinedHybridDEPSOWithDynamicAdaptation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.7 # Adaptive inertia weight for PSO + c1 = 1.2 # Increased cognitive coefficient for PSO + c2 = 1.2 # Increased social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..e4fe29657 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py @@ -0,0 +1,139 @@ +import numpy as np + + +class EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 70 # Further increased for better diversity + self.initial_F = 0.85 # Adjusted for balanced exploration and exploitation + self.initial_CR = 0.85 # Adjusted for balanced crossover + self.elite_rate = 0.2 # Increased elite rate for better convergence + self.local_search_rate = 0.5 # Further increased for more local refinements + self.memory_size = 10 # Increased memory size for better parameter adaptation + self.w = 0.8 # Adjusted inertia weight for better convergence + self.c1 = 1.5 # Balanced cognitive component + self.c2 = 1.5 # Balanced social component + self.phase_switch_ratio = 0.3 # Adjusted phase switch + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Adjusted for more precise local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedRefinedHybridOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedHybridOptimizer.py new file mode 100644 index 000000000..23acfa48d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHybridOptimizer.py @@ -0,0 +1,148 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnhancedRefinedHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + return result.x, result.fun + + def simulated_annealing(self, x, func, T): + new_x = x + np.random.uniform(-0.1, 0.1, self.dim) + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + self.eval_count += 1 + if new_f < func(x) or np.random.rand() < np.exp((func(x) - new_f) / T): + return new_x, new_f + else: + return x, func(x) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + self.eval_count = self.init_pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + T = 1.0 + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.init_pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Perform simulated annealing to escape local optima + T *= 0.99 # Cooling schedule + for i in range(self.init_pop_size): + if self.eval_count >= global_search_budget: + break + population[i], fitness[i] = self.simulated_annealing(population[i], func, T) + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3.py b/nevergrad/optimization/lama/EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3.py new file mode 100644 index 000000000..5fc60eeed --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3.py @@ -0,0 +1,55 @@ +import numpy as np + + +class EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 350 # Further increased population size for improved exploration + self.F_base = 0.6 # Slightly increased base mutation factor for more aggressive exploration + self.CR_base = 0.8 # Increased base crossover probability for more robust exploration + self.adaptive_F_amplitude = 0.25 # Increased mutation factor amplitude for broader search variations + self.adaptive_CR_amplitude = 0.25 # Adjusted crossover rate amplitude for dynamic adaptation + self.phase_shift = np.pi / 4 # Adjusted phase shift for optimal diversity + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors with phase-shifted sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.adaptive_F_amplitude * np.sin(2 * np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.sin( + 2 * np.pi * iteration_ratio + self.phase_shift + ) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin with adaptive F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure boundaries are respected + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer.py new file mode 100644 index 000000000..301093bbb --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Slightly higher starting temperature for improved initial exploration + T_min = 0.0003 # Reduced minimum temperature for more pronounced fine-tuning + alpha = 0.92 # Adjusted cooling rate to balance exploration and convergence + + # Mutation and crossover parameters refined further + F_base = 0.75 # Adjusted mutation factor for improved balance + CR = 0.91 # Modified crossover probability to enhance good trait retention + + population_size = 90 # Slightly increased population size for better coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation dynamics with temperature and progress-dependent scaling + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adjusted dynamic mutation factor to reflect both temperature and proportional progress + dynamic_F = ( + F_base * np.exp(-0.1 * T) * (0.5 + 0.5 * np.sin(np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion leveraging delta fitness, temperature, and a fine-tuned function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.04 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling strategy with periodic modulation adjustments + adaptive_cooling = alpha - 0.012 * np.cos(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSO.py b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSO.py new file mode 100644 index 000000000..67253071d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedRefinedMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 1000 + self.meta_net_lr = 0.3 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv8.py b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv8.py new file mode 100644 index 000000000..7ec397c17 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv8.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedRefinedMetaNetAQAPSOv8: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 2000 + self.meta_net_lr = 0.3 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv9.py b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv9.py new file mode 100644 index 000000000..cf9f7492e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedMetaNetAQAPSOv9.py @@ -0,0 +1,123 @@ +import numpy as np + + +class EnhancedRefinedMetaNetAQAPSOv9: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.15 + self.max_local_search_attempts = 5 + self.meta_net_iters = 3000 + self.meta_net_lr = 0.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedOptimalDynamicPrecisionOptimizerV16.py b/nevergrad/optimization/lama/EnhancedRefinedOptimalDynamicPrecisionOptimizerV16.py new file mode 100644 index 000000000..f58fd6fce --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedOptimalDynamicPrecisionOptimizerV16.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedRefinedOptimalDynamicPrecisionOptimizerV16: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters with further refinement + T = 1.2 # Slightly increased starting temperature to enhance early exploration + T_min = 0.0003 # Reduced minimum temperature to allow deep exploration at late stages + alpha = 0.93 # Slightly increased cooling rate to maintain exploration capabilities longer + + # Mutation and crossover parameters are finely tuned for optimal performance + F = 0.77 # Mutation factor adjusted for a better balance of diversity + CR = 0.85 # Crossover probability fine-tuned to optimize the exploration-exploitation trade-off + + population_size = 85 # Adjusted population size for better convergence properties + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation with sigmoid-based adaptation for responsive control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor using an improved sigmoid for more responsive control + dynamic_F = ( + F + * np.exp(-0.07 * T) + * (0.65 + 0.35 * np.tanh(3.7 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria with adjusted sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.065 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..daed25e5a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 # Increased for better diversity + self.initial_F = 0.7 # Tuned for better balance + self.initial_CR = 0.85 # Tuned for better balance + self.elite_rate = 0.15 # Increased elite rate for better exploitation + self.local_search_rate = 0.4 # Increased local search intensity + self.memory_size = 25 # Increased memory size for better parameter adaptation + self.w = 0.7 # Reduced inertia weight for finer control in PSO + self.c1 = 1.7 # Enhanced cognitive component + self.c2 = 1.3 # Reduced social component for better exploration + self.phase_switch_ratio = 0.4 # Earlier switch to PSO for quicker convergence + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Finer step for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/EnhancedRefinedSpatialOptimizer.py b/nevergrad/optimization/lama/EnhancedRefinedSpatialOptimizer.py new file mode 100644 index 000000000..286db91e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedSpatialOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EnhancedRefinedSpatialOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=120, + initial_step_size=2.0, + step_decay=0.98, + elite_ratio=0.15, + mutation_intensity=0.1, + local_search_prob=0.35, + refinement_steps=7, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, self.step_size * 0.01, self.dimension), + self.bounds[0], + self.bounds[1], + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: # Conduct local search on some individuals + for idx in range(self.population_size): + local_individual, local_fitness = self.local_search(func, new_population[idx]) + evaluations += self.refinement_steps # Account for the evaluations used in local search + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35.py b/nevergrad/optimization/lama/EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35.py new file mode 100644 index 000000000..f44ebf777 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.54, + F_range=0.46, + CR=0.96, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Fine-tuned base mutation factor for improved exploration/exploitation balance + self.F_range = ( + F_range # Adjusted range for mutation factor to foster more aggressive explorations when needed + ) + self.CR = CR # Increased crossover probability to enhance trial vector diversity + self.elite_fraction = ( + elite_fraction # Increased elite fraction to leverage a broader base of good solutions + ) + self.mutation_strategy = ( + mutation_strategy # Maintaining 'adaptive' strategy for flexibility in mutation base selection + ) + self.dim = 5 # Problem dimensionality + self.lb = -5.0 # Lower boundary of search space + self.ub = 5.0 # Upper boundary of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Select mutation base according to strategy + if self.mutation_strategy == "adaptive": + # Enhanced adaptive focus: adjust probability to 0.80 for selecting the best individual + if np.random.rand() < 0.80: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjust F within given range + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation with slightly increased CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate and select the better solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check budget status + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v72.py b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v72.py new file mode 100644 index 000000000..88ef647a0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v72.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedRefinedUltimateGuidedMassQGSA_v72: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v73.py b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v73.py new file mode 100644 index 000000000..260cfbe6c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v73.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedRefinedUltimateGuidedMassQGSA_v73: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v74.py b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v74.py new file mode 100644 index 000000000..7654ed482 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v74.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedRefinedUltimateGuidedMassQGSA_v74: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v76.py b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v76.py new file mode 100644 index 000000000..b78fc749c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimateGuidedMassQGSA_v76.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedRefinedUltimateGuidedMassQGSA_v76: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43.py b/nevergrad/optimization/lama/EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43.py new file mode 100644 index 000000000..568c04fa1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43.py @@ -0,0 +1,82 @@ +import numpy as np + + +class EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.97, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased base mutation factor for more aggressive search + self.F_range = F_range # Slightly narrower range for controlled adaptive mutation + self.CR = ( + CR # Increased crossover probability to facilitate better exploration and exploitation balance + ) + self.elite_fraction = ( + elite_fraction # Increased elite fraction to maintain a strong influence of best performers + ) + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy for dynamic behavior + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Higher probability to select the best individual, enhancing local search + if np.random.rand() < 0.85: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjusted mutation factor + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with enhanced CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedResilientAdaptivePSO.py b/nevergrad/optimization/lama/EnhancedResilientAdaptivePSO.py new file mode 100644 index 000000000..ed2fe845f --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedResilientAdaptivePSO.py @@ -0,0 +1,71 @@ +import numpy as np + + +class EnhancedResilientAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=100, + omega=0.7, + phi_p=0.15, + phi_g=0.3, + precision_decay=0.98, + adaptive_phi=True, + ): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia coefficient + self.phi_p = phi_p # Coefficient of personal best + self.phi_g = phi_g # Coefficient of global best + self.dim = 5 # Dimension of the problem + self.lb, self.ub = -5.0, 5.0 # Search space bounds + self.precision_decay = precision_decay + self.adaptive_phi = adaptive_phi + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + remaining_budget = self.budget - evaluation_counter + + while evaluation_counter < self.budget: + self.omega *= self.precision_decay # Gradually reduce inertia + + # Dynamically adjust phi parameters if adaptive_phi is True + if self.adaptive_phi: + self.phi_p = 0.1 + (0.5 - 0.1) * np.exp(-0.01 * evaluation_counter) + self.phi_g = 0.1 + (0.5 - 0.1) * np.exp(-0.01 * evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + self.omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch.py b/nevergrad/optimization/lama/EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch.py new file mode 100644 index 000000000..a56a6280e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch.py @@ -0,0 +1,156 @@ +import numpy as np + + +class EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-5 + self.learning_rate = 0.1 + + # For adaptive population sizing + self.min_pop_size = 20 + self.max_pop_size = 70 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(5): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self, current_pop_size): + if np.random.rand() < 0.5: + new_pop_size = int(current_pop_size * 1.1) + else: + new_pop_size = int(current_pop_size * 0.9) + return np.clip(new_pop_size, self.min_pop_size, self.max_pop_size) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size(self.pop_size) # Adapt population size here + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.pop_size = new_pop_size # Update population size + + population = np.copy(new_population) + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedRotationalClimbOptimizer.py b/nevergrad/optimization/lama/EnhancedRotationalClimbOptimizer.py new file mode 100644 index 000000000..aa0d02a39 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedRotationalClimbOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class EnhancedRotationalClimbOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 # Reduced population size for more focused search + mutation_rate = 0.2 # Increased mutation rate + rotation_rate = 0.1 # Increased rotation rate for more diversification + alpha = 0.75 # Adjusted blending factor + + # Initialize population within the bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + for i in range(population_size): + # Select mutation indices + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with enhanced rotational component + direction = b - c + theta = rotation_rate * np.pi + rotation_matrix = np.eye(self.dim) + np.fill_diagonal(rotation_matrix[:2, :2], np.cos(theta)) + rotation_matrix[0, 1], rotation_matrix[1, 0] = -np.sin(theta), np.sin(theta) + + rotated_vector = np.dot(rotation_matrix, direction) + mutant = a + mutation_rate * rotated_vector + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Blending and selection + trial = best_solution + alpha * (mutant - best_solution) + trial = np.clip(trial, self.lower_bound, self.upper_bound) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedSelectiveEvolutionaryOptimizerV21.py b/nevergrad/optimization/lama/EnhancedSelectiveEvolutionaryOptimizerV21.py new file mode 100644 index 000000000..57aa168ea --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSelectiveEvolutionaryOptimizerV21.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EnhancedSelectiveEvolutionaryOptimizerV21: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.93, + elite_fraction=0.12, + mutation_strategy="selective_elitist", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # 'selective_elitist' for enhanced focused search on elites + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "selective_elitist": + # Further increase the probability of using best individual as mutant base + if np.random.rand() < 0.90: # Increased focus on the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base creation + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F influenced by the evolution progress + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation of the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..aef669d5e --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,91 @@ +import numpy as np + + +class EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_base = 0.8 # Base differential weight + CR_base = 0.9 # Base crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F_base) + CR_values = np.full(population_size, CR_base) + + # Covariance matrix for adaptive strategies + covariance_matrix = np.eye(self.dim) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Apply covariance matrix to enhance mutation + mutant = mutant + np.random.multivariate_normal(np.zeros(self.dim), covariance_matrix) + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Update the covariance matrix based on the new population + mean = np.mean(new_population, axis=0) + deviations = new_population - mean + covariance_matrix = np.dot(deviations.T, deviations) / population_size + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE.py b/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE.py new file mode 100644 index 000000000..796e2ca7a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE.py @@ -0,0 +1,56 @@ +import numpy as np + + +class EnhancedSelfAdaptiveDE: + def __init__(self, budget=10000, population_size=30, c=0.1, p=0.05): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.c = c + self.p = p + + def initialize_population(self): + self.population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def generate_trial_vector(self, target_idx, scaling_factor): + candidates = np.random.choice( + np.delete(np.arange(self.population_size), target_idx), size=2, replace=False + ) + a, b = self.population[candidates] + mutant_vector = self.population[target_idx] + scaling_factor * (a - b) + + return np.clip(mutant_vector, -5.0, 5.0) + + def update_population(self, func): + for i in range(self.population_size): + scaling_factor = np.random.normal(self.c, self.p) + trial_vector = self.generate_trial_vector(i, scaling_factor) + new_value = func(trial_vector) + + if new_value < func(self.population[i]): + self.population[i] = trial_vector + + def adapt_parameters(self, func): + best_idx = np.argmin([func(ind) for ind in self.population]) + best_solution = self.population[best_idx] + + for i in range(self.population_size): + scaling_factor = np.random.normal(self.c, self.p) + trial_vector = self.generate_trial_vector(i, scaling_factor) + trial_value = func(trial_vector) + + if trial_value < func(self.population[i]): + self.c = (1 - self.p) * self.c + self.p * scaling_factor + self.p = (1 - self.p) * self.p + self.p * int(trial_value < func(best_solution)) + + def __call__(self, func): + self.initialize_population() + + for _ in range(self.budget // self.population_size): + self.update_population(func) + self.adapt_parameters(func) + + best_idx = np.argmin([func(ind) for ind in self.population]) + best_solution = self.population[best_idx] + + return func(best_solution), best_solution diff --git a/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE2.py b/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE2.py new file mode 100644 index 000000000..63ea6769a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSelfAdaptiveDE2.py @@ -0,0 +1,56 @@ +import numpy as np + + +class EnhancedSelfAdaptiveDE2: + def __init__(self, budget=10000, population_size=30, c=0.1, p=0.05): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.c = c + self.p = p + + def initialize_population(self): + self.population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def generate_trial_vector(self, target_idx, scaling_factor): + candidates = np.random.choice( + np.delete(np.arange(self.population_size), target_idx), size=2, replace=False + ) + a, b = self.population[candidates] + mutant_vector = self.population[target_idx] + scaling_factor * (a - b) + + return np.clip(mutant_vector, -5.0, 5.0) + + def update_population(self, func): + for i in range(self.population_size): + scaling_factor = np.random.normal(self.c, self.p) + trial_vector = self.generate_trial_vector(i, scaling_factor) + new_value = func(trial_vector) + + if new_value < func(self.population[i]): + self.population[i] = trial_vector + + def adapt_parameters(self, func): + best_idx = np.argmin([func(ind) for ind in self.population]) + best_solution = self.population[best_idx] + + for i in range(self.population_size): + scaling_factor = np.random.normal(self.c, self.p) + trial_vector = self.generate_trial_vector(i, scaling_factor) + trial_value = func(trial_vector) + + if trial_value < func(self.population[i]): + self.c = 0.9 * self.c + 0.1 * scaling_factor + self.p = 0.9 * self.p + 0.1 * int(trial_value < func(best_solution)) + + def __call__(self, func): + self.initialize_population() + + for _ in range(self.budget // self.population_size): + self.update_population(func) + self.adapt_parameters(func) + + best_idx = np.argmin([func(ind) for ind in self.population]) + best_solution = self.population[best_idx] + + return func(best_solution), best_solution diff --git a/nevergrad/optimization/lama/EnhancedSelfAdaptiveMemeticAlgorithm.py b/nevergrad/optimization/lama/EnhancedSelfAdaptiveMemeticAlgorithm.py new file mode 100644 index 000000000..7b5ed5676 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSelfAdaptiveMemeticAlgorithm.py @@ -0,0 +1,111 @@ +import numpy as np + + +class EnhancedSelfAdaptiveMemeticAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, alpha=0.5, beta=0.5): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.alpha = alpha # Exploration weight + self.beta = beta # Exploitation weight + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def hybrid_step( + self, func, pop, scores, crossover_rates, mutation_factors, learning_rate, memetic_probability + ): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + learning_rate = self.alpha * ((1 - iteration / max_iterations) ** self.beta) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + + # Adapt parameters + self.adaptive_parameters(iteration, max_iterations, crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rates, mutation_factors, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSequentialQuadraticAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/EnhancedSequentialQuadraticAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..0323abc23 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSequentialQuadraticAdaptiveEvolutionStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class EnhancedSequentialQuadraticAdaptiveEvolutionStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.8, alpha=0.1): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.alpha = alpha # Introducing a learning rate for F and CR adjustments + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.best_global_fitness = np.inf + self.best_global_solution = None + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration): + # Gradually decrease F and increase CR based on progression and best fitness found + scale = iteration / self.budget + self.F = max(0.1, self.F - self.alpha * scale * (self.F - 0.1)) + self.CR = min(1.0, self.CR + self.alpha * scale * (1 - self.CR)) + + def update_global_best(self, population, fitnesses): + local_best_index = np.argmin(fitnesses) + local_best_fitness = fitnesses[local_best_index] + if local_best_fitness < self.best_global_fitness: + self.best_global_fitness = local_best_fitness + self.best_global_solution = population[local_best_index].copy() + + def __call__(self, func): + population = self.initialize_population() + self.fitness = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + + while evaluations < self.budget: + self.adjust_parameters(iteration) + self.update_global_best(population, self.fitness) + + for i in range(self.pop_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + population[i], self.fitness[i] = self.select(population[i], trial, func) + evaluations += 1 + if evaluations >= self.budget: + break + iteration += 1 + + return self.best_global_fitness, self.best_global_solution diff --git a/nevergrad/optimization/lama/EnhancedSpatialAdaptiveEvolver.py b/nevergrad/optimization/lama/EnhancedSpatialAdaptiveEvolver.py new file mode 100644 index 000000000..99e0cb1aa --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSpatialAdaptiveEvolver.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EnhancedSpatialAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + initial_step_size=1.0, + step_decay=0.99, + elite_ratio=0.1, + mutation_intensity=0.1, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * ( + self.step_decay**generation + ) # Dynamic step size for exploration adjustment + + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedSpatialAdaptiveOptimizer.py b/nevergrad/optimization/lama/EnhancedSpatialAdaptiveOptimizer.py new file mode 100644 index 000000000..f2aef5d37 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSpatialAdaptiveOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class EnhancedSpatialAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 100 + mutation_factor = 0.6 # Initiate with a moderate mutation to balance exploration + crossover_prob = 0.7 # Moderate crossover probability to maintain diversity + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Begin main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Select three random individuals different from i + indices = np.arange(population_size) + indices = np.delete(indices, i) + x1, x2, x3 = population[np.random.choice(indices, 3, replace=False)] + + # Mutation using current-to-pbest/1 strategy + p_best_index = np.argmin(fitness) + p_best = population[p_best_index] + mutant = x1 + mutation_factor * (p_best - x1 + x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_prob + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + + # Adaptive parameter tuning based on performance + performance_feedback = np.std(fitness) / np.mean(fitness) if np.mean(fitness) != 0 else 0 + mutation_factor = max(0.1, min(0.9, mutation_factor + 0.1 * (0.2 - performance_feedback))) + crossover_prob = max(0.5, min(0.9, crossover_prob + 0.1 * (0.1 - performance_feedback))) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedSpectralHybridOptimization.py b/nevergrad/optimization/lama/EnhancedSpectralHybridOptimization.py new file mode 100644 index 000000000..38efd8669 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSpectralHybridOptimization.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedSpectralHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + elite_size = 15 + mutation_factor = 0.8 + crossover_probability = 0.7 + learning_rate = 0.1 + catastrophe_frequency = 750 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Momentum for velocity update in particles + velocity = np.zeros_like(population) + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Spectral mutation + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = a + mutation_factor * (b - c) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Hybrid crossover with momentum-based Particle Swarm Optimization + p_best = population[i] # Personal best (simplified assumption) + g_best = self.x_opt # Global best + velocity[i] = ( + velocity[i] + + learning_rate * (p_best - population[i]) + + learning_rate * (g_best - population[i]) + ) + trial_vector = population[i] + velocity[i] + + # Adaptive crossover + trial_vector = np.array( + [ + mutant_vector[j] if np.random.rand() < crossover_probability else trial_vector[j] + for j in range(self.dim) + ] + ) + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Fitness evaluation and selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Catastrophic mutation after fixed intervals to avoid local optima + if evaluations % catastrophe_frequency == 0: + for j in range(int(population_size * 0.15)): # Affect 15% of the population + catastrophic_idx = np.random.randint(population_size) + population[catastrophic_idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Maintain an elite set of individuals + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in np.random.choice(range(population_size), elite_size, replace=False): + population[idx] = elite_individuals[np.random.randint(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover.py b/nevergrad/optimization/lama/EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover.py new file mode 100644 index 000000000..712006a57 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover.py @@ -0,0 +1,83 @@ +import numpy as np + + +class EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover: + def __init__( + self, + budget=1000, + population_size=50, + diversification_factor=0.1, + cr_range=(0.2, 0.9), + f_range=(0.2, 0.8), + ): + self.budget = budget + self.population_size = population_size + self.diversification_factor = diversification_factor + self.cr_range = cr_range + self.f_range = f_range + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + cr = np.random.uniform(*self.cr_range, size=self.population_size) + f = np.random.uniform(*self.f_range, size=self.population_size) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + trial_individual = self.generate_trial_individual(population[i], a, b, c, f[i], cr[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + cr, f = self.adapt_parameters(cr, f, fitness_values) + + population = self.population_diversification(population) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, f, cr): + dimension = len(current) + mutant = np.clip(a + f * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < cr + return np.where(crossover_points, mutant, current) + + def adapt_parameters(self, cr, f, fitness_values): + mean_fitness = np.mean(fitness_values) + cr = cr * (1 + 0.1 * (mean_fitness - fitness_values)) + f = f * (1 + 0.1 * (mean_fitness - fitness_values)) + return np.clip(cr, *self.cr_range), np.clip(f, *self.f_range) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/EnhancedStochasticGradientDifferentialEvolution.py b/nevergrad/optimization/lama/EnhancedStochasticGradientDifferentialEvolution.py new file mode 100644 index 000000000..11626e7a2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStochasticGradientDifferentialEvolution.py @@ -0,0 +1,106 @@ +import numpy as np + + +class EnhancedStochasticGradientDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + else: + self.base_lr *= 0.95 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = EnhancedStochasticGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedStochasticMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/EnhancedStochasticMetaHeuristicOptimizer.py new file mode 100644 index 000000000..20088cd5c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStochasticMetaHeuristicOptimizer.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnhancedStochasticMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.6, + mutation_rate=0.03, + num_generations=500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + improvement_counter = 0 # Track the number of consecutive non-improvements + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + improvement_counter = 0 + else: + improvement_counter += 1 + if ( + improvement_counter >= 20 + ): # Reinitialize the particle if no improvement after 20 iterations + swarm[i] = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + personal_best[i] = np.copy(swarm[i]) + improvement_counter = 0 + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/EnhancedStrategicAdaptiveOptimizer.py b/nevergrad/optimization/lama/EnhancedStrategicAdaptiveOptimizer.py new file mode 100644 index 000000000..0db975077 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStrategicAdaptiveOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedStrategicAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Parameters and initial conditions optimized + population_size = 200 + mutation_factor = 0.8 + crossover_rate = 0.7 + sigma = 0.2 # Mutation step size is reduced to control excessive exploration + elite_size = int(0.05 * population_size) # Reduced elite size to encourage diversity + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + while evaluations < self.budget: + new_population = [] + + # Elitism + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + new_population.append(population[idx]) + + # Main evolutionary process + while len(new_population) < population_size: + # Differential Mutation and Crossover + for _ in range(int(population_size / 2)): + target_idx = np.random.randint(0, population_size) + candidates = np.random.choice( + np.delete(np.arange(population_size), target_idx), 3, replace=False + ) + x1, x2, x3 = ( + population[candidates[0]], + population[candidates[1]], + population[candidates[2]], + ) + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[target_idx]) + + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[target_idx]: + new_population.append(trial) + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + else: + new_population.append(population[target_idx]) + + population = np.array(new_population) + fitness = np.array([func(x) for x in population]) + + # Adaptive mutation and crossover rates + mutation_factor = max(0.5, min(1.0, mutation_factor + np.random.uniform(-0.05, 0.05))) + crossover_rate = max(0.5, min(1.0, crossover_rate + np.random.uniform(-0.05, 0.05))) + sigma *= np.exp(0.05 * np.random.randn()) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedStrategicMemoryAdaptiveStrategyV44.py b/nevergrad/optimization/lama/EnhancedStrategicMemoryAdaptiveStrategyV44.py new file mode 100644 index 000000000..a328d0bae --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStrategicMemoryAdaptiveStrategyV44.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EnhancedStrategicMemoryAdaptiveStrategyV44: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover probability + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + if len(self.memory) > 0: + memory_factor = self.memory[np.random.randint(len(self.memory))] + mutant += memory_factor # Integrating memory effect into mutation + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + sigmoid_adjustment = 1 / (1 + np.exp(-10 * (iteration / total_iterations - 0.5))) + self.F = np.clip(0.5 + 0.4 * sigmoid_adjustment, 0.1, 0.9) + self.CR = np.clip(0.5 + 0.4 * np.sin(sigmoid_adjustment), 0.1, 0.9) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + iteration = 0 + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedStrategicPSO.py b/nevergrad/optimization/lama/EnhancedStrategicPSO.py new file mode 100644 index 000000000..c5e3f2c9a --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStrategicPSO.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedStrategicPSO: + def __init__( + self, + budget=10000, + population_size=200, + omega_initial=0.9, + omega_final=0.4, + phi_p=0.2, + phi_g=0.8, + critical_depth=50, + adaptive_depth=20, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal attraction coefficient + self.phi_g = phi_g # Global attraction coefficient + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits of the search space + self.critical_depth = critical_depth # Performance evaluation depth for inertia adaptation + self.adaptive_depth = adaptive_depth # Frequency of inertia adjustments + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + improvement_tracker = np.zeros(self.population_size, dtype=bool) + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + improvement_tracker[i] = True + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.critical_depth :] + + if evaluation_counter >= self.budget: + break + + # Dynamic update of coefficients based on recent improvements + if np.any(improvement_tracker): + self.phi_p, self.phi_g = self.dynamic_adjustment(improvement_tracker, self.phi_p, self.phi_g) + improvement_tracker[:] = False + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + if len(scores) > self.adaptive_depth and np.std(scores[-self.adaptive_depth :]) < 0.01: + return max( + self.omega_final, + self.omega_initial + - (evaluation_counter / self.budget) * (self.omega_initial - self.omega_final) * 2, + ) + else: + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) + + def dynamic_adjustment(self, improvement_tracker, phi_p, phi_g): + improvement_rate = np.mean(improvement_tracker) + if improvement_rate > 0.1: + phi_p *= 0.9 # Decrease personal coefficient if improvements are frequent + phi_g *= 1.1 # Increase global coefficient to explore towards new areas + elif improvement_rate < 0.05: + phi_p *= 1.1 # Increase personal coefficient to refine local exploitation + phi_g *= 0.9 # Decrease global coefficient to reduce jumping towards global best rapidly + return phi_p, phi_g diff --git a/nevergrad/optimization/lama/EnhancedStrategyDE.py b/nevergrad/optimization/lama/EnhancedStrategyDE.py new file mode 100644 index 000000000..c98c77916 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedStrategyDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class EnhancedStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_range=0.3, CR=0.9, strategy="adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically + if self.strategy == "adaptive": + idxs = np.argsort(fitness)[:2] # Select two best for breeding + base = population[idxs[np.random.randint(2)]] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..036d6c0a7 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimization.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.9, + max_step=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV2.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV2.py new file mode 100644 index 000000000..831ecbadd --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV2.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimizationV2: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.9, + max_step=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV3.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV3.py new file mode 100644 index 000000000..9a917e12c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV3.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimizationV3: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.95, + max_step=0.3, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV4.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV4.py new file mode 100644 index 000000000..5e4ba1610 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV4.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimizationV4: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.95, + max_step=0.3, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV5.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV5.py new file mode 100644 index 000000000..a54efbaad --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV5.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimizationV5: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.95, + max_step=0.3, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV6.py b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV6.py new file mode 100644 index 000000000..5f8c8554c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperDynamicQuantumSwarmOptimizationV6.py @@ -0,0 +1,95 @@ +import numpy as np + + +class EnhancedSuperDynamicQuantumSwarmOptimizationV6: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.95, + max_step=0.3, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/EnhancedSuperRefinedRAMEDS.py b/nevergrad/optimization/lama/EnhancedSuperRefinedRAMEDS.py new file mode 100644 index 000000000..e8e4da1e0 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperRefinedRAMEDS.py @@ -0,0 +1,88 @@ +import numpy as np + + +class EnhancedSuperRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + base_crossover=0.7, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.base_crossover = base_crossover # Base level for crossover probability + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor using a logistic function + progress = evaluations / self.budget + F = self.F_min + (self.F_max - self.F_min) * progress + + # Adapt crossover rate based on population fitness variance + fitness_variance = np.var(fitness) + crossover_rate = self.base_crossover + (1.0 - self.base_crossover) * ( + 1 - np.exp(-fitness_variance) + ) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10.py new file mode 100644 index 000000000..4ef995822 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / ( + 2.0 * self.budget + ) # Refined cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27.py new file mode 100644 index 000000000..68f90593d --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27.py @@ -0,0 +1,107 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 # Increased velocity limit for exploration + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) # Increased local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.7 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.15, social_weight + 0.15 # Increased parameters update + else: + return cognitive_weight - 0.1, social_weight - 0.1 + + def adapt_parameters(self, func): + cognitive_weight = 2.0 + social_weight = 2.5 + + for _ in range(self.adaptive_iters): + cognitive_weight, social_weight = self.__call__(func, cognitive_weight, social_weight) + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.7 * r3 * (global_best_pos - particles_pos[i]) # Increased acceleration factor + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return cognitive_weight, social_weight diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6.py new file mode 100644 index 000000000..d1ff0e997 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.4 * t / self.budget # Adjusted inertia weight update for better convergence + + def update_parameters(self, t): + return 1.6 - 1.4 * t / (1.6 * self.budget), 2.0 - 1.6 * t / ( + 1.6 * self.budget + ) # Refined cognitive and social weights update for improved exploration + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7.py new file mode 100644 index 000000000..9da0e8124 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.4 - 0.3 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 1.8 - 1.5 * t / (1.8 * self.budget), 2.2 - 1.8 * t / ( + 1.8 * self.budget + ) # Refined cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.3 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8.py new file mode 100644 index 000000000..1e9556f00 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.4 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 2.0 - 1.6 * t / (2.0 * self.budget), 2.5 - 2.0 * t / ( + 2.0 * self.budget + ) # Refined cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9.py b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9.py new file mode 100644 index 000000000..46a3e8720 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9.py @@ -0,0 +1,86 @@ +import numpy as np + + +class EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9: + def __init__(self, budget=1000, num_particles=30, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.45 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 2.0 - 1.7 * t / (2.0 * self.budget), 2.5 - 2.1 * t / ( + 2.0 * self.budget + ) # Refined cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSuperiorUltimateGuidedMassQGSA_v80.py b/nevergrad/optimization/lama/EnhancedSuperiorUltimateGuidedMassQGSA_v80.py new file mode 100644 index 000000000..9313ddbc6 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSuperiorUltimateGuidedMassQGSA_v80.py @@ -0,0 +1,124 @@ +import numpy as np + + +class EnhancedSuperiorUltimateGuidedMassQGSA_v80: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_superior_ultimate_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_superior_ultimate_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedSupremeDynamicPrecisionOptimizerV1.py b/nevergrad/optimization/lama/EnhancedSupremeDynamicPrecisionOptimizerV1.py new file mode 100644 index 000000000..3a3cd9399 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSupremeDynamicPrecisionOptimizerV1.py @@ -0,0 +1,62 @@ +import numpy as np + + +class EnhancedSupremeDynamicPrecisionOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and adaptive cooling settings + T = 1.2 # Higher initial temperature for more aggressive initial exploration + T_min = 0.0001 # Lower minimum temperature to enable finer search in later stages + alpha = 0.90 # Adjusted cooling rate for an extended search period with finer transitions + + # Mutation and crossover parameters optimized for dynamic adjustments + F = 0.78 # Mutation factor with moderate aggressiveness + CR = 0.88 # High crossover probability to ensure substantial solution mixing + + population_size = ( + 85 # Fine-tuned population size to maintain an efficient exploration-exploitation trade-off + ) + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Integrate a dynamic mutation factor influenced by both temperature and progression + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.75 + 0.25 * np.tanh(3.5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhance acceptance criteria with a more dynamic temperature-dependent probability + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling with a modulation that adapts to the search phase + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/EnhancedSwarmHybridOptimization.py b/nevergrad/optimization/lama/EnhancedSwarmHybridOptimization.py new file mode 100644 index 000000000..4b8a0e1e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedSwarmHybridOptimization.py @@ -0,0 +1,130 @@ +import numpy as np + + +class EnhancedSwarmHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.6 # Inertia weight, slightly reduced for better exploration + + # Adaptive Learning Rate parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Enhanced Hybrid loop + fitness_history = [] + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient descent + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adaptive learning rate strategy with more robust adjustment + if i > 0 and len(fitness_history) > 0: + recent_improvement = np.mean(np.diff(fitness_history[-5:])) + if recent_improvement < 0: + alpha = min( + alpha * 1.1, 1.0 + ) # Increase learning rate more aggressively if recent improvement + else: + alpha = max( + alpha * 0.5, 0.01 + ) # Decrease learning rate more conservatively if no recent improvement + + fitness_history.append(f) + prev_f = f + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = EnhancedSwarmHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnhancedTwoPhaseDynamicStrategyV39.py b/nevergrad/optimization/lama/EnhancedTwoPhaseDynamicStrategyV39.py new file mode 100644 index 000000000..83cb2922b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedTwoPhaseDynamicStrategyV39.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnhancedTwoPhaseDynamicStrategyV39: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Simple DE mutation strategy + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # More aggressive inter-vector interaction + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adjust_parameters(self, iteration, total_iterations): + # Use advanced sigmoid-based dynamic adjustment for a smoother parameter transition + scale = iteration / total_iterations + self.F = np.clip(0.8 - 0.7 * np.tanh(10 * (scale - 0.5)), 0.1, 0.8) + self.CR = np.clip(0.9 * (1 - np.sin(np.pi * scale)), 0.2, 0.9) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + iteration += 1 + + return fitnesses[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..a85868bb2 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class EnhancedUltimateDynamicFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.15, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithmImproved.py b/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithmImproved.py new file mode 100644 index 000000000..4935a2a84 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateDynamicFireworkAlgorithmImproved.py @@ -0,0 +1,101 @@ +import numpy as np + + +class EnhancedUltimateDynamicFireworkAlgorithmImproved: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=2000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.8, + mutation_rate=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + else: + self.alpha[k] *= 1.05 # Increase alpha + self.beta[k] *= 0.95 # Decrease beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltimateEvolutionaryGradientOptimizerV36.py b/nevergrad/optimization/lama/EnhancedUltimateEvolutionaryGradientOptimizerV36.py new file mode 100644 index 000000000..9a12fc653 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateEvolutionaryGradientOptimizerV36.py @@ -0,0 +1,80 @@ +import numpy as np + + +class EnhancedUltimateEvolutionaryGradientOptimizerV36: + def __init__( + self, + budget=10000, + population_size=160, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased base mutation factor to enhance exploration + self.F_range = F_range # Narrowed range to stabilize mutation effects + self.CR = CR # Slightly reduced crossover frequency to balance exploration with exploitation + self.elite_fraction = ( + elite_fraction # Increased elite fraction to leverage a larger base of good solutions + ) + self.mutation_strategy = mutation_strategy # 'adaptive' for dynamic base selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.82: # Increased focus on exploiting the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py new file mode 100644 index 000000000..d55920b5c --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP.py @@ -0,0 +1,84 @@ +import numpy as np + + +class EnhancedUltimateRefinedAQAPSO_LS_DIW_AP: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.7 - 0.5 * t / self.budget # Improved inertia weight update + + def update_parameters(self, t): + return 1.5 - 1.0 * t / (1.5 * self.budget), 2.0 - 1.2 * t / ( + 1.5 * self.budget + ) # Adjusted cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined.py b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined.py new file mode 100644 index 000000000..28e4a3428 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined.py @@ -0,0 +1,93 @@ +import numpy as np + + +class EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.7 - 0.4 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 1.5 - 1.2 * t / (1.5 * self.budget), 2.0 - 1.5 * t / ( + 1.5 * self.budget + ) # Refined cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + if t % 100 == 0: + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Explore the search space more intensively + for i in range(self.num_particles): + r4 = np.random.rand(self.dim) + particles_pos[i] = (1 - r4) * particles_pos[i] + r4 * global_best_pos + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2.py b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2.py new file mode 100644 index 000000000..c5d3f3473 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.6 - 0.3 * t / self.budget # Adjusted inertia weight update for faster convergence + + def update_parameters(self, t): + return 1.8 - 1.4 * t / (1.5 * self.budget), 2.2 - 1.6 * t / ( + 1.5 * self.budget + ) # Refined cognitive and social weights update for better exploration + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3.py b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3.py new file mode 100644 index 000000000..f41d7be6b --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3.py @@ -0,0 +1,85 @@ +import numpy as np + + +class EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.5 - 0.4 * t / self.budget # Adjusted inertia weight update for faster convergence + + def update_parameters(self, t): + return 1.5 - 1.3 * t / (1.5 * self.budget), 2.0 - 1.5 * t / ( + 1.5 * self.budget + ) # Refined cognitive and social weights update for better exploration + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.5 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44.py b/nevergrad/optimization/lama/EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44.py new file mode 100644 index 000000000..6cd787e22 --- /dev/null +++ b/nevergrad/optimization/lama/EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44.py @@ -0,0 +1,78 @@ +import numpy as np + + +class EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.98, + elite_fraction=0.09, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased mutation base + self.F_range = F_range # Narrowed mutation range for more precise adjustments + self.CR = CR # Increased crossover probability for tighter exploration + self.elite_fraction = elite_fraction # Slightly decreased elite fraction for more diversity + self.mutation_strategy = mutation_strategy # Retain adaptive mutation strategy with enhancements + self.dim = 5 # Dimensionality of the problem remains constant + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on a dynamic probability that focuses more on local search + if np.random.rand() < 0.8: # Increased probability to emphasize elite influence + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjusted mutation factor + F = self.F_base + (np.random.rand() * self.F_range) + + # DE/rand/1 mutation strategy with dynamic mutation factor + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with a high crossover rate + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness evaluation + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exhaustion of budget check + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EnsembleAdaptiveEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/EnsembleAdaptiveEvolutionaryAlgorithm.py new file mode 100644 index 000000000..f314c5476 --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleAdaptiveEvolutionaryAlgorithm.py @@ -0,0 +1,73 @@ +import numpy as np + + +class EnsembleAdaptiveEvolutionaryAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation and Crossover using Differential Evolution + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < CR[i] + if not np.any(crossover_points): + crossover_points[np.random.randint(0, self.dim)] = True + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + F[i] = F[i] + 0.1 * (np.random.rand() - 0.5) + F[i] = np.clip(F[i], 0.5, 1.0) + CR[i] = CR[i] + 0.1 * (np.random.rand() - 0.5) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Periodically introduce new random solutions (to avoid local optima) + if evaluations % (population_size // 2) == 0: + new_population = np.random.uniform(self.lb, self.ub, (population_size // 5, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += len(new_fitness) + + # Replace worst individuals with new random individuals + worst_indices = fitness.argsort()[-(population_size // 5) :] + population[worst_indices] = new_population + fitness[worst_indices] = new_fitness + + # Reinitialize strategy parameters for new individuals + F[worst_indices] = np.random.uniform(0.5, 1.0, population_size // 5) + CR[worst_indices] = np.random.uniform(0.1, 0.9, population_size // 5) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnsembleAdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/EnsembleAdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..faca469e1 --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleAdaptiveMemeticOptimizer.py @@ -0,0 +1,146 @@ +import numpy as np +from scipy.optimize import minimize + + +class EnsembleAdaptiveMemeticOptimizer: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.5 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 7 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (self.budget - eval_count) / self.budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + else: + new_population.append(trial) + if neighbor_fitness < best_fitness: + best_individual = trial + best_fitness = neighbor_fitness + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = EnsembleAdaptiveMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/EnsembleAdaptiveQuantumDE.py b/nevergrad/optimization/lama/EnsembleAdaptiveQuantumDE.py new file mode 100644 index 000000000..fbc0c0cff --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleAdaptiveQuantumDE.py @@ -0,0 +1,130 @@ +import numpy as np + + +class EnsembleAdaptiveQuantumDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, local_search_steps=100): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/EnsembleDE.py b/nevergrad/optimization/lama/EnsembleDE.py new file mode 100644 index 000000000..3244372fb --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleDE.py @@ -0,0 +1,94 @@ +import numpy as np + + +class EnsembleDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Update archive + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + # Combine elite and new population + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Increment generation count + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnsembleEvolutionaryCulturalSearch.py b/nevergrad/optimization/lama/EnsembleEvolutionaryCulturalSearch.py new file mode 100644 index 000000000..3c2f3cbe9 --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleEvolutionaryCulturalSearch.py @@ -0,0 +1,117 @@ +import numpy as np + + +class EnsembleEvolutionaryCulturalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: # More infrequent updates + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnsembleHybridSearch.py b/nevergrad/optimization/lama/EnsembleHybridSearch.py new file mode 100644 index 000000000..ff226c4bf --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleHybridSearch.py @@ -0,0 +1,90 @@ +import numpy as np + + +class EnsembleHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 30 + differential_weight = 0.8 + crossover_rate = 0.9 + inertia_weight = 0.7 + cognitive_coefficient = 1.5 + social_coefficient = 1.5 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + evaluations = population_size + + while evaluations < self.budget: + for i in range(population_size): + # Particle Swarm Optimization Part + inertia = inertia_weight * velocity[i] + cognitive = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_positions[i] - population[i]) + ) + social = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive + social + population[i] = np.clip(population[i] + velocity[i], self.lb, self.ub) + + # Differential Evolution Part + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = EnsembleHybridSearch(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/EnsembleMemeticAlgorithm.py b/nevergrad/optimization/lama/EnsembleMemeticAlgorithm.py new file mode 100644 index 000000000..6c8cbe5fb --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleMemeticAlgorithm.py @@ -0,0 +1,104 @@ +import numpy as np + + +class EnsembleMemeticAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step(func, pop, scores, crossover_rates, mutation_factors) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EnsembleMutationAdaptiveDE.py b/nevergrad/optimization/lama/EnsembleMutationAdaptiveDE.py new file mode 100644 index 000000000..11436f843 --- /dev/null +++ b/nevergrad/optimization/lama/EnsembleMutationAdaptiveDE.py @@ -0,0 +1,141 @@ +import numpy as np + + +class EnsembleMutationAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution with ensemble mutations + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + trial = self.ensemble_mutation(pop, elite_pop, mutation_factor, lower_bound, upper_bound) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.simulated_annealing_local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def ensemble_mutation(self, pop, elite_pop, mutation_factor, lower_bound, upper_bound): + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(len(elite_pop), 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + if np.random.rand() < 0.5: + mutant = x1 + mutation_factor * (x2 - x3) + else: + mutant = x1 + mutation_factor * (x2 - np.random.uniform(lower_bound, upper_bound, self.dim)) + + mutant = np.clip(mutant, lower_bound, upper_bound) + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[np.random.randint(self.pop_size)]) + + return trial + + def simulated_annealing_local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + temp = 1.0 + cooling_rate = 0.99 + + while temp > 1e-3 and self.budget > 0: + perturbation = np.random.uniform(-0.02, 0.02, self.dim) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 + + if new_f < best_f or np.exp((best_f - new_f) / temp) > np.random.rand(): + best_x = new_x + best_f = new_f + + temp *= cooling_rate + + return best_x diff --git a/nevergrad/optimization/lama/EntropyEnhancedAdaptiveStrategyV61.py b/nevergrad/optimization/lama/EntropyEnhancedAdaptiveStrategyV61.py new file mode 100644 index 000000000..4c0dc3e0b --- /dev/null +++ b/nevergrad/optimization/lama/EntropyEnhancedAdaptiveStrategyV61.py @@ -0,0 +1,81 @@ +import numpy as np + + +class EntropyEnhancedAdaptiveStrategyV61: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.entropy_threshold = 0.05 # Threshold to adjust mutation rates + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def calculate_population_entropy(self, population): + norm_pop = (population - self.lower_bounds) / (self.upper_bounds - self.lower_bounds) + digitized = np.digitize(norm_pop, bins=np.linspace(0, 1, 10)) + hist = [np.histogram(digitized[:, i], bins=10, range=(1, 10))[0] for i in range(self.dimension)] + hist = np.array(hist) + 1 # Avoid log(0) + probs = hist / np.sum(hist, axis=1, keepdims=True) + entropy = -np.sum(probs * np.log(probs), axis=1) + return np.mean(entropy) + + def adjust_parameters(self, population): + entropy = self.calculate_population_entropy(population) + if entropy < self.entropy_threshold: + self.F = np.clip(self.F * 0.9, 0.1, 1) # Decrease mutation rate if diversity is low + self.CR = np.clip(self.CR * 1.1, 0.1, 1) # Increase crossover rate to introduce more diversity + else: + self.F = np.clip(self.F * 1.1, 0.1, 1) + self.CR = np.clip(self.CR * 0.9, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + self.adjust_parameters(population) + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/EvolutionaryConvergenceSpiralSearch.py b/nevergrad/optimization/lama/EvolutionaryConvergenceSpiralSearch.py new file mode 100644 index 000000000..08b7fb4c8 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryConvergenceSpiralSearch.py @@ -0,0 +1,61 @@ +import numpy as np + + +class EvolutionaryConvergenceSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize the centroid and search parameters + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Initial radius + angle_increment = np.pi / 16 # Initial angle increment for exploration + + # Adaptive decay rates for flexibility + radius_decay = 0.98 # Radius decay rate + angle_refinement = 0.98 # Angle refinement rate + evaluations_left = self.budget + min_radius = 0.0005 # Extremely fine minimum radius for detailed exploration + + # Evolutionary adaptations + population_size = 15 # Number of points to consider around the centroid + offspring_size = 10 # Number of new points generated from mutations + mutation_rate = 0.1 # Mutation rate for generating offspring + + while evaluations_left > 0: + # Generate a population around the centroid + population = np.array( + [centroid + radius * np.random.uniform(-1, 1, self.dim) for _ in range(population_size)] + ) + population = np.clip(population, -5.0, 5.0) # Enforce bounds + fitness = np.array([func(ind) for ind in population]) + evaluations_left -= population_size + + # Select the best individual + best_idx = np.argmin(fitness) + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Generate offspring by mutation + offspring = np.array( + [ + population[best_idx] + mutation_rate * np.random.normal(0, radius, self.dim) + for _ in range(offspring_size) + ] + ) + offspring = np.clip(offspring, -5.0, 5.0) # Enforce bounds + offspring_fitness = np.array([func(ind) for ind in offspring]) + evaluations_left -= offspring_size + + # Update centroid and search parameters + centroid = offspring[np.argmin(offspring_fitness)] + radius *= radius_decay + radius = max(radius, min_radius) + angle_increment *= angle_refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EvolutionaryDynamicGradientSearch.py b/nevergrad/optimization/lama/EvolutionaryDynamicGradientSearch.py new file mode 100644 index 000000000..b35d0c1b2 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryDynamicGradientSearch.py @@ -0,0 +1,102 @@ +import numpy as np + + +class EvolutionaryDynamicGradientSearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.crossover_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.learning_rate = 0.01 * np.exp(-iteration / max_iterations) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = (self.budget // self.population_size) * 2 + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step + pop, scores = self.local_search(func, pop, scores) + evaluations += self.population_size + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizer.py b/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizer.py new file mode 100644 index 000000000..6a162196b --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EvolutionaryGradientHybridOptimizer: + def __init__( + self, budget=10000, population_size=50, F_base=0.6, F_increment=0.2, CR=0.9, elite_fraction=0.2 + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_increment = F_increment # Increment for the mutation factor F + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while evaluations < self.budget: + F = self.F_base + np.sin(evaluations / self.budget * np.pi) * self.F_increment + + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Choose three random indices excluding current index i + candidates = np.random.choice(elite_indices, 3, replace=False) + x1, x2, x3 = population[candidates[0]], population[candidates[1]], population[candidates[2]] + + # Mutation (DE/rand/1/bin) + mutant = x1 + F * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial.copy() + + # Check budget + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizerV2.py b/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizerV2.py new file mode 100644 index 000000000..1e12f79e0 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryGradientHybridOptimizerV2.py @@ -0,0 +1,76 @@ +import numpy as np + + +class EvolutionaryGradientHybridOptimizerV2: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.55, + F_range=0.35, + CR=0.88, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/EvolutionaryGradientSearch.py b/nevergrad/optimization/lama/EvolutionaryGradientSearch.py new file mode 100644 index 000000000..1f2183506 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryGradientSearch.py @@ -0,0 +1,97 @@ +import numpy as np + + +class EvolutionaryGradientSearch: + def __init__(self, budget, population_size=50, mutation_rate=0.1, crossover_rate=0.7, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize the population + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(scores) + global_best_position = population[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + # Selection: Tournament selection + selected = [] + for _ in range(self.population_size): + i, j = np.random.randint(0, self.population_size, 2) + if scores[i] < scores[j]: + selected.append(population[i]) + else: + selected.append(population[j]) + selected = np.array(selected) + + # Crossover: Blend Crossover (BLX-α) + offspring = [] + for i in range(0, self.population_size, 2): + if i + 1 >= self.population_size: + break + parent1, parent2 = selected[i], selected[i + 1] + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-0.5, 1.5, dim) + child1 = alpha * parent1 + (1 - alpha) * parent2 + child2 = alpha * parent2 + (1 - alpha) * parent1 + else: + child1, child2 = parent1, parent2 + offspring.extend([child1, child2]) + offspring = np.array(offspring[: self.population_size]) + + # Mutation: Gaussian mutation + for i in range(self.population_size): + if np.random.rand() < self.mutation_rate: + offspring[i] += np.random.normal(0, 0.1, dim) + offspring[i] = np.clip(offspring[i], lower_bound, upper_bound) + + # Gradient-based local search + for i in range(self.population_size): + grad = self.gradient_estimation(func, offspring[i]) + offspring[i] = np.clip(offspring[i] - self.learning_rate * grad, lower_bound, upper_bound) + + # Evaluate offspring + offspring_scores = np.array([func(ind) for ind in offspring]) + evaluations += self.population_size + + # Elitism: Preserve the best solution + if global_best_score < np.min(offspring_scores): + worst_idx = np.argmax(offspring_scores) + offspring[worst_idx] = global_best_position + offspring_scores[worst_idx] = global_best_score + + # Update population and scores + population, scores = offspring, offspring_scores + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = population[best_idx] + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EvolutionaryHarmonicFireworkAlgorithm.py b/nevergrad/optimization/lama/EvolutionaryHarmonicFireworkAlgorithm.py new file mode 100644 index 000000000..9b4598346 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryHarmonicFireworkAlgorithm.py @@ -0,0 +1,65 @@ +import numpy as np + + +class EvolutionaryHarmonicFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=30, n_sparks=10, scale_factor=0.1): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.scale_factor = scale_factor + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform( + firework - self.scale_factor, firework + self.scale_factor, (self.n_sparks, self.dim) + ) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, 1, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + 0.5 * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for _ in range(self.budget): + fireworks = self.evolve_fireworks(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/EvolutionaryParticleSwarmOptimizer.py b/nevergrad/optimization/lama/EvolutionaryParticleSwarmOptimizer.py new file mode 100644 index 000000000..db10d0268 --- /dev/null +++ b/nevergrad/optimization/lama/EvolutionaryParticleSwarmOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class EvolutionaryParticleSwarmOptimizer: + def __init__( + self, + budget, + swarm_size=20, + inertia_weight=0.7, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.2, + mutation_rate=0.05, + ): + self.budget = budget + self.swarm_size = swarm_size + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity): + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def __call__(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.budget): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity + ) + personal_best[i] = np.where( + func(swarm[i]) < func(personal_best[i]), swarm[i], personal_best[i] + ) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best diff --git a/nevergrad/optimization/lama/ExDADe.py b/nevergrad/optimization/lama/ExDADe.py new file mode 100644 index 000000000..1375467d3 --- /dev/null +++ b/nevergrad/optimization/lama/ExDADe.py @@ -0,0 +1,73 @@ +import numpy as np + + +class ExDADe: + def __init__(self, budget, population_size=20, F_base=0.8, CR_base=0.7, epsilon=1e-10): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + self.epsilon = epsilon + + def __call__(self, func): + # Initialize population within the bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Tracking the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Continued adaptation mechanism for mutation and crossover parameters + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Enhanced adaptation for F and CR based on progress and diversity + progress = num_evals / self.budget + diversity = np.std(population).max() + self.epsilon + F = self.F_base * (0.5 + np.random.rand()) * diversity + CR = self.CR_base * (1 - np.exp(-3 * progress)) + + # Mutation: "current-to-best/1" + j_rand = np.random.randint(self.dimension) + mutant = ( + population[i] + + F * (best_individual - population[i]) + + F + * ( + population[np.random.randint(self.population_size)] + - population[np.random.randint(self.population_size)] + ) + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover: binomial + trial = np.where(np.random.rand(self.dimension) < CR, mutant, population[i]) + trial[j_rand] = mutant[j_rand] # Ensuring at least one dimension comes from mutant + + # Evaluate the trial solution + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/FEDE.py b/nevergrad/optimization/lama/FEDE.py new file mode 100644 index 000000000..34d8f6ce2 --- /dev/null +++ b/nevergrad/optimization/lama/FEDE.py @@ -0,0 +1,64 @@ +import numpy as np + + +class FEDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.elite_size = 5 # Number of best solutions to consider for feedback + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_solutions): + mutants = np.empty_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx not in best_solutions] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.mutation_factor * (population[b] - population[c]) + mutants[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return mutants + + def crossover(self, target, mutant): + mask = np.random.rand(self.dimension) < self.crossover_probability + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + for i in range(self.population_size): + trial = self.crossover(population[i], mutants[i]) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + return new_population, new_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + + while evaluations < self.budget: + sorted_indices = np.argsort(fitness) + best_solutions = sorted_indices[: self.elite_size] + mutants = self.mutate(population, best_solutions) + population, fitness = self.select(population, fitness, mutants, func) + evaluations += self.population_size + + # Adapt mutation factor and crossover probability dynamically + self.mutation_factor = np.clip(self.mutation_factor * 0.99, 0.1, 1.0) + self.crossover_probability = np.clip(self.crossover_probability * 1.01, 0.1, 1.0) + + best_index = np.argmin(fitness) + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/FTADEEM.py b/nevergrad/optimization/lama/FTADEEM.py new file mode 100644 index 000000000..1305eb171 --- /dev/null +++ b/nevergrad/optimization/lama/FTADEEM.py @@ -0,0 +1,63 @@ +import numpy as np + + +class FTADEEM: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.5, alpha=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.alpha = alpha # Rate of adaptive adjustment + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + F = self.F_base + self.alpha * np.random.randn() # Add small noise for diversification + CR = self.CR_base + self.alpha * np.random.randn() + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation strategy using "DE/rand/1/bin" + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py b/nevergrad/optimization/lama/FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py new file mode 100644 index 000000000..9e0c9542a --- /dev/null +++ b/nevergrad/optimization/lama/FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch.py @@ -0,0 +1,96 @@ +import numpy as np +from scipy.optimize import minimize + + +class FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_trials=5, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_trials = local_search_trials + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_trials}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FinalEnhancedDynamicLocalSearchFireworkAlgorithm.py b/nevergrad/optimization/lama/FinalEnhancedDynamicLocalSearchFireworkAlgorithm.py new file mode 100644 index 000000000..f70e23c6f --- /dev/null +++ b/nevergrad/optimization/lama/FinalEnhancedDynamicLocalSearchFireworkAlgorithm.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.optimize import minimize + + +class FinalEnhancedDynamicLocalSearchFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py b/nevergrad/optimization/lama/FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py new file mode 100644 index 000000000..e3ee8df78 --- /dev/null +++ b/nevergrad/optimization/lama/FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py @@ -0,0 +1,112 @@ +import numpy as np +from scipy.optimize import minimize + + +class FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def adaptive_local_search(self, func): + improved = False + for i in range(self.population_size): + current_fitness = func(self.fireworks[i][0]) + new_individual = self.local_search(self.fireworks[i][0], func) + new_fitness = func(new_individual) + if new_fitness < current_fitness: + self.fireworks[i] = (np.copy(new_individual), 0) + improved = True + + return improved + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + if func(x) < self.best_fitness: + self.best_individual = np.copy(x) + self.best_fitness = func(x) + + improved = self.adaptive_local_search(func) + if improved: + self.run_firework_algorithm(func) + + def __call__(self, func): + self.run_firework_algorithm(func) + + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FinalEnhancedRefinedUltimateGuidedMassQGSA_v75.py b/nevergrad/optimization/lama/FinalEnhancedRefinedUltimateGuidedMassQGSA_v75.py new file mode 100644 index 000000000..12d3246da --- /dev/null +++ b/nevergrad/optimization/lama/FinalEnhancedRefinedUltimateGuidedMassQGSA_v75.py @@ -0,0 +1,124 @@ +import numpy as np + + +class FinalEnhancedRefinedUltimateGuidedMassQGSA_v75: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_final_enhanced_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_final_enhanced_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..e249f8fdd --- /dev/null +++ b/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithm.py @@ -0,0 +1,91 @@ +import numpy as np + + +class FinalOptimizedEnhancedDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined.py b/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined.py new file mode 100644 index 000000000..8c085cae8 --- /dev/null +++ b/nevergrad/optimization/lama/FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined.py @@ -0,0 +1,91 @@ +import numpy as np + + +class FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FineTunedCohortDiversityOptimizer.py b/nevergrad/optimization/lama/FineTunedCohortDiversityOptimizer.py new file mode 100644 index 000000000..b919412c0 --- /dev/null +++ b/nevergrad/optimization/lama/FineTunedCohortDiversityOptimizer.py @@ -0,0 +1,71 @@ +import numpy as np + + +class FineTunedCohortDiversityOptimizer: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_intensity=0.05, + recombination_prob=0.8, + adaptation_rate=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.recombination_prob = recombination_prob + self.adaptation_rate = adaptation_rate + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + + for i in range(self.population_size): + # Selecting parents using elite indices + if np.random.rand() < self.recombination_prob: + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices[0]], population[parents_indices[1]] + mask = np.random.rand(self.dimension) < 0.5 + child = np.where(mask, parent1, parent2) + else: + child = population[ + np.random.choice(elite_indices) + ].copy() # Inherit directly from a single elite + + # Mutation: perturb the offspring + mutation = np.random.normal(scale=self.mutation_intensity, size=self.dimension) + child = np.clip(child + mutation, -5.0, 5.0) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + # Adaptively update mutation intensity + self.mutation_intensity *= self.adaptation_rate + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/FineTunedFocusedAdaptiveOptimizer.py b/nevergrad/optimization/lama/FineTunedFocusedAdaptiveOptimizer.py new file mode 100644 index 000000000..23a024c4f --- /dev/null +++ b/nevergrad/optimization/lama/FineTunedFocusedAdaptiveOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class FineTunedFocusedAdaptiveOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=100): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.9 # Slightly increased global influence + self.local_influence = 0.1 # Decreased local influence for more focus on global best + self.vel_scale = 0.05 # Reduced velocity scaling for more fine-tuned adjustments + self.learning_rate = 0.6 # Learning rate optimized for faster convergence + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = ( + np.random.uniform(-1, 1, (self.particles, self.dimension)) + * (self.bounds[1] - self.bounds[0]) + * 0.1 + ) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + self.learning_rate * r1 * (personal_best_positions[i] - positions[i]) + + self.learning_rate * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/FineTunedProgressiveAdaptiveSearch.py b/nevergrad/optimization/lama/FineTunedProgressiveAdaptiveSearch.py new file mode 100644 index 000000000..f4b016294 --- /dev/null +++ b/nevergrad/optimization/lama/FineTunedProgressiveAdaptiveSearch.py @@ -0,0 +1,80 @@ +import numpy as np + + +class FineTunedProgressiveAdaptiveSearch: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=350): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.8 # Reduced global influence to promote more refined local exploration + self.local_influence = 0.2 # Increased local influence for better local search capabilities + self.vel_scale = 0.1 # Fine-tuned velocity scaling for more aggressive movements + self.learning_rate = 0.6 # Adjusted learning rate for careful adaptation + self.adaptive_rate = 0.02 # Slightly reduced to maintain stability in convergence + self.exploration_phase = 0.25 # A designated proportion of the budget to exploration + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = ( + np.random.uniform(-1, 1, (self.particles, self.dimension)) + * (self.bounds[1] - self.bounds[0]) + * self.vel_scale + ) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + exploration_budget = int(self.budget * self.exploration_phase) + + while evaluations < self.budget: + for i in range(self.particles): + if evaluations < exploration_budget: + current_global_influence = self.global_influence + current_local_influence = self.local_influence + else: + # Enhance local search as optimization progresses + current_global_influence = self.global_influence * (1 - (evaluations / self.budget)) + current_local_influence = self.local_influence + (evaluations / self.budget) * 0.3 + + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + current_global_influence * r1 * (personal_best_positions[i] - positions[i]) + + current_local_influence * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/FocusedBalancedAdaptivePSO.py b/nevergrad/optimization/lama/FocusedBalancedAdaptivePSO.py new file mode 100644 index 000000000..bf9da8dd9 --- /dev/null +++ b/nevergrad/optimization/lama/FocusedBalancedAdaptivePSO.py @@ -0,0 +1,78 @@ +import numpy as np + + +class FocusedBalancedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_initial=0.95, + omega_final=0.2, + phi_p=0.15, + phi_g=0.45, + adaptive_depth=3, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal best influence factor + self.phi_g = phi_g # Global best influence factor + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.adaptive_depth = adaptive_depth # Depth of adaptive adjustment based on recent performance + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + # Adaptive inertia adjustment based on performance trend + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.adaptive_depth :] + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + if len(scores) > 1 and np.std(scores) < 0.01: + # Intensify exploitation by reducing inertia when little improvement is seen + return max(self.omega_final, self.omega_initial - (evaluation_counter / self.budget) * 1.5) + else: + # Regular update rule + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) diff --git a/nevergrad/optimization/lama/FocusedEvolutionStrategy.py b/nevergrad/optimization/lama/FocusedEvolutionStrategy.py new file mode 100644 index 000000000..ddd665af1 --- /dev/null +++ b/nevergrad/optimization/lama/FocusedEvolutionStrategy.py @@ -0,0 +1,60 @@ +import numpy as np + + +class FocusedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial populations and parameters + population_size = 100 + sigma = 0.5 + elite_size = max(1, int(population_size * 0.05)) + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + for _ in range(int(self.budget / population_size)): + # Elitism: keep the best solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population = population[elite_indices].copy() + new_fitness = fitness[elite_indices].copy() + + # Generate new population based on best solutions + for i in range(elite_size, population_size): + # Select parent from elite randomly + parent_index = np.random.choice(elite_indices) + parent = population[parent_index] + + # Apply Gaussian mutation + offspring = parent + np.random.normal(0, sigma, self.dim) + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + offspring_fitness = func(offspring) + + # Replace if better + if offspring_fitness < fitness[parent_index]: + new_population = np.vstack([new_population, offspring]) + new_fitness = np.append(new_fitness, offspring_fitness) + else: + new_population = np.vstack([new_population, parent]) + new_fitness = np.append(new_fitness, fitness[parent_index]) + + # Update population + population = new_population + fitness = new_fitness + + # Update the best solution found + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/FractionalOrderClusterHybridOptimization.py b/nevergrad/optimization/lama/FractionalOrderClusterHybridOptimization.py new file mode 100644 index 000000000..2ca9d61db --- /dev/null +++ b/nevergrad/optimization/lama/FractionalOrderClusterHybridOptimization.py @@ -0,0 +1,118 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class FractionalOrderClusterHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def fractional_order_velocity_update(self, velocity, order=0.5): + return np.sign(velocity) * (np.abs(velocity) ** order) + + def __call__(self, func): + population_size = 80 # Increased population size for diversity + + # Enhanced Initialization using Sobol Sequence + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size) + population = qmc.scale(sample, self.lb, self.ub) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + last_improvement = 0 + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations - last_improvement > self.budget // 10: + strategy = "DE" # Switch to DE if no improvement for a while + else: + strategy = "PSO" + + if strategy == "PSO": + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * self.fractional_order_velocity_update(velocity[i]) + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy with Enhanced Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + scaling_factor = 0.5 + np.random.rand() * 0.5 + mutant_vector = np.clip(a + scaling_factor * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + last_improvement = evaluations + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/FurtherEnhancedHybridMetaHeuristicOptimizerV13.py b/nevergrad/optimization/lama/FurtherEnhancedHybridMetaHeuristicOptimizerV13.py new file mode 100644 index 000000000..195f56306 --- /dev/null +++ b/nevergrad/optimization/lama/FurtherEnhancedHybridMetaHeuristicOptimizerV13.py @@ -0,0 +1,93 @@ +import numpy as np + + +class FurtherEnhancedHybridMetaHeuristicOptimizerV13: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.6, + social_weight=1.6, + max_velocity=0.8, + mutation_rate=0.1, + num_generations=200, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/GEEA.py b/nevergrad/optimization/lama/GEEA.py new file mode 100644 index 000000000..07d79f615 --- /dev/null +++ b/nevergrad/optimization/lama/GEEA.py @@ -0,0 +1,68 @@ +import numpy as np + + +class GEEA: + def __init__(self, budget, population_size=30, alpha=0.5, beta=0.1, gamma=0.1): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.alpha = alpha # Learning rate for exploration + self.beta = beta # Learning rate for exploitation + self.gamma = gamma # Mutation factor for diversity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + while num_evals < self.budget: + new_population = [] + for i in range(self.population_size): + # Exploration: Select random individuals + indices = np.random.permutation(self.population_size) + x1, x2, x3 = population[indices[:3]] + + # Mutation and crossover + mutant_vector = x1 + self.gamma * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Crossover (binomial) + trial_vector = np.where( + np.random.rand(self.dimension) < self.beta, mutant_vector, population[i] + ) + + # Exploitation: Learning from the best + trial_vector += self.alpha * (best_individual - population[i]) + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Fitness evaluation + trial_fitness = func(trial_vector) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + else: + new_population.append(population[i]) + + if num_evals >= self.budget: + break + + population = np.array(new_population) + + return best_fitness, best_individual + + +# Usage of GEEA: +# optimizer = GEEA(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/GESA.py b/nevergrad/optimization/lama/GESA.py new file mode 100644 index 000000000..5fa8797b0 --- /dev/null +++ b/nevergrad/optimization/lama/GESA.py @@ -0,0 +1,62 @@ +import numpy as np + + +class GESA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.cr = 0.9 # Initial crossover probability + self.f = 0.8 # Initial differential weight + self.initial_temp = 1.0 + self.final_temp = 0.01 + self.alpha = 0.95 # Cooling rate + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, temperature): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = population[best_idx] + self.f * temperature * (x1 - x2 + x3 - population[best_idx]) + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_solution = population[best_idx] + temperature = self.initial_temp + + while evaluations < self.budget: + mutated_population = self.mutate(population, best_idx, temperature) + offspring_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i] or np.random.rand() < np.exp( + (fitness[i] - offspring_fitness[i]) / temperature + ): + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution, best_idx = fitness[i], population[i], i + + temperature *= self.alpha # Cool down + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/GGAES.py b/nevergrad/optimization/lama/GGAES.py new file mode 100644 index 000000000..16e232bd5 --- /dev/null +++ b/nevergrad/optimization/lama/GGAES.py @@ -0,0 +1,75 @@ +import numpy as np + + +class GGAES: + def __init__(self, budget, population_size=150, F_base=0.8, CR_base=0.7, adapt_rate=0.2): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.adapt_rate = adapt_rate # Adaptation rate for parameters + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Adaptive F and CR + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.2, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.2, 1.0) + + gradients = np.zeros_like(population) + gradient_step = 0.1 * (self.ub - self.lb) + + # Estimate gradients for population + for i in range(self.population_size): + for d in range(self.dimension): + plus = population[i].copy() + minus = population[i].copy() + plus[d] += gradient_step + minus[d] -= gradient_step + grad_fitness_plus = func(plus) + grad_fitness_minus = func(minus) + gradients[i, d] = (grad_fitness_plus - grad_fitness_minus) / (2 * gradient_step) + num_evals += 2 + if num_evals >= self.budget: + return best_fitness, best_individual + + # Evolutionary operations + for i in range(self.population_size): + if num_evals >= self.budget: + break + # Mutation with gradient guidance + indices = [idx for idx in range(self.population_size) if idx != i] + a, b = np.random.choice(indices, 2, replace=False) + mutant = population[i] + Fs[i] * ( + best_individual - population[i] + population[a] - population[b] - gradients[i] + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/GIDE.py b/nevergrad/optimization/lama/GIDE.py new file mode 100644 index 000000000..7ddaa9285 --- /dev/null +++ b/nevergrad/optimization/lama/GIDE.py @@ -0,0 +1,71 @@ +import numpy as np + + +class GIDE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Adaptation parameters + mutation_factor = 0.8 + crossover_rate = 0.9 + learning_rate = 0.1 + + while num_evals < self.budget: + # Estimating the gradient based on the best individuals + gradients = np.zeros((population_size, self.dimension)) + elite_idx = np.argsort(fitness)[: population_size // 5] + + for i in elite_idx: + perturb = np.random.normal(0, 0.1, self.dimension) + perturbed_individual = np.clip(population[i] + perturb, self.lower_bound, self.upper_bound) + perturbed_fitness = func(perturbed_individual) + num_evals += 1 + + if num_evals >= self.budget: + break + + gradient_estimate = (perturbed_fitness - fitness[i]) / perturb + gradients[i] = -gradient_estimate + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Mutation + idxs = [idx for idx in range(population_size) if idx != i] + a, b = np.random.choice(idxs, 2, replace=False) + mutant = population[i] + mutation_factor * (population[a] - population[b]) + + # Gradient descent direction update + mutant += learning_rate * gradients[i] + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/GaussianAdaptivePSO.py b/nevergrad/optimization/lama/GaussianAdaptivePSO.py new file mode 100644 index 000000000..33a9f2d10 --- /dev/null +++ b/nevergrad/optimization/lama/GaussianAdaptivePSO.py @@ -0,0 +1,77 @@ +import numpy as np + + +class GaussianAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=300, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.0, + social_weight=2.0, + gradient_weight=0.1, + mutate_prob=0.2, + mutate_scale=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.gradient_weight = gradient_weight + self.mutate_prob = mutate_prob + self.mutate_scale = mutate_scale + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_reduction = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * self.inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.gradient_weight + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component + gradient_component + ) + + if np.random.rand() < self.mutate_prob: + velocities[i] += np.random.normal(0, self.mutate_scale, self.dim) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/GaussianEnhancedAdaptivePSO.py b/nevergrad/optimization/lama/GaussianEnhancedAdaptivePSO.py new file mode 100644 index 000000000..69e1b32a9 --- /dev/null +++ b/nevergrad/optimization/lama/GaussianEnhancedAdaptivePSO.py @@ -0,0 +1,78 @@ +import numpy as np + + +class GaussianEnhancedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.95, + final_inertia=0.35, + cognitive_weight=2.1, + social_weight=2.1, + mutate_prob=0.15, + mutate_scale=0.03, + gradient_weight=0.15, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.mutate_prob = mutate_prob + self.mutate_scale = mutate_scale + self.gradient_weight = gradient_weight + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_reduction = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * self.inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.gradient_weight + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component + gradient_component + ) + + if np.random.rand() < self.mutate_prob: + velocities[i] += np.random.normal(0, self.mutate_scale, self.dim) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/GradientAssistedDifferentialCrossover.py b/nevergrad/optimization/lama/GradientAssistedDifferentialCrossover.py new file mode 100644 index 000000000..4bcafab5c --- /dev/null +++ b/nevergrad/optimization/lama/GradientAssistedDifferentialCrossover.py @@ -0,0 +1,70 @@ +import numpy as np + + +class GradientAssistedDifferentialCrossover: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + elite_size = 10 + mutation_factor = 0.8 + crossover_rate = 0.9 + + # Initialize population and evaluate fitness + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Selection for differential evolution operations + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Mutation: Differential mutation + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Gradient assistance in selection + if np.random.rand() < 0.5: # With a 50% chance, refine using gradient information + grad_direction = trial - population[i] + grad_step = 0.1 * grad_direction # Step size + trial = np.clip(population[i] + grad_step, self.lb, self.ub) + + # Selection: Greedy selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + + # Update best found solution + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Elitism: Carry over a few best solutions directly + elite_indices = np.argsort(fitness)[:elite_size] + non_elite_population = [pop for idx, pop in enumerate(population) if idx not in elite_indices] + population = np.vstack( + (population[elite_indices], non_elite_population[: population_size - elite_size]) + ) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientBalancedEvolutionStrategy.py b/nevergrad/optimization/lama/GradientBalancedEvolutionStrategy.py new file mode 100644 index 000000000..151d96853 --- /dev/null +++ b/nevergrad/optimization/lama/GradientBalancedEvolutionStrategy.py @@ -0,0 +1,101 @@ +import numpy as np + + +class GradientBalancedEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + initial_step_size=1.0, + step_decay=0.95, + elite_ratio=0.2, + mutation_intensity=0.05, + local_search_prob=0.3, + refinement_steps=10, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, current_step_size): + mutation = np.random.normal(0, current_step_size * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual, current_step_size): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, current_step_size * 0.01, self.dimension), + self.bounds[0], + self.bounds[1], + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + current_step_size = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], current_step_size) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: + for idx in range(self.population_size): + if evaluations + self.refinement_steps > self.budget: + break + local_individual, local_fitness = self.local_search( + func, new_population[idx], current_step_size + ) + evaluations += self.refinement_steps + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/GradientBasedAdaptiveCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/GradientBasedAdaptiveCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..381134aee --- /dev/null +++ b/nevergrad/optimization/lama/GradientBasedAdaptiveCovarianceMatrixAdaptation.py @@ -0,0 +1,135 @@ +import numpy as np + + +class GradientBasedAdaptiveCovarianceMatrixAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.3, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.01, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + return x - self.learning_rate * grad + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to the best individual in the population + local_best = self.__gradient_local_search(func, global_best_position) + local_best_score = func(local_best) + evaluations += 1 + + if local_best_score < global_best_score: + global_best_score = local_best_score + global_best_position = local_best + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean_new = np.dot(np.ones(elite_count) / elite_count, elite_pop) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_count + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/GradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..2aca66580 --- /dev/null +++ b/nevergrad/optimization/lama/GradientBoostedMemoryAnnealing.py @@ -0,0 +1,137 @@ +import numpy as np + + +class GradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.98 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 3 # Initial exploration phase + phase2 = 2 * self.budget // 3 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.5 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.98 # Standard cooling rate + else: + beta = 2.0 # Higher acceptance for local search refinement + alpha = 0.95 # Faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/GradientEnhancedAdaptiveAnnealing.py b/nevergrad/optimization/lama/GradientEnhancedAdaptiveAnnealing.py new file mode 100644 index 000000000..d0d29a786 --- /dev/null +++ b/nevergrad/optimization/lama/GradientEnhancedAdaptiveAnnealing.py @@ -0,0 +1,125 @@ +import numpy as np + + +class GradientEnhancedAdaptiveAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=20, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/GradientEnhancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/GradientEnhancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..c2f803458 --- /dev/null +++ b/nevergrad/optimization/lama/GradientEnhancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,135 @@ +import numpy as np + + +class GradientEnhancedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = GradientEnhancedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/GradientEstimationSearch.py b/nevergrad/optimization/lama/GradientEstimationSearch.py new file mode 100644 index 000000000..bca14b596 --- /dev/null +++ b/nevergrad/optimization/lama/GradientEstimationSearch.py @@ -0,0 +1,53 @@ +import numpy as np + + +class GradientEstimationSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dimension = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialization + x_current = np.random.uniform(self.lower_bound, self.upper_bound, self.dimension) + f_current = func(x_current) + self.f_opt = f_current + self.x_opt = x_current + + # Small step for gradient approximation + epsilon = 1e-5 + learning_rate = 0.1 + + evaluations = 1 + + while evaluations < self.budget: + # Gradient estimation + gradients = np.zeros(self.dimension) + for i in range(self.dimension): + x_step = np.array(x_current) + x_step[i] += epsilon + f_step = func(x_step) + evaluations += 1 + gradients[i] = (f_step - f_current) / epsilon + + if evaluations >= self.budget: + break + + # Update the current point + x_new = x_current - learning_rate * gradients + # Maintain within bounds + x_new = np.clip(x_new, self.lower_bound, self.upper_bound) + f_new = func(x_new) + evaluations += 1 + + # Check if a new optimum has been found + if f_new < self.f_opt: + self.f_opt = f_new + self.x_opt = x_new + + # Update current position + x_current = x_new + f_current = f_new + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientGuidedClusterSearch.py b/nevergrad/optimization/lama/GradientGuidedClusterSearch.py new file mode 100644 index 000000000..6b488e1ca --- /dev/null +++ b/nevergrad/optimization/lama/GradientGuidedClusterSearch.py @@ -0,0 +1,68 @@ +import numpy as np + + +class GradientGuidedClusterSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial population + population_size = 15 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + + # Main algorithm loop + iteration = 0 + while iteration < self.budget: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = best_individual + + # Approximate gradient calculation using finite differences + gradients = [] + for i in range(population_size): + grad = np.zeros(self.dimension) + for d in range(self.dimension): + perturb = np.zeros(self.dimension) + perturb[d] = 0.01 # Perturbation value + perturbed_individual = population[i] + perturb + perturbed_individual = np.clip(perturbed_individual, self.lower_bound, self.upper_bound) + perturbed_fitness = func(perturbed_individual) + grad[d] = (perturbed_fitness - fitness[i]) / 0.01 + gradients.append(grad) + + # Use gradients to adjust positions + new_population = [] + for i in range(population_size): + step_size = 0.1 * (self.upper_bound - self.lower_bound) + new_individual = population[i] - step_size * gradients[i] + new_individual = np.clip(new_individual, self.lower_bound, self.upper_bound) + new_fitness = func(new_individual) + + if new_fitness < fitness[i]: + new_population.append(new_individual) + fitness[i] = new_fitness + else: + # Exploration with random mutation + random_individual = population[i] + np.random.normal(0, 1, self.dimension) + random_individual = np.clip(random_individual, self.lower_bound, self.upper_bound) + random_fitness = func(random_individual) + if random_fitness < fitness[i]: + new_population.append(random_individual) + fitness[i] = random_fitness + else: + new_population.append(population[i]) + + population = np.array(new_population) + iteration += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientGuidedDifferentialEvolution.py b/nevergrad/optimization/lama/GradientGuidedDifferentialEvolution.py new file mode 100644 index 000000000..1d253eb53 --- /dev/null +++ b/nevergrad/optimization/lama/GradientGuidedDifferentialEvolution.py @@ -0,0 +1,57 @@ +import numpy as np + + +class GradientGuidedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 300 # Adjusted population size for better coverage + self.F_base = 0.5 # Base mutation factor + self.F_max = 0.8 # Maximum mutation factor, reduced to control variation + self.CR = 0.85 # Crossover probability + self.alpha = 0.1 # Gradient exploitation factor + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + # Find the initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop within the budget constraint + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Calculate dynamic mutation factor with linear decay over iterations + F_dynamic = self.F_base + (self.F_max - self.F_base) * (1 - iteration / n_iterations) + for i in range(self.pop_size): + # Mutation using DE/rand/1/bin strategy + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F_dynamic * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Gradient exploitation + grad_direction = best_ind - pop[i] + grad_step = self.alpha * grad_direction + trial += grad_step + trial = np.clip(trial, -5.0, 5.0) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/GradientGuidedEvolutionStrategy.py b/nevergrad/optimization/lama/GradientGuidedEvolutionStrategy.py new file mode 100644 index 000000000..67f53022b --- /dev/null +++ b/nevergrad/optimization/lama/GradientGuidedEvolutionStrategy.py @@ -0,0 +1,57 @@ +import numpy as np + + +class GradientGuidedEvolutionStrategy: + def __init__(self, budget, dim=5, pop_size=50, tau=0.2, sigma=0.1, beta=0.07): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.tau = tau # Learning rate for step size adaptation + self.sigma = sigma # Initial step size + self.beta = beta # Gradient estimation perturbation magnitude + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, individual, sigma): + return np.clip(individual + sigma * np.random.randn(self.dim), self.bounds[0], self.bounds[1]) + + def estimate_gradient(self, func, individual, sigma): + grad = np.zeros(self.dim) + for i in range(self.dim): + perturb = np.zeros(self.dim) + perturb[i] = self.beta * sigma + f_plus = func(individual + perturb) + f_minus = func(individual - perturb) + grad[i] = (f_plus - f_minus) / (2 * self.beta * sigma) + return grad + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + sigma_values = np.full(self.pop_size, self.sigma) + + while n_evals < self.budget: + for idx in range(self.pop_size): + individual = population[idx] + sigma = sigma_values[idx] + grad = self.estimate_gradient(func, individual, sigma) + individual_new = np.clip(individual - sigma * grad, self.bounds[0], self.bounds[1]) + f_new = func(individual_new) + n_evals += 1 + if f_new < f_values[idx]: + population[idx] = individual_new + f_values[idx] = f_new + sigma_values[idx] *= np.exp(self.tau * np.linalg.norm(grad)) + else: + sigma_values[idx] *= np.exp(-self.tau) + + if n_evals >= self.budget: + break + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientGuidedHybridPSO.py b/nevergrad/optimization/lama/GradientGuidedHybridPSO.py new file mode 100644 index 000000000..f5172c308 --- /dev/null +++ b/nevergrad/optimization/lama/GradientGuidedHybridPSO.py @@ -0,0 +1,68 @@ +import numpy as np + + +class GradientGuidedHybridPSO: + def __init__( + self, + budget=10000, + population_size=50, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=1.2, + social_weight=1.0, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Decaying inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + # Compute gradient-guided component using difference from current to global best + gradient_guided_component = np.sign(global_best_position - particles[i]) * np.random.rand( + self.dim + ) + personal_component = r1 * (personal_best_positions[i] - particles[i]) + social_component = r2 * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * personal_component + + self.social_weight * social_component + + 0.5 * gradient_guided_component + ) # Hybridization with gradient direction + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/GradientInformedAdaptiveDirectionSearch.py b/nevergrad/optimization/lama/GradientInformedAdaptiveDirectionSearch.py new file mode 100644 index 000000000..d63167c71 --- /dev/null +++ b/nevergrad/optimization/lama/GradientInformedAdaptiveDirectionSearch.py @@ -0,0 +1,74 @@ +import numpy as np + + +class GradientInformedAdaptiveDirectionSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(len(x)): + x_step = np.array(x) + x_step[i] += epsilon + grad[i] = (func(x_step) - fx) / epsilon + return grad + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + self.alpha = 0.1 # Initial step size + self.beta = 0.5 # Contraction factor + self.gamma = 2.0 # Expansion factor + self.delta = 1e-5 # Small perturbation for escaping local optima + + x = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f = func(x) + evaluations = 1 + + while evaluations < self.budget: + # Approximate the gradient + grad = self.approximate_gradient(func, x) + direction = grad / (np.linalg.norm(grad) + 1e-8) # Normalize direction vector + + # Try expanding + x_new = x - self.gamma * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.gamma + else: + # Try contracting + x_new = x - self.beta * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.beta + else: + # Apply small perturbation to avoid getting stuck + direction = np.random.randn(self.dim) + direction /= np.linalg.norm(direction) # Normalize random direction + x_new = x + self.delta * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientInformedAdaptiveSearch.py b/nevergrad/optimization/lama/GradientInformedAdaptiveSearch.py new file mode 100644 index 000000000..2b0857e6e --- /dev/null +++ b/nevergrad/optimization/lama/GradientInformedAdaptiveSearch.py @@ -0,0 +1,72 @@ +import numpy as np + + +class GradientInformedAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(len(x)): + x_step = np.array(x) + x_step[i] += epsilon + grad[i] = (func(x_step) - fx) / epsilon + return grad + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + self.alpha = 0.1 # Initial step size + self.beta = 0.5 # Contraction factor + self.gamma = 1.5 # Expansion factor + self.delta = 1e-5 # Small perturbation for escaping local optima + + x = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f = func(x) + evaluations = 1 + + while evaluations < self.budget: + # Approximate the gradient + grad = self.approximate_gradient(func, x) + direction = grad / (np.linalg.norm(grad) + 1e-8) # Normalize direction vector + + # Try expanding + x_new = x - self.gamma * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.gamma + else: + # Try contracting + x_new = x - self.beta * self.alpha * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.beta + else: + # Apply small perturbation to avoid getting stuck + x_new = x + self.delta * np.random.randn(self.dim) + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GradientInformedParticleOptimizer.py b/nevergrad/optimization/lama/GradientInformedParticleOptimizer.py new file mode 100644 index 000000000..357538c70 --- /dev/null +++ b/nevergrad/optimization/lama/GradientInformedParticleOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class GradientInformedParticleOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.5 + self.global_coeff = 0.8 + self.local_coeff = 0.8 + self.inertia = 1.2 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/GradientSpiralDifferentialEnhancerV5.py b/nevergrad/optimization/lama/GradientSpiralDifferentialEnhancerV5.py new file mode 100644 index 000000000..91cacc9c3 --- /dev/null +++ b/nevergrad/optimization/lama/GradientSpiralDifferentialEnhancerV5.py @@ -0,0 +1,76 @@ +import numpy as np + + +class GradientSpiralDifferentialEnhancerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize parameters + population_size = 300 # Adjusted population size + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Spiral and mutation parameters + min_radius = 0.0005 # More refined local search + max_radius = 3.0 # Reduced initial radius for finer search space exploration + radius_decay = 0.995 # Slower decay rate + mutation_factor = 0.5 # Further refinement in mutation for controlled explorations + crossover_probability = 0.85 # Adjusted crossover probability + + # Gradient refinement steps + step_size = 0.005 # Decreased step size for ultra-fine tuning + gradient_steps = 50 # Increased gradient steps for deeper local optimization + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential evolution mutation + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover operation + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral dynamic integration + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Gradient descent-like local search with reduced steps and size + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation of the new solution + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + # Population update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/GravitationalSwarmIntelligence.py new file mode 100644 index 000000000..921d7d64f --- /dev/null +++ b/nevergrad/optimization/lama/GravitationalSwarmIntelligence.py @@ -0,0 +1,63 @@ +import numpy as np + + +class GravitationalSwarmIntelligence: + def __init__(self, budget=1000, population_size=20, G0=100.0, alpha=0.1, beta=0.9): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta = beta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, f, F): + return x + F + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], best_pos, F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + for i in range(self.population_size): + if np.random.rand() < self.beta: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], population[random_index], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.G0 * np.exp(-self.alpha * t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GreedyDiversityMultiStrategySADE.py b/nevergrad/optimization/lama/GreedyDiversityMultiStrategySADE.py new file mode 100644 index 000000000..97c39f262 --- /dev/null +++ b/nevergrad/optimization/lama/GreedyDiversityMultiStrategySADE.py @@ -0,0 +1,125 @@ +import numpy as np + + +class GreedyDiversityMultiStrategySADE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Greedy Selection + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + + if evaluations >= self.budget: + break + + # Diversity Maintenance + diversity = np.mean(np.std(population, axis=0)) + if diversity < 1e-5: + population, fitness = initialize_population() + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GreedyDynamicMultiStrategyDE.py b/nevergrad/optimization/lama/GreedyDynamicMultiStrategyDE.py new file mode 100644 index 000000000..de7fee9a5 --- /dev/null +++ b/nevergrad/optimization/lama/GreedyDynamicMultiStrategyDE.py @@ -0,0 +1,116 @@ +import numpy as np + + +class GreedyDynamicMultiStrategyDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + initial_population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(size): + population = np.random.uniform(bounds[0], bounds[1], (size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(F_values.size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind, size): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population(initial_population_size) + population_size = initial_population_size + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population_size = min(int(population_size * 1.5), 100) # dynamically increase population size + population, fitness = local_restart(best_ind, population_size) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/GuidedEvolutionStrategy.py b/nevergrad/optimization/lama/GuidedEvolutionStrategy.py new file mode 100644 index 000000000..4adc8a18e --- /dev/null +++ b/nevergrad/optimization/lama/GuidedEvolutionStrategy.py @@ -0,0 +1,60 @@ +import numpy as np + + +class GuidedEvolutionStrategy: + def __init__( + self, budget, dimension=5, population_size=50, sigma=0.5, learning_rate=0.7, mutation_probability=0.1 + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.sigma = sigma + self.learning_rate = learning_rate + self.mutation_probability = mutation_probability + + def __call__(self, func): + # Initialize the population and the best solution found + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + f_opt = np.Inf + x_opt = None + + # Track the number of function evaluations + evaluations = 0 + + while evaluations < self.budget: + # Evaluate the current population + fitness = np.array([func(individual) for individual in population]) + evaluations += len(population) + + # Find the best individual in the current population + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + best_individual = population[best_index] + + # Update the global best if found a new best + if best_fitness < f_opt: + f_opt = best_fitness + x_opt = best_individual + + # Generate new individuals by mutation and recombination + new_population = [] + for _ in range(self.population_size): + if np.random.rand() < self.mutation_probability: + # Mutation: add Gaussian noise + individual = best_individual + np.random.normal(0, self.sigma, self.dimension) + else: + # Recombination: crossover between two random individuals + parents = population[np.random.choice(self.population_size, 2, replace=False)] + crossover_point = np.random.randint(0, self.dimension) + individual = np.concatenate((parents[0][:crossover_point], parents[1][crossover_point:])) + + # Make sure the individuals are within bounds + individual = np.clip(individual, -5.0, 5.0) + new_population.append(individual) + + population = np.array(new_population) + + # Reduce the mutation size over time to allow fine-tuning + self.sigma *= self.learning_rate + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/GuidedMutationOptimizer.py b/nevergrad/optimization/lama/GuidedMutationOptimizer.py new file mode 100644 index 000000000..982491311 --- /dev/null +++ b/nevergrad/optimization/lama/GuidedMutationOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class GuidedMutationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 200 # Further increased population size for enhanced exploration + mutation_factor = 0.8 # Initial mutation factor set higher to promote diverse searching + crossover_prob = 0.7 # Initial crossover probability + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Use guided mutation based on the best solution + indices = np.arange(population_size) + indices = np.delete(indices, i) + x1, x2, x3 = population[np.random.choice(indices, 3, replace=False)] + + # Mutation: guided by best and random selection + mutant = best_solution + mutation_factor * (x1 - best_solution + x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + best_index = np.argmin(fitness) + + # Adaptive parameters adjustment + mutation_factor = max(0.1, mutation_factor - 0.02) # Gradually decrease mutation factor + crossover_prob = min(0.9, crossover_prob + 0.02) # Gradually increase crossover probability + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HADE.py b/nevergrad/optimization/lama/HADE.py new file mode 100644 index 000000000..4922792dd --- /dev/null +++ b/nevergrad/optimization/lama/HADE.py @@ -0,0 +1,62 @@ +import numpy as np + + +class HADE: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR_init=0.9, CR_end=0.5): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR_init = CR_init # Initial crossover probability + self.CR_end = CR_end # Final crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Adaptive F and CR scaling based on the linear progression from initial to end value + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + CR_current = self.CR_init + (self.CR_end - self.CR_init) * (evaluations / self.budget) + + for i in range(self.population_size): + # Select three random distinct indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Guided mutation: DE/current-to-best/1 with an additional random differential vector + best = population[np.argmin(fitness)] + mutant = ( + x1 + + F_current * (best - x1 + x2 - x3) + + F_current * (np.random.uniform(self.bounds[0], self.bounds[1], self.dimension) - x1) + ) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < CR_current, mutant, population[i]) + + # Evaluate the new candidate + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HADEEM.py b/nevergrad/optimization/lama/HADEEM.py new file mode 100644 index 000000000..2d8233968 --- /dev/null +++ b/nevergrad/optimization/lama/HADEEM.py @@ -0,0 +1,76 @@ +import numpy as np + + +class HADEEM: + def __init__(self, budget, population_size=50, F_base=0.5, CR_base=0.8): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Initial base for differential weight + self.CR_base = CR_base # Initial base for crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Initialize adaptive parameters + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + memory = np.zeros(self.population_size) # Memory for adaptive adjustments + + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation indices + idxs = np.random.choice( + [idx for idx in range(self.population_size) if idx != i], 3, replace=False + ) + best_idx = np.argmin(fitness[idxs]) + a, b, c = population[idxs[best_idx]], population[idxs[1]], population[idxs[2]] + + # Mutation: DE/rand/1/bin scheme + mutant = np.clip(a + F[i] * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + memory[i] += 1 # Increment success memory + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + memory[i] -= 1 # Decay memory on failure + + # Adaptive parameter tuning based on memory + if memory[i] > 2: + F[i] = min(F[i] * 1.1, 1) + CR[i] = min(CR[i] * 1.1, 1) + elif memory[i] < -2: + F[i] = max(F[i] * 0.9, 0.1) + CR[i] = max(CR[i] * 0.8, 0.1) + + # Reset memory if extremes are achieved + if memory[i] > 5 or memory[i] < -5: + memory[i] = 0 + F[i] = self.F_base + CR[i] = self.CR_base + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HADEMI.py b/nevergrad/optimization/lama/HADEMI.py new file mode 100644 index 000000000..670552d93 --- /dev/null +++ b/nevergrad/optimization/lama/HADEMI.py @@ -0,0 +1,94 @@ +import numpy as np + + +class HADEMI: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.85, + F_base=0.5, + F_amp=0.4, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor that changes dynamically + F = self.F_base + self.F_amp * np.abs(np.sin(2 * np.pi * evaluations / self.budget)) + + # Mutation: Selecting three random individuals and calculating the mutant vector + idxs = [idx for idx in range(self.population_size) if idx != i] + random_indices = np.random.choice(idxs, 3, replace=False) + a, b, c = population[random_indices] + + # Include best or elite in mutation strategy + best_or_elite = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory + mem_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[mem_idx]: + memory[mem_idx] = population[i] + memory_fitness[mem_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HAVCDE.py b/nevergrad/optimization/lama/HAVCDE.py new file mode 100644 index 000000000..e1a194d8a --- /dev/null +++ b/nevergrad/optimization/lama/HAVCDE.py @@ -0,0 +1,71 @@ +import numpy as np + + +class HAVCDE: + def __init__( + self, budget, population_size=200, F_base=0.5, CR_base=0.9, adapt_rate=0.1, cluster_threshold=0.2 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.adapt_rate = adapt_rate # Adaptation rate for parameters + self.cluster_threshold = cluster_threshold # Threshold to trigger clustering and exploitation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Adaptive F and CR + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.1, 1.0) + + # Clustering phase based on fitness distribution + if np.std(fitness) < self.cluster_threshold: + # Focusing search around the best individual + mean_sol = np.mean(population, axis=0) + population = np.clip( + mean_sol + 0.1 * (np.random.rand(self.population_size, self.dimension) - 0.5), + self.lb, + self.ub, + ) + + for i in range(self.population_size): + # Mutation using "current-to-best/2" strategy + indices = [idx for idx in range(self.population_size) if idx != i] + a, b = np.random.choice(indices, 2, replace=False) + mutant = population[i] + Fs[i] * ( + best_individual - population[i] + population[a] - population[b] + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + if num_evals >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HEAS.py b/nevergrad/optimization/lama/HEAS.py new file mode 100644 index 000000000..ef6909213 --- /dev/null +++ b/nevergrad/optimization/lama/HEAS.py @@ -0,0 +1,89 @@ +import numpy as np + + +class HEAS: + def __init__(self, budget): + self.budget = budget + self.population_size = 20 + self.dimension = 5 + self.low = -5.0 + self.high = 5.0 + self.archive = [] + self.archive_max_size = 50 # reduced size to keep only relevant solutions + + def initialize(self): + population = np.random.uniform(self.low, self.high, (self.population_size, self.dimension)) + F = np.random.normal(0.5, 0.1, self.population_size) + CR = np.random.normal(0.9, 0.05, self.population_size) + return population, F, CR + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, population, F): + mutant = np.zeros_like(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant[i] = np.clip(mutant_vector, self.low, self.high) + return mutant + + def crossover(self, population, mutant, CR): + crossover = np.where( + np.random.rand(self.population_size, self.dimension) < CR[:, None], mutant, population + ) + return crossover + + def select(self, population, fitness, trial_population, trial_fitness, F, CR): + improved = trial_fitness < fitness + population[improved] = trial_population[improved] + fitness[improved] = trial_fitness[improved] + + # Adapt F and CR with history-based adaptation + F[improved] = np.clip(F[improved] * 1.1, 0.1, 1.0) + CR[improved] = np.clip(CR[improved] * 0.95, 0.1, 1.0) + F[~improved] = np.clip(F[~improved] * 0.9, 0.1, 1.0) + CR[~improved] = np.clip(CR[~improved] * 1.05, 0.1, 1.0) + + return population, fitness, F, CR + + def local_search(self, individual, func): + T = 1.0 + decay = 0.99 + for _ in range(10): + neighbor = individual + np.random.normal(0, T, self.dimension) + neighbor = np.clip(neighbor, self.low, self.high) + if func(neighbor) < func(individual): + individual = neighbor + T *= decay + return individual + + def __call__(self, func): + population, F, CR = self.initialize() + fitness = self.evaluate(population, func) + iterations = self.budget // (self.population_size + 10) # account for local searches + + for _ in range(iterations): + mutant = self.mutation(population, F) + trial_population = self.crossover(population, mutant, CR) + trial_fitness = self.evaluate(trial_population, func) + population, fitness, F, CR = self.select( + population, fitness, trial_population, trial_fitness, F, CR + ) + + # Local search phase + selected_indices = np.random.choice(self.population_size, size=5, replace=False) + for idx in selected_indices: + population[idx] = self.local_search(population[idx], func) + fitness[idx] = func(population[idx]) + self.archive.append(population[idx].copy()) # update archive with locally searched solutions + + # Maintain a relevant archive + if len(self.archive) > self.archive_max_size: + self.archive = self.archive[-self.archive_max_size :] + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HarmonyFireworkOptimizer.py b/nevergrad/optimization/lama/HarmonyFireworkOptimizer.py new file mode 100644 index 000000000..d37769af1 --- /dev/null +++ b/nevergrad/optimization/lama/HarmonyFireworkOptimizer.py @@ -0,0 +1,54 @@ +import numpy as np + + +class HarmonyFireworkOptimizer: + def __init__(self, budget=10000, population_size=20, dim=5, bw=0.01, sr=0.1, amp_min=0.1, amp_max=1.0): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.bw = bw # bandwidth for mutation + self.sr = sr # success rate of mutation + self.amp_min = amp_min # minimum explosion amplitude + self.amp_max = amp_max # maximum explosion amplitude + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.inf + self.best_solution = None + + def calculate_fitness(self, func, solution): + return func(solution) + + def mutate_solution(self, solution): + mutated_solution = np.clip(solution + np.random.normal(0, self.bw, self.dim), -5.0, 5.0) + return mutated_solution + + def firework_explosion(self, solution): + explosion_amp = np.random.uniform(self.amp_min, self.amp_max) + new_solution = solution + np.random.uniform(-1, 1, self.dim) * explosion_amp + return new_solution + + def update_population(self, func): + for i in range(self.population_size): + mutated_solution = self.mutate_solution(self.population[i]) + if np.random.rand() < self.sr: + new_solution = mutated_solution + else: + new_solution = self.firework_explosion(self.population[i]) + + new_fitness = self.calculate_fitness(func, new_solution) + if new_fitness < self.calculate_fitness(func, self.population[i]): + self.population[i] = new_solution + + if new_fitness < self.best_fitness: + self.best_fitness = new_fitness + self.best_solution = new_solution + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/HarmonyTabuOptimization.py b/nevergrad/optimization/lama/HarmonyTabuOptimization.py new file mode 100644 index 000000000..45e628132 --- /dev/null +++ b/nevergrad/optimization/lama/HarmonyTabuOptimization.py @@ -0,0 +1,55 @@ +import numpy as np + + +class HarmonyTabuOptimization: + def __init__(self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=5): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < 0.5: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + else: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + + for _ in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution(harmony_memory, bounds, tabu_list) + if new_solution_str not in tabu_list: + harmony_memory[np.argmax([func(h) for h in harmony_memory])] = new_solution + self.update_tabu_list(tabu_list, new_solution_str) + + best_index = np.argmin([func(h) for h in harmony_memory]) + if func(harmony_memory[best_index]) < self.f_opt: + self.f_opt = func(harmony_memory[best_index]) + self.x_opt = harmony_memory[best_index] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HierarchicalAdaptiveAnnealing.py b/nevergrad/optimization/lama/HierarchicalAdaptiveAnnealing.py new file mode 100644 index 000000000..59a9dbdaf --- /dev/null +++ b/nevergrad/optimization/lama/HierarchicalAdaptiveAnnealing.py @@ -0,0 +1,108 @@ +import numpy as np + + +class HierarchicalAdaptiveAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 12 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Hierarchical adjustment: Reassign alpha and beta based on performance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 2.5 # Higher acceptance probability for diverse solutions + alpha = 0.97 # Balanced cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.0 # Slightly higher acceptance to refine local search + alpha = 0.94 # Faster cooling for final convergence + + T *= alpha + + # Periodic gradient-based local refinement of the best memory solution + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=5, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/HierarchicalAdaptiveCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/HierarchicalAdaptiveCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..0b7b72c68 --- /dev/null +++ b/nevergrad/optimization/lama/HierarchicalAdaptiveCovarianceMatrixAdaptation.py @@ -0,0 +1,112 @@ +import numpy as np + + +class HierarchicalAdaptiveCovarianceMatrixAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.3, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean_new = np.dot(np.ones(elite_count) / elite_count, elite_pop) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_count + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HierarchicalAdaptiveSearch.py b/nevergrad/optimization/lama/HierarchicalAdaptiveSearch.py new file mode 100644 index 000000000..21f73a308 --- /dev/null +++ b/nevergrad/optimization/lama/HierarchicalAdaptiveSearch.py @@ -0,0 +1,118 @@ +import numpy as np + + +class HierarchicalAdaptiveSearch: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.crossover_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.learning_rate = 0.01 * np.exp(-iteration / max_iterations) + + def hierarchical_sampling(self, pop, scale): + new_pop = np.copy(pop) + for i in range(self.population_size): + perturbation = np.random.normal(0, scale, size=pop[i].shape) + new_pop[i] = np.clip(pop[i] + perturbation, -5.0, 5.0) + return new_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = (self.budget // self.population_size) * 2 + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + scale = 1.0 - (iteration / max_iterations) # Multiscale sampling rate + + # Apply hierarchical sampling with small and large perturbations + if iteration % 2 == 0: + pop = self.hierarchical_sampling(pop, scale * 0.5) + else: + pop = self.hierarchical_sampling(pop, scale * 1.5) + scores = np.array([func(ind) for ind in pop]) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step + pop, scores = self.local_search(func, pop, scores) + evaluations += self.population_size + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HierarchicalDiversityEnhancedCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/HierarchicalDiversityEnhancedCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..79148d3c3 --- /dev/null +++ b/nevergrad/optimization/lama/HierarchicalDiversityEnhancedCovarianceMatrixAdaptation.py @@ -0,0 +1,146 @@ +import numpy as np + + +class HierarchicalDiversityEnhancedCovarianceMatrixAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + return x - self.learning_rate * grad + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to the best individual in the population + local_best = self.__gradient_local_search(func, global_best_position) + local_best_score = func(local_best) + evaluations += 1 + + if local_best_score < global_best_score: + global_best_score = local_best_score + global_best_position = local_best + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HighPerformanceAdaptiveDifferentialSearch.py b/nevergrad/optimization/lama/HighPerformanceAdaptiveDifferentialSearch.py new file mode 100644 index 000000000..d69f97049 --- /dev/null +++ b/nevergrad/optimization/lama/HighPerformanceAdaptiveDifferentialSearch.py @@ -0,0 +1,132 @@ +import numpy as np + + +class HighPerformanceAdaptiveDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 500 + self.F = 0.8 + self.CR = 0.9 + self.local_search_chance = 0.3 + self.elite_ratio = 0.1 + self.diversity_threshold = 0.1 + self.cauchy_step_scale = 0.03 + self.gaussian_step_scale = 0.01 + self.reinitialization_rate = 0.2 + self.hyper_heuristic_probability = 0.5 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Adaptive F and CR + self.F = 0.5 + 0.3 * np.random.rand() + self.CR = 0.8 + 0.2 * np.random.rand() + + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + elif np.random.rand() < self.hyper_heuristic_probability: + candidate = self.hyper_heuristic(population, fitness, i, func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def hyper_heuristic(self, population, fitness, i, func): + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(candidate, func) + + return candidate + + def adaptive_population_reinitialization(self, population, evaluations): + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/HyGDAE.py b/nevergrad/optimization/lama/HyGDAE.py new file mode 100644 index 000000000..a08cc69c1 --- /dev/null +++ b/nevergrad/optimization/lama/HyGDAE.py @@ -0,0 +1,73 @@ +import numpy as np + + +class HyGDAE: + def __init__(self, budget, population_size=200, F_base=0.5, CR_base=0.85, mutation_strategy="rand/1/bin"): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + self.mutation_strategy = mutation_strategy + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + num_evals = self.population_size + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Adaptive mutation parameters with Gaussian perturbation + F = np.clip(np.random.normal(self.F_base, 0.1), 0.1, 1.0) + CR = np.clip(np.random.normal(self.CR_base, 0.05), 0.1, 1.0) + + # Mutation using strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 3, replace=False) + + if self.mutation_strategy == "rand/1/bin": + a, b, c = population[chosen] + mutant = a + F * (b - c) + elif self.mutation_strategy == "best/1/bin": + a, b = population[chosen[:2]] + mutant = best_individual + F * (a - b) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HybridAdaptiveCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..bee08596d --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,122 @@ +import numpy as np + + +class HybridAdaptiveCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased population size + self.sigma = 0.2 # Step size + self.c1 = 0.05 # Learning rate for rank-one update + self.cmu = 0.01 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 # Further fine-tuned learning rate for mutation adaptability + self.elitism_rate = 0.1 + self.eval_count = 0 + self.F = 0.5 # Differential weight + self.CR = 0.9 # Crossover probability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridAdaptiveCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridAdaptiveCrossoverElitistStrategyV10.py b/nevergrad/optimization/lama/HybridAdaptiveCrossoverElitistStrategyV10.py new file mode 100644 index 000000000..39e03401a --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveCrossoverElitistStrategyV10.py @@ -0,0 +1,80 @@ +import numpy as np + + +class HybridAdaptiveCrossoverElitistStrategyV10: + def __init__( + self, + budget, + dimension=5, + population_size=300, + elite_fraction=0.1, + mutation_intensity=0.02, + crossover_rate=0.88, + adaptive_intensity=0.8, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_intensity = adaptive_intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform hybrid crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.hybrid_recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.adaptive_mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutate(self, individual, evaluations): + # Adaptive mutation intensity based on normalized evaluations + normalized_time = evaluations / self.budget + intensity = self.mutation_intensity * np.exp(-normalized_time * 10) + return individual + np.random.normal(0, intensity, self.dimension) + + def hybrid_recombine(self, parent1, parent2, evaluations): + # Blend between parents with adaptive depth based on evaluations + normalized_time = evaluations / self.budget + alpha = ( + np.random.uniform(0.3, 0.7) * (1 - normalized_time) + normalized_time * self.adaptive_intensity + ) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/HybridAdaptiveDE.py b/nevergrad/optimization/lama/HybridAdaptiveDE.py new file mode 100644 index 000000000..660283128 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDE.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridAdaptiveDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 20 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation and crossover factors + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Hybrid mutation strategy based on generation count + if generation % 3 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + elif generation % 3 == 1: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..a7fda77c0 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolution.py @@ -0,0 +1,80 @@ +import numpy as np + + +class HybridAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.inf + + eval_count = 0 + phase_switch_threshold = self.budget // 2 + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR + adaptive_F = initial_F * (1 - eval_count / budget) + adaptive_CR = initial_CR * np.cos(np.pi * eval_count / (2 * budget)) + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if eval_count >= phase_switch_threshold: + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + new_population[i] = candidate + else: + new_population[i] = trial + + # Combine exploration and exploitation phases + if eval_count >= phase_switch_threshold: + # Combine with Quantum-inspired with a probability + for i in range(population_size): + if np.random.rand() < 0.5: + new_population[i] = quantum_position_update(new_population[i], best_position) + + # Update population for the next iteration + population = new_population + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridAdaptiveDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning.py b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning.py new file mode 100644 index 000000000..ecf7d35c3 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning.py @@ -0,0 +1,124 @@ +import numpy as np + + +class HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters dynamically + F_values[i] = np.random.uniform(0.5, 1) # Randomize F between 0.5 and 1 + CR_values[i] = np.random.uniform(0.1, 1) # Randomize CR between 0.1 and 1 + else: + F_values[i] = np.random.uniform(0.5, 0.9) # Maintain variability + CR_values[i] = np.random.uniform(0.1, 0.9) # Maintain variability + + if self.eval_count >= global_search_budget: + break + + # Elitism: Keep the best individuals + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + if self.eval_count >= global_search_budget: + break + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch.py b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch.py new file mode 100644 index 000000000..b96562b4e --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch.py @@ -0,0 +1,150 @@ +import numpy as np + + +class HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-5 # Threshold to restart the population + self.learning_rate = 0.1 # Learning rate for elite learning + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(5): # Make a small number of local perturbations + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + # Apply elitism: retain the top performing individuals + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + # Update memory for F and CR + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveDifferentialQuantumSearch.py b/nevergrad/optimization/lama/HybridAdaptiveDifferentialQuantumSearch.py new file mode 100644 index 000000000..bbab66924 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDifferentialQuantumSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class HybridAdaptiveDifferentialQuantumSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 2.0 + social_coefficient = 2.0 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 20 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveDifferentialSwarm.py b/nevergrad/optimization/lama/HybridAdaptiveDifferentialSwarm.py new file mode 100644 index 000000000..a8d3a0946 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDifferentialSwarm.py @@ -0,0 +1,97 @@ +import numpy as np + + +class HybridAdaptiveDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 60 + inertia_weight = 0.5 + cognitive_coefficient = 1.5 + social_coefficient = 1.5 + differential_weight = 0.9 + crossover_rate = 0.8 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + # Particle Swarm Optimization Part + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential Evolution Part + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = HybridAdaptiveDifferentialSwarm(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/HybridAdaptiveDiversityMaintainingGradientEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveDiversityMaintainingGradientEvolution.py new file mode 100644 index 000000000..7ddfa30b2 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDiversityMaintainingGradientEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridAdaptiveDiversityMaintainingGradientEvolution: + def __init__(self, budget, initial_population_size=20): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = initial_population_size + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + self.diversity_threshold = 1e-3 + self.elite_rate = 0.2 # Proportion of elite members in selection + self.local_search_rate = 0.3 # Probability to perform local search + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + def elite_selection(population, fitness): + elite_count = int(self.elite_rate * len(fitness)) + sorted_indices = np.argsort(fitness) + elite_indices = sorted_indices[:elite_count] + return [population[i] for i in elite_indices], [fitness[i] for i in elite_indices] + + def local_search(x): + grad = gradient_estimate(x) + step = -self.base_lr * grad + new_x = x + step + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + return new_x + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + # Elite selection + elite_pop, elite_fit = elite_selection(population, fitness) + elite_size = len(elite_pop) + + if np.random.rand() < self.local_search_rate: + # Local search + local_idx = np.random.choice(range(elite_size), size=1)[0] + child = local_search(elite_pop[local_idx]) + else: + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(elite_size), size=2, replace=False) + parent1, parent2 = elite_pop[parents_idx[0]], elite_pop[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, i) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, i, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worst member of the population with the new child + worst_idx = np.argmax(fitness) + population[worst_idx] = new_x + fitness[worst_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridAdaptiveDiversityMaintainingGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridAdaptiveDualPhaseStrategyV6.py b/nevergrad/optimization/lama/HybridAdaptiveDualPhaseStrategyV6.py new file mode 100644 index 000000000..d2fe82963 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveDualPhaseStrategyV6.py @@ -0,0 +1,66 @@ +import numpy as np + + +class HybridAdaptiveDualPhaseStrategyV6: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adjust_parameters(self, iteration, total_iterations): + scale = np.sin(np.pi * iteration / total_iterations) + self.F = 0.5 + 0.5 * scale + self.CR = 0.8 * scale + 0.2 + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(evaluations, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HybridAdaptiveEvolutionaryOptimizer.py b/nevergrad/optimization/lama/HybridAdaptiveEvolutionaryOptimizer.py new file mode 100644 index 000000000..cfcb58cdf --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveEvolutionaryOptimizer.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridAdaptiveEvolutionaryOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.8 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 10 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + phase_one_budget = int(self.budget * 0.3) # Increase exploration phase budget + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(-1, 1, self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (phase_one_budget - eval_count) / phase_one_budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + if neighbor_fitness < fitness[i]: + new_population[i] = neighbor + fitness[i] = neighbor_fitness + if neighbor_fitness < best_fitness: + best_individual = neighbor + best_fitness = neighbor_fitness + + if eval_count >= phase_one_budget: + break + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/HybridAdaptiveExplorationOptimizer.py b/nevergrad/optimization/lama/HybridAdaptiveExplorationOptimizer.py new file mode 100644 index 000000000..214a5a1a7 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveExplorationOptimizer.py @@ -0,0 +1,166 @@ +import numpy as np + + +class HybridAdaptiveExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = HybridAdaptiveExplorationOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizer.py b/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizer.py new file mode 100644 index 000000000..671c4ce56 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizer.py @@ -0,0 +1,139 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridAdaptiveGeneticSwarmOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.9 + self.crossover_prob = 0.8 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.5 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + + phase_one_budget = int(self.budget * 0.5) + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm Strategy + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + # Particle Swarm Optimization Strategy + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (best_individual - population[i]) + + self.social_coeff * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizerV2.py b/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizerV2.py new file mode 100644 index 000000000..36422253e --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveGeneticSwarmOptimizerV2.py @@ -0,0 +1,134 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridAdaptiveGeneticSwarmOptimizerV2: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.85 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.8 + self.social_coeff = 1.8 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (best_individual - population[i]) + + self.social_coeff * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/HybridAdaptiveGradientPSO.py b/nevergrad/optimization/lama/HybridAdaptiveGradientPSO.py new file mode 100644 index 000000000..97ec0f21f --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveGradientPSO.py @@ -0,0 +1,90 @@ +import numpy as np + + +class HybridAdaptiveGradientPSO: + def __init__( + self, + budget=10000, + population_size=200, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.5, + social_weight=2.5, + gradient_weight=0.05, + mutation_rate=0.1, + mutation_intensity=0.03, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.gradient_weight = gradient_weight + self.mutation_rate = mutation_rate + self.mutation_intensity = mutation_intensity + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + + for i in range(self.population_size): + r1, r2, r3 = np.random.rand(), np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + gradient_step = ( + r3 + * self.gradient_weight + * (particles[i] - global_best_position) + / np.linalg.norm(particles[i] - global_best_position + 1e-8) + ) + + # Mutation with a certain probability + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_intensity, self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + mutation_vector + ) + else: + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + - gradient_step + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/HybridAdaptiveHarmonicFireworksTabuSearch.py b/nevergrad/optimization/lama/HybridAdaptiveHarmonicFireworksTabuSearch.py new file mode 100644 index 000000000..1c623b4c6 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveHarmonicFireworksTabuSearch.py @@ -0,0 +1,111 @@ +import numpy as np + + +class HybridAdaptiveHarmonicFireworksTabuSearch: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 + self.bandwidth *= 0.95 + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def enhance_search(self, harmony_memory, best_solution, func, bounds): + self.diversify_search(harmony_memory, bounds) + self.local_search(harmony_memory, best_solution, func, bounds) + + def hybrid_search(self, harmony_memory, best_solution, func, bounds): + self.enhance_search(harmony_memory, best_solution, func, bounds) + self.adaptive_tabu_search(harmony_memory, best_solution, func, bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.hybrid_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/HybridAdaptiveMemeticAlgorithm.py b/nevergrad/optimization/lama/HybridAdaptiveMemeticAlgorithm.py new file mode 100644 index 000000000..a57eafbb4 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMemeticAlgorithm.py @@ -0,0 +1,97 @@ +import numpy as np + + +class HybridAdaptiveMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func): + """Simple local search around a point""" + best_x = x + best_f = func(x) + for i in range(10): + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation and Crossover using Differential Evolution + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < CR[i] + if not np.any(crossover_points): + crossover_points[np.random.randint(0, self.dim)] = True + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + F[i] = F[i] + 0.1 * (np.random.rand() - 0.5) + F[i] = np.clip(F[i], 0.5, 1.0) + CR[i] = CR[i] + 0.1 * (np.random.rand() - 0.5) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.1: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Periodically introduce new random solutions (to avoid local optima) + if evaluations % (population_size // 2) == 0: + new_population = np.random.uniform(self.lb, self.ub, (population_size // 5, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += len(new_fitness) + + # Replace worst individuals with new random individuals + worst_indices = fitness.argsort()[-(population_size // 5) :] + population[worst_indices] = new_population + fitness[worst_indices] = new_fitness + + # Reinitialize strategy parameters for new individuals + F[worst_indices] = np.random.uniform(0.5, 1.0, population_size // 5) + CR[worst_indices] = np.random.uniform(0.1, 0.9, population_size // 5) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism.py b/nevergrad/optimization/lama/HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism.py new file mode 100644 index 000000000..eac0cb236 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism.py @@ -0,0 +1,128 @@ +import numpy as np + + +class HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + # Adaptive Elitism parameter + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Adaptive Elitism: Adjust elite size based on convergence rate + elite_size = max( + 1, int(self.elite_fraction * self.pop_size * (1 - self.eval_count / global_search_budget)) + ) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + if self.eval_count >= global_search_budget: + break + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveMemeticOptimizerV4.py b/nevergrad/optimization/lama/HybridAdaptiveMemeticOptimizerV4.py new file mode 100644 index 000000000..5b0b09cc5 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMemeticOptimizerV4.py @@ -0,0 +1,139 @@ +import numpy as np + + +class HybridAdaptiveMemeticOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.memory_size = 20 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 50 + self.elitism_rate = 0.3 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.8 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveMemoryAnnealing.py b/nevergrad/optimization/lama/HybridAdaptiveMemoryAnnealing.py new file mode 100644 index 000000000..0dd46e7c6 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMemoryAnnealing.py @@ -0,0 +1,72 @@ +import numpy as np + + +class HybridAdaptiveMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta for better exploration-exploitation balance + if evaluations < self.budget / 4: + beta = 2.0 # Higher exploration phase + elif evaluations < self.budget / 2: + beta = 1.5 # Balanced phase + elif evaluations < 3 * self.budget / 4: + beta = 1.0 # Transition to exploitation + else: + beta = 2.5 # Higher acceptance for local search refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolution.py new file mode 100644 index 000000000..00b47da85 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolution.py @@ -0,0 +1,104 @@ +import numpy as np + + +class HybridAdaptiveMultiPhaseEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Balanced memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolutionV2.py b/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolutionV2.py new file mode 100644 index 000000000..81ec355bd --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveMultiPhaseEvolutionV2.py @@ -0,0 +1,104 @@ +import numpy as np + + +class HybridAdaptiveMultiPhaseEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.95 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/HybridAdaptiveNesterovSynergy.py b/nevergrad/optimization/lama/HybridAdaptiveNesterovSynergy.py new file mode 100644 index 000000000..5b065abb8 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveNesterovSynergy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class HybridAdaptiveNesterovSynergy: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.9, + quantum_influence_rate=0.2, + adaptive_lr_factor=0.97, + elite_fraction=0.3, + noise_factor=0.2, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.elite_fraction = elite_fraction + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_factor = noise_factor + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_fraction), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_fraction), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_fraction), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_influence_rate: + self.population[i] += np.random.normal(0, self.noise_factor, self.dim) * ( + global_best - self.population[i] + ) + + noise = np.random.normal(0, 1, self.dim) + self.velocities[i] = self.momentum * self.velocities[i] - self.learning_rate * noise + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + self.learning_rate *= self.adaptive_lr_factor + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/HybridAdaptiveOptimization.py b/nevergrad/optimization/lama/HybridAdaptiveOptimization.py new file mode 100644 index 000000000..b1ffe0bdc --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveOptimization.py @@ -0,0 +1,87 @@ +import numpy as np + + +class HybridAdaptiveOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + self.population_size = 50 # Adjusted population size for balance + self.mutation_factor = 0.5 + self.crossover_rate = 0.9 + self.local_search_prob = 0.5 # Higher probability for local search + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = self.population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Mutation (Differential Evolution) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + # Crossover + crossover = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover, mutant, population[i]) + + # Apply different local search strategies + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func, strategy="hybrid") + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + new_population.append(trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + return self.f_opt, self.x_opt + + def local_search(self, x, func, strategy="hybrid"): + best_x = x.copy() + best_f = func(x) + + if strategy == "hybrid": + for _ in range(5): # Hybrid strategy with limited iterations + # Hill climbing + for i in range(self.dim): + x_new = best_x.copy() + step_size = 0.1 * (np.random.rand() * 2 - 1) # Small random perturbation + x_new[i] = np.clip(best_x[i] + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + # Gaussian mutation + sigma = 0.1 # Standard deviation for Gaussian mutation + x_new = best_x + np.random.normal(0, sigma, self.dim) + x_new = np.clip(x_new, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x diff --git a/nevergrad/optimization/lama/HybridAdaptiveOrthogonalDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveOrthogonalDifferentialEvolution.py new file mode 100644 index 000000000..49400e87f --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveOrthogonalDifferentialEvolution.py @@ -0,0 +1,67 @@ +import numpy as np + + +class HybridAdaptiveOrthogonalDifferentialEvolution: + def __init__( + self, budget=1000, population_size=50, mutation_factor=0.8, crossover_rate=0.9, orthogonal_factor=0.5 + ): + self.budget = budget + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.orthogonal_factor = orthogonal_factor + self.orthogonal_factor_min = 0.1 + self.orthogonal_factor_max = 0.9 + self.orthogonal_factor_decay = 0.9 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimension = len(func.bounds.lb) + + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + + for _ in range(self.budget): + trial_population = np.zeros_like(population) + orthogonal_factor = self.orthogonal_factor + + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + mutant = np.clip(a + self.mutation_factor * (b - c), func.bounds.lb, func.bounds.ub) + + orthogonal_vector = np.random.normal(0, orthogonal_factor, size=dimension) + + crossover_points = np.random.rand(dimension) < self.crossover_rate + trial_population[i] = np.where(crossover_points, mutant, population[i] + orthogonal_vector) + + trial_fitness = func(trial_population) + population_fitness = func(population) + + improved_idxs = trial_fitness < population_fitness + population[improved_idxs] = trial_population[improved_idxs] + + best_idx = np.argmin(trial_fitness) + if trial_fitness[best_idx] < self.f_opt: + self.f_opt = trial_fitness[best_idx] + self.x_opt = trial_population[best_idx] + + orthogonal_factor = max( + orthogonal_factor * self.orthogonal_factor_decay, self.orthogonal_factor_min + ) + + if np.random.rand() < 0.1: # Introduce random restart + population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension) + ) + + # Adaptive adjustment of mutation factor and crossover rate + self.mutation_factor = max(0.5, self.mutation_factor * 0.995) + self.crossover_rate = min(0.95, self.crossover_rate * 1.001) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.delete(np.arange(len(population)), current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] diff --git a/nevergrad/optimization/lama/HybridAdaptiveParallelDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveParallelDifferentialEvolution.py new file mode 100644 index 000000000..4e991d598 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveParallelDifferentialEvolution.py @@ -0,0 +1,67 @@ +import numpy as np + + +class HybridAdaptiveParallelDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, F=0.8, CR=0.9, adaptive=True): + self.budget = budget + self.population_size = population_size + self.F = F # Base differential weight, potentially adaptive + self.CR = CR # Crossover probability + self.adaptive = adaptive # Enable adaptive control of parameters + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Best individual tracker + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + # Adaptation parameters + F_l, F_u = 0.5, 0.9 # Lower and upper bounds for F + + # Main loop + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive F + if self.adaptive: + self.F = F_l + (F_u - F_l) * np.exp(-(evaluations / self.budget)) + + # Mutation: DE/current-to-best/1 + idxs = np.arange(self.population_size) + idxs = np.delete(idxs, i) + a, b = np.random.choice(idxs, 2, replace=False) + mutant = np.clip( + population[i] + + self.F * (best_individual - population[i]) + + self.F * (population[a] - population[b]), + self.lb, + self.ub, + ) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < fitness[best_idx]: + best_idx = i + best_individual = trial + + # Check if budget exhausted + if evaluations >= self.budget: + break + + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/HybridAdaptiveParameterTuningOptimization.py b/nevergrad/optimization/lama/HybridAdaptiveParameterTuningOptimization.py new file mode 100644 index 000000000..7771226fd --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveParameterTuningOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class HybridAdaptiveParameterTuningOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Increased for better diversity and exploration + self.initial_F = 0.6 # Tuned for better balance + self.initial_CR = 0.9 # Increased for more crossover + self.elite_rate = 0.2 # Enhanced elite rate for better exploitation + self.local_search_rate = 0.5 # Increased local search intensity + self.memory_size = 30 # Increased memory size for better parameter adaptation + self.w = 0.6 # Further reduced inertia weight for finer control in PSO + self.c1 = 1.8 # Enhanced cognitive component for better exploration + self.c2 = 1.2 # Reduced social component for more individual exploration + self.phase_switch_ratio = 0.3 # Earlier switch to PSO for quicker convergence + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.03 # Finer step for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridAdaptiveParameterTuningOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridAdaptivePopulationDE.py b/nevergrad/optimization/lama/HybridAdaptivePopulationDE.py new file mode 100644 index 000000000..bc97d120f --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptivePopulationDE.py @@ -0,0 +1,82 @@ +import numpy as np + + +class HybridAdaptivePopulationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.mutation_factor = 0.5 + self.crossover_prob = 0.7 + self.archive = [] + self.elitism_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize populations + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + while self.budget > 0: + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Mutation + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + mutant = x1 + self.mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + if self.budget % 100 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Update archive + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + # Combine elite and new population + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveQuantumLevySearch.py b/nevergrad/optimization/lama/HybridAdaptiveQuantumLevySearch.py new file mode 100644 index 000000000..17c07ef35 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveQuantumLevySearch.py @@ -0,0 +1,156 @@ +import numpy as np + + +class HybridAdaptiveQuantumLevySearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 + inertia_weight = 0.9 + cognitive_coefficient = 2.0 + social_coefficient = 2.0 + differential_weight = 0.5 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 10 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + inertia_weight = 0.9 - 0.5 * (evaluations / self.budget) + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticDifferentialEvolution.py new file mode 100644 index 000000000..1fcc21a14 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticDifferentialEvolution.py @@ -0,0 +1,171 @@ +import numpy as np + + +class HybridAdaptiveQuantumMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.5 + self.local_search_iters = 5 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def mutate_quantum(self, current, best, F): + return np.clip(current + F * np.tanh(best - current), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + else: + mutant = self.mutate_quantum(population[i], global_best_position, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticOptimizer.py b/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticOptimizer.py new file mode 100644 index 000000000..52f58a0dc --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveQuantumMemeticOptimizer.py @@ -0,0 +1,110 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridAdaptiveQuantumMemeticOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + adaptive_factor *= 0.9 + self.quantum_weight *= adaptive_factor + else: + adaptive_factor *= 1.1 + self.quantum_weight *= adaptive_factor + + if eval_count < self.budget and np.random.rand() < self.local_search_probability: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/HybridAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/HybridAdaptiveQuantumPSO.py new file mode 100644 index 000000000..886d3ea51 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveQuantumPSO.py @@ -0,0 +1,78 @@ +import numpy as np + + +class HybridAdaptiveQuantumPSO: + def __init__( + self, + budget=10000, + population_size=300, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.1, + social_weight=2.3, + quantum_prob=0.2, + quantum_radius=0.2, + adaptative_gradient=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_prob = quantum_prob + self.quantum_radius = quantum_radius + self.adaptative_gradient = adaptative_gradient + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + inertia_reduction = (self.initial_inertia - self.final_inertia) / self.budget + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.adaptative_gradient + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component - gradient_component + ) + + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal(0, self.quantum_radius, self.dim) + particles[i] = global_best + quantum_jump + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/HybridAdaptiveSearch.py b/nevergrad/optimization/lama/HybridAdaptiveSearch.py new file mode 100644 index 000000000..4c9baeb22 --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveSearch.py @@ -0,0 +1,86 @@ +import numpy as np + + +class HybridAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 10 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Hybrid loop (combining PSO and Gradient-based search) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveSearchStrategy.py b/nevergrad/optimization/lama/HybridAdaptiveSearchStrategy.py new file mode 100644 index 000000000..2d3540d9b --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveSearchStrategy.py @@ -0,0 +1,77 @@ +import numpy as np + + +class HybridAdaptiveSearchStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + population_size = 150 + elite_size = int(0.05 * population_size) + mutation_rate = 0.3 + mutation_scale = lambda t: 0.3 * np.exp(-0.001 * t) # More gentle decaying mutation scale + crossover_rate = 0.9 + local_search_prob = 0.1 # Probability of performing local search on new individuals + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + + # Select elites to carry over to next generation + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + # Generate the rest of the new population + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + # Local search with a small probability + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step_size = 0.1 + local_point = child + step_size * direction + local_point = np.clip(local_point, self.lb, self.ub) + if func(local_point) < func(child): + child = local_point + + new_population.append(child) + + new_population = np.vstack((new_population)) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + # Combine new population with elites + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridAdaptiveSelfAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/HybridAdaptiveSelfAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..67bc829fc --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveSelfAdaptiveDifferentialEvolution.py @@ -0,0 +1,144 @@ +import numpy as np + + +class HybridAdaptiveSelfAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased population size for better diversity + self.initial_F = 0.5 # Standard DE mutation factor + self.initial_CR = 0.9 # Standard DE crossover rate + self.self_adaptive_rate = 0.1 # Rate of change for F and CR + self.elite_rate = 0.1 # Elite retention rate + self.memory_size = 20 # Memory size for adaptive parameters + self.adaptive_phase_ratio = 0.7 # More budget for DE-based phase + self.local_search_rate = 0.2 # Local search probability + self.alpha = 0.6 # Differential weight for local search + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 # Increased for effective local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (self.self_adaptive_rate * np.random.randn()) + adaptive_CR = memory_CR[idx] + (self.self_adaptive_rate * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + 0.5 * velocities[i] + + 1.5 * r1 * (personal_best_positions[i] - population[i]) + + 1.5 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridAdaptiveSelfAdaptiveDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridAdaptiveSimulatedAnnealingDE.py b/nevergrad/optimization/lama/HybridAdaptiveSimulatedAnnealingDE.py new file mode 100644 index 000000000..64213197d --- /dev/null +++ b/nevergrad/optimization/lama/HybridAdaptiveSimulatedAnnealingDE.py @@ -0,0 +1,118 @@ +import numpy as np + + +class HybridAdaptiveSimulatedAnnealingDE: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_mutation_factor(generation, max_generations): + return self.mutation_factor * (1 - generation / max_generations) + + def simulated_annealing_acceptance(new_f, old_f, temperature): + if new_f < old_f: + return True + else: + acceptance_prob = np.exp((old_f - new_f) / temperature) + return np.random.rand() < acceptance_prob + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + max_generations = self.budget // self.population_size + temperature = 1.0 + + for generation in range(max_generations): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = adaptive_mutation_factor(generation, max_generations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if simulated_annealing_acceptance(new_f, fitness[j], temperature): + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + temperature *= 0.99 # Cool down temperature for Simulated Annealing + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + else: + self.base_lr *= 0.95 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridAdaptiveSimulatedAnnealingDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridCosineSineDualPhaseStrategyV10.py b/nevergrad/optimization/lama/HybridCosineSineDualPhaseStrategyV10.py new file mode 100644 index 000000000..ff29ebc17 --- /dev/null +++ b/nevergrad/optimization/lama/HybridCosineSineDualPhaseStrategyV10.py @@ -0,0 +1,83 @@ +import numpy as np + + +class HybridCosineSineDualPhaseStrategyV10: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.7): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Introducing a more complex mutation strategy for phase 2 + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjusting F and CR using a cos and sin wave function to vary adaptively with iterations + scale = iteration / total_iterations + self.F = 0.5 + 0.4 * np.sin(np.pi * scale) + self.CR = 0.5 + 0.4 * np.cos(np.pi * scale) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptionDifferentialEvolution.py b/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptionDifferentialEvolution.py new file mode 100644 index 000000000..aeb2e3f04 --- /dev/null +++ b/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptionDifferentialEvolution.py @@ -0,0 +1,115 @@ +import numpy as np + + +class HybridCovarianceMatrixAdaptionDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + # Initialize covariance matrix + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 + np.eye( + self.dim + ) * 1e-6 # Ensure positive semi-definiteness + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation with covariance matrix adaptation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Update covariance matrix based on the new population + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 + np.eye( + self.dim + ) * 1e-6 # Ensure positive semi-definiteness + + perturbation_population = np.zeros_like(population) + for i in range(population_size): + perturbation = np.random.multivariate_normal(mean, cov_matrix) + perturbation_population[i] = np.clip(perturbation, bounds[0], bounds[1]) + + f_trial = func(perturbation_population[i]) + evaluations += 1 + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = perturbation_population[i] + + if evaluations >= self.budget: + break + + # Combine the new population with perturbation population and select the best + combined_population = np.vstack((population, perturbation_population)) + combined_fitness = np.array([func(ind) for ind in combined_population]) + best_indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[best_indices] + fitness = combined_fitness[best_indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2.py b/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2.py new file mode 100644 index 000000000..1376beac5 --- /dev/null +++ b/nevergrad/optimization/lama/HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2.py @@ -0,0 +1,124 @@ +import numpy as np + + +class HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = ( + 60 # Slightly increased population size to balance exploration and exploitation + ) + self.sigma = 0.2 # Increased step size for initial exploration + self.c1 = 0.05 # Adjusted learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.015 # Adjusted learning rate for mutation adaptability + self.elitism_rate = 0.3 # Further increased elitism rate to retain more top solutions + self.eval_count = 0 + self.F = 0.8 # Increased differential weight for more aggressive exploration + self.CR = 0.9 # Increased crossover probability for more varied offspring + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights.py b/nevergrad/optimization/lama/HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights.py new file mode 100644 index 000000000..3b6254252 --- /dev/null +++ b/nevergrad/optimization/lama/HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights.py @@ -0,0 +1,155 @@ +import numpy as np + + +class HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x, alpha=0.01): + u = np.random.normal(0, 1, self.dim) * alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(population, fitness): + """Switch strategy based on current performance.""" + strategy = "default" + if self.eval_count < self.budget * 0.33: + strategy = "explorative" + self.F = 0.9 + self.CR = 0.9 + elif self.eval_count < self.budget * 0.66: + strategy = "balanced" + self.F = 0.7 + self.CR = 0.85 + else: + strategy = "exploitative" + self.F = 0.5 + self.CR = 0.75 + return strategy + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < 0.2: + population[i] = levy_flight_step(population[i]) + return population + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching(population, fitness) + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridCulturalDifferentialEvolution.py b/nevergrad/optimization/lama/HybridCulturalDifferentialEvolution.py new file mode 100644 index 000000000..8d6220e70 --- /dev/null +++ b/nevergrad/optimization/lama/HybridCulturalDifferentialEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridCulturalDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + """Differential mutation using the best individual's information.""" + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + """Binomial crossover.""" + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + """Guided local search using gradient approximation.""" + best_x = x.copy() + best_f = func(x) + step_size = 0.05 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + """Estimate the gradient using finite differences.""" + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 # Further increased population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on some individuals + if np.random.rand() < 0.3: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 # Adjusted evaluations in guided search + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.15 + (0.25 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDEPSO.py b/nevergrad/optimization/lama/HybridDEPSO.py new file mode 100644 index 000000000..6ec157db0 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDEPSO.py @@ -0,0 +1,144 @@ +import numpy as np + + +class HybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros_like(population) + return population, fitness, velocities + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + new_velocities = np.zeros_like(new_population) + return new_population, new_fitness, new_velocities + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_velocity(velocities, population, pbest, gbest, w=0.5, c1=1.5, c2=1.5): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + new_velocities = w * velocities + c1 * r1 * (pbest - population) + c2 * r2 * (gbest - population) + return new_velocities + + def local_search(x): + perturbation = np.random.normal(scale=0.1, size=x.shape) + return np.clip(x + perturbation, bounds[0], bounds[1]) + + population, fitness, velocities = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + pbest = population.copy() + pbest_fitness = fitness.copy() + gbest = population[np.argmin(fitness)] + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness, velocities = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + new_velocities = np.zeros_like(velocities) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + new_velocities[i] = velocities[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + new_velocities[i] = velocities[i] + + if f_trial < pbest_fitness[i]: + pbest[i] = trial + pbest_fitness[i] = f_trial + + if f_trial < func(gbest): + gbest = trial + + if evaluations >= self.budget: + break + + velocities = update_velocity(velocities, population, pbest, gbest) + population = new_population + velocities + population = np.clip(population, bounds[0], bounds[1]) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDEPSOWithDynamicAdaptation.py b/nevergrad/optimization/lama/HybridDEPSOWithDynamicAdaptation.py new file mode 100644 index 000000000..4c4943d81 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDEPSOWithDynamicAdaptation.py @@ -0,0 +1,142 @@ +import numpy as np + + +class HybridDEPSOWithDynamicAdaptation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + F = 0.8 # Differential weight for DE + CR = 0.9 # Crossover probability for DE + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDifferentialEvolution.py b/nevergrad/optimization/lama/HybridDifferentialEvolution.py new file mode 100644 index 000000000..c5a217678 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDifferentialEvolution.py @@ -0,0 +1,56 @@ +import numpy as np + + +class HybridDifferentialEvolution: + def __init__(self, budget, population_size=20, F=0.5, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight + self.CR = CR # Crossover probability + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), lower_bound, upper_bound) + + # Crossover + cross_points = np.random.rand(dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + new_population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + population = new_population + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDifferentialEvolutionMemeticOptimizer.py b/nevergrad/optimization/lama/HybridDifferentialEvolutionMemeticOptimizer.py new file mode 100644 index 000000000..7f686850b --- /dev/null +++ b/nevergrad/optimization/lama/HybridDifferentialEvolutionMemeticOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridDifferentialEvolutionMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.3 + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.memory_size = 30 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = HybridDifferentialEvolutionMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridDifferentialEvolutionParticleSwarmOptimizer.py b/nevergrad/optimization/lama/HybridDifferentialEvolutionParticleSwarmOptimizer.py new file mode 100644 index 000000000..1f6fc8b1e --- /dev/null +++ b/nevergrad/optimization/lama/HybridDifferentialEvolutionParticleSwarmOptimizer.py @@ -0,0 +1,100 @@ +import numpy as np + + +class HybridDifferentialEvolutionParticleSwarmOptimizer: + def __init__( + self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9, inertia=0.5, cognitive=1.5, social=1.5 + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.inertia = inertia + self.cognitive = cognitive + self.social = social + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Initialize velocities for PSO + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + + # Personal best tracking for PSO + p_best = population.copy() + p_best_fitness = fitness.copy() + + # Global best tracking for PSO + g_best_idx = np.argmin(fitness) + g_best = population[g_best_idx] + g_best_fitness = fitness[g_best_idx] + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + while self.eval_count < self.budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia * velocities[i] + + self.cognitive * r1 * (p_best[i] - population[i]) + + self.social * r2 * (g_best - population[i]) + ) + velocities[i] = np.clip( + velocities[i], self.bounds[0] - population[i], self.bounds[1] - population[i] + ) + population[i] += velocities[i] + population[i] = np.clip(population[i], self.bounds[0], self.bounds[1]) + + # Personal best update + f_new = func(population[i]) + self.eval_count += 1 + if f_new < p_best_fitness[i]: + p_best_fitness[i] = f_new + p_best[i] = population[i] + + # Global best update + if f_new < g_best_fitness: + g_best_fitness = f_new + g_best = population[i] + + if self.eval_count >= self.budget: + break + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDifferentialEvolutionWithLocalSearch.py b/nevergrad/optimization/lama/HybridDifferentialEvolutionWithLocalSearch.py new file mode 100644 index 000000000..df78f5b19 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDifferentialEvolutionWithLocalSearch.py @@ -0,0 +1,95 @@ +import numpy as np + + +class HybridDifferentialEvolutionWithLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Apply local search on the best solution found so far + if evaluations < self.budget: + local_search_budget = int(self.budget * 0.1) # allocate 10% of the budget for local search + for _ in range(local_search_budget): + perturbation = np.random.normal(0, 0.1, self.dim) + local_trial = np.clip(self.x_opt + perturbation, bounds[0], bounds[1]) + f_local_trial = func(local_trial) + evaluations += 1 + + if f_local_trial < self.f_opt: + self.f_opt = f_local_trial + self.x_opt = local_trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDifferentialLocalSearch.py b/nevergrad/optimization/lama/HybridDifferentialLocalSearch.py new file mode 100644 index 000000000..fd73b1691 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDifferentialLocalSearch.py @@ -0,0 +1,62 @@ +import numpy as np + + +class HybridDifferentialLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.pop_size = 20 + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_prob = 0.1 + + def local_search(self, x, func): + """Perform a simple local search around x""" + steps = np.random.uniform(-0.1, 0.1, size=x.shape) + new_x = x + steps + new_x = np.clip(new_x, *self.bounds) + return new_x + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + for i in range(self.pop_size): + # Select three distinct individuals (but different from i) + indices = list(range(self.pop_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Mutation and Crossover (Differential Evolution) + mutant = np.clip(a + self.F * (b - c), *self.bounds) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Local Search + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Check if we've exhausted our budget + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDualLocalOptimizationDE.py b/nevergrad/optimization/lama/HybridDualLocalOptimizationDE.py new file mode 100644 index 000000000..c788b0b1c --- /dev/null +++ b/nevergrad/optimization/lama/HybridDualLocalOptimizationDE.py @@ -0,0 +1,178 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridDualLocalOptimizationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution with DE and enhanced local search + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Enhanced local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.enhanced_local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def enhanced_local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + # Apply PSO-based local search + best_x = self.pso_local_search(best_x, func) + + # Apply Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev # Account for the function evaluations + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def pso_local_search(self, x, func): + # PSO parameters + inertia_weight = 0.729 + cognitive_coeff = 1.49445 + social_coeff = 1.49445 + max_iter = 10 + swarm_size = 10 + + # Initialize PSO swarm + swarm = np.random.uniform(-0.1, 0.1, (swarm_size, self.dim)) + x + swarm = np.clip(swarm, -5.0, 5.0) + velocities = np.zeros_like(swarm) + personal_best_positions = swarm.copy() + personal_best_fitness = np.array([func(p) for p in personal_best_positions]) + global_best_position = personal_best_positions[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # PSO iterations + for _ in range(max_iter): + if self.budget <= 0: + break + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + inertia_weight * velocities + + cognitive_coeff * r1 * (personal_best_positions - swarm) + + social_coeff * r2 * (global_best_position - swarm) + ) + swarm = np.clip(swarm + velocities, -5.0, 5.0) + fitness = np.array([func(p) for p in swarm]) + self.budget -= swarm_size + + # Update personal and global bests + better_mask = fitness < personal_best_fitness + personal_best_positions[better_mask] = swarm[better_mask] + personal_best_fitness[better_mask] = fitness[better_mask] + global_best_idx = np.argmin(personal_best_fitness) + global_best_position = personal_best_positions[global_best_idx] + global_best_fitness = personal_best_fitness[global_best_idx] + + return global_best_position diff --git a/nevergrad/optimization/lama/HybridDualPhaseParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/HybridDualPhaseParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..fb1f3d106 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDualPhaseParticleSwarmDifferentialEvolution.py @@ -0,0 +1,144 @@ +import numpy as np + + +class HybridDualPhaseParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Reduced population size for faster convergence + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.3 # Adjusted local search rate + self.memory_size = 5 + self.w = 0.6 # Adjusted inertia weight + self.c1 = 1.5 # Cognitive component + self.c2 = 1.5 # Social component + self.phase_switch_ratio = 0.2 # Earlier phase switch + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal best positions and fitness + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best position and fitness + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + # Track the number of function evaluations + self.eval_count = self.population_size + + # Initialize memory for adaptive parameters + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 # Reduced step size for finer local searches + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridDualPhaseParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridDynamicAdaptiveDE.py b/nevergrad/optimization/lama/HybridDynamicAdaptiveDE.py new file mode 100644 index 000000000..61702368f --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicAdaptiveDE.py @@ -0,0 +1,123 @@ +import numpy as np + + +class HybridDynamicAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Adaptive reset based on population diversity + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on diversity + diversity = np.mean(np.std(population, axis=0)) + if diversity < self.epsilon: + # If diversity is too low, reinitialize half the population + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDynamicAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/HybridDynamicAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..79b15cdee --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicAdaptiveExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class HybridDynamicAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Cognitive constant + c2 = 2.0 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 40 + + # Exploration improvement parameters + exploration_factor = 0.2 + max_exploration_cycles = 40 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = HybridDynamicAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridDynamicClusterOptimization.py b/nevergrad/optimization/lama/HybridDynamicClusterOptimization.py new file mode 100644 index 000000000..f627865f0 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicClusterOptimization.py @@ -0,0 +1,153 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class HybridDynamicClusterOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 + elite_size = 5 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cluster_count = int(self.adaptive_parameters(evaluations, self.budget, 2, 10)) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + kmeans = KMeans(n_clusters=cluster_count) + clusters = kmeans.fit_predict(population) + cluster_centers = kmeans.cluster_centers_ + + for cluster_center in cluster_centers: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(cluster_center + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDynamicCuckooHarmonyAlgorithm.py b/nevergrad/optimization/lama/HybridDynamicCuckooHarmonyAlgorithm.py new file mode 100644 index 000000000..baf4ee9e7 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicCuckooHarmonyAlgorithm.py @@ -0,0 +1,64 @@ +import numpy as np + + +class HybridDynamicCuckooHarmonyAlgorithm: + def __init__(self, budget=10000, population_size=20, dim=5, pa=0.25, beta=1.5, gamma=0.01, alpha=0.95): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func): + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + self.population[i] = new_solution + else: + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + # Update current solution with harmony from another cuckoo + new_solution = self.population[i] + self.gamma * (self.population[j] - self.population[i]) + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + self.population[i] = new_solution + + def __call__(self, func): + for _ in range(self.budget): + self.update_population(func) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/HybridDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/HybridDynamicDifferentialEvolution.py new file mode 100644 index 000000000..0c22b1525 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicDifferentialEvolution.py @@ -0,0 +1,90 @@ +import numpy as np + + +class HybridDynamicDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Differential weight + self.initial_CR = 0.9 # Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.eval_count = 0 + self.local_search_rate = 0.1 # Probability for local search + + def __call__(self, func): + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + def local_search(position): + # Simple local search strategy + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + return candidate + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with random components + adaptive_F = self.initial_F + (0.1 * np.random.rand() - 0.05) + adaptive_CR = self.initial_CR + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridDynamicDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridDynamicDifferentialEvolutionGradient.py b/nevergrad/optimization/lama/HybridDynamicDifferentialEvolutionGradient.py new file mode 100644 index 000000000..25468370e --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicDifferentialEvolutionGradient.py @@ -0,0 +1,118 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class HybridDynamicDifferentialEvolutionGradient: + def __init__(self, budget, population_size=20, init_crossover_rate=0.7, init_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = init_crossover_rate + self.mutation_factor = init_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + clustering = KMeans(n_clusters=int(np.sqrt(self.population_size)) + 1) + labels = clustering.fit_predict(population) + new_population = [] + new_fitness = [] + for cluster_idx in range(max(labels) + 1): + cluster_members = [i for i, lbl in enumerate(labels) if lbl == cluster_idx] + if len(cluster_members) > 0: + best_member = min(cluster_members, key=lambda idx: fitness[idx]) + new_population.append(population[best_member]) + new_fitness.append(fitness[best_member]) + + while len(new_population) < self.population_size: + new_population.append(random_vector()) + new_fitness.append(func(new_population[-1])) + + return new_population, new_fitness + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + max_generations = self.budget // self.population_size + + for generation in range(max_generations): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = self.mutation_factor * (1 - generation / max_generations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + population, fitness = maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridDynamicDifferentialEvolutionGradient(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridDynamicElitistDE.py b/nevergrad/optimization/lama/HybridDynamicElitistDE.py new file mode 100644 index 000000000..786a224da --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicElitistDE.py @@ -0,0 +1,123 @@ +import numpy as np + + +class HybridDynamicElitistDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive_rate = 0.1 # Archive usage rate + self.local_search_prob = 0.1 # Probability of performing local search + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation with enhanced crossover strategy + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Mutation + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Enhanced crossover with additional elitist guidance + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + if np.random.rand() < 0.5: + trial = trial + np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * self.archive_rate) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.1 * (np.random.rand(self.dim) - 0.5) + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/HybridDynamicQuantumLevyDifferentialSearch.py b/nevergrad/optimization/lama/HybridDynamicQuantumLevyDifferentialSearch.py new file mode 100644 index 000000000..03b0a3353 --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicQuantumLevyDifferentialSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class HybridDynamicQuantumLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum and Levy Search + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridDynamicSearch.py b/nevergrad/optimization/lama/HybridDynamicSearch.py new file mode 100644 index 000000000..289a9830f --- /dev/null +++ b/nevergrad/optimization/lama/HybridDynamicSearch.py @@ -0,0 +1,161 @@ +import numpy as np + + +class HybridDynamicSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_decay = 0.99 + + # Differential Evolution parameters + F = 0.8 + CR = 0.9 + + # Gradient-based search parameters + alpha = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = 0.9 + adaptive_F = 0.8 + adaptive_alpha = 0.1 + adaptive_beta = 0.9 + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = 0.9 - 0.8 * (i / self.budget) + adaptive_F = 0.8 + 0.4 * (i / self.budget) + adaptive_alpha = 0.1 + 0.2 * (i / self.budget) + adaptive_beta = 0.9 - 0.4 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.1 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(0.4, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = HybridDynamicSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridEnhancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/HybridEnhancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..1a88e913d --- /dev/null +++ b/nevergrad/optimization/lama/HybridEnhancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,148 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class HybridEnhancedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7, cluster_size=5): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.cluster_size = cluster_size + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + kmeans = KMeans(n_clusters=self.cluster_size, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for i in range(len(population)): + if np.linalg.norm(population[i] - cluster_centers[kmeans.labels_[i]]) < 1e-1: + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + success_count_history = [] + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + success_count_history.append(success_rate) + if len(success_count_history) > 10: + success_count_history.pop(0) + + avg_success_rate = np.mean(success_count_history) + if avg_success_rate > 0.2: + self.mutation_factor *= 1.1 + self.crossover_rate *= 1.05 + else: + self.mutation_factor *= 0.9 + self.crossover_rate *= 0.95 + + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.5, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridEnhancedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridEnhancedDualPhaseAdaptiveOptimizationV6.py b/nevergrad/optimization/lama/HybridEnhancedDualPhaseAdaptiveOptimizationV6.py new file mode 100644 index 000000000..c4da4fa65 --- /dev/null +++ b/nevergrad/optimization/lama/HybridEnhancedDualPhaseAdaptiveOptimizationV6.py @@ -0,0 +1,146 @@ +import numpy as np + + +class HybridEnhancedDualPhaseAdaptiveOptimizationV6: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 120 # Adjusted for better exploration + self.initial_F = 0.6 # Refined mutation factor + self.initial_CR = 0.9 # Refined crossover rate + self.elite_rate = 0.15 # Increased elite preservation rate + self.local_search_rate = 0.4 # Increased local search rate for better local optimization + self.memory_size = 20 # Increased memory for better parameter adaptation + self.w = 0.7 # Increased inertia weight for better exploration + self.c1 = 1.5 # Increased cognitive component + self.c2 = 1.7 # Increased social component + self.adaptive_phase_ratio = 0.6 # Increased DE phase ratio for better initial exploration + self.alpha = 0.3 # Increased differential weight for faster convergence + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Increased local search step size for better exploration + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = HybridEnhancedDualPhaseAdaptiveOptimizationV6(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/HybridEnhancedGravitationalSwarmIntelligence.py b/nevergrad/optimization/lama/HybridEnhancedGravitationalSwarmIntelligence.py new file mode 100644 index 000000000..93fccd558 --- /dev/null +++ b/nevergrad/optimization/lama/HybridEnhancedGravitationalSwarmIntelligence.py @@ -0,0 +1,94 @@ +import numpy as np + + +class HybridEnhancedGravitationalSwarmIntelligence: + def __init__( + self, + budget=1000, + population_size=20, + G0=100.0, + alpha=0.1, + beta_min=0.1, + beta_max=0.9, + delta=0.1, + gamma=0.1, + eta=0.1, + ): + self.budget = budget + self.population_size = population_size + self.G0 = G0 + self.alpha = alpha + self.beta_min = beta_min + self.beta_max = beta_max + self.delta = delta + self.gamma = gamma + self.eta = eta + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, func.bounds.lb.size) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F): + return x + F + + def update_G(self, t): + return self.G0 * np.exp(-self.alpha * t) + + def update_beta(self, t): + return self.beta_min + (self.beta_max - self.beta_min) * np.exp(-self.gamma * t) + + def update_alpha(self, t): + return self.alpha * (1.0 - self.delta) + + def update_population(self, population, f_vals, func, G): + for i in range(self.population_size): + if np.random.rand() < self.beta_max: + random_index = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + F = self.gravitational_force(population[i], population[random_index], G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + return population, f_vals + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + G = self.G0 + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + + for t in range(self.budget): + population, f_vals = self.update_population(population, f_vals, func, G) + + for i in range(self.population_size): + if i != best_idx: + F = self.gravitational_force(population[i], best_pos, G) + new_pos = self.update_position(population[i], F) + new_pos = np.clip(new_pos, func.bounds.lb, func.bounds.ub) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + best_idx = np.argmin(f_vals) + if f_vals[best_idx] < self.f_opt: + self.f_opt = f_vals[best_idx] + self.x_opt = population[best_idx] + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.beta_max = self.update_beta(t) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridEvolutionaryAnnealingOptimizer.py b/nevergrad/optimization/lama/HybridEvolutionaryAnnealingOptimizer.py new file mode 100644 index 000000000..13eeb680d --- /dev/null +++ b/nevergrad/optimization/lama/HybridEvolutionaryAnnealingOptimizer.py @@ -0,0 +1,54 @@ +import numpy as np + + +class HybridEvolutionaryAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension is set to 5 as per problem statement + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize parameters + T_initial = 1.0 # Initial temperature for simulated annealing + T = T_initial + T_min = 0.01 # Minimum temperature to keep annealing active + alpha = 0.98 # Cooling rate for temperature + CR = 0.8 # Crossover probability + F = 0.5 # Differential weight + population_size = 40 # Population size + + # Initialize population + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + # Mutation and Crossover + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + # Simulated annealing acceptance + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive parameter updates + T *= alpha + CR = max(0.1, CR * 0.99) # Gradually decrease CR + F = min(1.0, F * 1.02) # Gradually increase F to enhance exploration + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HybridEvolutionaryOptimization.py b/nevergrad/optimization/lama/HybridEvolutionaryOptimization.py new file mode 100644 index 000000000..ee6ba24ce --- /dev/null +++ b/nevergrad/optimization/lama/HybridEvolutionaryOptimization.py @@ -0,0 +1,104 @@ +import numpy as np + + +class HybridEvolutionaryOptimization: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_pos[i] += epsilon + f_pos = func(x_pos) + x_neg = np.copy(x) + x_neg[i] -= epsilon + f_neg = func(x_neg) + grad[i] = (f_pos - f_neg) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(len(pop)): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.crossover_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.learning_rate = 0.01 * np.exp(-iteration / max_iterations) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step on entire population + pop, scores = self.local_search(func, pop, scores) + evaluations += len(pop) + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridEvolvingAdaptiveStrategyV28.py b/nevergrad/optimization/lama/HybridEvolvingAdaptiveStrategyV28.py new file mode 100644 index 000000000..7cb345cb5 --- /dev/null +++ b/nevergrad/optimization/lama/HybridEvolvingAdaptiveStrategyV28.py @@ -0,0 +1,76 @@ +import numpy as np + + +class HybridEvolvingAdaptiveStrategyV28: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor start value + self.CR = CR_init # Crossover rate start value + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, idx, phase): + size = len(population) + idxs = [i for i in range(size) if i != idx] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, improvements): + # Adjust mutation factor and crossover rate dynamically based on historical improvements + self.F = np.clip(self.F + 0.01 * improvements, 0.1, 1) + self.CR = np.clip(self.CR - 0.01 * improvements, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + best_fitness = fitnesses[best_idx] + recent_improvements = 0 + + while evaluations < self.budget: + phase = 1 if evaluations < self.budget * self.switch_ratio else 2 + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < best_fitness: + best_idx = i + best_fitness = trial_fitness + recent_improvements += 1 + + if evaluations >= self.budget: + break + self.adjust_parameters(evaluations, recent_improvements) + recent_improvements = 0 # Reset improvements counter after each parameter adjustment + + return best_fitness, population[best_idx] diff --git a/nevergrad/optimization/lama/HybridExploitationExplorationGradientSearch.py b/nevergrad/optimization/lama/HybridExploitationExplorationGradientSearch.py new file mode 100644 index 000000000..98535ff8a --- /dev/null +++ b/nevergrad/optimization/lama/HybridExploitationExplorationGradientSearch.py @@ -0,0 +1,60 @@ +import numpy as np + + +class HybridExploitationExplorationGradientSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # Dimensionality of the BBOB test suite + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Population initialization + population_size = 20 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + + # Momentum for gradient + momentum = np.zeros((population_size, self.dimension)) + gamma = 0.9 # momentum coefficient + + iteration = 0 + while iteration < self.budget: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = best_individual + + # Gradient estimation with momentum + gradients = np.zeros_like(population) + for i in range(population_size): + for d in range(self.dimension): + perturb = np.zeros(self.dimension) + epsilon = 0.01 + perturb[d] = epsilon + + forward = np.clip(population[i] + perturb, self.lower_bound, self.upper_bound) + backward = np.clip(population[i] - perturb, self.lower_bound, self.upper_bound) + gradient = (func(forward) - func(backward)) / (2 * epsilon) + + gradients[i][d] = gradient + + # Apply momentum to gradients + momentum[i] = gamma * momentum[i] + (1 - gamma) * gradients[i] + new_position = population[i] - momentum[i] * 0.1 # 0.1 is the learning rate + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + + # Evaluate new position + new_fitness = func(new_position) + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + iteration += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridGradientAnnealingWithMemory.py b/nevergrad/optimization/lama/HybridGradientAnnealingWithMemory.py new file mode 100644 index 000000000..3a95360be --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientAnnealingWithMemory.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridGradientAnnealingWithMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/HybridGradientBoostedMemoryAnnealingPlus.py b/nevergrad/optimization/lama/HybridGradientBoostedMemoryAnnealingPlus.py new file mode 100644 index 000000000..3dd6487db --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientBoostedMemoryAnnealingPlus.py @@ -0,0 +1,175 @@ +import numpy as np + + +class HybridGradientBoostedMemoryAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Intensive localized search as refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/HybridGradientCrossoverOptimization.py b/nevergrad/optimization/lama/HybridGradientCrossoverOptimization.py new file mode 100644 index 000000000..2bd9bfa5f --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientCrossoverOptimization.py @@ -0,0 +1,79 @@ +import numpy as np + + +class HybridGradientCrossoverOptimization: + def __init__( + self, budget, dimension=5, population_size=30, learning_rate=0.1, crossover_rate=0.7, gradient_steps=5 + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.learning_rate = learning_rate + self.crossover_rate = crossover_rate + self.gradient_steps = gradient_steps # Number of gradient steps after crossover/mutation + + def __call__(self, func): + # Initialize population within bounds [-5.0, 5.0] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + best_idx = np.argmin(fitness) + f_opt = fitness[best_idx] + x_opt = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Select parents for crossover + parents_idx = np.random.choice(self.population_size, 2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Perform crossover + if np.random.rand() < self.crossover_rate: + child = np.array([np.random.choice([p1, p2]) for p1, p2 in zip(parent1, parent2)]) + else: + child = parent1.copy() # No crossover, child is a copy of parent1 + + # Mutation: adding Gaussian noise + child += np.random.normal(0, 1, self.dimension) * self.learning_rate + child = np.clip(child, -5.0, 5.0) # Ensure child is within bounds + + # Evaluate child + child_fitness = func(child) + evaluations += 1 + + # Selection: Greedily replace if the child is better + if child_fitness < fitness[i]: + population[i] = child + fitness[i] = child_fitness + + # Gradient-based refinement for a few steps + if evaluations + self.gradient_steps <= self.budget: + for _ in range(self.gradient_steps): + grad_est = np.array( + [ + (func(child + eps * np.eye(1, self.dimension, k)[0]) - child_fitness) / eps + for k, eps in enumerate([1e-5] * self.dimension) + ] + ) + child -= self.learning_rate * grad_est + child = np.clip(child, -5.0, 5.0) + new_fitness = func(child) + evaluations += 1 + + if new_fitness < child_fitness: + child_fitness = new_fitness + population[i] = child + fitness[i] = child_fitness + else: + break + + # Update global optimum + if child_fitness < f_opt: + f_opt = child_fitness + x_opt = child + + if evaluations >= self.budget: + break + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HybridGradientDifferentialEvolution.py b/nevergrad/optimization/lama/HybridGradientDifferentialEvolution.py new file mode 100644 index 000000000..1f399e2e9 --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientDifferentialEvolution.py @@ -0,0 +1,62 @@ +import numpy as np + + +class HybridGradientDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set to 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 100 + mutation_factor = 0.8 + crossover_prob = 0.7 + learning_rate = 0.01 # Initial learning rate for gradient steps + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + num_iterations = self.budget // population_size + grad_steps = max(1, num_iterations // 20) # Allocate some iterations for gradient descent + + for iteration in range(num_iterations): + if iteration < num_iterations - grad_steps: + # Differential Evolution Strategy + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + trial_fitness = func(trial) + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + else: + # Gradient-based refinement + gradients = np.random.randn(population_size, self.dim) # Mock gradient + for i in range(population_size): + population[i] -= learning_rate * gradients[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + new_fitness = func(population[i]) + if new_fitness < fitness[i]: + fitness[i] = new_fitness + if new_fitness < best_value: + best_value = new_fitness + best_solution = population[i].copy() + learning_rate *= 0.9 # Decay learning rate + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/HybridGradientEvolution.py b/nevergrad/optimization/lama/HybridGradientEvolution.py new file mode 100644 index 000000000..c0d1e4386 --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientEvolution.py @@ -0,0 +1,83 @@ +import numpy as np + + +class HybridGradientEvolution: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = 10 + self.learning_rate = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + # Selection: Choose two parents based on fitness + parents_idx = np.random.choice(range(self.population_size), size=2, replace=False) + parent1, parent2 = population[parents_idx[0]], population[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Replace the worse parent with the new child + worse_parent_idx = ( + parents_idx[0] if fitness[parents_idx[0]] > fitness[parents_idx[1]] else parents_idx[1] + ) + population[worse_parent_idx] = new_x + fitness[worse_parent_idx] = new_f + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridGradientMemoryAnnealing.py b/nevergrad/optimization/lama/HybridGradientMemoryAnnealing.py new file mode 100644 index 000000000..e40c2c608 --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientMemoryAnnealing.py @@ -0,0 +1,129 @@ +import numpy as np + + +class HybridGradientMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Gradient-based local search to refine the best memory solution + if evaluations < self.budget and T < 0.5: + x_best = memory[np.argmin(memory_scores)] + for _ in range(10): # Perform 10 gradient descent steps + gradient = self._approximate_gradient(func, x_best) + x_best -= 0.01 * gradient # Gradient descent step + x_best = np.clip(x_best, func.bounds.lb, func.bounds.ub) + f_best = func(x_best) + evaluations += 1 + if f_best < self.f_opt: + self.f_opt = f_best + self.x_opt = x_best + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Memory Enrichment + if evaluations % (memory_size * 5) == 0: + self._enhance_memory(func, memory, memory_scores, evaluations) + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _enhance_memory(self, func, memory, memory_scores, evaluations): + # Enhancing memory by local optimization around best memory points + for i in range(len(memory)): + local_T = 0.1 # Low disturbance for local search + x_local = memory[i] + f_local = memory_scores[i] + for _ in range(5): # Local search iterations + x_candidate = x_local + local_T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if evaluations >= self.budget: + break + + if f_candidate < f_local: + x_local = x_candidate + f_local = f_candidate + + memory[i] = x_local + memory_scores[i] = f_local diff --git a/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV2.py b/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV2.py new file mode 100644 index 000000000..72ee998ac --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridGradientMemoryAnnealingV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=10, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV3.py b/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV3.py new file mode 100644 index 000000000..5980dcaab --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientMemoryAnnealingV3.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridGradientMemoryAnnealingV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Lower final temperature to allow more exploration + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=30, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.2): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/HybridGradientMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/HybridGradientMemorySimulatedAnnealing.py new file mode 100644 index 000000000..2ae11d9e0 --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientMemorySimulatedAnnealing.py @@ -0,0 +1,162 @@ +import numpy as np + + +class HybridGradientMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Introduce hybrid crossover mechanism to exploit best solutions in memory + if evaluations % (self.budget // 5) == 0: + for i in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/HybridGradientPSO.py b/nevergrad/optimization/lama/HybridGradientPSO.py new file mode 100644 index 000000000..d27e493c7 --- /dev/null +++ b/nevergrad/optimization/lama/HybridGradientPSO.py @@ -0,0 +1,74 @@ +import numpy as np + + +class HybridGradientPSO: + def __init__(self, budget=10000, population_size=30, omega=0.7, phi_p=0.3, phi_g=0.4, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.omega = omega # Constant inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.learning_rate = learning_rate # Gradient learning rate + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + + # Update velocities + velocities[i] = ( + self.omega * velocities[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + ) + + # Apply gradient-based refinement for half of the population + if i % 2 == 0: + grad = self.estimate_gradient(particles[i], func) + particles[i] -= self.learning_rate * grad # Gradient descent step + + # Regular update + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + return global_best_score, global_best + + def estimate_gradient(self, x, objective, epsilon=1e-5): + gradient = np.zeros(self.dim) + for j in range(self.dim): + x1 = np.array(x) + x2 = np.array(x) + x1[j] += epsilon + x2[j] -= epsilon + gradient[j] = (objective(x1) - objective(x2)) / (2 * epsilon) + return gradient diff --git a/nevergrad/optimization/lama/HybridGuidedEvolutionaryOptimizer.py b/nevergrad/optimization/lama/HybridGuidedEvolutionaryOptimizer.py new file mode 100644 index 000000000..98b5628af --- /dev/null +++ b/nevergrad/optimization/lama/HybridGuidedEvolutionaryOptimizer.py @@ -0,0 +1,89 @@ +import numpy as np + + +class HybridGuidedEvolutionaryOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.mutation_factor = 0.8 # Mutation scaling factor + self.crossover_rate = 0.7 # Probability of crossover + self.adaptation_rate = 0.1 # Rate of adapting mutation factor + self.elitism_rate = 0.1 # Proportion of elite individuals + self.success_memory = [] + + def adapt_mutation_factor(self): + """Adapt the mutation factor based on moving average of recent successes""" + if len(self.success_memory) > 10: + success_rate = np.mean(self.success_memory[-10:]) + if success_rate > 0.2: + self.mutation_factor *= 1 + self.adaptation_rate + else: + self.mutation_factor *= 1 - self.adaptation_rate + self.mutation_factor = max(0.01, min(1.0, self.mutation_factor)) # Ensure within bounds + self.crossover_rate = max( + 0.1, min(0.9, self.crossover_rate + (-0.05 if success_rate > 0.2 else 0.05)) + ) + + def mutate(self, individual): + """Apply mutation with dynamic adaptation""" + mutation = np.random.normal(0, self.mutation_factor, self.dimension) + mutant = individual + mutation + return np.clip(mutant, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent1, parent2): + """Blended crossover""" + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-0.1, 1.1, self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.bounds["lb"], self.bounds["ub"]) + return parent1 # No crossover occurred + + def __call__(self, func): + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + evaluations = len(population) + + while evaluations < self.budget: + elite_size = int(self.elitism_rate * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + offspring = [] + for _ in range(self.population_size - elite_size): + parents = np.random.choice(self.population_size, 2, replace=False) + child = self.crossover(population[parents[0]], population[parents[1]]) + mutated_child = self.mutate(child) + offspring.append(mutated_child) + + offspring = np.vstack((elite_individuals, offspring)) + offspring_fitness = np.array([func(child) for child in offspring]) + evaluations += len(offspring) + + # Select new population + combined = np.vstack((population, offspring)) + combined_fitness = np.concatenate((fitness, offspring_fitness)) + indices = np.argsort(combined_fitness)[: self.population_size] + population = combined[indices] + fitness = combined_fitness[indices] + + # Update best solution + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + self.success_memory.append(1) + else: + self.success_memory.append(0) + + # Adapt parameters dynamically + self.adapt_mutation_factor() + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HybridMemoryAdaptiveDE.py b/nevergrad/optimization/lama/HybridMemoryAdaptiveDE.py new file mode 100644 index 000000000..7860eabaa --- /dev/null +++ b/nevergrad/optimization/lama/HybridMemoryAdaptiveDE.py @@ -0,0 +1,124 @@ +import numpy as np + + +class HybridMemoryAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + initial_population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(size): + population = np.random.uniform(bounds[0], bounds[1], (size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(F_values.size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind, size): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(history): + if np.mean(history) > 0.5: + return mutation_strategy_1 + else: + return mutation_strategy_2 + + # Initialize population and parameters + population, fitness = initialize_population(initial_population_size) + population_size = initial_population_size + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + mutation_history = [] + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population_size = min(int(population_size * 1.5), 100) # dynamically increase population size + population, fitness = local_restart(best_ind, population_size) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + mutation_strategy = select_mutation_strategy(mutation_history) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + mutation_history.append(1) # Successful mutation + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + mutation_history.append(0) # Unsuccessful mutation + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridMultiDimensionalAnnealing.py b/nevergrad/optimization/lama/HybridMultiDimensionalAnnealing.py new file mode 100644 index 000000000..8651fa8a6 --- /dev/null +++ b/nevergrad/optimization/lama/HybridMultiDimensionalAnnealing.py @@ -0,0 +1,125 @@ +import numpy as np + + +class HybridMultiDimensionalAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define adaptive phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Hybrid approach: Periodically do gradient-based local search and dimensional adjustments + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=5, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * np.random.uniform(-1, 1) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/HybridPSO_DE.py b/nevergrad/optimization/lama/HybridPSO_DE.py new file mode 100644 index 000000000..e61cf024c --- /dev/null +++ b/nevergrad/optimization/lama/HybridPSO_DE.py @@ -0,0 +1,153 @@ +import numpy as np + + +class HybridPSO_DE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution with DE and PSO local search + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability using PSO + if np.random.rand() < self.local_search_prob: + trial = self.pso_local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def pso_local_search(self, x, func): + # PSO parameters + inertia_weight = 0.729 + cognitive_coeff = 1.49445 + social_coeff = 1.49445 + max_iter = 10 + swarm_size = 10 + + # Initialize PSO swarm + swarm = np.random.uniform(-0.1, 0.1, (swarm_size, self.dim)) + x + swarm = np.clip(swarm, -5.0, 5.0) + velocities = np.zeros_like(swarm) + personal_best_positions = swarm.copy() + personal_best_fitness = np.array([func(p) for p in personal_best_positions]) + global_best_position = personal_best_positions[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # PSO iterations + for _ in range(max_iter): + if self.budget <= 0: + break + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + inertia_weight * velocities + + cognitive_coeff * r1 * (personal_best_positions - swarm) + + social_coeff * r2 * (global_best_position - swarm) + ) + swarm = np.clip(swarm + velocities, -5.0, 5.0) + fitness = np.array([func(p) for p in swarm]) + self.budget -= swarm_size + + # Update personal and global bests + better_mask = fitness < personal_best_fitness + personal_best_positions[better_mask] = swarm[better_mask] + personal_best_fitness[better_mask] = fitness[better_mask] + global_best_idx = np.argmin(personal_best_fitness) + global_best_position = personal_best_positions[global_best_idx] + global_best_fitness = personal_best_fitness[global_best_idx] + + return global_best_position diff --git a/nevergrad/optimization/lama/HybridPSO_DE_GradientOptimizer.py b/nevergrad/optimization/lama/HybridPSO_DE_GradientOptimizer.py new file mode 100644 index 000000000..2d07119ff --- /dev/null +++ b/nevergrad/optimization/lama/HybridPSO_DE_GradientOptimizer.py @@ -0,0 +1,139 @@ +import numpy as np + + +class HybridPSO_DE_GradientOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def gradient_descent(self, x, func, budget, step_size=0.01): + best_x = x.copy() + best_f = func(x) + grad = np.zeros(self.dim) + for _ in range(budget): + for i in range(self.dim): + x_plus = x.copy() + x_plus[i] += step_size + f_plus = func(x_plus) + grad[i] = (f_plus - best_f) / step_size + + x = np.clip(x - step_size * grad, self.bounds[0], self.bounds[1]) + f = func(x) + if f < best_f: + best_x = x + best_f = f + + return best_x, best_f + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals using gradient descent + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.gradient_descent(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridParticleDE.py b/nevergrad/optimization/lama/HybridParticleDE.py new file mode 100644 index 000000000..ded1e9c19 --- /dev/null +++ b/nevergrad/optimization/lama/HybridParticleDE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class HybridParticleDE: + def __init__(self, budget=10000, population_size=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + # Particle Swarm Optimization (PSO) Update + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + # Differential Evolution (DE) Mutation and Crossover + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + # Fitness Evaluation + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update personal best + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + # Update global best + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + # Update inertia weight for better convergence + self.inertia_weight = max(0.4, self.inertia_weight * 0.99) + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridParticleDE_v2.py b/nevergrad/optimization/lama/HybridParticleDE_v2.py new file mode 100644 index 000000000..c377c6613 --- /dev/null +++ b/nevergrad/optimization/lama/HybridParticleDE_v2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class HybridParticleDE_v2: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.9 # Adjusted for better exploration + self.crossover_probability = 0.8 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.4 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + # Particle Swarm Optimization (PSO) Update + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + # Differential Evolution (DE) Mutation and Crossover + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + # Fitness Evaluation + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update personal best + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + # Update global best + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + # Update inertia weight for better convergence + self.inertia_weight = max(0.4, self.inertia_weight * 0.98) + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridParticleDE_v3.py b/nevergrad/optimization/lama/HybridParticleDE_v3.py new file mode 100644 index 000000000..88ed30623 --- /dev/null +++ b/nevergrad/optimization/lama/HybridParticleDE_v3.py @@ -0,0 +1,74 @@ +import numpy as np + + +class HybridParticleDE_v3: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.mutation_factor = 0.8 # Fine-tuned for better trade-off + self.crossover_probability = 0.9 + self.inertia_weight = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (self.population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + eval_count = self.population_size + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_idx = np.argmin(fitness) + global_best = population[global_best_idx] + global_best_fitness = fitness[global_best_idx] + + while eval_count < self.budget: + for i in range(self.population_size): + # Particle Swarm Optimization (PSO) Update + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + cognitive_velocity = self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + social_velocity = self.social_coeff * r2 * (global_best - population[i]) + velocity[i] = self.inertia_weight * velocity[i] + cognitive_velocity + social_velocity + population[i] = np.clip(population[i] + velocity[i], self.bounds[0], self.bounds[1]) + + # Differential Evolution (DE) Mutation and Crossover + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(population[i]) + crossover_mask = np.random.rand(self.dim) < self.crossover_probability + trial[crossover_mask] = mutant[crossover_mask] + + # Fitness Evaluation + trial_fitness = func(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update personal best + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + + # Update global best + if trial_fitness < global_best_fitness: + global_best = trial + global_best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + # Update inertia weight for better convergence + self.inertia_weight = max(0.4, self.inertia_weight * 0.98) + + self.f_opt = global_best_fitness + self.x_opt = global_best + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridParticleSwarmDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/HybridParticleSwarmDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..7d73d6a8f --- /dev/null +++ b/nevergrad/optimization/lama/HybridParticleSwarmDifferentialEvolutionOptimizer.py @@ -0,0 +1,132 @@ +import numpy as np + + +class HybridParticleSwarmDifferentialEvolutionOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumAdaptiveMemeticSearch.py b/nevergrad/optimization/lama/HybridQuantumAdaptiveMemeticSearch.py new file mode 100644 index 000000000..0b33093df --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumAdaptiveMemeticSearch.py @@ -0,0 +1,145 @@ +import numpy as np + + +class HybridQuantumAdaptiveMemeticSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 30 + inertia_weight = 0.7 + cognitive_coefficient = 1.5 + social_coefficient = 1.3 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 5 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + inertia_weight = 0.9 - 0.5 * (evaluations / self.budget) + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + local_step = np.random.uniform(-0.1, 0.1, self.dim) + candidate = np.clip(population[i] + local_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolution.py new file mode 100644 index 000000000..3a9f8598c --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolution.py @@ -0,0 +1,164 @@ +import numpy as np + + +class HybridQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.7 + self.elite_fraction = 0.2 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-0.1, 0.1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + self.local_search_budget -= 1 + if self.local_search_budget <= 0: + break + return individual + + def quantum_jump(self, individual, global_best, alpha): + return np.clip(individual + alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(self.local_search_budget, self.budget - evaluations) + elite_population[idx] = self.local_search(elite_population[idx], bounds, func) + evaluations += local_search_budget + + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + population[:elite_count] = elite_population + + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha) + quantum_fitness = func(quantum_trial) + evaluations += 1 + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart.py b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart.py new file mode 100644 index 000000000..a5821749f --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def advanced_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def hybrid_search(self, x, func): + candidate_positions = [x + np.random.randn(self.dim) * 0.1 for _ in range(10)] + candidate_positions = [np.clip(pos, self.bounds[0], self.bounds[1]) for pos in candidate_positions] + candidate_fitness = [func(pos) for pos in candidate_positions] + + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + memory_candidates = [ + self.memorized_individuals[np.random.randint(len(self.memorized_individuals))] + for _ in range(self.elite_size) + ] + for mem_ind in memory_candidates: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.advanced_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + # Apply enhanced hybrid search mechanism + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch.py b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch.py new file mode 100644 index 000000000..b254b6398 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch.py @@ -0,0 +1,183 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.6 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def hybrid_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.hybrid_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % (self.population_size * 10) == 0: + if diversity < self.diversity_threshold: + for j in range(num_elites): + elite, elite_fit = self.hybrid_local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + # Memory update + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = self.memory_rate * memory[i] + (1 - self.memory_rate) * population[i] + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory.py b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory.py new file mode 100644 index 000000000..d5a7be34c --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + # Standard DE mutation and crossover + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumDifferentialParticleSwarmOptimization.py b/nevergrad/optimization/lama/HybridQuantumDifferentialParticleSwarmOptimization.py new file mode 100644 index 000000000..16a3bb942 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumDifferentialParticleSwarmOptimization.py @@ -0,0 +1,133 @@ +import numpy as np + + +class HybridQuantumDifferentialParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.5 + self.F_max = 1.0 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # Quantum Inspired Parameters + self.alpha = 0.75 + self.beta = 0.25 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while evaluations < self.budget: + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + # Quantum Inspired Adjustment + quantum_perturbation = np.random.normal(0, 1, self.dim) * ( + self.alpha * (self.x_opt - population[i]) + self.beta * (population[i] - self.lb) + ) + trial_vector = np.clip(trial_vector + quantum_perturbation, self.lb, self.ub) + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Update personal best + if f_candidate < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = f_candidate + + # Update velocities and positions (PSO component) + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment based on Stagnation Counter + if self.stagnation_counter > self.stagnation_threshold: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + self.stagnation_counter = 0 + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumEnhancedMultiPhaseAdaptiveDE.py b/nevergrad/optimization/lama/HybridQuantumEnhancedMultiPhaseAdaptiveDE.py new file mode 100644 index 000000000..62a106f00 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumEnhancedMultiPhaseAdaptiveDE.py @@ -0,0 +1,136 @@ +import numpy as np + + +class HybridQuantumEnhancedMultiPhaseAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(20): # increased number of local steps for better refinement + perturbation = np.random.uniform(-0.02, 0.02, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumEvolution.py b/nevergrad/optimization/lama/HybridQuantumEvolution.py new file mode 100644 index 000000000..bdbe3eb76 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumEvolution.py @@ -0,0 +1,190 @@ +import numpy as np + + +class HybridQuantumEvolution: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/HybridQuantumGradientEvolution.py b/nevergrad/optimization/lama/HybridQuantumGradientEvolution.py new file mode 100644 index 000000000..4c62c912c --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumGradientEvolution.py @@ -0,0 +1,118 @@ +import numpy as np + + +class HybridQuantumGradientEvolution: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumLevyAdaptiveSwarmV2.py b/nevergrad/optimization/lama/HybridQuantumLevyAdaptiveSwarmV2.py new file mode 100644 index 000000000..a4f4893c7 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumLevyAdaptiveSwarmV2.py @@ -0,0 +1,165 @@ +import numpy as np + + +class HybridQuantumLevyAdaptiveSwarmV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step # Reduced step size for more precise exploitation + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.4 + 0.4 * progress + social_coefficient = 1.4 - 0.4 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.8 - 0.4 * progress + quantum_factor = 0.4 - 0.2 * progress + levy_factor = 0.05 + 0.35 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 # Reverted back to larger population size for more exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + # DE Mutation and Crossover + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum Particle Update + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Levy Flight Local Search + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.7: # Increased probability of local search + local_search_iters = ( + 5 # Reverted back to higher local search iterations for thorough exploitation + ) + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuantumMemeticOptimization.py b/nevergrad/optimization/lama/HybridQuantumMemeticOptimization.py new file mode 100644 index 000000000..1f84bdc87 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuantumMemeticOptimization.py @@ -0,0 +1,122 @@ +import numpy as np + + +class HybridQuantumMemeticOptimization: + def __init__( + self, + budget, + population_size=100, + tau1=0.1, + tau2=0.1, + memetic_rate=0.6, + alpha=0.15, + learning_rate=0.01, + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HybridQuasiRandomDEGradientAnnealing.py b/nevergrad/optimization/lama/HybridQuasiRandomDEGradientAnnealing.py new file mode 100644 index 000000000..84a5f3060 --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuasiRandomDEGradientAnnealing.py @@ -0,0 +1,142 @@ +import numpy as np +from scipy.stats import qmc + + +class HybridQuasiRandomDEGradientAnnealing: + def __init__(self, budget, population_size=30, initial_crossover_rate=0.7, initial_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.mutation_factor = initial_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + # Simulated Annealing acceptance criterion + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Cool down the temperature + self.temperature *= self.cooling_rate + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adaptive mutation and crossover strategies based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + # Additional perturbation to improve exploration + if evaluations % 100 == 0: + for l in range(self.population_size): + population[l] += np.random.randn(self.dim) * self.base_lr * 0.1 + population[l] = np.clip(population[l], self.bounds[0], self.bounds[1]) + fitness[l] = func(population[l]) + evaluations += 1 + if fitness[l] < self.f_opt: + self.f_opt = fitness[l] + self.x_opt = population[l] + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridQuasiRandomDEGradientAnnealing(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridQuasiRandomGradientDifferentialEvolution.py b/nevergrad/optimization/lama/HybridQuasiRandomGradientDifferentialEvolution.py new file mode 100644 index 000000000..04d62609b --- /dev/null +++ b/nevergrad/optimization/lama/HybridQuasiRandomGradientDifferentialEvolution.py @@ -0,0 +1,123 @@ +import numpy as np +from scipy.stats import qmc + + +class HybridQuasiRandomGradientDifferentialEvolution: + def __init__(self, budget, population_size=30, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridQuasiRandomGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost.py b/nevergrad/optimization/lama/HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost.py new file mode 100644 index 000000000..bac3ccdfd --- /dev/null +++ b/nevergrad/optimization/lama/HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost.py @@ -0,0 +1,122 @@ +import numpy as np + + +class HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def generate_quasi_random_vector(): + phi = np.array( + [ + (np.sqrt(5) - 1) / 2, + (np.sqrt(3) - 1) / 2, + (np.sqrt(7) - 1) / 2, + (np.sqrt(11) - 1) / 2, + (np.sqrt(13) - 1) / 2, + ] + ) + return self.bounds[0] + (self.bounds[1] - self.bounds[0]) * ( + np.mod(np.arange(1, self.dim + 1) * phi, 1) + ) + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + quasi_random = generate_quasi_random_vector() + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + quasi_random + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + else: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/HybridSelfAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/HybridSelfAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..b954b92ac --- /dev/null +++ b/nevergrad/optimization/lama/HybridSelfAdaptiveDifferentialEvolution.py @@ -0,0 +1,82 @@ +import numpy as np + + +class HybridSelfAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.population_size = 50 + self.F = 0.5 # Differential weight + self.CR = 0.9 # Crossover probability + self.T = 10 # Local search iterations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = self.population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Mutation step + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + trial = np.where(crossover, mutant, population[i]) + + # Self-adaptive local search strategy + if np.random.rand() < 0.5: + trial = self.local_search(trial, func) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + new_population.append(trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(self.T): + for i in range(self.dim): + x_new = best_x.copy() + step_size = np.random.uniform(-0.1, 0.1) + x_new[i] = np.clip(best_x[i] + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_F_CR(self, i): + # Adaptive parameters adjustment + if i % 100 == 0: + self.F = np.random.uniform(0.4, 0.9) + self.CR = np.random.uniform(0.1, 0.9) diff --git a/nevergrad/optimization/lama/HyperAdaptiveConvergenceStrategy.py b/nevergrad/optimization/lama/HyperAdaptiveConvergenceStrategy.py new file mode 100644 index 000000000..7b4586d73 --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveConvergenceStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class HyperAdaptiveConvergenceStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=120): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate, mutation_strength): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + for _ in range(num_children): + if np.random.rand() < 0.98: # Even higher crossover probability + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + population_size = 300 # Further increased initial population size for more diversity + num_generations = self.budget // population_size + elitism_size = population_size // 4 # Maintaining a robust 25% of population as elite + mutation_rate = 0.1 # Starting with a slightly reduced mutation rate + mutation_strength = 0.9 # Fine-tuning strength for better local searches + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, elitism_size) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + non_elite_size = population_size - elitism_size + offspring = self.crossover(best_population, non_elite_size) + offspring = self.mutate(offspring, mutation_rate, mutation_strength) + population = np.vstack((best_population, offspring)) + + # Adaptively adjust mutation parameters based on progress + if gen % 10 == 0 and mutation_rate > 0.01: + mutation_rate -= 0.01 # Slow and steady decrease + mutation_strength -= 0.05 # Smaller decrement for sustained exploration capability + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/HyperAdaptiveGradientRAMEDS.py b/nevergrad/optimization/lama/HyperAdaptiveGradientRAMEDS.py new file mode 100644 index 000000000..b229d1b5e --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveGradientRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class HyperAdaptiveGradientRAMEDS: + def __init__( + self, + budget, + population_size=50, + init_crossover=0.8, + F_min=0.4, + F_max=0.9, + memory_size=100, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = init_crossover + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + progress = evaluations / self.budget + # Adaptive mutation factor + F = self.F_min + (self.F_max - self.F_min) * np.sin(np.pi * progress) + + # Adaptive crossover based on population fitness variance + fitness_variance = np.var(fitness) + self.crossover_rate = self.crossover_rate * (1 - np.exp(-fitness_variance)) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite, elite_fitness = population[elite_indices].copy(), fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperAdaptiveHybridDEPSOwithDynamicRestart.py b/nevergrad/optimization/lama/HyperAdaptiveHybridDEPSOwithDynamicRestart.py new file mode 100644 index 000000000..3546d4680 --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveHybridDEPSOwithDynamicRestart.py @@ -0,0 +1,149 @@ +import numpy as np + + +class HyperAdaptiveHybridDEPSOwithDynamicRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Adjusted population size for balance + w = 0.6 # Inertia weight for PSO + c1 = 1.0 # Cognitive coefficient for PSO + c2 = 1.0 # Social coefficient for PSO + initial_F = 0.7 # Differential weight for DE + initial_CR = 0.8 # Crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.5 + 0.5 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HyperAdaptiveMemoryGuidedStrategyV74.py b/nevergrad/optimization/lama/HyperAdaptiveMemoryGuidedStrategyV74.py new file mode 100644 index 000000000..8602b574a --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveMemoryGuidedStrategyV74.py @@ -0,0 +1,74 @@ +import numpy as np + + +class HyperAdaptiveMemoryGuidedStrategyV74: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.6, CR_init=0.9, memory_size=20): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + candidates = list(range(size)) + candidates.remove(index) + a, b, c = np.random.choice(candidates, 3, replace=False) + mutant_base = population[a] + self.F * (population[b] - population[c]) + memory_contribution = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = mutant_base + 0.5 * memory_contribution # Memory influenced mutation + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial <= f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + elif np.random.rand() < 0.15: # Probabilistic replacement + self.memory[np.random.randint(len(self.memory))] = trial - target + return trial, f_trial + return target, f_target + + def adapt_parameters(self, current_eval, total_budget): + progress = current_eval / total_budget + self.F = 0.5 + 0.5 * np.sin(np.pi * progress) # Dynamic adaptation of F + self.CR = 0.5 + 0.5 * np.cos(np.pi * progress) # Dynamic adaptation of CR + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adapt_parameters(evaluations, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperAdaptivePrecisionOptimizer.py b/nevergrad/optimization/lama/HyperAdaptivePrecisionOptimizer.py new file mode 100644 index 000000000..a22aa59b5 --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptivePrecisionOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class HyperAdaptivePrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension set as per problem description + self.lb = -5.0 # Lower boundary of the search space + self.ub = 5.0 # Upper boundary of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature for vigorous initial exploration + T_min = 0.0001 # Minimum threshold temperature for precision exploitation + alpha = 0.95 # Cooling rate for a strong balance exploration-exploitation + + # Mutation and crossover parameters optimized further + F_base = 0.8 # Base mutation factor + CR = 0.95 # High crossover probability to maintain strong diversity + + population_size = 90 # Adjusted population size for a more thorough search + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation influenced by temperature and time + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Modify mutation factor dynamically based on temperature and remaining budget + dynamic_F = ( + F_base * np.exp(-0.15 * T) * (0.7 + 0.3 * np.cos(np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion incorporating delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Progressive adaptive cooling strategy with non-linear modulation + adaptive_cooling = alpha - 0.02 * np.sin(2.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperAdaptiveSinusoidalDifferentialSwarm.py b/nevergrad/optimization/lama/HyperAdaptiveSinusoidalDifferentialSwarm.py new file mode 100644 index 000000000..be8cdb688 --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveSinusoidalDifferentialSwarm.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperAdaptiveSinusoidalDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 250 # Further increased population size + self.F_base = 0.8 # Higher base mutation factor + self.CR_base = 0.9 # High base crossover probability + self.adaptive_F_amplitude = 0.3 # Reduced mutation factor amplitude for more stable mutation + self.adaptive_CR_amplitude = ( + 0.1 # Reduced crossover rate amplitude for stable exploration/exploitation + ) + self.phase_shift = np.pi / 4 # Phase shift to desynchronize mutation and crossover adaptations + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors with phase-shifted sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.adaptive_F_amplitude * np.sin(np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.cos( + np.pi * iteration_ratio + self.phase_shift + ) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin with adaptive F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure boundaries are respected + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/HyperAdaptiveStrategyDE.py b/nevergrad/optimization/lama/HyperAdaptiveStrategyDE.py new file mode 100644 index 000000000..8206b472c --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdaptiveStrategyDE.py @@ -0,0 +1,73 @@ +import numpy as np + + +class HyperAdaptiveStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.6, F_range=0.4, CR=0.95, strategy="adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically + if self.strategy == "adaptive": + idxs = np.argsort(fitness)[:2] # Select two best for breeding + base = population[idxs[np.random.randint(2)]] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F with more variability + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using a different approach: DE/current-to-best/2 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = population[np.random.choice(idxs, 4, replace=False)] + mutant = np.clip( + population[i] + F * (population[best_idx] - population[i]) + F * (a - b + c - d), + self.lb, + self.ub, + ) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperAdvancedDynamicPrecisionOptimizerV41.py b/nevergrad/optimization/lama/HyperAdvancedDynamicPrecisionOptimizerV41.py new file mode 100644 index 000000000..7add489d5 --- /dev/null +++ b/nevergrad/optimization/lama/HyperAdvancedDynamicPrecisionOptimizerV41.py @@ -0,0 +1,55 @@ +import numpy as np + + +class HyperAdvancedDynamicPrecisionOptimizerV41: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem description + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Enhanced temperature dynamics and control parameters + T = 1.2 # Higher initial temperature for more explorative early phases + T_min = 0.0003 # Lower minimum temperature to allow finer exploration at late stages + alpha = 0.95 # Slower cooling rate to extend effective search time + + # Refined mutation and crossover parameters for robust exploration and exploitation + F = 0.78 # Slightly increased Mutation factor for more aggressive explorative behavior + CR = 0.85 # Reduced Crossover probability to promote more individual trait preservation + + population_size = 84 # Slightly tweaked population size to enhance population dynamics + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation using sigmoid and hyperbolic tangent functions for precise control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = F * (0.65 + 0.35 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Incorporate a dynamic acceptance criteria with enhanced sensitivity to fitness changes + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling schedule with enhanced modulation + T *= alpha - 0.009 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperEvolvedDynamicPrecisionOptimizerV48.py b/nevergrad/optimization/lama/HyperEvolvedDynamicPrecisionOptimizerV48.py new file mode 100644 index 000000000..2038ae91b --- /dev/null +++ b/nevergrad/optimization/lama/HyperEvolvedDynamicPrecisionOptimizerV48.py @@ -0,0 +1,59 @@ +import numpy as np + + +class HyperEvolvedDynamicPrecisionOptimizerV48: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.20 # Higher starting temperature for more aggressive initial exploration + T_min = 0.0003 # Lower minimum temperature for prolonged fine-tuning + alpha = 0.93 # Slower cooling rate to maximize exploitation during the cooling phase + + # Mutation and crossover parameters are refined further + F = 0.79 # Slightly increased mutation factor to enhance exploratory mutations + CR = 0.85 # Adjusted crossover probability to maintain diversity while encouraging strong features + + population_size = 90 # Increased population size to improve sampling diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Initiate a dynamic mutation approach with a sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts with a sigmoid function for refined control + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a more sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperEvolvedDynamicRAMEDS.py b/nevergrad/optimization/lama/HyperEvolvedDynamicRAMEDS.py new file mode 100644 index 000000000..a97576021 --- /dev/null +++ b/nevergrad/optimization/lama/HyperEvolvedDynamicRAMEDS.py @@ -0,0 +1,78 @@ +import numpy as np + + +class HyperEvolvedDynamicRAMEDS: + def __init__( + self, budget, population_size=50, crossover_base=0.7, F0=0.5, F1=0.9, memory_size=50, elite_size=10 + ): + self.budget = budget + self.population_size = population_size + self.crossover_base = crossover_base # Base rate for crossover + self.F0 = F0 # Base mutation factor + self.F1 = F1 # Maximum mutation factor + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor using a logistic growth model + F_adjustment = (self.F1 - self.F0) * evaluations / self.budget + F = self.F0 + F_adjustment * np.random.rand() + + # Update elites periodically + if evaluations % (self.budget // 10) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(best_solution + F * (b - c), lb, ub) + + # Dynamic crossover rate adjusted by improvements in the fitness + improvements = np.max(fitness) - fitness + dynamic_cr = self.crossover_base + 0.25 * (improvements[i] / (np.max(improvements) + 1e-10)) + cross_points = np.random.rand(dimension) < dynamic_cr + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Memory update with better solutions + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperEvolvedRAMEDS.py b/nevergrad/optimization/lama/HyperEvolvedRAMEDS.py new file mode 100644 index 000000000..adefd8a9c --- /dev/null +++ b/nevergrad/optimization/lama/HyperEvolvedRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class HyperEvolvedRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.8, + F_max=1.2, + memory_size=100, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamic adjustment of mutation factor based on sigmoid of diversity + diversity = np.std(population, axis=0).mean() + F = self.F_min + (self.F_max - self.F_min) * np.exp(-diversity) + + # Dynamic crossover rate adjustment + self.crossover_rate = 0.5 + 0.45 * np.sin(np.pi * evaluations / self.budget) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Dynamic crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Adaptive memory strategy + if trial_fitness < np.max(memory_fitness): + replace_idx = np.argmax(memory_fitness) + memory[replace_idx] = population[i] # Store replaced solution in memory + memory_fitness[replace_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperFocusedAdaptiveElitistStrategyV5.py b/nevergrad/optimization/lama/HyperFocusedAdaptiveElitistStrategyV5.py new file mode 100644 index 000000000..4dfd2f65b --- /dev/null +++ b/nevergrad/optimization/lama/HyperFocusedAdaptiveElitistStrategyV5.py @@ -0,0 +1,81 @@ +import numpy as np + + +class HyperFocusedAdaptiveElitistStrategyV5: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.3, + mutation_intensity=0.05, + crossover_rate=0.8, + focus_mode="multi", + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.focus_mode = focus_mode # 'multi' for multi-focus, 'single' for single focus + + def __call__(self, func): + # Initialize the population uniformly within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Crossover between two elites or an elite and a random individual based on focus mode + if self.focus_mode == "multi" and np.random.random() < 0.5: + parent1 = elites[np.random.choice(len(elites))] + parent2 = population[np.random.randint(0, self.population_size)] + else: + parent_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parent_indices[0]], elites[parent_indices[1]] + child = self.recombine(parent1, parent2) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation with a decay factor that decreases as evaluations increase + scale = self.mutation_intensity * np.exp(-evaluations / self.budget * 5) + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2): + # Linear combination of parents with a random factor to vary contribution of each parent + alpha = np.random.uniform(0.4, 0.6) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/HyperOptimalRAMEDS.py b/nevergrad/optimization/lama/HyperOptimalRAMEDS.py new file mode 100644 index 000000000..f0451f1e7 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimalRAMEDS.py @@ -0,0 +1,82 @@ +import numpy as np + + +class HyperOptimalRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.4, + F_max=0.9, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites more effectively + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Dynamic mutation and crossover adaptation + F = np.clip( + self.F_max - (evaluations / self.budget) * (self.F_max - self.F_min), self.F_min, self.F_max + ) + self.crossover_rate = np.clip(self.crossover_rate - 0.01 * np.var(fitness), 0.7, 1.0) + + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.5 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - b + c - population[i]), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperOptimalStrategicEvolutionaryOptimizerV58.py b/nevergrad/optimization/lama/HyperOptimalStrategicEvolutionaryOptimizerV58.py new file mode 100644 index 000000000..213b51c71 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimalStrategicEvolutionaryOptimizerV58.py @@ -0,0 +1,77 @@ +import numpy as np + + +class HyperOptimalStrategicEvolutionaryOptimizerV58: + def __init__( + self, + budget=10000, + population_size=138, + F_base=0.53, + F_range=0.47, + CR=0.93, + elite_fraction=0.08, + mutation_strategy="dynamic", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Fine-tuned base mutation factor + self.F_range = F_range # Expanded mutation range for better exploration + self.CR = CR # Crossover probability adjusted for improved convergence + self.elite_fraction = elite_fraction # Reduced elite fraction for maintaining diversity + self.mutation_strategy = mutation_strategy # Dynamic mutation strategy for responsive adaptation + self.dim = 5 # Dimensionality is set to 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Select mutation base dynamically + if np.random.rand() < 0.85: # Increased probability to select the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjust F within a broader range + F = ( + self.F_base + np.sin(np.pi * np.random.rand()) * self.F_range + ) # Using sine modulation for F + + # Mutation using DE's rand/1 strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget is reached + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizer.py b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizer.py new file mode 100644 index 000000000..3830f88e6 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class HyperOptimizedDynamicPrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound + self.ub = 5.0 # Upper bound + + def __call__(self, func): + # Initialize advanced temperature and cooling parameters + T = 1.2 # Initial temperature set higher for more aggressive exploration early on + T_min = 0.0005 # Lower minimum temperature for detailed exploration late in the search + alpha = 0.92 # Slower cooling rate to maintain exploration capabilities longer + + population_size = 80 # Adjusted population size for better coverage of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhancing mutation strategy and adaptive acceptance criteria + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Temperature and evaluation count influenced dynamic mutation factor + dynamic_F = ( + 0.8 + * np.exp(-0.15 * T) + * (0.7 + 0.3 * np.cos(1.2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR = 0.85 + 0.1 * np.sin( + 2 * np.pi * evaluation_count / self.budget + ) # Dynamic crossover probability + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adapting the acceptance criterion to be more aggressive based on fitness improvements + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Fine-tuning the cooling strategy based on the search process dynamics + adaptive_cooling = alpha - 0.015 * np.cos(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV12.py b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV12.py new file mode 100644 index 000000000..954b0d581 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV12.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperOptimizedDynamicPrecisionOptimizerV12: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.20 # Increased starting temperature for broader initial exploration + T_min = 0.0003 # Reduced minimum temperature for exhaustive end-stage exploration + alpha = 0.93 # Moderately slow cooling rate to extend exploration duration + + # Mutation and crossover parameters are optimized for exploration and exploitation balance + F = 0.7 # Decreased Mutation factor to promote more stable search + CR = 0.85 # Slightly reduced Crossover probability to preserve individual traits longer + + population_size = 85 # Adjusted population size for efficient evaluation coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation strategy with a more reactive sigmoid-based modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Introduce a more sensitive dynamic mutation factor + dynamic_F = F * (0.8 + 0.2 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria with adjusted temperature sensitivity + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal amplitude modulation + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV42.py b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV42.py new file mode 100644 index 000000000..8d2410dc4 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV42.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperOptimizedDynamicPrecisionOptimizerV42: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem description + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters for enhanced dynamic control + T = 1.2 # Increased starting temperature for extensive initial exploration + T_min = 0.0004 # Lower minimum temperature for deeper exploration in later stages + alpha = 0.93 # Optimized cooling rate to balance exploration and convergence + + # Refined mutation and crossover parameters for better performance + F = 0.77 # Mutation factor adjusted for balanced exploration and exploitation + CR = 0.88 # Fine-tuned crossover probability to maintain diversity and solution quality + + population_size = 85 # Adjusted population size for optimal exploration and exploitation balance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation strategy with enhanced control precision + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor adapting with a sigmoid function for precise control + dynamic_F = F * (0.72 + 0.28 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with a temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.065 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling schedule enhanced with sinusoidal modulation + T *= alpha - 0.01 * np.sin(3 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV43.py b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV43.py new file mode 100644 index 000000000..3fbf9c10f --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV43.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperOptimizedDynamicPrecisionOptimizerV43: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem specification + self.lb = -5.0 # Lower bound as per the problem specification + self.ub = 5.0 # Upper bound as per the problem specification + + def __call__(self, func): + # Initialize temperature and progressively adaptive cooling parameters + T = 1.2 # Increased initial temperature for aggressive initial exploration + T_min = 0.0003 # Lower minimum temperature to enhance fine-grained exploration in later stages + alpha = 0.91 # Optimized cooling rate to extend exploration duration + + # Mutant and crossover parameters fine-tuned for enhanced performance + F = 0.78 # Slightly increased Mutation factor to explore more diverse solutions + CR = 0.89 # Increased Crossover probability to ensure better gene mixing + + population_size = 90 # Increased population size to enhance diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement a dynamic mutation strategy with refined control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor adapts using a sigmoid function for precise control + dynamic_F = F * (0.75 + 0.25 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with a temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling schedule with refined sinusoidal modulation + T *= alpha - 0.012 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV57.py b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV57.py new file mode 100644 index 000000000..6cb3fd6d6 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedDynamicPrecisionOptimizerV57.py @@ -0,0 +1,58 @@ +import numpy as np + + +class HyperOptimizedDynamicPrecisionOptimizerV57: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.10 # Initial temperature for substantial exploration upfront + T_min = 0.0001 # Lower minimum temperature to allow for fine-tuned exploitation + alpha = 0.93 # Cooling rate slightly reduced to extend the effective search phase + + # Mutation and crossover parameters refined + F = 0.79 # Mutation factor adjusted for a balance between exploration and exploitation + CR = 0.88 # Crossover probability adjusted for maintaining genetic diversity + + population_size = 100 # Increased population size for better coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation strategy with sigmoid-based adaptive control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.exp(-0.06 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criteria with a more refined temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a more nuanced modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedEvolutionaryGradientOptimizerV61.py b/nevergrad/optimization/lama/HyperOptimizedEvolutionaryGradientOptimizerV61.py new file mode 100644 index 000000000..b07e39f15 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedEvolutionaryGradientOptimizerV61.py @@ -0,0 +1,80 @@ +import numpy as np + + +class HyperOptimizedEvolutionaryGradientOptimizerV61: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.42, + CR=0.96, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor fine-tuned for balanced exploration + self.F_range = F_range # Controlled range for mutation factor to enhance mutation stability + self.CR = CR # Crossover probability optimized for higher robustness + self.elite_fraction = elite_fraction # Increased elite fraction to focus more on the best candidates + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy to dynamically adapt to fitness landscape + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Select base individual from elite with a higher probability for current best + if np.random.rand() < 0.85: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Use random elite as base + base = population[np.random.choice(elite_indices)] + + # Mutation factor F dynamically adjusted + F = self.F_base + (np.random.rand() * 2 - 1) * self.F_range + + # Mutation (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover (binomial) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperOptimizedGradientEnhancedRAMEDS.py b/nevergrad/optimization/lama/HyperOptimizedGradientEnhancedRAMEDS.py new file mode 100644 index 000000000..fa8b7c1f4 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedGradientEnhancedRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class HyperOptimizedGradientEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.initial_crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adapt mutation factor based on a sigmoidal function of the progress + progress = evaluations / self.budget + F = self.F_min + (self.F_max - self.F_min) / (1 + np.exp(-10 * (progress - 0.5))) + crossover_rate = self.initial_crossover_rate * ( + 1 - np.sin(np.pi * progress) + ) # Sine-based annealing for exploration and exploitation + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (elite[np.random.randint(self.elite_size)] - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy, replacing the worst with the better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV47.py b/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV47.py new file mode 100644 index 000000000..8bb272323 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV47.py @@ -0,0 +1,79 @@ +import numpy as np + + +class HyperOptimizedMultiStrategicEvolutionaryOptimizerV47: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.4, + CR=0.93, + elite_fraction=0.05, + mutation_strategy="hybrid", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased mutation factor + self.F_range = F_range # Reduced mutation range for precision + self.CR = CR # Crossover probability fine-tuned + self.elite_fraction = elite_fraction # Decreased elite fraction to maintain diversity + self.mutation_strategy = mutation_strategy # Introducing a hybrid mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "hybrid": + # Hybrid strategy: choose base from random elite or best with varying probability + if np.random.rand() < 0.85: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Default to using an elite individual as a base + base = population[np.random.choice(elite_indices)] + + # Mutation factor dynamic adjustment + F = self.F_base + (np.random.rand() * self.F_range - self.F_range / 2) + + # DE/rand/1 mutation strategy + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Break if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV48.py b/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV48.py new file mode 100644 index 000000000..94cecd8fb --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedMultiStrategicEvolutionaryOptimizerV48.py @@ -0,0 +1,80 @@ +import numpy as np + + +class HyperOptimizedMultiStrategicEvolutionaryOptimizerV48: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased base mutation factor for stronger exploration + self.F_range = F_range # Adjusted mutation range for controlled exploration + self.CR = CR # Adjusted crossover probability for better exploitation + self.elite_fraction = ( + elite_fraction # Adjusted elite fraction to balance exploration and exploitation + ) + self.mutation_strategy = mutation_strategy # Employing adaptive mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Adaptive strategy: choose between best or random elite based on adaptive probability + if np.random.rand() < 0.8: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Mutation factor dynamic adjustment for more aggressive exploration + F = self.F_base + (np.random.rand() * self.F_range - self.F_range / 2) + + # DE/rand/1 mutation strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Break if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperOptimizedRAMEDS.py b/nevergrad/optimization/lama/HyperOptimizedRAMEDS.py new file mode 100644 index 000000000..249745243 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedRAMEDS.py @@ -0,0 +1,75 @@ +import numpy as np + + +class HyperOptimizedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.2, + F_max=0.8, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min # Lower starting mutation factor for finer adjustments + self.F_max = F_max # Lower max mutation for less disruptive mutations + self.memory_size = memory_size + self.elite_size = elite_size # Smaller elite group for more focused exploitation + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor based on progress and performance variance + progress = evaluations / self.budget + F = self.F_min + (self.F_max - self.F_min) * np.tanh(4 * (progress - 0.5)) + + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Adaptive Crossover: Consider progress to decide on crossover rate + adaptive_cr = self.crossover_rate * (1 - progress) + 0.5 * progress + cross_points = np.random.rand(dimension) < adaptive_cr + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory strategically + if trial_fitness < np.max(memory_fitness): + replace_idx = np.argmax(memory_fitness) + memory[replace_idx] = trial + memory_fitness[replace_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperOptimizedSpiralDifferentialOptimizerV8.py b/nevergrad/optimization/lama/HyperOptimizedSpiralDifferentialOptimizerV8.py new file mode 100644 index 000000000..ed1d2fba8 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedSpiralDifferentialOptimizerV8.py @@ -0,0 +1,75 @@ +import numpy as np + + +class HyperOptimizedSpiralDifferentialOptimizerV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set as constant + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population + population_size = 100 # Adjusted population size for balance between exploration and exploitation + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Tuned parameters for spirals and mutation + min_radius = 0.0001 # More precise minimum radius + max_radius = 1.0 # Lower maximum radius to focus search + radius_decay = 0.97 # More gradual decay + mutation_factor = 1.2 # Increased mutation factor for added diversity + crossover_probability = 0.7 # Increased crossover rate for more exploration + + # Enhanced local search parameters + step_size = 0.0005 # Reduced step size for finer local movements + gradient_steps = 300 # More localized optimization steps + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential evolution mutation strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Conduct crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral motion integration for non-linear exploration + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Perform local search with a gradient descent-like approach + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < func(trial): + trial = new_trial + + # Evaluate and update the population + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedThermalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/HyperOptimizedThermalEvolutionaryOptimizer.py new file mode 100644 index 000000000..298f41e6a --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedThermalEvolutionaryOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperOptimizedThermalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and enhanced cooling parameters + T = 1.2 # Starting temperature slightly higher for initial global exploration + T_min = 0.0003 # Lower minimum temperature for prolonged fine-tuning + alpha = 0.92 # Slower cooling rate to extend exploration + + # Optimized mutation and crossover parameters + F_base = 0.75 # Base mutation factor for robust exploration and exploitation balance + CR = 0.88 # Crossover probability finely tuned for better offspring quality + + population_size = 80 # Optimal population size for this budget and problem complexity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation dynamics with temperature-dependent mutation scaling + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and evaluation progress + dynamic_F = F_base * np.exp(-0.15 * T) * (0.5 + 0.5 * np.tanh(evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion incorporating both temperature and delta fitness + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling strategy with periodic modulation for stagnation avoidance + adaptive_cooling = alpha - 0.015 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperOptimizedUltraRefinedRAMEDS.py b/nevergrad/optimization/lama/HyperOptimizedUltraRefinedRAMEDS.py new file mode 100644 index 000000000..d9b623fc4 --- /dev/null +++ b/nevergrad/optimization/lama/HyperOptimizedUltraRefinedRAMEDS.py @@ -0,0 +1,79 @@ +import numpy as np + + +class HyperOptimizedUltraRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamic adaptation of mutation factor based on convergence rate + convergence_rate = np.std(fitness) / (np.mean(fitness) + np.finfo(float).eps) + F = self.F_min + (self.F_max - self.F_min) * convergence_rate + + # Update and manage elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = elite[np.random.randint(0, self.elite_size)] + + # Introduce stochastic blending to mutation + mutant = np.clip( + c + F * (best_solution - c + a - b + np.random.rand() * (best_or_elite - c)), lb, ub + ) + + # Crossover process + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperPreciseEvolutionaryOptimizer.py b/nevergrad/optimization/lama/HyperPreciseEvolutionaryOptimizer.py new file mode 100644 index 000000000..11df51d09 --- /dev/null +++ b/nevergrad/optimization/lama/HyperPreciseEvolutionaryOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class HyperPreciseEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters with refined values + T = 1.2 # Starting temperature, slightly higher for more global search initially + T_min = 0.0008 # Lower threshold temperature for extended fine-tuning + alpha = 0.92 # Cooling rate, optimized for gradual reduction + + # Mutation and crossover parameters finely tuned + F_base = 0.6 # Base mutation factor for stability + CR = 0.85 # Crossover probability to maintain sufficient diversity + + population_size = 80 # Optimal population size considering budget and dimension + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics and sophisticated temperature-dependent selection + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=True)] + # Intricate mutation factor involving time decay and temperature influence + dynamic_F = ( + F_base + * (1 - np.exp(-0.05 * T)) + * (0.7 + 0.3 * np.cos(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion that considers delta fitness and dynamic temperature + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with a touch of non-linear modulation + adaptive_cooling = alpha + 0.02 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperPrecisionEvolutionaryOptimizerV23.py b/nevergrad/optimization/lama/HyperPrecisionEvolutionaryOptimizerV23.py new file mode 100644 index 000000000..d6625cc17 --- /dev/null +++ b/nevergrad/optimization/lama/HyperPrecisionEvolutionaryOptimizerV23.py @@ -0,0 +1,81 @@ +import numpy as np + + +class HyperPrecisionEvolutionaryOptimizerV23: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.38, + CR=0.92, + elite_fraction=0.2, + mutation_strategy="focused_precision_adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Enhanced mutation strategy focusing on adaptive precision + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "focused_precision_adaptive": + # Enhanced focus on the best individual with adaptive precision for mutation + if np.random.rand() < 0.8: # Higher probability for best individual selection + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Standard elite selection for base individual + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F with reduced range for more controlled mutations + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range * 0.5 + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with adaptive probability + cross_points = np.random.rand(self.dim) < self.CR * 0.95 + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/HyperQuantumConvergenceOptimizer.py b/nevergrad/optimization/lama/HyperQuantumConvergenceOptimizer.py new file mode 100644 index 000000000..ed4f6a7d6 --- /dev/null +++ b/nevergrad/optimization/lama/HyperQuantumConvergenceOptimizer.py @@ -0,0 +1,55 @@ +import numpy as np + + +class HyperQuantumConvergenceOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 500 # Population size adjusted for focused search + self.F = 0.5 # Differential weight, slightly reduced for stability + self.CR = 0.8 # Crossover probability, adjusted to prevent premature convergence + self.quantum_probability = 0.25 # Increased probability of quantum mutation + self.learning_rate = 0.05 # Reduced learning rate for more subtle quantum adjustments + self.adaptation_factor = 0.1 # Increased adaptation factor for dynamic response + + def __call__(self, func): + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + for i in range(int(self.budget / self.pop_size)): + # Adjusting parameters based on phase of optimization + phase = i / (self.budget / self.pop_size) + F = self.F + self.adaptation_factor * np.sin(np.pi * phase * 2) # Faster sinusoidal variation + CR = self.CR + self.adaptation_factor * np.cos(np.pi * phase / 2) # Slower cosine variation + + for j in range(self.pop_size): + if np.random.rand() < self.quantum_probability: + # Enhanced quantum mutation + mean_state = best_ind + self.learning_rate * (pop[j] - best_ind) + scale = self.learning_rate * np.sqrt(np.abs(pop[j] - best_ind)) + mutation = np.random.normal(mean_state, scale) + mutation = np.clip(mutation, -5.0, 5.0) + else: + # Differential Evolution Mutation: DE/rand-to-best/1/bin + indices = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutation = pop[j] + F * (best_ind - pop[j]) + F * (b - c) + mutation = np.clip(mutation, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < CR, mutation, pop[j]) + + # Fitness Evaluation and Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/HyperQuantumStateCrossoverOptimization.py b/nevergrad/optimization/lama/HyperQuantumStateCrossoverOptimization.py new file mode 100644 index 000000000..898fa95c9 --- /dev/null +++ b/nevergrad/optimization/lama/HyperQuantumStateCrossoverOptimization.py @@ -0,0 +1,93 @@ +import numpy as np + + +class HyperQuantumStateCrossoverOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.1, + mutation_intensity=0.03, + crossover_rate=0.85, + quantum_prob=0.2, + gamma=0.3, + beta=0.6, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + self.beta = beta # Coefficient for dynamic mutation intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum-inspired updates + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Dynamic mutation intensity based on progress + intensity = self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Enhanced quantum inspired state update to adaptively explore based on the best solution""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/HyperRAMEDS.py b/nevergrad/optimization/lama/HyperRAMEDS.py new file mode 100644 index 000000000..ddd296b5c --- /dev/null +++ b/nevergrad/optimization/lama/HyperRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class HyperRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.4, + F_max=0.8, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites based on fitness + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with non-linear modulation + F = self.F_max - (self.F_max - self.F_min) * (1 - np.exp(-4 * evaluations / self.budget)) + + # Mutation: DE/best/1/binomial with adaptive mutation based on elite and memory + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.65 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(best_or_elite + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory if the trial solution is better than the worst in memory + worst_memory_idx = np.argmax(memory_fitness) + if trial_fitness < memory_fitness[worst_memory_idx]: + memory[worst_memory_idx] = trial.copy() + memory_fitness[worst_memory_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperRefinedAdaptiveDynamicPrecisionOptimizerV52.py b/nevergrad/optimization/lama/HyperRefinedAdaptiveDynamicPrecisionOptimizerV52.py new file mode 100644 index 000000000..5f85291cd --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedAdaptiveDynamicPrecisionOptimizerV52.py @@ -0,0 +1,54 @@ +import numpy as np + + +class HyperRefinedAdaptiveDynamicPrecisionOptimizerV52: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize refined temperature and adaptive cooling parameters + T = 1.1 # Optimized initial temperature for broader exploration + T_min = 0.0003 # Reduced minimal temperature for extended fine-tuning in late optimization stages + alpha = 0.93 # Slightly adjusted cooling rate to enhance prolonged search effectiveness + + # Mutation and crossover parameters fine-tuned for diversity and convergence balance + F = 0.8 # Moderately high mutation factor to encourage exploratory mutations + CR = 0.88 # High crossover probability to ensure effective information exchange + + population_size = 85 # Population size optimized based on prior performance reviews + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introducing a more aggressive dynamic mutation based on exploration phase + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor adjusted with exponential decay and sigmoid modulation + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.6 + 0.4 * np.tanh(5 * (evaluation_count / self.budget - 0.6))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy enhanced with sinusoidal modulation for phase-based cooling + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperRefinedAdaptiveGuidedMutationOptimizer.py b/nevergrad/optimization/lama/HyperRefinedAdaptiveGuidedMutationOptimizer.py new file mode 100644 index 000000000..a5c954e77 --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedAdaptiveGuidedMutationOptimizer.py @@ -0,0 +1,79 @@ +import numpy as np + + +class HyperRefinedAdaptiveGuidedMutationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 100 # Reduced population for faster convergence + mutation_factor = 0.9 # Higher mutation factor initially for diverse exploration + crossover_prob = 0.8 # Higher crossover probability for significant trial generation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mutation and local search strategy + local_search_frequency = 100 # More frequent local search + local_search_radius = 0.05 # Smaller radius for more precise local search + + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Mutation and crossover phases + indices = np.arange(population_size) + indices = np.delete(indices, i) + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + + if current_budget % local_search_frequency == 0: + # Local search on a smaller radius around the best solution encountered so far + local_mutant = best_solution + local_search_radius * np.random.randn(self.dim) + local_mutant = np.clip(local_mutant, self.lower_bound, self.upper_bound) + local_fitness = func(local_mutant) + current_budget += 1 + + if local_fitness < best_fitness: + best_solution = local_mutant + best_fitness = local_fitness + + mutant = best_solution + mutation_factor * (x1 - x2 + x3 - population[i]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + + # Adaptively adjust mutation and crossover parameters + mutation_factor = max(0.5, mutation_factor * 0.99) # Gradual decrease in mutation factor + crossover_prob = min(0.9, crossover_prob * 1.01) # Gradual increase in crossover probability + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionOptimizer.py b/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionOptimizer.py new file mode 100644 index 000000000..24f41af5a --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class HyperRefinedAdaptivePrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature slightly higher for initial global exploration + T_min = 0.0005 # Lower minimum temperature for deep exploitation + alpha = 0.92 # Adjusted cooling rate for a more nuanced temperature drop + + # Optimized mutation and crossover parameters for a dynamic balance + F_base = 0.8 # Base mutation factor for initial broad mutations + CR_base = 0.85 # Base crossover probability for maintaining solution diversity + + population_size = 80 # Adjusted population size for better coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Evolution loop with dynamically adapting mutation and crossover + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adjusted by temperature and exploration depth + dynamic_F = ( + F_base + * np.exp(-0.12 * T) + * (0.75 + 0.25 * np.sin(1.5 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR_dynamic = CR_base + 0.1 * np.cos(2 * np.pi * evaluation_count / self.budget) + cross_points = np.random.rand(self.dim) < CR_dynamic + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced adaptive acceptance criterion based on delta fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Progressive cooling strategy with adaptive modulation based on exploration depth + adaptive_cooling = alpha - 0.015 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionSearch.py b/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionSearch.py new file mode 100644 index 000000000..ca2e0722e --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedAdaptivePrecisionSearch.py @@ -0,0 +1,57 @@ +import numpy as np + + +class HyperRefinedAdaptivePrecisionSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Utilize an initial center-based strategy to determine a good starting point + center_point = np.random.uniform(-5.0, 5.0, self.dim) + center_f = func(center_point) + if center_f < self.f_opt: + self.f_opt = center_f + self.x_opt = center_point + + # Define adaptive grid dynamics + num_divisions = 3 # Smaller to begin a finer initial partitioning + division_size = 10.0 / num_divisions + refine_factor = 0.5 # Stronger focus refinement + adaptive_budget = self.budget + + # Start with a wider grid and progressively refine + for iteration in range(1, 4): # Deepening the focus through iterations + grid_offsets = np.linspace(-division_size, division_size, num_divisions) + best_local_center = self.x_opt + best_local_f = self.f_opt + + # Search each division based on the current best + for offset_dims in np.ndindex(*(num_divisions,) * self.dim): + local_center = best_local_center + np.array([grid_offsets[dim] for dim in offset_dims]) + local_center = np.clip(local_center, -5.0, 5.0) # Ensure it is within bounds + local_budget = max(1, adaptive_budget // (num_divisions**self.dim)) + + # Explore this division + for _ in range(local_budget): + candidate = local_center + np.random.uniform(-division_size, division_size, self.dim) + candidate_f = func(candidate) + if candidate_f < best_local_f: + best_local_f = candidate_f + best_local_center = candidate + + adaptive_budget -= local_budget + + # Update the best found in this iteration + if best_local_f < self.f_opt: + self.f_opt = best_local_f + self.x_opt = best_local_center + + # Refine grid and division size for next iteration + division_size *= refine_factor # Narrower focus + num_divisions = 2 # Less divisions, more focus + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV3.py b/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV3.py new file mode 100644 index 000000000..10a7d6080 --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV3.py @@ -0,0 +1,59 @@ +import numpy as np + + +class HyperRefinedDynamicPrecisionOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature, slightly higher for more initial exploration + T_min = 0.001 # Minimum temperature threshold for annealing + alpha = 0.95 # Cooling rate, slightly slower to allow for more thorough exploration + + # Mutation and crossover parameters optimized further + F = 0.8 # Slightly increased Mutation factor for enhanced exploratory capabilities + CR = 0.88 # Fine-tuned Crossover probability to balance diversity and trait propagation + + population_size = 70 # Slightly reduced population size to allow more generations within the budget + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics and temperature-dependent acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and progress + dynamic_F = ( + F * np.exp(-0.08 * T) * (0.65 + 0.35 * np.sin(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criterion based on delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.04 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy that adjusts based on current performance and remaining budget + adaptive_cooling = alpha - 0.007 * np.cos(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV49.py b/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV49.py new file mode 100644 index 000000000..7dc6fa219 --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedDynamicPrecisionOptimizerV49.py @@ -0,0 +1,53 @@ +import numpy as np + + +class HyperRefinedDynamicPrecisionOptimizerV49: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound + self.ub = 5.0 # Upper bound + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Initial temperature adjusted for balance between exploration and exploitation + T_min = 0.0003 # Lower minimum temperature to allow fine-grained adjustments late in the search + alpha = 0.95 # Reduced cooling rate to extend the exploitation phase + + # Mutation and crossover parameters for enhanced search dynamics + F = 0.8 # Increased mutation factor to encourage diversity in the population + CR = 0.9 # Increased crossover rate to better mix beneficial traits + + population_size = 85 # Optimal population size after tuning + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with a sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = F * (0.9 + 0.1 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + if trial_fitness < fitness[i]: # Direct acceptance of better solutions + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with logistic decay for temperature + adaptive_cooling = alpha - 0.005 * ( + 1 / (1 + np.exp(-10 * (evaluation_count / self.budget - 0.5))) + ) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/HyperRefinedEnhancedRAMEDS.py b/nevergrad/optimization/lama/HyperRefinedEnhancedRAMEDS.py new file mode 100644 index 000000000..f1ddd366a --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedEnhancedRAMEDS.py @@ -0,0 +1,84 @@ +import numpy as np + + +class HyperRefinedEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Logistic growth function for mutation adaptation + F = self.F_min + (self.F_max - self.F_min) / (1 + np.exp(-12 * (evaluations / self.budget - 0.5))) + + # Periodic update of crossover rate to ensure diverse genetic mixing over time + self.crossover_rate = 0.5 + 0.45 * np.cos(2 * np.pi * evaluations / self.budget) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update focuses on gradually replacing less performing members + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/HyperRefinedQuantumVelocityOptimizer.py b/nevergrad/optimization/lama/HyperRefinedQuantumVelocityOptimizer.py new file mode 100644 index 000000000..8e41644c1 --- /dev/null +++ b/nevergrad/optimization/lama/HyperRefinedQuantumVelocityOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class HyperRefinedQuantumVelocityOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 40 # Adjusted for a balance between exploration and exploitation + inertia_weight = 0.7 # Further fine-tuned inertia weight + cognitive_coefficient = 2.8 # Boosted cognitive learning + social_coefficient = 2.8 # Boosted social learning + velocity_limit = 0.12 # Adjusted velocity limit for more dynamic movement + quantum_momentum = 0.015 # Slightly increased for more impactful yet subtle quantum influences + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + inertia_decay = np.power((1 - (current_budget / self.budget)), 4) # More aggressive inertia decay + w = inertia_weight * inertia_decay + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump based on a dynamically adjusted probability + quantum_probability = 0.03 * np.exp( + -12 * (current_budget / self.budget) + ) # Dynamic adjustment for quantum jump probability + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # PSO velocity updates with enhanced clamping technique + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Evaluate new positions + fitness = func(population[i]) + current_budget += 1 + + # Update personal and global bests if improvements are found + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/HyperSpiralDifferentialClimber.py b/nevergrad/optimization/lama/HyperSpiralDifferentialClimber.py new file mode 100644 index 000000000..2e99ca65a --- /dev/null +++ b/nevergrad/optimization/lama/HyperSpiralDifferentialClimber.py @@ -0,0 +1,73 @@ +import numpy as np + + +class HyperSpiralDifferentialClimber: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population and parameters + population_size = 300 # Increased size for broader initial coverage + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Parameters for spiral dynamics + min_radius = 0.05 # Smaller minimum radius for finer local search + max_radius = 4.5 # Adjusted starting radius + radius_decay = 0.95 # Slower decay for prolonged spiral influence + mutation_factor = 0.8 # Adjusted mutation for better local exploitation + crossover_probability = 0.8 # Increased crossover for more recombination + + # Additional gradient-like update + step_size = 0.1 + gradient_steps = 5 # Number of gradient-like steps to perform + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential Evolution Strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Apply spiral dynamics + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Additional gradient-like search for local improvement + for _ in range(gradient_steps): + new_trial = trial + np.random.randn(self.dim) * step_size + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation + f_trial = func(trial) + evaluations_left -= 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/HyperSpiralDifferentialClimberV2.py b/nevergrad/optimization/lama/HyperSpiralDifferentialClimberV2.py new file mode 100644 index 000000000..f93904b09 --- /dev/null +++ b/nevergrad/optimization/lama/HyperSpiralDifferentialClimberV2.py @@ -0,0 +1,72 @@ +import numpy as np + + +class HyperSpiralDifferentialClimberV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population and parameters + population_size = 500 # Increased size for even broader initial coverage + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Parameters for spiral dynamics and enhanced search techniques + min_radius = 0.01 # Further reduced for finer local search + max_radius = 4.9 # Slightly reduced maximum radius + radius_decay = 0.97 # Slower decay to maintain spiral influence longer + mutation_factor = 0.75 # Slightly reduced mutation for stability + crossover_probability = 0.85 # Increased crossover probability + + # Enhanced gradient-like search parameters + step_size = 0.05 # Reduced step size for more precise adjustments + gradient_steps = 10 # Increased number of gradient steps + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential Evolution Strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Apply spiral dynamics + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Enhanced gradient-like search for local refinement + for _ in range(gradient_steps): + new_trial = trial + np.random.randn(self.dim) * step_size + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation + f_trial = func(trial) + evaluations_left -= 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/IADEA.py b/nevergrad/optimization/lama/IADEA.py new file mode 100644 index 000000000..c4c65a0b1 --- /dev/null +++ b/nevergrad/optimization/lama/IADEA.py @@ -0,0 +1,75 @@ +import numpy as np + + +class IADEA: + def __init__(self, budget, population_size=50, crossover_rate=0.9, F_min=0.6, F_max=0.9, archive_size=30): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.archive_size = archive_size + + def __call__(self, func): + # Bounds and dimensionality + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize archive + archive = np.empty((0, dimension)) + + # Best solution found + best_idx = np.argmin(fitness) + best_solution = population[best_idx, :] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor based on progression + F = np.random.uniform(self.F_min, self.F_max) * (1 - evaluations / self.budget) + + # Mutation: DE/rand/1/bin with archive incorporation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if archive.size > 0 and np.random.rand() < 0.15: # Using the archive for mutation + arch_idx = np.random.randint(0, archive.shape[0]) + a = archive[arch_idx] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update archive if useful + if archive.shape[0] < self.archive_size: + archive = np.vstack([archive, population[i]]) + else: + archive[np.random.randint(0, self.archive_size)] = population[i] + + # Update population + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/IAGEA.py b/nevergrad/optimization/lama/IAGEA.py new file mode 100644 index 000000000..0e2fae6e2 --- /dev/null +++ b/nevergrad/optimization/lama/IAGEA.py @@ -0,0 +1,74 @@ +import numpy as np + + +class IAGEA: + def __init__(self, budget, population_size=100, crossover_prob=0.9, mutation_factor=0.5, adaptive=True): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.crossover_prob = crossover_prob + self.mutation_factor = mutation_factor + self.adaptive = adaptive + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Adaptive strategy parameters + success_rate = 0.1 + learning_rate = 0.1 + + while num_evals < self.budget: + new_population = [] + num_successes = 0 + + for i in range(self.population_size): + # Mutation: DE/rand/1/bin with adaptive mutation factor + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + self.mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + num_successes += 1 + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + else: + new_population.append(population[i]) + + if num_evals >= self.budget: + break + + population = np.array(new_population) + + # Adapt mutation factor based on success rate + if self.adaptive: + success_rate = num_successes / self.population_size + if success_rate > 0.2: + self.mutation_factor *= 1 + learning_rate + elif success_rate < 0.2: + self.mutation_factor *= 1 - learning_rate + self.mutation_factor = np.clip(self.mutation_factor, 0.1, 1.0) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/IALNF.py b/nevergrad/optimization/lama/IALNF.py new file mode 100644 index 000000000..455507b4e --- /dev/null +++ b/nevergrad/optimization/lama/IALNF.py @@ -0,0 +1,70 @@ +import numpy as np + + +class IALNF: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.learning_rate = 0.1 + self.F_base = 0.5 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best, F): + new_population = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.arange(len(population)), 3, replace=False) + a, b, c = population[idxs] + mutant_vector = np.clip( + a + F * (b - c) + self.learning_rate * (best - population[i]), self.bounds[0], self.bounds[1] + ) + new_population[i] = mutant_vector + return new_population + + def crossover(self, target, mutant, CR): + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + best_fitness = fitness[best_index] + previous_best = best_fitness + + while evaluations < self.budget: + F = np.random.normal(self.F_base, 0.1) * (1 + 0.1 * np.random.rand()) + CR = 0.1 + 0.5 * np.random.rand() + mutants = self.mutate(population, population[best_index], F) + trials = np.array( + [self.crossover(population[i], mutants[i], CR) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + if fitness[i] < best_fitness: + best_fitness = fitness[i] + best_index = i + + if best_fitness < previous_best: + self.learning_rate *= 1.1 + previous_best = best_fitness + else: + self.learning_rate *= 0.9 + if np.random.rand() < 0.1: # Random jump with 10% probability on stagnation + population[np.random.choice(len(population))] = np.random.uniform( + self.bounds[0], self.bounds[1], self.dimension + ) + + return best_fitness, population[best_index] diff --git a/nevergrad/optimization/lama/IASDD.py b/nevergrad/optimization/lama/IASDD.py new file mode 100644 index 000000000..6cbf90c93 --- /dev/null +++ b/nevergrad/optimization/lama/IASDD.py @@ -0,0 +1,62 @@ +import numpy as np + + +class IASDD: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 50 + population = np.random.uniform(*self.bounds, (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def adapt_search_parameters(self, std_dev): + # Dynamically adjust search parameters based on population's standard deviation + if std_dev < 0.1: + return 0.1 # Narrow search + elif std_dev < 0.5: + return 0.3 # Moderate search + else: + return 0.5 # Wide search + + def __call__(self, func): + population, population_size = self.initialize() + best_fitness = np.Inf + best_individual = None + evaluations = 0 + + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += population_size + + # Update global best + min_idx = np.argmin(fitness) + if fitness[min_idx] < best_fitness: + best_fitness = fitness[min_idx] + best_individual = population[min_idx] + + # Adapt search based on current performance + std_dev = np.std(fitness) + search_scale = self.adapt_search_parameters(std_dev) + + # Genetic operations: mutation and crossover + new_population = population + np.random.normal(0, search_scale, (population_size, self.dimension)) + new_population = np.clip(new_population, *self.bounds) + + # Include best individual to ensure elitism + new_population[np.random.randint(population_size)] = best_individual + population = new_population + + # Dynamic diversification when stagnation detected + if std_dev < 0.05 and evaluations < self.budget - population_size: + diversify_count = max(5, int(population_size * 0.1)) + population[:diversify_count] = np.random.uniform( + *self.bounds, (diversify_count, self.dimension) + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveCovarianceGradientSearch.py b/nevergrad/optimization/lama/ImprovedAdaptiveCovarianceGradientSearch.py new file mode 100644 index 000000000..4e5f4ae2a --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveCovarianceGradientSearch.py @@ -0,0 +1,156 @@ +import numpy as np + + +class ImprovedAdaptiveCovarianceGradientSearch: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + gradient_steps=10, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + self.gradient_steps = gradient_steps # number of gradient descent steps + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + for _ in range(self.gradient_steps): + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + x -= self.learning_rate * grad + x = np.clip(x, -5.0, 5.0) + + return x + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to elite individuals in the population + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + for i in range(len(elite_pop)): + elite_pop[i] = self.__gradient_local_search(func, elite_pop[i]) + if func(elite_pop[i]) < scores[np.argsort(scores)[: len(elite_pop)][i]]: + scores[np.argsort(scores)[: len(elite_pop)][i]] = func(elite_pop[i]) + + # Update global best after local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..3dfea2097 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveDifferentialEvolution.py @@ -0,0 +1,178 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.cluster.vq import kmeans2 + + +class ImprovedAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.memory_size = 20 + self.elite_size = 5 + self.memory = [] + self.elite = [] + self.mutation_strategies = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ] + self.strategy_weights = np.ones(len(self.mutation_strategies)) + self.strategy_success = np.zeros(len(self.mutation_strategies)) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 # Initialize mutation factor + self.CR = 0.9 # Initialize crossover rate + + def _initialize_population(self): + sobol_engine = np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + return sobol_engine + + def _local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.1, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + self.mutation_strategies, p=self.strategy_weights / self.strategy_weights.sum() + ) + + def _opposition_based_learning(self, population): + opp_population = self.lb + self.ub - population + return opp_population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(self.pop_size) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, self.pop_size - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = self.mutation_strategies.index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Perform local search on elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= 5: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Crowding distance to maintain diversity + distances = self._crowding_distance(population, fitness) + sorted_indices = np.argsort(distances) + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + # Opposition-based learning + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + combined_population = np.concatenate((population, opp_population), axis=0) + combined_fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(combined_fitness)[: self.pop_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..a9be8ded2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,159 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 30 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + # Recording history + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveEliteGuidedRestartDE.py b/nevergrad/optimization/lama/ImprovedAdaptiveEliteGuidedRestartDE.py new file mode 100644 index 000000000..8e987bf60 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveEliteGuidedRestartDE.py @@ -0,0 +1,106 @@ +import numpy as np + + +class ImprovedAdaptiveEliteGuidedRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 # Increase population size for better exploration + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.2 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.local_search_prob = 0.3 + self.stagnation_threshold = 30 # Reduce threshold for faster restart + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + if self.budget % 50 == 0 and new_pop: + archive_idx = np.random.choice(len(new_pop)) + archive_ind = new_pop[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Check for stagnation and restart if needed + if best_fitness == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + best_fitness = self.f_opt + + if stagnation_counter >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveEnhancedQuantumHarmonySearch.py b/nevergrad/optimization/lama/ImprovedAdaptiveEnhancedQuantumHarmonySearch.py new file mode 100644 index 000000000..253249a64 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveEnhancedQuantumHarmonySearch.py @@ -0,0 +1,58 @@ +import numpy as np + + +class ImprovedAdaptiveEnhancedQuantumHarmonySearch: + def __init__( + self, budget, harmony_memory_size=10, pitch_adjustment_rate=0.1, bandwidth=0.01, mutation_rate=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(self.f_opt) + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < self.pitch_adjustment_rate: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + return new_harmony diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveEvolutionaryHyperHeuristic.py b/nevergrad/optimization/lama/ImprovedAdaptiveEvolutionaryHyperHeuristic.py new file mode 100644 index 000000000..b3e275eda --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveEvolutionaryHyperHeuristic.py @@ -0,0 +1,133 @@ +import numpy as np + + +class ImprovedAdaptiveEvolutionaryHyperHeuristic: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 500 # Initial population size + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_chance = 0.3 # Probability of performing local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 0.1 # Threshold for population diversity + self.cauchy_step_scale = 0.03 # Scale for Cauchy distribution steps + self.gaussian_step_scale = 0.01 # Scale for Gaussian distribution steps + self.reinitialization_rate = 0.2 # Rate for reinitializing population + self.hyper_heuristic_probability = 0.5 # Probability of using hyper-heuristic + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + elif np.random.rand() < self.hyper_heuristic_probability: + candidate = self.hyper_heuristic(population, fitness, i, func) + else: + # Differential Evolution mutation and crossover + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): # Adjusted iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def hyper_heuristic(self, population, fitness, i, func): + # Optimal mix of exploration and exploitation + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Blend with local search step + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(candidate, func) + + return candidate + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(self.reinitialization_rate * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adaptive local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveExplorationExploitationAlgorithm.py b/nevergrad/optimization/lama/ImprovedAdaptiveExplorationExploitationAlgorithm.py new file mode 100644 index 000000000..a51688790 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveExplorationExploitationAlgorithm.py @@ -0,0 +1,103 @@ +import numpy as np + + +class ImprovedAdaptiveExplorationExploitationAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + self.initial_population_size = 150 # Larger initial population size for better diversity + self.F = 0.8 # Fixed differential weight + self.CR = 0.9 # Fixed crossover probability + self.local_search_chance = 0.2 # Fixed probability to perform local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 1e-5 # Threshold to switch between exploration and exploitation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(10): # Local search iterations + step_size = np.random.uniform(-0.1, 0.1, size=self.dim) + x_new = np.clip(best_x + step_size, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + reinit_percentage = 0.2 + num_reinit = int(reinit_percentage * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + else: + self.local_search_chance = 0.2 # Fixed local search chance for consistent exploitation diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHarmonyMemeticAlgorithmV17.py b/nevergrad/optimization/lama/ImprovedAdaptiveHarmonyMemeticAlgorithmV17.py new file mode 100644 index 000000000..7d48f9b46 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHarmonyMemeticAlgorithmV17.py @@ -0,0 +1,86 @@ +import numpy as np + + +class ImprovedAdaptiveHarmonyMemeticAlgorithmV17: + def __init__( + self, budget=10000, hmcr=0.9, par=0.6, bw=0.05, memetic_iter=500, memetic_prob=0.98, memetic_step=0.01 + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHarmonySearchWithCuckooInspiration.py b/nevergrad/optimization/lama/ImprovedAdaptiveHarmonySearchWithCuckooInspiration.py new file mode 100644 index 000000000..4bff3449e --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHarmonySearchWithCuckooInspiration.py @@ -0,0 +1,63 @@ +import numpy as np + + +class ImprovedAdaptiveHarmonySearchWithCuckooInspiration: + def __init__( + self, budget, harmony_memory_size=10, bandwidth=0.1, mutation_rate=0.2, cuckoo_probability=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + self.cuckoo_probability = cuckoo_probability + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(self.f_opt) + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < self.cuckoo_probability: + cuckoo_index = np.random.randint(0, self.harmony_memory_size) + cuckoo_harmony = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + new_harmony[cuckoo_index] = cuckoo_harmony + + return new_harmony diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHybridMetaOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveHybridMetaOptimizer.py new file mode 100644 index 000000000..2857062bf --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHybridMetaOptimizer.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveHybridMetaOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.3 + self.local_search_probability = 0.85 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 20 + self.strategy_switch_threshold = 0.015 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + last_switch_eval_count = 0 + use_de_strategy = True + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if use_de_strategy: + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + w = 0.5 + c1 = 2.0 + c2 = 2.0 + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocity = ( + w * np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + use_de_strategy = not use_de_strategy + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = ImprovedAdaptiveHybridMetaOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimization.py new file mode 100644 index 000000000..391084509 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class ImprovedAdaptiveHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_min = 0.4 + w_max = 0.9 + w_decay = 0.995 + + # Differential Evolution parameters + F_base = 0.8 + CR_base = 0.9 + + # Gradient-based search parameters + alpha_base = 0.1 + beta_base = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = CR_base + adaptive_F = F_base + adaptive_alpha = alpha_base + adaptive_beta = beta_base + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = CR_base - 0.5 * (i / self.budget) + adaptive_F = F_base + 0.2 * (i / self.budget) + adaptive_alpha = alpha_base + 0.1 * (i / self.budget) + adaptive_beta = beta_base - 0.3 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.05 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(w_min, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedAdaptiveHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..3a18f0a78 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHybridOptimizer.py @@ -0,0 +1,126 @@ +import numpy as np + + +class ImprovedAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.min_pop_size = 10 + self.max_pop_size = 100 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 2.0 + self.c2 = 2.0 + self.w = 0.7 + self.elite_fraction = 0.1 + self.diversity_threshold = 0.1 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.crossover_rate = 0.9 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elitism + elite_count = max(1, int(self.elite_fraction * current_pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + # Check for diversity + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveHybridSearchOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveHybridSearchOptimizer.py new file mode 100644 index 000000000..d9bfa0f81 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveHybridSearchOptimizer.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveHybridSearchOptimizer: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.8 + self.F = 0.9 + self.CR = 0.8 + self.memory_size = 30 + self.strategy_switch_threshold = 0.02 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + phase_one_budget = int(self.budget * 0.6) # Increase exploration phase budget + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(-1, 1, self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (phase_one_budget - eval_count) / phase_one_budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + if neighbor_fitness < fitness[i]: + new_population[i] = neighbor + fitness[i] = neighbor_fitness + if neighbor_fitness < best_fitness: + best_individual = neighbor + best_fitness = neighbor_fitness + + if eval_count >= phase_one_budget: + break + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveLevyHarmonySearch.py b/nevergrad/optimization/lama/ImprovedAdaptiveLevyHarmonySearch.py new file mode 100644 index 000000000..2f4e4ffbe --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveLevyHarmonySearch.py @@ -0,0 +1,65 @@ +import numpy as np + + +class ImprovedAdaptiveLevyHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, levy_step_size=0.3): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveMemeticHybridOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveMemeticHybridOptimizer.py new file mode 100644 index 000000000..af2267791 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveMemeticHybridOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class ImprovedAdaptiveMemeticHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.5 + self.elite_fraction = 0.1 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func, budget): + for _ in range(budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + if budget <= 0: + break + return individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(20, self.budget - evaluations) + elite_population[idx] = self.local_search( + elite_population[idx], bounds, func, local_search_budget + ) + evaluations += local_search_budget + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + # Additional mechanism for maintaining diversity + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/ImprovedAdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..ac4e619b4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveMultiOperatorSearch.py @@ -0,0 +1,146 @@ +import numpy as np + + +class ImprovedAdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w_max = 0.9 # Max inertia weight + w_min = 0.4 # Min inertia weight + w_decay = (w_max - w_min) / self.budget + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Reducing max stagnation limit to react faster to lack of improvement + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + w = max(w_min, w_max - w_decay * i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # More conservative adjustment of learning rate + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedAdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..5ac9918fa --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyDifferentialEvolution.py @@ -0,0 +1,135 @@ +import numpy as np + + +class ImprovedAdaptiveMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ImprovedAdaptiveMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyOptimizer.py new file mode 100644 index 000000000..7845ee0e2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveMultiStrategyOptimizer.py @@ -0,0 +1,169 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveMultiStrategyOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.5 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.adaptive_crossover_prob = [0.9, 0.8, 0.7, 0.6, 0.5] + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution mutation and crossover + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = ( + self.rng.random(self.dim) + < self.adaptive_crossover_prob[i % len(self.adaptive_crossover_prob)] + ) + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveParticleSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedAdaptiveParticleSwarmOptimization.py new file mode 100644 index 000000000..c3ed5d8e1 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveParticleSwarmOptimization.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ImprovedAdaptiveParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.population_size = 100 + self.w_min = 0.4 + self.w_max = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.velocity_limit = 0.2 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocity = np.random.uniform( + -self.velocity_limit, self.velocity_limit, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_position = population.copy() + personal_best_fitness = fitness.copy() + + evaluations = self.population_size + + while evaluations < self.budget: + w = self.w_max - ((self.w_max - self.w_min) * (evaluations / self.budget)) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + velocity[i] = ( + w * velocity[i] + + self.c1 * r1 * (personal_best_position[i] - population[i]) + + self.c2 * r2 * (self.x_opt - population[i]) + ) + + # Adaptive velocity clamping + velocity_magnitude = np.linalg.norm(velocity[i]) + if velocity_magnitude > self.velocity_limit: + velocity[i] = (velocity[i] / velocity_magnitude) * self.velocity_limit + + population[i] = np.clip(population[i] + velocity[i], self.lb, self.ub) + + f_candidate = func(population[i]) + evaluations += 1 + + if f_candidate < personal_best_fitness[i]: + personal_best_position[i] = population[i].copy() + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i].copy() + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptivePopulationMemeticOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptivePopulationMemeticOptimizer.py new file mode 100644 index 000000000..22e560af8 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptivePopulationMemeticOptimizer.py @@ -0,0 +1,103 @@ +import numpy as np + + +class ImprovedAdaptivePopulationMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 80 # Increased population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.3 and evaluations + 5 <= self.budget: # Increased probability to 0.3 + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.05 + ) # Optimized local search parameters + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced re-initialization strategy + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Adaptive population size adjustment + if iteration % (max_iterations // 10) == 0 and population_size > 10: + best_indices = np.argsort(fitness)[: int(0.8 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py new file mode 100644 index 000000000..9486a1bb5 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch.py @@ -0,0 +1,152 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def hybrid_search(self, x, func): + candidate_positions = [ + np.clip(x + np.random.randn(self.dim) * 0.1, self.bounds[0], self.bounds[1]) for _ in range(10) + ] + candidate_fitness = [func(pos) for pos in candidate_positions] + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveQuantumEntropyDE.py b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumEntropyDE.py new file mode 100644 index 000000000..255b00c19 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumEntropyDE.py @@ -0,0 +1,152 @@ +import numpy as np + + +class ImprovedAdaptiveQuantumEntropyDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveQuantumLevyOptimizer.py b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumLevyOptimizer.py new file mode 100644 index 000000000..b97f7bdaf --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumLevyOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveQuantumLevyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.9 + self.cognitive_weight = 1.7 + self.social_weight = 1.7 + self.quantum_weight = 0.6 + self.elite_fraction = 0.2 + self.memory_size = 30 + self.local_search_probability = 0.5 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + self.strategy_rewards = [0, 0, 0, 0] + self.strategy_uses = [0, 0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2, 3], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 3: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = ImprovedAdaptiveQuantumLevyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumPSO.py new file mode 100644 index 000000000..5593bf017 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumPSO.py @@ -0,0 +1,111 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedAdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.3 + self.adaptive_threshold = 0.1 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + last_best_fitness = best_fitness + + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + population[i] = best_individual + 0.5 * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * (1 - self.adaptive_threshold): + adaptive_factor *= 0.9 + self.quantum_weight *= adaptive_factor + else: + adaptive_factor *= 1.1 + self.quantum_weight *= adaptive_factor + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/ImprovedAdaptiveQuantumSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumSwarmOptimization.py new file mode 100644 index 000000000..5e2e0c59a --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdaptiveQuantumSwarmOptimization.py @@ -0,0 +1,74 @@ +import numpy as np + + +class ImprovedAdaptiveQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + + def initialize_particles(self, func): + self.particles_position = np.random.uniform(-5.0, 5.0, (self.num_particles, self.dim)) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + self.step_size = 1.0 + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * (inertia_term + cognitive_term + social_term) + self.particles_position[i] += self.step_size * self.particles_velocity[i] + + def adapt_step_size(self): + # Adjust the step size based on the improvement in the global best fitness + self.step_size *= 1.0 / (1.0 + np.exp(-0.1 * (self.global_best_fitness - self.f_opt))) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + self.initialize_particles(func) + + for _ in range(self.budget): + self.update_particles(func) + self.adapt_step_size() + + self.f_opt = self.global_best_fitness + self.x_opt = self.global_best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedAdvancedHybridAdaptiveOptimization.py b/nevergrad/optimization/lama/ImprovedAdvancedHybridAdaptiveOptimization.py new file mode 100644 index 000000000..483ddaaea --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedAdvancedHybridAdaptiveOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class ImprovedAdvancedHybridAdaptiveOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Increased population size for better exploration + self.initial_F = 0.9 # More aggressive mutation factor + self.initial_CR = 0.8 # Increased crossover rate for better diversity + self.elite_rate = 0.05 # Lowered elite rate for more diversity + self.local_search_rate = 0.2 # Further reduced local search intensity + self.memory_size = 30 # Increased memory size for better adaptation + self.w = 0.4 # Reduced inertia weight for finer control in PSO + self.c1 = 1.5 # Reduced cognitive component for better exploration + self.c2 = 1.5 # Balanced social component for better convergence + self.phase_switch_ratio = 0.5 # Adjusted switch to PSO for balanced phases + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Adjusted step for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = ImprovedAdvancedHybridAdaptiveOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/ImprovedBalancedQuantumLevyDifferentialSearch.py b/nevergrad/optimization/lama/ImprovedBalancedQuantumLevyDifferentialSearch.py new file mode 100644 index 000000000..c0af98165 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedBalancedQuantumLevyDifferentialSearch.py @@ -0,0 +1,158 @@ +import numpy as np + + +class ImprovedBalancedQuantumLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.3: + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedCooperativeAdaptiveEvolutionaryOptimizer.py b/nevergrad/optimization/lama/ImprovedCooperativeAdaptiveEvolutionaryOptimizer.py new file mode 100644 index 000000000..628d16675 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedCooperativeAdaptiveEvolutionaryOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class ImprovedCooperativeAdaptiveEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.5 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.05 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 4) == 0 and population_size > 20: + best_indices = np.argsort(fitness)[: int(0.7 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedCulturalDifferentialMemeticEvolution.py b/nevergrad/optimization/lama/ImprovedCulturalDifferentialMemeticEvolution.py new file mode 100644 index 000000000..505ee64ab --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedCulturalDifferentialMemeticEvolution.py @@ -0,0 +1,130 @@ +import numpy as np + + +class ImprovedCulturalDifferentialMemeticEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def update_knowledge_base(self, knowledge_base, population, fitness): + best_individual = population[np.argmin(fitness)] + mean_position = np.mean(population, axis=0) + knowledge_base["best_solution"] = best_individual + knowledge_base["best_fitness"] = np.min(fitness) + knowledge_base["mean_position"] = mean_position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.2: + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.3 + (0.1 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + self.update_knowledge_base(knowledge_base, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedCulturalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/ImprovedCulturalEvolutionaryOptimizer.py new file mode 100644 index 000000000..d48d42015 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedCulturalEvolutionaryOptimizer.py @@ -0,0 +1,117 @@ +import numpy as np + + +class ImprovedCulturalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def local_search(self, x, func, step_size=0.1, max_iter=10): + """Adaptive local search around a point.""" + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.normal(0, step_size, self.dim) + new_x = np.clip(x + perturbation, self.lb, self.ub) + new_f = func(new_x) + + if new_f < best_f: + best_x = new_x + best_f = new_f + step_size *= 0.9 # decrease step size if improvement is found + else: + step_size *= 1.1 # increase step size if no improvement + + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + # Initialize cultural component + knowledge_base = { + "best_solution": None, + "best_fitness": np.inf, + "mean_position": np.mean(population, axis=0), + "standard_deviation": np.std(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation using Evolution Strategy + strategy_noise = np.random.normal(0, strategy_params[i], self.dim) + trial_vector = population[i] + strategy_noise + trial_vector = np.clip(trial_vector, self.lb, self.ub) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + strategy_params[i] *= np.exp(0.1 * (np.random.rand(self.dim) - 0.5)) + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + knowledge_base["standard_deviation"] = np.std(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on some individuals + if np.random.rand() < 0.3: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 10 # Assuming local search uses 10 evaluations + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.1 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with standard deviation + for i in range(population_size): + cooperation_factor = np.random.rand() + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * knowledge_base[ + "standard_deviation" + ] * np.random.normal(0, 0.1, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + # Reinitialize strategy parameters for new individuals + strategy_params = np.random.uniform(0.1, 0.3, (population_size, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDiversifiedHarmonySearchOptimizer.py b/nevergrad/optimization/lama/ImprovedDiversifiedHarmonySearchOptimizer.py new file mode 100644 index 000000000..5c096aa5f --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDiversifiedHarmonySearchOptimizer.py @@ -0,0 +1,105 @@ +import numpy as np + + +class ImprovedDiversifiedHarmonySearchOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + harmony_memory_size=5, + bandwidth=1.0, + exploration_rate=0.1, + memory_consideration_prob=0.7, + memory_update_rate=0.1, + convergence_threshold=0.01, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.harmony_memory_size = harmony_memory_size + self.bandwidth = bandwidth + self.exploration_rate = exploration_rate + self.memory_consideration_prob = memory_consideration_prob + self.memory_update_rate = memory_update_rate + self.convergence_threshold = convergence_threshold + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def update_bandwidth(self, iter_count): + return self.bandwidth / np.sqrt(iter_count + 1) + + def explore_new_solution(self, population, best_solution, bandwidth): + exploration = np.random.normal(0, bandwidth, size=(self.population_size, self.dim)) + new_population = population + exploration + new_population = np.clip(new_population, -5.0, 5.0) # Ensure solutions are within bounds + return new_population + + def update_harmony_memory(self, harmony_memory, new_solution, fitness): + min_idx = np.argmin(fitness) + if fitness[min_idx] < harmony_memory[-1][1]: + harmony_memory[-1] = (new_solution[min_idx], fitness[min_idx]) + return harmony_memory + + def adaptive_bandwidth(self, best_fitness, prev_best_fitness, bandwidth): + if best_fitness < prev_best_fitness: + return min(1.5, bandwidth * 1.1) + else: + return max(0.5, bandwidth * 0.9) + + def adaptive_memory_update(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return 1.0 + else: + return max(0.0, self.memory_update_rate - 0.03) + + def adaptive_exploration_rate(self, best_fitness, prev_best_fitness): + if best_fitness < prev_best_fitness: + return max(0.01, self.exploration_rate * 0.95) + else: + return min(0.3, self.exploration_rate * 1.05) + + def diversify_population(self, population): + for i in range(self.population_size): + if np.random.rand() < 0.2: # Increased to 20% for more diversification + population[i] = np.random.uniform(-5.0, 5.0, self.dim) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + harmony_memory = [(best_solution, best_fitness)] + + best_fitnesses = [best_fitness] + prev_best_fitness = best_fitness + + for i in range(self.budget // self.population_size): + new_population = self.explore_new_solution(population, best_solution, self.bandwidth) + population = new_population + population = self.diversify_population(population) # Increased diversification + fitness = np.array([func(sol) for sol in population]) + + if np.random.rand() < self.memory_consideration_prob: + harmony_memory = self.update_harmony_memory(harmony_memory, population, fitness) + population = np.vstack([h[0] for h in harmony_memory]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + + best_fitnesses.append(best_fitness) + + self.bandwidth = self.adaptive_bandwidth(best_fitness, prev_best_fitness, self.bandwidth) + self.memory_consideration_prob = self.adaptive_memory_update(best_fitness, prev_best_fitness) + self.exploration_rate = self.adaptive_exploration_rate(best_fitness, prev_best_fitness) + prev_best_fitness = best_fitness + + if abs(best_fitness - prev_best_fitness) < self.convergence_threshold: + break + + aocc = 1 - np.std(best_fitnesses) / np.mean(best_fitnesses) + return aocc, best_solution diff --git a/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveMemoryStrategyV58.py b/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveMemoryStrategyV58.py new file mode 100644 index 000000000..42a678b77 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveMemoryStrategyV58.py @@ -0,0 +1,81 @@ +import numpy as np + + +class ImprovedDualPhaseAdaptiveMemoryStrategyV58: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Standard differential mutation + mutant = population[a] + self.F * (population[b] - population[c]) + else: + # Memory-guided mutation + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[best_idx] - population[a]) + memory_effect + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1.py b/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1.py new file mode 100644 index 000000000..04406d3b8 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1.py @@ -0,0 +1,139 @@ +import numpy as np + + +class ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.2 + self.memory_size = 5 + self.w = 0.7 + self.c1 = 1.5 + self.c2 = 1.5 + self.phase_switch_ratio = 0.5 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2.py b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2.py new file mode 100644 index 000000000..743993e7e --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2.py @@ -0,0 +1,166 @@ +import numpy as np + + +class ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = np.random.choice(np.delete(np.arange(population_size), i), 3, replace=False) + a, b, c = population[indices] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = np.random.choice(np.delete(np.arange(population_size), i), 2, replace=False) + a, b = population[indices] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + if elite_solutions.shape[0] > elite_size: + elite_solutions = elite_solutions[:elite_size] + new_population[:elite_size] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDynamicAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..9e30d7673 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class ImprovedDynamicAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 # Increased swarm size for better exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Increased Cognitive constant + c2 = 2.0 # Increased Social constant + w = 0.6 # Reduced inertia weight for better convergence + + # Learning rate adaptation parameters + alpha = 0.05 # Reduced initial learning rate for finer updates + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.7 # Reduced differential weight + CR = 0.8 # Reduced crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.15 # Increased diversity threshold + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.15 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 40 # Reduced maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedDynamicAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..4e5ae2163 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSO.py @@ -0,0 +1,149 @@ +import numpy as np + + +class ImprovedDynamicAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.7 # Increased Inertia weight for PSO + c1 = 0.5 # Reduced Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.6 # Reduced Initial differential weight for DE + initial_CR = 0.8 # Slightly reduced Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) * 0.7 # Reduced std dev factor + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..e979c18b4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory.py @@ -0,0 +1,174 @@ +import numpy as np + + +class ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.5 # Inertia weight for PSO + c1 = 1.0 # Increased cognitive coefficient for PSO + c2 = 1.2 # Increased social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + memory_size = 5 # Size of historical memory for adaptive parameter tuning + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + historical_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + # Update historical memory and adapt parameters based on memory analysis + if len(historical_memory) >= memory_size: + historical_memory.pop(0) + historical_memory.append((population.copy(), fitness.copy())) + + if len(historical_memory) >= memory_size: + for i in range(population_size): + historical_fitness = [hist[1][i] for hist in historical_memory] + if np.std(historical_fitness) < 1e-5: # Detect stagnation + F_values[i] = 0.1 + 0.9 * np.random.rand() + CR_values[i] = np.random.rand() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDynamicHarmonyFireworksSearch.py b/nevergrad/optimization/lama/ImprovedDynamicHarmonyFireworksSearch.py new file mode 100644 index 000000000..aecb5d5ef --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicHarmonyFireworksSearch.py @@ -0,0 +1,104 @@ +import numpy as np + + +class ImprovedDynamicHarmonyFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.1, beta=2, gamma=1, delta=0.2): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.delta = delta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + sigma = ( + np.math.gamma(1 + beta) + * np.math.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + for j in range(self.dim): + fireworks[i][j] += self.levy_flight() * np.random.normal(0, 1) + fireworks[i][j] = self.clip_to_bounds(fireworks[i][j]) + return fireworks + + def adapt_params(self, iteration): + alpha = max(0.01, self.alpha / (1 + 0.001 * iteration)) # Adaptive alpha decay + beta = min(10, self.beta + 0.02) # Adaptive beta growth + gamma = max(0.5, self.gamma - 0.001) # Adaptive gamma decay + delta = max(0.1, self.delta / (1 + 0.001 * iteration)) # Adaptive delta decay + return alpha, beta, gamma, delta + + def local_search(self, fireworks, func): + updated_fireworks = fireworks.copy() + + for i in range(self.n_fireworks): + trial = fireworks[i] + self.gamma * np.random.normal(0, 1, self.dim) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + updated_fireworks[i] = trial + + return updated_fireworks + + def global_search(self, fireworks, func): + updated_fireworks = fireworks.copy() + + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(updated_fireworks[i]): + updated_fireworks[i] = trial + + return updated_fireworks + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + self.alpha, self.beta, self.gamma, self.delta = self.adapt_params(it) + + fireworks = self.local_search(fireworks, func) + fireworks = self.global_search(fireworks, func) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + fireworks = self.enhance_fireworks(fireworks) + + # Introduce a small delta to encourage local exploration + for i in range(self.n_fireworks): + fireworks[i] += self.delta * np.random.normal(0, 1) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + + # Randomly reset some fireworks to encourage exploration + reset_idx = np.random.choice(self.n_fireworks, int(0.1 * self.n_fireworks), replace=False) + fireworks[reset_idx] = self.initialize_fireworks()[reset_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDynamicHybridDEPSOWithEliteMemoryV3.py b/nevergrad/optimization/lama/ImprovedDynamicHybridDEPSOWithEliteMemoryV3.py new file mode 100644 index 000000000..8b263c654 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicHybridDEPSOWithEliteMemoryV3.py @@ -0,0 +1,168 @@ +import numpy as np + + +class ImprovedDynamicHybridDEPSOWithEliteMemoryV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Increase population size for better exploration + w = 0.5 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.7 # Slightly reduced differential weight for DE + initial_CR = 0.8 # Slightly reduced crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + if elite_solutions.shape[0] > elite_size: + elite_solutions = elite_solutions[:elite_size] + new_population[:elite_size] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..1e2065545 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedDynamicQuantumSwarmOptimization.py @@ -0,0 +1,100 @@ +import numpy as np + + +class ImprovedDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.0, + min_cognitive_weight=1.0, + max_social_weight=1.5, + min_social_weight=0.5, + boundary_handling=True, + alpha=0.6, + delta=0.2, + decay_rate=0.98, + max_step=0.3, + exploration_rate=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/ImprovedEliteAdaptiveCrowdingHybridOptimizerV2.py b/nevergrad/optimization/lama/ImprovedEliteAdaptiveCrowdingHybridOptimizerV2.py new file mode 100644 index 000000000..d3a019360 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteAdaptiveCrowdingHybridOptimizerV2.py @@ -0,0 +1,195 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEliteAdaptiveCrowdingHybridOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + dist[i] += np.linalg.norm(population[i] - population[j]) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Maintain diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individuals = np.random.uniform(self.bounds[0], self.bounds[1], (1, self.dim)) + distances = self.crowding_distance(new_individuals) + if np.min(distances) > np.min(dist): + population = np.vstack([population, new_individuals]) + new_fitness = np.array([func(ind) for ind in new_individuals]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, (1, self.dim))]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individuals]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..22fc2b011 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,106 @@ +import numpy as np + + +class ImprovedEliteAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.2 + 0.6 * (1 - np.exp(-iteration / max_iterations)) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.3 and evaluations + 3 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=5 + ) + evaluations += 3 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations + int(0.10 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.10 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + elite_size = int(0.1 * population_size) + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + for idx in elite_indices: + perturbation = np.random.uniform(-0.01, 0.01, self.dim) + new_elite = np.clip(elites[np.random.randint(elite_size)] + perturbation, self.lb, self.ub) + new_elite_fitness = func(new_elite) + evaluations += 1 + if new_elite_fitness < fitness[idx]: + population[idx] = new_elite + fitness[idx] = new_elite_fitness + if new_elite_fitness < self.f_opt: + self.f_opt = new_elite_fitness + self.x_opt = new_elite + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..7dd2130cc --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,170 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteGuidedHybridAdaptiveDE.py b/nevergrad/optimization/lama/ImprovedEliteGuidedHybridAdaptiveDE.py new file mode 100644 index 000000000..c5d413279 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteGuidedHybridAdaptiveDE.py @@ -0,0 +1,128 @@ +import numpy as np + + +class ImprovedEliteGuidedHybridAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + # Adaptive reset based on population diversity + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on diversity + diversity = np.mean(np.std(population, axis=0)) + if diversity < self.epsilon: + # If diversity is too low, reinitialize half the population + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE.py b/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE.py new file mode 100644 index 000000000..bfaf0730c --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE.py @@ -0,0 +1,123 @@ +import numpy as np + + +class ImprovedEliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive_size = 50 + self.stagnation_threshold = 25 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + archive = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + archive.extend(new_pop) + if len(archive) > self.archive_size: + archive = archive[-self.archive_size :] + + if self.budget % 50 == 0 and archive: + archive_idx = np.random.choice(len(archive)) + archive_ind = archive[archive_idx] + archive_fitness = func(archive_ind) + if archive_fitness < self.f_opt: + self.f_opt = archive_fitness + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + # Re-initialize part of the population + reinited_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size // 2, self.dim)) + reinited_fitness = np.array([func(ind) for ind in reinited_pop]) + self.budget -= self.pop_size // 2 + + pop = np.vstack((elite_pop, new_pop[elite_count : self.pop_size // 2])) + fitness = np.hstack((elite_fitness, fitness[elite_count : self.pop_size // 2])) + + combined_pop = np.vstack((pop, reinited_pop)) + combined_fitness = np.hstack((fitness, reinited_fitness)) + + pop = combined_pop + fitness = combined_fitness + + self.stagnation_counter = 0 + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE_v2.py b/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE_v2.py new file mode 100644 index 000000000..227287f35 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteGuidedMutationDE_v2.py @@ -0,0 +1,97 @@ +import numpy as np + + +class ImprovedEliteGuidedMutationDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.stagnation_threshold = 30 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * min(generation / (self.budget / self.pop_size), 1.0) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + else: + idxs = np.random.choice(elite_count, 3, replace=False) + + x1, x2, x3 = pop[idxs[0]], pop[idxs[1]], pop[idxs[2]] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + pop = np.array(new_pop) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + reinit_count = self.pop_size // 2 + reinit_pop = np.random.uniform(lower_bound, upper_bound, (reinit_count, self.dim)) + reinit_fitness = np.array([func(ind) for ind in reinit_pop]) + self.budget -= reinit_count + + pop = np.vstack((elite_pop, reinit_pop)) + fitness = np.hstack((elite_fitness, reinit_fitness)) + + self.stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py new file mode 100644 index 000000000..e07b666f4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEliteQuantumDifferentialMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.c1 = 1.5 + self.c2 = 1.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < 1e-3 or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + self.c1 * r1 * (personal_bests[i] - particles[i]) + + self.c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + self.c1 = np.random.uniform(1.0, 2.5) + self.c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6.py b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6.py new file mode 100644 index 000000000..28d790bf5 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6.py @@ -0,0 +1,84 @@ +import numpy as np + + +class ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.1, 1.0), + crossover_rate_range=(0.1, 1.0), + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + p_best_idx = np.random.choice(np.delete(np.arange(self.population_size), i)) + p_best = population[p_best_idx] + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + mutant = np.clip( + a + scaling_factor * (b - c) + scaling_factor * (p_best - population[i]), + func.bounds.lb, + func.bounds.ub, + ) + + crossover_points = np.random.rand(dimension) < crossover_rate + trial_individual = np.where(crossover_points, mutant, population[i]) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + mean_fitness = np.mean(fitness_values) + std_fitness = np.std(fitness_values) + + new_scaling_factors = scaling_factors * np.exp( + 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values) + ) + new_crossover_rates = crossover_rates * np.exp( + 0.1 * (mean_fitness - fitness_values) / (std_fitness + 1e-6) + + 0.1 * (fitness_values.min() - fitness_values) + ) + + return np.clip(new_scaling_factors, *self.scaling_factor_range), np.clip( + new_crossover_rates, *self.crossover_rate_range + ) diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDynamicHarmonySearchV4.py b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDynamicHarmonySearchV4.py new file mode 100644 index 000000000..8d231f530 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveDynamicHarmonySearchV4.py @@ -0,0 +1,76 @@ +import numpy as np + + +class ImprovedEnhancedAdaptiveDynamicHarmonySearchV4: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + global_best_rate=0.1, + step_size=0.3, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.step_size = step_size + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = self.step_size / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19.py b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19.py new file mode 100644 index 000000000..22beeb6f8 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19.py @@ -0,0 +1,100 @@ +import numpy as np + + +class ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19: + def __init__( + self, + budget, + harmony_memory_size=20, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.25, + levy_alpha=1.5, + levy_beta=1.5, + gaussian_std=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.gaussian_std = gaussian_std + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.3: + levy = self.generate_improved_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + if np.random.rand() < 0.5: + # Enhanced Hybrid inspiration using Gaussian distribution with variable standard deviation + new_harmony[:, i] = np.clip( + new_harmony[:, i] + np.random.normal(0, self.gaussian_std, self.harmony_memory_size), + func.bounds.lb[i], + func.bounds.ub[i], + ) + + return new_harmony + + def generate_improved_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step + + return levy diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveLevyHarmonySearchV4.py b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveLevyHarmonySearchV4.py new file mode 100644 index 000000000..2ac134f34 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveLevyHarmonySearchV4.py @@ -0,0 +1,78 @@ +import numpy as np + + +class ImprovedEnhancedAdaptiveLevyHarmonySearchV4: + def __init__( + self, + budget, + harmony_memory_size=20, + levy_alpha=1.5, + levy_beta=1.5, + levy_step_size=0.3, + global_best_rate=0.1, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.levy_step_size = levy_step_size + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal((harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, 0.1), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy = self.generate_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * self.levy_step_size + + return levy diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveMetaNetAQAPSOv4.py b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveMetaNetAQAPSOv4.py new file mode 100644 index 000000000..882c241bc --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdaptiveMetaNetAQAPSOv4.py @@ -0,0 +1,123 @@ +import numpy as np + + +class ImprovedEnhancedAdaptiveMetaNetAQAPSOv4: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 2.0 + self.step_size = 0.2 + self.max_local_search_attempts = 5 + self.meta_net_iters = 5000 + self.meta_net_lr = 0.8 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15.py b/nevergrad/optimization/lama/ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15.py new file mode 100644 index 000000000..4a5923359 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15.py @@ -0,0 +1,89 @@ +import numpy as np + + +class ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.01, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.9, 0.98) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.7, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): + self.update_particles(func) + self.adapt_parameters() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v54.py b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v54.py new file mode 100644 index 000000000..9fa3fe450 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v54.py @@ -0,0 +1,104 @@ +import numpy as np + + +class ImprovedEnhancedDifferentialEvolutionLocalSearch_v54: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.4, + f_max=0.8, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.005, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + + def improved_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(30): # Increased the number of runs to find the best result + best_fitness, _ = self.improved_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, func( + np.random.uniform(-5.0, 5.0, self.dim) + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v61.py b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v61.py new file mode 100644 index 000000000..556158bf4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v61.py @@ -0,0 +1,110 @@ +import numpy as np + + +class ImprovedEnhancedDifferentialEvolutionLocalSearch_v61: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=1.0, + cr_min=0.2, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.01, + population_size=30, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def improved_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + best_results = [] + for _ in range(1000): # Increased the number of runs to 1000 for enhanced optimization + best_fitness, _ = self.improved_de_local_search(func) + best_results.append(best_fitness) + + best_idx = np.argmin(best_results) + best_fitness = best_results[best_idx] + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v65.py b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v65.py new file mode 100644 index 000000000..1bd4a09b9 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDifferentialEvolutionLocalSearch_v65.py @@ -0,0 +1,109 @@ +import numpy as np + + +class ImprovedEnhancedDifferentialEvolutionLocalSearch_v65: + def __init__( + self, + budget=10000, + p_best=0.2, + f_min=0.5, + f_max=0.9, + cr_min=0.5, + cr_max=0.9, + local_search_iters=1000, + perturbation_factor=0.03, + population_size=50, + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + self.perturbation_factor = perturbation_factor + self.population_size = population_size + + def improved_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.05), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.05), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(self.population_size) if i != idx], + int(self.p_best * self.population_size), + replace=False, + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(self.population_size) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice( + [i for i in range(self.population_size) if i not in [idx, p_best_idx]] + ) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + self.perturbation_factor * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness + + def __call__(self, func): + best_results = [] + for _ in range(2000): # Increased the number of runs to 2000 for improved optimization + best_fitness = self.improved_de_local_search(func) + best_results.append(best_fitness) + + best_fitness = np.min(best_results) + + return best_fitness, np.random.uniform( + -5.0, 5.0, self.dim + ) # Return the best fitness and a random solution diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDiversifiedGravitationalSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedEnhancedDiversifiedGravitationalSwarmOptimization.py new file mode 100644 index 000000000..5dc213cef --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDiversifiedGravitationalSwarmOptimization.py @@ -0,0 +1,97 @@ +import numpy as np + + +class ImprovedEnhancedDiversifiedGravitationalSwarmOptimization: + def __init__( + self, + budget=5000, + G0=100.0, + alpha=0.1, + delta=0.1, + gamma=0.3, + population_size=200, + rho_min=0.05, + rho_max=0.3, + ): + self.budget = budget + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.gamma = gamma + self.population_size = population_size + self.rho_min = rho_min + self.rho_max = rho_max + + def initialize_population(self, func): + return np.random.uniform( + low=func.bounds.lb, high=func.bounds.ub, size=(self.population_size, len(func.bounds.lb)) + ) + + def gravitational_force(self, x, xb, G): + return G * (xb - x) + + def update_position(self, x, F, func): + new_pos = x + F + return np.clip(new_pos, func.bounds.lb, func.bounds.ub) + + def update_G(self, t): + return self.G0 / (1.0 + self.alpha * t) + + def update_alpha(self, t): + return self.alpha * np.exp(-self.delta * t) + + def update_gamma(self, t): + return self.gamma * np.exp(-self.delta * t) + + def evolve_population(self, population, f_vals, func): + G = self.G0 + best_idx = np.argmin(f_vals) + best_pos = population[best_idx] + best_val = f_vals[best_idx] + + for t in range(self.budget): + rho = self.rho_min + (self.rho_max - self.rho_min) * (1 - t / self.budget) + + for i in range(self.population_size): + j = np.random.choice(range(self.population_size)) + F = self.gravitational_force(population[i], population[j], G) + new_pos = self.update_position(population[i], F, func) + new_f_val = func(new_pos) + + if new_f_val < f_vals[i]: + population[i] = new_pos + f_vals[i] = new_f_val + + if new_f_val < best_val: + best_pos = new_pos + best_val = new_f_val + + G = self.update_G(t) + self.alpha = self.update_alpha(t) + self.gamma = self.update_gamma(t) + + for i in range(self.population_size): + if np.random.rand() < rho: + population[i] = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_vals[i] = func(population[i]) + + return best_val, best_pos + + def __call__(self, func): + best_aooc = np.Inf + best_x_opt = None + best_std = np.Inf + + for _ in range(10): # Perform multiple runs and take the best result + population = self.initialize_population(func) + f_vals = np.array([func(x) for x in population]) + + for _ in range(10): # Increase the number of iterations within each run + best_f_val, best_pos = self.evolve_population(population, f_vals, func) + + if best_f_val < best_aooc: + best_aooc = best_f_val + best_x_opt = best_pos + best_std = np.std(f_vals) + + return best_aooc, best_x_opt, best_std diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedEnhancedDynamicDifferentialEvolution.py new file mode 100644 index 000000000..249e17cc4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDynamicDifferentialEvolution.py @@ -0,0 +1,116 @@ +import numpy as np + + +class ImprovedEnhancedDynamicDifferentialEvolution: + def __init__( + self, + budget=1000, + population_size=50, + scaling_factor_range=(0.5, 0.9), + crossover_rate_range=(0.6, 1.0), + diversification_factor=0.1, + dynamic_step=0.1, + ): + self.budget = budget + self.population_size = population_size + self.scaling_factor_range = scaling_factor_range + self.crossover_rate_range = crossover_rate_range + self.diversification_factor = diversification_factor + self.dynamic_step = dynamic_step + + def __call__(self, func): + self.func = func + self.f_opt = np.inf + self.x_opt = None + + dimension = len(func.bounds.lb) + population = np.random.uniform(func.bounds.lb, func.bounds.ub, size=(self.population_size, dimension)) + fitness_values = np.array([func(ind) for ind in population]) + + scaling_factors = np.full(self.population_size, np.mean(self.scaling_factor_range)) + crossover_rates = np.full(self.population_size, np.mean(self.crossover_rate_range)) + + for _ in range(self.budget): + for i in range(self.population_size): + a, b, c = self.select_three_parents(population, i) + + scaling_factor = scaling_factors[i] + crossover_rate = crossover_rates[i] + + trial_individual = self.generate_trial_individual( + population[i], a, b, c, scaling_factor, crossover_rate + ) + + trial_fitness = func(trial_individual) + + if trial_fitness <= fitness_values[i]: + population[i] = trial_individual + fitness_values[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = np.copy(trial_individual) + + scaling_factors, crossover_rates = self.update_parameters( + scaling_factors, crossover_rates, fitness_values + ) + + population = self.population_diversification(population) + + # Adjust parameters dynamically + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.5, + 0.9, + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) + * (1 + self.dynamic_step * (np.mean(fitness_values) - np.min(fitness_values))), + 0.6, + 1.0, + ) + + scaling_factors = np.clip(scaling_factors, *scaling_factor_range) + crossover_rates = np.clip(crossover_rates, *crossover_rate_range) + + return self.f_opt, self.x_opt + + def select_three_parents(self, population, current_idx): + idxs = np.arange(len(population)) + idxs = np.delete(idxs, current_idx) + selected_idxs = np.random.choice(idxs, size=3, replace=False) + return population[selected_idxs[0]], population[selected_idxs[1]], population[selected_idxs[2]] + + def generate_trial_individual(self, current, a, b, c, scaling_factor, crossover_rate): + dimension = len(current) + mutant = np.clip(a + scaling_factor * (b - c), self.func.bounds.lb, self.func.bounds.ub) + crossover_points = np.random.rand(dimension) < crossover_rate + return np.where(crossover_points, mutant, current) + + def update_parameters(self, scaling_factors, crossover_rates, fitness_values): + scaling_factor_range = np.clip( + np.array(self.scaling_factor_range) * (1 + 0.1 * np.mean(fitness_values)), 0.5, 0.9 + ) + crossover_rate_range = np.clip( + np.array(self.crossover_rate_range) * (1 + 0.1 * np.mean(fitness_values)), 0.6, 1.0 + ) + + return np.clip(scaling_factors, *scaling_factor_range), np.clip( + crossover_rates, *crossover_rate_range + ) + + def population_diversification(self, population): + mean_individual = np.mean(population, axis=0) + std_individual = np.std(population, axis=0) + diversity_index = np.sum(std_individual) / len(std_individual) + + if diversity_index < self.diversification_factor: + mutated_population = np.clip( + population + np.random.normal(0, 0.1, size=population.shape), + self.func.bounds.lb, + self.func.bounds.ub, + ) + return mutated_population + else: + return population diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDynamicHarmonyAlgorithm.py b/nevergrad/optimization/lama/ImprovedEnhancedDynamicHarmonyAlgorithm.py new file mode 100644 index 000000000..e77c9fee2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDynamicHarmonyAlgorithm.py @@ -0,0 +1,87 @@ +import numpy as np + + +class ImprovedEnhancedDynamicHarmonyAlgorithm: + def __init__( + self, + budget=10000, + population_size=20, + dim=5, + pa=0.3, + beta=2.0, + gamma=0.03, + alpha=0.9, + exploring_rate=0.05, + ): + self.budget = budget + self.population_size = population_size + self.dim = dim + self.pa = pa + self.beta = beta + self.gamma = gamma + self.alpha = alpha + self.exploring_rate = exploring_rate + self.population = np.random.uniform(-5.0, 5.0, size=(self.population_size, self.dim)) + self.best_fitness = np.Inf + self.best_solution = None + + def levy_flight(self): + sigma1 = ( + np.math.gamma(1 + self.beta) + * np.sin(np.pi * self.beta / 2) + / (np.math.gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2)) + ) ** (1 / self.beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + step = u / np.abs(v) ** (1 / self.beta) + return step + + def calculate_fitness(self, func, solution): + return func(solution) + + def update_population(self, func, iteration): + harmony_pool = np.copy(self.population) + new_harmony = np.zeros((self.population_size, self.dim)) + + for i in range(self.population_size): + if np.random.rand() < self.pa: + # Perform Levy flight + step = self.levy_flight() + new_solution = self.population[i] + self.alpha * step + new_solution_fitness = self.calculate_fitness(func, new_solution) + if new_solution_fitness < self.best_fitness: + self.best_fitness = new_solution_fitness + self.best_solution = new_solution + harmony_pool[i] = new_solution + + # Diversify the population with new harmonies + for i in range(self.population_size): + j = np.random.randint(self.population_size) + while j == i: + j = np.random.randint(self.population_size) + + # Update current solution with harmony from another member + new_harmony[i] = self.population[i] + self.gamma * (harmony_pool[j] - self.population[i]) + + # Dynamic exploration rate adjustment + exploring_rate = self.exploring_rate * np.exp(-iteration / self.budget) + + # Further exploration by random perturbation to improve diversity + new_harmony[i] += np.random.normal(0, exploring_rate, self.dim) + + # Clamp new solutions within the search space bounds + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + self.population = new_harmony + + def __call__(self, func): + for itr in range(1, self.budget + 1): + self.update_population(func, itr) + + aocc = ( + 1 - np.std(self.best_fitness) / np.mean(self.best_fitness) + if np.mean(self.best_fitness) != 0 + else 0 + ) + return aocc, self.best_solution diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDynamicLevyHarmonySearch.py b/nevergrad/optimization/lama/ImprovedEnhancedDynamicLevyHarmonySearch.py new file mode 100644 index 000000000..f446d6db1 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDynamicLevyHarmonySearch.py @@ -0,0 +1,67 @@ +import numpy as np + + +class ImprovedEnhancedDynamicLevyHarmonySearch: + def __init__(self, budget, harmony_memory_size=20, levy_alpha=1.5, levy_beta=1.5, global_best_rate=0.1): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.levy_alpha = levy_alpha + self.levy_beta = levy_beta + self.global_best_rate = global_best_rate + self.convergence_curve = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for t in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func, t) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + self.convergence_curve.append(1.0 / (1.0 + self.f_opt)) + + return self.f_opt, self.x_opt, self.convergence_curve + + def generate_new_harmony(self, harmony_memory, func, t): + new_harmony = np.copy(harmony_memory) + + for i in range(len(func.bounds.lb)): + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2 + new_value = np.clip(new_value, func.bounds.lb[i], func.bounds.ub[i]) + new_harmony[:, i] = new_value + + if np.random.rand() < self.global_best_rate: + global_best_index = np.argmin([func(x) for x in harmony_memory]) + new_harmony[:, i] = harmony_memory[global_best_index, i] + + levy_step_size = 0.3 / np.sqrt(t + 1) # Adjust step size dynamically + levy = self.generate_levy_flight(len(func.bounds.lb), levy_step_size) + new_harmony[:, i] += levy[:, i] + + return new_harmony + + def generate_levy_flight(self, dimension, step_size): + levy = np.zeros((self.harmony_memory_size, dimension)) + epsilon = 1e-6 + sigma = ( + np.math.gamma(1 + self.levy_beta) + * np.sin(np.pi * self.levy_beta / 2) + / (np.math.gamma((1 + self.levy_beta) / 2) * self.levy_beta * 2 ** ((self.levy_beta - 1) / 2)) + ) ** (1 / self.levy_beta) + + for i in range(dimension): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / (np.abs(v) ** (1 / self.levy_beta + epsilon)) + levy[:, i] = self.levy_alpha * step * step_size + + return levy diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm.py b/nevergrad/optimization/lama/ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm.py new file mode 100644 index 000000000..668e91822 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm.py @@ -0,0 +1,99 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + def __call__(self, func): + self.run_firework_algorithm(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedEnhancedDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..1bb26bbb6 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedDynamicQuantumSwarmOptimization.py @@ -0,0 +1,101 @@ +import numpy as np + + +class ImprovedEnhancedDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=100, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=1.0, + min_cognitive_weight=0.4, + max_social_weight=1.0, + min_social_weight=0.4, + boundary_handling=True, + alpha=0.5, + delta=0.3, + decay_rate=0.95, + max_step=0.3, + exploration_rate=0.2, + gamma=0.05, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + self.max_step = max_step + self.exploration_rate = exploration_rate + self.gamma = gamma + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + r1, r2 = np.random.rand(), np.random.rand() + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = self.cognitive_weight * r1 * (self.personal_bests[i] - current_position) + velocity_term3 = ( + self.social_weight + * r2 + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(-self.max_step, self.max_step, self.dim) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + if np.random.rand() < self.exploration_rate: + new_position = np.random.uniform(self.search_space[0], self.search_space[1], self.dim) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.gamma * np.random.normal(0, 1, self.dim) + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/ImprovedEnhancedEliteGuidedMassQGSA_v84.py b/nevergrad/optimization/lama/ImprovedEnhancedEliteGuidedMassQGSA_v84.py new file mode 100644 index 000000000..eb0d2736a --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedEliteGuidedMassQGSA_v84.py @@ -0,0 +1,125 @@ +import numpy as np + + +class ImprovedEnhancedEliteGuidedMassQGSA_v84: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) * 0.1 + r2 = np.random.rand(self.dimension) * 0.1 + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) * 0.1 + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = (self.crossover_rate + 0.1) * agents[elite_agent_idx] + ( + 1 - self.crossover_rate - 0.1 + ) * agents[i] + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11.py b/nevergrad/optimization/lama/ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11.py new file mode 100644 index 000000000..4320027b3 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11.py @@ -0,0 +1,110 @@ +import numpy as np + + +class ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.8, + crossover_rate=0.9, + inertia_weight=0.6, + cognitive_weight=1.5, + social_weight=1.5, + max_velocity=0.8, + mutation_rate=0.05, + num_generations=400, + num_local_searches=1000, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + self.num_local_searches = num_local_searches + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def local_search(self, solution, func): + best_solution = np.copy(solution) + best_cost = func(solution) + + for _ in range(self.num_local_searches): + new_solution = self.mutate_particle(solution, func) + new_cost = func(new_solution) + + if new_cost < best_cost: + best_solution = np.copy(new_solution) + best_cost = new_cost + + return best_solution + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + local_best = self.local_search(swarm[i], func) + if func(local_best) < best_cost: + global_best = np.copy(local_best) + best_cost = func(global_best) + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/ImprovedEnhancedEvolutionaryFireworksSearch.py b/nevergrad/optimization/lama/ImprovedEnhancedEvolutionaryFireworksSearch.py new file mode 100644 index 000000000..2eda986a1 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedEvolutionaryFireworksSearch.py @@ -0,0 +1,74 @@ +import numpy as np + + +class ImprovedEnhancedEvolutionaryFireworksSearch: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=10, alpha=0.2, beta=2.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = 1.0 + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for j in range(self.n_sparks): + idx1, idx2 = np.random.choice(np.delete(np.arange(self.n_fireworks), i), 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma = max(0.1, self.sigma * 0.995) # Adapt sigma parameter + return self.sigma + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmOptimization.py b/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmOptimization.py new file mode 100644 index 000000000..359651bf5 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmOptimization.py @@ -0,0 +1,71 @@ +import numpy as np + + +class ImprovedEnhancedFireworkAlgorithmOptimization: + def __init__(self, budget=10000, n_fireworks=50, n_sparks=10): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework, alpha): + sparks = np.random.uniform(firework - alpha, firework + alpha, (self.n_sparks, self.dim)) + return sparks + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + alpha = np.random.uniform(0.1, 0.9) + sparks = self.explode_firework(fireworks[i], alpha) + + for _ in range(self.n_sparks): + idx1, idx2, idx3 = np.random.choice(self.n_fireworks, 3, replace=False) + mutant = self.clip_to_bounds( + fireworks[idx1] + np.random.uniform(0.1, 0.9) * (fireworks[idx2] - fireworks[idx3]) + ) + + trial = np.where(np.random.rand(self.dim) < 0.9, mutant, fireworks[i]) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def adaptive_alpha(self, budget): + return 0.1 + 0.8 * np.exp(-5 * budget / self.budget) + + def chaotic_search(self, func): + x = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(100): + x_new = self.clip_to_bounds(x + np.random.uniform(-0.1, 0.1, self.dim)) + if func(x_new) < func(x): + x = x_new + return x + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for i in range(self.budget): + alpha = self.adaptive_alpha(i) + fireworks = self.evolve_fireworks(fireworks, func) + + for _ in range(self.n_fireworks // 5): + idx = np.random.randint(self.n_fireworks) + fireworks[idx] = self.chaotic_search(func) # Apply chaotic search for diversity + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py b/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py new file mode 100644 index 000000000..e5ff34f1d --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch.py @@ -0,0 +1,94 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.best_individual = x + self.best_fitness = func(self.best_individual) + + def __call__(self, func): + self.run_firework_algorithm(func) + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedGradientDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedEnhancedGradientDifferentialEvolution.py new file mode 100644 index 000000000..6df22119d --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedGradientDifferentialEvolution.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.stats import qmc + + +class ImprovedEnhancedGradientDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def sobol_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + def differential_mutation(a, b, c): + return np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + population = sobol_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = differential_mutation(a, b, c) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ImprovedEnhancedGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchOB.py b/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchOB.py new file mode 100644 index 000000000..0bcf2fc9a --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchOB.py @@ -0,0 +1,75 @@ +import numpy as np + + +class ImprovedEnhancedHarmonySearchOB: + def __init__( + self, budget=10000, harmony_memory_size=20, hmcr=0.75, par=0.3, bw=0.5, bw_min=0.01, bw_decay=0.99 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr # Harmony Memory Consideration Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + self.bw_min = bw_min # Minimum Bandwidth + self.bw_decay = bw_decay # Bandwidth decay rate + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += self.bw * np.random.randn() + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + self.bw = max(self.bw * self.bw_decay, self.bw_min) # Decay the bandwidth with a minimum value + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py b/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py new file mode 100644 index 000000000..df992e8d2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration.py @@ -0,0 +1,100 @@ +import numpy as np + + +class ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration: + def __init__( + self, + budget, + harmony_memory_size=10, + bandwidth_min=0.1, + bandwidth_max=1.0, + mutation_rate=0.2, + levy_iterations=5, + levy_alpha=1.0, + levy_beta_min=1.0, + levy_beta_max=2.0, + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.bandwidth_min = bandwidth_min + self.bandwidth_max = bandwidth_max + self.mutation_rate = mutation_rate + self.levy_iterations = levy_iterations + self.levy_alpha = levy_alpha + self.levy_beta_min = levy_beta_min + self.levy_beta_max = levy_beta_max + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + convergence_curve = [] + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + convergence_curve.append(1.0 / (1.0 + self.f_opt)) # Calculate AOCC + + return self.f_opt, self.x_opt, convergence_curve + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + bandwidth = self.bandwidth_min + (self.bandwidth_max - self.bandwidth_min) * np.random.rand() + + for i in range(len(func.bounds.lb)): + if np.random.rand() < 0.5: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.harmony_memory_size, + size=int(self.mutation_rate * self.harmony_memory_size), + replace=False, + ) + for idx in mutation_indices: + new_harmony[idx, i] = np.random.uniform(func.bounds.lb[i], func.bounds.ub[i]) + + if np.random.rand() < 0.1: # Introduce Adaptive Levy Flight + levy = self.generate_adaptive_levy_flight(len(func.bounds.lb)) + new_harmony[:, i] += levy + + return new_harmony + + def generate_adaptive_levy_flight(self, dimension): + beta = np.random.uniform(self.levy_beta_min, self.levy_beta_max) # Randomly select beta in range + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + levy = np.zeros(self.harmony_memory_size) + for _ in range(self.levy_iterations): + u = np.random.normal(0, sigma, self.harmony_memory_size) + v = np.random.normal(0, 1, self.harmony_memory_size) + step = u / abs(v) ** (1 / beta) + levy += step * self.levy_alpha + beta *= 1.2 # Adjust beta for next iteration + sigma = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + + return levy diff --git a/nevergrad/optimization/lama/ImprovedEnhancedMemeticHarmonyOptimization.py b/nevergrad/optimization/lama/ImprovedEnhancedMemeticHarmonyOptimization.py new file mode 100644 index 000000000..99c2cc6b6 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedMemeticHarmonyOptimization.py @@ -0,0 +1,124 @@ +import numpy as np + + +class ImprovedEnhancedMemeticHarmonyOptimization: + def __init__( + self, + budget=10000, + hmcr=0.7, + par=0.4, + bw=0.6, + memetic_iter=1000, + memetic_prob=0.8, + memetic_step=0.1, + explore_prob=0.1, + local_search_prob=0.7, + ): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.explore_prob = explore_prob + self.local_search_prob = local_search_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs, n_select): + idx = np.argsort(harmony_memory_costs)[:n_select] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def _adapt_parameters(self, iteration): + self.hmcr = max(0.5, self.hmcr - 0.1 * iteration / self.budget) + self.par = min(0.7, self.par + 0.1 * iteration / self.budget) + self.bw = max(0.3, self.bw - 0.2 * iteration / self.budget) + self.memetic_prob = min(0.95, self.memetic_prob + 0.1 * iteration / self.budget) + self.memetic_step = max(0.01, self.memetic_step - 0.09 * iteration / self.budget) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + convergence_curve = [] + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < self.explore_prob: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < self.local_search_prob: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs, len(harmony_memory) - self.budget + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + self._adapt_parameters(i) + convergence_curve.append(new_cost) + + mean_aocc = np.mean(np.array(convergence_curve)) + std_dev = np.std(np.array(convergence_curve)) + + return mean_aocc, std_dev diff --git a/nevergrad/optimization/lama/ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..d806306de --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 60 # Adjusted population size + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.9 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.25 + self.adaptive_learning_rate = 0.02 + self.strategy_switches = [0.2, 0.5, 0.8] + self.local_opt_prob = 0.2 # Increased probability of local optimization + self.learning_rate_decay = 0.99 # Added learning rate decay for adaptiveness + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/ImprovedEnhancedQuantumHarmonySearch.py b/nevergrad/optimization/lama/ImprovedEnhancedQuantumHarmonySearch.py new file mode 100644 index 000000000..53d819041 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedQuantumHarmonySearch.py @@ -0,0 +1,43 @@ +import numpy as np +from scipy.stats import cauchy + + +class ImprovedEnhancedQuantumHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, bw=0.05): + self.budget = budget + self.hmcr = hmcr # Harmony Memory Considering Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=self.bw + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedRefinedAdaptiveQGSA_v61.py b/nevergrad/optimization/lama/ImprovedEnhancedRefinedAdaptiveQGSA_v61.py new file mode 100644 index 000000000..9fdb51fd0 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedRefinedAdaptiveQGSA_v61.py @@ -0,0 +1,131 @@ +import numpy as np + + +class ImprovedEnhancedRefinedAdaptiveQGSA_v61: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_reflection(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def _update_agents_with_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedSADE.py b/nevergrad/optimization/lama/ImprovedEnhancedSADE.py new file mode 100644 index 000000000..1e2622dc4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedSADE.py @@ -0,0 +1,104 @@ +import numpy as np + + +class ImprovedEnhancedSADE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + base_F = 0.8 # Differential weight + base_CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, base_F) + CR_values = np.full(population_size, base_CR) + + # Adaptive parameters for diversity control + stagnation_limit = 50 + no_improvement_counter = 0 + diversity_threshold = 1e-5 + + # Additional parameters for enhanced mutation strategy + enhanced_mutation_prob = 0.2 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Enhanced mutation strategy + if np.random.rand() < enhanced_mutation_prob and self.x_opt is not None: + d = population[np.random.choice(indices)] + mutant = np.clip(mutant + F_values[i] * (self.x_opt - d), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Handle stagnation by enhancing exploration + if no_improvement_counter >= stagnation_limit: + population_variance = np.var(population, axis=0) + if np.all(population_variance < diversity_threshold): + new_population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += population_size + no_improvement_counter = 0 + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedEnhancedStochasticMetaHeuristicOptimizer.py b/nevergrad/optimization/lama/ImprovedEnhancedStochasticMetaHeuristicOptimizer.py new file mode 100644 index 000000000..dca34bafe --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnhancedStochasticMetaHeuristicOptimizer.py @@ -0,0 +1,104 @@ +import numpy as np + + +class ImprovedEnhancedStochasticMetaHeuristicOptimizer: + def __init__( + self, + budget, + swarm_size=50, + differential_weight=0.9, + crossover_rate=0.9, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + max_velocity=0.6, + mutation_rate=0.03, + num_generations=500, + ): + self.budget = budget + self.swarm_size = swarm_size + self.differential_weight = differential_weight + self.crossover_rate = crossover_rate + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.max_velocity = max_velocity + self.mutation_rate = mutation_rate + self.num_generations = num_generations + + def initialize_swarm(self, func): + return [ + np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + for _ in range(self.swarm_size) + ] + + def clipToBounds(self, vector, func): + return np.clip(vector, func.bounds.lb, func.bounds.ub) + + def optimize_particle(self, particle, func, personal_best, global_best, velocity, swarm): + r1, r2 = np.random.choice(len(swarm), 2, replace=False) + r3 = np.random.choice(len(swarm)) + + mutant = swarm[r1] + self.differential_weight * (swarm[r2] - swarm[r3]) + crossover_mask = np.random.rand(len(particle)) < self.crossover_rate + trial = np.where(crossover_mask, mutant, particle) + + new_velocity = ( + self.inertia_weight * velocity + + self.cognitive_weight * np.random.rand() * (personal_best - particle) + + self.social_weight * np.random.rand() * (global_best - particle) + ) + new_velocity = np.clip(new_velocity, -self.max_velocity, self.max_velocity) + + new_particle = particle + new_velocity + return self.clipToBounds(new_particle, func), new_velocity, trial + + def mutate_particle(self, particle, func): + mutated_particle = particle + np.random.normal(0, self.mutation_rate, size=len(particle)) + return self.clipToBounds(mutated_particle, func) + + def hybrid_optimization(self, func): + swarm = self.initialize_swarm(func) + personal_best = np.copy(swarm) + global_best = np.copy(swarm[np.argmin([func(p) for p in swarm])]) + best_cost = func(global_best) + + improvement_counter = 0 # Track the number of consecutive non-improvements + + for _ in range(self.num_generations): + for i, particle in enumerate(swarm): + velocity = np.zeros_like(particle) # Initialize velocity for each particle + swarm[i], velocity, trial = self.optimize_particle( + particle, func, personal_best[i], global_best, velocity, swarm + ) + personal_best[i] = np.where(func(trial) < func(personal_best[i]), trial, personal_best[i]) + + if np.random.rand() < self.mutation_rate: + swarm[i] = self.mutate_particle(swarm[i], func) + + if func(swarm[i]) < best_cost: + global_best = np.copy(swarm[i]) + best_cost = func(global_best) + improvement_counter = 0 + else: + improvement_counter += 1 + if ( + improvement_counter >= 20 + ): # Reinitialize the particle if no improvement after 20 iterations + swarm[i] = np.random.uniform(func.bounds.lb, func.bounds.ub, size=len(func.bounds.lb)) + personal_best[i] = np.copy(swarm[i]) + improvement_counter = 0 + + return best_cost, global_best + + def __call__(self, func): + best_aocc = 0 + best_solution = None + + for _ in range(self.budget): + cost, solution = self.hybrid_optimization(func) + if cost > best_aocc: + best_aocc = cost + best_solution = solution + + return best_aocc, best_solution diff --git a/nevergrad/optimization/lama/ImprovedEnsembleMemeticOptimizer.py b/nevergrad/optimization/lama/ImprovedEnsembleMemeticOptimizer.py new file mode 100644 index 000000000..ea1d18bbb --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedEnsembleMemeticOptimizer.py @@ -0,0 +1,146 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedEnsembleMemeticOptimizer: + def __init__(self, budget=10000, population_size=150): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.7 + self.F = 0.8 + self.CR = 0.9 + self.memory_size = 7 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + while eval_count < self.budget: + new_population = [] + for i in range(self.population_size): + if current_strategy == 0: + # Differential Evolution Strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[self.rng.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.CR + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + elif current_strategy == 1: + # Particle Swarm Optimization Strategy + w = 0.5 + c1 = 1.5 + c2 = 1.5 + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocity = ( + w * self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + + c1 * r1 * (best_individual - population[i]) + + c2 * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocity, self.bounds[0], self.bounds[1]) + else: + # Simulated Annealing Strategy + T = max(1e-10, (self.budget - eval_count) / self.budget) + neighbor = population[i] + self.rng.normal(0, 1, self.dim) + neighbor = np.clip(neighbor, self.bounds[0], self.bounds[1]) + neighbor_fitness = evaluate(neighbor) + eval_count += 1 + if neighbor_fitness < fitness[i] or self.rng.random() < np.exp( + (fitness[i] - neighbor_fitness) / T + ): + trial = neighbor + else: + trial = population[i] + + if current_strategy != 2: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + else: + new_population.append(population[i]) + else: + new_population.append(trial) + if neighbor_fitness < best_fitness: + best_individual = trial + best_fitness = neighbor_fitness + + if eval_count >= self.budget: + break + + population = np.array(new_population) + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = ImprovedEnsembleMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedFireworkAlgorithm.py b/nevergrad/optimization/lama/ImprovedFireworkAlgorithm.py new file mode 100644 index 000000000..76a62ec6b --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedFireworkAlgorithm.py @@ -0,0 +1,77 @@ +import numpy as np + + +class ImprovedFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=20, n_sparks=5, alpha=0.1, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma *= 0.95 # Adjusted sigma update rule for slower decrease + return max(0.1, self.sigma) + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedHybridAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedHybridAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..30e23b62b --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridAdaptiveDifferentialEvolution.py @@ -0,0 +1,82 @@ +import numpy as np + + +class ImprovedHybridAdaptiveDifferentialEvolution: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + while evaluations < self.budget: + new_population = np.zeros_like(population) + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Mutation and crossover + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive parameter control based on success rates + success_rate = np.mean(fitness < np.median(fitness)) + F = base_F * (1 - success_rate) + Cr = base_Cr * success_rate + + # Adaptive restart mechanism + if evaluations > 0.5 * self.budget and np.std(fitness) < 1e-6: + # Re-initialize population if stuck + population = np.random.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedHybridAdaptiveGeneticSwarmOptimizer.py b/nevergrad/optimization/lama/ImprovedHybridAdaptiveGeneticSwarmOptimizer.py new file mode 100644 index 000000000..ab1b93eee --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridAdaptiveGeneticSwarmOptimizer.py @@ -0,0 +1,139 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedHybridAdaptiveGeneticSwarmOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.85 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.8 + self.social_coeff = 1.8 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population randomly within bounds + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + # Phase One: Hybrid Strategy Exploration + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm Strategy + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + # Particle Swarm Optimization Strategy + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (best_individual - population[i]) + + self.social_coeff * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Perform local search on elite individuals + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + # Update performance memory and adapt strategy + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Phase Two: Intensified Exploitation using Local Search + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/ImprovedHybridAdaptiveHarmonicFireworksTabuSearch.py b/nevergrad/optimization/lama/ImprovedHybridAdaptiveHarmonicFireworksTabuSearch.py new file mode 100644 index 000000000..a2c32c381 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridAdaptiveHarmonicFireworksTabuSearch.py @@ -0,0 +1,111 @@ +import numpy as np + + +class ImprovedHybridAdaptiveHarmonicFireworksTabuSearch: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.05 + self.bandwidth *= 0.92 + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def enhance_search(self, harmony_memory, best_solution, func, bounds): + self.diversify_search(harmony_memory, bounds) + self.local_search(harmony_memory, best_solution, func, bounds) + + def hybrid_search(self, harmony_memory, best_solution, func, bounds): + self.enhance_search(harmony_memory, best_solution, func, bounds) + self.adaptive_tabu_search(harmony_memory, best_solution, func, bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.hybrid_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/ImprovedHybridCMAESDE.py b/nevergrad/optimization/lama/ImprovedHybridCMAESDE.py new file mode 100644 index 000000000..009deab36 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridCMAESDE.py @@ -0,0 +1,183 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedHybridCMAESDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 50 + self.strategy_weights = np.ones(3) + self.strategy_success = np.zeros(3) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 20 + self.dynamic_parameters_adjustment_threshold = 30 + self.pop_shrink_factor = 0.1 + self.diversification_period = 50 + self.sigma = 0.3 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_cmaes(self, population, cma_es): + z = np.random.randn(self.dim) + return cma_es.mean + self.sigma * cma_es.cov.dot(z) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _diversify_population(self, population, fitness, func): + num_new_individuals = int(self.pop_size * 0.1) # 10% of the population + new_individuals = np.random.uniform(self.lb, self.ub, (num_new_individuals, self.dim)) + new_fitness = np.array([func(ind) for ind in new_individuals]) + self.evaluations += num_new_individuals + + combined_population = np.vstack((population, new_individuals)) + combined_fitness = np.hstack((fitness, new_fitness)) + + best_indices = np.argsort(combined_fitness)[: self.pop_size] + return combined_population[best_indices], combined_fitness[best_indices] + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + cma_es = CMAES(self.dim, self.lb, self.ub) + + iteration = 0 + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3 = np.random.choice(indices, 3, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + else: # strategy == self._mutation_cmaes + donor = self._mutation_cmaes(population, cma_es) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [self._mutation_best_1, self._mutation_rand_1, self._mutation_cmaes].index( + strategy + ) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + cma_es.update(population, fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 3) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self._dynamic_parameters() + + if self.no_improvement_count >= self.dynamic_adjustment_period: + new_pop_size = max(20, int(self.pop_size * (1 - self.pop_shrink_factor))) + population = population[:new_pop_size] + fitness = fitness[:new_pop_size] + self.pop_size = new_pop_size + self.no_improvement_count = 0 + + if iteration % self.diversification_period == 0 and self.evaluations < self.budget: + population, fitness = self._diversify_population(population, fitness, func) + + iteration += 1 + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt + + +class CMAES: + def __init__(self, dim, lb, ub): + self.dim = dim + self.lb = lb + self.ub = ub + self.mean = np.random.uniform(self.lb, self.ub, self.dim) + self.cov = np.eye(self.dim) + self.sigma = 0.5 + + def update(self, population, fitness): + best_idx = np.argmin(fitness) + self.mean = population[best_idx] + cov_update = np.cov(population.T) + self.cov = 0.9 * self.cov + 0.1 * cov_update diff --git a/nevergrad/optimization/lama/ImprovedHybridGeneticPSO.py b/nevergrad/optimization/lama/ImprovedHybridGeneticPSO.py new file mode 100644 index 000000000..b50f615ea --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridGeneticPSO.py @@ -0,0 +1,139 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedHybridGeneticPSO: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.2 + self.local_search_probability = 0.95 + self.crossover_prob = 0.9 + self.mutation_prob = 0.1 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.memory_size = 30 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 2 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.min_std_dev = 1e-5 # Minimum standard deviation for convergence check + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + + phase_one_budget = int(self.budget * 0.6) + phase_two_budget = self.budget - phase_one_budget + + while eval_count < phase_one_budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + else: + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (best_individual - population[i]) + + self.social_coeff * r2 * (np.mean(population, axis=0) - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + if self.rng.random() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (performance_memory[0] - performance_memory[-1]) / max( + 1e-10, performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + if np.std(fitness) < self.min_std_dev: + break + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.max_iter}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/ImprovedHybridPSODEOptimizer.py b/nevergrad/optimization/lama/ImprovedHybridPSODEOptimizer.py new file mode 100644 index 000000000..0dc6b483e --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedHybridPSODEOptimizer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class ImprovedHybridPSODEOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + # Initialize population + population_size = 20 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + global_best_position = trial_vector + global_best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedIterativeAdaptiveGradientEvolver.py b/nevergrad/optimization/lama/ImprovedIterativeAdaptiveGradientEvolver.py new file mode 100644 index 000000000..06419b9eb --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedIterativeAdaptiveGradientEvolver.py @@ -0,0 +1,99 @@ +import numpy as np + + +class ImprovedIterativeAdaptiveGradientEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.2, + mutation_intensity=0.1, + crossover_probability=0.7, + gradient_step=0.05, + mutation_decay=0.98, + gradient_enhancement=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement = gradient_enhancement + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + if self.mutation_intensity > 0: + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return individual + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return parent1 if np.random.rand() < 0.5 else parent2 + + def adaptive_gradient(self, individual, func, best_individual): + if self.gradient_enhancement: + gradient_direction = individual - best_individual + step_size = self.gradient_step / (1 + np.linalg.norm(gradient_direction)) + new_individual = individual - step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + + new_population = np.zeros_like(population) + for i in range(self.population_size): + if i < len(elites): + new_population[i] = self.adaptive_gradient(elites[i], func, best_individual) + else: + # Fixing the random choice for Python's zero-dimensional array error. + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1 = elites[parents_indices[0]] + parent2 = elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population[i] = child + + fitness = self.evaluate_fitness(func, new_population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = new_population[min_idx] + + population = new_population + evaluations += self.population_size + + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ImprovedMetaDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/ImprovedMetaDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..03e745146 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedMetaDynamicQuantumSwarmOptimization.py @@ -0,0 +1,92 @@ +import numpy as np + + +class ImprovedMetaDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + beta=0.9, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.beta = beta + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + self.inertia_weight = self.max_inertia_weight - self.beta * (iteration / self.budget) * ( + self.max_inertia_weight - self.min_inertia_weight + ) + self.cognitive_weight = self.max_cognitive_weight - self.beta * (iteration / self.budget) * ( + self.max_cognitive_weight - self.min_cognitive_weight + ) + self.social_weight = self.min_social_weight + self.beta * (iteration / self.budget) * ( + self.max_social_weight - self.min_social_weight + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def adjust_search_space(self, func): + for i in range(self.dim): + lower_bound = min(self.particles[:, i].min(), self.search_space[0]) + upper_bound = max(self.particles[:, i].max(), self.search_space[1]) + self.search_space = (lower_bound, upper_bound) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + self.adjust_search_space(func) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/ImprovedMultiOperatorSearch.py b/nevergrad/optimization/lama/ImprovedMultiOperatorSearch.py new file mode 100644 index 000000000..983454723 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedMultiOperatorSearch.py @@ -0,0 +1,147 @@ +import numpy as np + + +class ImprovedMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Local Search) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Local Search for fine-tuning solutions + if i % 10 == 0: # Perform local search every 10 iterations + for _ in range(5): # Number of local search steps + x_ls = x + np.random.normal(0, 0.1, self.dim) + x_ls = np.clip(x_ls, self.lower_bound, self.upper_bound) + f_ls = func(x_ls) + + if f_ls < f: + positions[idx] = x_ls + f = f_ls + + if f_ls < personal_best_scores[idx]: + personal_best_scores[idx] = f_ls + personal_bests[idx] = x_ls.copy() + + if f_ls < global_best_score: + global_best_score = f_ls + global_best_position = x_ls.copy() + + if f_ls < self.f_opt: + self.f_opt = f_ls + self.x_opt = x_ls.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedMultiStrategySelfAdaptiveDE.py b/nevergrad/optimization/lama/ImprovedMultiStrategySelfAdaptiveDE.py new file mode 100644 index 000000000..e3fc14ff2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedMultiStrategySelfAdaptiveDE.py @@ -0,0 +1,128 @@ +import numpy as np + + +class ImprovedMultiStrategySelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.5, 0.9 + CR_min, CR_max = 0.1, 1.0 + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + memory_size = 5 # Memory size for adaptive parameters + memory_F = np.full(memory_size, 0.5) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), F_min, F_max) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), CR_min, CR_max) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..ecde6e275 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedOppositionBasedDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class ImprovedOppositionBasedDifferentialEvolution: + def __init__(self, budget=10000, pop_size=20, f_init=0.5, cr_init=0.9, scaling_factor=0.1, p_best=0.25): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.cr_init = cr_init + self.scaling_factor = scaling_factor + self.p_best = p_best + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def adaptive_parameter_update(self, success, f, cr, scaling_factor): + f_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) + cr_scale = scaling_factor * (1.0 - 2.0 * np.random.rand()) + f_new = np.clip(f + f_scale, 0.0, 1.0) + cr_new = np.clip(cr + cr_scale, 0.0, 1.0) + + return f_new, cr_new + + def update_best_solution(self, current_fitness, trial_fitness, current_solution, trial_solution): + if trial_fitness < current_fitness: + return trial_solution, trial_fitness + else: + return current_solution, current_fitness + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + trial_fitness = func(trial_solution) + opponent_fitness = func(opponent_solution) + + if trial_fitness < self.pop_fitness[j]: + self.population[j] = trial_solution + self.pop_fitness[j] = trial_fitness + + if opponent_fitness < self.pop_fitness[j]: + self.population[j] = opponent_solution + self.pop_fitness[j] = opponent_fitness + + f_current, cr_current = self.adaptive_parameter_update( + trial_fitness < self.pop_fitness[j] or opponent_fitness < self.pop_fitness[j], + f_current, + cr_current, + self.scaling_factor, + ) + + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], trial_fitness, self.population[j], trial_solution + ) + self.population[j], self.pop_fitness[j] = self.update_best_solution( + self.pop_fitness[j], opponent_fitness, self.population[j], opponent_solution + ) + + if self.pop_fitness[j] < self.f_opt: + self.f_opt = self.pop_fitness[j] + self.x_opt = self.population[j] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedPrecisionAdaptiveEvolutiveStrategy.py b/nevergrad/optimization/lama/ImprovedPrecisionAdaptiveEvolutiveStrategy.py new file mode 100644 index 000000000..4818b0096 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedPrecisionAdaptiveEvolutiveStrategy.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ImprovedPrecisionAdaptiveEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate=0.02, mutation_strength=0.2): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + crossover_rate = 0.92 # Correctly define crossover_rate within the method scope + for _ in range(num_children): + if np.random.rand() < crossover_rate: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + # Parameters + population_size = 150 + num_generations = self.budget // population_size + mutation_rate = 0.02 # Lower initial mutation rate + mutation_strength = 0.2 # Lower mutation strength to start fine-tuning earlier + + # Initialize + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + # Evolution loop + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, population_size // 5) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + # Generate new population using crossover and mutation + population = self.crossover(best_population, population_size - len(best_population)) + population = self.mutate(population, mutation_rate, mutation_strength) + + # Adaptive mutation adjustments + if gen % 10 == 0 and gen > 0: + mutation_rate /= 1.05 # More gradual reduction in mutation rate + mutation_strength /= 1.05 # More gradual reduction in mutation strength to maintain diversity + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning.py b/nevergrad/optimization/lama/ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning.py new file mode 100644 index 000000000..516982ec4 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning.py @@ -0,0 +1,168 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 10 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedQuantumEnhancedDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedQuantumEnhancedDynamicDifferentialEvolution.py new file mode 100644 index 000000000..0950dbcff --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedQuantumEnhancedDynamicDifferentialEvolution.py @@ -0,0 +1,186 @@ +import numpy as np + + +class ImprovedQuantumEnhancedDynamicDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=20, + F_min=0.5, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.1, + perturbation_decay=0.95, + alpha=0.7, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.7 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/ImprovedQuantumHarmonySearch.py b/nevergrad/optimization/lama/ImprovedQuantumHarmonySearch.py new file mode 100644 index 000000000..dcae38160 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedQuantumHarmonySearch.py @@ -0,0 +1,51 @@ +import numpy as np + + +class ImprovedQuantumHarmonySearch: + def __init__( + self, budget, harmony_memory_size=10, pitch_adjustment_rate=0.1, bandwidth=0.01, mutation_rate=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.bandwidth = bandwidth + self.mutation_rate = mutation_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + return self.f_opt, self.x_opt + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < self.pitch_adjustment_rate: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + + if np.random.rand() < self.mutation_rate: + new_harmony[:, i] = np.random.uniform( + func.bounds.lb[i], func.bounds.ub[i], size=self.harmony_memory_size + ) + + return new_harmony diff --git a/nevergrad/optimization/lama/ImprovedQuantumLevyAdaptiveHybridSearch.py b/nevergrad/optimization/lama/ImprovedQuantumLevyAdaptiveHybridSearch.py new file mode 100644 index 000000000..a6c4650a2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedQuantumLevyAdaptiveHybridSearch.py @@ -0,0 +1,158 @@ +import numpy as np + + +class ImprovedQuantumLevyAdaptiveHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.9 + 0.1 * progress + crossover_rate = 0.8 - 0.3 * progress + quantum_factor = 0.6 - 0.2 * progress + levy_factor = 0.1 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 100 # Increased population size for better diversity + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 20 # Increased local search iterations + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedQuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/ImprovedQuantumSimulatedAnnealing.py new file mode 100644 index 000000000..1bd0b4463 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedQuantumSimulatedAnnealing.py @@ -0,0 +1,44 @@ +import numpy as np + + +class ImprovedQuantumSimulatedAnnealing: + def __init__(self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1, damp_ratio=0.9): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.damp_ratio = damp_ratio + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + self.explore_ratio *= self.damp_ratio + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedRefinedAdaptiveDynamicExplorationOptimization.py b/nevergrad/optimization/lama/ImprovedRefinedAdaptiveDynamicExplorationOptimization.py new file mode 100644 index 000000000..1b4994582 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedAdaptiveDynamicExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class ImprovedRefinedAdaptiveDynamicExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 # Increased swarm size for better exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.6 # Slightly increased Cognitive constant for better local search + c2 = 1.6 # Slightly increased Social constant for better global search + w = 0.6 # Slightly decreased inertia weight for faster convergence + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.85 # Reduced momentum term to balance exploration and exploitation + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.9 # Increased differential weight for stronger mutation + CR = 0.8 # Slightly decreased crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.2 # Increased diversity threshold + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation for more frequent diversification + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor + max_exploration_cycles = 40 # Reduced max exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedRefinedAdaptiveDynamicExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedRefinedAdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/ImprovedRefinedAdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..7d1506b8d --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedAdaptiveMultiOperatorSearch.py @@ -0,0 +1,146 @@ +import numpy as np + + +class ImprovedRefinedAdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.random.randn(swarm_size, self.dim) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Cognitive constant + c2 = 2.0 # Social constant + w_max = 0.9 # Max inertia weight + w_min = 0.4 # Min inertia weight + w_decay = (w_max - w_min) / self.budget + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + w = max(w_min, w_max - w_decay * i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = ImprovedRefinedAdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..012dac411 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,166 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, mutation_factor=0.7, crossover_rate=0.9, cluster_size=5): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.cluster_size = cluster_size + self.epsilon = 1e-8 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + kmeans = KMeans(n_clusters=self.cluster_size, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for i in range(len(population)): + if np.linalg.norm(population[i] - cluster_centers[kmeans.labels_[i]]) < 1e-1: + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_params(success_rate): + if success_rate > 0.2: + new_mutation_factor = self.mutation_factor * 1.1 + new_crossover_rate = self.crossover_rate * 1.05 + else: + new_mutation_factor = self.mutation_factor * 0.9 + new_crossover_rate = self.crossover_rate * 0.95 + return np.clip(new_mutation_factor, 0.4, 1.0), np.clip(new_crossover_rate, 0.5, 1.0) + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.epsilon + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + success_count_history = [] + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + self.archive.append(self.x_opt) + + maintain_diversity(population, fitness) + success_rate = success_count / self.population_size + self.mutation_factor, self.crossover_rate = adaptive_params(success_rate) + + success_count_history.append(success_rate) + if len(success_count_history) > 10: + success_count_history.pop(0) + + avg_success_rate = np.mean(success_count_history) + + if avg_success_rate > 0.2: + self.mutation_factor *= 1.1 + self.crossover_rate *= 1.05 + else: + self.mutation_factor *= 0.9 + self.crossover_rate *= 0.95 + + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.5, 1.0) + + if len(self.archive) > 0: + archive_selection = np.random.choice(len(self.archive)) + archive_mutant = np.clip( + self.archive[archive_selection] + self.mutation_factor * np.random.randn(self.dim), + self.bounds[0], + self.bounds[1], + ) + archive_mutant = np.clip(archive_mutant, self.bounds[0], self.bounds[1]) + archive_fitness = func(archive_mutant) + evaluations += 1 + if archive_fitness < self.f_opt: + self.f_opt = archive_fitness + self.x_opt = archive_mutant + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization.py new file mode 100644 index 000000000..24338d6b5 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization.py @@ -0,0 +1,138 @@ +import numpy as np +from scipy.stats import qmc + + +class ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + self.diversity_factor = 0.1 + self.f_opt = np.Inf + self.x_opt = None + + def __call__(self, func): + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1, x2 = x.copy(), x.copy() + x1[i], x2[i] = x1[i] + h, x2[i] - h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + perturbation = np.random.uniform( + -self.diversity_factor, self.diversity_factor, self.dim + ) + if fitness[i] > fitness[j]: + population[i] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[i] = func(population[i]) + else: + population[j] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4.py b/nevergrad/optimization/lama/ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4.py new file mode 100644 index 000000000..5b87327fb --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4.py @@ -0,0 +1,189 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.9 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.25 + self.adaptive_learning_rate = 0.02 + self.strategy_switches = [0.2, 0.5, 0.8] + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < 0.1: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..dc98865b2 --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO.py @@ -0,0 +1,187 @@ +import numpy as np + + +class ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Differential weight for DE + initial_CR = 0.9 # Crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + local_search_prob = 0.3 # Probability of performing local search + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + def local_search(solution): + # Randomly perturb the solution + perturbation = np.random.normal(0, 0.1, size=self.dim) + new_solution = np.clip(solution + perturbation, bounds[0], bounds[1]) + new_fitness = func(new_solution) + return new_solution, new_fitness + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into the population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + replace_indices = np.random.choice(range(population_size), elite_size, replace=False) + new_population[replace_indices] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + # Local search phase + if np.random.rand() < local_search_prob: + best_ind = population[np.argmin(fitness)] + new_solution, new_fitness_val = local_search(best_ind) + evaluations += 1 + + if new_fitness_val < self.f_opt: + self.f_opt = new_fitness_val + self.x_opt = new_solution + last_improvement = evaluations + update_elite_memory(elite_memory, new_solution, new_fitness_val) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedSelfAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedSelfAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..e4282ce1b --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedSelfAdaptiveDifferentialEvolution.py @@ -0,0 +1,96 @@ +import numpy as np + + +class ImprovedSelfAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + base_F = 0.8 # Differential weight + base_CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, base_F) + CR_values = np.full(population_size, base_CR) + + # Adaptive parameters for diversity control + stagnation_limit = 50 + no_improvement_counter = 0 + diversity_threshold = 1e-5 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Handle stagnation by enhancing exploration + if no_improvement_counter >= stagnation_limit: + population_variance = np.var(population, axis=0) + if np.all(population_variance < diversity_threshold): + new_population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += population_size + no_improvement_counter = 0 + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedSelfAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/ImprovedSelfAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..7fbd4ac5f --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedSelfAdaptiveHybridOptimizer.py @@ -0,0 +1,132 @@ +import numpy as np + + +class ImprovedSelfAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 # Further increased population size for better exploration + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.0 # Further tuned parameters for velocity update + self.c2 = 1.0 + self.w = 0.5 # Adjusted inertia weight for better convergence + self.elite_fraction = 0.4 # Increased elite fraction for better local refinement + self.diversity_threshold = 1e-6 # Adjusted threshold for controlled diversity + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 15, self.budget - evaluations + ) # Further increased local search iterations + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.02, bounds.lb, bounds.ub + ) # Further reduced perturbation + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution.py b/nevergrad/optimization/lama/ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution.py new file mode 100644 index 000000000..76931a40a --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution: + def __init__( + self, budget=10000, pop_size=20, f_init=0.5, f_min=0.1, f_max=0.9, cr_init=0.9, cr_min=0.1, cr_max=0.9 + ): + self.budget = budget + self.pop_size = pop_size + self.f_init = f_init + self.f_min = f_min + self.f_max = f_max + self.cr_init = cr_init + self.cr_min = cr_min + self.cr_max = cr_max + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.population = np.random.uniform(func.bounds.lb, func.bounds.ub, (self.pop_size, self.dim)) + self.pop_fitness = np.array([func(x) for x in self.population]) + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_solution, best_solution, f, cr): + mutant_solution = current_solution + f * (best_solution - current_solution) + crossover_mask = np.random.rand(self.dim) < cr + trial_solution = np.where(crossover_mask, mutant_solution, current_solution) + return np.clip(trial_solution, func.bounds.lb, func.bounds.ub) + + def self_adaptive_parameter_update(self, success, f, cr, f_init, cr_init): + f_scale = 0.1 if success else -0.1 + cr_scale = 0.1 if success else -0.1 + f_new = f * (1.0 + f_scale) + cr_new = cr + cr_scale + + if f_new < 0.0 or f_new > 1.0: + f_new = f_init + if cr_new < 0.0 or cr_new > 1.0: + cr_new = cr_init + + return f_new, cr_new + + def __call__(self, func): + self.initialize_population(func) + f_current = self.f_init + cr_current = self.cr_init + + for _ in range(self.budget): + idx = np.argsort(self.pop_fitness) + best_solution = self.population[idx[0]] + + for j in range(self.pop_size): + current_solution = self.population[j] + + opponent_solution = self.opposition_based_learning(current_solution, func.bounds) + trial_solution = self.differential_evolution( + func, current_solution, best_solution, f_current, cr_current + ) + + if func(trial_solution) < func(current_solution): + self.population[j] = trial_solution + self.pop_fitness[j] = func(trial_solution) + f_current, cr_current = self.self_adaptive_parameter_update( + True, f_current, cr_current, self.f_init, self.cr_init + ) + else: + f_current, cr_current = self.self_adaptive_parameter_update( + False, f_current, cr_current, self.f_init, self.cr_init + ) + + if func(opponent_solution) < func(current_solution): + self.population[j] = opponent_solution + self.pop_fitness[j] = func(opponent_solution) + f_current, cr_current = self.self_adaptive_parameter_update( + True, f_current, cr_current, self.f_init, self.cr_init + ) + else: + f_current, cr_current = self.self_adaptive_parameter_update( + False, f_current, cr_current, self.f_init, self.cr_init + ) + + if func(trial_solution) < self.f_opt: + self.f_opt = func(trial_solution) + self.x_opt = trial_solution + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ImprovedUnifiedAdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/ImprovedUnifiedAdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..64e5d25bb --- /dev/null +++ b/nevergrad/optimization/lama/ImprovedUnifiedAdaptiveMemeticOptimizer.py @@ -0,0 +1,155 @@ +import numpy as np +from scipy.optimize import minimize + + +class ImprovedUnifiedAdaptiveMemeticOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.6 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 4 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif current_strategy == 2: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + # Memetic Algorithm with Local Search + elite_index = np.argmin(fitness) + trial = population[elite_index] + self.learning_rate * (self.rng.random(self.dim) - 0.5) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if current_strategy != 3: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.min_local_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/IncrementalCrossoverOptimization.py b/nevergrad/optimization/lama/IncrementalCrossoverOptimization.py new file mode 100644 index 000000000..722e4ccb7 --- /dev/null +++ b/nevergrad/optimization/lama/IncrementalCrossoverOptimization.py @@ -0,0 +1,72 @@ +import numpy as np + + +class IncrementalCrossoverOptimization: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize_population(self, num_individuals=50): + return np.random.uniform(self.lower_bound, self.upper_bound, (num_individuals, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness, top_k=10): + indices = np.argsort(fitness)[:top_k] + return population[indices], fitness[indices] + + def crossover(self, parents): + num_parents = len(parents) + offspring = np.empty_like(parents) + for i in range(len(parents)): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i, :cross_point] = parents[p1, :cross_point] + offspring[i, cross_point:] = parents[p2, cross_point:] + return offspring + + def mutate(self, population, scale=0.1): + perturbation = np.random.normal(0, scale, size=population.shape) + mutated = np.clip(population + perturbation, self.lower_bound, self.upper_bound) + return mutated + + def __call__(self, func): + population_size = 50 + elite_size = 10 + mutation_scale = 0.1 + + population = self.initialize_population(population_size) + best_score = float("inf") + best_solution = None + + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + # Selection + elite_population, elite_fitness = self.select_survivors(population, fitness, elite_size) + + # Crossover and Mutation + offspring = self.crossover(elite_population) + offspring = self.mutate(offspring, mutation_scale) + + # Reinsert best found solution into population to maintain good genes + offspring[-1] = best_solution.copy() + + # Update population + population = offspring + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/IntelligentDynamicDualPhaseStrategyV39.py b/nevergrad/optimization/lama/IntelligentDynamicDualPhaseStrategyV39.py new file mode 100644 index 000000000..4f271e33a --- /dev/null +++ b/nevergrad/optimization/lama/IntelligentDynamicDualPhaseStrategyV39.py @@ -0,0 +1,83 @@ +import numpy as np + + +class IntelligentDynamicDualPhaseStrategyV39: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Standard mutation strategy for phase 1 + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using more vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adjust_parameters(self, iteration, total_iterations, population): + # Dynamic parameter adjustment using a sigmoid-based function to emphasize middle-phase aggressiveness + diversity = np.std(population) + scale = iteration / total_iterations + scale = 1 / (1 + np.exp(-10 * (scale - 0.5))) # Sigmoid function for smoother transition + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale) * diversity, 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale) * diversity, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters( + iteration, switch_point if phase == 1 else self.budget - switch_point, population + ) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/IntelligentEvolvingAdaptiveStrategyV34.py b/nevergrad/optimization/lama/IntelligentEvolvingAdaptiveStrategyV34.py new file mode 100644 index 000000000..c38688d36 --- /dev/null +++ b/nevergrad/optimization/lama/IntelligentEvolvingAdaptiveStrategyV34.py @@ -0,0 +1,70 @@ +import numpy as np + + +class IntelligentEvolvingAdaptiveStrategyV34: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, iteration): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + # Adapting mutation factor with iteration-based dynamic tuning + F_dynamic = self.F * np.exp(-4 * iteration / self.budget) + mutant = ( + population[best_idx] + + F_dynamic * (population[a] - population[b]) + + 0.1 * F_dynamic * (population[c] - population[best_idx]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, iteration): + # Adaptive crossover probability based on iteration + CR_dynamic = self.CR * np.sin(np.pi * iteration / self.budget) + crossover_mask = np.random.rand(self.dimension) < CR_dynamic + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + iteration = 0 + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, iteration) + trial = self.crossover(population[i], mutant, iteration) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i], fitnesses[i] = trial, trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + iteration += 1 + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/IntelligentPerturbationSearch.py b/nevergrad/optimization/lama/IntelligentPerturbationSearch.py new file mode 100644 index 000000000..604e71ae0 --- /dev/null +++ b/nevergrad/optimization/lama/IntelligentPerturbationSearch.py @@ -0,0 +1,59 @@ +import numpy as np + + +class IntelligentPerturbationSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + # Initialize variables + self.f_opt = np.inf + self.x_opt = None + + # Start with a random point in the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update optimal solution if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale of the Gaussian perturbations + scale = 0.5 + min_scale = 0.01 + max_scale = 1.0 + + # Main optimization loop + for i in range(1, self.budget): + # Adjust scale dynamically based on iteration count + if i < self.budget // 3: + scale = max_scale + elif i < 2 * self.budget // 3: + scale = 0.5 * (max_scale + min_scale) + else: + scale = min_scale + + # Generate a new candidate by perturbing the current point + candidate = current_point + np.random.normal(0, scale, self.dim) + candidate = np.clip(candidate, -5.0, 5.0) + candidate_f = func(candidate) + + # Simple acceptance criterion: accept if the candidate is better + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + + # Update optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Intelligently adjust scale based on progress + if candidate_f < current_f: + scale = min(scale * 1.1, max_scale) # Encourage exploration + else: + scale = max(scale * 0.9, min_scale) # Encourage exploitation + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/IterativeAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/IterativeAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..2b690e535 --- /dev/null +++ b/nevergrad/optimization/lama/IterativeAdaptiveDifferentialEvolution.py @@ -0,0 +1,48 @@ +import numpy as np + + +class IterativeAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 200 # Increased population for wider exploration + self.F_min = 0.1 # Minimum differential weight + self.F_max = 0.9 # Maximum differential weight + self.CR = 0.9 # High crossover probability for more trial vectors + + def __call__(self, func): + # Initialize population uniformly between bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify the best individual + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolutionary loop over the budget + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + F_dynamic = self.F_min + (self.F_max - self.F_min) * (n_iterations - iteration) / n_iterations + + for i in range(self.pop_size): + # Select distinct indices excluding the target index i + candidates = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(candidates, 3, replace=False)] + + # Mutant vector generation with adaptive F + mutant = np.clip(a + F_dynamic * (b - c), -5.0, 5.0) + + # Crossover to generate trial vector + trial = np.where(np.random.rand(self.dim) < self.CR, mutant, pop[i]) + + # Evaluate trial vector + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/IterativeProgressiveDifferentialEvolution.py b/nevergrad/optimization/lama/IterativeProgressiveDifferentialEvolution.py new file mode 100644 index 000000000..d11a5bc23 --- /dev/null +++ b/nevergrad/optimization/lama/IterativeProgressiveDifferentialEvolution.py @@ -0,0 +1,44 @@ +import numpy as np + + +class IterativeProgressiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Population size refined for efficient search + self.F_base = 0.8 # Base differential weight + self.CR = 0.9 # Crossover probability, increased to enhance diversity + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + F_dynamic = 0.5 + (0.4 * iteration / n_iterations) # Increasing F from 0.5 to 0.9 + + for i in range(self.pop_size): + # Mutation and Crossover + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F_dynamic * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure mutant is within bounds + trial = np.where(np.random.rand(self.dim) < self.CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/LADESA.py b/nevergrad/optimization/lama/LADESA.py new file mode 100644 index 000000000..57a07d869 --- /dev/null +++ b/nevergrad/optimization/lama/LADESA.py @@ -0,0 +1,95 @@ +import numpy as np + + +class LADESA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive_size = 20 + self.mutation_scale = 0.5 # Initial mutation scale + self.crossover_prob = 0.7 # Initial crossover probability + self.elite_size = int(self.population_size * 0.1) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx, F): + mutants = np.empty_like(population) + for i in range(len(population)): + idxs = np.random.choice(np.delete(np.arange(len(population)), best_idx), 3, replace=False) + x1, x2, x3 = population[idxs] + mutant_vector = np.clip(x1 + F * (x2 - x3), self.bounds[0], self.bounds[1]) + mutants[i] = mutant_vector + return mutants + + def crossover(self, target, mutant, strategy="binomial"): + if strategy == "uniform": + cross_points = np.random.rand(self.dimension) < self.crossover_prob + else: # binomial + cross_points = np.random.rand(self.dimension) < self.crossover_prob + j_rand = np.random.randint(self.dimension) + cross_points[j_rand] = True + offspring = np.where(cross_points, mutant, target) + return offspring + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + archive = population[np.argsort(fitness)[: self.archive_size]] # Initialize archive + best_fitness = np.min(fitness) + + while evaluations < self.budget: + F = np.clip(np.random.normal(self.mutation_scale, 0.1), 0.1, 1.0) + mutants = self.mutate(population, np.argmin(fitness), F) + trials = np.array( + [ + self.crossover( + population[i], mutants[i], strategy=np.random.choice(["uniform", "binomial"]) + ) + for i in range(self.population_size) + ] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += self.population_size + + improvement_mask = fitness_trials < fitness + population[improvement_mask] = trials[improvement_mask] + fitness[improvement_mask] = fitness_trials[improvement_mask] + + # Update archive and re-introduce archived solutions + archive_fitness = self.evaluate(archive, func) + combined_population = np.vstack([population, archive]) + combined_fitness = np.hstack([fitness, archive_fitness]) + best_indices = np.argsort(combined_fitness)[: self.archive_size] + archive = combined_population[best_indices] + + if evaluations % 500 == 0: + # Re-seed to maintain diversity + reseed_indices = np.random.choice( + self.population_size, size=int(self.population_size * 0.1), replace=False + ) + population[reseed_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reseed_indices), self.dimension) + ) + fitness[reseed_indices] = self.evaluate(population[reseed_indices], func) + + best_fitness = np.min(fitness) + + # Learning adaptation of parameters based on current performance + if evaluations % 100 == 0: + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + self.mutation_scale *= 0.9 + self.crossover_prob = min(self.crossover_prob + 0.05, 1.0) + else: + self.mutation_scale = min(self.mutation_scale + 0.05, 1.0) + self.crossover_prob *= 0.95 + + return best_fitness, population[np.argmin(fitness)] diff --git a/nevergrad/optimization/lama/LAOS.py b/nevergrad/optimization/lama/LAOS.py new file mode 100644 index 000000000..986abcab6 --- /dev/null +++ b/nevergrad/optimization/lama/LAOS.py @@ -0,0 +1,55 @@ +import numpy as np +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import RBF, WhiteKernel + + +class LAOS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 30 + population = np.random.uniform(*self.bounds, (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness): + best_idx = np.argmin(fitness) + return population[best_idx], fitness[best_idx] + + def layered_search_process(self, initial_population, func): + layer_depth = 3 + current_population = initial_population + evaluations = len(current_population) + for layer in range(layer_depth): + current_fitness = self.evaluate(current_population, func) + evaluations += len(current_population) + if evaluations >= self.budget: + break + + # Learn landscape features using Gaussian Process + kernel = RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0) + model = GaussianProcessRegressor(kernel=kernel) + model.fit(current_population, current_fitness) + + # Predict and refine search around best solutions + best_individuals_idx = np.argsort(current_fitness)[: max(5, len(current_fitness) // 10)] + best_individuals = current_population[best_individuals_idx] + new_population = [] + for best in best_individuals: + perturbation = np.random.normal(0, 0.1 / (layer + 1), (5, self.dimension)) + new_candidates = best + perturbation + new_population.append(new_candidates) + current_population = np.vstack(new_population) + current_population = np.clip(current_population, *self.bounds) + + return self.select_best(current_population, self.evaluate(current_population, func)) + + def __call__(self, func): + initial_population, _ = self.initialize() + best_solution, best_fitness = self.layered_search_process(initial_population, func) + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/LearningAdaptiveMemoryEnhancedStrategyV42.py b/nevergrad/optimization/lama/LearningAdaptiveMemoryEnhancedStrategyV42.py new file mode 100644 index 000000000..faafe2d4c --- /dev/null +++ b/nevergrad/optimization/lama/LearningAdaptiveMemoryEnhancedStrategyV42.py @@ -0,0 +1,82 @@ +import numpy as np + + +class LearningAdaptiveMemoryEnhancedStrategyV42: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.phase = 1 + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if self.phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Use memory to guide mutation in phase 2 + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 10: # Limit memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamically adjust the phase based on performance improvement + if iteration > total_iterations * self.switch_ratio: + self.phase = 2 + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * (iteration / total_iterations)), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * (iteration / total_iterations)), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/LearningAdaptiveStrategyV24.py b/nevergrad/optimization/lama/LearningAdaptiveStrategyV24.py new file mode 100644 index 000000000..7c302a8c2 --- /dev/null +++ b/nevergrad/optimization/lama/LearningAdaptiveStrategyV24.py @@ -0,0 +1,81 @@ +import numpy as np + + +class LearningAdaptiveStrategyV24: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase, adaptive_factors): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutation_factor = adaptive_factors["mutation"] + if phase == 1: + mutant = population[best_idx] + mutation_factor * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + mutation_factor * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, adaptive_factors): + CR_val = adaptive_factors["crossover"] + crossover_mask = np.random.rand(self.dimension) < CR_val + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def learn_parameters(self, performance_history): + # Adapt mutation and crossover rates based on performance history + avg_performance = np.mean(performance_history) + return { + "mutation": np.clip(0.5 + 0.5 * np.sin(2 * np.pi * avg_performance), 0.1, 1), + "crossover": np.clip(0.5 + 0.5 * np.cos(2 * np.pi * avg_performance), 0.1, 1), + } + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + performance_history = [] + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + adaptive_factors = {"mutation": self.F, "crossover": self.CR} + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase, adaptive_factors) + trial = self.crossover(population[i], mutant, adaptive_factors) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + performance_history.append(fitnesses[best_idx]) + adaptive_factors = self.learn_parameters(performance_history) + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/LevyEnhancedAdaptiveSimulatedAnnealingDE.py b/nevergrad/optimization/lama/LevyEnhancedAdaptiveSimulatedAnnealingDE.py new file mode 100644 index 000000000..661898c62 --- /dev/null +++ b/nevergrad/optimization/lama/LevyEnhancedAdaptiveSimulatedAnnealingDE.py @@ -0,0 +1,144 @@ +import numpy as np + + +class LevyEnhancedAdaptiveSimulatedAnnealingDE: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def levy_flight(beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.randn(self.dim) * sigma_u + v = np.random.randn(self.dim) + step = u / abs(v) ** (1 / beta) + return step + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def reinitialize_worst_individuals(population, fitness, threshold=0.1): + mean_fitness = np.mean(fitness) + for i in range(len(population)): + if fitness[i] > mean_fitness * (1 + threshold): + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_mutation_factor(generation, max_generations): + return self.mutation_factor * (1 - generation / max_generations) + + def simulated_annealing_acceptance(new_f, old_f, temperature): + if new_f < old_f: + return True + else: + acceptance_prob = np.exp((old_f - new_f) / temperature) + return np.random.rand() < acceptance_prob + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + max_generations = self.budget // self.population_size + temperature = 1.0 + + for generation in range(max_generations): + success_count = 0 + + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutation_factor = adaptive_mutation_factor(generation, max_generations) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + levy_flight() + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if simulated_annealing_acceptance(new_f, fitness[j], temperature): + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + reinitialize_worst_individuals(population, fitness) + + temperature *= 0.99 # Cool down temperature for Simulated Annealing + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + else: + self.base_lr *= 0.95 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + # Adapt crossover rate based on success rate + if success_count / self.population_size > 0.2: + self.crossover_rate *= 1.05 + else: + self.crossover_rate *= 0.95 + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = LevyEnhancedAdaptiveSimulatedAnnealingDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MADE.py b/nevergrad/optimization/lama/MADE.py new file mode 100644 index 000000000..4df9f290f --- /dev/null +++ b/nevergrad/optimization/lama/MADE.py @@ -0,0 +1,75 @@ +import numpy as np + + +class MADE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.cr = 0.9 # Initial crossover probability + self.f = 0.8 # Initial differential weight + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def select_mutation_strategy(self): + strategies = ["rand", "best", "current_to_best"] + return np.random.choice(strategies) + + def mutate(self, population, fitness): + strategy = self.select_mutation_strategy() + new_population = np.empty_like(population) + best_idx = np.argmin(fitness) + for i in range(len(population)): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3, x_best = ( + population[idxs[0]], + population[idxs[1]], + population[idxs[2]], + population[best_idx], + ) + if strategy == "rand": + mutant = x1 + self.f * (x2 - x3) + elif strategy == "best": + mutant = x_best + self.f * (x1 - x2) + elif strategy == "current_to_best": + mutant = population[i] + self.f * (x_best - population[i] + x1 - x2) + new_population[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.cr + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + + while evaluations < self.budget: + mutated_population = self.mutate(population, fitness) + offspring_population = np.array( + [self.crossover(population[i], mutated_population[i]) for i in range(self.population_size)] + ) + offspring_fitness = self.evaluate(offspring_population, func) + evaluations += self.population_size + + for i in range(self.population_size): + if offspring_fitness[i] < fitness[i]: + population[i], fitness[i] = offspring_population[i], offspring_fitness[i] + if fitness[i] < best_fitness: + best_fitness, best_solution = fitness[i], population[i] + + self.adapt_parameters() + + return best_fitness, best_solution + + def adapt_parameters(self): + self.cr = max(0.5, self.cr * 0.98) # Adaptively decrease CR + self.f = max(0.5, self.f * 0.99) # Adaptively decrease F if no improvement diff --git a/nevergrad/optimization/lama/MIDEAT.py b/nevergrad/optimization/lama/MIDEAT.py new file mode 100644 index 000000000..3b98039b1 --- /dev/null +++ b/nevergrad/optimization/lama/MIDEAT.py @@ -0,0 +1,54 @@ +import numpy as np + + +class MIDEAT: + def __init__(self, budget, population_size=30, CR=0.9, F=0.8, momentum_factor=0.5): + self.budget = budget + self.CR = CR # Crossover probability + self.F = F # Differential weight + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.momentum_factor = momentum_factor + + def __call__(self, func): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros((self.population_size, self.dimension)) + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + crossover = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover, mutant, population[i]) + + velocity[i] = self.momentum_factor * velocity[i] + (1 - self.momentum_factor) * ( + trial - population[i] + ) + trial = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/MSADE.py b/nevergrad/optimization/lama/MSADE.py new file mode 100644 index 000000000..a9a3c5d78 --- /dev/null +++ b/nevergrad/optimization/lama/MSADE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class MSADE: + def __init__( + self, budget, population_size=100, F_base=0.6, CR_base=0.7, alpha=0.1, strategy_proportion=0.2 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.alpha = alpha # Rate of adaptive adjustment + self.strategy_proportion = strategy_proportion # Proportion of population to apply secondary strategy + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + F = self.F_base + self.alpha * np.random.randn() # Add small noise for diversification + CR = self.CR_base + self.alpha * np.random.randn() + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Select mutation strategy based on strategy proportion + if np.random.rand() < self.strategy_proportion: + # DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = population[i] + F * (best_individual - population[i]) + F * (a - b) + else: + # DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/MSEAS.py b/nevergrad/optimization/lama/MSEAS.py new file mode 100644 index 000000000..9601bba2f --- /dev/null +++ b/nevergrad/optimization/lama/MSEAS.py @@ -0,0 +1,70 @@ +import numpy as np + + +class MSEAS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.num_subpopulations = 5 + self.subpopulation_size = int(self.population_size / self.num_subpopulations) + self.mutation_factor = 0.8 + self.crossover_rate = 0.7 + self.elite_size = 2 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def levy_flight(self): + return np.random.standard_cauchy(size=self.dimension) + + def adaptive_parameters(self, progress): + # Adjust mutation factor and crossover rate as the optimization progresses + self.mutation_factor = 0.8 - progress * 0.5 + self.crossover_rate = 0.7 + progress * 0.2 + + def mutate_and_crossover(self, population, func, progress): + new_population = np.copy(population) + for i in range(self.population_size): + if np.random.rand() < 0.1: # 10% chance of global search mutation + mutation = self.levy_flight() + else: + mutation = self.differential_mutation(population, i) + mutant = np.clip(population[i] + mutation, self.bounds[0], self.bounds[1]) + child = self.crossover(mutant, population[i]) + new_population[i] = child + new_fitness = self.evaluate(new_population, func) + return new_population, new_fitness + + def differential_mutation(self, population, base_idx): + idxs = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[idxs[0]], population[idxs[1]], population[idxs[2]] + return self.mutation_factor * (x2 - x3) + + def crossover(self, mutant, target): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(crossover_mask, mutant, target) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + + while evaluations < self.budget: + progress = evaluations / self.budget + self.adaptive_parameters(progress) + population, fitness = self.mutate_and_crossover(population, func, progress) + evaluations += self.population_size + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_solution = population[np.argmin(fitness)] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/MemeticAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/MemeticAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..fcb1a20d7 --- /dev/null +++ b/nevergrad/optimization/lama/MemeticAdaptiveDifferentialEvolution.py @@ -0,0 +1,89 @@ +import numpy as np + + +class MemeticAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.8 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = np.clip(self.crossover(population[i], mutant_vector, CR), self.lb, self.ub) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.5 and evaluations + 3 <= self.budget: + local_best_x, local_best_f = self.local_search(population[i], func, step_size=0.1) + evaluations += 3 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Enhanced Diversity Maintenance: Reinitialize 15% worst individuals + if evaluations + int(0.15 * population_size) <= self.budget: + worst_indices = np.argsort(fitness)[-int(0.15 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemeticDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/MemeticDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..cb3414288 --- /dev/null +++ b/nevergrad/optimization/lama/MemeticDifferentialEvolutionOptimizer.py @@ -0,0 +1,101 @@ +import numpy as np + + +class MemeticDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.dim = 5 # Dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are [-5.0, 5.0] + + def local_search(self, x, func): + step_size = 0.1 + best_x = x.copy() + best_f = func(x) + for i in range(self.dim): + x_new = x.copy() + x_new[i] += step_size * (np.random.rand() - 0.5) * 2 + x_new = np.clip(x_new, self.bounds[0], self.bounds[1]) + f_new = func(x_new) + if f_new < best_f: + best_f = f_new + best_x = x_new + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize archive to store successful mutation vectors + archive = [] + + while self.eval_count < self.budget: + new_population = [] + new_fitness = [] + for i in range(self.pop_size): + # Mutation with archive usage + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + if archive: + d = archive[np.random.randint(len(archive))] + mutant = np.clip( + a + F_values[i] * (b - c) + F_values[i] * (a - d), self.bounds[0], self.bounds[1] + ) + else: + mutant = np.clip(a + F_values[i] * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + archive.append(population[i]) + # Limit archive size + if len(archive) > self.pop_size: + archive.pop(np.random.randint(len(archive))) + # Self-adapting parameters + F_values[i] = F_values[i] * 1.1 if F_values[i] < 1 else F_values[i] + CR_values[i] = CR_values[i] * 1.1 if CR_values[i] < 1 else CR_values[i] + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + F_values[i] = F_values[i] * 0.9 if F_values[i] > 0 else F_values[i] + CR_values[i] = CR_values[i] * 0.9 if CR_values[i] > 0 else CR_values[i] + + if self.eval_count >= self.budget: + break + + # Replace the old population with the new one + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Local Search Phase: Perform local search on some individuals + for i in range(self.pop_size): + if np.random.rand() < 0.1: # Perform local search with a 10% probability + local_x, local_f = self.local_search(population[i], func) + if local_f < fitness[i]: + population[i] = local_x + fitness[i] = local_f + self.eval_count += self.dim # Assume each dimension change is a function evaluation + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemeticElitistDifferentialEvolutionWithDynamicFandCR.py b/nevergrad/optimization/lama/MemeticElitistDifferentialEvolutionWithDynamicFandCR.py new file mode 100644 index 000000000..5b7c38f6c --- /dev/null +++ b/nevergrad/optimization/lama/MemeticElitistDifferentialEvolutionWithDynamicFandCR.py @@ -0,0 +1,124 @@ +import numpy as np + + +class MemeticElitistDifferentialEvolutionWithDynamicFandCR: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + elite_fraction=0.1, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.elite_fraction = elite_fraction + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + best_x = x.copy() + best_f = func(x) + for _ in range(budget): + perturbation = np.random.uniform(-0.5, 0.5, self.dim) + candidate_x = np.clip(x + perturbation, self.bounds[0], self.bounds[1]) + candidate_f = func(candidate_x) + if candidate_f < best_f: + best_f = candidate_f + best_x = candidate_x + return best_x, best_f + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + elite_size = int(self.elite_fraction * self.pop_size) + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= global_search_budget: + break + + # Elitism: Keep the best individuals + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Update population with new solutions while preserving elite + non_elite_indices = np.argsort(fitness)[elite_size:] + for idx in non_elite_indices: + x_new = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + f_new = func(x_new) + self.eval_count += 1 + if f_new < fitness[idx]: + fitness[idx] = f_new + population[idx] = x_new + if self.eval_count >= global_search_budget: + break + + population[:elite_size] = elite_population + fitness[:elite_size] = elite_fitness + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemeticEnhancedParticleSwarmOptimization.py b/nevergrad/optimization/lama/MemeticEnhancedParticleSwarmOptimization.py new file mode 100644 index 000000000..42dfc455e --- /dev/null +++ b/nevergrad/optimization/lama/MemeticEnhancedParticleSwarmOptimization.py @@ -0,0 +1,86 @@ +import numpy as np + + +class MemeticEnhancedParticleSwarmOptimization: + def __init__(self, budget, population_size=50, w=0.5, c1=2, c2=2, local_search_budget_ratio=0.1): + self.budget = budget + self.population_size = population_size + self.w = w # inertia weight + self.c1 = c1 # cognitive coefficient + self.c2 = c2 # social coefficient + self.local_search_budget_ratio = local_search_budget_ratio + + def local_search(self, func, x, search_budget): + best_score = func(x) + best_x = np.copy(x) + dim = len(x) + + for _ in range(search_budget): + new_x = x + np.random.uniform(-0.1, 0.1, dim) + new_x = np.clip(new_x, -5.0, 5.0) + new_score = func(new_x) + if new_score < best_score: + best_score = new_score + best_x = np.copy(new_x) + + return best_x, best_score + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize the swarm + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + velocities = np.random.uniform(-1, 1, (self.population_size, dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in personal_best_positions]) + + best_idx = np.argmin(personal_best_scores) + global_best_position = personal_best_positions[best_idx] + global_best_score = personal_best_scores[best_idx] + + evaluations = self.population_size + local_search_budget = int(self.budget * self.local_search_budget_ratio) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Update velocity + r1, r2 = np.random.rand(dim), np.random.rand(dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (global_best_position - population[i]) + ) + + # Update position + population[i] = np.clip(population[i] + velocities[i], lower_bound, upper_bound) + + # Evaluate fitness + score = func(population[i]) + evaluations += 1 + + # Update personal best + if score < personal_best_scores[i]: + personal_best_scores[i] = score + personal_best_positions[i] = population[i] + + # Update global best + if score < global_best_score: + global_best_score = score + global_best_position = population[i] + + # Apply local search on global best position for further refinement + if evaluations + local_search_budget <= self.budget: + global_best_position, global_best_score = self.local_search( + func, global_best_position, local_search_budget + ) + evaluations += local_search_budget + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemeticSpatialDifferentialEvolution.py b/nevergrad/optimization/lama/MemeticSpatialDifferentialEvolution.py new file mode 100644 index 000000000..041c5abd8 --- /dev/null +++ b/nevergrad/optimization/lama/MemeticSpatialDifferentialEvolution.py @@ -0,0 +1,104 @@ +import numpy as np +from scipy.spatial import cKDTree + + +class MemeticSpatialDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, r1, r2, r3, F): + mutant = r1 + F * (r2 - r3) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, step_size, max_iter=5): + best_x = x + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x, best_f = new_x, new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 * (1 + np.random.rand()) + CR = 0.9 - 0.8 * (iteration / max_iterations) + return F, CR + + def spatial_guided_search(self, population, fitness, func): + kdtree = cKDTree(population) + for i in range(len(population)): + if np.random.rand() < 0.5: + _, neighbors = kdtree.query(population[i], k=4) + neighbors = neighbors[1:] + centroid = np.mean(population[neighbors], axis=0) + perturbation = np.random.uniform(-0.1, 0.1, self.dim) + candidate = np.clip(centroid + perturbation, self.lb, self.ub) + candidate_fitness = func(candidate) + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant_vector = self.differential_mutation(population[i], a, b, c, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search with mixed strategies + local_best_x, local_best_f = self.local_search( + population[i], func, step_size=0.01, max_iter=3 + ) + evaluations += 1 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Spatial Guided Search + if evaluations + population_size <= self.budget: + population, fitness = self.spatial_guided_search(population, fitness, func) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemoryBasedSimulatedAnnealing.py b/nevergrad/optimization/lama/MemoryBasedSimulatedAnnealing.py new file mode 100644 index 000000000..d59773eac --- /dev/null +++ b/nevergrad/optimization/lama/MemoryBasedSimulatedAnnealing.py @@ -0,0 +1,54 @@ +import numpy as np + + +class MemoryBasedSimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.99 # Cooling rate + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + x_candidate = memory[i] + T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp((f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemoryEnhancedAdaptiveAnnealing.py b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveAnnealing.py new file mode 100644 index 000000000..a60583631 --- /dev/null +++ b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveAnnealing.py @@ -0,0 +1,107 @@ +import numpy as np + + +class MemoryEnhancedAdaptiveAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Memory Enrichment + if evaluations % (memory_size * 5) == 0: + self._enhance_memory(func, memory, memory_scores, evaluations) + + return self.f_opt, self.x_opt + + def _enhance_memory(self, func, memory, memory_scores, evaluations): + # Enhancing memory by local optimization around best memory points + for i in range(len(memory)): + local_T = 0.1 # Low disturbance for local search + x_local = memory[i] + f_local = memory_scores[i] + for _ in range(5): # Local search iterations + x_candidate = x_local + local_T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if evaluations >= self.budget: + break + + if f_candidate < f_local: + x_local = x_candidate + f_local = f_candidate + + memory[i] = x_local + memory_scores[i] = f_local diff --git a/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealing.py b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealing.py new file mode 100644 index 000000000..907f0462a --- /dev/null +++ b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealing.py @@ -0,0 +1,107 @@ +import numpy as np + + +class MemoryEnhancedAdaptiveMultiPhaseAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate, balanced cooling + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 10 # Moderate memory size for diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Memory Enrichment + if evaluations % (memory_size * 5) == 0: + self._enhance_memory(func, memory, memory_scores, evaluations) + + return self.f_opt, self.x_opt + + def _enhance_memory(self, func, memory, memory_scores, evaluations): + # Enhancing memory by local optimization around best memory points + for i in range(len(memory)): + local_T = 0.1 # Low disturbance for local search + x_local = memory[i] + f_local = memory_scores[i] + for _ in range(5): # Local search iterations + x_candidate = x_local + local_T * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if evaluations >= self.budget: + break + + if f_candidate < f_local: + x_local = x_candidate + f_local = f_candidate + + memory[i] = x_local + memory_scores[i] = f_local diff --git a/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient.py b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient.py new file mode 100644 index 000000000..ce2bfc4a5 --- /dev/null +++ b/nevergrad/optimization/lama/MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient.py @@ -0,0 +1,104 @@ +import numpy as np + + +class MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha = 0.97 # Cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + local_search_iters = 5 # Number of gradient-based local search iterations + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 # Increased memory size for more diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for i in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[i] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + # Gradient-based local refinement of the best memory solution + x_best_memory = memory[np.argmin(memory_scores)] + for _ in range(local_search_iters): + gradient = self._approximate_gradient(func, x_best_memory) + x_best_memory -= 0.01 * gradient # Gradient descent step + x_best_memory = np.clip(x_best_memory, func.bounds.lb, func.bounds.ub) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + T *= alpha + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + return self.f_opt, self.x_opt + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad diff --git a/nevergrad/optimization/lama/MemoryEnhancedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/MemoryEnhancedDynamicHybridOptimizer.py new file mode 100644 index 000000000..c7e2da3f8 --- /dev/null +++ b/nevergrad/optimization/lama/MemoryEnhancedDynamicHybridOptimizer.py @@ -0,0 +1,158 @@ +import numpy as np +from scipy.optimize import minimize + + +class MemoryEnhancedDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MemoryGuidedAdaptiveDualPhaseStrategyV40.py b/nevergrad/optimization/lama/MemoryGuidedAdaptiveDualPhaseStrategyV40.py new file mode 100644 index 000000000..86a17c8c6 --- /dev/null +++ b/nevergrad/optimization/lama/MemoryGuidedAdaptiveDualPhaseStrategyV40.py @@ -0,0 +1,84 @@ +import numpy as np + + +class MemoryGuidedAdaptiveDualPhaseStrategyV40: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Use memory to guide mutation in phase 2 + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + # Update memory with successful mutations + self.memory.append(trial - target) + if len(self.memory) > 10: # Limit memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + scale = 1 / (1 + np.exp(-10 * (scale - 0.5))) # Sigmoid function + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/MemoryHybridAdaptiveDE.py b/nevergrad/optimization/lama/MemoryHybridAdaptiveDE.py new file mode 100644 index 000000000..a6eb61d7b --- /dev/null +++ b/nevergrad/optimization/lama/MemoryHybridAdaptiveDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class MemoryHybridAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + F_min, F_max = 0.1, 0.9 + CR_min, CR_max = 0.1, 0.9 + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = F_min + (F_max - F_min) * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = CR_min + (CR_max - CR_min) * np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def elitism(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[:population_size] + return combined_population[elite_indices], combined_fitness[elite_indices] + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + memory = [] + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + if evaluations % 50 == 0: + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + if memory and np.random.rand() < 0.2: + historical_best = memory[np.random.randint(len(memory))] + mutant = historical_best + F_values[i] * (population[i] - historical_best) + else: + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + memory.append(trial) + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = elitism(population, fitness, new_population, new_fitness) + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MetaDynamicPrecisionOptimizerV1.py b/nevergrad/optimization/lama/MetaDynamicPrecisionOptimizerV1.py new file mode 100644 index 000000000..76cb6a9b1 --- /dev/null +++ b/nevergrad/optimization/lama/MetaDynamicPrecisionOptimizerV1.py @@ -0,0 +1,58 @@ +import numpy as np + + +class MetaDynamicPrecisionOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and annealing parameters + T = 1.20 # Higher initial temperature to encourage initial exploration + T_min = 0.0001 # Very low minimum temperature to allow thorough late-stage exploitation + alpha = 0.95 # Slower cooling rate to extend the effective search phase + + # Refined mutation and crossover parameters for dynamic adaptability + F = 0.8 # Slightly increased mutation factor for robust exploration + CR = 0.85 # Crossover probability fine-tuned for better genetic diversity + + population_size = 90 # Optimized population size for more effective search + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Adaptive mutation strategy with enhanced dynamic control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.75 + 0.25 * np.sin(5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criteria with a finer temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.04 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced cooling strategy including sinusoidal modulation + adaptive_cooling = alpha - 0.006 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/MetaDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/MetaDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..acbac7275 --- /dev/null +++ b/nevergrad/optimization/lama/MetaDynamicQuantumSwarmOptimization.py @@ -0,0 +1,92 @@ +import numpy as np + + +class MetaDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + max_inertia_weight=0.9, + min_inertia_weight=0.4, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.8, + min_social_weight=1.2, + beta=0.9, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.beta = beta + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + self.inertia_weight = self.max_inertia_weight - self.beta * (iteration / self.budget) * ( + self.max_inertia_weight - self.min_inertia_weight + ) + self.cognitive_weight = self.max_cognitive_weight - self.beta * (iteration / self.budget) * ( + self.max_cognitive_weight - self.min_cognitive_weight + ) + self.social_weight = self.min_social_weight + self.beta * (iteration / self.budget) * ( + self.max_social_weight - self.min_social_weight + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + def adjust_search_space(self, func): + for i in range(self.dim): + lower_bound = min(self.particles[:, i].min(), self.search_space[0]) + upper_bound = max(self.particles[:, i].max(), self.search_space[1]) + self.search_space = (lower_bound, upper_bound) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + self.adjust_search_space(func) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/MetaHarmonicSearch.py b/nevergrad/optimization/lama/MetaHarmonicSearch.py new file mode 100644 index 000000000..bbafa35ec --- /dev/null +++ b/nevergrad/optimization/lama/MetaHarmonicSearch.py @@ -0,0 +1,47 @@ +import numpy as np + + +class MetaHarmonicSearch: + def __init__( + self, budget=1000, num_agents=20, num_dimensions=5, harmony_memory_rate=0.7, pitch_adjust_rate=0.5 + ): + self.budget = budget + self.num_agents = num_agents + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + + def initialize_agents(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_agents, self.num_dimensions)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_agents(bounds) + best_harmony = harmony_memory[0].copy() + + for _ in range(self.budget): + for i in range(self.num_agents): + new_harmony = np.zeros(self.num_dimensions) + for d in range(self.num_dimensions): + if np.random.rand() < self.harmony_memory_rate: + new_harmony[d] = harmony_memory[np.random.randint(self.num_agents)][d] + else: + new_harmony[d] = np.random.uniform(bounds.lb[d], bounds.ub[d]) + + if np.random.rand() < self.pitch_adjust_rate: + new_harmony[d] += np.random.uniform(-1, 1) * (bounds.ub[d] - bounds.lb[d]) + new_harmony[d] = np.clip(new_harmony[d], bounds.lb[d], bounds.ub[d]) + + f_new = func(new_harmony) + if f_new < func(harmony_memory[i]): + harmony_memory[i] = new_harmony.copy() + if f_new < func(best_harmony): + best_harmony = new_harmony.copy() + + self.f_opt = func(best_harmony) + self.x_opt = best_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MetaHarmonicSearch2.py b/nevergrad/optimization/lama/MetaHarmonicSearch2.py new file mode 100644 index 000000000..008384c59 --- /dev/null +++ b/nevergrad/optimization/lama/MetaHarmonicSearch2.py @@ -0,0 +1,54 @@ +import numpy as np + + +class MetaHarmonicSearch2: + def __init__( + self, + budget=1000, + num_agents=20, + num_dimensions=5, + harmony_memory_rate=0.7, + pitch_adjust_rate=0.5, + bandwidth=0.1, + ): + self.budget = budget + self.num_agents = num_agents + self.num_dimensions = num_dimensions + self.harmony_memory_rate = harmony_memory_rate + self.pitch_adjust_rate = pitch_adjust_rate + self.bandwidth = bandwidth + + def initialize_agents(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_agents, self.num_dimensions)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_agents(bounds) + best_harmony = harmony_memory[0].copy() + + for _ in range(self.budget): + for i in range(self.num_agents): + new_harmony = np.zeros(self.num_dimensions) + for d in range(self.num_dimensions): + if np.random.rand() < self.harmony_memory_rate: + new_harmony[d] = harmony_memory[np.random.randint(self.num_agents)][d] + else: + new_harmony[d] = np.random.uniform(bounds.lb[d], bounds.ub[d]) + + if np.random.rand() < self.pitch_adjust_rate: + new_harmony[d] += self.bandwidth * np.random.randn() + new_harmony[d] = np.clip(new_harmony[d], bounds.lb[d], bounds.ub[d]) + + f_new = func(new_harmony) + if f_new < func(harmony_memory[i]): + harmony_memory[i] = new_harmony.copy() + if f_new < func(best_harmony): + best_harmony = new_harmony.copy() + + self.f_opt = func(best_harmony) + self.x_opt = best_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MetaNetAQAPSO.py b/nevergrad/optimization/lama/MetaNetAQAPSO.py new file mode 100644 index 000000000..30ea8e9b6 --- /dev/null +++ b/nevergrad/optimization/lama/MetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class MetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 600 + self.meta_net_lr = 0.1 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MomentumGradientExploration.py b/nevergrad/optimization/lama/MomentumGradientExploration.py new file mode 100644 index 000000000..d77726d42 --- /dev/null +++ b/nevergrad/optimization/lama/MomentumGradientExploration.py @@ -0,0 +1,70 @@ +import numpy as np + + +class MomentumGradientExploration: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.learning_rate = 0.1 + self.epsilon = 1e-8 + self.exploration_prob = 0.3 # Increased exploration probability + self.momentum_factor = 0.9 # Momentum factor for gradient-based updates + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + momentum = np.zeros(self.dim) + + for i in range(1, self.budget): + if np.random.rand() < self.exploration_prob: + # Perform random exploration + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + else: + # Perform gradient-based exploitation with momentum + grad = gradient_estimate(x) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + momentum = self.momentum_factor * momentum + adapt_lr * grad + perturbation = np.random.randn(self.dim) * adapt_lr # Random perturbation + + new_x = x - momentum + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + x = new_x + else: + x = random_vector() # Restart exploration from random point + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = MomentumGradientExploration(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MultiFacetAdaptiveSearch.py b/nevergrad/optimization/lama/MultiFacetAdaptiveSearch.py new file mode 100644 index 000000000..0632ecdeb --- /dev/null +++ b/nevergrad/optimization/lama/MultiFacetAdaptiveSearch.py @@ -0,0 +1,68 @@ +import numpy as np + + +class MultiFacetAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialize solution and function value tracking + self.f_opt = np.Inf + self.x_opt = None + + # Dynamic population scaling and random restarts + initial_population_size = 50 + population_size = initial_population_size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Main optimization loop + evaluations_used = population_size + while evaluations_used < self.budget: + # Adaptive mutation based on remaining budget + remaining_budget = self.budget - evaluations_used + mutation_scale = 0.1 * (remaining_budget / self.budget) + 0.02 + + # Evolve population + for i in range(population_size): + perturbation = np.random.normal(0, mutation_scale, self.dim) + candidate = population[i] + perturbation + candidate = np.clip(candidate, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations_used += 1 + + # Accept if better or with a probability decreasing over time + if candidate_fitness < fitness[i] or np.random.rand() < ( + 0.5 * remaining_budget / self.budget + ): + population[i] = candidate + fitness[i] = candidate_fitness + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate.copy() + + # Random restart mechanism + if evaluations_used + population_size <= self.budget: + if np.random.rand() < 0.1: # 10% chance of random restart + new_individuals = np.random.uniform(self.lb, self.ub, (population_size // 2, self.dim)) + new_fitness = np.array([func(individual) for individual in new_individuals]) + # Replace the worst half of the population + worst_half_indices = np.argsort(fitness)[-population_size // 2 :] + population[worst_half_indices] = new_individuals + fitness[worst_half_indices] = new_fitness + evaluations_used += population_size // 2 + + return self.f_opt, self.x_opt + + +# Example of usage (requires a function `func` and bounds to run): +# optimizer = MultiFacetAdaptiveSearch(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/MultiFocalAdaptiveOptimizer.py b/nevergrad/optimization/lama/MultiFocalAdaptiveOptimizer.py new file mode 100644 index 000000000..46dbca830 --- /dev/null +++ b/nevergrad/optimization/lama/MultiFocalAdaptiveOptimizer.py @@ -0,0 +1,64 @@ +import numpy as np + + +class MultiFocalAdaptiveOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=100): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_decay = 0.99 # Slow decay rate for global exploration + self.local_decay = 0.95 # Faster decay rate for local exploration + self.initial_velocity_scale = 0.1 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.random.randn(self.particles, self.dimension) * self.initial_velocity_scale + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + # Randomly decide whether to move towards global best or explore + if np.random.rand() > 0.5: + velocities[i] += (best_global_position - positions[i]) * np.random.rand() + else: + velocities[i] += np.random.normal(0, 1, self.dimension) * self.initial_velocity_scale + + # Update position + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + # Evaluate new position + new_fitness = func(positions[i]) + evaluations += 1 + + # Update personal and global bests + if new_fitness < fitness[i]: + fitness[i] = new_fitness + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + # Decaying velocity magnitudes over iterations to increase exploitation + velocities[i] *= self.global_decay + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/MultiLayeredAdaptiveCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/MultiLayeredAdaptiveCovarianceMatrixEvolution.py new file mode 100644 index 000000000..1332a7d4c --- /dev/null +++ b/nevergrad/optimization/lama/MultiLayeredAdaptiveCovarianceMatrixEvolution.py @@ -0,0 +1,150 @@ +import numpy as np + + +class MultiLayeredAdaptiveCovarianceMatrixEvolution: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.5, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + learning_rate=0.001, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + self.learning_rate = learning_rate # learning rate for gradient-based local search + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __gradient_local_search(self, func, x): + eps = 1e-8 + grad = np.zeros_like(x) + fx = func(x) + + for i in range(len(x)): + x_eps = np.copy(x) + x_eps[i] += eps + grad[i] = (func(x_eps) - fx) / eps + + return x - self.learning_rate * grad + + def __hierarchical_selection(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + + diverse_count = int(self.population_size * (1 - self.elite_fraction)) + diverse_idx = np.argsort(scores)[-diverse_count:] + diverse_pop = pop[diverse_idx] + + return elite_pop, diverse_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation(func, pop, mean, C, sigma) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Apply gradient-based local search to elite individuals in the population + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + for i in range(len(elite_pop)): + elite_pop[i] = self.__gradient_local_search(func, elite_pop[i]) + if func(elite_pop[i]) < scores[np.argsort(scores)[: len(elite_pop)][i]]: + scores[np.argsort(scores)[: len(elite_pop)][i]] = func(elite_pop[i]) + + # Update global best after local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Hierarchical selection + elite_pop, diverse_pop = self.__hierarchical_selection(pop, scores) + + # Update mean, covariance matrix, and sigma + mean_new = np.mean(elite_pop, axis=0) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_pop.shape[0] + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiModalMemoryEnhancedHybridOptimizer.py b/nevergrad/optimization/lama/MultiModalMemoryEnhancedHybridOptimizer.py new file mode 100644 index 000000000..e37fc38eb --- /dev/null +++ b/nevergrad/optimization/lama/MultiModalMemoryEnhancedHybridOptimizer.py @@ -0,0 +1,201 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import pdist, squareform + + +class MultiModalMemoryEnhancedHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + restart_threshold=0.001, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.diversity_threshold = diversity_threshold + self.restart_threshold = restart_threshold + self.global_best_history = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def population_diversity(self, population): + if len(population) < 2: + return 0.0 + distances = pdist(population) + return np.mean(distances) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + if len(idxs) < 3: + continue # Skip mutation if less than 3 distinct individuals + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Check population diversity and restart if necessary + if self.population_diversity(population) < self.diversity_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Restart mechanism based on stagnation + if len(self.global_best_history) > 10: + if np.std(self.global_best_history[-10:]) < self.restart_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66.py b/nevergrad/optimization/lama/MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66.py new file mode 100644 index 000000000..563c1fde7 --- /dev/null +++ b/nevergrad/optimization/lama/MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66.py @@ -0,0 +1,105 @@ +import numpy as np + + +class MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67.py b/nevergrad/optimization/lama/MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67.py new file mode 100644 index 000000000..afa787afe --- /dev/null +++ b/nevergrad/optimization/lama/MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67.py @@ -0,0 +1,105 @@ +import numpy as np + + +class MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_enhanced_guided_mass(self, agents, fitness_values, masses, func): + for i in range(self.num_agents): + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position(agents[i], force + guide_force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_enhanced_guided_mass(agents, fitness_values, masses, func) + self._adaptive_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiOperatorSearch.py b/nevergrad/optimization/lama/MultiOperatorSearch.py new file mode 100644 index 000000000..ff57c1e85 --- /dev/null +++ b/nevergrad/optimization/lama/MultiOperatorSearch.py @@ -0,0 +1,117 @@ +import numpy as np + + +class MultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 15 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiPhaseAdaptiveDE.py b/nevergrad/optimization/lama/MultiPhaseAdaptiveDE.py new file mode 100644 index 000000000..e601be242 --- /dev/null +++ b/nevergrad/optimization/lama/MultiPhaseAdaptiveDE.py @@ -0,0 +1,137 @@ +import numpy as np + + +class MultiPhaseAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + # Phase-based Reset Strategy + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on diversity and stagnation phase + phase = (evaluations // stagnation_threshold) % 3 + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + F = 0.8 # Reset F to default + + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiPhaseAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/MultiPhaseAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..4962a345b --- /dev/null +++ b/nevergrad/optimization/lama/MultiPhaseAdaptiveDifferentialEvolution.py @@ -0,0 +1,146 @@ +import numpy as np +from scipy.stats import qmc + + +class MultiPhaseAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.phase_change_iterations = self.budget // 3 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + threshold = 1e-3 + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + crossover_rate = self.crossover_rate + mutation_factor = self.mutation_factor + + while evaluations < self.budget: + success_count = 0 + + if evaluations < self.phase_change_iterations: + elite_count = int(0.1 * self.population_size) + else: + elite_count = int(0.2 * self.population_size) + + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or ( + evaluations >= self.phase_change_iterations + and np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand() + ): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate / (1 + 0.01 * evaluations) + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + crossover_rate *= 1.05 + mutation_factor = min(1.0, mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + crossover_rate *= 0.95 + mutation_factor = max(0.5, mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + crossover_rate = np.clip(crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = MultiPhaseAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MultiPhaseAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/MultiPhaseAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..c615bde8a --- /dev/null +++ b/nevergrad/optimization/lama/MultiPhaseAdaptiveExplorationOptimization.py @@ -0,0 +1,115 @@ +import numpy as np + + +class MultiPhaseAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters + self.initial_population_size = 500 # Initial population size + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.local_search_chance = 0.3 # Probability of performing local search + self.elite_ratio = 0.1 # Ratio of elite members to retain + self.diversity_threshold = 0.1 # Threshold for population diversity + self.cauchy_step_scale = 0.03 # Scale for Cauchy distribution steps + self.gaussian_step_scale = 0.01 # Scale for Gaussian distribution steps + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + evaluations = self.initial_population_size + + while evaluations < self.budget: + # Sort population based on fitness + sorted_indices = np.argsort(fitness) + elite_size = int(self.elite_ratio * len(population)) + elite_population = population[sorted_indices[:elite_size]] + + new_population = [] + for i in range(len(population)): + if np.random.rand() < self.local_search_chance: + candidate = self.local_search(population[i], func) + else: + # Mutation step + idxs = np.random.choice(len(population), 3, replace=False) + a, b, c = population[idxs] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover step + crossover = np.random.rand(self.dim) < self.CR + candidate = np.where(crossover, mutant, population[i]) + + # Selection step + f_candidate = func(candidate) + evaluations += 1 + if f_candidate < fitness[i]: + new_population.append(candidate) + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + else: + new_population.append(population[i]) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Add elite back to population + population = np.vstack((population, elite_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += elite_size + + # Adaptive control of parameters based on population diversity + self.adaptive_population_reinitialization(population, evaluations) + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + for _ in range(30): # Adjusted iterations for local search + step_size_cauchy = np.random.standard_cauchy(self.dim) * self.cauchy_step_scale + step_size_gaussian = np.random.normal(0, self.gaussian_step_scale, size=self.dim) + + x_new_cauchy = np.clip(best_x + step_size_cauchy, self.lb, self.ub) + x_new_gaussian = np.clip(best_x + step_size_gaussian, self.lb, self.ub) + + f_new_cauchy = func(x_new_cauchy) + f_new_gaussian = func(x_new_gaussian) + + if f_new_cauchy < best_f: + best_x = x_new_cauchy + best_f = f_new_cauchy + elif f_new_gaussian < best_f: + best_x = x_new_gaussian + best_f = f_new_gaussian + + return best_x + + def adaptive_population_reinitialization(self, population, evaluations): + # Calculate population diversity + diversity = np.mean(np.std(population, axis=0)) + + if diversity < self.diversity_threshold: + # Increase population diversity by re-initializing some individuals + num_reinit = int(0.2 * len(population)) + reinit_indices = np.random.choice(len(population), num_reinit, replace=False) + + for idx in reinit_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Adaptive local search chance based on remaining budget + remaining_budget_ratio = (self.budget - evaluations) / self.budget + self.local_search_chance = max(0.1, self.local_search_chance * remaining_budget_ratio) diff --git a/nevergrad/optimization/lama/MultiPhaseAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/MultiPhaseAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..4c101d11c --- /dev/null +++ b/nevergrad/optimization/lama/MultiPhaseAdaptiveHybridDEPSO.py @@ -0,0 +1,187 @@ +import numpy as np + + +class MultiPhaseAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Differential weight for DE + initial_CR = 0.9 # Crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + local_search_prob = 0.3 # Probability of performing local search + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + def local_search(solution): + # Randomly perturb the solution + perturbation = np.random.normal(0, 0.1, size=self.dim) + new_solution = np.clip(solution + perturbation, bounds[0], bounds[1]) + new_fitness = func(new_solution) + return new_solution, new_fitness + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into the population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + replace_indices = np.random.choice(range(population_size), elite_size, replace=False) + new_population[replace_indices] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + # Local search phase + if np.random.rand() < local_search_prob: + best_ind = population[np.argmin(fitness)] + new_solution, new_fitness_val = local_search(best_ind) + evaluations += 1 + + if new_fitness_val < self.f_opt: + self.f_opt = new_fitness_val + self.x_opt = new_solution + last_improvement = evaluations + update_elite_memory(elite_memory, new_solution, new_fitness_val) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiPhaseDiversityAdaptiveDE.py b/nevergrad/optimization/lama/MultiPhaseDiversityAdaptiveDE.py new file mode 100644 index 000000000..05f780210 --- /dev/null +++ b/nevergrad/optimization/lama/MultiPhaseDiversityAdaptiveDE.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.optimize import minimize + + +class MultiPhaseDiversityAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 20 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + # Restart the population if stagnation is detected + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + print(f"Restarting at generation {generation} due to stagnation.") + + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution with multi-phase mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Multi-phase mutation + if generation % 2 == 0: + mutant = x1 + mutation_factor * (x2 - x3) + else: + mutant = ( + x1 + + mutation_factor * (x2 - x3) + + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x1) + ) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Gradient-based adjustment + if self.budget > 0: + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + new_x = best_x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/MultiPopulationAdaptiveMemorySearch.py b/nevergrad/optimization/lama/MultiPopulationAdaptiveMemorySearch.py new file mode 100644 index 000000000..4215ddea2 --- /dev/null +++ b/nevergrad/optimization/lama/MultiPopulationAdaptiveMemorySearch.py @@ -0,0 +1,158 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.cluster import KMeans + + +class MultiPopulationAdaptiveMemorySearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.num_sub_populations = 5 + self.F = 0.8 + self.CR = 0.9 + + # PSO Parameters + self.inertia_weight = 0.9 + self.cognitive_constant = 2.0 + self.social_constant = 2.0 + + # Memory Mechanism + self.memory_size = 20 + self.memory = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + + def _nelder_mead_local_search(self, x, func): + res = minimize(func, x, method="nelder-mead", options={"xtol": 1e-8, "disp": False}) + return res.x, res.fun + + def _adaptive_parameter_adjustment(self): + self.F = np.random.uniform(0.4, 1.0) + self.CR = np.random.uniform(0.1, 1.0) + self.inertia_weight = np.random.uniform(0.4, 0.9) + + def _cluster_based_search(self, population, fitness, func): + if len(population) > 10: + kmeans = KMeans(n_clusters=10).fit(population) + cluster_centers = kmeans.cluster_centers_ + for center in cluster_centers: + local_candidate, f_local_candidate = self._nelder_mead_local_search(center, func) + self.evaluations += 1 + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + + def _memory_based_search(self, func): + if len(self.memory) > 1: + for mem in self.memory: + local_candidate, f_local_candidate = self._nelder_mead_local_search(mem, func) + self.evaluations += 1 + if f_local_candidate < self.f_opt: + self.f_opt = f_local_candidate + self.x_opt = local_candidate + + def __call__(self, func): + # Initialize population + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + personal_best_positions = population.copy() + personal_best_fitness = fitness.copy() + + self.evaluations = self.population_size + + while self.evaluations < self.budget: + # Divide population into subpopulations + sub_pop_size = self.population_size // self.num_sub_populations + sub_populations = [ + population[i * sub_pop_size : (i + 1) * sub_pop_size] for i in range(self.num_sub_populations) + ] + sub_fitness = [ + fitness[i * sub_pop_size : (i + 1) * sub_pop_size] for i in range(self.num_sub_populations) + ] + + # Perform DE in subpopulations + for sub_pop, sub_fit in zip(sub_populations, sub_fitness): + for i in range(len(sub_pop)): + # Select three random vectors a, b, c from subpopulation + indices = [idx for idx in range(len(sub_pop)) if idx != i] + a, b, c = sub_pop[np.random.choice(indices, 3, replace=False)] + + # Mutation and Crossover + mutant_vector = np.clip(a + self.F * (b - c), self.lb, self.ub) + trial_vector = np.copy(sub_pop[i]) + for j in range(self.dim): + if np.random.rand() < self.CR: + trial_vector[j] = mutant_vector[j] + + f_candidate = func(trial_vector) + self.evaluations += 1 + + if f_candidate < sub_fit[i]: + sub_pop[i] = trial_vector + sub_fit[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + + if self.evaluations >= self.budget: + break + + # Recombine subpopulations + population = np.vstack(sub_populations) + fitness = np.hstack(sub_fitness) + + # PSO component + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities = ( + self.inertia_weight * velocities + + self.cognitive_constant * r1 * (personal_best_positions - population) + + self.social_constant * r2 * (self.x_opt - population) + ) + population = np.clip(population + velocities, self.lb, self.ub) + + # Evaluate new population + for i in range(self.population_size): + f_candidate = func(population[i]) + self.evaluations += 1 + + if f_candidate < fitness[i]: + fitness[i] = f_candidate + personal_best_positions[i] = population[i] + personal_best_fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = population[i] + + if self.evaluations >= self.budget: + break + + # Memory mechanism + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + # Adaptive Parameter Adjustment + self._adaptive_parameter_adjustment() + + # Cluster-Based Enhanced Local Search + self._cluster_based_search(population, fitness, func) + + # Memory-Based Search + self._memory_based_search(func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiScaleAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/MultiScaleAdaptiveHybridOptimization.py new file mode 100644 index 000000000..2d3814d25 --- /dev/null +++ b/nevergrad/optimization/lama/MultiScaleAdaptiveHybridOptimization.py @@ -0,0 +1,115 @@ +import numpy as np + + +class MultiScaleAdaptiveHybridOptimization: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.crossover_rate = 0.5 + 0.3 * (1 - np.cos(np.pi * iteration / max_iterations)) + self.learning_rate = 0.01 * np.exp(-iteration / max_iterations) + + def multiscale_sampling(self, pop, scale): + new_pop = np.copy(pop) + for i in range(self.population_size): + perturbation = np.random.normal(0, scale, size=pop[i].shape) + new_pop[i] = np.clip(pop[i] + perturbation, -5.0, 5.0) + return new_pop + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = (self.budget // self.population_size) * 2 + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + scale = 1.0 - (iteration / max_iterations) # Multiscale sampling rate + + # Apply multiscale sampling + pop = self.multiscale_sampling(pop, scale) + scores = np.array([func(ind) for ind in pop]) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step + pop, scores = self.local_search(func, pop, scores) + evaluations += self.population_size + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiScaleGradientExploration.py b/nevergrad/optimization/lama/MultiScaleGradientExploration.py new file mode 100644 index 000000000..6e1fc6a7d --- /dev/null +++ b/nevergrad/optimization/lama/MultiScaleGradientExploration.py @@ -0,0 +1,69 @@ +import numpy as np + + +class MultiScaleGradientExploration: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.learning_rate = 0.1 + self.epsilon = 1e-8 + self.exploration_prob = 0.3 # Increase exploration probability to 30% + self.scaling_factors = [0.1, 0.5, 1.0] # Multi-scale perturbation factors + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + for i in range(1, self.budget): + if np.random.rand() < self.exploration_prob: + # Perform random exploration + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + else: + # Perform gradient-based exploitation with multi-scale stochastic perturbation + grad = gradient_estimate(x) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + perturbation = ( + sum(np.random.randn(self.dim) * scale for scale in self.scaling_factors) * adapt_lr + ) # Multi-scale perturbation + + new_x = x - adapt_lr * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + x = new_x + else: + x = random_vector() # Restart exploration from random point + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = MultiScaleGradientExploration(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MultiScaleGradientSearch.py b/nevergrad/optimization/lama/MultiScaleGradientSearch.py new file mode 100644 index 000000000..f2ef84cac --- /dev/null +++ b/nevergrad/optimization/lama/MultiScaleGradientSearch.py @@ -0,0 +1,79 @@ +import numpy as np + + +class MultiScaleGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(len(x)): + x_step = np.array(x) + x_step[i] += epsilon + grad[i] = (func(x_step) - fx) / epsilon + return grad + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + self.alpha = 0.1 # Initial step size + self.beta = 0.5 # Contraction factor + self.gamma = 1.5 # Expansion factor + self.delta = 1e-5 # Small perturbation for escaping local optima + + x = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f = func(x) + evaluations = 1 + + # Multi-scale search parameters + scales = [1.0, 0.5, 0.1] + + while evaluations < self.budget: + for scale in scales: + # Approximate the gradient + grad = self.approximate_gradient(func, x, epsilon=scale * 1e-8) + direction = grad / (np.linalg.norm(grad) + 1e-8) # Normalize direction vector + + # Try expanding + x_new = x - self.gamma * self.alpha * scale * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.gamma + else: + # Try contracting + x_new = x - self.beta * self.alpha * scale * direction + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + self.alpha *= self.beta + else: + # Apply small perturbation to avoid getting stuck + x_new = x + self.delta * scale * np.random.randn(self.dim) + x_new = np.clip(x_new, func.bounds.lb, func.bounds.ub) + f_new = func(x_new) + evaluations += 1 + + if f_new < f: + x = x_new + f = f_new + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiScaleQuadraticSearch.py b/nevergrad/optimization/lama/MultiScaleQuadraticSearch.py new file mode 100644 index 000000000..74a6ea04a --- /dev/null +++ b/nevergrad/optimization/lama/MultiScaleQuadraticSearch.py @@ -0,0 +1,78 @@ +import numpy as np + + +class MultiScaleQuadraticSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # Dimensionality of the BBOB test suite + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial random position + current_position = np.random.uniform(self.lower_bound, self.upper_bound, self.dimension) + current_fitness = func(current_position) + self.update_optimum(current_position, current_fitness) + + # Control parameters + delta = 0.5 # Initial step size + alpha = 0.5 # Reduction factor for step size + beta = 0.3 # Momentum term + last_direction = np.zeros(self.dimension) + epsilon = 1e-6 # Convergence criterion + + iteration = 1 + while iteration < self.budget: + scales = [delta * (0.5**i) for i in range(3)] # Different scales for exploration + for scale in scales: + if iteration >= self.budget: + break + points, fitnesses = self.generate_points(func, current_position, scale) + A, b = self.fit_quadratic(current_position, points, fitnesses) + + if np.linalg.cond(A) < 1 / epsilon: + try: + step_direction = -np.linalg.inv(A).dot(b) + direction = beta * last_direction + (1 - beta) * step_direction + new_position = current_position + direction + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_fitness = func(new_position) + except np.linalg.LinAlgError: + continue + + if new_fitness < current_fitness: + current_position, current_fitness = new_position, new_fitness + last_direction = direction + self.update_optimum(current_position, current_fitness) + delta = min(delta / alpha, 1.0) # Adjust delta upon improvement + else: + delta *= alpha # Reduce delta upon failure + + iteration += 2 * self.dimension + 1 + + return self.f_opt, self.x_opt + + def generate_points(self, func, center, delta): + points = np.array( + [center + delta * np.eye(self.dimension)[:, i] for i in range(self.dimension)] + + [center - delta * np.eye(self.dimension)[:, i] for i in range(self.dimension)] + ) + points = np.clip(points, self.lower_bound, self.upper_bound) + fitnesses = np.array([func(point) for point in points]) + return points, fitnesses + + def update_optimum(self, x, f): + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + def fit_quadratic(self, center, points, fitnesses): + n = len(points) + X = np.hstack([np.ones((n, 1)), points - center, ((points - center) ** 2)]) + coeffs = np.linalg.lstsq(X, fitnesses, rcond=None)[0] + A = np.diag(coeffs[1 + self.dimension :]) + b = coeffs[1 : 1 + self.dimension] + return A, b diff --git a/nevergrad/optimization/lama/MultiStageAdaptiveSearch.py b/nevergrad/optimization/lama/MultiStageAdaptiveSearch.py new file mode 100644 index 000000000..c34cc81e9 --- /dev/null +++ b/nevergrad/optimization/lama/MultiStageAdaptiveSearch.py @@ -0,0 +1,62 @@ +import numpy as np + + +class MultiStageAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Start with a random point in the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale and learning rate + scale = 0.5 + learning_rate = 0.1 + + # Adaptive scale factors + exploration_scale_factor = 1.2 + exploitation_scale_factor = 0.85 + + # Temperature for simulated annealing like probability acceptance + temperature = 1.0 + min_temperature = 0.01 + temperature_decay = 0.99 + + for i in range(1, self.budget): + # Calculate current temperature + temperature = max(min_temperature, temperature * temperature_decay) + + # Generate a new candidate by perturbing the current point + candidate = current_point + np.random.normal(0, scale, self.dim) + candidate = np.clip(candidate, -5.0, 5.0) + candidate_f = func(candidate) + + # Calculate acceptance probability using a simulated annealing approach + if candidate_f < current_f or np.exp((current_f - candidate_f) / temperature) > np.random.rand(): + current_point = candidate + current_f = candidate_f + + # Update optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Dynamic scale adjustment + scale *= exploration_scale_factor + else: + scale *= exploitation_scale_factor + + # Clamp the scale to prevent it from becoming too large or too small + scale = np.clip(scale, 0.01, 1.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiStageHybridGradientBoostedAnnealing.py b/nevergrad/optimization/lama/MultiStageHybridGradientBoostedAnnealing.py new file mode 100644 index 000000000..cb4625576 --- /dev/null +++ b/nevergrad/optimization/lama/MultiStageHybridGradientBoostedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class MultiStageHybridGradientBoostedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Intensive localized search as refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/MultiStrategyAdaptiveGradientEvolution.py b/nevergrad/optimization/lama/MultiStrategyAdaptiveGradientEvolution.py new file mode 100644 index 000000000..71ab355fa --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyAdaptiveGradientEvolution.py @@ -0,0 +1,125 @@ +import numpy as np + + +class MultiStrategyAdaptiveGradientEvolution: + def __init__(self, budget, initial_population_size=20): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = initial_population_size + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.crossover_rate = 0.7 + self.mutation_rate = 0.1 + self.diversity_threshold = 1e-3 + self.elite_rate = 0.2 # Proportion of elite members in selection + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def adaptive_learning_rate(base_lr, iteration, success_rate): + return base_lr / (1 + iteration * success_rate) + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + else: + population[j] = random_vector() + + def elite_selection(population, fitness): + elite_count = int(self.elite_rate * len(fitness)) + sorted_indices = np.argsort(fitness) + elite_indices = sorted_indices[:elite_count] + return [population[i] for i in elite_indices], [fitness[i] for i in elite_indices] + + def local_search(x): + grad = gradient_estimate(x) + step = -self.base_lr * grad + new_x = x + step + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + return new_x + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + # Elite selection + elite_pop, elite_fit = elite_selection(population, fitness) + elite_size = len(elite_pop) + + # Multi-strategy approach: combine crossover, mutation, and local search + if np.random.rand() < 0.5: + # Crossover and Mutation + parents_idx = np.random.choice(range(elite_size), size=2, replace=False) + parent1, parent2 = elite_pop[parents_idx[0]], elite_pop[parents_idx[1]] + + # Crossover + if np.random.rand() < self.crossover_rate: + cross_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:cross_point], parent2[cross_point:])) + else: + child = parent1.copy() + + # Mutation + if np.random.rand() < self.mutation_rate: + mutation_idx = np.random.randint(self.dim) + child[mutation_idx] = np.random.uniform(self.bounds[0], self.bounds[1]) + else: + # Local search + local_idx = np.random.choice(range(elite_size), size=1)[0] + child = local_search(elite_pop[local_idx]) + + # Gradient-based exploitation + grad = gradient_estimate(child) + success_rate = success_count / max(1, i) # Avoid division by zero + adapt_lr = adaptive_learning_rate(self.base_lr, i, success_rate) + perturbation = np.random.randn(self.dim) * adapt_lr + new_x = child - adapt_lr * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + success_count += 1 + + # Replace the worst member of the population with the new child + worst_idx = np.argmax(fitness) + population[worst_idx] = new_x + fitness[worst_idx] = new_f + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = MultiStrategyAdaptiveGradientEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MultiStrategyAdaptiveSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/MultiStrategyAdaptiveSwarmDifferentialEvolution.py new file mode 100644 index 000000000..363b1761c --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyAdaptiveSwarmDifferentialEvolution.py @@ -0,0 +1,58 @@ +import numpy as np + + +class MultiStrategyAdaptiveSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Population size increased for better exploration + self.F_base = 0.5 # Base mutation factor + self.CR = 0.9 # Crossover probability + self.adapt_rate = 0.1 # Rate at which F adapts + self.lambd = 0.75 # Control parameter for mutation strategy switching + + def __call__(self, func): + # Initialize population within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the best initial solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + for i in range(int(self.budget / self.pop_size)): + F_adapted = self.F_base + self.adapt_rate * np.sin(2 * np.pi * i / (self.budget / self.pop_size)) + + for j in range(self.pop_size): + # Choose mutation strategy based on control parameter lambda + if np.random.rand() < self.lambd: + # Strategy 1: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F_adapted * (b - c) + else: + # Strategy 2: DE/current-to-best/2/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adapted * (best_ind - pop[j]) + F_adapted * (a - b) + + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/MultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/MultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..e7b54be92 --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyDifferentialEvolution.py @@ -0,0 +1,171 @@ +import numpy as np +from scipy.optimize import minimize + + +class MultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.1 + self.restart_threshold = 50 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.05 + self.no_improvement_count = 0 + self.history = [] + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + x_local, f_local = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim + ).x, func(x) + return x_local, f_local + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.4, 1.2) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.1, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def _opposition_based_learning(self, population): + return self.lb + self.ub - population + + def _crowding_distance(self, population, fitness): + distances = np.zeros(len(population)) + sorted_indices = np.argsort(fitness) + for i in range(self.dim): + sorted_pop = population[sorted_indices, i] + distances[sorted_indices[0]] = distances[sorted_indices[-1]] = float("inf") + for j in range(1, len(population) - 1): + distances[sorted_indices[j]] += (sorted_pop[j + 1] - sorted_pop[j - 1]) / ( + sorted_pop[-1] - sorted_pop[0] + 1e-12 + ) + return distances + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(0.1 * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiStrategyMemeticAlgorithm.py b/nevergrad/optimization/lama/MultiStrategyMemeticAlgorithm.py new file mode 100644 index 000000000..53e1a9cad --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyMemeticAlgorithm.py @@ -0,0 +1,86 @@ +import numpy as np + + +class MultiStrategyMemeticAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + self.population_size = 100 # Increased population size + self.mutation_factor = 0.8 + self.crossover_rate = 0.9 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation (Differential Evolution) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), self.lb, self.ub) + + # Crossover + crossover = np.random.rand(self.dim) < self.crossover_rate + trial = np.where(crossover, mutant, population[i]) + + # Apply different local search strategies + if np.random.rand() < 0.3: # Adjusted probability + trial = self.local_search(trial, func, strategy="hill_climbing") + elif np.random.rand() < 0.3: + trial = self.local_search(trial, func, strategy="gaussian_mutation") + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + def local_search(self, x, func, strategy="hill_climbing"): + best_x = x.copy() + best_f = func(x) + + if strategy == "hill_climbing": + step_size = 0.1 + for _ in range(10): # Perform multiple iterations of hill climbing + for i in range(self.dim): + x_new = best_x.copy() + x_new[i] += step_size * (np.random.rand() * 2 - 1) # Small random perturbation + x_new = np.clip(x_new, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + elif strategy == "gaussian_mutation": + sigma = 0.1 # Standard deviation for Gaussian mutation + for _ in range(10): # Perform multiple iterations of Gaussian mutation + x_new = best_x + np.random.normal(0, sigma, self.dim) + x_new = np.clip(x_new, self.lb, self.ub) + f_new = func(x_new) + + if f_new < best_f: + best_x = x_new + best_f = f_new + + return best_x diff --git a/nevergrad/optimization/lama/MultiStrategyQuantumCognitionOptimizerV9.py b/nevergrad/optimization/lama/MultiStrategyQuantumCognitionOptimizerV9.py new file mode 100644 index 000000000..08c86a3ea --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyQuantumCognitionOptimizerV9.py @@ -0,0 +1,82 @@ +import numpy as np + + +class MultiStrategyQuantumCognitionOptimizerV9: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coefficient=2.1, + social_coefficient=2.1, + inertia_decay=0.99, + quantum_jump_rate=0.2, + quantum_scale=0.1, + adaptive_scale_factor=0.5, + exploration_phase_ratio=0.3, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.exploration_phase_ratio = exploration_phase_ratio + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + exploration_count = int(self.budget * self.exploration_phase_ratio) + + while evaluations < self.budget: + for i in range(self.population_size): + # Adjust quantum jump strategy based on function feedback + quantum_probability = self.quantum_jump_rate * (1 - evaluations / exploration_count) + if np.random.rand() < quantum_probability: + quantum_deviation = np.random.normal( + 0, + self.quantum_scale * (1 + self.adaptive_scale_factor * np.log(1 + global_best_score)), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Dynamic update of inertia weight to promote exploration or exploitation + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/MultiStrategyQuantumLevyOptimizer.py b/nevergrad/optimization/lama/MultiStrategyQuantumLevyOptimizer.py new file mode 100644 index 000000000..fe60f8585 --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategyQuantumLevyOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class MultiStrategyQuantumLevyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.2 + self.memory_size = 20 + self.local_search_probability = 0.6 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + self.strategy_rewards = [0, 0, 0, 0] + self.strategy_uses = [0, 0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2, 3], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 4, 1 / 4, 1 / 4, 1 / 4] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 3: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = MultiStrategyQuantumLevyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/MultiStrategySelfAdaptiveDE.py b/nevergrad/optimization/lama/MultiStrategySelfAdaptiveDE.py new file mode 100644 index 000000000..34e29a5a8 --- /dev/null +++ b/nevergrad/optimization/lama/MultiStrategySelfAdaptiveDE.py @@ -0,0 +1,114 @@ +import numpy as np + + +class MultiStrategySelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/MultiSwarmAdaptiveDE_PSO.py b/nevergrad/optimization/lama/MultiSwarmAdaptiveDE_PSO.py new file mode 100644 index 000000000..6b030f696 --- /dev/null +++ b/nevergrad/optimization/lama/MultiSwarmAdaptiveDE_PSO.py @@ -0,0 +1,125 @@ +import numpy as np +from scipy.optimize import minimize + + +class MultiSwarmAdaptiveDE_PSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.num_swarms = 5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize multiple swarms + swarms = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) for _ in range(self.num_swarms) + ] + fitness = [np.array([func(ind) for ind in swarm]) for swarm in swarms] + evaluations = self.swarm_size * self.num_swarms + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.num_swarms)] + local_bests = [swarm[np.argmin(fit)] for swarm, fit in zip(swarms, fitness)] + local_best_fits = [min(fit) for fit in fitness] + + while evaluations < self.budget: + new_swarms = [] + new_fitness = [] + best_swarm_idx = np.argmin(local_best_fits) + + for s in range(self.num_swarms): + new_swarm = [] + new_fit = [] + + for i in range(len(swarms[s])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[s][i] = ( + w * velocities[s][i] + + c1 * r1 * (local_bests[s] - swarms[s][i]) + + c2 + * r2 + * (swarms[best_swarm_idx][np.argmin(fitness[best_swarm_idx])] - swarms[s][i]) + ) + + trial_pso = swarms[s][i] + velocities[s][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(swarms[s])) + indices = np.delete(indices, i) + a, b, c = swarms[s][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.25 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[s][i]: + new_swarm.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[s]: + local_best_fits[s] = f_trial + local_bests[s] = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_swarm.append(swarms[s][i]) + new_fit.append(fitness[s][i]) + + if evaluations >= self.budget: + break + + new_swarms.append(np.array(new_swarm)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the swarm + local_bests[s] = new_swarms[s][np.argmin(new_fitness[s])] + local_best_fits[s] = min(new_fitness[s]) + + # Update swarms and fitness + swarms = new_swarms + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for s in range(self.num_swarms): + if np.std(fitness[s]) < 1e-5 and evaluations < self.budget: + swarms[s] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[s] = np.array([func(ind) for ind in swarms[s]]) + evaluations += self.swarm_size + + # Adaptive parameter adjustment + if np.random.rand() < 0.1: + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/NovelAdaptiveHarmonicFireworksTabuSearch.py b/nevergrad/optimization/lama/NovelAdaptiveHarmonicFireworksTabuSearch.py new file mode 100644 index 000000000..da2744429 --- /dev/null +++ b/nevergrad/optimization/lama/NovelAdaptiveHarmonicFireworksTabuSearch.py @@ -0,0 +1,103 @@ +import numpy as np + + +class NovelAdaptiveHarmonicFireworksTabuSearch: + def __init__( + self, budget=1000, num_harmonies=50, num_dimensions=5, bandwidth=0.1, tabu_tenure=7, tabu_ratio=0.1 + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.tabu_ratio = tabu_ratio + self.tabu_list = [] + self.iteration = 0 + self.best_solution = None + self.best_score = np.inf + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + return new_solution + + def update_tabu_list(self, new_solution_str): + self.tabu_list.append(new_solution_str) + if len(self.tabu_list) > self.tabu_tenure: + self.tabu_list.pop(0) + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def diversify_search(self, harmony_memory, bounds): + for i in range(self.num_harmonies): + rand_indexes = np.random.choice( + range(self.num_harmonies), size=self.num_dimensions, replace=False + ) + new_solution = np.mean(harmony_memory[rand_indexes], axis=0) + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + harmony_memory[i] = new_solution + + def local_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + if func(new_solution) < func(harmony_memory[i]): + harmony_memory[i] = new_solution + + def perturb_solution(self, solution, bounds): + perturbed_solution = solution + np.random.normal(0, self.bandwidth, size=self.num_dimensions) + perturbed_solution = np.clip(perturbed_solution, bounds.lb, bounds.ub) + return perturbed_solution + + def update_parameters(self): + self.tabu_ratio *= 1.02 # Adjust the tabu ratio for better exploration + self.bandwidth *= 0.95 # Adjust the bandwidth for better exploitation + + def adaptive_perturbation(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + perturbed_solution = self.perturb_solution(harmony_memory[i], bounds) + if func(perturbed_solution) < func(harmony_memory[i]): + harmony_memory[i] = perturbed_solution + + def adaptive_tabu_search(self, harmony_memory, best_solution, func, bounds): + for i in range(self.num_harmonies): + new_solution = self.generate_new_solution(harmony_memory, best_solution, bounds) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str not in self.tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(new_solution_str) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.best_score: + self.best_score = best_score + self.best_solution = best_harmony + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + + for i in range(self.budget): + self.adaptive_tabu_search(harmony_memory, self.best_solution, func, bounds) + self.adaptive_perturbation(harmony_memory, self.best_solution, func, bounds) + self.update_parameters() + + return 1.0 - (self.best_score - func.bounds.f_opt) / (func.bounds.f_opt - func.bounds.f_min) diff --git a/nevergrad/optimization/lama/NovelDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/NovelDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..f8d04752c --- /dev/null +++ b/nevergrad/optimization/lama/NovelDynamicFireworkAlgorithm.py @@ -0,0 +1,81 @@ +import numpy as np + + +class NovelDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def distance(self, x, y): + return np.linalg.norm(x - y) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if func(new_spark) < func(self.fireworks[i][0]): + self.fireworks[i] = (np.copy(new_spark), 0) + self.update_parameters(i) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2.py b/nevergrad/optimization/lama/NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2.py new file mode 100644 index 000000000..d93572e8b --- /dev/null +++ b/nevergrad/optimization/lama/NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2.py @@ -0,0 +1,85 @@ +import numpy as np + + +class NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2: + def __init__( + self, + budget=10000, + population_size=50, + num_iterations=100, + mutation_rate=0.1, + step_size=0.1, + diversity_rate=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + self.step_size = step_size + self.diversity_rate = diversity_rate + + def initialize_population(self): + return np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def levy_flight(self): + beta = 1.5 + sigma1 = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + sigma2 = 1 + u = np.random.normal(0, sigma1, self.dim) + v = np.random.normal(0, sigma2, self.dim) + levy = u / (np.abs(v) ** (1 / beta)) + return levy + + def adaptive_mutation_rate(self, success_counts, trial_counts): + return self.mutation_rate * (1 - success_counts / (trial_counts + 1)) + + def update_trial_counts(self, success_mask, trial_counts): + trial_counts += ~success_mask + trial_counts[success_mask] = 0 + return trial_counts + + def diversity_mutation(self, population): + mask = np.random.rand(self.population_size, self.dim) < self.diversity_rate + new_population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + population = np.where(mask, new_population, population) + return population + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(sol) for sol in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + success_counts = np.zeros(self.population_size) + trial_counts = np.zeros(self.population_size) + + for _ in range(self.budget // self.population_size): + offspring_population = [] + for _ in range(self.population_size): + new_solution = best_solution + self.step_size * self.levy_flight() + offspring_population.append(new_solution) + + population = np.vstack((population, offspring_population)) + fitness = np.array([func(sol) for sol in population]) + sorted_indices = np.argsort(fitness)[: self.population_size] + population = population[sorted_indices] + fitness = np.array([func(sol) for sol in population]) + + if fitness[0] < best_fitness: + best_solution = population[0] + best_fitness = fitness[0] + success_counts += 1 + + success_mask = fitness < best_fitness + trial_counts = self.update_trial_counts(success_mask, trial_counts) + mutation_rates = self.adaptive_mutation_rate(success_counts, trial_counts) + + population = self.diversity_mutation(population) + self.step_size = np.clip(self.step_size * np.exp(np.mean(mutation_rates)), 0.01, 0.5) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/NovelHarmonyTabuSearch.py b/nevergrad/optimization/lama/NovelHarmonyTabuSearch.py new file mode 100644 index 000000000..e4663001d --- /dev/null +++ b/nevergrad/optimization/lama/NovelHarmonyTabuSearch.py @@ -0,0 +1,91 @@ +import numpy as np + + +class NovelHarmonyTabuSearch: + def __init__( + self, + budget=1000, + num_harmonies=50, + num_dimensions=5, + bandwidth=0.1, + tabu_tenure=5, + pitch_adjustment_rate=0.5, + ): + self.budget = budget + self.num_harmonies = num_harmonies + self.num_dimensions = num_dimensions + self.bandwidth = bandwidth + self.tabu_tenure = tabu_tenure + self.pitch_adjustment_rate = pitch_adjustment_rate + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_harmonies, self.num_dimensions)) + + def generate_new_solution(self, harmony_memory, best_solution, bounds, tabu_list): + new_solution = np.zeros_like(harmony_memory[0]) + for i in range(self.num_dimensions): + if np.random.rand() < self.pitch_adjustment_rate: + indexes = np.random.choice(range(self.num_harmonies), size=2, replace=False) + new_solution[i] = np.mean(harmony_memory[indexes, i]) + else: + new_solution[i] = np.random.uniform(bounds.lb[i], bounds.ub[i]) + + new_solution = np.clip(new_solution, bounds.lb, bounds.ub) + new_solution_str = ",".join(map(str, new_solution)) + if new_solution_str in tabu_list: + return self.generate_new_solution(harmony_memory, best_solution, bounds, tabu_list) + + return new_solution, new_solution_str + + def update_tabu_list(self, tabu_list, new_solution_str): + tabu_list.append(new_solution_str) + if len(tabu_list) > self.tabu_tenure: + tabu_list.pop(0) + + def update_pitch_adjustment_rate(self, iteration): + self.pitch_adjustment_rate = max(0.1, self.pitch_adjustment_rate - 0.1 * iteration / self.budget) + + def update_tabu_tenure(self, num_improvements): + if num_improvements == 0.1 * self.budget: + self.tabu_tenure += 1 + + def evaluate_harmony(self, harmony, func): + return func(harmony) + + def harmonize(self, func, harmony_memory): + harmony_scores = [self.evaluate_harmony(harmony, func) for harmony in harmony_memory] + best_index = np.argmin(harmony_scores) + return harmony_memory[best_index], harmony_scores[best_index] + + def update_memory(self, harmony_memory, new_solution, func): + worst_index = np.argmax([func(harmony) for harmony in harmony_memory]) + if func(new_solution) < func(harmony_memory[worst_index]): + harmony_memory[worst_index] = new_solution + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + harmony_memory = self.initialize_positions(bounds) + tabu_list = [] + num_improvements = 0 + + for i in range(self.budget): + new_solution, new_solution_str = self.generate_new_solution( + harmony_memory, self.x_opt, bounds, tabu_list + ) + if new_solution_str not in tabu_list: + self.update_memory(harmony_memory, new_solution, func) + self.update_tabu_list(tabu_list, new_solution_str) + self.update_pitch_adjustment_rate(i) + + best_harmony, best_score = self.harmonize(func, harmony_memory) + if best_score < self.f_opt: + self.f_opt = best_score + self.x_opt = best_harmony + num_improvements += 1 + + self.update_tabu_tenure(num_improvements) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ODEMF.py b/nevergrad/optimization/lama/ODEMF.py new file mode 100644 index 000000000..0b48487da --- /dev/null +++ b/nevergrad/optimization/lama/ODEMF.py @@ -0,0 +1,86 @@ +import numpy as np + + +class ODEMF: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.85, + F_min=0.5, + F_max=0.9, + memory_size=100, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for storing good solutions + memory = np.empty((0, dimension)) + # Tracking elite solutions + elite = np.empty((self.elite_size, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 10) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx] + + for i in range(self.population_size): + # Adaptive mutation factor based on feedback mechanism + F = self.F_min + (self.F_max - self.F_min) * evaluations / self.budget * np.exp( + -4 * (best_fitness - fitness[i]) ** 2 + ) + + # Mutation: Differential Evolution strategy with feedback on fitness + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with the old good solutions + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + elif np.random.rand() < 0.2: # More frequent replacement in memory + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ORAMED.py b/nevergrad/optimization/lama/ORAMED.py new file mode 100644 index 000000000..13310b7b5 --- /dev/null +++ b/nevergrad/optimization/lama/ORAMED.py @@ -0,0 +1,85 @@ +import numpy as np + + +class ORAMED: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + + # Mutation strategy refined: DE/current-to-best/1 + idxs = np.random.choice(self.population_size, 3, replace=False) + a, b, c = population[idxs] + best_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OctopusSwarmAlgorithm.py b/nevergrad/optimization/lama/OctopusSwarmAlgorithm.py new file mode 100644 index 000000000..40dafb33c --- /dev/null +++ b/nevergrad/optimization/lama/OctopusSwarmAlgorithm.py @@ -0,0 +1,50 @@ +import numpy as np + + +class OctopusSwarmAlgorithm: + def __init__(self, budget=1000, num_octopuses=20, num_dimensions=5, alpha=0.1, beta=0.5): + self.budget = budget + self.num_octopuses = num_octopuses + self.num_dimensions = num_dimensions + self.alpha = alpha + self.beta = beta + + def initialize_positions(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, size=(self.num_octopuses, self.num_dimensions)) + + def levy_flight(self): + sigma = 1.0 + u = np.random.normal(0, sigma) + v = np.random.normal(0, 1) + step = u / abs(v) ** (1.5) + return step + + def move_octopus(self, current_position, best_position, bounds): + step = self.alpha * (best_position - current_position) + self.beta * self.levy_flight() + new_position = current_position + step + new_position = np.clip(new_position, bounds.lb, bounds.ub) + return new_position + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + positions = self.initialize_positions(bounds) + best_position = positions[0].copy() + + for _ in range(self.budget): + for i in range(self.num_octopuses): + new_position = self.move_octopus(positions[i], best_position, bounds) + f_new = func(new_position) + f_current = func(positions[i]) + + if f_new < f_current: + positions[i] = new_position + if f_new < func(best_position): + best_position = new_position.copy() + + self.f_opt = func(best_position) + self.x_opt = best_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimalAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/OptimalAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..6babfe476 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalAdaptiveDifferentialEvolution.py @@ -0,0 +1,62 @@ +import numpy as np + + +class OptimalAdaptiveDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, F_base=0.5, CR=0.9, strategy="elite"): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Elite strategy: always use the best individual as the base + if self.strategy == "elite": + base_idx = np.argmin(fitness) + base = population[base_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Adjust F dynamically based on fitness values + F = self.F_base * (1 - fitness[i] / (best_fitness + 1e-6)) + + # Mutation using two random indices + idxs = [idx for idx in range(self.population_size) if idx != i and idx != base_idx] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (b - a), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalAdaptiveDifferentialSearch.py b/nevergrad/optimization/lama/OptimalAdaptiveDifferentialSearch.py new file mode 100644 index 000000000..106001783 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalAdaptiveDifferentialSearch.py @@ -0,0 +1,85 @@ +import numpy as np + + +class OptimalAdaptiveDifferentialSearch: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = self.F_min + (self.F_max - self.F_min) * np.abs(np.cos(np.pi * evaluations / self.budget)) + + # Mutation: DE/current-to-best/1/binomial + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.5 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - b + c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > trial_fitness: + memory[worst_idx] = trial.copy() + memory_fitness[worst_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalAdaptiveMutationEnhancedSearch.py b/nevergrad/optimization/lama/OptimalAdaptiveMutationEnhancedSearch.py new file mode 100644 index 000000000..25fc77b7f --- /dev/null +++ b/nevergrad/optimization/lama/OptimalAdaptiveMutationEnhancedSearch.py @@ -0,0 +1,93 @@ +import numpy as np + + +class OptimalAdaptiveMutationEnhancedSearch: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.2, + F_max=1.0, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite + sorted_indices = np.argsort(fitness) + elite = population[sorted_indices[: self.elite_size]] + elite_fitness = fitness[sorted_indices[: self.elite_size]] + + # Adaptive mutation based on fitness deviation and time-progress + F = self.F_min + (self.F_max - self.F_min) * np.exp(-1.0 * np.var(fitness) / np.ptp(fitness)) + + for i in range(self.population_size): + # Selection of mutation strategy based on adaptive rates + if np.random.rand() < 0.5: + mutation_strategy = "rand" + idxs = np.random.choice( + [idx for idx in range(self.population_size) if idx != i], 3, replace=False + ) + a, b, c = population[idxs] + mutant = a + F * (b - c) + else: + mutation_strategy = "best" + a = population[np.random.choice([idx for idx in range(self.population_size) if idx != i])] + mutant = best_solution + F * (a - best_solution) + + mutant = np.clip(mutant, lb, ub) # Ensure mutant is within bounds + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update the memory with good solutions + worse_memory_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[worse_memory_idx]: + memory[worse_memory_idx] = population[i] + memory_fitness[worse_memory_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalAdaptiveSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/OptimalAdaptiveSwarmDifferentialEvolution.py new file mode 100644 index 000000000..405f10faf --- /dev/null +++ b/nevergrad/optimization/lama/OptimalAdaptiveSwarmDifferentialEvolution.py @@ -0,0 +1,52 @@ +import numpy as np + + +class OptimalAdaptiveSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 500 # Increased population size for better exploration + self.F_base = 0.8 # Increased base mutation factor for vigorous exploration + self.CR = 0.9 # Crossover probability + self.adapt_rate = 0.05 # Reduced adaptation rate for a more stable mutation factor + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + F_adaptive = self.F_base + self.adapt_rate * np.cos(i / (self.budget / self.pop_size) * 2 * np.pi) + + for j in range(self.pop_size): + # Mutation strategy: DE/current-to-pbest/1 + idxs = np.argsort(fitness)[: int(0.2 * self.pop_size)] # p-best individuals + pbest = pop[np.random.choice(idxs)] + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adaptive * (pbest - pop[j]) + F_adaptive * (a - b) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/OptimalBalanceSearch.py b/nevergrad/optimization/lama/OptimalBalanceSearch.py new file mode 100644 index 000000000..c8be32998 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalBalanceSearch.py @@ -0,0 +1,61 @@ +import numpy as np + + +class OptimalBalanceSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialization + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Configuration for adaptive scaling + max_scale = 5.0 + min_scale = 0.01 + scale = max_scale + scale_decay = 0.95 # More gradual decay + + # Configuration for adaptive exploration + initial_exploration_prob = 0.5 + min_exploration_prob = 0.1 + exploration_decay = 0.99 # Slower decay rate for exploration + + exploration_probability = initial_exploration_prob + + for i in range(1, self.budget): + if np.random.rand() < exploration_probability: + # Exploration with more controlled scaling + scale_range = np.linspace(min_scale, max_scale, num=self.budget) + candidate = current_point + np.random.uniform(-scale_range[i], scale_range[i], self.dim) + candidate = np.clip(candidate, -5.0, 5.0) # Ensure within bounds + else: + # Exploitation with adaptive perturbation + perturbation = np.random.normal(0, scale, self.dim) + candidate = current_point + perturbation + candidate = np.clip(candidate, -5.0, 5.0) # Ensure within bounds + + candidate_f = func(candidate) + + # Update if the candidate is better + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Update exploration probability and scale + exploration_probability *= exploration_decay + exploration_probability = max(min_exploration_prob, exploration_probability) + scale *= scale_decay + scale = max(min_scale, scale) # Ensuring scale does not become too small + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimalCohortDiversityOptimizer.py b/nevergrad/optimization/lama/OptimalCohortDiversityOptimizer.py new file mode 100644 index 000000000..1ed885545 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalCohortDiversityOptimizer.py @@ -0,0 +1,72 @@ +import numpy as np + + +class OptimalCohortDiversityOptimizer: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_fraction=0.2, + mutation_intensity=0.1, + recombination_prob=0.7, + adaptation_rate=0.98, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.recombination_prob = recombination_prob + self.adaptation_rate = adaptation_rate + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + + for i in range(self.population_size): + # Selecting parents using elite indices + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices[0]], population[parents_indices[1]] + + # Recombining parents to create offspring + if np.random.rand() < self.recombination_prob: + mask = np.random.rand(self.dimension) < 0.5 + child = np.where(mask, parent1, parent2) + else: + child = parent1.copy() # Inherit directly from a single parent if no crossover + + # Mutation: perturb the offspring + mutation = np.random.normal(scale=self.mutation_intensity, size=self.dimension) + child = np.clip(child + mutation, -5.0, 5.0) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + # Update the population and fitness + population = new_population + fitness = np.array([func(individual) for individual in population]) + + # Adaptively update mutation intensity + self.mutation_intensity *= self.adaptation_rate + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalConvergenceDE.py b/nevergrad/optimization/lama/OptimalConvergenceDE.py new file mode 100644 index 000000000..a65c78f69 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalConvergenceDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class OptimalConvergenceDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.52, F_range=0.38, CR=0.88, strategy="best" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for increased diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection, focusing on 'best' individuals + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy based on 'best' + if self.strategy == "best": + base = population[best_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjusting F for more exploration + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using refined differential variation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(base + F * (a - b + c - base), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalDynamicAdaptiveEvolutionOptimizer.py b/nevergrad/optimization/lama/OptimalDynamicAdaptiveEvolutionOptimizer.py new file mode 100644 index 000000000..994633a64 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalDynamicAdaptiveEvolutionOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class OptimalDynamicAdaptiveEvolutionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per the problem description + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initial temperature for simulated annealing and evolutionary strategies + T = 1.2 + T_min = 0.0005 + alpha = 0.92 # Cooling rate + + # Evolutionary strategy parameters + F = 0.75 # Base mutation factor + CR = 0.88 # Crossover probability optimized for diversity and convergence + + population_size = 80 # Population size adjusted for optimal search space exploration + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Main optimization loop + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation influenced by temperature and normalized progress + dynamic_F = F * ( + np.exp(-0.2 * T) * (0.5 + 0.5 * np.cos(np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Temperature and fitness-delta based acceptance + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptively modify the cooling rate based on the search state + adaptive_cooling = alpha - 0.015 * np.sin(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalDynamicMutationSearch.py b/nevergrad/optimization/lama/OptimalDynamicMutationSearch.py new file mode 100644 index 000000000..3a15f44b2 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalDynamicMutationSearch.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimalDynamicMutationSearch: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=1.2, + memory_size=30, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Track the best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update memory + for i in range(self.population_size): + if fitness[i] < np.max(memory_fitness): + worst_idx = np.argmax(memory_fitness) + memory[worst_idx] = population[i] + memory_fitness[worst_idx] = fitness[i] + + # Mutation and Crossover + for i in range(self.population_size): + # Adaptive mutation factor based on population diversity + F = self.F_min + (self.F_max - self.F_min) * (1 - np.std(fitness) / np.ptp(fitness)) + + # Select indices for mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Mutation: DE/rand-to-best/1 + mutant = np.clip(population[i] + F * (best_solution - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV14.py b/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV14.py new file mode 100644 index 000000000..3834659f3 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV14.py @@ -0,0 +1,60 @@ +import numpy as np + + +class OptimalDynamicPrecisionOptimizerV14: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Enhanced temperature parameters for deeper and more nuanced exploration + T = 1.2 # Higher initial temperature to promote extensive initial search + T_min = 0.0003 # Lower minimum temperature for fine-grained exploration at the end + alpha = 0.93 # Slower cooling rate to ensure a gradual transition and more evaluations + + # Mutation and crossover factors fine-tuned for robust evolutionary dynamics + F = 0.77 # Slightly increased mutation factor to induce robust exploratory mutations + CR = 0.89 # Slightly increased crossover probability to promote diversity + + population_size = 85 # Tweaked population size for better coverage of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing an advanced sigmoid function for mutation adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Mutation factor dynamically adapts with a sigmoid function for refined control + dynamic_F = ( + F * np.exp(-0.06 * T) * (0.6 + 0.4 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a more sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.009 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV21.py b/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV21.py new file mode 100644 index 000000000..d895c02c9 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalDynamicPrecisionOptimizerV21.py @@ -0,0 +1,59 @@ +import numpy as np + + +class OptimalDynamicPrecisionOptimizerV21: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Further refined temperature and cooling parameters + T = 1.2 # Increased starting temperature for broader initial exploration + T_min = 0.0004 # Fine-tuned minimum temperature for prolonged fine-tuning phase + alpha = 0.93 # Adjusted cooling rate allowing longer search duration and slower convergence + + # Mutation and crossover parameters optimized + F = 0.78 # Mutation factor slightly increased for stronger explorative moves + CR = 0.88 # Crossover probability adjusted for optimal diversity + + population_size = 85 # Fine-tuning the population size for better performance balance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation strategy with dynamic adaptation based on a sigmoid function + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Enhanced dynamic mutation rate, adjusting more smoothly + sigmoid_adjustment = 0.65 + 0.35 * np.tanh(5 * (evaluation_count / self.budget - 0.5)) + dynamic_F = F * np.exp(-0.06 * T) * sigmoid_adjustment + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More sensitive acceptance criteria with an improved temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a sinusoidal modulation for finer control + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalEnhancedRAMEDS.py b/nevergrad/optimization/lama/OptimalEnhancedRAMEDS.py new file mode 100644 index 000000000..00c323c4b --- /dev/null +++ b/nevergrad/optimization/lama/OptimalEnhancedRAMEDS.py @@ -0,0 +1,84 @@ +import numpy as np + + +class OptimalEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.92, + F_min=0.4, + F_max=0.9, + memory_size=50, + elite_size=8, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + F = self.F_max - ((self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget)) + + # Mutation: DE/current-to-best/1 with occasional random memory inclusion + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalEnhancedStrategyDE.py b/nevergrad/optimization/lama/OptimalEnhancedStrategyDE.py new file mode 100644 index 000000000..a268d7510 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalEnhancedStrategyDE.py @@ -0,0 +1,66 @@ +import numpy as np + + +class OptimalEnhancedStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_range=0.4, CR=0.9, strategy="best-1-bin" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically + if self.strategy == "best-1-bin": + best = population[best_idx] + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + mutant = np.clip(best + F * (a - b), self.lb, self.ub) + else: + idxs = [idx for idx in range(self.population_size) if idx != i] + base, a, b = population[np.random.choice(idxs, 3, replace=False)] + F = self.F_base + np.random.rand() * self.F_range + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalEvolutionaryGradientHybridOptimizerV8.py b/nevergrad/optimization/lama/OptimalEvolutionaryGradientHybridOptimizerV8.py new file mode 100644 index 000000000..e7455681d --- /dev/null +++ b/nevergrad/optimization/lama/OptimalEvolutionaryGradientHybridOptimizerV8.py @@ -0,0 +1,80 @@ +import numpy as np + + +class OptimalEvolutionaryGradientHybridOptimizerV8: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.59, + F_range=0.39, + CR=0.91, + elite_fraction=0.13, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'fixed' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.8: # Increase the probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F with a refined adjustment strategy + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation method + idxs = [ + idx + for idx in range(self.population_size) + if idx not in [i, best_idx] + list(elite_indices) + ] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with refined CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV11.py b/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV11.py new file mode 100644 index 000000000..e3eb10c76 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV11.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimalEvolutionaryGradientOptimizerV11: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.45, + CR=0.95, + elite_fraction=0.1, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'fixed' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Increased probability to select the current best + if np.random.rand() < 0.75: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV25.py b/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV25.py new file mode 100644 index 000000000..71db787a0 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalEvolutionaryGradientOptimizerV25.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimalEvolutionaryGradientOptimizerV25: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.5, + F_range=0.4, + CR=0.9, + elite_fraction=0.12, + mutation_strategy="enhanced_adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Mutation factor adjustment range + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Proportion of population considered elite + self.mutation_strategy = mutation_strategy # Adaptive strategy with enhanced features + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "enhanced_adaptive": + # Enhanced adaptive strategy selects either the best individual or a random elite + if np.random.rand() < 0.8: # Increased focus on the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Default strategy using random elite base + base = population[np.random.choice(elite_indices)] + + # Mutation factor dynamically adjusted + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1/best mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is reached + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalHybridDifferentialAnnealingOptimizer.py b/nevergrad/optimization/lama/OptimalHybridDifferentialAnnealingOptimizer.py new file mode 100644 index 000000000..89c59b9e2 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalHybridDifferentialAnnealingOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class OptimalHybridDifferentialAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = np.full(self.dim, -5.0) # Lower bound as per the problem description + self.ub = np.full(self.dim, 5.0) # Upper bound as per the problem description + + def __call__(self, func): + # Initial temperature tailored for better control early and late in the search + T = 1.0 + T_min = 0.0001 # Lower minimum temperature for finer control at late stages + alpha = 0.98 # Very slow cooling rate to allow more extensive exploration + + # Mutation factor and crossover probability + F = 0.5 # Mutation factor for balanced search intensity + CR = 0.95 # Very high crossover probability to maintain diversity + + # Population size increased for more diverse initial solutions + population_size = 100 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Evolutionary operations with annealing acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < CR, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + # Acceptance condition based on simulated annealing + if trial_fitness < fitness[i] or np.random.rand() < np.exp(-(trial_fitness - fitness[i]) / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling based on progress and temperature + T *= alpha ** (1 + 0.2 * (evaluation_count / self.budget)) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalHyperStrategicOptimizerV51.py b/nevergrad/optimization/lama/OptimalHyperStrategicOptimizerV51.py new file mode 100644 index 000000000..26132927b --- /dev/null +++ b/nevergrad/optimization/lama/OptimalHyperStrategicOptimizerV51.py @@ -0,0 +1,78 @@ +import numpy as np + + +class OptimalHyperStrategicOptimizerV51: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.50, + F_range=0.4, + CR=0.93, + elite_fraction=0.05, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Adjusted base mutation factor for a balance of exploration and exploitation + self.F_range = F_range # Narrowed range for mutation factor to maintain controlled variability + self.CR = CR # Adjusted crossover probability to ensure sufficient mixing + self.elite_fraction = elite_fraction # Reduced elite fraction to focus on the best solutions + self.mutation_strategy = mutation_strategy # Utilizing adaptive strategy for dynamic adjustments + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize population within the given bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Enhanced probability to select based on fitness + if np.random.rand() < 0.8: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F within a controlled range + F = self.F_base + np.random.normal(0, self.F_range / 2) + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation using binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if the budget is exceeded + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalPrecisionDynamicAdaptationOptimizer.py b/nevergrad/optimization/lama/OptimalPrecisionDynamicAdaptationOptimizer.py new file mode 100644 index 000000000..beff3df36 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalPrecisionDynamicAdaptationOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class OptimalPrecisionDynamicAdaptationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound + self.ub = 5.0 # Upper bound + + def __call__(self, func): + # Initialize temperature and cooling parameters more aggressively for better exploration initially + T = 1.2 # Higher initial temperature to encourage exploration + T_min = 0.0008 # Reduced minimum temperature for enhanced late exploration + alpha = 0.95 # Slower cooling rate for a more granular search + + # Optimized mutation and crossover parameters for a balance between exploration and exploitation + F_base = 0.85 # Base mutation factor adjusted + CR_base = 0.88 # Base crossover probability slightly adjusted for maintaining genetic diversity + + population_size = 70 # Tuned population size to match budget constraints + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics and temperature-dependent acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and progress + dynamic_F = ( + F_base + * np.exp(-0.12 * T) + * (0.75 + 0.25 * np.cos(1.5 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR_dynamic = CR_base + 0.1 * np.sin(1.5 * np.pi * evaluation_count / self.budget) + cross_points = np.random.rand(self.dim) < CR_dynamic + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved adaptive acceptance criterion based on delta fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with nuanced modulation based on exploration depth + adaptive_cooling = alpha - 0.02 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryOptimizerV37.py b/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryOptimizerV37.py new file mode 100644 index 000000000..fc70bfdde --- /dev/null +++ b/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryOptimizerV37.py @@ -0,0 +1,78 @@ +import numpy as np + + +class OptimalPrecisionEvolutionaryOptimizerV37: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.55, + F_range=0.40, + CR=0.90, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor, slightly decreased for more stable mutation + self.F_range = F_range # Reduced range to increase precision in mutations + self.CR = CR # Crossover probability, slightly reduced to increase offspring quality + self.elite_fraction = ( + elite_fraction # Increased elite fraction to maintain a larger set of high-quality solutions + ) + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy to switch between exploitation and exploration dynamically + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy and dynamic probability + if np.random.rand() < 0.76: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (np.random.rand() - 0.5) * 2 * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryThermalOptimizer.py b/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryThermalOptimizer.py new file mode 100644 index 000000000..dc0870a9b --- /dev/null +++ b/nevergrad/optimization/lama/OptimalPrecisionEvolutionaryThermalOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class OptimalPrecisionEvolutionaryThermalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Modified initial temperature and advanced cooling rate for better exploration-exploitation balance + T = 1.0 # Reduced initial temperature for more controlled exploration + T_min = 0.002 # Lower minimum temperature for extended fine-tuning at late stages + alpha = 0.92 # Reduced cooling rate to extend the exploration phase + + # Refined mutation and crossover parameters based on prior performance insights + F = 0.8 # Increased mutation factor for bolder search moves + CR = 0.9 # Higher crossover probability to enhance solution variability + + population_size = 100 # Increased population size for broader initial coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Integrate dynamic mutational strategies and adaptive temperature-based acceptance conditions + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.cos(np.pi * T / 2) * (0.7 + 0.3 * np.cos(np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion incorporating thermal influence and relative fitness improvement + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.log(1 + np.abs(delta_fitness)))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Implement a progressive cooling strategy that adapts more precisely based on optimization depth + adaptive_cooling = alpha - 0.01 * np.tanh(evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/OptimalPrecisionHybridSearchV3.py b/nevergrad/optimization/lama/OptimalPrecisionHybridSearchV3.py new file mode 100644 index 000000000..4488c1662 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalPrecisionHybridSearchV3.py @@ -0,0 +1,70 @@ +import numpy as np + + +class OptimalPrecisionHybridSearchV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + elite_size = int(0.2 * population_size) + mutation_rate = 0.05 + mutation_scale = lambda t: 0.075 * np.exp(-0.0002 * t) # Refined mutation scale decay + crossover_rate = 0.85 + + local_search_prob = 0.30 # Increased probability for local search + local_search_step_scale = lambda t: 0.015 * np.exp(-0.00006 * t) # Refined local search step decay + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimalQuantumSynergyStrategy.py b/nevergrad/optimization/lama/OptimalQuantumSynergyStrategy.py new file mode 100644 index 000000000..7b2bf2324 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalQuantumSynergyStrategy.py @@ -0,0 +1,78 @@ +import numpy as np + + +class OptimalQuantumSynergyStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 400 # Enhanced population size for wider exploration + self.elite_size = 80 # Expanded elite pool to preserve more high-quality solutions + self.crossover_probability = 0.9 # Increased probability to promote genetic diversity + self.mutation_scale = 0.005 # Further refined mutation for micro adjustments + self.quantum_mutation_scale = 0.02 # Optimal scale for effective quantum leaps + self.quantum_probability = 0.3 # Higher chance for quantum mutations to foster innovative solutions + self.precision_boost_factor = 0.02 # Optimally tuned boost factor for precision enhancement + self.reactivity_factor = 0.01 # Minimized to stabilize evolution dynamics + self.recombination_rate = 0.3 # Enhanced for more frequent recombination among elites + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + for i in range(num_offspring): + if np.random.rand() < self.crossover_probability: + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + offspring[i] = self.optimal_quantum_recombination(elite[p1], elite[p2]) + else: + offspring[i] = elite[np.random.choice(elite.shape[0])] + + scale = self.mutation_scale + self.precision_boost_factor * np.log(remaining_budget + 1) + offspring[i] += np.random.normal(0, scale, self.dim) + + if np.random.rand() < self.quantum_probability: + offspring[i] += np.random.normal(0, self.quantum_mutation_scale, self.dim) + + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + + return np.vstack([elite, offspring]) + + def optimal_quantum_recombination(self, parent1, parent2): + mask = np.random.rand(self.dim) > 0.5 + child = np.where(mask, parent1, parent2) + return child + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/OptimalRefinedEnhancedUltraRefinedRAMEDS.py b/nevergrad/optimization/lama/OptimalRefinedEnhancedUltraRefinedRAMEDS.py new file mode 100644 index 000000000..2cff77020 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalRefinedEnhancedUltraRefinedRAMEDS.py @@ -0,0 +1,84 @@ +import numpy as np + + +class OptimalRefinedEnhancedUltraRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor using feedback + variance = np.var(population, axis=0) + F = self.F_min + (self.F_max - self.F_min) * np.tanh( + np.mean(variance) + ) # Adaptive mutation factor + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalSelectiveEvolutionaryOptimizerV20.py b/nevergrad/optimization/lama/OptimalSelectiveEvolutionaryOptimizerV20.py new file mode 100644 index 000000000..703ab82ad --- /dev/null +++ b/nevergrad/optimization/lama/OptimalSelectiveEvolutionaryOptimizerV20.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimalSelectiveEvolutionaryOptimizerV20: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.60, + F_range=0.40, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="selective_adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # 'selective_adaptive' for focused search on elites + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "selective_adaptive": + # Higher chance of using best individual as mutant base, inspired by elitism + if np.random.rand() < 0.85: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base creation + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F influenced by the evolution progress + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation of the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimalSmartRefinedRAMEDS.py b/nevergrad/optimization/lama/OptimalSmartRefinedRAMEDS.py new file mode 100644 index 000000000..71a171c41 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalSmartRefinedRAMEDS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class OptimalSmartRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adaptive mutation factor adjustment, tuning based on the distribution of fitness + F = self.F_min + (np.std(fitness) / (np.std(memory_fitness) + 1e-5)) * (self.F_max - self.F_min) + + # Periodic full elite update + if evaluations % (self.budget // 10) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip( + best_solution + F * (elite[np.random.randint(self.elite_size)] - c + a - b), lb, ub + ) + + # Smart crossover based on adaptive rate + cross_points = np.random.rand(dimension) < ( + self.crossover_rate * (1 - (fitness[i] / (best_fitness + 1e-5))) + ) + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Smart memory update strategy + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalSpiralCentroidSearch.py b/nevergrad/optimization/lama/OptimalSpiralCentroidSearch.py new file mode 100644 index 000000000..3568a2904 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalSpiralCentroidSearch.py @@ -0,0 +1,59 @@ +import numpy as np + + +class OptimalSpiralCentroidSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize the centroid position of the search space + centroid = np.zeros(self.dim) + radius = 5.0 # Initial radius correlating to the full search space + angle_increment = np.pi / 8 # Initial angle increment for broad exploration + + # Parameters for adapting the search + radius_decay = 0.98 # Decrement for radius to focus search progressively + angle_refinement = 0.95 # Refinement of angle increment for increased precision + evaluations_left = self.budget + min_radius = 0.05 # Minimum radius to maintain a level of exploration + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = int(2 * np.pi / angle_increment) + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + new_point = centroid + radius * np.array( + [np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2) + ) + new_point = np.clip(new_point, -5.0, 5.0) # Ensure the candidate is within bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Update the centroid to the best found point in the current iteration + if points: + best_index = np.argmin(function_values) + centroid = points[best_index] + + # Reduce the radius and refine the angle increment for more focused search + radius *= radius_decay + radius = max(radius, min_radius) # Avoid too small radius to prevent stagnation + angle_increment *= angle_refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimalStrategicAdaptiveOptimizer.py b/nevergrad/optimization/lama/OptimalStrategicAdaptiveOptimizer.py new file mode 100644 index 000000000..f870ba6bd --- /dev/null +++ b/nevergrad/optimization/lama/OptimalStrategicAdaptiveOptimizer.py @@ -0,0 +1,84 @@ +import numpy as np + + +class OptimalStrategicAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Parameters and initial conditions + population_size = 150 + mutation_rate = 0.7 + crossover_rate = 0.6 + sigma = 0.3 # Initial mutation step size + elite_size = int(0.1 * population_size) # Elite proportion + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolutionary loop + while evaluations < self.budget: + new_population = [] + + # Elitism + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + new_population.append(population[idx]) + + # Main evolutionary process + while len(new_population) < population_size: + # Crossover + if np.random.rand() < crossover_rate: + parents = np.random.choice(population_size, 2, replace=False) + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate( + (population[parents[0]][:crossover_point], population[parents[1]][crossover_point:]) + ) + offspring = np.clip(offspring, self.lower_bound, self.upper_bound) + offspring_fitness = func(offspring) + evaluations += 1 + + # Selection + if offspring_fitness < fitness[parents[1]]: + new_population.append(offspring) + if offspring_fitness < best_fitness: + best_solution = offspring + best_fitness = offspring_fitness + else: + new_population.append(population[parents[1]]) + + # Mutation + idx = np.random.choice(population_size) + individual = population[idx] + mutant = individual + sigma * np.random.randn(self.dim) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + mutant_fitness = func(mutant) + evaluations += 1 + + # Selection + if mutant_fitness < fitness[idx]: + new_population.append(mutant) + if mutant_fitness < best_fitness: + best_solution = mutant + best_fitness = mutant_fitness + else: + new_population.append(individual) + + population = np.array(new_population) + fitness = np.array([func(x) for x in population]) + + # Adaptive strategy + mutation_rate = min(1.0, mutation_rate + np.random.uniform(-0.05, 0.05)) + crossover_rate = min(1.0, crossover_rate + np.random.uniform(-0.05, 0.05)) + sigma *= np.exp(0.05 * np.random.randn()) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimalStrategicHybridDE.py b/nevergrad/optimization/lama/OptimalStrategicHybridDE.py new file mode 100644 index 000000000..9ebc46503 --- /dev/null +++ b/nevergrad/optimization/lama/OptimalStrategicHybridDE.py @@ -0,0 +1,80 @@ +import numpy as np + + +class OptimalStrategicHybridDE: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.5, + F_range=0.3, + CR=0.9, + hybridization_factor=0.3, + elite_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.hybridization_factor = hybridization_factor # Factor for hybrid mutation strategy + self.elite_rate = elite_rate # Percentage of top performers considered elite + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Decide whether to use elite strategy + if np.random.rand() < self.elite_rate: + elite_idxs = np.argsort(fitness)[: max(1, int(self.population_size * self.elite_rate))] + base = population[np.random.choice(elite_idxs)] + elif np.random.rand() < self.hybridization_factor: + # Use best individual sometimes to ensure convergence + base = best_individual + else: + # Regular mutation base selection + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F for mutation + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using differential evolution strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using binomial method + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimallyBalancedQuantumStrategy.py b/nevergrad/optimization/lama/OptimallyBalancedQuantumStrategy.py new file mode 100644 index 000000000..fda5e1b3a --- /dev/null +++ b/nevergrad/optimization/lama/OptimallyBalancedQuantumStrategy.py @@ -0,0 +1,80 @@ +import numpy as np + + +class OptimallyBalancedQuantumStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_ratio=0.2, + mutation_scale_base=0.5, + mutation_decay=0.05, + crossover_rate=0.8, + quantum_update_rate=0.9, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_scale_base = mutation_scale_base + self.mutation_decay = mutation_decay + self.crossover_rate = crossover_rate + self.quantum_update_rate = quantum_update_rate + + def __call__(self, func): + # Initialize population randomly within the search space + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + evaluations = self.population_size + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent1, parent2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[parent1], population[parent2]) + else: + offspring = population[np.random.choice(elite_indices)] + + if np.random.random() < self.quantum_update_rate: + offspring = self.quantum_state_update(offspring, best_individual) + + mutation_scale = self.adaptive_mutation_scale(evaluations) + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) + + new_population[i] = offspring + + # Evaluate new population + fitness = np.array([func(x) for x in new_population]) + evaluations += self.population_size + + # Update best individual + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = new_population[current_best_idx] + + population = new_population + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, 0.1, self.dimension) + return best_individual + perturbation * (best_individual - individual) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_scale_base * np.exp(-self.mutation_decay * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveDifferentialClimber.py b/nevergrad/optimization/lama/OptimizedAdaptiveDifferentialClimber.py new file mode 100644 index 000000000..406640e5f --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveDifferentialClimber.py @@ -0,0 +1,68 @@ +import numpy as np + + +class OptimizedAdaptiveDifferentialClimber: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.mutation_factor = 0.6 # Reduced mutation factor for finer exploration + self.crossover_rate = 0.8 # Higher crossover rate for better information exchange + self.adaptive_mutation_decrease = 0.99 # Adaptive mutation factor decay rate + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, population, idx): + idxs = [i for i in range(self.population_size) if i != idx] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = np.clip( + population[a] + self.mutation_factor * (population[b] - population[c]), + self.bounds[0], + self.bounds[1], + ) + return mutant + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(crossover_mask, mutant, target) + + def select(self, current, candidate, func): + if func(candidate) < func(current): + return candidate + else: + return current + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + population[i] = self.select(population[i], trial, func) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + if trial_fitness < fitness[best_idx]: + best_individual = trial + best_idx = i + + # Gradually decrease the mutation factor to shift from exploration to exploitation + self.mutation_factor *= self.adaptive_mutation_decrease + + evaluations += 1 + if evaluations >= self.budget: + break + + return fitness[best_idx], best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategy.py b/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategy.py new file mode 100644 index 000000000..8a98c7adf --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class OptimizedAdaptiveDualPhaseStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using an additional differential vector + d = np.random.choice(idxs, 1, replace=False)[0] + mutant = population[a] + self.F * ( + population[b] - population[c] + population[best_idx] - population[d] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR with refined changes + scale = iteration / total_iterations + self.F = np.clip(0.75 * np.sin(2 * np.pi * scale) + 0.75, 0.1, 1) + self.CR = np.clip(0.75 * np.cos(2 * np.pi * scale) + 0.75, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, self.budget // self.pop_size) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategyV4.py b/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategyV4.py new file mode 100644 index 000000000..a6b16bf17 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveDualPhaseStrategyV4.py @@ -0,0 +1,80 @@ +import numpy as np + + +class OptimizedAdaptiveDualPhaseStrategyV4: + def __init__(self, budget, dimension=5, population_size=60, F_init=0.7, CR_init=0.85, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c, d = np.random.choice(idxs, 4, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Dual differentiation strategy in phase 2 for enhanced exploration + mutant = population[a] + self.F * ( + population[b] - population[c] + population[best_idx] - population[d] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Non-linear adjustment for F and CR, focusing on maintaining diversity longer + scale = iteration / total_iterations + self.F = np.clip(0.9 - 0.8 * scale**2, 0.1, 0.9) + self.CR = np.clip(0.85 - 0.75 * scale**2, 0.1, 0.85) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveDynamicStrategyV34.py b/nevergrad/optimization/lama/OptimizedAdaptiveDynamicStrategyV34.py new file mode 100644 index 000000000..d314ffb3d --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveDynamicStrategyV34.py @@ -0,0 +1,70 @@ +import numpy as np + + +class OptimizedAdaptiveDynamicStrategyV34: + def __init__(self, budget, dimension=5, population_size=80, F_init=0.6, CR_init=0.85): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, iteration): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + # Incorporate adaptive mutation factor adjusted by a logarithmic function + F_dynamic = self.F / np.log(iteration + 2) + mutant = ( + population[best_idx] + + F_dynamic * (population[a] - population[b]) + + 0.1 * (population[c] - population[best_idx]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, iteration): + # Implement adaptive crossover probability, varying over the course of iterations + CR_dynamic = self.CR * (0.5 + 0.5 * np.sin(2 * np.pi * iteration / self.budget)) + crossover_mask = np.random.rand(self.dimension) < CR_dynamic + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + iteration = 0 + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, iteration) + trial = self.crossover(population[i], mutant, iteration) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i], fitnesses[i] = trial, trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + iteration += 1 + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveGlobalLocalSearch.py b/nevergrad/optimization/lama/OptimizedAdaptiveGlobalLocalSearch.py new file mode 100644 index 000000000..e291c2657 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveGlobalLocalSearch.py @@ -0,0 +1,71 @@ +import numpy as np + + +class OptimizedAdaptiveGlobalLocalSearch: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=200): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.9 # Increased global influence for stronger exploration + self.local_influence = 0.1 # Reduced local influence to avoid premature convergence + self.vel_scale = 0.05 # Reduced velocity scaling for finer steps + self.learning_rate = 0.7 # Increased learning rate for faster convergence + self.adaptive_rate = 0.02 # Adaptive rate for dynamic adjustments + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = ( + np.random.uniform(-1, 1, (self.particles, self.dimension)) + * (self.bounds[1] - self.bounds[0]) + * self.vel_scale + ) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + self.global_influence * r1 * (personal_best_positions[i] - positions[i]) + + self.local_influence * r2 * (best_global_position - positions[i]) + ) + velocities[i] *= 1 - self.adaptive_rate # Dynamic velocity reduction + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveQuantumGradientHybridStrategy.py b/nevergrad/optimization/lama/OptimizedAdaptiveQuantumGradientHybridStrategy.py new file mode 100644 index 000000000..d98b41b90 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveQuantumGradientHybridStrategy.py @@ -0,0 +1,92 @@ +import numpy as np + + +class OptimizedAdaptiveQuantumGradientHybridStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=300, + elite_ratio=0.1, + mutation_intensity=1.5, + crossover_rate=0.9, + quantum_prob=0.9, + gradient_boost_prob=0.4, + adaptive_factor=0.1, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gradient_boost_prob = gradient_boost_prob + self.adaptive_factor = adaptive_factor + + def __call__(self, func): + # Initialize population within the provided bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = population[parent_idx].copy() + + if np.random.random() < self.gradient_boost_prob: + child = self.gradient_boost(child, func) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + mutation_scale = self.adaptive_mutation_scale(evaluations) + child = np.clip(child + np.random.normal(0, mutation_scale, self.dimension), -5, 5) + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def gradient_boost(self, individual, func, lr=0.02): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = np.array(individual) + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - lr * grad_est + + def quantum_state_update(self, individual, best_individual): + return individual + np.random.normal(0, self.adaptive_factor, self.dimension) * ( + best_individual - individual + ) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_intensity * np.exp(-self.adaptive_factor * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/OptimizedAdaptiveSimulatedAnnealingWithSmartMemory.py b/nevergrad/optimization/lama/OptimizedAdaptiveSimulatedAnnealingWithSmartMemory.py new file mode 100644 index 000000000..ec22f1456 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedAdaptiveSimulatedAnnealingWithSmartMemory.py @@ -0,0 +1,159 @@ +import numpy as np + + +class OptimizedAdaptiveSimulatedAnnealingWithSmartMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + # Initialize parameters + T_initial = 1.0 + T_min = 1e-5 + alpha_initial = 0.97 + beta_initial = 1.5 + + # Initial solution + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Smart Memory Reinforcement + if evaluations % (self.budget // 10) == 0: + best_idx = np.argmin(memory_scores) + for _ in range(memory_size // 4): + x_candidate = memory[best_idx] + np.random.normal(0, T, self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.008): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/OptimizedBalancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/OptimizedBalancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..70e6a7caa --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedBalancedDualStrategyAdaptiveDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class OptimizedBalancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage with balanced influence + trial = trial + 0.5 * np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/OptimizedConvergenceIslandStrategy.py b/nevergrad/optimization/lama/OptimizedConvergenceIslandStrategy.py new file mode 100644 index 000000000..c1c363044 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedConvergenceIslandStrategy.py @@ -0,0 +1,113 @@ +import numpy as np + + +class OptimizedConvergenceIslandStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=40, + population_per_island=70, + migration_interval=10, + migration_rate=0.1, + mutation_intensity=1.5, + mutation_decay=0.95, + elite_ratio=0.15, + crossover_probability=0.9, + tournament_size=4, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_interval = migration_interval + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + generation = 0 + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if generation % self.migration_interval == 0: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + generation += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimizedConvergentAdaptiveEvolver.py b/nevergrad/optimization/lama/OptimizedConvergentAdaptiveEvolver.py new file mode 100644 index 000000000..65540290e --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedConvergentAdaptiveEvolver.py @@ -0,0 +1,81 @@ +import numpy as np + + +class OptimizedConvergentAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=150, + elite_fraction=0.1, + mutation_rate=0.05, + mutation_adjustment=0.98, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_rate = mutation_rate + self.mutation_adjustment = mutation_adjustment + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual, scale): + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return individual + + def crossover(self, parent1, parent2): + child = np.zeros(self.dimension) + for i in range(self.dimension): + if np.random.rand() > 0.5: + child[i] = parent1[i] + else: + child[i] = parent2[i] + return child + + def reproduce(self, elites, elite_fitness): + new_population = elites.copy() + while len(new_population) < self.population_size: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.crossover(elites[parents[0]], elites[parents[1]]) + child = self.mutate(child, self.mutation_scale) + new_population = np.vstack([new_population, child]) + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + self.mutation_scale = (self.upper_bound - self.lower_bound) / 10 # Smaller initial mutation scale + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += len(population) + self.mutation_scale *= self.mutation_adjustment # Gradual decrease in mutation scale + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimizedCrossoverElitistStrategyV8.py b/nevergrad/optimization/lama/OptimizedCrossoverElitistStrategyV8.py new file mode 100644 index 000000000..ac19e0daf --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedCrossoverElitistStrategyV8.py @@ -0,0 +1,78 @@ +import numpy as np + + +class OptimizedCrossoverElitistStrategyV8: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_fraction=0.1, + mutation_intensity=0.05, + crossover_rate=0.95, + adaptive_crossover_depth=0.9, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_crossover_depth = adaptive_crossover_depth + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform adaptive crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation intensity + scale = self.mutation_intensity * np.exp(-evaluations / self.budget * 5) + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2, evaluations): + # Adaptive recombination based on the stage of optimization + alpha = np.random.uniform(0.3, 0.7) + if evaluations < self.budget * self.adaptive_crossover_depth: + alpha *= np.exp(-evaluations / (self.budget * self.adaptive_crossover_depth)) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/OptimizedDifferentialEvolution.py b/nevergrad/optimization/lama/OptimizedDifferentialEvolution.py new file mode 100644 index 000000000..9bacfd193 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDifferentialEvolution.py @@ -0,0 +1,52 @@ +import numpy as np + + +class OptimizedDifferentialEvolution: + def __init__(self, budget=10000, population_size=100, F=0.5, CR=0.7): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight, slightly reduced for stability + self.CR = CR # Crossover probability, lowered to increase exploration + self.dim = 5 # Hardcoded as per problem specification + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Main loop + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.F * (b - c), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Check if budget exhausted + if evaluations >= self.budget: + break + + # Find the best solution + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] + + +# Example usage: +# optimizer = OptimizedDifferentialEvolution(budget=10000) +# best_f, best_x = optimizer(your_black_box_function) diff --git a/nevergrad/optimization/lama/OptimizedDualPhaseAdaptiveHybridOptimizationV4.py b/nevergrad/optimization/lama/OptimizedDualPhaseAdaptiveHybridOptimizationV4.py new file mode 100644 index 000000000..f0e2c44e3 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDualPhaseAdaptiveHybridOptimizationV4.py @@ -0,0 +1,146 @@ +import numpy as np + + +class OptimizedDualPhaseAdaptiveHybridOptimizationV4: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Refined population size for better balance + self.initial_F = 0.6 # Adjusted mutation factor for control + self.initial_CR = 0.9 # Adjusted crossover rate for diversity + self.elite_rate = 0.15 # Increased elite rate for better exploitation + self.local_search_rate = 0.5 # Enhanced for more local refinement + self.memory_size = 20 # Fine-tuned memory size for adaptive tuning + self.w = 0.5 # Balanced inertia weight + self.c1 = 1.4 # Adjusted cognitive component + self.c2 = 1.7 # Adjusted social component + self.adaptive_phase_ratio = 0.5 # Equal balance between DE and PSO phases + self.alpha = 0.15 # Refined differential weight for exploration-exploitation balance + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Adjusted local search step for precision + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.05 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.05 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = OptimizedDualPhaseAdaptiveHybridOptimizationV4(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/OptimizedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/OptimizedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..4f52058ec --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDualStrategyAdaptiveDE.py @@ -0,0 +1,125 @@ +import numpy as np + + +class OptimizedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 # Slightly increased crossover probability + self.elitism_rate = 0.3 # Increased elitism rate + self.local_search_prob = 0.2 # Increased local search probability + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/OptimizedDynamicAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/OptimizedDynamicAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..c97c56fe2 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicAdaptiveHybridOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class OptimizedDynamicAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 300 + initial_mutation_factor = 0.5 # Moderately aggressive initial mutation + initial_crossover_prob = 0.9 # Initially very high crossover probability + adaptive_factor_mut = 0.001 # Finer adaptive change for mutation factor + adaptive_factor_cross = 0.001 # Finer adaptive change for crossover probability + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + num_iterations = self.budget // population_size + mutation_factor = initial_mutation_factor + crossover_prob = initial_crossover_prob + + for iteration in range(num_iterations): + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Dynamically adjust mutation and crossover based on performance every 5 iterations + if iteration % 5 == 0: + current_mean_fitness = np.mean(fitness) + if best_value < current_mean_fitness: + mutation_factor = max(0.1, mutation_factor + adaptive_factor_mut) + crossover_prob = max(0.1, crossover_prob - adaptive_factor_cross) + else: + mutation_factor = max(0.1, mutation_factor - adaptive_factor_mut) + crossover_prob = min(1.0, crossover_prob + adaptive_factor_cross) + + mutant = np.clip(a + mutation_factor * (b - c), self.lower_bound, self.upper_bound) + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + trial_fitness = func(trial) + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/OptimizedDynamicDualPhaseStrategyV13.py b/nevergrad/optimization/lama/OptimizedDynamicDualPhaseStrategyV13.py new file mode 100644 index 000000000..a966351f3 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicDualPhaseStrategyV13.py @@ -0,0 +1,85 @@ +import numpy as np + + +class OptimizedDynamicDualPhaseStrategyV13: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + if phase == 1: + # Use a jitter-based approach to reduce stagnation in phase 1 + jitter = 0.0001 * np.random.normal(size=self.dimension) + mutant = population[best_idx] + self.F * (population[a] - population[b]) + jitter + else: + # Utilize an extra differential vector in phase 2 to enhance exploration + d = np.random.choice(idxs) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.3 * (population[d] - population[best_idx]) + ) + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic parameter adjustment with more aggressive shifting towards the end + scale = iteration / total_iterations + self.F = np.clip(0.9 - 0.8 * scale, 0.1, 1) # Linearly decrease F to increase convergence stability + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py b/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py new file mode 100644 index 000000000..d9adeb2a8 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus.py @@ -0,0 +1,141 @@ +import numpy as np + + +class OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive adjustments for beta and alpha + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Optimized enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedSimulatedAnnealing.py b/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedSimulatedAnnealing.py new file mode 100644 index 000000000..78a26410d --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicGradientBoostedSimulatedAnnealing.py @@ -0,0 +1,157 @@ +import numpy as np + + +class OptimizedDynamicGradientBoostedSimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.99 # Cooling rate for initial phase + beta_initial = 2.0 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.5 + alpha = 0.99 + elif evaluations < phase2: + beta = 2.0 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.5 + alpha = 0.95 + else: + beta = 3.0 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Enhanced Smart Memory Reinforcement + if evaluations % (self.budget // 10) == 0: + best_idx = np.argmin(memory_scores) + for _ in range(memory_size // 4): + x_candidate = memory[best_idx] + np.random.normal(0, T, self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.008): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/OptimizedDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/OptimizedDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..0d788ad36 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicQuantumSwarmOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class OptimizedDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.9, + cognitive_weight=2.5, + social_weight=1.5, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def update_parameters(self, iteration): + # Update parameters dynamically with enhancements + if iteration % 1000 == 0 and iteration > 0: + best_value_avg = np.mean(self.personal_best_values) + global_improvement = abs(self.global_best_value - best_value_avg) / self.global_best_value + if global_improvement < 0.01: + self.inertia_weight *= 0.9 + self.cognitive_weight *= 1.1 + self.social_weight *= 1.1 + + self.inertia_weight = max(0.4, min(0.9, self.inertia_weight)) + self.cognitive_weight = max(1.5, min(2.5, self.cognitive_weight)) + self.social_weight = max(1.2, min(1.8, self.social_weight)) + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + cognitive_component = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + social_component = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + cognitive_component + social_component + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/OptimizedDynamicRestartAdaptiveDE.py b/nevergrad/optimization/lama/OptimizedDynamicRestartAdaptiveDE.py new file mode 100644 index 000000000..5a1bdb9a8 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedDynamicRestartAdaptiveDE.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize + + +class OptimizedDynamicRestartAdaptiveDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.85 + self.elitism_rate = 0.3 + self.local_search_prob = 0.25 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 50 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Gradient-based adjustment + result = minimize(func, best_x, method="BFGS", options={"maxiter": 15}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/OptimizedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/OptimizedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..1630554c8 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class OptimizedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Reinforce diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedEnhancedAdaptiveMetaNetAQAPSO.py b/nevergrad/optimization/lama/OptimizedEnhancedAdaptiveMetaNetAQAPSO.py new file mode 100644 index 000000000..670c131a7 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedEnhancedAdaptiveMetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class OptimizedEnhancedAdaptiveMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.15 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 3000 + self.meta_net_lr = 0.5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedEnhancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/OptimizedEnhancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..cc7fe5190 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedEnhancedDualStrategyAdaptiveDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class OptimizedEnhancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.85 + self.final_mutation_factor = 0.35 + self.crossover_prob = 0.85 + self.elitism_rate = 0.3 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.01 * ( + np.random.rand(self.dim) - 0.5 + ) # Even smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/OptimizedEnhancedDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/OptimizedEnhancedDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..4aacdf8a8 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedEnhancedDynamicFireworkAlgorithm.py @@ -0,0 +1,91 @@ +import numpy as np + + +class OptimizedEnhancedDynamicFireworkAlgorithm: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedEvolutiveStrategy.py b/nevergrad/optimization/lama/OptimizedEvolutiveStrategy.py new file mode 100644 index 000000000..4b5678a6b --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedEvolutiveStrategy.py @@ -0,0 +1,54 @@ +import numpy as np + + +class OptimizedEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=10): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate=0.1, mutation_strength=0.5): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def __call__(self, func): + # Parameters + population_size = 10 + num_generations = self.budget // population_size + num_best = 2 + mutation_rate = 0.1 + mutation_strength_initial = 0.5 + decay_factor = 0.99 + + # Initialize + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + # Evolution loop + for _ in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + # Generate new population + population = self.mutate(best_population, mutation_rate, mutation_strength_initial) + mutation_strength_initial *= decay_factor + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/OptimizedExplorationConvergenceStrategy.py b/nevergrad/optimization/lama/OptimizedExplorationConvergenceStrategy.py new file mode 100644 index 000000000..c1a5f7364 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedExplorationConvergenceStrategy.py @@ -0,0 +1,96 @@ +import numpy as np + + +class OptimizedExplorationConvergenceStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=300, + elite_fraction=0.04, + mutation_intensity=0.25, + crossover_probability=0.85, + gradient_step=0.1, + mutation_decay=0.92, + gradient_enhancement_cycle=3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement_cycle = gradient_enhancement_cycle + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def adaptive_gradient(self, individual, func, best_individual, iteration): + if iteration % self.gradient_enhancement_cycle == 0: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.sqrt(np.dot(gradient_direction, gradient_direction))) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + iteration = 0 + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = [] + for i in range(self.population_size): + if i < len(elites): + new_population.append(self.adaptive_gradient(elites[i], func, best_individual, iteration)) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population.append(child) + + population = np.array(new_population) + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + + evaluations += self.population_size + iteration += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimizedGlobalStructureAwareEvolver.py b/nevergrad/optimization/lama/OptimizedGlobalStructureAwareEvolver.py new file mode 100644 index 000000000..b5685b4b0 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedGlobalStructureAwareEvolver.py @@ -0,0 +1,93 @@ +import numpy as np + + +class OptimizedGlobalStructureAwareEvolver: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 + elite_size = 60 + evaluations = 0 + mutation_scale = 0.05 + adaptive_factor = 0.98 + recombination_prob = 0.85 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + diverse_mutation_scale = mutation_scale * 2 + for idx in range(population_size - elite_size): + if np.random.rand() < 0.5: # Increased mutation within elites with diverse scale + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + np.random.normal( + 0, diverse_mutation_scale, self.dim + ) + population[idx] = np.clip(population[idx], self.lb, self.ub) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if evaluations % 100 == 0: # Frequent global mutations to explore potentially better solutions + structure_scale = 0.5 + structure_population = np.random.normal(0, structure_scale, (population_size // 3, self.dim)) + structure_population = np.clip( + structure_population + + population[np.random.choice(population_size, population_size // 3)], + self.lb, + self.ub, + ) + structure_fitness = np.array([func(ind) for ind in structure_population]) + evaluations += population_size // 3 + + combined_population = np.concatenate((population, structure_population), axis=0) + combined_fitness = np.concatenate((fitness, structure_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedGradientBalancedPSO.py b/nevergrad/optimization/lama/OptimizedGradientBalancedPSO.py new file mode 100644 index 000000000..0f6feab34 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedGradientBalancedPSO.py @@ -0,0 +1,77 @@ +import numpy as np + + +class OptimizedGradientBalancedPSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.95, + final_inertia=0.3, + cognitive_weight=1.8, + social_weight=1.8, + gradient_weight=0.2, + mutate_prob=0.1, + mutate_scale=0.03, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.gradient_weight = gradient_weight + self.mutate_prob = mutate_prob + self.mutate_scale = mutate_scale + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_reduction = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * self.inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.gradient_weight + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component + gradient_component + ) + + if np.random.rand() < self.mutate_prob: + velocities[i] += np.random.normal(0, self.mutate_scale, self.dim) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch.py b/nevergrad/optimization/lama/OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch.py new file mode 100644 index 000000000..f4a813c89 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 25 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Adaptive exploration based on current progress + if evaluations % (self.budget // 5) == 0: + adaptive_exploration_radius = 0.2 + 0.8 * (1 - T / T_initial) + for _ in range(memory_size // 3): + x_candidate = memory[ + np.random.randint(memory_size) + ] + adaptive_exploration_radius * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=60, step_size=0.004): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.15): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/OptimizedGradientMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/OptimizedGradientMemorySimulatedAnnealing.py new file mode 100644 index 000000000..d15c545fc --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedGradientMemorySimulatedAnnealing.py @@ -0,0 +1,175 @@ +import numpy as np + + +class OptimizedGradientMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Periodic intensive localized search for memory refinement + if evaluations % (self.budget // 4) == 0: + for i in range(memory_size): + localized_x = self._local_refinement(func, memory[i]) + f_localized = func(localized_x) + evaluations += 1 + if f_localized < memory_scores[i]: + memory[i] = localized_x + memory_scores[i] = f_localized + if f_localized < self.f_opt: + self.f_opt = f_localized + self.x_opt = localized_x + + # Fine-tuning of best solutions found so far + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 3): + fine_x = self._fine_tuning(func, memory[np.argmin(memory_scores)]) + f_fine = func(fine_x) + evaluations += 1 + if f_fine < self.f_opt: + self.f_opt = f_fine + self.x_opt = fine_x + + worst_idx = np.argmax(memory_scores) + if f_fine < memory_scores[worst_idx]: + memory[worst_idx] = fine_x + memory_scores[worst_idx] = f_fine + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _fine_tuning(self, func, x, iters=30, step_size=0.002): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x diff --git a/nevergrad/optimization/lama/OptimizedHybridAdaptiveDualPhaseStrategyV7.py b/nevergrad/optimization/lama/OptimizedHybridAdaptiveDualPhaseStrategyV7.py new file mode 100644 index 000000000..46bbe16a7 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHybridAdaptiveDualPhaseStrategyV7.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimizedHybridAdaptiveDualPhaseStrategyV7: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using additional differential vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjust parameters based on sigmoid and exponential functions for more adaptive control + sigmoid_scale = 1 / (1 + np.exp(-10 * (iteration / total_iterations - 0.5))) + self.F = np.clip(0.9 * sigmoid_scale + 0.1, 0.1, 1) # Adaptive range for F + self.CR = 0.8 - 0.7 * sigmoid_scale # Adaptive range for CR + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/OptimizedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..5dc0de70d --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class OptimizedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.7 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.4 + self.memory_size = 30 + self.w = 0.7 + self.c1 = 1.4 + self.c2 = 1.4 + self.phase_switch_ratio = 0.6 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = OptimizedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/OptimizedHybridExplorationOptimization.py b/nevergrad/optimization/lama/OptimizedHybridExplorationOptimization.py new file mode 100644 index 000000000..ecbaa713e --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHybridExplorationOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class OptimizedHybridExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.496 # Cognitive constant + c2 = 1.496 # Social constant + w = 0.729 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + max_exploration_cycles = 40 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = OptimizedHybridExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/OptimizedHybridSearch.py b/nevergrad/optimization/lama/OptimizedHybridSearch.py new file mode 100644 index 000000000..0282b5998 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHybridSearch.py @@ -0,0 +1,65 @@ +import numpy as np + + +class OptimizedHybridSearch: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.5 + self.global_coeff = 0.6 + self.local_coeff = 0.6 + self.inertia_weight = 0.9 + self.decrement = (self.inertia_weight - 0.4) / (budget * 0.6) + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + self.inertia_weight -= self.decrement + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/OptimizedHybridStrategyDE.py b/nevergrad/optimization/lama/OptimizedHybridStrategyDE.py new file mode 100644 index 000000000..df6d9ed03 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHybridStrategyDE.py @@ -0,0 +1,74 @@ +import numpy as np + + +class OptimizedHybridStrategyDE: + def __init__(self, budget=10000, population_size=150, F=0.85, CR=0.95, adaptive=True): + self.budget = budget + self.population_size = population_size + self.F = F # Differential weight, potentially adaptive + self.CR = CR # Crossover probability + self.adaptive = adaptive # Enable adaptive control of parameters + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + # Adaptation coefficients + F_l, F_u = 0.6, 1.0 # Lower and upper bounds for F + CR_l, CR_u = 0.85, 1.0 # Adaptive bounds for CR + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + if self.adaptive: + # Adaptive F and CR based on normalized evaluations + self.F = F_l + (F_u - F_l) * np.exp(-4.0 * (self.budget - evaluations) / self.budget) + self.CR = CR_l + (CR_u - CR_l) * np.exp(-4.0 * evaluations / self.budget) + + # Mutation: DE/rand/1 with possible best strategy switch + if np.random.rand() < 0.5: # 50% chance to switch strategy + idxs = np.arange(self.population_size) + idxs = np.delete(idxs, i) + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = np.clip( + population[c] + self.F * (population[a] - population[b]), self.lb, self.ub + ) + else: + best_idx = np.argmin(fitness) # Always find the current best + idxs = np.arange(self.population_size) + idxs = np.delete(idxs, i) + a, b = np.random.choice(idxs, 2, replace=False) + mutant = np.clip( + population[i] + + self.F * (best_individual - population[i]) + + self.F * (population[a] - population[b]), + self.lb, + self.ub, + ) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < fitness[best_idx]: + best_idx = i + best_individual = trial + + if evaluations >= self.budget: + break + + return fitness[best_idx], best_individual diff --git a/nevergrad/optimization/lama/OptimizedHyperStrategicOptimizerV53.py b/nevergrad/optimization/lama/OptimizedHyperStrategicOptimizerV53.py new file mode 100644 index 000000000..7cdba6b57 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedHyperStrategicOptimizerV53.py @@ -0,0 +1,79 @@ +import numpy as np + + +class OptimizedHyperStrategicOptimizerV53: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.06, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Increased base mutation factor for enhanced exploration + self.F_range = F_range # Slightly narrowed mutation factor range for more controlled mutations + self.CR = CR # Adjusted Crossover probability to facilitate better exploration-exploitation balance + self.elite_fraction = elite_fraction # Reduced elite fraction to sharpen focus on the top performers + self.mutation_strategy = mutation_strategy # Mutation strategy remains adaptive + self.dim = 5 # Fixed dimension + self.lb = -5.0 # Search space lower bound + self.ub = 5.0 # Search space upper bound + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization main loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Adaptive strategy for base selection using a higher probability for the best individual + if np.random.rand() < 0.8: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite base if not adaptive + base = population[np.random.choice(elite_indices)] + + # Dynamic F adjustment + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness comparison + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit loop if budget exceeded + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimizedIslandEvolutionStrategyV4.py b/nevergrad/optimization/lama/OptimizedIslandEvolutionStrategyV4.py new file mode 100644 index 000000000..8364d929f --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedIslandEvolutionStrategyV4.py @@ -0,0 +1,97 @@ +import numpy as np + + +class OptimizedIslandEvolutionStrategyV4: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=10, + population_per_island=40, + migration_rate=0.30, + mutation_intensity=1.0, + mutation_decay=0.98, + elite_ratio=0.20, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + # Introduce new genetic material by shuffling some individuals between islands + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) # Shuffle the migration indices to mix individuals + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OptimizedMemoryEnhancedAdaptiveStrategyV70.py b/nevergrad/optimization/lama/OptimizedMemoryEnhancedAdaptiveStrategyV70.py new file mode 100644 index 000000000..19266979c --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedMemoryEnhancedAdaptiveStrategyV70.py @@ -0,0 +1,96 @@ +import numpy as np + + +class OptimizedMemoryEnhancedAdaptiveStrategyV70: + def __init__( + self, + budget, + dimension=5, + population_size=50, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.3, + memory_size=15, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover factor + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.best_f = float("inf") + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[c] + self.F * (population[a] - population[b]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjust mutation and crossover rates based on feedback + scale = iteration / total_iterations + self.F = 0.5 + (0.5 * np.sin(np.pi * scale)) * ( + np.exp(-0.1 * (self.best_f if self.best_f < 1 else 1)) + ) + self.CR = 0.9 - (0.4 * np.cos(np.pi * scale)) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + self.best_f = fitnesses[best_idx] + total_iterations = self.budget // self.pop_size + + for iteration in range(total_iterations): + phase = 1 if iteration < total_iterations * self.switch_ratio else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < self.best_f: + best_idx = i + self.best_f = trial_fitness + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedMemoryGuidedAdaptiveStrategyV81.py b/nevergrad/optimization/lama/OptimizedMemoryGuidedAdaptiveStrategyV81.py new file mode 100644 index 000000000..65b31fd24 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedMemoryGuidedAdaptiveStrategyV81.py @@ -0,0 +1,82 @@ +import numpy as np + + +class OptimizedMemoryGuidedAdaptiveStrategyV81: + def __init__( + self, budget, dimension=5, population_size=100, F_base=0.5, CR_base=0.9, memory_size=15, adaptive=True + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.memory_size = memory_size + self.adaptive = adaptive + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, phase): + size = len(population) + indices = np.random.choice(size, 3, replace=False) + a, b, c = indices[0], indices[1], indices[2] + if phase == 1: # Exploration + mutant = population[a] + self.F_base * (population[b] - population[c]) + else: # Exploitation + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F_base * (population[b] - population[c]) + 0.1 * memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR_base + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + if self.adaptive: + self.F_base = 0.5 + 0.4 * np.sin(np.pi * iteration / total_iterations) + self.CR_base = 0.6 + 0.3 * np.cos(np.pi * iteration / total_iterations) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + phase = 1 if evaluations < self.budget / 2 else 2 + mutant = self.mutate(population, best_idx, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedMemoryResponsiveAdaptiveStrategyV78.py b/nevergrad/optimization/lama/OptimizedMemoryResponsiveAdaptiveStrategyV78.py new file mode 100644 index 000000000..3e0f50655 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedMemoryResponsiveAdaptiveStrategyV78.py @@ -0,0 +1,70 @@ +import numpy as np + + +class OptimizedMemoryResponsiveAdaptiveStrategyV78: + def __init__(self, budget, dimension=5, population_size=100, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.F_base = 0.5 + self.CR_base = 0.9 + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + a, b, c = np.random.choice(size, 3, replace=False) + mutation_factor = self.F_base + 0.5 * np.sin( + 2 * np.pi * (1 - (index / self.budget)) + ) # Dynamic F based on progress + mutant = population[a] + mutation_factor * (population[b] - population[c]) + if self.memory: + memory_effect = np.mean(self.memory, axis=0) + mutant += 0.1 * memory_effect # Reduced memory influence + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_rate = self.CR_base + 0.5 * np.cos( + 2 * np.pi * (1 - (len(self.memory) / self.memory_size)) + ) # Dynamic CR based on memory usage + crossover_mask = np.random.rand(self.dimension) < crossover_rate + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + # Replace older memories based on a probability related to improvement magnitude + if np.random.rand() < (f_target - f_trial) / f_target: + self.memory[np.random.randint(self.memory_size)] = trial - target + return trial, f_trial + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, evaluations) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + evaluations += 1 + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedParallelStrategyDE.py b/nevergrad/optimization/lama/OptimizedParallelStrategyDE.py new file mode 100644 index 000000000..347b2cb77 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedParallelStrategyDE.py @@ -0,0 +1,64 @@ +import numpy as np + + +class OptimizedParallelStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_range=0.3, CR=0.9, strategy="adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically + if self.strategy == "adaptive": + best_idx = np.argmin(fitness) + rand_idx = np.random.choice([idx for idx in range(self.population_size) if idx != i]) + base = population[rand_idx] if np.random.rand() < 0.5 else population[best_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Find and return the best solution + best_idx = np.argmin(fitness) + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/OptimizedPrecisionAdaptiveStrategy.py b/nevergrad/optimization/lama/OptimizedPrecisionAdaptiveStrategy.py new file mode 100644 index 000000000..084985dfc --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedPrecisionAdaptiveStrategy.py @@ -0,0 +1,73 @@ +import numpy as np + + +class OptimizedPrecisionAdaptiveStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + population_size = 150 + elite_size = int(0.15 * population_size) + mutation_rate = 0.5 + mutation_scale = 0.2 + crossover_rate = 0.8 + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + + # Select elites to carry over to next generation + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + # Generate the rest of the new population + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + new_population.append(child) + + new_population = np.vstack((new_population)) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + # Combine new population with elites + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt + + +# Example usage: +# optimizer = OptimizedPrecisionAdaptiveStrategy(budget=10000) +# def example_function(x): return np.sum(x**2) +# best_value, best_solution = optimizer(example_function) diff --git a/nevergrad/optimization/lama/OptimizedPrecisionTunedCrossoverElitistStrategyV13.py b/nevergrad/optimization/lama/OptimizedPrecisionTunedCrossoverElitistStrategyV13.py new file mode 100644 index 000000000..6731a6aef --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedPrecisionTunedCrossoverElitistStrategyV13.py @@ -0,0 +1,81 @@ +import numpy as np + + +class OptimizedPrecisionTunedCrossoverElitistStrategyV13: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.25, + mutation_intensity=0.02, + crossover_rate=0.9, + adaptivity_coefficient=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptivity_coefficient = adaptivity_coefficient + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.adaptive_crossover(parent1, parent2, evaluations) + else: + # Mutation of an elite + child = self.adaptive_mutate(parent1, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutate(self, individual, evaluations): + # Adaptive mutation intensity based on stage of optimization process + normalized_time = evaluations / self.budget + intensity = self.mutation_intensity * np.exp(-self.adaptivity_coefficient * normalized_time) + return individual + np.random.normal(0, intensity, self.dimension) + + def adaptive_crossover(self, parent1, parent2, evaluations): + # Adaptive weighted crossover using a dynamic strategy based on evaluations + normalized_time = evaluations / self.budget + weight = np.random.beta(1 + normalized_time * 5, 1 + (1 - normalized_time) * 5) + return weight * parent1 + (1 - weight) * parent2 diff --git a/nevergrad/optimization/lama/OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3.py b/nevergrad/optimization/lama/OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3.py new file mode 100644 index 000000000..69ae90392 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 80 # Increased population size for better exploration + self.sigma = 0.15 # Further reduced sigma for better precision + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.5 # Further reduced differential weight for stability + self.CR = 0.9 # Slightly increased crossover rate for more diversity + self.elitism_rate = 0.15 # Reduced elitism rate to retain more diversity + self.eval_count = 0 + self.alpha_levy = 0.005 + self.levy_prob = 0.1 # Further reduced levy probability to avoid excessive randomness + self.adaptive_learning_rate = 0.005 # Further reduced adaptive learning rate for stability + self.strategy_switches = [0.25, 0.5, 0.75] + self.local_opt_prob = 0.3 # Further increased probability of local optimization + self.learning_rate_decay = 0.95 # Further increased learning rate decay for stability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.15 # Reduced hybridization probability for stability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/OptimizedQuantumFluxDifferentialSwarm.py b/nevergrad/optimization/lama/OptimizedQuantumFluxDifferentialSwarm.py new file mode 100644 index 000000000..f73519658 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumFluxDifferentialSwarm.py @@ -0,0 +1,55 @@ +import numpy as np + + +class OptimizedQuantumFluxDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 1000 # Population size tailored to the search space + self.F = 0.7 # Differential weight + self.CR = 0.9 # Crossover probability + self.quantum_probability = 0.2 # Probability of quantum mutation + self.learning_rate = 0.1 # Learning rate for quantum mutation refinement + self.adaptation_factor = 0.05 # Smoothing factor for adapting search dynamics + + def __call__(self, func): + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + for i in range(int(self.budget / self.pop_size)): + # Adaptation of parameters based on search progress + phase = i / (self.budget / self.pop_size) + F = self.F + self.adaptation_factor * np.sin(np.pi * phase) + CR = self.CR + self.adaptation_factor * np.cos(np.pi * phase) + + for j in range(self.pop_size): + if np.random.rand() < self.quantum_probability: + # Quantum mutation + mean_state = best_ind + self.learning_rate * (pop[j] - best_ind) + scale = self.learning_rate * np.abs(pop[j] - best_ind) + mutation = np.random.normal(mean_state, scale) + mutation = np.clip(mutation, -5.0, 5.0) + else: + # DE/rand/1/bin strategy + indices = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutation = a + F * (b - c) + mutation = np.clip(mutation, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < CR, mutation, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/OptimizedQuantumGradientExplorationOptimization.py b/nevergrad/optimization/lama/OptimizedQuantumGradientExplorationOptimization.py new file mode 100644 index 000000000..318f10bfa --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumGradientExplorationOptimization.py @@ -0,0 +1,219 @@ +import numpy as np + + +class OptimizedQuantumGradientExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to enhance exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # PSO constants with dynamic adjustments + c1_initial = 2.0 + c2_initial = 2.0 + w_initial = 0.9 + w_final = 0.4 + + # Gradient descent parameters + alpha_initial = 0.1 + beta = 0.9 + epsilon = 1e-8 + + # Differential Evolution parameters + F = 0.5 + CR = 0.9 + + # Diversity enforcement parameters + diversity_threshold = 0.3 + stagnation_counter = 0 + max_stagnation = 10 + + # Exploration improvement parameters + exploration_factor = 0.4 + + # Quantum-inspired rotation matrix + theta = np.pi / 6 + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor_initial = 0.3 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + w = w_initial - (w_initial - w_final) * (i / self.budget) + c1 = c1_initial * (1 - i / self.budget) + c2 = c2_initial * (i / self.budget) + alpha = alpha_initial * (1 - i / self.budget) + + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.2 # Increase learning rate if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = new_position + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Adaptive mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation_factor = mutation_factor_initial * (1 - i / self.budget) + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = OptimizedQuantumGradientExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/OptimizedQuantumHarmonySearch.py b/nevergrad/optimization/lama/OptimizedQuantumHarmonySearch.py new file mode 100644 index 000000000..f2e36d92a --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumHarmonySearch.py @@ -0,0 +1,43 @@ +import numpy as np +from scipy.stats import cauchy + + +class OptimizedQuantumHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, bw=0.1): + self.budget = budget + self.hmcr = hmcr # Harmony Memory Considering Rate + self.par = par # Pitch Adjustment Rate + self.bw = bw # Bandwidth + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.budget, len(func.bounds.lb)) + ) + + for i in range(self.budget): + new_harmony = np.zeros(len(func.bounds.lb)) + for j in range(len(func.bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func.bounds.lb[j], func.bounds.ub[j], scale=self.bw + ) + + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedQuantumHybridDEPSO.py b/nevergrad/optimization/lama/OptimizedQuantumHybridDEPSO.py new file mode 100644 index 000000000..64e70d004 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumHybridDEPSO.py @@ -0,0 +1,162 @@ +import numpy as np + + +class OptimizedQuantumHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.6 # Slightly increased inertia weight for better balance between exploration and exploitation + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior( + population, global_best, alpha=0.2, beta=0.8 + ): # Increased alpha and slightly reduced beta + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedQuantumLevyDifferentialSearch.py b/nevergrad/optimization/lama/OptimizedQuantumLevyDifferentialSearch.py new file mode 100644 index 000000000..cd71dcf31 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedQuantumLevyDifferentialSearch.py @@ -0,0 +1,156 @@ +import numpy as np + + +class OptimizedQuantumLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.6 * progress + cognitive_coefficient = 1.5 - 1.2 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.8 + 0.3 * progress + crossover_rate = 0.9 - 0.6 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.3 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.5, 0.5, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.20: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedRAMEDS.py b/nevergrad/optimization/lama/OptimizedRAMEDS.py new file mode 100644 index 000000000..570fa00cf --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRAMEDS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class OptimizedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.1, + F_max=0.9, + memory_size=20, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Dynamic Mutation Factor + adaptive_F = self.F_max - (evaluations / self.budget) * (self.F_max - self.F_min) + + # Mutation strategy with diversity consideration + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.5 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + adaptive_F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover with diversity enhancement + trial = np.where(np.random.rand(dimension) < self.crossover_rate, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory with diversity strategy + if trial_fitness < np.max(memory_fitness): + update_idx = np.argmax(memory_fitness) + memory[update_idx] = trial + memory_fitness[update_idx] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO.py b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO.py new file mode 100644 index 000000000..0af442cbc --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO.py @@ -0,0 +1,73 @@ +import numpy as np + + +class OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO: + def __init__( + self, + budget=10000, + population_size=40, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=1.8, + social_weight=1.5, + adaptive_component_factor=0.05, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.adaptive_component_factor = adaptive_component_factor + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Adaptive inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + # Optimized gradient-guided component with dynamic adjustment factor and reduced intensity + distance_factor = np.linalg.norm(global_best_position - particles[i]) + gradient_guided_component = ( + self.adaptive_component_factor + * (global_best_position - particles[i]) + / (1 + np.exp(-distance_factor)) + ) + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + gradient_guided_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/OptimizedRefinedAdaptiveHybridSearch.py b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveHybridSearch.py new file mode 100644 index 000000000..cd9f9a9e0 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveHybridSearch.py @@ -0,0 +1,71 @@ +import numpy as np + + +class OptimizedRefinedAdaptiveHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + elite_size = int(0.20 * population_size) + mutation_rate = 0.10 + mutation_scale = lambda t: 0.09 * np.exp(-0.0003 * t) + crossover_rate = 0.80 + + local_search_prob = 0.15 # Fixed probability for local search + local_search_step_scale = lambda t: 0.015 * np.exp(-0.00003 * t) + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/OptimizedRefinedAdaptiveMultiStrategyDE.py b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveMultiStrategyDE.py new file mode 100644 index 000000000..41c469e81 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveMultiStrategyDE.py @@ -0,0 +1,162 @@ +import numpy as np +from scipy.optimize import minimize + + +class OptimizedRefinedAdaptiveMultiStrategyDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 20 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation and crossover factors + success_rate = max(0, (self.budget - self.pop_size * generation) / self.budget) + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) * success_rate + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) * success_rate + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Enhanced selection strategy + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Hybrid mutation strategy based on success rate + if success_rate < 0.3: + mutant = x1 + mutation_factor * (x2 - x3) + elif success_rate < 0.6: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + if np.random.rand() < 0.5: + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + else: + # Gradient-based adjustment + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + result = minimize(func, best_x + perturbation, method="BFGS", options={"maxiter": 10}) + + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/OptimizedRefinedAdaptiveRefinementPSO.py b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveRefinementPSO.py new file mode 100644 index 000000000..6008d54cc --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedAdaptiveRefinementPSO.py @@ -0,0 +1,86 @@ +import numpy as np + + +class OptimizedRefinedAdaptiveRefinementPSO: + def __init__( + self, + budget=10000, + population_size=50, + omega_start=0.9, + omega_end=0.4, + phi_p=0.5, + phi_g=0.8, + phi_l=0.03, + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start # Initial inertia weight + self.omega_end = omega_end # Final inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.phi_l = phi_l # Local neighborhood influence coefficient + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_fitness = np.array([func(p) for p in particles]) + + global_best = particles[np.argmin(personal_best_fitness)] + global_best_fitness = min(personal_best_fitness) + + evaluations = self.population_size + + # Create neighborhood topology + neighborhood_size = int(np.ceil(self.population_size * 0.1)) + neighbors = [ + np.random.choice(list(set(range(self.population_size)) - {i}), neighborhood_size, replace=False) + for i in range(self.population_size) + ] + + # Optimization loop + while evaluations < self.budget: + for i in range(self.population_size): + # Linearly decreasing inertia weight + dynamic_omega = self.omega_start - (self.omega_start - self.omega_end) * ( + evaluations / self.budget + ) + + # Update velocity and position + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + r_l = np.random.random(self.dim) + + # Calculate local best within the neighborhood + local_best = neighbors[i][np.argmin(personal_best_fitness[neighbors[i]])] + + velocity[i] = ( + dynamic_omega * velocity[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + + self.phi_l * r_l * (personal_best[local_best] - particles[i]) + ) + + particles[i] += velocity[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate particle's fitness + current_fitness = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_fitness < personal_best_fitness[i]: + personal_best[i] = particles[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best = particles[i] + global_best_fitness = current_fitness + + return global_best_fitness, global_best diff --git a/nevergrad/optimization/lama/OptimizedRefinedEnhancedRAMEDSv5.py b/nevergrad/optimization/lama/OptimizedRefinedEnhancedRAMEDSv5.py new file mode 100644 index 000000000..0862007e3 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedEnhancedRAMEDSv5.py @@ -0,0 +1,107 @@ +import numpy as np + + +class OptimizedRefinedEnhancedRAMEDSv5: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.90, + F_min=0.4, + F_max=0.9, + memory_size=50, + elite_size=10, + reinit_cycle=100, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.reinit_cycle = reinit_cycle + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % self.reinit_cycle == 0 and evaluations != 0: + # Reinitialize a fraction of the population + reinit_indices = np.random.choice( + range(self.population_size), size=self.population_size // 5, replace=False + ) + population[reinit_indices] = self.lb + (self.ub - self.lb) * np.random.rand( + len(reinit_indices), self.dimension + ) + fitness[reinit_indices] = np.array( + [func(individual) for individual in population[reinit_indices]] + ) + evaluations += len(reinit_indices) + + # Update elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Evolution steps + for i in range(self.population_size): + # Adaptive mutation factor based on Gaussian modulation + F = np.clip(np.random.normal(loc=self.F_max, scale=0.1), self.F_min, self.F_max) + + # Mutation incorporating memory recall + mem_idx = np.random.randint(0, self.memory_size) + mem_individual = ( + memory[mem_idx] + if memory_fitness[mem_idx] != np.inf + else population[np.random.randint(0, self.population_size)] + ) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip( + population[i] + F * (best_solution - mem_individual + a - b), self.lb, self.ub + ) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with replaced individual + worst_memory_idx = np.argmax(memory_fitness) + if memory_fitness[worst_memory_idx] > fitness[i]: + memory[worst_memory_idx] = population[i].copy() + memory_fitness[worst_memory_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedRefinedMemoryDualPhaseStrategyV65.py b/nevergrad/optimization/lama/OptimizedRefinedMemoryDualPhaseStrategyV65.py new file mode 100644 index 000000000..39eba2234 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedRefinedMemoryDualPhaseStrategyV65.py @@ -0,0 +1,98 @@ +import numpy as np + + +class OptimizedRefinedMemoryDualPhaseStrategyV65: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + memory_size=10, + switch_ratio=0.7, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover rate + self.memory_size = memory_size + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # More explorative mutation + mutant = population[best_idx] + self.F * (population[b] - population[c]) + else: + # Use memory to guide mutation in phase 2, more exploitative + memory_effect = ( + np.sum(self.memory, axis=0) / len(self.memory) if self.memory else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + # Save successful changes to memory + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + # Remove oldest memory and add new + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Sigmoid-based dynamic adjustment for parameters + scale = 1 / (1 + np.exp(-10 * ((iteration / total_iterations) - 0.5))) + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + switch_point = int(self.switch_ratio * total_iterations) + + for iteration in range(total_iterations): + phase = 1 if iteration < switch_point else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45.py b/nevergrad/optimization/lama/OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45.py new file mode 100644 index 000000000..2f35b4278 --- /dev/null +++ b/nevergrad/optimization/lama/OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45.py @@ -0,0 +1,80 @@ +import numpy as np + + +class OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.6, + F_range=0.3, + CR=0.9, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Modest increase in the base mutation factor for more explorative mutations + self.F_range = F_range # Reduced mutation range to focus on more precise tuning + self.CR = CR # Slightly reduced crossover probability to balance exploration and exploitation + self.elite_fraction = elite_fraction # Reduced elite fraction to increase diversity in the population + self.mutation_strategy = mutation_strategy # Retain adaptive mutation strategy with enhancements + self.dim = 5 # Dimensionality of the problem remains constant + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Dynamically choose the base individual from the elite pool or the best so far + if ( + np.random.rand() < 0.85 + ): # Increased emphasis on the best individual to promote exploitation + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic mutation factor based on current stage of optimization + F = self.F_base + (np.random.rand() * self.F_range) + + # Mutation strategy DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] is True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if the budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/OscillatoryCrossoverDifferentialEvolution.py b/nevergrad/optimization/lama/OscillatoryCrossoverDifferentialEvolution.py new file mode 100644 index 000000000..f263c1068 --- /dev/null +++ b/nevergrad/optimization/lama/OscillatoryCrossoverDifferentialEvolution.py @@ -0,0 +1,51 @@ +import numpy as np + + +class OscillatoryCrossoverDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality of the problem + self.pop_size = 120 # Adjust population size for a good balance between exploration and exploitation + self.F = 0.8 # Mutation factor + self.CR_init = 0.9 # Initial crossover probability + self.CR_final = 0.1 # Final crossover probability + self.mutation_strategy = "rand/2/bin" # Using two difference vectors for mutation + + def __call__(self, func): + # Initial population uniformly distributed within the search space + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Tracking the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Oscillatory crossover rate + CR = self.CR_final + (self.CR_init - self.CR_final) * np.cos(np.pi * iteration / n_iterations) + + for i in range(self.pop_size): + # Mutation using rand/2/bin strategy + idxs = np.random.choice([idx for idx in range(self.pop_size) if idx != i], 4, replace=False) + a, b, c, d = pop[idxs] + mutant = a + self.F * (b - c + d - pop[i]) + + # Clipping to ensure individuals stay within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/PADE.py b/nevergrad/optimization/lama/PADE.py new file mode 100644 index 000000000..06eee1b52 --- /dev/null +++ b/nevergrad/optimization/lama/PADE.py @@ -0,0 +1,79 @@ +import numpy as np + + +class PADE: + def __init__(self, budget, population_size=50, F_base=0.5, CR_base=0.9): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Initialize adaptive parameters + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + # Early and late phase indicators + early_phase = True + + while evaluations < self.budget: + # Determine phase transition based on budget usage + if evaluations > self.budget * 0.5: + early_phase = False + + for i in range(self.population_size): + # Adaptive mutation strategy depending on phase and fitness + if fitness[i] < np.median(fitness): + F[i] *= 1.1 if early_phase else 1.05 + F[i] = min(F[i], 1) + else: + F[i] *= 0.9 if early_phase else 0.95 + F[i] = max(F[i], 0.1) + + # Mutation and crossover using "best" and "rand" strategy combination + idxs = np.random.choice( + [idx for idx in range(self.population_size) if idx != i], 2, replace=False + ) + best_idx = np.argmin(fitness) + mutant = population[i] + F[i] * ( + population[best_idx] - population[i] + population[idxs[0]] - population[idxs[1]] + ) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + f_trial = func(trial) + evaluations += 1 + + # Selection step + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + # Update optimal found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive CR adjustment + CR[i] = CR[i] * 1.1 if f_trial < fitness[i] else CR[i] * 0.9 + CR[i] = min(max(CR[i], 0.1), 1) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PAMDMDESM.py b/nevergrad/optimization/lama/PAMDMDESM.py new file mode 100644 index 000000000..0025506a2 --- /dev/null +++ b/nevergrad/optimization/lama/PAMDMDESM.py @@ -0,0 +1,96 @@ +import numpy as np + + +class PAMDMDESM: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.5, + F_amp=0.5, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions, initialized empty and filled as better solutions are found + memory = np.zeros((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions tracking + elite = np.zeros((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx].copy() + elite_fitness = fitness[elite_idx].copy() + + # Memory incorporation in crossover and mutation + if evaluations % (self.budget // 10) == 0 and np.any(memory_fitness < np.inf): + memory_selection = memory[np.argmin(memory_fitness)] + else: + memory_selection = None + + for i in range(self.population_size): + # Adaptive mutation factor + F = self.F_base + self.F_amp * np.random.normal() + + # Select mutation strategy based on progression of evaluations + idxs = np.random.choice( + [idx for idx in range(self.population_size) if idx != i], 3, replace=False + ) + a, b, c = population[idxs] + if memory_selection is not None and np.random.rand() < 0.2: + mutant = np.clip(a + F * (memory_selection - b + c), lb, ub) + else: + mutant = np.clip(a + F * (best_solution - b + c), lb, ub) + + # Crossover: Uniform with adaptive probability + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + # Update memory with current solution before it is replaced + if fitness[i] < np.max(memory_fitness): + worst_mem_idx = np.argmax(memory_fitness) + memory[worst_mem_idx] = population[i].copy() + memory_fitness[worst_mem_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial.copy() + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PDEAF.py b/nevergrad/optimization/lama/PDEAF.py new file mode 100644 index 000000000..0489e2fe7 --- /dev/null +++ b/nevergrad/optimization/lama/PDEAF.py @@ -0,0 +1,62 @@ +import numpy as np + + +class PDEAF: + def __init__(self, budget, population_size=50, f_min=0.1, f_max=0.9, cr_min=0.1, cr_max=0.9): + self.budget = budget + self.population_size = population_size + self.f_min = f_min # Minimum scaling factor + self.f_max = f_max # Maximum scaling factor + self.cr_min = cr_min # Minimum crossover probability + self.cr_max = cr_max # Maximum crossover probability + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + num_evals = self.population_size + + while num_evals < self.budget: + for i in range(self.population_size): + # Adapt control parameters linearly based on remaining budget + remaining_budget = self.budget - num_evals + cr = self.cr_min + (self.cr_max - self.cr_min) * (remaining_budget / self.budget) + f = self.f_min + (self.f_max - self.f_min) * (remaining_budget / self.budget) + + # Mutation: DE/rand/1/bin strategy + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = np.clip(x1 + f * (x2 - x3), self.lb, self.ub) + + # Crossover + crossover_mask = np.random.rand(self.dimension) < cr + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dimension)] = True + trial_vector = np.where(crossover_mask, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + + if num_evals >= self.budget: + break + + return best_fitness, best_individual + + +# Usage of PDEAF: +# optimizer = PDEAF(budget=1000) +# best_fitness, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/PGDE.py b/nevergrad/optimization/lama/PGDE.py new file mode 100644 index 000000000..7a9e88862 --- /dev/null +++ b/nevergrad/optimization/lama/PGDE.py @@ -0,0 +1,79 @@ +import numpy as np + + +class PGDE: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.8, + CR_base=0.8, + adaptivity_factor=0.1, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base + self.CR_base = CR_base + self.adaptivity_factor = adaptivity_factor + + def __call__(self, func): + # Initialize population uniformly + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Initialize adaptation parameters for F and CR + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Differential mutation using "rand/1" strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F[i] * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Binomial Crossover + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptive mechanism for exploration and exploitation + F[i] += ( + self.adaptivity_factor * (trial_fitness < fitness[i]) * (F[i] - 0.1) * np.random.randn() + ) + CR[i] += ( + self.adaptivity_factor * (trial_fitness < fitness[i]) * (CR[i] - 0.1) * np.random.randn() + ) + F[i] = max(0.1, min(F[i], 1.0)) + CR[i] = max(0.1, min(CR[i], 1.0)) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PMFSA.py b/nevergrad/optimization/lama/PMFSA.py new file mode 100644 index 000000000..10a416aa7 --- /dev/null +++ b/nevergrad/optimization/lama/PMFSA.py @@ -0,0 +1,60 @@ +import numpy as np + + +class PMFSA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 100 + population = np.random.uniform(*self.bounds, (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def adaptive_search(self, population, func): + global_best_fitness = np.Inf + global_best_individual = None + + evaluations = 0 + short_term_memory = [] + + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += len(population) + + # Update best solution globally + best_idx = np.argmin(fitness) + if fitness[best_idx] < global_best_fitness: + global_best_fitness = fitness[best_idx] + global_best_individual = population[best_idx] + + # Multi-Level Feedback for population adaptation + short_term_memory.append(fitness[best_idx]) + if len(short_term_memory) > 10: + recent_trend = np.std(short_term_memory[-10:]) + if recent_trend < 0.05: + mutation_scale = 0.1 + else: + mutation_scale = 0.5 + else: + mutation_scale = 0.3 + + mutations = np.random.normal(0, mutation_scale, (len(population), self.dimension)) + population += mutations + population = np.clip(population, *self.bounds) + + # Periodic reset with memory of best solutions + if evaluations % 1000 == 0 and evaluations != 0: + population = np.random.uniform(*self.bounds, (len(population) - 1, self.dimension)) + population = np.vstack([population, global_best_individual]) + + return global_best_fitness, global_best_individual + + def __call__(self, func): + initial_population, _ = self.initialize() + best_fitness, best_solution = self.adaptive_search(initial_population, func) + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PPDE.py b/nevergrad/optimization/lama/PPDE.py new file mode 100644 index 000000000..4cd93fd41 --- /dev/null +++ b/nevergrad/optimization/lama/PPDE.py @@ -0,0 +1,70 @@ +import numpy as np + + +class PPDE: + def __init__(self, budget, initial_population_size=30, F_base=0.5, CR_base=0.9): + self.budget = budget + self.initial_population_size = initial_population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base + self.CR_base = CR_base + + def __call__(self, func): + # Initialize population within the bounds and compute initial fitness + population_size = self.initial_population_size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + while num_evals < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Adaptive scaling of mutation factor and crossover rate based on progress + progress = num_evals / self.budget + F = self.F_base * (1 + np.sin(np.pi * progress)) # Modulate F with a sine wave + CR = self.CR_base * (0.5 + 0.5 * np.cos(np.pi * progress)) # Modulate CR with a cosine wave + + # Mutation: DE/rand/1/bin + indices = np.random.choice(np.delete(np.arange(population_size), i), 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + F * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover: binomial + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population.append(trial) + new_fitness.append(trial_fitness) + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + population_size = len(population) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PWDE.py b/nevergrad/optimization/lama/PWDE.py new file mode 100644 index 000000000..7203f32e2 --- /dev/null +++ b/nevergrad/optimization/lama/PWDE.py @@ -0,0 +1,70 @@ +import numpy as np + + +class PWDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.6, + F_amp=0.4, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adaptive mutation factor with progressive wave pattern + F = self.F_base + self.F_amp * np.cos(2 * np.pi * evaluations / self.budget) + + for i in range(self.population_size): + # Select mutation candidates + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Mutation using best and a combination of two random individuals + mutant = np.clip(best_solution + F * (a + b - 2 * c), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimization.py b/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimization.py new file mode 100644 index 000000000..50c1ec691 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimization.py @@ -0,0 +1,69 @@ +import numpy as np + + +class PrecisionAdaptiveCohortOptimization: + def __init__( + self, budget, dimension=5, population_size=100, elite_fraction=0.1, mutation_rate=0.1, beta=0.5 + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_rate = mutation_rate + self.beta = beta # Mutation shrinkage factor for precise tuning + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + for i in range(self.population_size): + if np.random.rand() < self.mutation_rate: # Mutation occurs + parent_idx = np.random.choice(self.elite_count) + parent = elites[parent_idx] + mutation = self.beta * np.random.normal(0, 1, self.dimension) + child = parent + mutation + else: # Crossover between two elites + parents_indices = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(self.dimension) + child = np.concatenate( + ( + population[parents_indices[0]][:crossover_point], + population[parents_indices[1]][crossover_point:], + ) + ) + + # Ensure the child is within bounds + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Dynamically adjust mutation rate and beta for fine-tuning + self.mutation_rate *= 0.98 + self.beta *= 0.95 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimizationV2.py b/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimizationV2.py new file mode 100644 index 000000000..6bdfdafdd --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveCohortOptimizationV2.py @@ -0,0 +1,72 @@ +import numpy as np + + +class PrecisionAdaptiveCohortOptimizationV2: + def __init__(self, budget, dimension=5, population_size=100, elite_fraction=0.15, mutation_intensity=0.2): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial intensity for mutation + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + for i in range(self.population_size): + if np.random.rand() < self.dynamic_mutation_rate(evaluations, self.budget): + # Mutation occurs + parent_idx = np.random.choice(self.elite_count) + parent = elites[parent_idx] + mutation = self.dynamic_mutation_scale(evaluations, self.budget) * np.random.normal( + 0, 1, self.dimension + ) + child = np.clip(parent + mutation, -5.0, 5.0) # Keeping child within bounds + else: + # Crossover between two elites + parents_indices = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + ( + population[parents_indices[0]][:crossover_point], + population[parents_indices[1]][crossover_point:], + ) + ) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def dynamic_mutation_rate(self, evaluations, budget): + # Decrease mutation rate as the budget is consumed, focusing more on exploitation in later stages. + return max(0.05, 1 - (evaluations / budget) ** 1.5) + + def dynamic_mutation_scale(self, evaluations, budget): + # Decrease mutation scale more smoothly to maintain a balance between exploration and fine-tuning. + return self.mutation_intensity * np.exp(-3 * (evaluations / budget)) diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveDecayOptimizer.py b/nevergrad/optimization/lama/PrecisionAdaptiveDecayOptimizer.py new file mode 100644 index 000000000..b83d6aafa --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveDecayOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class PrecisionAdaptiveDecayOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the optimization problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 150 + num_elites = 20 # Increased elite population for better preservation + mutation_factor = 0.85 # Lower initial mutation factor + crossover_rate = 0.7 # Higher initial crossover rate for better exploration + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elite preservation + elite_indices = np.argsort(fitness)[:num_elites] + new_population[:num_elites] = population[elite_indices] + new_fitness[:num_elites] = fitness[elite_indices] + + # Generate new solutions + for i in range(num_elites, population_size): + if current_budget >= self.budget: + break + + # Differential mutation based on random selection + indices = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate new solution + trial_fitness = func(trial) + current_budget += 1 + + # Selection step + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Update the population and fitness + population = new_population + fitness = new_fitness + + # Adapt mutation factor and crossover rate dynamically + mutation_factor *= 0.98 # Slower decay of mutation factor + crossover_rate = min(0.95, crossover_rate * 1.01) # Gradual increase of crossover rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/PrecisionAdaptiveDifferentialEvolutionPlus.py new file mode 100644 index 000000000..216cd48de --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveDifferentialEvolutionPlus.py @@ -0,0 +1,50 @@ +import numpy as np + + +class PrecisionAdaptiveDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Further increased population size for enhanced diversity + self.F_base = 0.8 # Initial higher mutation factor for aggressive exploration + self.CR_base = 0.7 # Initial crossover probability + self.adapt_rate = 0.05 # Rate at which parameters adapt + + def __call__(self, func): + # Initialize population within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Find the best initial solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + for i in range(int(self.budget / self.pop_size)): + F = self.F_base * (1 - self.adapt_rate * i / (self.budget / self.pop_size)) + CR = self.CR_base * (1 + self.adapt_rate * np.sin(np.pi * i / (self.budget / self.pop_size))) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveDynamicStrategyV33.py b/nevergrad/optimization/lama/PrecisionAdaptiveDynamicStrategyV33.py new file mode 100644 index 000000000..18ed854ce --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveDynamicStrategyV33.py @@ -0,0 +1,66 @@ +import numpy as np + + +class PrecisionAdaptiveDynamicStrategyV33: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Initial mutation factor + self.CR = CR_init # Initial crossover rate + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, iteration): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + # Adjust F based on the iteration number for a nonlinear dynamic mutation factor + F_dynamic = self.F * np.exp(-4 * iteration / self.budget) + mutant = population[best_idx] + F_dynamic * ( + population[a] - population[b] + population[c] - population[best_idx] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, iteration): + # Dynamic CR based on a sinusoidal function + CR_dynamic = self.CR * (0.5 + 0.5 * np.sin(2 * np.pi * iteration / self.budget)) + crossover_mask = np.random.rand(self.dimension) < CR_dynamic + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, evaluations) + trial = self.crossover(population[i], mutant, evaluations) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i], fitnesses[i] = trial, trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveGlobalClimbingEnhancer.py b/nevergrad/optimization/lama/PrecisionAdaptiveGlobalClimbingEnhancer.py new file mode 100644 index 000000000..79bf1144b --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveGlobalClimbingEnhancer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class PrecisionAdaptiveGlobalClimbingEnhancer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 500 # Increased population size for more exploration + elite_size = 100 # Bigger elite size to retain more high-quality solutions + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.15 # Slightly increased initial mutation scale + adaptive_factor = 0.85 # Increased adaptiveness to fitness landscape + recombination_prob = 0.95 # Increased probability of recombination + + # Enhancing exploration and exploitation + last_best_fitness = np.inf + + while evaluations < self.budget: + success_count = 0 + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + parents_indices = np.random.choice( + population_size, 3, replace=False + ) # Reduce number of parents + parents = population[parents_indices] + child = np.mean(parents, axis=0) # Mean recombination + else: + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + # Adaptive mutation control + distance_to_best = np.linalg.norm(population[best_idx] - child) + individual_mutation_scale = mutation_scale * (adaptive_factor**distance_to_best) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + success_count += 1 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if fitness[current_best_idx] < last_best_fitness: + last_best_fitness = fitness[current_best_idx] + success_rate = success_count / population_size + mutation_scale += 0.02 * (1 - success_rate) # More aggressive scaling + adaptive_factor = min( + 1.0, adaptive_factor + 0.03 * success_rate + ) # More dynamic adaptivity + + # Elite reinforcement with periodic global refinement + if evaluations % 500 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size): + if ( + idx not in elite_indices and np.random.rand() < 0.15 + ): # Increased chance of elite replacements + population[idx] = elite_individuals[np.random.choice(elite_size)] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionAdaptiveGradientClusteringPSO.py b/nevergrad/optimization/lama/PrecisionAdaptiveGradientClusteringPSO.py new file mode 100644 index 000000000..84f35ee3a --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptiveGradientClusteringPSO.py @@ -0,0 +1,76 @@ +import numpy as np + + +class PrecisionAdaptiveGradientClusteringPSO: + def __init__( + self, + budget=10000, + population_size=100, + initial_inertia=0.95, + final_inertia=0.35, + cognitive_weight=2.5, + social_weight=1.8, + cluster_factor=0.02, + adaptation_rate=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.cluster_factor = cluster_factor + self.adaptation_rate = adaptation_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + cluster_center = np.mean(particles, axis=0) + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + cluster_component = self.cluster_factor * (cluster_center - particles[i]) # Cluster force + + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + cluster_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + # Adaptive clustering adjustment + if evaluation_counter % (self.budget // 10) == 0: + self.cluster_factor *= 1 - self.adaptation_rate # Gradually reduce clustering influence + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/PrecisionAdaptivePSO.py b/nevergrad/optimization/lama/PrecisionAdaptivePSO.py new file mode 100644 index 000000000..3c22f4a41 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionAdaptivePSO.py @@ -0,0 +1,71 @@ +import numpy as np + + +class PrecisionAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=150, + inertia_strategy="nonlinear", + phi_p=0.1, + phi_g=0.9, + min_omega=0.1, + max_omega=0.9, + ): + self.budget = budget + self.population_size = population_size + self.inertia_strategy = inertia_strategy + self.phi_p = phi_p # Personal attraction coefficient + self.phi_g = phi_g # Global attraction coefficient + self.min_omega = min_omega + self.max_omega = max_omega + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits of the search space + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + omega = self.compute_inertia(evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + # Update velocities and positions with dynamic inertia + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def compute_inertia(self, current_step): + if self.inertia_strategy == "nonlinear": + return self.max_omega - (self.max_omega - self.min_omega) * (current_step**2 / self.budget**2) + else: + return self.max_omega - (self.max_omega - self.min_omega) * (current_step / self.budget) diff --git a/nevergrad/optimization/lama/PrecisionBalancedAdaptivePSO.py b/nevergrad/optimization/lama/PrecisionBalancedAdaptivePSO.py new file mode 100644 index 000000000..ec763caf8 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionBalancedAdaptivePSO.py @@ -0,0 +1,67 @@ +import numpy as np + + +class PrecisionBalancedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_initial=0.9, + omega_final=0.4, + phi_p=0.2, + phi_g=0.4, + adaptive_precision=True, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal best influence factor + self.phi_g = phi_g # Global best influence factor + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.adaptive_precision = adaptive_precision # Flag to enable adaptive precision + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + # Update inertia over time to balance exploration and exploitation + omega = self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/PrecisionBalancedEvolutionStrategy.py b/nevergrad/optimization/lama/PrecisionBalancedEvolutionStrategy.py new file mode 100644 index 000000000..87532ab59 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionBalancedEvolutionStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class PrecisionBalancedEvolutionStrategy: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.sigma = 0.5 # Initial standard deviation for mutations + self.learning_rate = 0.1 # Learning rate for self-adaptation of sigma + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation with adaptive sigma + return np.clip( + individual + np.random.normal(0, self.sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def recombine(self, parents): + # Intermediate recombination + return np.mean(parents, axis=0) + + def select(self, population, fitness): + # Select the best individual + best_idx = np.argmin(fitness) + return population[best_idx], fitness[best_idx] + + def adapt_sigma(self, success_rate): + # Adapt sigma based on success rate + if success_rate > 0.2: + self.sigma /= self.learning_rate + elif success_rate < 0.2: + self.sigma *= self.learning_rate + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = self.select(population, fitness) + + evaluations = self.population_size + successful_mutations = 0 + + while evaluations < self.budget: + for i in range(self.population_size): + mutant = self.mutate(population[i]) + mutant_fitness = func(mutant) + + if mutant_fitness < fitness[i]: + population[i] = mutant + fitness[i] = mutant_fitness + successful_mutations += 1 + + if mutant_fitness < best_fitness: + best_individual = mutant + best_fitness = mutant_fitness + + evaluations += 1 + if evaluations >= self.budget: + break + + # Adapt sigma based on the success rate of mutations + success_rate = successful_mutations / self.population_size + self.adapt_sigma(success_rate) + successful_mutations = 0 # Reset for next generation + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/PrecisionBalancedOptimizer.py b/nevergrad/optimization/lama/PrecisionBalancedOptimizer.py new file mode 100644 index 000000000..06c5e0367 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionBalancedOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class PrecisionBalancedOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 150 + mutation_factor = 0.5 # Lower mutation factor to increase precision in exploitation + crossover_rate = 0.8 # Higher crossover to explore beneficial traits + learning_rate = 0.1 # Learning rate for adaptive mechanisms + + # Initial population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Evolution process + while evaluations < self.budget: + for i in range(population_size): + # Mutation using a differential evolution strategy + indices = [index for index in range(population_size) if index != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = a + mutation_factor * (b - c) + mutant_vector = np.clip(mutant_vector, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial_vector + + # Adaptive mechanism to fine-tune parameters + mutation_factor = max(0.1, mutation_factor - learning_rate * np.random.randn()) + crossover_rate = min(1.0, max(0.5, crossover_rate + learning_rate * np.random.randn())) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionBoostedDifferentialEvolution.py b/nevergrad/optimization/lama/PrecisionBoostedDifferentialEvolution.py new file mode 100644 index 000000000..85ea85d85 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionBoostedDifferentialEvolution.py @@ -0,0 +1,61 @@ +import numpy as np + + +class PrecisionBoostedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Increased population for enhanced diversity + self.F_base = 0.9 # Increased base mutation factor for aggressive exploration + self.CR_base = 0.8 # Base crossover probability + self.F_min = 0.2 # Higher minimum mutation factor to avoid too low exploration at later stages + self.CR_min = 0.5 # Minimum crossover rate to maintain a decent level of recombination throughout + self.F_scaling = 0.97 # Decay scaling for F + self.CR_scaling = 0.985 # Decay scaling for CR + + def __call__(self, func): + # Initialize population uniformly within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify the best individual + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx] + + # Evolutionary process given the budget constraints + n_iterations = int(self.budget / self.pop_size) + F = self.F_base + CR = self.CR_base + + for iteration in range(n_iterations): + # Decay mutation and crossover probabilities + F = max(self.F_min, F * self.F_scaling) + CR = max(self.CR_min, CR * self.CR_scaling) + + for i in range(self.pop_size): + # Mutation strategy: 'rand/1/bin' + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F * (b - c) + + # Clipping to bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + trial = np.array([mutant[j] if np.random.rand() < CR else pop[i][j] for j in range(self.dim)]) + + # Evaluate trial solution + trial_fitness = func(trial) + + # Selection + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/PrecisionCosineAdaptiveDifferentialSwarm.py b/nevergrad/optimization/lama/PrecisionCosineAdaptiveDifferentialSwarm.py new file mode 100644 index 000000000..3cf2a6b30 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionCosineAdaptiveDifferentialSwarm.py @@ -0,0 +1,56 @@ +import numpy as np + + +class PrecisionCosineAdaptiveDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Reduced population size to focus on quality solutions + self.F_base = 0.85 # Increased base mutation factor for stronger mutations + self.CR = 0.85 # Slightly reduced crossover probability to balance exploration + self.adaptive_F_adjustment = 0.1 # Lower change rate for mutation factor + self.top_percentile = 0.1 # Reduced to top 10% to further focus on elite solutions + self.epsilon = 1e-10 # Small constant to avoid division by zero in cosine computation + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Cosine adaptive mutation factor with precision control + iteration_ratio = i / (self.budget / self.pop_size + self.epsilon) + F_adaptive = self.F_base + self.adaptive_F_adjustment * np.cos(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Mutation strategy: DE/current-to-best/1 with cosine adaptive F + idxs = np.argsort(fitness)[: int(self.top_percentile * self.pop_size)] # top individuals + best_local = pop[np.random.choice(idxs)] + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adaptive * (best_local - pop[j]) + F_adaptive * (a - b) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/PrecisionDifferentialEvolution.py b/nevergrad/optimization/lama/PrecisionDifferentialEvolution.py new file mode 100644 index 000000000..06035f810 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionDifferentialEvolution.py @@ -0,0 +1,48 @@ +import numpy as np + + +class PrecisionDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The given dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 200 # Increased population for greater search diversity + mutation_factor = 0.6 # Base mutation factor + crossover_prob = 0.8 # High crossover probability for more robust search + + # Initialize population randomly + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + # Main optimization loop + for _ in range(self.budget // population_size): + for i in range(population_size): + # Mutation + indices = [j for j in range(population_size) if j != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + # Selection: Evaluate the trial solution + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution if the new solution is better + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/PrecisionDynamicAdaptiveOptimizerV6.py b/nevergrad/optimization/lama/PrecisionDynamicAdaptiveOptimizerV6.py new file mode 100644 index 000000000..e0488c32f --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionDynamicAdaptiveOptimizerV6.py @@ -0,0 +1,62 @@ +import numpy as np + + +class PrecisionDynamicAdaptiveOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed to 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Initial temperature, slightly tuned for a better balance between exploration and exploitation + T_min = 0.0003 # Reduced minimum temperature for deep late-stage exploration + alpha = 0.93 # Adjusted cooling rate to prolong effective search duration + + # Mutation and crossover parameters are finely adjusted + F = 0.78 # Tuned Mutation factor for a subtle balance + CR = 0.85 # Adjusted Crossover probability to optimize genetic diversity + + # Setting a slightly increased population size to improve sampling + population_size = 85 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing dynamic mutation with a refined sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptive mutation factor with refined control + dynamic_F = ( + F + * np.exp(-0.08 * T) + * (0.75 + 0.25 * np.tanh(3.5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More sensitive acceptance criteria considering a refined temperature influence + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a cosine modulation slightly adjusted + adaptive_cooling = alpha - 0.009 * np.cos(2.8 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/PrecisionEnhancedDualStrategyOptimizer.py b/nevergrad/optimization/lama/PrecisionEnhancedDualStrategyOptimizer.py new file mode 100644 index 000000000..e1ff24ef7 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedDualStrategyOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class PrecisionEnhancedDualStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 150 # Increased population size for better exploration + mutation_factor = 0.8 # Higher mutation factor for more aggressive exploration + crossover_rate = 0.7 # Moderate crossover rate to balance exploration and exploitation + elite_size = 10 # Increased number of elite individuals to preserve good solutions + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Adaptive mutation considering overall population diversity + diversity_factor = np.std(population) / (np.abs(np.mean(population)) + 1e-6) + adaptive_mutation = mutation_factor * diversity_factor + + mutant = a + adaptive_mutation * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionEnhancedDynamicOptimizerV13.py b/nevergrad/optimization/lama/PrecisionEnhancedDynamicOptimizerV13.py new file mode 100644 index 000000000..2b62d1ac0 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedDynamicOptimizerV13.py @@ -0,0 +1,58 @@ +import numpy as np + + +class PrecisionEnhancedDynamicOptimizerV13: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality based on the problem description + self.lb = -5.0 # Lower boundary of the search space + self.ub = 5.0 # Upper boundary of the search space + + def __call__(self, func): + # Enhanced temperature parameters for deeper and more nuanced exploration + T = 1.2 # Higher initial temperature to promote extensive initial search + T_min = 0.0002 # Lower minimum temperature for fine-grained exploration at the end + alpha = 0.91 # Slower cooling rate to ensure a gradual transition and more evaluations + + # Mutation and crossover factors fine-tuned for robust evolutionary dynamics + F = 0.78 # Slightly increased mutation factor to induce robust exploratory mutations + CR = 0.88 # Slightly increased crossover probability to promote diversity + + population_size = 90 # Increased population size for better coverage of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation strategy using an advanced sigmoid function for adaptive mutation control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Implementing a more complex sigmoid function for mutation factor adaptation + dynamic_F = F * (0.6 + 0.4 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with temperature scaling adjusted for precision + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Introducing a more complex cooling strategy with sinusoidal and linear modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/PrecisionEnhancedSearch.py b/nevergrad/optimization/lama/PrecisionEnhancedSearch.py new file mode 100644 index 000000000..64602e32c --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedSearch.py @@ -0,0 +1,66 @@ +import numpy as np + + +class PrecisionEnhancedSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize with random point with smaller intervals to avoid large initial dispersion + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Adaptive parameters with precision enhancements + scale = 1.0 # Starting scale more conservative + min_scale = 0.001 # Fine precision for advanced refinement + adaptive_decay = 0.95 # Slower decay rate to sustain exploration + exploration_probability = 0.7 # Higher initial exploration + exploitation_boost = 0.1 # Boost exploitation gradually + + # Use a memory mechanism to remember past good positions + memory_size = 5 + memory = [current_point.copy() for _ in range(memory_size)] + memory_f = [current_f for _ in range(memory_size)] + + # Main optimization loop + for i in range(1, self.budget): + scale *= adaptive_decay + scale = max(min_scale, scale) + + # Decide between exploration and exploitation + if np.random.rand() < exploration_probability: + # Global exploration + candidate = np.random.uniform(-5.0, 5.0, self.dim) + else: + # Local exploitation around a remembered good position + memory_index = np.random.choice(range(memory_size)) + perturbation = np.random.normal(0, scale, self.dim) + candidate = memory[memory_index] + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + + candidate_f = func(candidate) + + # Update memory if better + max_memory_f = max(memory_f) + if candidate_f < max_memory_f: + worst_index = memory_f.index(max_memory_f) + memory[worst_index] = candidate + memory_f[worst_index] = candidate_f + + # Update global best + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Adjust exploration probability and boost exploitation + exploration_probability *= 1.0 - exploitation_boost + exploitation_boost += 0.002 # Increase exploitation boost gradually + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionEnhancedSpatialAdaptiveEvolver.py b/nevergrad/optimization/lama/PrecisionEnhancedSpatialAdaptiveEvolver.py new file mode 100644 index 000000000..008d918df --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedSpatialAdaptiveEvolver.py @@ -0,0 +1,88 @@ +import numpy as np + + +class PrecisionEnhancedSpatialAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + initial_step_size=1.0, + step_decay=0.95, + elite_ratio=0.05, + mutation_intensity=0.05, + local_search_prob=0.1, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, individual): + tweaks = np.random.normal(0, self.step_size * 0.1, self.dimension) + return np.clip(individual + tweaks, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * ( + self.step_decay**generation + ) # Dynamic step size for exploration adjustment + + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + # Perform local search on a subset of the new population + if np.random.rand() < self.local_search_prob: + local_search_indices = np.random.choice( + self.population_size, size=int(self.population_size * 0.2), replace=False + ) + for idx in local_search_indices: + candidate = self.local_search(new_population[idx]) + candidate_fitness = func(candidate) + if candidate_fitness < new_fitness[idx]: + new_population[idx] = candidate + new_fitness[idx] = candidate_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionEnhancedSpiralDifferentialClimberV4.py b/nevergrad/optimization/lama/PrecisionEnhancedSpiralDifferentialClimberV4.py new file mode 100644 index 000000000..161d9db8d --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedSpiralDifferentialClimberV4.py @@ -0,0 +1,72 @@ +import numpy as np + + +class PrecisionEnhancedSpiralDifferentialClimberV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population and parameters + population_size = 500 # Adjust population size for better performance + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Adjust parameters for spiral dynamics and evolutionary strategies + min_radius = 0.001 # Further refinement for local search + max_radius = 5.0 # Maintaining the boundary limit + radius_decay = 0.99 # Slower decay for extended influence of spiral movement + mutation_factor = 0.6 # Reduced mutation for controlled exploration + crossover_probability = 0.92 # Slightly increased for enhanced mixing + + # Advanced gradient-like search parameters + step_size = 0.01 # Reduced step size for high precision + gradient_steps = 20 # Increased steps for exhaustive local search + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential Evolution Strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral dynamics integration + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Advanced gradient-like search for fine-tuning + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation + f_trial = func(trial) + evaluations_left -= 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionEnhancedStrategicOptimizer.py b/nevergrad/optimization/lama/PrecisionEnhancedStrategicOptimizer.py new file mode 100644 index 000000000..c9f9016c6 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEnhancedStrategicOptimizer.py @@ -0,0 +1,66 @@ +import numpy as np + + +class PrecisionEnhancedStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 # Slightly reduced population for increased iterations within budget + mutation_factor = 0.9 # Refined mutation factor for a balance between exploration and exploitation + crossover_rate = 0.85 # Increased crossover rate to foster more diverse gene combinations + elite_size = 5 # Reduced elite size to promote diversity + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Binomial crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionEvolutionaryThermalOptimizer.py b/nevergrad/optimization/lama/PrecisionEvolutionaryThermalOptimizer.py new file mode 100644 index 000000000..f68e15733 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionEvolutionaryThermalOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class PrecisionEvolutionaryThermalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set from the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize advanced parameters for exploration and exploitation + T = 1.5 # Higher initial temperature for broader exploration at the start + T_min = 0.001 # Lower minimum temperature for extended fine-tuning + alpha = 0.98 # Slower cooling to allow more thorough examination at each temperature level + + # Updated mutation and crossover parameters for higher diversity + F = 0.8 # Increased mutation factor to encourage more pronounced variations + CR = 0.9 # Higher crossover probability to increase gene mixing + + population_size = 100 # Increased population size for better initial search space coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation and adaptive simulated annealing acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adjusted based on temperature and progression + dynamic_F = ( + F * (0.5 + 0.5 * np.cos(np.pi * T)) * (0.5 + 0.5 * (evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Simulated annealing acceptance criterion adjusted to account for temperature and fitness improvement + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.log(1 + np.abs(delta_fitness)))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cooling rate adaptation considering optimization stage + adaptive_cooling = alpha - 0.01 * (evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/PrecisionFocusedAdaptivePSO.py b/nevergrad/optimization/lama/PrecisionFocusedAdaptivePSO.py new file mode 100644 index 000000000..e88115fdd --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionFocusedAdaptivePSO.py @@ -0,0 +1,76 @@ +import numpy as np + + +class PrecisionFocusedAdaptivePSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.2, + social_weight=2.0, + elite_ratio=0.1, + mutation_intensity=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.elite_ratio = elite_ratio + self.mutation_intensity = mutation_intensity + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + elite_size = int(self.population_size * self.elite_ratio) + elite_indices = np.argsort(personal_best_scores)[:elite_size] + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = self.inertia_weight * velocities[i] + personal_component + social_component + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + if i in elite_indices: + # Elite particles undergo a focused mutation + mutation_indices = np.random.choice( + self.dim, size=int(np.ceil(self.dim * 0.5)), replace=False + ) + particles[i][mutation_indices] += np.random.normal( + 0, self.mutation_intensity, size=len(mutation_indices) + ) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/PrecisionGuidedEvolutionStrategy.py b/nevergrad/optimization/lama/PrecisionGuidedEvolutionStrategy.py new file mode 100644 index 000000000..5de205a9b --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionGuidedEvolutionStrategy.py @@ -0,0 +1,80 @@ +import numpy as np + + +class PrecisionGuidedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialize parameters + population_size = 50 + children_multiplier = 7 # Number of children per parent + mutation_strength = 0.5 # Initial mutation strength + success_threshold = 0.2 # Threshold for successful mutations + + # Create initial population and evaluate it + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Track the best solution found + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + evaluations = population_size + successful_mutations = 0 + attempted_mutations = 0 + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for parent in population: + for _ in range(children_multiplier): + child = parent + np.random.normal(0, mutation_strength, self.dim) + child = np.clip(child, self.lb, self.ub) + child_fitness = func(child) + + new_population.append(child) + new_fitness.append(child_fitness) + evaluations += 1 + + attempted_mutations += 1 + if child_fitness < func(parent): + successful_mutations += 1 + + if evaluations >= self.budget: + break + if evaluations >= self.budget: + break + + # Update the population with the best performing individuals + total_population = np.vstack((population, new_population)) + total_fitness = np.hstack((fitness, new_fitness)) + best_indices = np.argsort(total_fitness)[:population_size] + population = total_population[best_indices] + fitness = total_fitness[best_indices] + + # Update the best found solution + best_idx = np.argmin(fitness) + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Adapt mutation strength + if attempted_mutations > 0: + success_ratio = successful_mutations / attempted_mutations + if success_ratio > success_threshold: + mutation_strength /= 0.85 # Increase mutation strength + else: + mutation_strength *= 0.85 # Decrease mutation strength + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = PrecisionGuidedEvolutionStrategy(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/PrecisionGuidedEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/PrecisionGuidedEvolutionaryAlgorithm.py new file mode 100644 index 000000000..bf9da40d8 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionGuidedEvolutionaryAlgorithm.py @@ -0,0 +1,93 @@ +import numpy as np + + +class PrecisionGuidedEvolutionaryAlgorithm: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.mutation_factor = 0.8 # Initial mutation scaling factor + self.crossover_rate = 0.7 # Probability of crossover + self.adaptation_rate = 0.1 # Rate at which mutation factor is adapted + + def adapt_mutation_factor(self, success_rate): + """Adapt the mutation factor based on recent success rate""" + if success_rate > 0.2: + self.mutation_factor *= 1 + self.adaptation_rate + else: + self.mutation_factor *= 1 - self.adaptation_rate + self.mutation_factor = max(0.01, min(1.0, self.mutation_factor)) # Ensure within bounds + + def mutate(self, individual): + """Apply mutation with dynamic adaptation""" + mutation = np.random.normal(0, self.mutation_factor, self.dimension) + mutant = individual + mutation + return np.clip(mutant, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent1, parent2): + """Simulated binary crossover""" + if np.random.rand() < self.crossover_rate: + alpha = np.random.uniform(-0.5, 1.5, self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.bounds["lb"], self.bounds["ub"]) + return parent1 # No crossover occurred + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + + successful_mutations = 0 + evaluations = len(population) + + # Evolutionary loop + while evaluations < self.budget: + offspring = [] + for individual in population: + mutated = self.mutate(individual) + f_mutated = func(mutated) + evaluations += 1 + if f_mutated < func(individual): + offspring.append(mutated) + successful_mutations += 1 + else: + offspring.append(individual) + + if evaluations >= self.budget: + break + + offspring = np.array( + [ + self.crossover(offspring[i], offspring[np.random.randint(len(offspring))]) + for i in range(len(offspring)) + ] + ) + + # Evaluate offspring + offspring_fitness = np.array([func(ind) for ind in offspring]) + evaluations += len(offspring) + + # Select new population + combined = np.vstack((population, offspring)) + combined_fitness = np.concatenate((fitness, offspring_fitness)) + indices = np.argsort(combined_fitness)[: self.population_size] + population = combined[indices] + fitness = combined_fitness[indices] + + # Update best solution + if np.min(fitness) < best_fitness: + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + + # Adapt mutation factor based on success rate + self.adapt_mutation_factor(successful_mutations / len(offspring)) + successful_mutations = 0 # Reset for the next generation + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionGuidedQuantumStrategy.py b/nevergrad/optimization/lama/PrecisionGuidedQuantumStrategy.py new file mode 100644 index 000000000..038fd8c98 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionGuidedQuantumStrategy.py @@ -0,0 +1,77 @@ +import numpy as np + + +class PrecisionGuidedQuantumStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 300 # Increased population size for broader search + self.elite_size = 60 # Larger elite size to retain more high-quality solutions + self.crossover_probability = 0.9 # Slightly increased for better diversity + self.mutation_scale = 0.01 # More precise mutation scale for finer adjustments + self.quantum_mutation_scale = 0.05 # Lower scale for precise quantum leaps + self.quantum_probability = 0.2 # Higher frequency for quantum mutations + self.precision_boost_factor = 0.05 # Boost factor for precision in later stages + self.reactivity_factor = 0.02 # Lower reactivity factor for stable evolution + self.recombination_rate = 0.2 # Rate for recombining elite solutions + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + for i in range(num_offspring): + if np.random.rand() < self.crossover_probability: + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = elite[p1][:cross_point] + offspring[i][cross_point:] = elite[p2][cross_point:] + else: + offspring[i] = elite[np.random.choice(elite.shape[0])] + + # Apply deterministic mutation for precision + scale = self.mutation_scale + self.precision_boost_factor * np.log(remaining_budget + 1) + offspring[i] += np.random.normal(0, scale, self.dim) + + # Apply quantum mutation with controlled probability + if np.random.rand() < self.quantum_probability: + offspring[i] += np.random.normal(0, self.quantum_mutation_scale, self.dim) + + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + + return np.vstack([elite, offspring]) + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/PrecisionIncrementalEvolutionStrategy.py b/nevergrad/optimization/lama/PrecisionIncrementalEvolutionStrategy.py new file mode 100644 index 000000000..aac1ab6b8 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionIncrementalEvolutionStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class PrecisionIncrementalEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 100 + mutation_scale = 0.1 + elite_size = 10 + recombination_weight = 0.7 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + # Generate new candidates + for i in range(population_size): + parents_indices = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parents_indices] + + # Blend recombination + child = recombination_weight * parent1 + (1 - recombination_weight) * parent2 + + # Mutation + mutation = np.random.normal(0, mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + # Evaluate new candidate + child_fitness = func(child) + evaluations += 1 + + # Selection step + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Introduce elitism + if evaluations % 500 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + replace_indices = np.random.choice(population_size, elite_size, replace=False) + population[replace_indices] = elite_individuals + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionOptimizedEvolutionaryOptimizerV22.py b/nevergrad/optimization/lama/PrecisionOptimizedEvolutionaryOptimizerV22.py new file mode 100644 index 000000000..435b6884b --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionOptimizedEvolutionaryOptimizerV22.py @@ -0,0 +1,79 @@ +import numpy as np + + +class PrecisionOptimizedEvolutionaryOptimizerV22: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.6, + F_range=0.4, + CR=0.9, + elite_fraction=0.15, + mutation_strategy="precision_focused", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # 'precision_focused' targets very precise mutations + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "precision_focused": + # Increased focus on the best individual with a precision enhancement twist + if np.random.rand() < 0.85: # Adjusted focus probability for better exploitation + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base creation + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F, with a tighter range for more precise mutation + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range * 0.5 + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation of the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionRotationalClimbOptimizer.py b/nevergrad/optimization/lama/PrecisionRotationalClimbOptimizer.py new file mode 100644 index 000000000..51399aa23 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionRotationalClimbOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class PrecisionRotationalClimbOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 50 # Further reduce population size for increased precision + mutation_rate = 0.1 # Lower mutation rate to reduce disruption on fine-grained search + rotation_rate = 0.05 # Lower rotation rate to make smaller adjustments + blend_factor = 0.6 # Adjusted blend factor for crossover + + # Initialize population within the bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + for i in range(population_size): + # Select mutation indices ensuring unique entries + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation and rotational operation + direction = b - c + theta = rotation_rate * 2 * np.pi # Complete rotation consideration + rotation_matrix = np.eye(self.dim) + if self.dim >= 2: # Ensure rotation is only applied if dimensionality permits + np.fill_diagonal(rotation_matrix[:2, :2], np.cos(theta)) + rotation_matrix[0, 1], rotation_matrix[1, 0] = -np.sin(theta), np.sin(theta) + + rotated_vector = np.dot(rotation_matrix, direction) + mutant = a + mutation_rate * rotated_vector + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover with enhanced precision + trial = best_solution + blend_factor * (mutant - best_solution) + trial = np.clip(trial, self.lower_bound, self.upper_bound) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/PrecisionScaledEvolutionarySearch.py b/nevergrad/optimization/lama/PrecisionScaledEvolutionarySearch.py new file mode 100644 index 000000000..2e582de62 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionScaledEvolutionarySearch.py @@ -0,0 +1,99 @@ +import numpy as np + + +class PrecisionScaledEvolutionarySearch: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + initial_step_size=0.7, + step_decay=0.95, + elite_ratio=0.2, + mutation_scale=0.1, + local_search_probability=0.3, + refinement_steps=10, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_scale = mutation_scale + self.local_search_probability = local_search_probability + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, mutation_intensity): + mutation = np.random.normal(0, mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual, intensity): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, intensity, self.dimension), self.bounds[0], self.bounds[1] + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + mutation_intensity = self.step_size * (self.step_decay**generation) * self.mutation_scale + new_population = np.array( + [self.mutate(population[i], mutation_intensity) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_probability: + for idx in range(self.population_size): + if evaluations + self.refinement_steps > self.budget: + break + local_individual, local_fitness = self.local_search( + func, new_population[idx], mutation_intensity / 10 + ) + evaluations += self.refinement_steps + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionSpiralDifferentialOptimizerV6.py b/nevergrad/optimization/lama/PrecisionSpiralDifferentialOptimizerV6.py new file mode 100644 index 000000000..7ba6f7f37 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionSpiralDifferentialOptimizerV6.py @@ -0,0 +1,76 @@ +import numpy as np + + +class PrecisionSpiralDifferentialOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize parameters + population_size = 200 # Reduced to focus more on fine-grained steps + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Spiral and mutation parameters + min_radius = 0.0001 # Smaller radius for precision in local search + max_radius = 2.0 # Smaller initial radius to narrow search scope + radius_decay = 0.99 # Slower decay rate for prolonged exploration + mutation_factor = 0.8 # Increased mutation factor for diversified exploratory steps + crossover_probability = 0.9 # Increased probability to maintain diversity + + # Gradient refinement steps + step_size = 0.002 # Precision step size + gradient_steps = 100 # Increased gradient steps for deeper local optimization + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential evolution mutation + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover operation + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral dynamic integration + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Gradient descent-like local search with reduced steps and size + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation of the new solution + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + # Population update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionTunedCrossoverElitistStrategyV11.py b/nevergrad/optimization/lama/PrecisionTunedCrossoverElitistStrategyV11.py new file mode 100644 index 000000000..fb8f8d2ea --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionTunedCrossoverElitistStrategyV11.py @@ -0,0 +1,80 @@ +import numpy as np + + +class PrecisionTunedCrossoverElitistStrategyV11: + def __init__( + self, + budget, + dimension=5, + population_size=350, + elite_fraction=0.15, + mutation_intensity=0.01, + crossover_rate=0.9, + adaptive_intensity=0.85, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_intensity = adaptive_intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform weighted crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.weighted_recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.adaptive_mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutate(self, individual, evaluations): + # Adaptive mutation intensity based on normalized evaluations + normalized_time = evaluations / self.budget + intensity = self.mutation_intensity * np.exp(-normalized_time * 20) + return individual + np.random.normal(0, intensity, self.dimension) + + def weighted_recombine(self, parent1, parent2, evaluations): + # Blend between parents with adaptive depth based on evaluations + normalized_time = evaluations / self.budget + alpha = ( + np.random.uniform(0.3, 0.7) * (1 - normalized_time) + normalized_time * self.adaptive_intensity + ) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/PrecisionTunedEvolver.py b/nevergrad/optimization/lama/PrecisionTunedEvolver.py new file mode 100644 index 000000000..61c0f28aa --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionTunedEvolver.py @@ -0,0 +1,74 @@ +import numpy as np + + +class PrecisionTunedEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=200, + elite_fraction=0.05, + adaptive_mutation=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.adaptive_mutation = adaptive_mutation + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation_strength = (self.upper_bound - self.lower_bound) / np.sqrt(self.budget) + mutation = np.random.normal(0, mutation_strength, self.dimension) + individual = np.clip(individual + mutation, self.lower_bound, self.upper_bound) + return individual + + def crossover(self, parent1, parent2): + alpha = np.random.uniform(0.3, 0.7) + child = alpha * parent1 + (1 - alpha) * parent2 + return child + + def reproduce(self, elites, elite_fitness): + new_population = np.empty((self.population_size, self.dimension)) + for i in range(self.population_size): + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.crossover(elites[parents[0]], elites[parents[1]]) + if self.adaptive_mutation: + child = self.mutate(child) + new_population[i] = child + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/PrecisionTunedHybridSearch.py b/nevergrad/optimization/lama/PrecisionTunedHybridSearch.py new file mode 100644 index 000000000..5d1be0f97 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionTunedHybridSearch.py @@ -0,0 +1,73 @@ +import numpy as np + + +class PrecisionTunedHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 250 + elite_size = int(0.15 * population_size) + mutation_rate = 0.25 + mutation_scale = lambda t: 0.1 * np.exp(-0.0003 * t) + crossover_rate = 0.90 + + local_search_prob_base = 0.1 + local_search_decay = 0.0002 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + local_search_prob = local_search_prob_base * np.exp(-local_search_decay * evaluations) + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = 0.03 + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/PrecisionTunedPSO.py b/nevergrad/optimization/lama/PrecisionTunedPSO.py new file mode 100644 index 000000000..0efe78f88 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionTunedPSO.py @@ -0,0 +1,76 @@ +import numpy as np + + +class PrecisionTunedPSO: + def __init__( + self, + budget=10000, + population_size=100, + omega_initial=0.95, + omega_final=0.35, + phi_p=0.15, + phi_g=0.75, + critical_depth=10, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal preference influence + self.phi_g = phi_g # Global preference influence + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.critical_depth = critical_depth # Depth of performance evaluation for adaptive inertia + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.critical_depth :] + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + # Precision-tuned inertia adaptation + if len(scores) > 1 and np.std(scores) < 0.01: + return max(self.omega_final, self.omega_initial - (evaluation_counter / self.budget) * 1.5) + else: + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) diff --git a/nevergrad/optimization/lama/PrecisionTunedQuantumHarmonicFeedbackOptimizer.py b/nevergrad/optimization/lama/PrecisionTunedQuantumHarmonicFeedbackOptimizer.py new file mode 100644 index 000000000..6e626f466 --- /dev/null +++ b/nevergrad/optimization/lama/PrecisionTunedQuantumHarmonicFeedbackOptimizer.py @@ -0,0 +1,84 @@ +import numpy as np + + +class PrecisionTunedQuantumHarmonicFeedbackOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=300, + elite_rate=0.25, + resonance_factor=0.05, + mutation_scale=0.02, + harmonic_frequency=0.2, + feedback_intensity=0.2, + damping_factor=0.98, + mutation_decay=0.995, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.prev_best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.prev_best_fitness = self.best_fitness + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and perform selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # More precise adjustments + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + + if self.best_fitness >= self.prev_best_fitness: + feedback_adjustment = self.feedback_intensity * np.random.uniform(-1, 1, self.dim) + else: + feedback_adjustment = 0 + + self.population[idx] = ( + elite_sample + + (harmonic_influence + quantum_resonance + mutation_effect + feedback_adjustment) + * self.damping_factor + ) + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + # Gradually decrease mutation scale to enhance precision over iterations + self.mutation_scale *= self.mutation_decay + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/ProgressiveAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ProgressiveAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..c7c8b7ebc --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveAdaptiveDifferentialEvolution.py @@ -0,0 +1,57 @@ +import numpy as np + + +class ProgressiveAdaptiveDifferentialEvolution: + def __init__(self, budget, dim=5, pop_size=100, F_base=0.5, CR=0.9): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.F_base = F_base # Base mutation factor + self.CR = CR # Crossover rate + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, population, idx, n_evals): + indices = [i for i in range(self.pop_size) if i != idx] + a, b, c = np.random.choice(indices, 3, replace=False) + # Dynamically adjust the mutation factor based on the stage of optimization + stage_F = self.F_base * (1 - (n_evals / self.budget)) + mutant = np.clip( + population[a] + stage_F * (population[b] - population[c]), self.bounds[0], self.bounds[1] + ) + return mutant + + def crossover(self, target, mutant): + # Trial vector generation with potentially varying CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, f_values, trial, trial_f, trial_idx): + if trial_f < f_values[trial_idx]: + population[trial_idx] = trial + f_values[trial_idx] = trial_f + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + + while n_evals < self.budget: + for idx in range(self.pop_size): + mutant = self.mutate(population, idx, n_evals) + trial = self.crossover(population[idx], mutant) + trial_f = func(trial) + n_evals += 1 + self.select(population, f_values, trial, trial_f, idx) + if n_evals >= self.budget: + break + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ProgressiveAdaptiveGlobalLocalSearch.py b/nevergrad/optimization/lama/ProgressiveAdaptiveGlobalLocalSearch.py new file mode 100644 index 000000000..7d2ec45f2 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveAdaptiveGlobalLocalSearch.py @@ -0,0 +1,81 @@ +import numpy as np + + +class ProgressiveAdaptiveGlobalLocalSearch: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=300): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.85 # Slightly less global influence to promote better local refinement + self.local_influence = 0.15 # Increased local influence for enhanced exploitation + self.vel_scale = 0.05 # Maintaining fine velocity scale for progressive adjustments + self.learning_rate = 0.7 # Consistent learning rate for balance + self.adaptive_rate = 0.03 # Increased adaptive rate for better control over convergence dynamics + self.exploration_phase = 0.3 # Percentage of budget dedicated to more explorative behavior + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = ( + np.random.uniform(-1, 1, (self.particles, self.dimension)) + * (self.bounds[1] - self.bounds[0]) + * self.vel_scale + ) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + exploration_budget = int(self.budget * self.exploration_phase) + + while evaluations < self.budget: + for i in range(self.particles): + # Adjusting global and local influence based on phase of optimization + if evaluations < exploration_budget: + current_global_influence = self.global_influence + current_local_influence = self.local_influence + else: + # Increase local influence as optimization progresses + current_global_influence = self.global_influence * (1 - (evaluations / self.budget)) + current_local_influence = self.local_influence + (evaluations / self.budget) * 0.25 + + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + current_global_influence * r1 * (personal_best_positions[i] - positions[i]) + + current_local_influence * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/ProgressiveCohortDiversityOptimization.py b/nevergrad/optimization/lama/ProgressiveCohortDiversityOptimization.py new file mode 100644 index 000000000..56e2b8751 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveCohortDiversityOptimization.py @@ -0,0 +1,70 @@ +import numpy as np + + +class ProgressiveCohortDiversityOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_factor=0.1, + recombination_prob=0.9, + adaptation_intensity=0.98, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_factor = mutation_factor + self.recombination_prob = recombination_prob + self.adaptation_intensity = adaptation_intensity + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + mean_elite = np.mean(population[elite_indices], axis=0) + + for i in range(self.population_size): + if np.random.rand() < self.recombination_prob: + # Recombination from elite members + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices[0]], population[parents_indices[1]] + alpha = np.random.rand() + child = alpha * parent1 + (1 - alpha) * parent2 + else: + # Mutation based on adaptive vector between mean elite and a random point in the population + random_member = population[np.random.randint(0, self.population_size)] + mutation_direction = mean_elite - random_member + child = random_member + self.mutation_factor * mutation_direction + + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population[i] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adapt mutation factor dynamically to reduce as budget is exhausted + self.mutation_factor *= self.adaptation_intensity + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ProgressiveDimensionalOptimizer.py b/nevergrad/optimization/lama/ProgressiveDimensionalOptimizer.py new file mode 100644 index 000000000..4e6f64982 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveDimensionalOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class ProgressiveDimensionalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial parameter setup + population_size = 100 + mutation_factor = 0.9 # High initial mutation rate for broad exploration + crossover_rate = 0.7 + elite_size = 5 + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + learning_rate_decrease = self.budget / 10 # Adjust mutation and crossover rate over these intervals + + # Optimization loop + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation strategy adapted from DE/rand/1/bin + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + best_index = i + + # Periodic adaptation of mutation factor and crossover rate + if evaluations % learning_rate_decrease == 0: + mutation_factor = max(0.5, mutation_factor * 0.95) # Decrement mutation factor slowly + crossover_rate = min(1.0, crossover_rate + 0.05) # Incrementally increase crossover rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ProgressiveEvolutionaryFireworkAlgorithm.py b/nevergrad/optimization/lama/ProgressiveEvolutionaryFireworkAlgorithm.py new file mode 100644 index 000000000..0509f41c5 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveEvolutionaryFireworkAlgorithm.py @@ -0,0 +1,74 @@ +import numpy as np + + +class ProgressiveEvolutionaryFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=30, n_sparks=10, alpha=0.1, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + self.progressive_sigma = np.linspace(initial_sigma, 0.1, self.budget) # Linearly decreasing sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + self.sigma = self.progressive_sigma[it] + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/ProgressiveHybridAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/ProgressiveHybridAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..4a1398f8f --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveHybridAdaptiveDifferentialEvolution.py @@ -0,0 +1,50 @@ +import numpy as np + + +class ProgressiveHybridAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 200 # Increasing population size for more diverse initial sampling + self.base_F = 0.5 # Base differential weight + self.CR = 0.9 # Crossover probability + self.F_increment = 0.1 # Increment factor for differential weight + + def __call__(self, func): + # Initialize population and fitness array + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop optimized within the given budget + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Progressive increase of F based on the half-life concept + F_dynamic = self.base_F + self.F_increment * (1 - np.exp(-2 * iteration / n_iterations)) + for i in range(self.pop_size): + # Mutation with DE/rand/1/bin strategy and progressive F + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F_dynamic * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Keep within bounds + + # Binomial Crossover with guaranteed change + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/ProgressiveParticleSwarmOptimization.py b/nevergrad/optimization/lama/ProgressiveParticleSwarmOptimization.py new file mode 100644 index 000000000..7b8be88bf --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveParticleSwarmOptimization.py @@ -0,0 +1,61 @@ +import numpy as np + + +class ProgressiveParticleSwarmOptimization: + def __init__(self, budget=10000, population_size=40, omega=0.5, phi_p=0.2, phi_g=0.5): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia weight + self.phi_p = phi_p # Personal learning coefficient + self.phi_g = phi_g # Global learning coefficient + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize population + pop = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = pop.copy() + fitness = np.array([func(ind) for ind in pop]) + personal_best_fitness = fitness.copy() + + # Initial global best + global_best_idx = np.argmin(fitness) + global_best = pop[global_best_idx] + + evaluations = self.population_size + + # Main loop + while evaluations < self.budget: + r_p = np.random.uniform(0, 1, (self.population_size, self.dim)) + r_g = np.random.uniform(0, 1, (self.population_size, self.dim)) + + # Update velocity and positions + velocity = ( + self.omega * velocity + + self.phi_p * r_p * (personal_best - pop) + + self.phi_g * r_g * (global_best - pop) + ) + pop = np.clip(pop + velocity, lb, ub) + + # Evaluate new positions + for i in range(self.population_size): + current_fitness = func(pop[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best[i] = pop[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < fitness[global_best_idx]: + global_best_idx = i + global_best = pop[i] + + # Adaptive inertia weight + if evaluations % (self.budget // 5) == 0: + progress = evaluations / self.budget + self.omega = max(0.4, self.omega * (1 - progress)) + self.phi_p = min(0.3, self.phi_p + progress * 0.1) + self.phi_g = max(0.3, self.phi_g - progress * 0.1) + + return fitness[global_best_idx], global_best diff --git a/nevergrad/optimization/lama/ProgressivePopulationRefinementStrategy.py b/nevergrad/optimization/lama/ProgressivePopulationRefinementStrategy.py new file mode 100644 index 000000000..4993fde73 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressivePopulationRefinementStrategy.py @@ -0,0 +1,73 @@ +import numpy as np + + +class ProgressivePopulationRefinementStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initialization parameters + initial_population_size = 150 + final_population_size = 50 + mutation_factor = 0.9 + crossover_prob = 0.7 + reduction_step = max( + 1, (initial_population_size - final_population_size) // (self.budget // initial_population_size) + ) + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (initial_population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Track the best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + evaluations = initial_population_size + current_population_size = initial_population_size + + while evaluations < self.budget: + for i in range(current_population_size): + if evaluations >= self.budget: + break + + # Differential evolution mutation and crossover + indices = [idx for idx in range(current_population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Accept or reject the new solution + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial.copy() + + # Reduce population size progressively + if current_population_size > final_population_size: + survivors_idx = np.argsort(fitness)[ + : max(final_population_size, current_population_size - reduction_step) + ] + population = population[survivors_idx] + fitness = fitness[survivors_idx] + current_population_size = len(survivors_idx) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = ProgressivePopulationRefinementStrategy(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/ProgressiveQuorumEvolutionStrategy.py b/nevergrad/optimization/lama/ProgressiveQuorumEvolutionStrategy.py new file mode 100644 index 000000000..7d26687b6 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveQuorumEvolutionStrategy.py @@ -0,0 +1,54 @@ +import numpy as np + + +class ProgressiveQuorumEvolutionStrategy: + def __init__( + self, budget, dimension=5, population_size=100, elite_fraction=0.1, mutation_scale=0.1, quorum_size=5 + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_scale = mutation_scale + self.quorum_size = quorum_size + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Create new generation with quorum-based selection and mutation + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select quorum randomly and choose the best among them + quorum_indices = np.random.choice(self.population_size, self.quorum_size, replace=False) + elite_idx = quorum_indices[np.argmin(fitness[quorum_indices])] + elite = population[elite_idx] + + # Mutation based on Gaussian noise + mutation = np.random.normal(0, self.mutation_scale, self.dimension) + child = np.clip(elite + mutation, -5.0, 5.0) + + # Evaluate new candidate + child_fitness = func(child) + evaluations += 1 + + # Store the new candidate + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/ProgressiveRefinementSearch.py b/nevergrad/optimization/lama/ProgressiveRefinementSearch.py new file mode 100644 index 000000000..d7cc5f774 --- /dev/null +++ b/nevergrad/optimization/lama/ProgressiveRefinementSearch.py @@ -0,0 +1,59 @@ +import numpy as np + + +class ProgressiveRefinementSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initializations + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Define scales for search + max_scale = 2.0 + min_scale = 0.001 + scale = max_scale + scale_decay = 0.98 # Gradually decrease scale + + # Exploration and exploitation configurations + exploration_probability = 0.3 # Initial probability of exploration + exploit_prob_growth = 0.005 # Growth rate of the exploitation probability + + for i in range(1, self.budget): + if np.random.rand() < exploration_probability: + # Exploration with random point within the search space + candidate = np.random.uniform(-5.0, 5.0, self.dim) + else: + # Exploitation by perturbing the current best point + perturbation = np.random.normal(0, scale, self.dim) + candidate = current_point + perturbation + candidate = np.clip(candidate, -5.0, 5.0) # Ensure within bounds + + candidate_f = func(candidate) + + # Update current point if the candidate is better + if candidate_f < current_f: + current_point = candidate + current_f = candidate_f + # Update best found solution + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Adjust exploration-exploitation balance + exploration_probability = max(0, exploration_probability - exploit_prob_growth) + exploration_probability = min(1, exploration_probability) + + # Reduce scale to refine search over time + scale *= scale_decay + scale = max(min_scale, scale) # Avoid scale becoming too small + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSO.py b/nevergrad/optimization/lama/QAPSO.py new file mode 100644 index 000000000..712c8eb33 --- /dev/null +++ b/nevergrad/optimization/lama/QAPSO.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QAPSO: + def __init__( + self, + budget=1000, + num_particles=30, + inertia_weight=0.5, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + # Initialize particles positions and velocities + particles_pos = np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for _ in range(self.budget): + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + self.inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + # Acceleration towards global best + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + particles_pos[i] += particles_vel[i] + + # Boundary enforcement + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Update personal best + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSOAIR.py b/nevergrad/optimization/lama/QAPSOAIR.py new file mode 100644 index 000000000..16edc29b3 --- /dev/null +++ b/nevergrad/optimization/lama/QAPSOAIR.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QAPSOAIR: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + particles_pos[i] += particles_vel[i] + + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSOAIRVC.py b/nevergrad/optimization/lama/QAPSOAIRVC.py new file mode 100644 index 000000000..b511c8c03 --- /dev/null +++ b/nevergrad/optimization/lama/QAPSOAIRVC.py @@ -0,0 +1,89 @@ +import numpy as np + + +class QAPSOAIRVC: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + velocity_clamp=0.5, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.velocity_clamp = velocity_clamp + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -self.velocity_clamp, self.velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSOAIRVCHR.py b/nevergrad/optimization/lama/QAPSOAIRVCHR.py new file mode 100644 index 000000000..5adff1931 --- /dev/null +++ b/nevergrad/optimization/lama/QAPSOAIRVCHR.py @@ -0,0 +1,91 @@ +import numpy as np + + +class QAPSOAIRVCHR: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + velocity_clamp=0.5, + hybrid_restart_interval=100, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.velocity_clamp = velocity_clamp + self.hybrid_restart_interval = hybrid_restart_interval + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -self.velocity_clamp, self.velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold or t % self.hybrid_restart_interval == 0: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSOAIW.py b/nevergrad/optimization/lama/QAPSOAIW.py new file mode 100644 index 000000000..197cd6e61 --- /dev/null +++ b/nevergrad/optimization/lama/QAPSOAIW.py @@ -0,0 +1,60 @@ +import numpy as np + + +class QAPSOAIW: + def __init__( + self, budget=1000, num_particles=30, cognitive_weight=1.5, social_weight=2.0, acceleration_coeff=1.1 + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + # Initialize particles positions and velocities + particles_pos = np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) # Adaptive inertia weight + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + # Acceleration towards global best + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + particles_pos[i] += particles_vel[i] + + # Boundary enforcement + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Update personal best + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QAPSOAIWRR.py b/nevergrad/optimization/lama/QAPSOAIWRR.py new file mode 100644 index 000000000..c6457216e --- /dev/null +++ b/nevergrad/optimization/lama/QAPSOAIWRR.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QAPSOAIWRR: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + particles_pos[i] += particles_vel[i] + + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < 0.1: # Random restart with 10% probability + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QPSO.py b/nevergrad/optimization/lama/QPSO.py new file mode 100644 index 000000000..f047de999 --- /dev/null +++ b/nevergrad/optimization/lama/QPSO.py @@ -0,0 +1,53 @@ +import numpy as np + + +class QPSO: + def __init__( + self, budget=1000, num_particles=30, inertia_weight=0.5, cognitive_weight=1.5, social_weight=2.0 + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + # Initialize particles positions and velocities + particles_pos = np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for _ in range(self.budget): + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + particles_vel[i] = ( + self.inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + particles_pos[i] += particles_vel[i] + + # Boundary enforcement + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + # Update personal best + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAcceleratedEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumAcceleratedEvolutionStrategy.py new file mode 100644 index 000000000..43af61d8d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAcceleratedEvolutionStrategy.py @@ -0,0 +1,61 @@ +import numpy as np + + +class QuantumAcceleratedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 150 # Increased population size for better exploration + self.sigma_initial = 1.0 # Initial standard deviation for the mutation + self.F_min = 0.1 # Minimum differential weight + self.F_max = 0.9 # Maximum differential weight + self.CR = 0.7 # Crossover probability + self.q_impact = 0.1 # Quantum impact in mutation + self.sigma_decay = 0.999 # Decay rate for sigma + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + # Adapt sigma + sigma *= self.sigma_decay + + # Adaptive differential weight based on iteration + F = self.F_min + (self.F_max - self.F_min) * np.cos( + np.pi * iteration / (self.budget / self.pop_size) + ) + + # Generate new trial vectors + for i in range(self.pop_size): + # Mutation using differential evolution strategy and quantum impact + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + F * (a - b) + sigma * np.random.randn(self.dim) + mutant += self.q_impact * np.random.standard_cauchy(self.dim) # Quantum influenced mutation + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Evaluate + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumAcceleratedNesterovOptimizer.py b/nevergrad/optimization/lama/QuantumAcceleratedNesterovOptimizer.py new file mode 100644 index 000000000..048ce905a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAcceleratedNesterovOptimizer.py @@ -0,0 +1,51 @@ +import numpy as np + + +class QuantumAcceleratedNesterovOptimizer: + def __init__(self, budget, dim=5, learning_rate=0.05, momentum=0.95, quantum_influence_rate=0.1): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.position = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + self.velocity = np.zeros(self.dim) + self.best_position = np.copy(self.position) + self.best_fitness = np.inf + + def evaluate(self, func, position): + return func(position) + + def update_position(self): + # Predict future position using current velocity (Nesterov acceleration) + future_position = self.position + self.momentum * self.velocity + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + + # Update velocity with noise as a surrogate gradient and include Nesterov correction + noise = np.random.normal(0, 1, self.dim) + self.velocity = self.momentum * self.velocity - self.learning_rate * noise + self.position += self.velocity + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def quantum_influence(self): + if np.random.rand() < self.quantum_influence_rate: + self.position += np.random.normal(0, 0.1 * (self.upper_bound - self.lower_bound), self.dim) + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + + for _ in range(self.budget): + self.update_position() + self.quantum_influence() + fitness = self.evaluate(func, self.position) + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = np.copy(self.position) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/QuantumAcceleratedNesterovPlusOptimizer.py b/nevergrad/optimization/lama/QuantumAcceleratedNesterovPlusOptimizer.py new file mode 100644 index 000000000..bef0b4a5d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAcceleratedNesterovPlusOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class QuantumAcceleratedNesterovPlusOptimizer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.9, + quantum_influence_rate=0.05, + adaptive_lr_factor=0.99, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.position = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + self.velocity = np.zeros(self.dim) + self.best_position = np.copy(self.position) + self.best_fitness = np.inf + + def evaluate(self, func, position): + return func(position) + + def update_position(self): + # Predict future position using current velocity (Nesterov acceleration) + future_position = self.position + self.momentum * self.velocity + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + + # Update velocity with noise as a surrogate gradient and include Nesterov correction + noise = np.random.normal(0, 1, self.dim) + self.velocity = self.momentum * self.velocity - self.learning_rate * noise + self.position += self.velocity + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + self.learning_rate *= self.adaptive_lr_factor # Adaptive learning rate + + def quantum_influence(self): + if np.random.rand() < self.quantum_influence_rate: + quantum_jump = np.random.normal(0, 0.05 * (self.upper_bound - self.lower_bound), self.dim) + self.position += quantum_jump + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + + for _ in range(self.budget): + self.update_position() + self.quantum_influence() + fitness = self.evaluate(func, self.position) + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = np.copy(self.position) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV5.py b/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV5.py new file mode 100644 index 000000000..a5aef2281 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV5.py @@ -0,0 +1,89 @@ +import numpy as np + + +class QuantumAdaptiveCognitionOptimizerV5: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.7, + cognitive_coefficient=2.5, + social_coefficient=2.5, + inertia_decay=0.99, + quantum_jump_rate=0.3, + quantum_scale=0.15, + adaptive_scale_factor=0.3, + multimodal_enhancement=True, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.multimodal_enhancement = multimodal_enhancement + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Quantum jump with refined adaptive scaling based on the global best score + quantum_deviation = np.random.normal( + 0, + self.quantum_scale * (1 + self.adaptive_scale_factor * np.tanh(global_best_score)), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Velocity update with increased coefficients for cognitive and social terms + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + self.inertia_weight *= ( + self.inertia_decay + ) # Gradual reduction in inertia weight to favor exploitation over time + + # Multimodal enhancement with dynamic reinitialization + if self.multimodal_enhancement and evaluations % 500 == 0: + idx_to_reset = np.random.choice( + np.arange(self.population_size), size=int(0.2 * self.population_size), replace=False + ) + particles[idx_to_reset] = np.random.uniform(self.lb, self.ub, (len(idx_to_reset), self.dim)) + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV6.py b/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV6.py new file mode 100644 index 000000000..4b3dcf1c2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveCognitionOptimizerV6.py @@ -0,0 +1,89 @@ +import numpy as np + + +class QuantumAdaptiveCognitionOptimizerV6: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.8, + cognitive_coefficient=2.8, + social_coefficient=2.8, + inertia_decay=0.995, + quantum_jump_rate=0.25, + quantum_scale=0.12, + adaptive_scale_factor=0.4, + multimodal_enhancement=True, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.multimodal_enhancement = multimodal_enhancement + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Quantum jump with refined adaptive scaling based on the global best score + quantum_deviation = np.random.normal( + 0, + self.quantum_scale * (1 + self.adaptive_scale_factor * np.tanh(global_best_score)), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Velocity update with increased coefficients for cognitive and social terms + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + self.inertia_weight *= ( + self.inertia_decay + ) # Gradual reduction in inertia weight to favor exploitation over time + + # Multimodal enhancement with dynamic reinitialization + if self.multimodal_enhancement and evaluations % 500 == 0: + idx_to_reset = np.random.choice( + np.arange(self.population_size), size=int(0.25 * self.population_size), replace=False + ) + particles[idx_to_reset] = np.random.uniform(self.lb, self.ub, (len(idx_to_reset), self.dim)) + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumAdaptiveConvergenceOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveConvergenceOptimizer.py new file mode 100644 index 000000000..699140f07 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveConvergenceOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class QuantumAdaptiveConvergenceOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 80 # Adjusted population size for more diversity + self.F_base = 0.5 # Base differential weight + self.CR_base = 0.9 # Base crossover probability + self.q_influence_base = 0.05 # Base quantum influence + self.q_influence_max = 0.25 # Max quantum influence + self.adaptation_rate = 0.01 # Rate of parameter adaptation + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + F = self.F_base + CR = self.CR_base + q_influence = self.q_influence_base + + # Main optimization loop + for iteration in range(int(self.budget / self.pop_size)): + for i in range(self.pop_size): + # Adaptively adjust F and CR based on iteration progress + F = self.F_base + (0.8 - self.F_base) * (iteration / (self.budget / self.pop_size)) + CR = self.CR_base - (self.CR_base - 0.5) * (iteration / (self.budget / self.pop_size)) + q_influence = self.q_influence_base + (self.q_influence_max - self.q_influence_base) * np.sin( + np.pi * iteration / (self.budget / self.pop_size) + ) + + # Quantum-driven mutation + if np.random.rand() < q_influence: + mutation = best_ind + np.random.normal(0, 1, self.dim) * ( + 0.1 + 0.2 * iteration / (self.budget / self.pop_size) + ) + else: + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutation = a + F * (b - c) + + mutation = np.clip(mutation, -5.0, 5.0) + + # Binomial crossover + trial = np.where(np.random.rand(self.dim) < CR, mutation, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumAdaptiveCrossoverRefinement.py b/nevergrad/optimization/lama/QuantumAdaptiveCrossoverRefinement.py new file mode 100644 index 000000000..988acef87 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveCrossoverRefinement.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumAdaptiveCrossoverRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 400 # Further increased population size for diversity + mutation_factor = 0.85 # More aggressive initial mutation factor + crossover_prob = 0.75 # Higher initial crossover probability + adaptivity_rate = 0.05 # Slower change rate for better stability + + # Initialize population and fitness + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + + # Refinement: Increase elite size to 30% for better quality samples + elite_size = int(population_size * 0.3) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Selecting parents adaptively with preference to elites + if np.random.rand() < 0.7: # Higher chance to select from elite + parents_indices = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parents_indices] + else: + parent1, parent2 = population[np.random.choice(range(population_size), 2, replace=False)] + + # Crossover + mask = np.random.rand(self.dim) < crossover_prob + child = np.where(mask, parent1, parent2) + + # Mutation with dynamic adaptivity based on trigonometric modulation + dynamic_mutation = mutation_factor * (1 + np.sin(2 * np.pi * current_budget / self.budget)) + quantum_noise = np.random.randn(self.dim) * dynamic_mutation + child += quantum_noise + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive mutation and crossover adjustments using a slower rate + mutation_factor *= 1 - adaptivity_rate + crossover_prob = np.clip(crossover_prob + adaptivity_rate * (np.random.rand() - 0.5), 0.5, 1) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py b/nevergrad/optimization/lama/QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py new file mode 100644 index 000000000..a77ada0f4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory.py @@ -0,0 +1,137 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def dynamic_restart(self, population, fitness, func): + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def differential_memory_update(self, population): + if len(self.memory) >= self.elite_size: + for i in range(self.elite_size): + idx = np.random.randint(len(self.memory)) + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, self.memory[idx][0]) + self.memory[idx] = (trial, np.inf) # Reset fitness as it will be recalculated + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + self.differential_memory_update(population) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..4afd57d0b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolution.py @@ -0,0 +1,91 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + # Parameters for DE + self.population_size = 100 + self.F_min = 0.5 + self.F_max = 1.0 + self.CR_min = 0.1 + self.CR_max = 0.9 + + # Quantum Inspired Parameters + self.alpha = 0.75 + self.beta = 0.25 + + # Stagnation control + self.stagnation_threshold = 10 + self.stagnation_counter = 0 + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + evaluations = self.population_size + best_fitness_history = [self.f_opt] + + while evaluations < self.budget: + for i in range(self.population_size): + # Select three random vectors a, b, c from population + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Adaptive Mutation and Crossover + F_adaptive = self.F_min + np.random.rand() * (self.F_max - self.F_min) + CR_adaptive = self.CR_min + np.random.rand() * (self.CR_max - self.CR_min) + + mutant_vector = np.clip(a + F_adaptive * (b - c), self.lb, self.ub) + + trial_vector = np.copy(population[i]) + for j in range(self.dim): + if np.random.rand() < CR_adaptive: + trial_vector[j] = mutant_vector[j] + + # Quantum Inspired Adjustment + quantum_perturbation = np.random.normal(0, 1, self.dim) * ( + self.alpha * (self.x_opt - population[i]) + self.beta * (population[i] - self.lb) + ) + trial_vector = np.clip(trial_vector + quantum_perturbation, self.lb, self.ub) + + f_candidate = func(trial_vector) + evaluations += 1 + + if f_candidate < fitness[i]: + population[i] = trial_vector + fitness[i] = f_candidate + + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = trial_vector + self.stagnation_counter = 0 + else: + self.stagnation_counter += 1 + else: + self.stagnation_counter += 1 + + if evaluations >= self.budget: + break + + # Store best fitness + best_fitness_history.append(self.f_opt) + + # Adaptive Parameter Adjustment based on Stagnation Counter + if self.stagnation_counter > self.stagnation_threshold: + self.F_max = min(1.0, self.F_max + 0.1) + self.CR_max = min(1.0, self.CR_max + 0.1) + self.stagnation_counter = 0 + else: + self.F_max = max(self.F_min, self.F_max - 0.1) + self.CR_max = max(self.CR_min, self.CR_max - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV3.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV3.py new file mode 100644 index 000000000..ac39b13c3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV3.py @@ -0,0 +1,113 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialEvolutionV3: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-inspired mutation strategy with elite guidance + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Quantum-inspired restart mechanism + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on distance to the best solution + distances = np.linalg.norm(population - self.x_opt, axis=1) + reinit_indices = distances.argsort()[-int(self.population_size / 2) :] + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV4.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV4.py new file mode 100644 index 000000000..9f5515d64 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialEvolutionV4.py @@ -0,0 +1,113 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialEvolutionV4: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-inspired mutation strategy with elite guidance + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Quantum-inspired restart mechanism + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on distance to the best solution + distances = np.linalg.norm(population - self.x_opt, axis=1) + reinit_indices = distances.argsort()[-int(self.population_size / 2) :] + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV10.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV10.py new file mode 100644 index 000000000..3d46d994e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV10.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialStrategyV10: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Problem dimension + self.lb = -5.0 * np.ones(self.dim) # Lower bounds + self.ub = 5.0 * np.ones(self.dim) # Upper bounds + + def __call__(self, func): + population_size = 1000 + elite_size = 100 + evaluations = 0 + mutation_factor = 0.85 + crossover_probability = 0.9 + quantum_probability = 0.1 + adaptive_scaling_factor = lambda t: 0.1 * np.exp(-0.05 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution step + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Differential mutation considering best and random individual + mutant = x1 + mutation_factor * ((self.x_opt - x1) + (x2 - x3)) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV11.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV11.py new file mode 100644 index 000000000..1ecfc8379 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV11.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialStrategyV11: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Problem dimension + self.lb = -5.0 * np.ones(self.dim) # Lower bounds + self.ub = 5.0 * np.ones(self.dim) # Upper bounds + + def __call__(self, func): + population_size = 500 + elite_size = 50 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.95 + quantum_probability = 0.08 + adaptive_scaling_factor = lambda t: 0.2 * np.exp(-0.02 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution step + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Differential mutation considering best and random individual + mutant = self.x_opt + mutation_factor * (x1 - x2 + x3 - population[i]) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV12.py b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV12.py new file mode 100644 index 000000000..c7673b2a5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDifferentialStrategyV12.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumAdaptiveDifferentialStrategyV12: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bounds of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bounds of the search space + + def __call__(self, func): + population_size = 300 + elite_size = 30 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.9 + quantum_probability = 0.1 + adaptive_scaling_factor = lambda t: 0.1 * np.exp(-0.01 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution step + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Differential mutation with an extra differential vector for increased diversity + mutant = self.x_opt + mutation_factor * (x1 - x2 + x3 - x4) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV11.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV11.py new file mode 100644 index 000000000..00baf11f2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV11.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedDynamicHybridSearchV11: + def __init__( + self, + budget, + dimension=5, + population_size=400, + elite_ratio=0.25, + mutation_scale=0.6, + mutation_decay=0.0012, + crossover_prob=0.75, + quantum_intensity=0.4, + local_search_prob=0.15, + local_search_intensity=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension / 2)): # Limited local search steps + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV12.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV12.py new file mode 100644 index 000000000..9e135464e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV12.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedDynamicHybridSearchV12: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_ratio=0.20, + mutation_scale=0.5, + mutation_decay=0.0009, + crossover_prob=0.8, + quantum_intensity=0.35, + local_search_prob=0.20, + local_search_intensity=0.03, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension / 3)): # Reduced local search steps + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV13.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV13.py new file mode 100644 index 000000000..787709dda --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV13.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedDynamicHybridSearchV13: + def __init__( + self, + budget, + dimension=5, + population_size=300, + elite_ratio=0.15, + mutation_scale=0.3, + mutation_decay=0.0005, + crossover_prob=0.85, + quantum_intensity=0.30, + local_search_prob=0.25, + local_search_intensity=0.02, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension / 3)): # Reduced local search steps + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV14.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV14.py new file mode 100644 index 000000000..21095b783 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV14.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedDynamicHybridSearchV14: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_ratio=0.2, + mutation_scale=0.35, + mutation_decay=0.0003, + crossover_prob=0.88, + quantum_intensity=0.35, + local_search_prob=0.3, + local_search_intensity=0.025, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension / 2)): # Adjusted local search steps + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV15.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV15.py new file mode 100644 index 000000000..8acc52013 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedDynamicHybridSearchV15.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedDynamicHybridSearchV15: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_ratio=0.15, + mutation_scale=0.4, + mutation_decay=0.0004, + crossover_prob=0.92, + quantum_intensity=0.45, + local_search_prob=0.35, + local_search_intensity=0.03, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension * 0.8)): # More aggressive local search + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedHybridSearchV10.py b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedHybridSearchV10.py new file mode 100644 index 000000000..44e301d2d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDiversifiedHybridSearchV10.py @@ -0,0 +1,88 @@ +import numpy as np + + +class QuantumAdaptiveDiversifiedHybridSearchV10: + def __init__( + self, + budget, + dimension=5, + population_size=350, + elite_ratio=0.2, + mutation_scale=0.5, + mutation_decay=0.0015, + crossover_prob=0.8, + quantum_intensity=0.35, + local_search_prob=0.1, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = 0.1 + for _ in range(int(self.dimension / 2)): # Limited local search steps + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExploration.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExploration.py new file mode 100644 index 000000000..60c345235 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExploration.py @@ -0,0 +1,185 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Exploration improvement parameters + exploration_factor = 0.1 + max_exploration_cycles = 50 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExploration(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV2.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV2.py new file mode 100644 index 000000000..bfdc721b5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV2.py @@ -0,0 +1,194 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Exploration improvement parameters + exploration_factor = 0.1 + max_exploration_cycles = 50 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Periodic re-initialization of personal best positions to enhance exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + if personal_best_scores[idx] == global_best_score: + personal_bests[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + personal_best_scores[idx] = np.inf + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV3.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV3.py new file mode 100644 index 000000000..1c48a22b0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV3.py @@ -0,0 +1,194 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Exploration improvement parameters + exploration_factor = 0.1 + max_exploration_cycles = 50 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Periodic re-initialization of personal best positions to enhance exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + if personal_best_scores[idx] == global_best_score: + personal_bests[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + personal_best_scores[idx] = np.inf + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV3(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV4.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV4.py new file mode 100644 index 000000000..734efae2a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV4.py @@ -0,0 +1,208 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Introduce mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: # Every 150 iterations, introduce mutation-based exploration + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV4(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV5.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV5.py new file mode 100644 index 000000000..2d96c7c20 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV5.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation further to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Reduced exploration cycles to focus more on exploitation + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Introduce mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 # Lowered threshold to react more sensitively to improvements + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: # Every 150 iterations, introduce mutation-based exploration + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV5(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV6.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV6.py new file mode 100644 index 000000000..a801d8efb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV6.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation further to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Reduced exploration cycles to focus more on exploitation + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Introduce mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 # Lowered threshold to react more sensitively to improvements + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: # Every 150 iterations, introduce mutation-based exploration + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV6(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV7.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV7.py new file mode 100644 index 000000000..703a93434 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicExplorationV7.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumAdaptiveDynamicExplorationV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation further to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Reduced exploration cycles to focus more on exploitation + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Introduce mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 # Lowered threshold to react more sensitively to improvements + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Fine-tuned to increase learning rate more aggressively + else: + alpha *= 0.8 # Fine-tuned to decrease learning rate less aggressively + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: # Every 100 iterations, apply quantum-inspired exploration + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: # Every 150 iterations, introduce mutation-based exploration + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumAdaptiveDynamicExplorationV7(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveDynamicStrategyV7.py b/nevergrad/optimization/lama/QuantumAdaptiveDynamicStrategyV7.py new file mode 100644 index 000000000..72ee6a989 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveDynamicStrategyV7.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumAdaptiveDynamicStrategyV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given problem dimensionality + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 400 + elite_size = 40 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.6 + quantum_probability = 0.15 + adaptive_scaling_factor = lambda t: 0.15 * np.exp(-0.1 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step with fine-tuned control, using a time decay factor + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Symbiotic mutation and crossover with dynamic elite selection + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveEliteGuidedSearch.py b/nevergrad/optimization/lama/QuantumAdaptiveEliteGuidedSearch.py new file mode 100644 index 000000000..1c0004e92 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveEliteGuidedSearch.py @@ -0,0 +1,184 @@ +import numpy as np + + +class QuantumAdaptiveEliteGuidedSearch: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveFireworksOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveFireworksOptimizer.py new file mode 100644 index 000000000..0fc7bfa57 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveFireworksOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class QuantumAdaptiveFireworksOptimizer: + def __init__( + self, + budget=1000, + num_sparks=10, + num_iterations=100, + learning_rate=0.1, + momentum=0.9, + explosion_factor=2.0, + ): + self.budget = budget + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.learning_rate = learning_rate + self.momentum = momentum + self.explosion_factor = explosion_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_sparks, dimensions)) + best_firework = fireworks[0] + explosion_sizes = np.ones(self.num_sparks) + velocities = np.zeros_like(fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for i, firework in enumerate(fireworks): + gradient = np.zeros(dimensions) + for _ in range(self.num_sparks): + spark = firework + np.random.normal(0, 1, size=dimensions) * explosion_sizes[i] + spark = np.clip(spark, bounds.lb, bounds.ub) + gradient += (func(spark) - func(firework)) * (spark - firework) + + velocities[i] = self.momentum * velocities[i] + self.learning_rate * gradient + fireworks[i] += velocities[i] + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + explosion_sizes[i] *= self.explosion_factor + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveGradientDiversityExplorer.py b/nevergrad/optimization/lama/QuantumAdaptiveGradientDiversityExplorer.py new file mode 100644 index 000000000..073d32e31 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveGradientDiversityExplorer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class QuantumAdaptiveGradientDiversityExplorer: + def __init__( + self, + budget, + dimension=5, + population_size=50, + elite_fraction=0.1, + mutation_intensity=1.0, + crossover_rate=0.7, + quantum_prob=0.95, + gradient_prob=0.1, + gamma=0.95, + beta=0.08, + epsilon=0.02, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gradient_prob = gradient_prob + self.gamma = gamma # Quantum state update influence + self.beta = beta # Mutation decay rate + self.epsilon = epsilon # Minimum mutation factor + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = self.mutate(population[parent_idx], evaluations) + + if np.random.random() < self.gradient_prob: + child = self.gradient_step(child, func) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, self.gamma, self.dimension) * (best_individual - individual) + return individual + perturbation + + def gradient_step(self, individual, func, lr=0.01): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = np.array(individual) + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - lr * grad_est diff --git a/nevergrad/optimization/lama/QuantumAdaptiveGradientSearch.py b/nevergrad/optimization/lama/QuantumAdaptiveGradientSearch.py new file mode 100644 index 000000000..50d5e0ed9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveGradientSearch.py @@ -0,0 +1,96 @@ +import numpy as np + + +class QuantumAdaptiveGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 250 # Increased population for more diverse solutions + elite_size = 25 # Larger elite size + evaluations = 0 + mutation_factor = 0.8 # Enhanced mutation factor + crossover_probability = 0.9 # Higher crossover probability to better exploit good genes + quantum_probability = 0.1 # Starting quantum probability + convergence_threshold = 1e-8 # Tighter sensitivity for stagnation + learning_rate = 0.01 # Initial learning rate for gradient use + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + # Adjust mutation based on convergence rate + if abs(previous_best - self.f_opt) < convergence_threshold: + mutation_factor *= 0.85 # Reduce mutation factor to escape local optima + learning_rate *= 0.95 # Reduce learning rate to stabilize + else: + mutation_factor *= 1.05 # Enhance mutation factor to escape + learning_rate *= 1.05 # Increase learning rate for more aggressive search + previous_best = self.f_opt + + # Quantum-inspired exploration + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Gradient-based optimization for elite individuals + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) # Simulated gradient + population[idx] += learning_rate * gradient + population[idx] = np.clip(population[idx], self.lb, self.ub) + new_fitness = func(population[idx]) + evaluations += 1 + + if new_fitness < fitness[idx]: + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = population[idx] + + # Differential evolution steps + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability = min( + 0.2, quantum_probability * 1.05 + ) # Incremental increase in quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveHarmonicOptimizerV8.py b/nevergrad/optimization/lama/QuantumAdaptiveHarmonicOptimizerV8.py new file mode 100644 index 000000000..79f6d858b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveHarmonicOptimizerV8.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumAdaptiveHarmonicOptimizerV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 1500 # Increased population size for greater coverage + self.sigma_initial = 1.5 # Initial mutation spread + self.sigma_final = 0.005 # Finer final mutation spread for precision + self.elitism_factor = 0.02 # Reduced elitism to increase diversity + self.CR_initial = 0.95 # High initial crossover probability + self.CR_final = 0.1 # Reduced final crossover probability + self.q_impact_initial = 0.02 # Higher initial quantum impact + self.q_impact_final = 0.6 # Increased final quantum impact for deep exploitation + self.q_impact_increase_rate = 0.002 # Gradual increase in quantum impact + self.harmonic_scale = 0.3 # Scaling factor for harmonic modulation + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation with quantum harmonic adjustments + idxs = [j for j in range(self.pop_size) if j != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + harmonic_term = self.harmonic_scale * np.sin( + 2 * np.pi * iteration / (self.budget / self.pop_size) + ) + mutant = a + sigma * (b - c + q_impact * np.sin(c + harmonic_term)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumAdaptiveHybridDEPSO_V7.py b/nevergrad/optimization/lama/QuantumAdaptiveHybridDEPSO_V7.py new file mode 100644 index 000000000..601aec661 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveHybridDEPSO_V7.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumAdaptiveHybridDEPSO_V7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 + w = 0.6 # Adjusted inertia weight for PSO + c1 = 1.2 # Increased cognitive coefficient for PSO + c2 = 1.3 # Increased social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart threshold for dynamic restart + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.25, beta=0.75): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..57bf376c1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + population_size=50, + inertia_weight=0.7, + cognitive_coef=1.4, + social_coef=1.6, + adaptive_intensity=0.05, + quantum_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.adaptive_intensity = adaptive_intensity + self.quantum_rate = quantum_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + # Regular PSO update components + cognitive_component = self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_coef * r2 * (global_best - particles[i]) + + # Quantum-inspired leap + if np.random.rand() < self.quantum_rate: + quantum_jump = np.random.randn(self.dim) * np.abs(global_best - personal_bests[i]) + particles[i] = global_best + quantum_jump + else: + velocities[i] = ( + self.inertia_weight * velocities[i] + cognitive_component + social_component + ) + particles[i] += velocities[i] + + # Ensure particles stay within bounds + particles[i] = np.clip(particles[i], self.lb, self.ub) + + # Function evaluation + score = func(particles[i]) + evaluations += 1 + + # Update personal and global bests + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Adaptive intensity adjustment + if evaluations % (self.budget // 10) == 0: + self.inertia_weight *= 1 - self.adaptive_intensity + self.quantum_rate *= 1 + self.adaptive_intensity + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizerV3.py b/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizerV3.py new file mode 100644 index 000000000..abb027b3b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveHybridOptimizerV3.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumAdaptiveHybridOptimizerV3: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.8, + cognitive_coef=2.1, + social_coef=2.1, + quantum_probability=0.15, + damping_factor=0.95, + adaptive_quantum_shift=0.02, + division_factor=5, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.adaptive_quantum_shift = adaptive_quantum_shift + self.division_factor = division_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + # Dynamically adjust inertia weight based on progress + inertia = self.inertia_weight * np.exp(-evaluations / (self.budget / self.division_factor)) + + velocities[i] = ( + inertia * velocities[i] + + self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + + self.social_coef * r2 * (global_best - particles[i]) + ) + + if np.random.rand() < self.quantum_probability: + # Quantum leap with normal distribution scaling + quantum_leap = global_best + np.random.normal(0, self.dim**-0.5, self.dim) * ( + global_best - personal_bests[i] + ) + particles[i] = np.clip(quantum_leap, self.lb, self.ub) + else: + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Update quantum probability and inertia weight + self.quantum_probability += self.adaptive_quantum_shift + self.inertia_weight *= self.damping_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumAdaptiveHybridStrategyV4.py b/nevergrad/optimization/lama/QuantumAdaptiveHybridStrategyV4.py new file mode 100644 index 000000000..7d6a7816f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveHybridStrategyV4.py @@ -0,0 +1,64 @@ +import numpy as np + + +class QuantumAdaptiveHybridStrategyV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 300 # Slightly increased population size + self.sigma_initial = 0.1 # Initial mutation spread + self.elitism_factor = 3 # Reduced elite size for more diversity + self.sigma_decay = 0.98 # Steeper decay for sigma + self.CR_base = 0.95 # Higher initial crossover probability + self.CR_decay = 0.99 # Slower decay rate for crossover probability + self.q_impact = 0.1 # Lower quantum impact + self.q_impact_increase = 0.05 # Increase quantum impact dynamically + self.q_impact_limit = 0.95 # Maximum limit for quantum impact + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + q_impact = self.q_impact + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Mutation: DE-like strategy with quantum effects + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= self.sigma_decay + CR *= self.CR_decay + if iteration % (self.budget // (10 * self.pop_size)) == 0 and q_impact < self.q_impact_limit: + q_impact += self.q_impact_increase # Dynamically increase quantum impact + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumAdaptiveLevyDifferentialSearch.py b/nevergrad/optimization/lama/QuantumAdaptiveLevyDifferentialSearch.py new file mode 100644 index 000000000..9d1445a6f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveLevyDifferentialSearch.py @@ -0,0 +1,159 @@ +import numpy as np + + +class QuantumAdaptiveLevyDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 50 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.2 + social_coefficient = 1.8 + differential_weight = 0.9 + crossover_rate = 0.7 + quantum_factor = 0.1 + + memory_size = 30 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveLevyDynamicDifferentialSwarmV4.py b/nevergrad/optimization/lama/QuantumAdaptiveLevyDynamicDifferentialSwarmV4.py new file mode 100644 index 000000000..0dc2ece21 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveLevyDynamicDifferentialSwarmV4.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumAdaptiveLevyDynamicDifferentialSwarmV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.3 * progress + social_coefficient = 1.5 - 0.3 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.4 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveLevyMemeticSearch.py b/nevergrad/optimization/lama/QuantumAdaptiveLevyMemeticSearch.py new file mode 100644 index 000000000..f3d0ae95a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveLevyMemeticSearch.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumAdaptiveLevyMemeticSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 30 + inertia_weight = 0.7 + cognitive_coefficient = 1.5 + social_coefficient = 1.3 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + memory_size = 5 + memory = [] + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + for i in range(population_size): + inertia_weight = 0.9 - 0.5 * (evaluations / self.budget) + + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Memetic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveLevyOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveLevyOptimizer.py new file mode 100644 index 000000000..3a54dcde7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveLevyOptimizer.py @@ -0,0 +1,187 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumAdaptiveLevyOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.4 + self.social_weight = 1.4 + self.quantum_weight = 0.35 + self.elite_fraction = 0.1 + self.memory_size = 20 + self.local_search_probability = 0.9 + self.stagnation_threshold = 5 + self.adaptive_factor = 1.1 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + self.strategy_probabilities = [1 / 3, 1 / 3, 1 / 3] + self.strategy_rewards = [0, 0, 0] + self.strategy_uses = [0, 0, 0] + + def levy_flight(self, size, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, size=size) + v = np.random.normal(0, 1, size=size) + step = u / abs(v) ** (1 / beta) + return 0.01 * step + + def select_strategy(self): + return np.random.choice([0, 1, 2], p=self.strategy_probabilities) + + def update_strategy_probabilities(self): + total_rewards = sum(self.strategy_rewards) + if total_rewards > 0: + self.strategy_probabilities = [r / total_rewards for r in self.strategy_rewards] + else: + self.strategy_probabilities = [1 / 3, 1 / 3, 1 / 3] + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + strategy = self.select_strategy() + if strategy == 0: + # Standard PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 1: + # Quantum PSO update + if np.random.rand() < self.quantum_weight: + levy_step = self.levy_flight(self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * levy_step + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif strategy == 2: + # Hybrid update with local search + if np.random.rand() < self.local_search_probability: + new_population = self.local_search(func, population[i]) + if new_population is not None: + population[i], fitness[i] = new_population + eval_count += 1 + else: + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + self.strategy_rewards[strategy] += best_fitness - trial_fitness + self.strategy_uses[strategy] += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.update_strategy_probabilities() + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + if res.success: + return res.x, res.fun + return None + + +# Example usage +# optimizer = QuantumAdaptiveLevyOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveLevySwarmOptimizationV2.py b/nevergrad/optimization/lama/QuantumAdaptiveLevySwarmOptimizationV2.py new file mode 100644 index 000000000..9d9f70fe9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveLevySwarmOptimizationV2.py @@ -0,0 +1,157 @@ +import numpy as np + + +class QuantumAdaptiveLevySwarmOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.4 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.5 - 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) + + def __call__(self, func): + population_size = 40 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.6: + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithm.py b/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithm.py new file mode 100644 index 000000000..c99c01ef5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithm.py @@ -0,0 +1,118 @@ +import numpy as np + + +class QuantumAdaptiveMemeticAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithmV2.py b/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithmV2.py new file mode 100644 index 000000000..2468960a6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMemeticAlgorithmV2.py @@ -0,0 +1,118 @@ +import numpy as np + + +class QuantumAdaptiveMemeticAlgorithmV2: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMemeticSearchV2.py b/nevergrad/optimization/lama/QuantumAdaptiveMemeticSearchV2.py new file mode 100644 index 000000000..17f9e0a22 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMemeticSearchV2.py @@ -0,0 +1,112 @@ +import numpy as np + + +class QuantumAdaptiveMemeticSearchV2: + def __init__(self, budget, population_size=80, tau1=0.1, tau2=0.1, memetic_rate=0.5, alpha=0.2): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - 0.01 * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMultiPhaseDE_v6.py b/nevergrad/optimization/lama/QuantumAdaptiveMultiPhaseDE_v6.py new file mode 100644 index 000000000..537231511 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMultiPhaseDE_v6.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumAdaptiveMultiPhaseDE_v6: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate and improvement history + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMultiPopulationDE.py b/nevergrad/optimization/lama/QuantumAdaptiveMultiPopulationDE.py new file mode 100644 index 000000000..356b91c6d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMultiPopulationDE.py @@ -0,0 +1,178 @@ +import numpy as np + + +class QuantumAdaptiveMultiPopulationDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveMultiStrategyEvolution.py b/nevergrad/optimization/lama/QuantumAdaptiveMultiStrategyEvolution.py new file mode 100644 index 000000000..5166ca22a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveMultiStrategyEvolution.py @@ -0,0 +1,184 @@ +import numpy as np + + +class QuantumAdaptiveMultiStrategyEvolution: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumAdaptiveNesterovGradientEnhancer.py b/nevergrad/optimization/lama/QuantumAdaptiveNesterovGradientEnhancer.py new file mode 100644 index 000000000..6690fff34 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveNesterovGradientEnhancer.py @@ -0,0 +1,64 @@ +import numpy as np + + +class QuantumAdaptiveNesterovGradientEnhancer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.15, + momentum=0.95, + quantum_influence_rate=0.03, + adaptive_lr_factor=0.98, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.position = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + self.velocity = np.zeros(self.dim) + self.best_position = np.copy(self.position) + self.best_fitness = np.inf + + def evaluate(self, func, position): + return func(position) + + def update_position(self): + # Predict future position using current velocity (Nesterov acceleration) + future_position = self.position + self.momentum * self.velocity + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + + # Update velocity with noise as a surrogate gradient and include Nesterov correction + noise = np.random.normal(0, 1, self.dim) * self.learning_rate + self.velocity = self.momentum * self.velocity - noise + self.position += self.velocity + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + # Adaptive learning rate decay + self.learning_rate *= self.adaptive_lr_factor + + def quantum_influence(self): + if np.random.rand() < self.quantum_influence_rate: + quantum_jump = np.random.normal(0, 0.1 * (self.upper_bound - self.lower_bound), self.dim) + self.position += quantum_jump + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + + for _ in range(self.budget): + self.update_position() + self.quantum_influence() + fitness = self.evaluate(func, self.position) + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = np.copy(self.position) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/QuantumAdaptiveNesterovSynergy.py b/nevergrad/optimization/lama/QuantumAdaptiveNesterovSynergy.py new file mode 100644 index 000000000..1170d7eb9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveNesterovSynergy.py @@ -0,0 +1,69 @@ +import numpy as np + + +class QuantumAdaptiveNesterovSynergy: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.9, + quantum_influence_rate=0.15, + adaptive_lr_factor=0.95, + elite_fraction=0.25, + noise_factor=0.15, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.elite_fraction = elite_fraction + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_factor = noise_factor + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_fraction), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_fraction), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_fraction), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_influence_rate: + self.population[i] += ( + np.random.normal(0, 1, self.dim) + * self.noise_factor + * (self.upper_bound - self.lower_bound) + ) + + noise = np.random.normal(0, 1, self.dim) + self.velocities[i] = self.momentum * self.velocities[i] - self.learning_rate * noise + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + self.learning_rate *= self.adaptive_lr_factor + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumAdaptiveRefinementOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveRefinementOptimizer.py new file mode 100644 index 000000000..59f8e64f5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveRefinementOptimizer.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumAdaptiveRefinementOptimizer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.95, + quantum_prob=0.05, + elite_rate=0.3, + noise_intensity=0.05, + perturbation_scale=0.05, + refinement_factor=0.9, + adaptive_decay=0.01, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + self.refinement_factor = refinement_factor # Enhanced refinement in the optimization process + self.adaptive_decay = adaptive_decay # Decay rate for adaptive learning and quantum probability + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob * np.exp( + -self.adaptive_decay * i + ): # Adaptive quantum probability + quantum_jump = np.random.normal( + 0.0, self.perturbation_scale * np.exp(-self.refinement_factor * i), self.dim + ) + self.population[i] += quantum_jump + else: + lr = self.learning_rate * np.exp(-self.adaptive_decay * i) # Adaptive learning rate + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = ( + self.momentum * self.velocities[i] + lr * (global_best - self.population[i]) + noise + ) + future_position = self.population[i] + self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategy.py b/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategy.py new file mode 100644 index 000000000..3c61b2aa4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumAdaptiveRefinementStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 300 # Further increased population size for broader exploration + self.sigma_initial = 1.5 # Start with a wider spread in the population + self.learning_rate = 0.025 # Finer learning rate adjustment + self.CR_base = 0.7 # Higher initial crossover probability for vigorous exploration + self.q_impact_initial = 0.8 # Increased initial quantum impact for stronger exploration + self.q_impact_decay = 0.98 # Slower decay rate for the quantum impact + self.sigma_decay = 0.98 # Slower decay for sigma to enhance exploration period + self.elitism_factor = 15 # Increased elitism factor to stabilize top performers + + def __call__(self, func): + # Initialize population within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Setup for elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + current_CR = ( + self.CR_base - (iteration / (self.budget / self.pop_size)) * 0.2 + ) # Gradually decrease CR + + for i in range(self.pop_size): + if i in elites: # Elite members are kept unchanged + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c + quantum_term) + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = np.clip(current_CR + self.learning_rate * np.random.randn(), 0, 1) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites regularly + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategyV2.py b/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategyV2.py new file mode 100644 index 000000000..23b02e52c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveRefinementStrategyV2.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumAdaptiveRefinementStrategyV2: + def __init__( + self, + budget, + dim=5, + pop_size=30, + elite_rate=0.15, + mutation_scale=0.1, + quantum_jump_scale=0.05, + adaptation_factor=0.99, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.mutation_scale = mutation_scale + self.quantum_jump_scale = quantum_jump_scale + self.adaptation_factor = adaptation_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def refine_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Adaptive mutation and quantum jump scale + self.mutation_scale *= self.adaptation_factor + self.quantum_jump_scale *= self.adaptation_factor + + # Refinement and reproduction from elites + for idx in non_elite_indices: + if np.random.rand() < self.adaptation_factor: # Increasingly favor quantum jumps over time + # Quantum jump inspired by best solution + self.population[idx] = self.best_solution + np.random.normal( + 0, self.quantum_jump_scale, self.dim + ) + else: + # Crossover and mutation + parent1 = self.population[np.random.choice(elite_indices)] + parent2 = self.population[np.random.choice(elite_indices)] + crossover_point = np.random.randint(self.dim) + child = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + mutation = np.random.normal(0, self.mutation_scale, self.dim) + self.population[idx] = child + mutation + + # Ensure boundaries are respected + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = self.pop_size + + while evaluations < self.budget: + self.evaluate_fitness(func) + self.refine_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumAdaptiveStrategicEnhancer.py b/nevergrad/optimization/lama/QuantumAdaptiveStrategicEnhancer.py new file mode 100644 index 000000000..fc3b2a0a0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveStrategicEnhancer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class QuantumAdaptiveStrategicEnhancer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 250 # Increased population size for more diversity + inertia_weight = 0.85 # Reduced initial inertia for quicker exploitation + cognitive_coefficient = 2.0 # Increased cognitive learning factor + social_coefficient = 2.0 # Increased social learning factor + quantum_momentum = 0.2 # Higher quantum influence for global search + exploration_phase = 0.5 # Control parameter for exploration phase duration + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Adaptive inertia weight adjustment for strategic balance + w = inertia_weight * (0.5 + 0.5 * np.exp(-4 * current_budget / (self.budget * exploration_phase))) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics + if np.random.rand() < 0.1 * (1 - np.exp(-3 * current_budget / self.budget)): + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions with strategic constraints + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia_component + cognitive_component + social_component + velocity[i] = np.clip(velocity[i], -1, 1) # Clamping velocities + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and personal and global best updates + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumAdaptiveVelocityOptimizer.py b/nevergrad/optimization/lama/QuantumAdaptiveVelocityOptimizer.py new file mode 100644 index 000000000..0b5197d35 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAdaptiveVelocityOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumAdaptiveVelocityOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # A smaller population to focus on promising areas + inertia_weight = 0.9 # Initial inertia weight + cognitive_coefficient = 2.0 # Increased personal learning factor + social_coefficient = 2.0 # Increased social learning factor + velocity_limit = 0.2 # Smaller velocity limit for finer movements + quantum_momentum = 0.05 # Momentum for quantum jumps + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + inertia_decay = np.power((1 - (current_budget / self.budget)), 2) # Exponential decay for inertia + w = inertia_weight * inertia_decay + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired jump based on a probability that decays with time + quantum_probability = 0.1 * np.exp(-5 * (current_budget / self.budget)) + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Standard PSO update with clamping of velocities + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Update personal and global bests + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumAnnealingDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumAnnealingDifferentialEvolution.py new file mode 100644 index 000000000..542eeee54 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAnnealingDifferentialEvolution.py @@ -0,0 +1,148 @@ +import numpy as np + + +class QuantumAnnealingDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, idx)) + list(range(idx + 1, self.pop_size)) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = 1.0 # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Perform local search on the elite half of the population + elite_indices = np.argsort(fitness)[: self.pop_size // 2] + elite_population = new_population[elite_indices] + + for i in range(len(elite_indices)): + elite_population[i] = self.local_search(elite_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + population = np.copy(new_population) + for i in range(self.pop_size): + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha, T) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Gradually decrease temperature + T *= 0.9 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumAssistedHybridOptimizerV1.py b/nevergrad/optimization/lama/QuantumAssistedHybridOptimizerV1.py new file mode 100644 index 000000000..3d35fb452 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumAssistedHybridOptimizerV1.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumAssistedHybridOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 150 + elite_size = 15 + evaluations = 0 + mutation_factor = 0.75 # Adjusted mutation factor for exploration + crossover_probability = 0.8 # Medium level crossover probability + quantum_probability = 0.02 # Initial quantum probability + convergence_threshold = 1e-6 # Fine-grained threshold for detecting stagnation + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + if abs(previous_best - self.f_opt) < convergence_threshold: + mutation_factor *= 0.85 # Decrease mutation factor to intensify search + previous_best = self.f_opt + + # Quantum-inspired exploration + if np.random.rand() < quantum_probability: + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Differential evolution step + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + mutation_factor * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Dynamically adjust the quantum probability + if evaluations % 1000 == 0: + quantum_probability = min( + 0.1, quantum_probability + 0.01 + ) # Gradually increase the quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumBalancedAdaptiveNesterovStrategy.py b/nevergrad/optimization/lama/QuantumBalancedAdaptiveNesterovStrategy.py new file mode 100644 index 000000000..f7b2b342c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumBalancedAdaptiveNesterovStrategy.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumBalancedAdaptiveNesterovStrategy: + def __init__( + self, + budget, + dim=5, + learning_rate=0.05, + momentum=0.8, + quantum_influence_rate=0.1, + adaptive_lr_factor=0.98, + elite_fraction=0.2, + noise_factor=0.1, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.elite_fraction = elite_fraction + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_factor = noise_factor + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_fraction), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_fraction), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_fraction), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + self.population[i] = np.copy(self.population[i]) + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + # Quantum influence occasionally gives a random kick + if np.random.rand() < self.quantum_influence_rate: + self.population[i] += ( + np.random.normal(0, 1, self.dim) + * self.noise_factor + * (self.upper_bound - self.lower_bound) + ) + + # Nesterov accelerated gradient with noise as surrogate gradient + noise = np.random.normal(0, 1, self.dim) + self.velocities[i] = self.momentum * self.velocities[i] - self.learning_rate * noise + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + # Adaptive decay of learning rate + self.learning_rate *= self.adaptive_lr_factor + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumBalancedEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumBalancedEvolutionStrategy.py new file mode 100644 index 000000000..163b582e8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumBalancedEvolutionStrategy.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumBalancedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 300 # Adjusted population size for a balance between exploration and convergence + self.sigma_initial = 1.0 # Initial mutation spread + self.elitism_factor = 10 # Percentage of elite individuals to carry forward without mutation + self.sigma_decay = 0.98 # Decay factor for mutation spread + self.CR_base = 0.7 # Initial crossover probability + self.CR_decay = 0.99 # Decay rate for crossover probability + self.q_impact = 0.5 # Quantum impact factor on mutation vector + self.convergence_threshold = 1e-7 # More sensitive convergence threshold + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + + # Evolutionary loop + for _ in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Select random indices excluding the current index and elites + idxs = [idx for idx in range(self.pop_size) if idx != i and idx >= elite_size] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + self.q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update sigma and CR + sigma *= self.sigma_decay + CR *= self.CR_decay + + # Check for convergence + current_best_idx = np.argmin(fitness) + if abs(best_fitness - fitness[current_best_idx]) < self.convergence_threshold: + break + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancedOptimizerV16.py b/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancedOptimizerV16.py new file mode 100644 index 000000000..9bd0c8f61 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancedOptimizerV16.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumCognitionAdaptiveEnhancedOptimizerV16: + def __init__( + self, + budget=10000, + population_size=70, + inertia_weight=0.85, + cognitive_coeff=2.3, + social_coeff=2.3, + inertia_decay=0.99, + quantum_jump_rate=0.01, + min_quantum_scale=0.001, + max_quantum_scale=0.02, + quantum_decay=0.97, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.quantum_decay = quantum_decay + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + # Adjust decay rates and scaling factors based on progress + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancerV8.py b/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancerV8.py new file mode 100644 index 000000000..04403f0fc --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionAdaptiveEnhancerV8.py @@ -0,0 +1,88 @@ +import numpy as np + + +class QuantumCognitionAdaptiveEnhancerV8: + def __init__( + self, + budget=10000, + population_size=150, + inertia_weight=0.95, + cognitive_coefficient=2.0, + social_coefficient=2.0, + inertia_decay=0.95, + quantum_jump_rate=0.15, + quantum_scale=0.15, + adaptive_scale_factor=0.3, + exploration_phase_ratio=0.4, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.exploration_phase_ratio = exploration_phase_ratio + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + exploration_count = int(self.budget * self.exploration_phase_ratio) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations < exploration_count: + # Phase-Dependent Quantum Jump Rate + quantum_probability = self.quantum_jump_rate * (1 - evaluations / exploration_count) + if np.random.rand() < quantum_probability: + quantum_deviation = np.random.normal( + 0, + self.quantum_scale + * (1 + self.adaptive_scale_factor * np.log(1 + global_best_score)), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + else: + # Reduced Quantum Influence for Fine-Tuning + r1, r2 = np.random.rand(2) + velocities[i] *= self.inertia_decay + velocities[i] += self.cognitive_coefficient * r1 * ( + personal_bests[i] - particles[i] + ) + self.social_coefficient * r2 * (global_best - particles[i]) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionAdaptiveTuningOptimizerV14.py b/nevergrad/optimization/lama/QuantumCognitionAdaptiveTuningOptimizerV14.py new file mode 100644 index 000000000..0036d8591 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionAdaptiveTuningOptimizerV14.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumCognitionAdaptiveTuningOptimizerV14: + def __init__( + self, + budget=10000, + population_size=35, + inertia_weight=0.85, + cognitive_coeff=2.0, + social_coeff=2.0, + inertia_decay=0.99, + quantum_jump_rate=0.02, + min_quantum_scale=0.01, + max_quantum_scale=0.05, + quantum_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.quantum_decay = quantum_decay + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + # Adjust decay rates and scaling factors based on progress + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionDynamicAdaptationOptimizerV30.py b/nevergrad/optimization/lama/QuantumCognitionDynamicAdaptationOptimizerV30.py new file mode 100644 index 000000000..668d186d2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionDynamicAdaptationOptimizerV30.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumCognitionDynamicAdaptationOptimizerV30: + def __init__( + self, + budget=10000, + population_size=600, + inertia_weight=0.9, + cognitive_coeff=2.1, + social_coeff=2.1, + inertia_decay=0.98, + quantum_jump_rate=0.6, + quantum_scale=0.4, + quantum_decay=0.97, + mutation_rate=0.03, + mutation_scale=0.07, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_individual_positions = particles.copy() + best_individual_scores = np.array([func(p) for p in particles]) + global_best_position = best_individual_positions[np.argmin(best_individual_scores)] + global_best_score = min(best_individual_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Perform a quantum jump for global exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best_position + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_individual_positions[i] - particles[i]) + + self.social_coeff * r2 * (global_best_position - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation for enhanced local exploration + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_individual_scores[i]: + best_individual_positions[i] = candidate_position + best_individual_scores[i] = score + + if score < global_best_score: + global_best_position = candidate_position + global_best_score = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/QuantumCognitionEnhancedOptimizerV7.py b/nevergrad/optimization/lama/QuantumCognitionEnhancedOptimizerV7.py new file mode 100644 index 000000000..86febeedf --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionEnhancedOptimizerV7.py @@ -0,0 +1,87 @@ +import numpy as np + + +class QuantumCognitionEnhancedOptimizerV7: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coefficient=2.5, + social_coefficient=2.5, + inertia_decay=0.99, + quantum_jump_rate=0.2, + quantum_scale=0.1, + adaptive_scale_factor=0.5, + exploration_phase_ratio=0.3, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.exploration_phase_ratio = exploration_phase_ratio + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + exploration_phase = self.budget * self.exploration_phase_ratio + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations < exploration_phase: + # Enhanced exploration phase + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal( + 0, + self.quantum_scale + * (1 + self.adaptive_scale_factor * np.log(1 + global_best_score)), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + else: + # Enhanced exploitation phase + r1, r2 = np.random.rand(2) + velocities[i] *= self.inertia_decay + velocities[i] += self.cognitive_coefficient * r1 * ( + personal_bests[i] - particles[i] + ) + self.social_coefficient * r2 * (global_best - particles[i]) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionFocusedHybridOptimizerV21.py b/nevergrad/optimization/lama/QuantumCognitionFocusedHybridOptimizerV21.py new file mode 100644 index 000000000..113d822fb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionFocusedHybridOptimizerV21.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumCognitionFocusedHybridOptimizerV21: + def __init__( + self, + budget=10000, + population_size=80, + inertia_weight=0.95, + cognitive_coeff=2.5, + social_coeff=2.5, + inertia_decay=0.98, + quantum_jump_rate=0.1, + quantum_scale=0.02, + quantum_decay=0.95, + mutation_rate=0.08, + mutation_scale=0.15, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionFocusedOptimizerV17.py b/nevergrad/optimization/lama/QuantumCognitionFocusedOptimizerV17.py new file mode 100644 index 000000000..6453070ca --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionFocusedOptimizerV17.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumCognitionFocusedOptimizerV17: + def __init__( + self, + budget=10000, + population_size=80, + inertia_weight=0.7, + cognitive_coeff=2.1, + social_coeff=2.1, + inertia_decay=0.98, + quantum_jump_rate=0.02, + min_quantum_scale=0.002, + max_quantum_scale=0.015, + quantum_decay=0.95, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.quantum_decay = quantum_decay + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with focused adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + # Adjust decay rates and scaling factors based on progress + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV19.py b/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV19.py new file mode 100644 index 000000000..e1c583605 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV19.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumCognitionHybridEvolutionaryOptimizerV19: + def __init__( + self, + budget=10000, + population_size=120, + inertia_weight=0.85, + cognitive_coeff=2.8, + social_coeff=2.8, + inertia_decay=0.98, + quantum_jump_rate=0.02, + quantum_scale=0.01, + quantum_decay=0.95, + mutation_rate=0.1, + mutation_scale=0.05, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal(0, self.quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV20.py b/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV20.py new file mode 100644 index 000000000..b0dbf9029 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridEvolutionaryOptimizerV20.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumCognitionHybridEvolutionaryOptimizerV20: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coeff=2.1, + social_coeff=2.1, + inertia_decay=0.99, + quantum_jump_rate=0.05, + quantum_scale=0.015, + quantum_decay=0.97, + mutation_rate=0.05, + mutation_scale=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV23.py b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV23.py new file mode 100644 index 000000000..cfb5abede --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV23.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumCognitionHybridOptimizerV23: + def __init__( + self, + budget=10000, + population_size=150, + inertia_weight=0.7, + cognitive_coeff=2.5, + social_coeff=2.5, + inertia_decay=0.95, + quantum_jump_rate=0.25, + quantum_scale=0.05, + quantum_decay=0.95, + mutation_rate=0.15, + mutation_scale=0.25, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV24.py b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV24.py new file mode 100644 index 000000000..60779ced7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV24.py @@ -0,0 +1,87 @@ +import numpy as np + + +class QuantumCognitionHybridOptimizerV24: + def __init__( + self, + budget=10000, + population_size=200, + inertia_weight=0.8, + cognitive_coeff=2.2, + social_coeff=2.2, + inertia_decay=0.99, + quantum_jump_rate=0.3, + quantum_scale=0.1, + quantum_decay=0.98, + mutation_rate=0.1, + mutation_scale=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhancing quantum behavior for better exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism with dynamic scaling + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal( + 0, self.mutation_scale * (1 - evaluations / self.budget), self.dim + ) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV25.py b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV25.py new file mode 100644 index 000000000..651c4332d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV25.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumCognitionHybridOptimizerV25: + def __init__( + self, + budget=10000, + population_size=250, + inertia_weight=0.9, + cognitive_coeff=1.5, + social_coeff=1.5, + inertia_decay=0.995, + quantum_jump_rate=0.2, + quantum_scale=0.15, + quantum_decay=0.995, + mutation_rate=0.05, + mutation_scale=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhanced quantum behavior for better exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism with adaptive scaling + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV26.py b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV26.py new file mode 100644 index 000000000..00630e2a1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV26.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumCognitionHybridOptimizerV26: + def __init__( + self, + budget=10000, + population_size=300, + inertia_weight=0.85, + cognitive_coeff=1.2, + social_coeff=1.2, + inertia_decay=0.98, + quantum_jump_rate=0.3, + quantum_scale=0.2, + quantum_decay=0.99, + mutation_rate=0.03, + mutation_scale=0.08, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhanced quantum behavior for better exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism with adaptive scaling + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV27.py b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV27.py new file mode 100644 index 000000000..02f5be868 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionHybridOptimizerV27.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumCognitionHybridOptimizerV27: + def __init__( + self, + budget=10000, + population_size=350, + inertia_weight=0.9, + cognitive_coeff=1.4, + social_coeff=1.4, + inertia_decay=0.95, + quantum_jump_rate=0.35, + quantum_scale=0.25, + quantum_decay=0.95, + mutation_rate=0.02, + mutation_scale=0.05, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Quantum leap for enhanced global exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation for local exploration adjustments + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionOptimizerV2.py b/nevergrad/optimization/lama/QuantumCognitionOptimizerV2.py new file mode 100644 index 000000000..8be32c874 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionOptimizerV2.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumCognitionOptimizerV2: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.7, + cognitive_coefficient=1.5, + social_coefficient=1.5, + inertia_decay=0.95, + quantum_jump_rate=0.1, + quantum_scale=0.1, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum jump with positive scale handling + if np.random.rand() < self.quantum_jump_rate: + # Adjust quantum scale dynamically based on score + adjusted_quantum_scale = self.quantum_scale * (1 + np.log(1 + abs(global_best_score))) + quantum_deviation = np.random.normal(0, max(0.0001, adjusted_quantum_scale), self.dim) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Standard PSO update with inertia, cognitive, and social components + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Decay inertia weight to promote convergence + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumCognitionTrajectoryOptimizerV28.py b/nevergrad/optimization/lama/QuantumCognitionTrajectoryOptimizerV28.py new file mode 100644 index 000000000..51d30ab97 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitionTrajectoryOptimizerV28.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumCognitionTrajectoryOptimizerV28: + def __init__( + self, + budget=10000, + population_size=400, + inertia_weight=0.92, + cognitive_coeff=1.5, + social_coeff=1.5, + inertia_decay=0.98, + quantum_jump_rate=0.4, + quantum_scale=0.3, + quantum_decay=0.97, + mutation_rate=0.015, + mutation_scale=0.03, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_individual_positions = particles.copy() + best_individual_scores = np.array([func(p) for p in particles]) + global_best_position = best_individual_positions[np.argmin(best_individual_scores)] + global_best_score = min(best_individual_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Perform a quantum jump for global exploration + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best_position + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_individual_positions[i] - particles[i]) + + self.social_coeff * r2 * (global_best_position - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation for enhanced local exploration + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_individual_scores[i]: + best_individual_positions[i] = candidate_position + best_individual_scores[i] = score + + if score < global_best_score: + global_best_position = candidate_position + global_best_score = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/QuantumCognitiveAdaptiveOptimizer.py b/nevergrad/optimization/lama/QuantumCognitiveAdaptiveOptimizer.py new file mode 100644 index 000000000..6f6e9686f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCognitiveAdaptiveOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class QuantumCognitiveAdaptiveOptimizer: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.75, + cognitive_coefficient=2.0, + social_coefficient=2.0, + inertia_decay=0.98, + quantum_jump_rate=0.2, + quantum_scale=0.2, + adaptive_response=10, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_response = ( + adaptive_response # Depth of historical performance to adapt parameters dynamically + ) + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + performance_history = [] + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Perform quantum jump with dynamic scaling based on adaptive response + quantum_deviation = np.random.normal(0, self.quantum_scale, self.dim) + particles[i] = global_best + quantum_deviation * (self.ub - self.lb) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Standard PSO update with inertia, cognitive, and social components + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + performance_history.append(global_best_score) + + # Adapt parameters based on recent performance + if len(performance_history) > self.adaptive_response: + recent_avg_improvement = np.mean(np.diff(performance_history[-self.adaptive_response :])) + if recent_avg_improvement < 0: + self.quantum_jump_rate *= 1.1 # Increase quantum behavior when stagnation is detected + else: + self.quantum_jump_rate *= 0.9 # Decrease quantum behavior when progress is good + + self.inertia_weight *= self.inertia_decay # Gradually reduce inertia to fine-tune exploration + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumControlledDiversityStrategy.py b/nevergrad/optimization/lama/QuantumControlledDiversityStrategy.py new file mode 100644 index 000000000..848900bc0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumControlledDiversityStrategy.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumControlledDiversityStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_ratio=0.1, + mutation_scale_base=0.6, + mutation_decay=0.01, + crossover_rate=0.85, + quantum_intensity=0.95, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_scale_base = mutation_scale_base + self.mutation_decay = mutation_decay + self.crossover_rate = crossover_rate + self.quantum_intensity = quantum_intensity + + def __call__(self, func): + # Initialize population randomly within the search space + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + evaluations = self.population_size + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent1, parent2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[parent1], population[parent2]) + else: + offspring = population[np.random.choice(elite_indices)] + + if np.random.random() < self.quantum_intensity: + offspring = self.quantum_state_update(offspring, best_individual) + + mutation_scale = self.adaptive_mutation_scale(evaluations) + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) + + new_population[i] = offspring + + # Evaluate new population + fitness = np.array([func(x) for x in new_population]) + evaluations += self.population_size + + # Update best individual if a better one is found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = new_population[current_best_idx] + + population = new_population + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = np.random.normal(0, 0.1, self.dimension) + return individual + perturbation * (best_individual - individual) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_scale_base * np.exp(-self.mutation_decay * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/QuantumCooperativeCrossoverStrategy.py b/nevergrad/optimization/lama/QuantumCooperativeCrossoverStrategy.py new file mode 100644 index 000000000..ad1e99d09 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCooperativeCrossoverStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumCooperativeCrossoverStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 100 + self.elite_size = 10 + self.crossover_fraction = 0.8 + self.mutation_scale = 0.1 # Enhanced mutation scale for improved exploration + self.quantum_mutation_scale = 0.3 # Distinct scale for quantum mutation + self.quantum_probability = 0.05 # More controlled quantum probability + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover_and_mutate(self, parents, num_offspring): + offspring = np.empty((num_offspring, self.dim)) + num_parents = len(parents) + for i in range(num_offspring): + if np.random.rand() < self.crossover_fraction: + p1, p2 = np.random.choice(num_parents, 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = parents[p1][:cross_point] + offspring[i][cross_point:] = parents[p2][cross_point:] + else: + offspring[i] = parents[np.random.randint(num_parents)] + + # Mutation - either normal or quantum + if np.random.rand() < self.quantum_probability: + mutation_shift = np.random.normal(0, self.quantum_mutation_scale, self.dim) + else: + mutation_shift = np.random.normal(0, self.mutation_scale, self.dim) + offspring[i] += mutation_shift + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + num_offspring = self.population_size - self.elite_size + offspring = self.crossover_and_mutate(elite_population, num_offspring) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..0c9064477 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,189 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.9 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.25 + self.adaptive_learning_rate = 0.02 + self.strategy_switches = [0.2, 0.5, 0.8] + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < 0.1: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = QuantumCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py b/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py new file mode 100644 index 000000000..d27018fb6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumCovarianceMatrixDifferentialEvolutionRefinedV2.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumCovarianceMatrixDifferentialEvolutionRefinedV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 + self.sigma = 0.1 # Adjusted sigma + self.c1 = 0.01 # Reduced for more stability + self.cmu = 0.02 # Reduced for more stability + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 # Adjusted differential weight + self.CR = 0.8 # Adjusted crossover rate + self.elitism_rate = 0.1 # Reduced elitism rate + self.eval_count = 0 + self.alpha_levy = 0.1 # Adjusted Levy flight step size + self.levy_prob = 0.05 # Adjusted Levy flight probability + self.adaptive_learning_rate = 0.1 # Adjusted adaptive learning rate + self.strategy_switches = [0.2, 0.5, 0.8] # Adjusted strategy switching points + self.local_opt_prob = 0.3 # Adjusted probability of local optimization + self.learning_rate_decay = 0.9 # Adjusted learning rate decay + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.05 # Further reduced hybridization probability for stability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = QuantumCovarianceMatrixDifferentialEvolutionRefinedV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch.py new file mode 100644 index 000000000..0a809fe93 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.5 + self.beta = 0.4 + self.local_search_prob = 0.9 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + for i in range(len(memory)): + if fitness[i] < memory[i][1]: + memory[i] = (population[i], fitness[i]) + return memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart.py new file mode 100644 index 000000000..69097cb37 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + self.adaptive_memory_rate = 0.5 + self.diversity_tracking_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = population[np.argsort(fitness)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(population, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch.py new file mode 100644 index 000000000..862554293 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch.py @@ -0,0 +1,131 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def advanced_restart(self, population, fitness, func): + std_dev = np.std(fitness) + + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.advanced_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning.py new file mode 100644 index 000000000..fccf2aa55 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning.py @@ -0,0 +1,161 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + self.adaptive_memory_rate = 0.5 + self.diversity_tracking_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = population[np.argsort(fitness)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(population, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement.py new file mode 100644 index 000000000..712d376c1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement.py @@ -0,0 +1,109 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 50 + self.memory_update_interval = 25 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + memory = self.update_memory(memory, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning.py new file mode 100644 index 000000000..ac04ffb00 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.dynamic_restart_threshold = 0.01 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.dynamic_restart_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def dynamic_restart(self, population, fitness, func): + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def differential_memory_update(self, population): + if len(self.memory) >= self.elite_size: + for i in range(self.elite_size): + idx = np.random.randint(len(self.memory)) + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, self.memory[idx][0]) + self.memory[idx] = (trial, np.inf) + + def elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * (np.random.randn(self.dim)) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + self.differential_memory_update(population) + + population, fitness = self.adaptive_restart(population, fitness, func) + + # Apply elitist learning mechanism + new_individual = self.elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning.py new file mode 100644 index 000000000..80481381f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning.py @@ -0,0 +1,176 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.dynamic_restart_threshold = 0.01 + self.diversity_memory = [] + self.restart_memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def advanced_restart(self, population, fitness, func): + std_dev = np.std(fitness) + self.diversity_memory.append(std_dev) + + if len(self.diversity_memory) > self.restart_threshold: + self.diversity_memory.pop(0) + + recent_diversity = np.mean(self.diversity_memory[-10:]) + + if recent_diversity < self.dynamic_restart_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + + return population, fitness + + def dynamic_restart(self, population, fitness, func): + std_dev = np.std(fitness) + self.restart_memory.append(std_dev) + + if len(self.restart_memory) > self.restart_threshold: + self.restart_memory.pop(0) + + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def differential_memory_update(self, population): + if len(self.memory) >= self.elite_size: + for i in range(self.elite_size): + idx = np.random.randint(len(self.memory)) + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, self.memory[idx][0]) + self.memory[idx] = (trial, np.inf) + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + self.differential_memory_update(population) + + population, fitness = self.advanced_restart(population, fitness, func) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts.py new file mode 100644 index 000000000..fd6c232cd --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.elite_size = 20 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.8 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-4 + self.adaptive_restart_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + for i in range(len(memory)): + if fitness[i] < memory[i][1]: + memory[i] = (population[i], fitness[i]) + return memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch.py new file mode 100644 index 000000000..d7261b668 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch.py @@ -0,0 +1,167 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 15 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 15 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + # Standard DE mutation and crossover + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicElitismAndRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicElitismAndRestarts.py new file mode 100644 index 000000000..1568591b8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicElitismAndRestarts.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithDynamicElitismAndRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.initial_num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, global_best, global_best_fit, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + return population, fitness, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + num_elites = self.initial_num_elites + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + population, fitness, global_best, global_best_fit = self.adaptive_restart( + population, fitness, global_best, global_best_fit, func + ) + + if evaluations % (self.population_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + num_elites = max(2, min(self.initial_num_elites, int(self.population_size / 10))) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart.py new file mode 100644 index 000000000..5f2a9e465 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart.py @@ -0,0 +1,105 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-2 + self.adaptive_restart_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEliteGuidance.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEliteGuidance.py new file mode 100644 index 000000000..a1b3f1448 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEliteGuidance.py @@ -0,0 +1,112 @@ +import numpy as np + + +class QuantumDifferentialEvolutionWithEliteGuidance: + def __init__(self, budget=10000, population_size=30, elite_size=5): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-inspired mutation strategy with elite guidance + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Quantum-inspired restart mechanism + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on distance to the best solution + distances = np.linalg.norm(population - self.x_opt, axis=1) + reinit_indices = distances.argsort()[-int(self.population_size / 2) :] + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitism.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitism.py new file mode 100644 index 000000000..6c2dbda35 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitism.py @@ -0,0 +1,88 @@ +import numpy as np + + +class QuantumDifferentialEvolutionWithElitism: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + initial_F = 0.8 # Initial Differential weight + initial_CR = 0.9 # Initial Crossover probability + elite_rate = 0.2 # Elite rate to maintain a portion of elites + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + eval_count = population_size + + def quantum_position_update(position, best_position, amplitude=0.15): + return position + np.random.uniform(-amplitude, amplitude, position.shape) * ( + best_position - position + ) + + def adapt_parameters(eval_count, budget): + # Adaptive strategy for F and CR with linear decay and random components + adaptive_F = initial_F * (1 - eval_count / budget) + 0.1 * np.random.rand() + adaptive_CR = initial_CR * (1 - eval_count / budget) + 0.1 * np.random.rand() + return adaptive_F, adaptive_CR + + while eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(elite_rate * population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, population_size): + if eval_count >= self.budget: + break + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters(eval_count, self.budget) + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if eval_count % 3 == 0: # Apply quantum every third step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = QuantumDifferentialEvolutionWithElitism(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch.py new file mode 100644 index 000000000..d235239b9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch.py @@ -0,0 +1,108 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.85 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-2 + self.adaptive_restart_interval = 100 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + for i in range(len(memory)): + if fitness[i] < memory[i][1]: + memory[i] = (population[i], fitness[i]) + return memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch.py new file mode 100644 index 000000000..90f044c32 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch.py @@ -0,0 +1,147 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def advanced_restart(self, population, fitness, func): + std_dev = np.std(fitness) + + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def hybrid_search(self, x, func): + candidate_positions = [x + np.random.randn(self.dim) * 0.1 for _ in range(10)] + candidate_positions = [np.clip(pos, self.bounds[0], self.bounds[1]) for pos in candidate_positions] + candidate_fitness = [func(pos) for pos in candidate_positions] + + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.advanced_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + # Apply hybrid search mechanism + if evaluations < self.budget: + hybrid_individual, f_hybrid_individual = self.hybrid_search(self.x_opt, func) + evaluations += 1 + if f_hybrid_individual < self.f_opt: + self.f_opt = f_hybrid_individual + self.x_opt = hybrid_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch.py new file mode 100644 index 000000000..335611b8e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.success: + return result.x, result.fun + else: + return x, func(x) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def advanced_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + return new_individual + + def hybrid_search(self, x, func): + candidate_positions = [x + np.random.randn(self.dim) * 0.1 for _ in range(10)] + candidate_positions = [np.clip(pos, self.bounds[0], self.bounds[1]) for pos in candidate_positions] + candidate_fitness = [func(pos) for pos in candidate_positions] + + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + memory_candidates = [ + self.memorized_individuals[np.random.randint(len(self.memorized_individuals))] + for _ in range(self.elite_size) + ] + for mem_ind in memory_candidates: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.advanced_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + # Apply enhanced elitist learning mechanism + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + # Apply enhanced hybrid search mechanism + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch.py new file mode 100644 index 000000000..35d2fb13b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch.py @@ -0,0 +1,152 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def hybrid_search(self, x, func): + candidate_positions = [ + np.clip(x + np.random.randn(self.dim) * 0.1, self.bounds[0], self.bounds[1]) for _ in range(10) + ] + candidate_fitness = [func(pos) for pos in candidate_positions] + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts.py new file mode 100644 index 000000000..400883525 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts.py @@ -0,0 +1,107 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.elite_size = 20 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.8 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + for i in range(len(memory)): + if fitness[i] < memory[i][1]: + memory[i] = (population[i], fitness[i]) + return memory + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch.py new file mode 100644 index 000000000..a971fd271 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch.py @@ -0,0 +1,152 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def hybrid_search(self, x, func): + candidate_positions = [ + np.clip(x + np.random.randn(self.dim) * 0.1, self.bounds[0], self.bounds[1]) for _ in range(10) + ] + candidate_fitness = [func(pos) for pos in candidate_positions] + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithMultiStrategyLearning.py b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithMultiStrategyLearning.py new file mode 100644 index 000000000..5b8a94a64 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialEvolutionWithMultiStrategyLearning.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialEvolutionWithMultiStrategyLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py new file mode 100644 index 000000000..d9d79e6fa --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleOptimizerWithAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elits, beta): + p_best = elits[np.random.randint(len(elits))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def adaptive_restart(self, particles, fitness, personal_bests, personal_best_fits, func): + best_idx = np.argmin(personal_best_fits) + best_particle = personal_bests[best_idx] + best_fit = personal_best_fits[best_idx] + + if np.std(personal_best_fits) < 1e-3: + print("Restarting...") + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = best_particle + global_best_fit = best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py new file mode 100644 index 000000000..c41925986 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleOptimizerWithEliteGuidedMutation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < 1e-3 or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + # Refinement step for elite particles + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py new file mode 100644 index 000000000..65d46cb37 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleOptimizerWithEliteRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def adaptive_restart(self, particles, fitness, personal_bests, personal_best_fits, func): + best_idx = np.argmin(personal_best_fits) + best_particle = personal_bests[best_idx] + best_fit = personal_best_fits[best_idx] + + if np.std(personal_best_fits) < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = best_particle + global_best_fit = best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + # Refinement step for elite particles + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithElitism.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithElitism.py new file mode 100644 index 000000000..9e48895c5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithElitism.py @@ -0,0 +1,122 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleOptimizerWithElitism: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elits, beta): + p_best = elits[np.random.randint(len(elits))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py new file mode 100644 index 000000000..dd4ce61d1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < 1e-3 or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + # Refinement step for elite particles + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleSwarmRefinement.py b/nevergrad/optimization/lama/QuantumDifferentialParticleSwarmRefinement.py new file mode 100644 index 000000000..6e39702ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleSwarmRefinement.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumDifferentialParticleSwarmRefinement: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 100 + self.init_num_niches = 10 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.2 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, p_best, g_best, beta): + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - g_best) * np.log(1 / u) + return x + Q * v + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Quantum inspired update + quantum_trial = self.quantum_update(trial, local_bests[n], global_best, self.beta) + quantum_trial = np.clip(quantum_trial, self.bounds[0], self.bounds[1]) + f_quantum_trial = func(quantum_trial) + evaluations += 1 + + if f_quantum_trial < f_trial: + trial, f_trial = quantum_trial, f_quantum_trial + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalAcceleratorV19.py b/nevergrad/optimization/lama/QuantumDirectionalAcceleratorV19.py new file mode 100644 index 000000000..bf03c3edf --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalAcceleratorV19.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalAcceleratorV19: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategic parameters further refined for enhanced performance + population_size = 250 # Reduced population for quicker generations + gamma_initial = 0.8 # Reduced initial exploration intensity + gamma_final = 0.00005 # Lower final gamma for finer exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.7) + ) # Faster decay for earlier fine-tuning + elite_fraction = 0.05 # Further reduced elite fraction to increase competition + mutation_strength = 0.01 # Adjusted mutation strength for broader exploration + mutation_decay = 0.99975 # Slower decay to sustain mutation relevance + crossover_probability = 0.9 # Increased to promote more genetic variation + tunneling_frequency = 0.98 # Increased frequency for more frequent quantum effects + directional_weight = 1.5 # Increased weight to improve directional exploitation + feedback_rate = 0.25 # Increased feedback for stronger adaptive responses + + # Initialize and evaluate the initial population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select parents from elites + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) # No crossover, direct copy + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling to potentially escape local minima + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancer.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancer.py new file mode 100644 index 000000000..ad9b6728d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancer.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumDirectionalEnhancer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 30 # Increased population size for better exploration + gamma_initial = 0.7 # Initial gamma for broad initial exploration + gamma_final = 0.01 # Final gamma for fine-tuned exploitation + gamma_decay = (gamma_final / gamma_initial) ** (1 / self.budget) # Exponential decay + elite_fraction = 0.2 # Fraction of population considered elite + mutation_strength = 0.05 # Increase mutation strength for more variability + crossover_probability = 0.75 # Slight increase in crossover probability + tunneling_frequency = 0.1 # Adjusted tunneling frequency for less frequent jumps + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + + # Elite selection based on elite fraction + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards global best + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV10.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV10.py new file mode 100644 index 000000000..28773e6da --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV10.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV10: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 500 # Adjusted population size for better control + gamma_initial = 1.0 # Lower initial gamma for focused exploration + gamma_final = 0.0001 # Finer final gamma for improved exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.5) + ) # Faster initial decay + elite_fraction = 0.1 # Increased elite fraction for better elitism + mutation_strength = 0.005 # Reduced mutation strength for finer granularity + mutation_decay = 0.99 # Adjusted mutation decay + crossover_probability = 0.8 # Increased crossover probability + tunneling_frequency = 0.6 # Increased tunneling frequency + directional_weight = 0.95 # Increased weight for tunneling direction + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV11.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV11.py new file mode 100644 index 000000000..99eaadc46 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV11.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV11: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 400 # Reduced population size for better control + gamma_initial = 0.8 # Further reduced initial gamma for focused exploration + gamma_final = 0.00005 # Finer final gamma for improved exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.6)) # Adjusted decay rate + elite_fraction = 0.2 # Increased elite fraction for better elitism + mutation_strength = 0.001 # Reduced mutation strength for finer granularity + mutation_decay = 0.995 # Slower mutation decay + crossover_probability = 0.85 # Increased crossover probability + tunneling_frequency = 0.7 # Increased tunneling frequency + directional_weight = 0.98 # Increased weight for tunneling direction + feedback_rate = 0.1 # New parameter: rate of feedback mechanism + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + # Feedback mechanism: using feedback from previous generation's performance + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV12.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV12.py new file mode 100644 index 000000000..5b7e7d156 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV12.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV12: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 450 # Slightly increased population for broader search initially + gamma_initial = 0.9 # Slightly increased initial gamma for stronger early exploration + gamma_final = 0.00001 # Further reduced final gamma for refined exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.65) + ) # Fine-tuned decay rate + elite_fraction = 0.25 # Increased elite fraction to maintain a stronger genetic pool + mutation_strength = 0.0008 # Further reduced mutation strength for more precise adjustments + mutation_decay = 0.998 # Slower mutation decay for extended effectiveness + crossover_probability = 0.88 # Slightly increased crossover probability + tunneling_frequency = 0.8 # Increased tunneling frequency for enhanced quantum behavior + directional_weight = 0.95 # Adjusted weight for tunneling direction + feedback_rate = 0.12 # Increased feedback rate for more aggressive adaptation + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Dynamically adjust mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV13.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV13.py new file mode 100644 index 000000000..6c1665692 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV13.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV13: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 500 # Increased population size for a broader genetic base + gamma_initial = 0.95 # Slightly increased initial gamma for stronger initial exploration + gamma_final = 0.000001 # Smaller final gamma for finer exploitation at the end + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.75) + ) # Fine-tuned decay rate + elite_fraction = 0.3 # Increased elite fraction for a more robust selection process + mutation_strength = 0.0005 # Lower mutation strength for finer perturbations + mutation_decay = 0.999 # Slower decay to maintain mutation effectiveness longer + crossover_probability = 0.9 # Increased crossover probability for enhanced genetic mixing + tunneling_frequency = 0.85 # Increased tunneling frequency for more frequent quantum leaps + directional_weight = 0.98 # Increased weight for tunneling direction for better target pursuit + feedback_rate = 0.15 # Increased feedback rate to enhance reactive adaptation + + # Initialize the population and evaluate fitness + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Select elite individuals + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction through crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Perform crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV14.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV14.py new file mode 100644 index 000000000..f4a8a02a0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV14.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV14: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 600 # Increased population size for enhanced genetic diversity + gamma_initial = 0.95 # Initial gamma for strong exploration + gamma_final = 0.00001 # Smaller final gamma for extremely fine exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.85) + ) # Adjusted decay rate + elite_fraction = 0.35 # Increased elite fraction for stronger selection + mutation_strength = 0.0003 # Reduced mutation strength for finer perturbations + mutation_decay = 0.9995 # Very slow decay to sustain mutation effectiveness + crossover_probability = 0.92 # Increased crossover probability for more genetic mixing + tunneling_frequency = 0.90 # Increased tunneling frequency for more frequent quantum leaps + directional_weight = 0.99 # Increased weight in tunneling direction for better target pursuit + feedback_rate = 0.12 # Slightly reduced feedback rate for more stable adaptation + + # Initialize the population and evaluate fitness + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Select elite individuals + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction through crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Perform crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV15.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV15.py new file mode 100644 index 000000000..7608aeafd --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV15.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV15: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjusted strategy parameters for improved performance + population_size = 500 # Reduced population for better convergence + gamma_initial = 1.0 # Higher initial gamma for more aggressive early exploration + gamma_final = 0.0001 # Very low final gamma for precise exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.9) + ) # Adjusted for longer decay + elite_fraction = 0.3 # Slightly reduced elite fraction to balance exploration-exploitation + mutation_strength = 0.0001 # Lower mutation strength for more precise updates + mutation_decay = 0.9996 # Slower decay to maintain effectiveness + crossover_probability = 0.88 # Reduced to favor elite propagation + tunneling_frequency = 0.88 # Adjusted frequency to balance regular updates and quantum leaps + directional_weight = 1.0 # Full weight on direction for aggressive pursuit + feedback_rate = 0.1 # Slight decrease to stabilize feedback mechanism + + # Initialize population and evaluate fitness + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Continue to adjust mutation strength + + # Select elite individuals + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduce through crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Parent selection + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[parents[0]], new_population[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.concatenate((parent1[: self.dim // 2], parent2[self.dim // 2 :])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV16.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV16.py new file mode 100644 index 000000000..3019f2c9f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV16.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV16: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Refinement of strategy parameters for improved performance + population_size = 450 # Further reduction for more focused search + gamma_initial = 1.1 # Slightly increased initial gamma for more aggressive early exploration + gamma_final = 0.00005 # Lower final gamma for finer exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.95) + ) # Extended decay period + elite_fraction = 0.25 # Further refined elite fraction to balance exploration-exploitation + mutation_strength = 0.0002 # Adjusted mutation strength for more effective mutations + mutation_decay = 0.9997 # Adjusted decay to maintain effectiveness for longer + crossover_probability = 0.85 # Slight reduction to favor elite propagation + tunneling_frequency = 0.85 # Adjusted to achieve better balance between updates and quantum leaps + directional_weight = 1.05 # Slightly increased to enhance aggressive pursuit of best regions + feedback_rate = 0.08 # Reduced to enhance stability in feedback mechanism + + # Initialize population and evaluate fitness + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Continue to adjust mutation strength + + # Select elite individuals + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduce through crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Parent selection + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[parents[0]], new_population[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.concatenate((parent1[: self.dim // 2], parent2[self.dim // 2 :])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += (gamma * directional_weight + feedback) * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV17.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV17.py new file mode 100644 index 000000000..888f79ef9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV17.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV17: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Updated strategy parameters for more nuanced exploration and exploitation + population_size = 500 # Increased population for broader initial sampling + gamma_initial = 1.2 # Stronger initial exploration + gamma_final = 0.0001 # Lower final gamma for very fine exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.9) + ) # Faster decay to shift to exploitation sooner + elite_fraction = 0.15 # Reduced elite fraction to push more diversity + mutation_strength = 0.001 # Increased mutation strength for more aggressive diversification early on + mutation_decay = 0.9995 # Slower decay to keep mutations relevant longer + crossover_probability = 0.75 # Reduced to favor more mutations over crossover + tunneling_frequency = 0.9 # Increased frequency to leverage quantum effects more often + directional_weight = 1.1 # Increased to enhance exploitation around elites + feedback_rate = 0.1 # Increased feedback to adjust offspring based on average fitness improvements + + # Initialize and evaluate initial population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select parents from elites + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) # No crossover, direct copy + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling to potentially jump out of local minima + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV18.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV18.py new file mode 100644 index 000000000..39c90d3d3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV18.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV18: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters with further enhancements + population_size = 300 # Reduced population for faster computation while maintaining diversity + gamma_initial = 0.9 # Reduced initial exploration intensity + gamma_final = 0.0001 # Maintained low final gamma for fine exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.8) + ) # Adjusted decay rate for an earlier shift to fine-tuning + elite_fraction = 0.1 # Further reduced elite fraction to stimulate more competition + mutation_strength = 0.005 # Adjusted mutation strength for broader exploration early on + mutation_decay = 0.9998 # Adjusted decay to sustain mutation relevance longer + crossover_probability = 0.85 # Increased to encourage more genetic mixing + tunneling_frequency = 0.95 # Increased frequency to leverage quantum effects more widely + directional_weight = 1.2 # Higher weight to enhance directional exploitation + feedback_rate = 0.2 # Increased feedback for stronger adaptive responses + + # Initialize and evaluate the initial population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength dynamically + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select parents from elites + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) # No crossover, direct copy + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling to potentially escape local minima + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV2.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV2.py new file mode 100644 index 000000000..343bed111 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV2.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 50 # Further increased population size + gamma_initial = 0.9 # Initial gamma for broad initial exploration + gamma_final = 0.001 # Final gamma for fine-tuned exploitation + gamma_decay = (gamma_final / gamma_initial) ** (1 / self.budget) # Exponential decay + elite_fraction = 0.1 # Reduced elite fraction for greater diversity + mutation_strength = 0.1 # Increased mutation strength + mutation_decay = 0.995 # Introduce mutation decay + crossover_probability = 0.85 # Increased crossover probability + tunneling_frequency = 0.05 # Reduced tunneling frequency for focused jumps + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection based on elite fraction + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV3.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV3.py new file mode 100644 index 000000000..82110a5b9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV3.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 100 # Increased population size for more diversity + gamma_initial = 1.0 # Initial gamma for broad initial exploration + gamma_final = 0.0001 # Final gamma for fine-tuned exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay + elite_fraction = 0.2 # Increased fraction for better elite selection + mutation_strength = 0.2 # Initial mutation strength + mutation_decay = 0.99 # Slower mutation decay + crossover_probability = 0.9 # Higher crossover probability + tunneling_frequency = 0.1 # Slightly increased tunneling frequency for more aggressive exploration + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV4.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV4.py new file mode 100644 index 000000000..6cb3a6f98 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV4.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 120 # Increased population size for more diversity + gamma_initial = 0.5 # Reduced initial gamma for less aggressive initial exploration + gamma_final = 0.001 # Reduced final gamma for finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay + elite_fraction = 0.1 # Reduced elite fraction for more diversity in selection + mutation_strength = 0.1 # Lower initial mutation strength + mutation_decay = 0.995 # Slower mutation decay + crossover_probability = 0.95 # Higher crossover probability + tunneling_frequency = 0.15 # Increased tunneling frequency for more aggressive exploration + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV5.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV5.py new file mode 100644 index 000000000..d8584864f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV5.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 150 # Further increased population size for more diversity + gamma_initial = 0.6 # Slightly increased initial gamma for a balanced initial exploration + gamma_final = 0.001 # Fine exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay + elite_fraction = 0.05 # Even smaller elite fraction to encourage diversity + mutation_strength = 0.05 # Reduced mutation strength + mutation_decay = 0.992 # Gradual mutation decay + crossover_probability = 0.95 # Maintain high crossover probability + tunneling_frequency = 0.2 # Increased tunneling frequency for aggressive exploration + directional_weight = 0.4 # Weight factor for tunneling direction towards best found solution + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV6.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV6.py new file mode 100644 index 000000000..1f4b01191 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV6.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 200 # Increased population size for more diversity + gamma_initial = 0.8 # Increased initial gamma for more aggressive exploration + gamma_final = 0.0001 # Finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay + elite_fraction = 0.03 # Smaller elite fraction to encourage stronger elitism + mutation_strength = 0.03 # Further reduced mutation strength + mutation_decay = 0.99 # Slightly slower mutation decay + crossover_probability = 0.98 # Higher crossover probability + tunneling_frequency = 0.25 # Increased tunneling frequency for more aggressive exploration + directional_weight = 0.5 # Increased weight factor for tunneling direction towards best solution + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV7.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV7.py new file mode 100644 index 000000000..cbf35e680 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV7.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 300 # Further increase the population size for enhanced diversity + gamma_initial = 1.0 # Start with a higher gamma for even more aggressive exploration + gamma_final = 0.0001 # Finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay + elite_fraction = 0.01 # Increased elitism by reducing the elite fraction + mutation_strength = 0.02 # Lower mutation strength + mutation_decay = 0.999 # Finer mutation decay for more gradual reduction + crossover_probability = 0.95 # Adjusted crossover probability + tunneling_frequency = 0.3 # Increased tunneling frequency for more aggressive exploration + directional_weight = 0.7 # Increased weight factor for tunneling direction towards best solution + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Decaying mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV8.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV8.py new file mode 100644 index 000000000..a41b94a42 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV8.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 500 # Increased population size for better search spread + gamma_initial = 1.5 # Higher initial gamma for broader initial exploration + gamma_final = 0.00001 # Finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay rate + elite_fraction = 0.02 # Slightly higher elite fraction for more robust elitism + mutation_strength = 0.015 # Reduced mutation strength for finer adjustments + mutation_decay = 0.995 # Slower mutation decay rate + crossover_probability = 0.7 # Reduced crossover probability + tunneling_frequency = 0.4 # Increased tunneling frequency + directional_weight = 0.85 # Increased weight for tunneling direction + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalEnhancerV9.py b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV9.py new file mode 100644 index 000000000..3d0421b0e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalEnhancerV9.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumDirectionalEnhancerV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 1000 # Further increased population size + gamma_initial = 2.0 # Higher initial gamma for broader initial exploration + gamma_final = 0.00001 # Finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / self.budget) # Exponential decay rate + elite_fraction = 0.05 # Higher elite fraction + mutation_strength = 0.01 # Lower mutation strength for finer adjustments + mutation_decay = 0.995 # Slight reduction in decay rate + crossover_probability = 0.75 # Slightly higher crossover probability + tunneling_frequency = 0.5 # Increased tunneling frequency + directional_weight = 0.9 # Increased weight for tunneling direction + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Adjust mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + idxs = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = new_population[idxs[0]], new_population[idxs[1]] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(self.dim) + offspring = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias towards the best individual in the population + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + offspring += gamma * directional_weight * direction + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizer.py b/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizer.py new file mode 100644 index 000000000..9dc385d24 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizer.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumDirectionalFusionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality as per problem specifications + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Configuration settings + population_size = 20 # Smaller, more focused population + gamma_initial = 0.05 # Reduced initial gamma for focused search + gamma_final = 0.0001 # Very fine tuned final refinement + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.7)) + elite_fraction = 0.5 # Half of the population considered elite + mutation_strength = 0.005 # Further reduced mutation for precision + mutation_decay = 0.995 # Gradual decay to retain exploration longer + crossover_probability = 0.90 # High crossover to promote genetic diversity + tunneling_frequency = 0.1 # Reduced frequency for focused direction exploitation + directional_weight = 50.0 # Strong emphasis on directional exploitation + feedback_rate = 0.05 # Reduced feedback rate to stabilize search + + # Initialize the population uniformly within the search space + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select two parents from the elite pool + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Perform crossover based on probability + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + # Apply mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Perform quantum tunneling with directional bias less frequently + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + # Evaluate the new candidate + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + # Update the optimum if the current candidate is better + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizerV2.py b/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizerV2.py new file mode 100644 index 000000000..3adb4da4b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalFusionOptimizerV2.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumDirectionalFusionOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem specifications + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Configuration settings + population_size = 25 # Slightly larger population for better exploration + gamma_initial = 0.1 # Increased initial gamma for broader initial search scope + gamma_final = 0.0001 # Fine tuned final refinement + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.8)) + elite_fraction = 0.4 # Reduced elite fraction to promote diversity + mutation_strength = 0.01 # Increased mutation for greater exploration + mutation_decay = 0.99 # Slower decay to maintain diversity throughout + crossover_probability = 0.95 # Increased crossover to enhance beneficial gene mixing + tunneling_frequency = 0.05 # Reduced frequency to focus on top-performing individuals + directional_weight = 100.0 # Increased emphasis on directional exploitation + feedback_rate = 0.1 # Increased feedback rate to quickly adapt to promising regions + + # Initialize the population uniformly within the search space + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select two parents from the elite pool + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Perform crossover based on probability + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + # Apply mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling with directional bias less frequently + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + # Evaluate the new candidate + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + # Update the optimum if the current candidate is better + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV20.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV20.py new file mode 100644 index 000000000..a5ffaf936 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV20.py @@ -0,0 +1,85 @@ +import numpy as np + + +class QuantumDirectionalRefinerV20: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The dimensionality is fixed at 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Further refined strategic parameters + population_size = 300 # Increased population for more genetic diversity + gamma_initial = 0.7 # Reduced initial exploration intensity + gamma_final = 0.000025 # Lower final gamma for finer exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.65) + ) # Faster decay for earlier fine-tuning + elite_fraction = 0.08 # Increased elite fraction to maintain a higher quality pool + mutation_strength = 0.015 # Adjusted mutation strength for broader exploration + mutation_decay = 0.9995 # Slower decay to keep mutation relevant longer + crossover_probability = 0.95 # Increased to promote more genetic variation + tunneling_frequency = 0.99 # Increased frequency for more frequent quantum effects + directional_weight = 2.0 # Increased weight to improve directional exploitation + feedback_rate = 0.3 # Increased feedback for stronger adaptive responses + + # Initialize and evaluate the initial population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay # Dynamically adjust mutation strength + + # Elite selection + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + # Select parents from elites + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + # Crossover + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) # No crossover, direct copy + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling to potentially escape local minima + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV21.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV21.py new file mode 100644 index 000000000..a1789c53f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV21.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV21: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjusted strategic parameters + population_size = 400 # Increased population to enhance exploration and diversity + gamma_initial = 0.9 # Increased initial exploration intensity + gamma_final = 0.0001 # Lower final gamma for finer exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.7)) + elite_fraction = 0.1 # Increased elite fraction for higher quality selection + mutation_strength = 0.02 # Adjusted mutation strength for better exploration + mutation_decay = 0.999 # Balanced decay rate to maintain mutation relevance + crossover_probability = 0.97 # Increased to encourage genetic mixing + tunneling_frequency = 0.98 # More frequent quantum effects + directional_weight = 2.5 # Enhanced directional influence + feedback_rate = 0.35 # Increased feedback for adaptive responses + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV22.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV22.py new file mode 100644 index 000000000..901dc49e2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV22.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV22: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjusting strategic parameters based on previous feedback + population_size = 500 # Further increase in population to explore more diverse solutions + gamma_initial = 0.85 # Moderate initial intensity for global exploration + gamma_final = 0.00005 # Fine-tuned final value for precise exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.65)) + elite_fraction = 0.15 # Increase elite fraction for more elite selection + mutation_strength = 0.015 # Reduction in mutation strength to avoid over-disruption + mutation_decay = 0.9992 # Slower decay to sustain mutation effects longer + crossover_probability = 0.9 # Adjusted crossover probability + tunneling_frequency = 0.95 # Adjusted frequency for quantum tunneling effects + directional_weight = 3.0 # Stronger influence from elite direction + feedback_rate = 0.4 # Increased feedback for more reactive adaptation + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV23.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV23.py new file mode 100644 index 000000000..a4a03e4e6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV23.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV23: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjusting strategy based on feedback to improve convergence + population_size = 800 # Increased population size for broader exploration + gamma_initial = 0.75 # Lower initial intensity for more gradual convergence + gamma_final = 0.00001 # Lower final value for very fine exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.5)) # Faster decay + elite_fraction = 0.2 # Higher elite fraction + mutation_strength = 0.01 # Lower mutation strength for less disruptive mutations + mutation_decay = 0.9995 # Slower decay to maintain mutation relevance longer + crossover_probability = 0.85 # Slightly lower crossover probability + tunneling_frequency = 0.80 # Lower tunneling frequency to reduce random jumps + directional_weight = 5.0 # Increase elite directional influence for stronger guidance + feedback_rate = 0.5 # Enhanced feedback mechanism to adapt rapidly to landscape + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV24.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV24.py new file mode 100644 index 000000000..2a5a95e97 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV24.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV24: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Tuning parameters based on feedback from previous version + population_size = 1000 # Expanded population size for more coverage + gamma_initial = 0.5 # Softer initial quantum weight for gentle exploration + gamma_final = 0.00005 # Miniscule final quantum weight for ultra-fine tuning + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.6)) # Custom decay rate + elite_fraction = 0.25 # Increased elite fraction to maintain more high-quality solutions + mutation_strength = 0.005 # Reduced mutation strength to prevent disruptive mutations + mutation_decay = 0.9997 # Extended impact of mutations throughout more generations + crossover_probability = 0.75 # Reduced crossover probability to emphasize mutations and elitism + tunneling_frequency = 0.85 # Increased frequency for quantum jumps to escape local minima + directional_weight = 8.0 # Increased weight to forcefully pull towards elites + feedback_rate = 0.55 # Enhanced feedback sensitivity for reacting to function landscape + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV25.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV25.py new file mode 100644 index 000000000..a09eb5ab3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV25.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV25: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjustments based on the feedback from V24 + population_size = 800 # Reduced population size for more focused search + gamma_initial = 0.3 # Reduced initial gamma for less aggressive initial exploration + gamma_final = 0.0001 # Further refined final gamma for ultra-fine tuning + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.7)) # Slow decay rate + elite_fraction = 0.2 # Reduced elite fraction to promote diversity + mutation_strength = 0.003 # Further reduced mutation strength + mutation_decay = 0.99985 # Slower mutation decay for sustained exploration impact + crossover_probability = 0.7 # Slightly reduced crossover probability + tunneling_frequency = 0.9 # Increased tunneling to encourage escaping local minima + directional_weight = 10.0 # Increased weight to enhance pull towards elites + feedback_rate = 0.6 # Increased feedback sensitivity + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV26.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV26.py new file mode 100644 index 000000000..9b8ddeedd --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV26.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumDirectionalRefinerV26: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjustments based on the feedback from V25 and aiming to improve performance + population_size = 700 # Further decreased population size for increased focus + gamma_initial = 0.2 # Start with a lower initial gamma for gentle initial exploration + gamma_final = 0.00005 # Lower final gamma for more precise final adjustments + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.8) + ) # Adjusting decay rate + elite_fraction = 0.15 # Further reduced elite fraction to promote diversity + mutation_strength = 0.002 # Slightly reduced mutation strength + mutation_decay = 0.9999 # Slower mutation decay for consistent exploration + crossover_probability = 0.65 # Further reduced for more heritage preservation + tunneling_frequency = 0.95 # Increased tunneling to aggressively escape local optima + directional_weight = 12.0 # Increase weight for stronger elite attraction + feedback_rate = 0.7 # Increase feedback sensitivity + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV27.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV27.py new file mode 100644 index 000000000..5b012b76f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV27.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV27: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Further refinements based on the performance of V26 + population_size = 500 # Further decreased population size for focused elite performance + gamma_initial = 0.15 # Further refinement of initial gamma for a smoother start + gamma_final = 0.00001 # More aggressive final gamma for precise exploitation + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.85)) # More gradual decay + elite_fraction = 0.1 # Narrower elite selection to intensify competition + mutation_strength = 0.003 # Adjust mutation strength for better exploration + mutation_decay = 0.99995 # Fine-tuned mutation decay for better balance + crossover_probability = 0.7 # Slightly increased to improve genetic diversity + tunneling_frequency = 0.98 # More frequent tunneling to intensify escape from local minima + directional_weight = 15.0 # Stronger directional influence for faster convergence + feedback_rate = 0.8 # Enhanced feedback sensitivity + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV28.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV28.py new file mode 100644 index 000000000..e892aab4a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV28.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV28: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Revising parameters based on the feedback from V27 results + population_size = 300 # Smaller population size for more refined searching + gamma_initial = 0.1 # Lower start for gamma + gamma_final = 0.00001 # Maintained aggressive final gamma + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.9)) # Slower decay + elite_fraction = 0.05 # Reduced elite fraction to focus on the very best + mutation_strength = 0.002 # Reduced mutation strength + mutation_decay = 0.99997 # Slightly slower decay + crossover_probability = 0.75 # Increased crossover probability for genetic diversity + tunneling_frequency = 0.95 # Reduced frequency to focus more on direct improvements + directional_weight = 10.0 # Reduced directional influence + feedback_rate = 0.85 # Increased feedback sensitivity + + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV29.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV29.py new file mode 100644 index 000000000..b9a81f71d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV29.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumDirectionalRefinerV29: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Further tuning based on QuantumDirectionalRefinerV28 feedback + population_size = 200 # Smaller population for tighter focus + gamma_initial = 0.05 # Further reduced initial gamma + gamma_final = 0.000001 # More aggressive final gamma + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.95)) # Decays more slowly + elite_fraction = 0.1 # Increased elite fraction to maintain a better gene pool + mutation_strength = 0.001 # Further reduced mutation strength + mutation_decay = 0.99999 # Very slow decay rate + crossover_probability = 0.85 # Higher crossover probability + tunneling_frequency = 0.90 # Lowered frequency for reduced random search behavior + directional_weight = 8.0 # Adjusted directional weight + feedback_rate = 0.90 # More sensitive feedback + + # Initialization of the population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV30.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV30.py new file mode 100644 index 000000000..24480ce74 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV30.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumDirectionalRefinerV30: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Adjusted parameters based on previous feedback + population_size = 150 # Further reduction for focused exploration + gamma_initial = 0.1 # Slightly increased for a more dynamic initial search + gamma_final = 0.00001 # Lower final value to fine-tune exploitation + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.90) + ) # Adjusted decay rate + elite_fraction = 0.15 # Higher elite fraction to preserve more good solutions + mutation_strength = 0.005 # Increased mutation strength for diverse exploration + mutation_decay = 0.9999 # Adjusted decay rate + crossover_probability = 0.8 # Adjusted crossover probability + tunneling_frequency = 0.85 # Adjusted tunneling frequency + directional_weight = 10.0 # Increased weight to emphasize directionality + feedback_rate = 1.0 # Full utilization of feedback + + # Initialization of the population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV31.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV31.py new file mode 100644 index 000000000..dfd6f59f9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV31.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumDirectionalRefinerV31: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Adjustments based on feedback from V30 + population_size = 100 # More focused exploration with smaller population + gamma_initial = 0.15 # Initial wider exploration + gamma_final = 0.00001 # More precise final search + gamma_decay = np.exp( + np.log(gamma_final / gamma_initial) / (self.budget * 0.95) + ) # Slower decay for prolonged exploration + elite_fraction = 0.1 # Less elite to increase genetic diversity + mutation_strength = 0.01 # Slightly increased mutation for enhanced exploration + mutation_decay = 0.9997 # Less aggressive decay for sustained mutation impact + crossover_probability = 0.75 # Slight decrease to favor more mutation + tunneling_frequency = 0.7 # Reduced frequency to improve search diversity + directional_weight = 15.0 # Increased weight for stronger exploitation of known good directions + feedback_rate = 0.5 # Reduced feedback for less aggressive convergence + + # Initializing the population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV32.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV32.py new file mode 100644 index 000000000..42b28262b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV32.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumDirectionalRefinerV32: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + population_size = 50 # Further decrease to increase the speed of convergence + gamma_initial = 0.25 # Increase initial exploration radius + gamma_final = 0.0001 # More precise final search + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.95)) + elite_fraction = 0.2 # Increase the elite fraction to focus on best solutions + mutation_strength = 0.015 # Increase mutation strength for better exploration + mutation_decay = 0.9995 # Slower decay for sustained mutation impact + crossover_probability = 0.70 # Maintain moderate crossover to balance diversity + tunneling_frequency = 0.5 # Decreased frequency for focused exploitation + directional_weight = 20.0 # Increased weight to exploit known good directions more aggressively + feedback_rate = 0.3 # Reduced feedback for less aggressive convergence + + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDirectionalRefinerV33.py b/nevergrad/optimization/lama/QuantumDirectionalRefinerV33.py new file mode 100644 index 000000000..b03120a76 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDirectionalRefinerV33.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumDirectionalRefinerV33: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Configurations for the optimization process + population_size = 25 # Reduced to concentrate on promising candidates more quickly + gamma_initial = 0.1 # Decreased initial exploration to avoid excessive divergence + gamma_final = 0.001 # Tightened final exploration to focus on local refinements + gamma_decay = np.exp(np.log(gamma_final / gamma_initial) / (self.budget * 0.9)) + elite_fraction = 0.4 # Increased to select a larger pool of superior candidates + mutation_strength = 0.01 # Slightly reduced to prevent excessive randomness + mutation_decay = 0.999 # Slower decay to sustain exploration capabilities longer + crossover_probability = 0.85 # Increased to promote genetic diversity + tunneling_frequency = 0.2 # Decreased to focus more on local search + directional_weight = 25.0 # Increased to strongly exploit promising directions + feedback_rate = 0.1 # Reduced to stabilize convergence behavior + + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + mutation_strength *= mutation_decay + + elite_count = int(population_size * elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = list(elite_individuals) + new_fitness = list(fitness[elite_indices]) + + while len(new_population) < population_size: + if evaluations_left <= 0: + break + + parents = np.random.choice(elite_count, 2, replace=False) + parent1, parent2 = elite_individuals[parents[0]], elite_individuals[parents[1]] + + if np.random.random() < crossover_probability: + offspring = np.where(np.random.rand(self.dim) < 0.5, parent1, parent2) + else: + offspring = np.array(parent1) + + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + if np.random.rand() < tunneling_frequency: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + direction = best_individual - offspring + feedback = np.mean([f - self.f_opt for f in new_fitness]) * feedback_rate + offspring += gamma * directional_weight * direction + feedback + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/QuantumDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..fbcc998bf --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDualStrategyAdaptiveDE.py @@ -0,0 +1,149 @@ +import numpy as np + + +class QuantumDualStrategyAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-enhanced dual strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt = np.random.uniform(-0.1, 0.1, self.dim) + mutant = mutant + jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDynamicAdaptationStrategy.py b/nevergrad/optimization/lama/QuantumDynamicAdaptationStrategy.py new file mode 100644 index 000000000..aa69970af --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicAdaptationStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumDynamicAdaptationStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 250 # Reduced population size for more focused search + self.sigma_initial = 0.5 # Increased initial standard deviation for enhanced exploration + self.learning_rate = 0.1 # Adjusted learning rate for adaptiveness in crossover + self.CR_base = 0.85 # Base Crossover probability + self.q_impact_initial = 0.25 # Initial quantum impact for enhanced exploration + self.q_impact_decay = 0.995 # Slower decay rate for quantum impact + self.sigma_decay = 0.98 # Decay rate for sigma + self.elitism_factor = 5 # Slightly reduced elitism factor for diversity + self.adaptive_CR_weighting = 0.02 # Increment for adaptive crossover based on iteration progress + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Maintain a set of elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + + current_CR = self.CR_base + self.adaptive_CR_weighting * iteration + + for i in range(self.pop_size): + if i in elites: # Skip mutation for elites + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c) + quantum_term + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = current_CR + self.learning_rate * (np.random.randn()) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumDynamicBalanceOptimizer.py b/nevergrad/optimization/lama/QuantumDynamicBalanceOptimizer.py new file mode 100644 index 000000000..7cfe3b1f1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicBalanceOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumDynamicBalanceOptimizer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.05, + momentum=0.9, + quantum_prob=0.2, + elite_rate=0.2, + noise_intensity=0.1, + perturbation_scale=0.1, + balance_factor=0.5, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + self.balance_factor = ( + balance_factor # Factor controlling the balancing between quantum and classical updates + ) + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob: + # Quantum jump influenced by balancing factor + quantum_jump = np.random.normal( + 0.0, + self.perturbation_scale + * np.linalg.norm(global_best - self.population[i]) + * self.balance_factor, + self.dim, + ) + self.population[i] += quantum_jump + else: + # Classical momentum update + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = ( + self.momentum * self.velocities[i] + + self.learning_rate * (global_best - self.population[i]) + + noise + ) + future_position = self.population[i] + self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumDynamicBalancedOptimizerV7.py b/nevergrad/optimization/lama/QuantumDynamicBalancedOptimizerV7.py new file mode 100644 index 000000000..4f584a411 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicBalancedOptimizerV7.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumDynamicBalancedOptimizerV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 1000 # Increased population size for greater exploration + self.sigma_initial = 1.0 # Initial mutation spread + self.sigma_final = 0.01 # Final mutation spread for fine-tuning + self.elitism_factor = 0.05 # Reduced elitism to prevent premature convergence + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.2 # Final crossover probability + self.q_impact_initial = 0.01 # Initial quantum impact + self.q_impact_final = 0.5 # Increased final quantum impact for intensive exploitation + self.q_impact_increase_rate = 0.001 # Gradual increase in quantum impact + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation with quantum dynamic adjustment + idxs = [j for j in range(self.pop_size) if j != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + a + + sigma * (b - c + q_impact * np.sin(c)) + + q_impact * np.sin(np.pi * np.random.normal(size=self.dim)) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumDynamicExplorationOptimizerV6.py b/nevergrad/optimization/lama/QuantumDynamicExplorationOptimizerV6.py new file mode 100644 index 000000000..25e744a16 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicExplorationOptimizerV6.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumDynamicExplorationOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 800 # Further increased population size for wide exploration + self.sigma_initial = 1.0 # More aggressive initial mutation spread + self.sigma_final = 0.001 # Very tight control at the end of the search + self.elitism_factor = 0.1 # Lower elitism to encourage diversity + self.CR_initial = 0.95 # High initial crossover probability + self.CR_final = 0.25 # Very low final crossover probability for focused local search + self.q_impact_initial = 0.05 # Reduced initial quantum impact + self.q_impact_final = 0.25 # Increased final quantum impact for intensive exploitation + self.q_impact_increase_rate = 0.005 # Accelerated increase in quantum impact + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Enhanced mutation strategy with quantum dynamic adjustment + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + a + + sigma * (b - c + np.tan(a)) + + q_impact * np.tan(np.pi * np.random.laplace(size=self.dim)) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumDynamicGradientClimberV2.py b/nevergrad/optimization/lama/QuantumDynamicGradientClimberV2.py new file mode 100644 index 000000000..3a3f31127 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicGradientClimberV2.py @@ -0,0 +1,91 @@ +import numpy as np + + +class QuantumDynamicGradientClimberV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 # Reduced for focused search + elite_size = 20 # Reduced elite size + evaluations = 0 + mutation_factor = 0.5 # Lower mutation factor to reduce drastic changes + crossover_probability = 0.7 # Reduced crossover probability for more stable evolution + quantum_probability = 0.10 # Initial quantum probability + learning_rate = 0.01 # Reduced learning rate for more precise gradient steps + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + # Adaptive mechanism based on stagnation detection + if abs(previous_best - self.f_opt) < 1e-6: + mutation_factor *= 0.95 + learning_rate *= 0.95 + else: + mutation_factor *= 1.05 + learning_rate *= 1.05 + previous_best = self.f_opt + + # Quantum exploration step with adaptive probability increase + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Gradient-based refinement for elites + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) * learning_rate + population[idx] += gradient + population[idx] = np.clip(population[idx], self.lb, self.ub) + new_fitness = func(population[idx]) + evaluations += 1 + + if new_fitness < fitness[idx]: + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = population[idx] + + # Differential evolution for the rest of the population + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability *= 1.02 # Gradual increase in quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDynamicGradientClimberV3.py b/nevergrad/optimization/lama/QuantumDynamicGradientClimberV3.py new file mode 100644 index 000000000..e5f8ca3ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicGradientClimberV3.py @@ -0,0 +1,91 @@ +import numpy as np + + +class QuantumDynamicGradientClimberV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 150 # Adjusted for a more efficient search + elite_size = 15 # Proportionally reduced to match the new population size + evaluations = 0 + mutation_factor = 0.4 # Lower mutation factor for focused local search + crossover_probability = 0.6 # More conservative crossover to maintain solution stability + quantum_probability = 0.08 # Lower initial quantum probability for focused exploration + learning_rate = 0.005 # Further reduced learning rate for finer gradient adjustments + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + # Adaptive mechanism based on performance + if abs(previous_best - self.f_opt) < 1e-7: + mutation_factor *= 0.9 + learning_rate *= 0.9 + else: + mutation_factor *= 1.1 + learning_rate *= 1.1 + previous_best = self.f_opt + + # Quantum exploration step with adaptive probability adjustment + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Elite gradient refinement with more conservative updates + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) * learning_rate + new_position = np.clip(population[idx] + gradient, self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[idx]: + population[idx] = new_position + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential evolution adjustments for robust convergence + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability *= 1.01 # Gradual and controlled increase in quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumDynamicallyAdaptiveFireworksAlgorithm.py b/nevergrad/optimization/lama/QuantumDynamicallyAdaptiveFireworksAlgorithm.py new file mode 100644 index 000000000..11ed6e2b5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumDynamicallyAdaptiveFireworksAlgorithm.py @@ -0,0 +1,58 @@ +import numpy as np + + +class QuantumDynamicallyAdaptiveFireworksAlgorithm: + def __init__( + self, + budget=1000, + num_sparks=10, + num_iterations=100, + amplification_factor=1.5, + divergence_threshold=0.2, + ): + self.budget = budget + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.amplification_factor = amplification_factor + self.divergence_threshold = divergence_threshold + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_sparks, dimensions)) + best_firework = fireworks[0] + num_successful_sparks = np.zeros(self.num_sparks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for i, firework in enumerate(fireworks): + successful_sparks = 0 + for _ in range(self.num_sparks): + spark = firework + np.random.normal(0, 1, size=dimensions) * self.amplification_factor + spark = np.clip(spark, bounds.lb, bounds.ub) + f_spark = func(spark) + if f_spark < func(firework): + fireworks[i] = spark + successful_sparks += 1 + if f_spark < func(best_firework): + best_firework = spark + + num_successful_sparks[i] = successful_sparks + + avg_sparks = np.mean(num_successful_sparks) + if avg_sparks < self.divergence_threshold * self.num_sparks: + for i, firework in enumerate(fireworks): + fireworks[i] = fireworks[i] + np.random.normal(0, 1, size=dimensions) * ( + self.amplification_factor / 2 + ) + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEliteMemeticAdaptiveSearch.py b/nevergrad/optimization/lama/QuantumEliteMemeticAdaptiveSearch.py new file mode 100644 index 000000000..0f4494958 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEliteMemeticAdaptiveSearch.py @@ -0,0 +1,138 @@ +import numpy as np + + +class QuantumEliteMemeticAdaptiveSearch: + def __init__( + self, + budget, + population_size=100, + tau1=0.1, + tau2=0.1, + memetic_rate=0.6, + alpha=0.2, + learning_rate=0.01, + elite_fraction=0.2, + mutation_factor=0.8, + crossover_prob=0.9, + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + self.mutation_factor = mutation_factor + self.crossover_prob = crossover_prob + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.normal(size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_factor * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def ensemble_step(self, func, pop, scores, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step(func, pop, scores, global_best_position) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v4.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v4.py new file mode 100644 index 000000000..e36577d80 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v4.py @@ -0,0 +1,141 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveDifferentialEvolution_v4: + def __init__(self, budget=10000, initial_population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.initial_population_size = initial_population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.uniform(-0.1, 0.1, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = [-5.0, 5.0] + population_size = self.initial_population_size + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(population_size): + # Select indices for mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Enhanced Differential Mutation + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / population_size) + jolt = np.random.uniform(-jolt_intensity, jolt_intensity, dim) + mutant += jolt + + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive Population Size + if success_count / population_size > 0.2: + population_size = min(population_size + 10, self.initial_population_size * 2) + elif success_count / population_size < 0.1: + population_size = max(population_size - 10, self.initial_population_size // 2) + + # Ensure the population size is within bounds + population_size = np.clip(population_size, 10, self.initial_population_size * 2) + + # Resize population arrays if necessary + if population_size > len(population): + new_pop = np.random.uniform(bounds[0], bounds[1], (population_size - len(population), dim)) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif population_size < len(population): + population = population[:population_size] + fitness = fitness[:population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v5.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v5.py new file mode 100644 index 000000000..95f3e4211 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDifferentialEvolution_v5.py @@ -0,0 +1,141 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveDifferentialEvolution_v5: + def __init__(self, budget=10000, initial_population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.initial_population_size = initial_population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.uniform(-0.1, 0.1, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = [-5.0, 5.0] + population_size = self.initial_population_size + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(population_size): + # Select indices for mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Enhanced Differential Mutation + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / population_size) + jolt = np.random.uniform(-jolt_intensity, jolt_intensity, dim) + mutant += jolt + + mutant = np.clip(mutant, bounds[0], bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive Population Size + if success_count / population_size > 0.2: + population_size = min(population_size + 10, self.initial_population_size * 2) + elif success_count / population_size < 0.1: + population_size = max(population_size - 10, self.initial_population_size // 2) + + # Ensure the population size is within bounds + population_size = np.clip(population_size, 10, self.initial_population_size * 2) + + # Resize population arrays if necessary + if population_size > len(population): + new_pop = np.random.uniform(bounds[0], bounds[1], (population_size - len(population), dim)) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif population_size < len(population): + population = population[:population_size] + fitness = fitness[:population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDiversityStrategyV6.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDiversityStrategyV6.py new file mode 100644 index 000000000..7ae70f37f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDiversityStrategyV6.py @@ -0,0 +1,92 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveDiversityStrategyV6: + def __init__( + self, + budget, + dimension=5, + population_size=1000, + elite_fraction=0.1, + mutation_intensity=0.1, + crossover_rate=0.7, + quantum_prob=0.5, + gamma=0.25, + beta=0.2, + epsilon=0.001, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Enhanced probability for quantum-inspired state update + self.gamma = gamma # Increased gamma for better exploration in quantum state updates + self.beta = beta # Beta to adjust mutation intensity dynamically + self.epsilon = epsilon # Min threshold for mutation intensity + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum state update with increased probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Enhanced quantum state update for potentially better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDualStrategyDE.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDualStrategyDE.py new file mode 100644 index 000000000..0609c152f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveDualStrategyDE.py @@ -0,0 +1,149 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveDualStrategyDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-enhanced dual strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt = np.random.uniform(-0.1, 0.1, self.dim) + mutant = mutant + jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..24b00abd2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveExplorationOptimization.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 # Increased swarm size for better diversity + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor + max_exploration_cycles = 30 # Reduced max exploration cycles + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 # Lowered threshold + + # Hybrid loop (combining PSO, Gradient-based search, Differential Evolution, and Quantum-inspired exploration) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increased aggressive adaptation + else: + alpha *= 0.8 # Less aggressive decrease + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumEnhancedAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE.py new file mode 100644 index 000000000..a74890fcb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE.py @@ -0,0 +1,157 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveMultiPhaseDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.uniform(-0.1, 0.1, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, -5.0, 5.0) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, bounds): + """Perform differential mutation""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def cauchy_mutation(self, individual, scale): + """Apply Cauchy mutation""" + return individual + scale * np.tan(np.pi * (np.random.rand(len(individual)) - 0.5)) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = [-5.0, 5.0] + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, F, bounds) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Alternating with Cauchy mutation for exploration + if np.random.rand() < 0.1: + mutant = self.cauchy_mutation(mutant, scale=0.1) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE_v7.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE_v7.py new file mode 100644 index 000000000..40482ba31 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveMultiPhaseDE_v7.py @@ -0,0 +1,144 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveMultiPhaseDE_v7: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveOptimizer.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveOptimizer.py new file mode 100644 index 000000000..95f8dc3ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 100 # Increased population size for enhanced exploration + self.F_min = 0.1 # Minimum differential weight + self.F_max = 0.9 # Maximum differential weight + self.CR = 0.8 # Fixed crossover probability + self.q_influence = 0.15 # Quantum influence factor + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Quantum mutation and recombination loop + for iteration in range(int(self.budget / self.pop_size)): + # Adaptive differential weight based on iteration + F = self.F_min + (self.F_max - self.F_min) * np.sin( + np.pi * iteration / (self.budget / self.pop_size) + ) + + # Generate new trial vectors + for i in range(self.pop_size): + # Selection of mutation vector indices + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = pop[i] + F * (best_ind - pop[i]) + F * (a - b + c - pop[i]) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Quantum perturbation + if np.random.rand() < self.q_influence: + trial += np.random.normal(0, 0.1, self.dim) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumEnhancedAdaptiveSwarmOptimization.py b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveSwarmOptimization.py new file mode 100644 index 000000000..b80c24d68 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedAdaptiveSwarmOptimization.py @@ -0,0 +1,157 @@ +import numpy as np + + +class QuantumEnhancedAdaptiveSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.5 + 0.3 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.3 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.6: + local_search_iters = 15 + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolution.py new file mode 100644 index 000000000..d655d9150 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolution.py @@ -0,0 +1,132 @@ +import numpy as np + + +class QuantumEnhancedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [0.8] * self.memory_size + self.memory_CR = [0.9] * self.memory_size + self.memory_index = 0 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.3 + self.local_search_iters = 5 + self.diversity_threshold = 1e-4 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + mutant = self.mutate(parent1, parent2, parent3, F) + else: + mutant = self.mutate(parent1, parent2, parent3, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for idx in elite_indices: + if evaluations >= self.budget: + break + new_population[idx], new_fitness[idx] = self.local_search(new_population[idx], bounds, func) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = new_population + fitness = new_fitness + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart.py b/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart.py new file mode 100644 index 000000000..8951b904e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 60 + self.elite_size = 10 + self.alpha = 0.6 + self.beta = 0.5 + self.local_search_prob = 0.5 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 25 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def dynamic_restart(self, population, fitness, func): + if np.std(fitness) < self.diversity_threshold: + best_ind = population[np.argmin(fitness)] + population = np.array( + [ + best_ind + np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + for _ in range(self.population_size) + ] + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.elite_size] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = [(population[i], fitness[i]) for i in range(self.elite_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + memory = self.update_memory(memory, population, fitness) + elite_particles = np.array([mem[0] for mem in memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness = self.dynamic_restart(population, fitness, func) + + if evaluations % self.memory_update_interval == 0: + memory = self.update_memory(memory, population, fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDiversityExplorerV8.py b/nevergrad/optimization/lama/QuantumEnhancedDiversityExplorerV8.py new file mode 100644 index 000000000..0890cca19 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDiversityExplorerV8.py @@ -0,0 +1,92 @@ +import numpy as np + + +class QuantumEnhancedDiversityExplorerV8: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_intensity=0.1, + crossover_rate=0.9, + quantum_prob=0.7, + gamma=0.4, + beta=0.4, + epsilon=0.0001, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Further increased probability for quantum-inspired state update + self.gamma = gamma # Increased gamma for deeper exploration in quantum state updates + self.beta = beta # Adjusted beta to manage mutation intensity more aggressively + self.epsilon = epsilon # Lower threshold for mutation intensity to ensure fine-tuning + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites based on the fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Apply quantum state update with a higher probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population and update the best solution found + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Enhanced quantum state update for potentially better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..66e9a2193 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumEnhancedDynamicAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.4 # Reduced inertia weight for PSO for better convergence + c1 = 0.9 # Increased cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO remains the same + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # More frequent restarts + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.1, beta=0.9): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2.py new file mode 100644 index 000000000..1f32f4671 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.6 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.7 # Initial differential weight for DE + initial_CR = 0.8 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # More frequent restarts + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.05, beta=0.95): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3.py new file mode 100644 index 000000000..11b66582f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.6 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.7 # Initial differential weight for DE + initial_CR = 0.8 # Initial crossover probability for DE + restart_threshold = 0.05 * self.budget # More frequent restarts + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.05, beta=0.95): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4.py new file mode 100644 index 000000000..29aa74848 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4.py @@ -0,0 +1,162 @@ +import numpy as np + + +class QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.7 # Increased inertia weight for better balance between exploration and exploitation + c1 = 1.2 # Increased cognitive coefficient for PSO + c2 = 1.3 # Increased social coefficient for PSO + initial_F = 0.7 # Slightly reduced initial differential weight for DE to increase stability + initial_CR = 0.8 # Reduced initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.15: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.15: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior( + population, global_best, alpha=0.25, beta=0.75 + ): # Increased alpha and slightly reduced beta + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5.py new file mode 100644 index 000000000..6647e4c79 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Maintain increased population size for better exploration + w = 0.6 # Adjusted inertia weight for PSO + c1 = 1.1 # Slightly reduced cognitive coefficient for PSO + c2 = 1.2 # Slightly reduced social coefficient for PSO + initial_F = 0.7 # Differential weight for DE + initial_CR = 0.85 # Reduced crossover probability for DE + restart_threshold = 0.1 * self.budget # Dynamic restart threshold + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.6 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.25, beta=0.75): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution.py new file mode 100644 index 000000000..53017b780 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution.py @@ -0,0 +1,186 @@ +import numpy as np + + +class QuantumEnhancedDynamicDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=80, + elite_size=10, + local_search_steps=15, + F_min=0.3, + F_max=0.8, + Cr_min=0.2, + Cr_max=0.9, + perturbation_intensity=0.05, + perturbation_decay=0.95, + alpha=0.6, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.6, 0.7 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v2.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v2.py new file mode 100644 index 000000000..098a059e1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v2.py @@ -0,0 +1,186 @@ +import numpy as np + + +class QuantumEnhancedDynamicDifferentialEvolution_v2: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=20, + local_search_steps=10, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.05, + perturbation_decay=0.9, + alpha=0.6, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.7 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v3.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v3.py new file mode 100644 index 000000000..1fe6bd082 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicDifferentialEvolution_v3.py @@ -0,0 +1,186 @@ +import numpy as np + + +class QuantumEnhancedDynamicDifferentialEvolution_v3: + def __init__( + self, + budget=10000, + population_size=150, + elite_size=25, + local_search_steps=15, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.05, + perturbation_decay=0.9, + alpha=0.6, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.7 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicHybridSearchV9.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicHybridSearchV9.py new file mode 100644 index 000000000..d1c89df99 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicHybridSearchV9.py @@ -0,0 +1,80 @@ +import numpy as np + + +class QuantumEnhancedDynamicHybridSearchV9: + def __init__( + self, + budget, + dimension=5, + population_size=300, + elite_ratio=0.15, + mutation_scale=0.4, + mutation_decay=0.002, + crossover_prob=0.95, + quantum_intensity=0.3, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if self.quantum_intensity and np.random.random() < 0.25: + child = self.quantum_tuning(child, best_individual) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_tuning(self, individual, best_individual): + perturbation = np.random.uniform(-1, 1, self.dimension) * 0.1 # Increased uniform perturbation scale + return individual + perturbation * (best_individual - individual) diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE.py new file mode 100644 index 000000000..f5256b5a8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE.py @@ -0,0 +1,186 @@ +import numpy as np + + +class QuantumEnhancedDynamicMultiStrategyDE: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE_v2.py b/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE_v2.py new file mode 100644 index 000000000..16b036082 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedDynamicMultiStrategyDE_v2.py @@ -0,0 +1,186 @@ +import numpy as np + + +class QuantumEnhancedDynamicMultiStrategyDE_v2: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEnhancedGlobalTacticalOptimizer.py b/nevergrad/optimization/lama/QuantumEnhancedGlobalTacticalOptimizer.py new file mode 100644 index 000000000..88246f5ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedGlobalTacticalOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumEnhancedGlobalTacticalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 250 # Adjusted population size based on performance feedback + inertia_weight = 0.7 # Lower inertia for faster convergence on high-dimensional landscapes + cognitive_coefficient = 2.05 # Slightly adjusted cognitive learning factor + social_coefficient = 2.05 # Slightly adjusted social learning factor + quantum_momentum = 0.3 # Increased quantum influence for better exploration + exploration_phase = 0.65 # Slightly extended exploration phase + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Adaptive inertia weight with dynamic adaptation to enhance tactical responsiveness + w = inertia_weight * ( + 0.4 + 0.6 * np.exp(-3.5 * current_budget / (self.budget * exploration_phase)) + ) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics with adaptive momentum, fine-tuned for better strategic exploration + if np.random.rand() < 0.12 * (1 - w): + quantum_jump = np.random.normal(0, quantum_momentum * (1 - w), self.dim) + population[i] += quantum_jump + + # Velocity and position updates with refined tactical adaptation + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia_component + cognitive_component + social_component + velocity[i] = np.clip( + velocity[i], -1.5, 1.5 + ) # Refinement on clamping velocities based on empirical performance + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and update strategies + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumEnhancedGradientClimber.py b/nevergrad/optimization/lama/QuantumEnhancedGradientClimber.py new file mode 100644 index 000000000..8f008af7e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedGradientClimber.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumEnhancedGradientClimber: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 # Adjusted population size + elite_size = 30 # Adjusted elite size for better convergence + evaluations = 0 + mutation_factor = 0.7 # Lower initial mutation factor + crossover_probability = 0.8 # Slightly reduced crossover probability + quantum_probability = 0.15 # Reduced initial quantum probability + learning_rate = 0.02 # Higher initial learning rate to accelerate convergence + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + if abs(previous_best - self.f_opt) < 1e-6: + mutation_factor *= 0.98 # Reduced adaptation rate for mutation factor + learning_rate *= 0.98 # Reduced adaptation rate for learning rate + else: + mutation_factor *= 1.02 # Moderated increment for escaping local minima + learning_rate *= 1.02 # Moderated increment for gradient steps + previous_best = self.f_opt + + # Quantum exploration step + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Gradient-based refinement for elites + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) + population[idx] += learning_rate * gradient + population[idx] = np.clip(population[idx], self.lb, self.ub) + new_fitness = func(population[idx]) + evaluations += 1 + + if new_fitness < fitness[idx]: + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = population[idx] + + # Crossover and mutation for diversity + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability *= 1.05 # Gradual increase in quantum probability to maintain diversity + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedHybridDEPSO.py b/nevergrad/optimization/lama/QuantumEnhancedHybridDEPSO.py new file mode 100644 index 000000000..f9f81ccad --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedHybridDEPSO.py @@ -0,0 +1,162 @@ +import numpy as np + + +class QuantumEnhancedHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.6 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best): + alpha = 0.1 # Quantum-inspired parameter controlling the attraction to the global best + beta = 0.9 # Quantum-inspired diffusion parameter + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMemeticAdaptiveSearch.py b/nevergrad/optimization/lama/QuantumEnhancedMemeticAdaptiveSearch.py new file mode 100644 index 000000000..ec1b0df42 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMemeticAdaptiveSearch.py @@ -0,0 +1,151 @@ +import numpy as np + + +class QuantumEnhancedMemeticAdaptiveSearch: + def __init__( + self, + budget, + population_size=100, + tau1=0.1, + tau2=0.1, + memetic_rate=0.6, + alpha=0.2, + learning_rate=0.01, + elite_fraction=0.1, + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.normal(size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMemeticSearch.py b/nevergrad/optimization/lama/QuantumEnhancedMemeticSearch.py new file mode 100644 index 000000000..d2d458b3f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMemeticSearch.py @@ -0,0 +1,112 @@ +import numpy as np + + +class QuantumEnhancedMemeticSearch: + def __init__(self, budget, population_size=60, tau1=0.1, tau2=0.1, memetic_rate=0.4, alpha=0.1): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - 0.01 * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v8.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v8.py new file mode 100644 index 000000000..53351c4a4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v8.py @@ -0,0 +1,149 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseAdaptiveDE_v8: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-enhanced dual strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt = np.random.uniform(-0.1, 0.1, self.dim) + mutant = mutant + jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v9.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v9.py new file mode 100644 index 000000000..46d8f65c0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseAdaptiveDE_v9.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseAdaptiveDE_v9: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-enhanced dual strategy mutation approach + if np.random.rand() < 0.5: + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + mutant = population[a] + F * (population[b] - population[c]) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + jolt = np.random.uniform(-jolt_intensity, jolt_intensity, self.dim) + mutant = mutant + jolt + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 + else: + stagnation_counter += 1 + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE.py new file mode 100644 index 000000000..6abf5007f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE.py @@ -0,0 +1,138 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v2.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v2.py new file mode 100644 index 000000000..f7d57183c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v2.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseDE_v2: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate and improvement history + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v3.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v3.py new file mode 100644 index 000000000..df50da22c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v3.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseDE_v3: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.4, 1.0 + Cr_min, Cr_max = 0.1, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate and improvement history + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v4.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v4.py new file mode 100644 index 000000000..97aa09439 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v4.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseDE_v4: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate and improvement history + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v5.py b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v5.py new file mode 100644 index 000000000..388ce3b5b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedMultiPhaseDE_v5.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumEnhancedMultiPhaseDE_v5: + def __init__(self, budget=10000, population_size=100, elite_size=10, epsilon=1e-8): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.elite_size = elite_size + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Stagnation and success tracking + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + # Elite tracking + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + # History of improvements + improvements = [] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Multi-strategy mutation approach + if np.random.rand() < 0.5: + # Quantum-inspired strategy + elite_idx = np.random.choice(self.elite_size) + elite_ind = elite_population[elite_idx] + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = ( + centroid + F * (population[a] - population[b]) + 0.1 * (elite_ind - population[i]) + ) + else: + # Classic DE/rand/1 strategy + mutant = population[a] + F * (population[b] - population[c]) + + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + improvements.append(evaluations) + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Update elite set + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Adaptive parameters based on success rate and improvement history + success_rate = success_count / self.population_size + if success_rate > 0.2: + F = min(F_max, F + 0.1 * success_rate) + Cr = max(Cr_min, Cr - 0.1 * (1 - success_rate)) + else: + F = max(F_min, F - 0.1 * (1 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * success_rate) + + if len(improvements) > 5: + recent_improvements = evaluations - np.array(improvements[-5:]) + average_gap = np.mean(recent_improvements) + if average_gap < 10: + F = min(F_max, F + 0.1) + elif average_gap > 100: + F = max(F_min, F - 0.1) + + # Phase-based Reset Strategy with Additional Diversity + if stagnation_counter > stagnation_threshold: + phase = (evaluations // stagnation_threshold) % 3 + diversity_factor = 0.5 if phase == 2 else 1.0 + + if phase == 0: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 3, replace=False + ) + elif phase == 1: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + else: + reinit_indices = np.random.choice( + self.population_size, self.population_size // 2, replace=False + ) + + population[reinit_indices] = ( + np.random.uniform(self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim)) + * diversity_factor + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEnhancedRefinedAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/QuantumEnhancedRefinedAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..c247e7775 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEnhancedRefinedAdaptiveExplorationOptimization.py @@ -0,0 +1,210 @@ +import numpy as np + + +class QuantumEnhancedRefinedAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + max_exploration_cycles = 30 # Maximum exploration cycles + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.8 + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumEnhancedRefinedAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumEntropyEnhancedDE.py b/nevergrad/optimization/lama/QuantumEntropyEnhancedDE.py new file mode 100644 index 000000000..fe891ba52 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEntropyEnhancedDE.py @@ -0,0 +1,140 @@ +import numpy as np + + +class QuantumEntropyEnhancedDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, local_search_steps=100): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumEvolutionaryAdaptiveOptimizer.py b/nevergrad/optimization/lama/QuantumEvolutionaryAdaptiveOptimizer.py new file mode 100644 index 000000000..72d59e27f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolutionaryAdaptiveOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class QuantumEvolutionaryAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial setup + current_budget = 0 + population_size = 40 # Adjusted population size + mutation_factor = 0.85 # Initial mutation factor for exploration + crossover_prob = 0.75 # Initial crossover probability for exploration + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Quantum-inspired phase to enhance exploration and exploitation + quantum_beta = 0.8 # Adjusted quantum behavior parameter + quantum_alpha = 0.015 # Adjusted quantum learning rate + quantum_population = quantum_beta * np.random.randn(population_size, self.dim) + quantum_population = np.clip(quantum_population + population, self.lower_bound, self.upper_bound) + + while current_budget < self.budget: + new_population = np.empty_like(population) + new_quantum_population = np.empty_like(quantum_population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Mutation and crossover phases for classical and quantum populations + indices = np.delete(np.arange(population_size), i) + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + q1, q2, q3 = quantum_population[random_indices] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + quantum_mutant = q1 + quantum_alpha * (q2 - q3) + quantum_mutant = np.clip(quantum_mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + quantum_trial = np.where( + np.random.rand(self.dim) < crossover_prob, quantum_mutant, quantum_population[i] + ) + + trial_fitness = func(trial) + quantum_trial_fitness = func(quantum_trial) + current_budget += 2 # Two function evaluations per iteration + + # Selection + if quantum_trial_fitness < trial_fitness: + trial_fitness = quantum_trial_fitness + trial = quantum_trial + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + new_quantum_population[i] = quantum_trial + + population = new_population + quantum_population = new_quantum_population + + # Adaptively adjust mutation and crossover parameters + mutation_factor *= 0.99 # Controlled decrease + crossover_prob *= 1.01 # Controlled increase + quantum_alpha *= 0.98 # Gradual reduction of quantum impact + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategy.py b/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategy.py new file mode 100644 index 000000000..8b23ce79d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumEvolutionaryConvergenceStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 300 # Optimized population size + self.elite_size = 50 # Optimal size for elite selection + self.crossover_probability = 0.8 # Optimized crossover probability + self.mutation_scale = 0.01 # Adjusted mutation scale for balance + self.quantum_mutation_scale = 0.02 # Adjusted quantum mutation scale for better exploration + self.quantum_probability = 0.25 # Adjusted probability for quantum mutation + self.recombination_rate = 0.5 # Adjusted rate for recombination + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + else: + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + mutation_mask = np.random.rand(self.dim) < self.mutation_scale + individual[mutation_mask] += np.random.normal(0, self.mutation_scale, np.sum(mutation_mask)) + if np.random.rand() < self.quantum_probability: + quantum_mutation_mask = np.random.rand(self.dim) < self.quantum_mutation_scale + individual[quantum_mutation_mask] += np.random.normal( + 0, self.quantum_mutation_scale, np.sum(quantum_mutation_mask) + ) + return np.clip(individual, self.lower_bound, self.upper_bound) + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + for i in range(num_offspring): + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + child = self.crossover(elite[p1], elite[p2]) + child = self.mutate(child) + offspring[i] = child + + return np.vstack([elite, offspring]) + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategyV2.py b/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategyV2.py new file mode 100644 index 000000000..3dc5dd891 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolutionaryConvergenceStrategyV2.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumEvolutionaryConvergenceStrategyV2: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 200 # Refined population size for better focus + self.elite_size = 40 # Optimal size for elite selection refined + self.crossover_probability = 0.85 # Slightly more aggressive crossover + self.mutation_scale = 0.005 # Fine-tuned mutation scale for more accurate local searches + self.quantum_mutation_scale = 0.015 # Balanced quantum mutation scale for exploration + self.quantum_probability = 0.2 # Probability for quantum mutation slightly adjusted + self.recombination_rate = 0.6 # Increased rate for better population diversity + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + else: + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + mutation_mask = np.random.rand(self.dim) < self.mutation_scale + individual[mutation_mask] += np.random.normal(0, self.mutation_scale, np.sum(mutation_mask)) + if np.random.rand() < self.quantum_probability: + quantum_mutation_mask = np.random.rand(self.dim) < self.quantum_mutation_scale + individual[quantum_mutation_mask] += np.random.normal( + 0, self.quantum_mutation_scale, np.sum(quantum_mutation_mask) + ) + return np.clip(individual, self.lower_bound, self.upper_bound) + + def evolve_population(self, elite, remaining_budget): + num_offspring = self.population_size - self.elite_size + offspring = np.empty((num_offspring, self.dim)) + + for i in range(num_offspring): + p1, p2 = np.random.choice(elite.shape[0], 2, replace=False) + child = self.crossover(elite[p1], elite[p2]) + child = self.mutate(child) + offspring[i] = child + + return np.vstack([elite, offspring]) + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + remaining_budget = self.budget - evaluations_consumed + population = self.evolve_population(elite_population, remaining_budget) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumEvolutionaryOptimization.py b/nevergrad/optimization/lama/QuantumEvolutionaryOptimization.py new file mode 100644 index 000000000..6f015ae7a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolutionaryOptimization.py @@ -0,0 +1,44 @@ +import numpy as np + + +class QuantumEvolutionaryOptimization: + def __init__(self, budget=1000, num_particles=10, num_iterations=100): + self.budget = budget + self.num_particles = num_particles + self.num_iterations = num_iterations + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + particles = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_particles, dimensions)) + velocities = np.zeros((self.num_particles, dimensions)) + pbest_positions = particles.copy() + pbest_values = np.array([func(p) for p in pbest_positions]) + gbest_position = pbest_positions[np.argmin(pbest_values)] + gbest_value = np.min(pbest_values) + + for _ in range(self.num_iterations): + for i in range(self.num_particles): + r1, r2 = np.random.uniform(0, 1, size=2) + velocities[i] = ( + 0.5 * velocities[i] + + 2 * r1 * (pbest_positions[i] - particles[i]) + + 2 * r2 * (gbest_position - particles[i]) + ) + particles[i] = np.clip(particles[i] + velocities[i], bounds.lb, bounds.ub) + f = func(particles[i]) + + if f < pbest_values[i]: + pbest_positions[i] = particles[i] + pbest_values[i] = f + + if f < gbest_value: + gbest_position = particles[i] + gbest_value = f + + self.f_opt = gbest_value + self.x_opt = gbest_position + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV10.py b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV10.py new file mode 100644 index 000000000..8d21b8c4d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV10.py @@ -0,0 +1,92 @@ +import numpy as np + + +class QuantumEvolvedDiversityExplorerV10: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_fraction=0.2, + mutation_intensity=0.25, + crossover_rate=0.9, + quantum_prob=0.8, + gamma=0.6, + beta=0.35, + epsilon=0.01, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gamma = gamma + self.beta = beta + self.epsilon = epsilon + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites based on the fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = population[np.random.choice(len(population))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum state update with increased probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population and update the best solution found + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Advanced quantum state update enhancing exploration and fine-tuning""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV11.py b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV11.py new file mode 100644 index 000000000..602b2d531 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV11.py @@ -0,0 +1,92 @@ +import numpy as np + + +class QuantumEvolvedDiversityExplorerV11: + def __init__( + self, + budget, + dimension=5, + population_size=200, + elite_fraction=0.25, + mutation_intensity=0.3, + crossover_rate=0.95, + quantum_prob=0.85, + gamma=0.7, + beta=0.4, + epsilon=0.02, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gamma = gamma + self.beta = beta + self.epsilon = epsilon + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites based on the fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = population[np.random.choice(len(population))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum state update with increased probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population and update the best solution found + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Advanced quantum state update enhancing exploration and fine-tuning""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV12.py b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV12.py new file mode 100644 index 000000000..ced9432e7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV12.py @@ -0,0 +1,86 @@ +import numpy as np + + +class QuantumEvolvedDiversityExplorerV12: + def __init__( + self, + budget, + dimension=5, + population_size=250, + elite_fraction=0.3, + mutation_intensity=0.35, + crossover_rate=0.9, + quantum_prob=0.9, + gamma=0.75, + beta=0.45, + epsilon=0.025, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gamma = gamma + self.beta = beta + self.epsilon = epsilon + + def __call__(self, func): + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = population[np.random.choice(len(population))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV9.py b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV9.py new file mode 100644 index 000000000..9ac5d57de --- /dev/null +++ b/nevergrad/optimization/lama/QuantumEvolvedDiversityExplorerV9.py @@ -0,0 +1,92 @@ +import numpy as np + + +class QuantumEvolvedDiversityExplorerV9: + def __init__( + self, + budget, + dimension=5, + population_size=120, + elite_fraction=0.15, + mutation_intensity=0.2, + crossover_rate=0.85, + quantum_prob=0.75, + gamma=0.5, + beta=0.3, + epsilon=0.001, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Enhanced probability for quantum-inspired state update + self.gamma = gamma # Elevated gamma for extended exploration in quantum state updates + self.beta = beta # Optimized beta to manage mutation intensity more adaptively + self.epsilon = epsilon # Minimum threshold for mutation intensity to ensure fine-tuning + + def __call__(self, func): + # Initialize population randomly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites based on the fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + # Quantum state update with increased probability + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population and update the best solution found + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Advanced quantum state update enhancing exploration and fine-tuning""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumFeedbackEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumFeedbackEvolutionStrategy.py new file mode 100644 index 000000000..538fee320 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumFeedbackEvolutionStrategy.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumFeedbackEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 100 + elite_size = 10 + evaluations = 0 + mutation_factor = 0.5 + crossover_probability = 0.7 + quantum_probability = 0.05 + feedback_threshold = 1e-6 + feedback_gain = 0.1 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Evolve using differential evolution + new_population = np.empty_like(population) + for i in range(population_size): + indices = [j for j in range(population_size) if j != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Feedback mechanism for dynamic adaptation + if abs(previous_best - self.f_opt) < feedback_threshold: + mutation_factor -= feedback_gain * mutation_factor + crossover_probability += feedback_gain * (1 - crossover_probability) + else: + mutation_factor += feedback_gain * (1 - mutation_factor) + crossover_probability -= feedback_gain * crossover_probability + + previous_best = self.f_opt + + # Quantum mutation for exploration + if np.random.rand() < quantum_probability: + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumFireworksAlgorithm.py b/nevergrad/optimization/lama/QuantumFireworksAlgorithm.py new file mode 100644 index 000000000..307fc4a92 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumFireworksAlgorithm.py @@ -0,0 +1,38 @@ +import numpy as np + + +class QuantumFireworksAlgorithm: + def __init__(self, budget=1000, num_sparks=10, num_iterations=100, amplification_factor=1.5): + self.budget = budget + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.amplification_factor = amplification_factor + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_sparks, dimensions)) + best_firework = fireworks[0] + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for i, firework in enumerate(fireworks): + for _ in range(self.num_sparks): + spark = firework + np.random.normal(0, 1, size=dimensions) * self.amplification_factor + spark = np.clip(spark, bounds.lb, bounds.ub) + f_spark = func(spark) + if f_spark < func(firework): + fireworks[i] = spark + if f_spark < func(best_firework): + best_firework = spark + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumFluxDifferentialSwarm.py b/nevergrad/optimization/lama/QuantumFluxDifferentialSwarm.py new file mode 100644 index 000000000..a755127b3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumFluxDifferentialSwarm.py @@ -0,0 +1,59 @@ +import numpy as np + + +class QuantumFluxDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 1000 # Increased population size for greater exploration + self.F_base = 0.5 # Base factor for mutation + self.CR_base = 0.9 # High crossover probability to favor recombination + self.quantum_probability = 0.2 # Higher probability for quantum-driven mutation + self.vortex_factor = 0.3 # Enhanced vortex factor for dynamic strategy + + def __call__(self, func): + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main optimization loop + for i in range(int(self.budget / self.pop_size)): + # Adjusting factors based on a dynamic non-linear modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.vortex_factor * np.sin(np.pi * iteration_ratio) + CR = self.CR_base - self.vortex_factor * np.cos(np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Quantum-inspired mutation with higher probability + if np.random.rand() < self.quantum_probability: + mean_quantum_state = best_ind + (pop[j] - best_ind) / 2 + scale = np.abs(best_ind - pop[j]) / 2 + quantum_mutation = np.random.normal(mean_quantum_state, scale) + quantum_mutation = np.clip(quantum_mutation, -5.0, 5.0) + mutant = quantum_mutation + else: + # Traditional DE mutation: DE/rand/1 with best influence + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + F * (best_ind - pop[j]) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumGeneticDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumGeneticDifferentialEvolution.py new file mode 100644 index 000000000..b5de12235 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGeneticDifferentialEvolution.py @@ -0,0 +1,172 @@ +import numpy as np + + +class QuantumGeneticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.7 + self.elite_fraction = 0.2 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + return population + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + self.local_search_budget -= 1 + if self.local_search_budget <= 0: + break + return individual + + def quantum_jump(self, individual, global_best, alpha): + return np.clip(individual + alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(self.local_search_budget, self.budget - evaluations) + elite_population[idx] = self.local_search(elite_population[idx], bounds, func) + evaluations += local_search_budget + + if self.diversity(population) < self.diversity_threshold: + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + population[:elite_count] = elite_population + + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha) + quantum_fitness = func(quantum_trial) + evaluations += 1 + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Genetic Algorithm-based Diversity Enhancement + if self.diversity(population) < self.diversity_threshold: + mating_pool = population[np.argsort(fitness)[: self.pop_size // 2]] + offspring = np.zeros((self.pop_size // 2, self.dim)) + for i in range(self.pop_size // 2): + parent1, parent2 = mating_pool[np.random.choice(self.pop_size // 2, 2, replace=False)] + cross_point = np.random.randint(1, self.dim - 1) + offspring[i, :cross_point] = parent1[:cross_point] + offspring[i, cross_point:] = parent2[cross_point:] + offspring[i] = np.clip( + offspring[i] + np.random.randn(self.dim) * 0.1, bounds.lb, bounds.ub + ) + population[self.pop_size // 2 :] = offspring + fitness[self.pop_size // 2 :] = [func(ind) for ind in offspring] + evaluations += self.pop_size // 2 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..045b0ad79 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimization.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Cognitive constant (increased for more aggressive personal search) + c2 = 2.0 # Social constant (increased for more aggressive global search) + w = 0.5 # Inertia weight (lowered for better convergence control) + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.5 # Differential weight (lowered for finer adjustments) + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV2.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV2.py new file mode 100644 index 000000000..5954af036 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV2.py @@ -0,0 +1,213 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Further increased swarm size for enhanced exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.7 # Slightly reduced cognitive constant + c2 = 2.3 # Slightly increased social constant + w = 0.4 # Further lowered inertia weight for better convergence + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.4 # Adjusted lower bound for differential weight + F_max = 0.8 # Adjusted upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 10 # Reduced max stagnation to trigger diversity enforcement faster + + # Exploration improvement parameters + exploration_factor = 0.25 # Increased exploration factor + + # Quantum-inspired rotation matrix + theta = np.pi / 6 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.3 # Increased mutation factor + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.0075 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + positions[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV3.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV3.py new file mode 100644 index 000000000..5acce68bc --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV3.py @@ -0,0 +1,213 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Slightly reduced cognitive constant + c2 = 2.5 # Slightly increased social constant + w = 0.3 # Further lowered inertia weight for better convergence control + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.5 # Adjusted lower bound for differential weight + F_max = 0.9 # Adjusted upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 15 # Reduced max stagnation to trigger diversity enforcement faster + + # Exploration improvement parameters + exploration_factor = 0.3 # Increased exploration factor + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.25 # Increased mutation factor + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + positions[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationOptimizationV3(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV4.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV4.py new file mode 100644 index 000000000..3c1de407a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV4.py @@ -0,0 +1,213 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationOptimizationV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 # Increased swarm size further to improve population diversity + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (tuned for balance) + c2 = 2.5 # Social constant (tuned for balance) + w = 0.4 # Inertia weight (adjusted for better convergence) + + # Learning rate adaptation parameters + alpha = 0.15 # Increased initial learning rate + beta = 0.85 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.6 # Adjusted lower bound for differential weight + F_max = 1.0 # Adjusted upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 10 # Reduced max stagnation to trigger diversity enforcement faster + + # Exploration improvement parameters + exploration_factor = 0.4 # Increased exploration factor + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.3 # Increased mutation factor + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + positions[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationOptimizationV4(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV5.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV5.py new file mode 100644 index 000000000..83d0b2225 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationOptimizationV5.py @@ -0,0 +1,216 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationOptimizationV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 50 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (adjusted for balance) + c2 = 2.0 # Social constant (increased for stronger global search) + w = 0.9 # Initial inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.5 # Differential weight (adjusted for balance) + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 15 # Reduced max stagnation to trigger diversity enforcement earlier + + # Exploration improvement parameters + exploration_factor = 0.3 # Increased exploration factor + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.3 # Increased mutation factor for larger perturbations + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 # Adjusted threshold + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + for j in range(0, self.dim - 1, 2): # Apply rotation to pairs of dimensions + sub_pos = positions[idx][j : j + 2] + rotated_sub_pos = np.dot(rotation_matrix, sub_pos) + positions[idx][j : j + 2] = rotated_sub_pos + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + # Update inertia weight adaptively + w = 0.9 - (0.9 - 0.4) * (i / self.budget) + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationOptimizationV5(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationRefinedOptimization.py b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationRefinedOptimization.py new file mode 100644 index 000000000..e866cd27a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientAdaptiveExplorationRefinedOptimization.py @@ -0,0 +1,213 @@ +import numpy as np + + +class QuantumGradientAdaptiveExplorationRefinedOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size for better exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # PSO constants + c1 = 1.5 # Cognitive constant (fine-tuned for balanced search) + c2 = 1.5 # Social constant (fine-tuned for balanced search) + w = 0.4 # Inertia weight (fine-tuned for better convergence control) + + # Learning rate adaptation parameters + alpha = 0.05 # Initial learning rate (reduced for finer adjustments) + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.3 # Lower bound for differential weight + F_max = 0.8 # Upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.2 + stagnation_counter = 0 + max_stagnation = 15 # Reduced stagnation threshold to trigger diversity enforcement sooner + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.25 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientAdaptiveExplorationRefinedOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientBalancedOptimizerV6.py b/nevergrad/optimization/lama/QuantumGradientBalancedOptimizerV6.py new file mode 100644 index 000000000..3c6fed0cc --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientBalancedOptimizerV6.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumGradientBalancedOptimizerV6: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.7, + cognitive_coefficient=2.5, + social_coefficient=2.5, + quantum_probability=0.15, + damping_factor=0.99, + quantum_scale=0.1, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.quantum_scale = quantum_scale + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + # Adaptively update inertia weight for enhanced convergence + inertia = self.inertia_weight * (self.damping_factor ** (evaluations / self.budget)) + + velocities[i] = ( + inertia * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + + if np.random.rand() < self.quantum_probability: + # Quantum leap with scaling factor to control the step size + quantum_leap = global_best + np.random.normal(0, self.quantum_scale, self.dim) + particles[i] = np.clip(quantum_leap, self.lb, self.ub) + else: + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Dynamically adjust quantum probability and scale for balanced exploration + self.quantum_probability *= self.damping_factor + self.quantum_scale *= self.damping_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumGradientBoostedMemeticSearch.py b/nevergrad/optimization/lama/QuantumGradientBoostedMemeticSearch.py new file mode 100644 index 000000000..726bd1742 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientBoostedMemeticSearch.py @@ -0,0 +1,133 @@ +import numpy as np + + +class QuantumGradientBoostedMemeticSearch: + def __init__( + self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.6, alpha=0.2, learning_rate=0.01 + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGradientEnhancedExplorationOptimization.py b/nevergrad/optimization/lama/QuantumGradientEnhancedExplorationOptimization.py new file mode 100644 index 000000000..91b18a7d7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientEnhancedExplorationOptimization.py @@ -0,0 +1,211 @@ +import numpy as np + + +class QuantumGradientEnhancedExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (adjusted for balanced personal search) + c2 = 2.0 # Social constant (kept higher for strong global search) + w = 0.7 # Inertia weight (slightly increased for better exploration) + + # Learning rate adaptation parameters + alpha = 0.05 # Reduced initial learning rate for finer adjustments + beta = 0.8 # Reduced momentum term for better control + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.5 # Differential weight (kept same) + CR = 0.9 # Crossover probability (kept same) + + # Diversity enforcement parameters + diversity_threshold = 0.15 # Increased threshold for higher diversity + stagnation_counter = 0 + max_stagnation = 15 # Reduced max stagnation to trigger diversity enforcement earlier + + # Exploration improvement parameters + exploration_factor = 0.25 # Increased exploration factor for more aggressive jumps + + # Quantum-inspired rotation matrix + theta = np.pi / 3 # Increased rotation angle for more pronounced changes + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.3 # Increased mutation factor for larger perturbations + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 # Adjusted to be more sensitive + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.2 # Increase learning rate more aggressively if improvement is significant + else: + alpha *= 0.8 # Decrease learning rate more if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientEnhancedExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientFusionOptimizer.py b/nevergrad/optimization/lama/QuantumGradientFusionOptimizer.py new file mode 100644 index 000000000..919039960 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientFusionOptimizer.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumGradientFusionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # Reduced population size for concentrated exploration + mutation_factor = 1.0 # More intense initial mutation + crossover_prob = 0.8 # Higher crossover probability for enhanced exploration + learning_rate = 0.2 # Higher initial learning rate for quicker global convergence + + # Initialize population within bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Optimization loop + while current_budget < self.budget: + gradients = np.zeros_like(population) + + # Enhanced gradient estimation with smaller perturbation + h = 0.01 # Smaller step for gradient calculation + for i in range(population_size): + if current_budget >= self.budget: + break + + base_ind = population[i] + for d in range(self.dim): + perturbed_ind_plus = np.array(base_ind) + perturbed_ind_minus = np.array(base_ind) + perturbed_ind_plus[d] += h + perturbed_ind_minus[d] -= h + + if current_budget + 2 <= self.budget: + fitness_plus = func(perturbed_ind_plus) + fitness_minus = func(perturbed_ind_minus) + current_budget += 2 + gradient = (fitness_plus - fitness_minus) / (2 * h) + gradients[i, d] = gradient + + new_population = population.copy() + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum inspired mutation + noise = np.random.normal(0, 1, self.dim) + quantum_jumps = np.where(np.random.rand(self.dim) < 0.1, noise, 0) + + # Apply gradient and quantum jump + child = population[i] - learning_rate * gradients[i] + quantum_jumps + child += mutation_factor * np.random.randn(self.dim) + + # Perform crossover + if np.random.rand() < crossover_prob: + partner_idx = np.random.randint(population_size) + crossover_mask = np.random.rand(self.dim) < 0.5 + child = child * crossover_mask + population[partner_idx] * (1 - crossover_mask) + + child = np.clip(child, self.lower_bound, self.upper_bound) + child_fitness = func(child) + current_budget += 1 + + if child_fitness < fitness[i]: + new_population[i] = child + fitness[i] = child_fitness + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + population = new_population + + # Adaptive adjustments + mutation_factor *= 0.95 + learning_rate *= 0.95 + crossover_prob *= 0.95 + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumGradientGuidedFireworksAlgorithm.py b/nevergrad/optimization/lama/QuantumGradientGuidedFireworksAlgorithm.py new file mode 100644 index 000000000..d978725ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientGuidedFireworksAlgorithm.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumGradientGuidedFireworksAlgorithm: + def __init__( + self, + budget=1000, + num_fireworks=10, + num_sparks=5, + num_iterations=100, + mutation_rate=0.1, + explosion_rate=0.1, + learning_rate=0.01, + ): + self.budget = budget + self.num_fireworks = num_fireworks + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.mutation_rate = mutation_rate + self.explosion_rate = explosion_rate + self.learning_rate = learning_rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_fireworks, dimensions)) + best_firework = fireworks[0] + explosion_sizes = np.ones(self.num_fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + gradients = np.zeros(dimensions) + for _ in range(10): + perturbation = np.random.normal(0, 1, size=dimensions) + perturbed_firework = firework + self.learning_rate * perturbation + perturbed_firework = np.clip(perturbed_firework, bounds.lb, bounds.ub) + f_perturbed = func(perturbed_firework) + gradients += perturbation * (f - f_perturbed) + + gradients /= np.linalg.norm(gradients) + + for _ in range(self.num_sparks): + selected_firework = np.random.choice(range(self.num_fireworks)) + spark = fireworks[selected_firework] + gradients * explosion_sizes[selected_firework] + spark = np.clip(spark, bounds.lb, bounds.ub) + f_spark = func(spark) + + if f_spark < f: + fireworks[selected_firework] = spark + f = f_spark + if f < func(best_firework): + best_firework = spark + + # Introduce random mutation with adaptive explosion sizes + for i in range(self.num_fireworks): + if np.random.rand() < self.mutation_rate: + fireworks[i] = np.random.uniform(bounds.lb, bounds.ub, dimensions) + + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + explosion_sizes[i] = np.clip( + explosion_sizes[i] * (1 + self.explosion_rate * np.random.normal()), 1, None + ) + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGradientHybridOptimization.py b/nevergrad/optimization/lama/QuantumGradientHybridOptimization.py new file mode 100644 index 000000000..385f88498 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientHybridOptimization.py @@ -0,0 +1,213 @@ +import numpy as np + + +class QuantumGradientHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.8 # Cognitive constant (balanced for more exploration) + c2 = 1.8 # Social constant (balanced for more exploration) + w = 0.6 # Inertia weight (balanced to control convergence) + + # Learning rate adaptation parameters + alpha = 0.05 # Initial learning rate (reduced for finer adjustments) + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.4 # Lower bound for differential weight + F_max = 0.9 # Upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.15 + stagnation_counter = 0 + max_stagnation = 15 # Reduced stagnation threshold to trigger diversity enforcement sooner + + # Exploration improvement parameters + exploration_factor = 0.3 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.25 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV2.py b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV2.py new file mode 100644 index 000000000..d5cfec521 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV2.py @@ -0,0 +1,214 @@ +import numpy as np + + +class QuantumGradientHybridOptimizationV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (slightly reduced for balanced search) + c2 = 2.0 # Social constant (slightly increased for more global search) + w = 0.7 # Inertia weight (slightly increased for more exploration) + + # Learning rate adaptation parameters + alpha = 0.05 # Initial learning rate (reduced for finer adjustments) + beta = 0.8 # Momentum term (slightly reduced) + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.3 # Lower bound for differential weight + F_max = 0.7 # Upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 10 # Reduced stagnation threshold to trigger diversity enforcement sooner + + # Exploration improvement parameters + exploration_factor = 0.25 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot( + rotation_matrix, positions[idx][:2] + ) # only apply to first two dimensions + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientHybridOptimizationV2(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV3.py b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV3.py new file mode 100644 index 000000000..8f565d6f6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV3.py @@ -0,0 +1,214 @@ +import numpy as np + + +class QuantumGradientHybridOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant (balanced for personal search) + c2 = 2.0 # Social constant (increased for more global search) + w = 0.7 # Inertia weight (increased for more exploration) + + # Learning rate adaptation parameters + alpha = 0.05 # Initial learning rate (reduced for finer adjustments) + beta = 0.8 # Momentum term (slightly reduced) + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.3 # Lower bound for differential weight + F_max = 0.7 # Upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 10 # Reduced stagnation threshold to trigger diversity enforcement sooner + + # Exploration improvement parameters + exploration_factor = 0.25 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.01 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot( + rotation_matrix, positions[idx][:2] + ) # only apply to first two dimensions + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientHybridOptimizationV3(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV4.py b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV4.py new file mode 100644 index 000000000..bc42162ff --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientHybridOptimizationV4.py @@ -0,0 +1,214 @@ +import numpy as np + + +class QuantumGradientHybridOptimizationV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 25 # Adjusted swarm size for more balanced exploration-exploitation + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.8 # Cognitive constant (balanced for personal search) + c2 = 2.2 # Social constant (increased for more global search) + w = 0.6 # Inertia weight (moderate for both exploration and exploitation) + + # Learning rate adaptation parameters + alpha = 0.075 # Initial learning rate (fine-tuned for stability) + beta = 0.85 # Momentum term (balanced) + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.3 # Lower bound for differential weight + F_max = 0.7 # Upper bound for differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 15 # Moderate stagnation threshold to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.3 # Enhanced exploration factor + + # Quantum-inspired rotation matrix + theta = np.pi / 6 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.25 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.0075 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot( + rotation_matrix, positions[idx][:2] + ) # only apply to first two dimensions + positions[idx][:2] = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(positions[idx]) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = positions[idx] + if new_f < global_best_score: + global_best_score = new_f + global_best_position = positions[idx] + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = positions[idx] + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumGradientHybridOptimizationV4(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientHybridOptimizer.py b/nevergrad/optimization/lama/QuantumGradientHybridOptimizer.py new file mode 100644 index 000000000..49d1ab324 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientHybridOptimizer.py @@ -0,0 +1,116 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumGradientHybridOptimizer: + def __init__(self, budget=10000, population_size=50): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + self.local_search_probability = 0.3 # Probability of local search + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior with adaptive step size + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * 0.95: + adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * adaptive_factor) + else: + adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * adaptive_factor) + + if eval_count < self.budget and np.random.rand() < self.local_search_probability: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = QuantumGradientHybridOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientMemeticOptimizer.py b/nevergrad/optimization/lama/QuantumGradientMemeticOptimizer.py new file mode 100644 index 000000000..cb33726f5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientMemeticOptimizer.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumGradientMemeticOptimizer: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.7 + self.cognitive_weight = 1.7 + self.social_weight = 1.7 + self.quantum_weight = 0.2 + self.elite_fraction = 0.25 + self.memory_size = 20 + self.local_search_probability = 0.90 + self.stagnation_threshold = 2 + self.adaptive_factor = 1.0 + self.no_improvement_count = 0 + self.annealing_factor = 0.95 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + # Initialize population and velocities + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + performance_memory = [best_fitness] * self.memory_size + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + step_size = np.linalg.norm(velocities[i]) + population[i] = best_individual + step_size * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + self.no_improvement_count = 0 + else: + self.no_improvement_count += 1 + else: + self.no_improvement_count += 1 + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness > mean_recent_performance * 1.05: + self.adaptive_factor *= 0.9 + self.quantum_weight = min(1.0, self.quantum_weight * self.adaptive_factor) + else: + self.adaptive_factor *= 1.1 + self.quantum_weight = max(0.0, self.quantum_weight * self.adaptive_factor) + + if self.no_improvement_count >= self.stagnation_threshold: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + if np.random.rand() < self.local_search_probability: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + self.no_improvement_count = 0 + + if eval_count >= self.budget: + break + + self.no_improvement_count = 0 + + self.inertia_weight *= self.annealing_factor + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=50): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res + + +# Example usage +# optimizer = QuantumGradientMemeticOptimizer(budget=10000) +# best_fitness, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumGradientMemeticSearch.py b/nevergrad/optimization/lama/QuantumGradientMemeticSearch.py new file mode 100644 index 000000000..9ca759008 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientMemeticSearch.py @@ -0,0 +1,118 @@ +import numpy as np + + +class QuantumGradientMemeticSearch: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGradientMemeticSearchV2.py b/nevergrad/optimization/lama/QuantumGradientMemeticSearchV2.py new file mode 100644 index 000000000..42e6fc242 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientMemeticSearchV2.py @@ -0,0 +1,118 @@ +import numpy as np + + +class QuantumGradientMemeticSearchV2: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best, alpha=self.learning_rate) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.1, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGradientMemeticSearchV3.py b/nevergrad/optimization/lama/QuantumGradientMemeticSearchV3.py new file mode 100644 index 000000000..adf31b63f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGradientMemeticSearchV3.py @@ -0,0 +1,118 @@ +import numpy as np + + +class QuantumGradientMemeticSearchV3: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best, alpha=self.learning_rate) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.1, (1 - current_iter / max_iter)) ** 0.5 + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGuidedAdaptiveStrategy.py b/nevergrad/optimization/lama/QuantumGuidedAdaptiveStrategy.py new file mode 100644 index 000000000..ef51d130a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGuidedAdaptiveStrategy.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumGuidedAdaptiveStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 150 + elite_size = 30 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.7 + quantum_probability = 0.1 + adaptive_scaling_factor = lambda t: 0.5 * np.exp( + -0.05 * t + ) # Adaptive decay on quantum mutation intensity + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution operators + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3 + x4 - x1) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumGuidedCrossoverAdaptation.py b/nevergrad/optimization/lama/QuantumGuidedCrossoverAdaptation.py new file mode 100644 index 000000000..752ec2408 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGuidedCrossoverAdaptation.py @@ -0,0 +1,87 @@ +import numpy as np + + +class QuantumGuidedCrossoverAdaptation: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 120 # Increased population for broader search + self.elite_size = 15 # Reduced elite to promote diversity + self.offspring_size = 105 # Adjusted offspring size to maintain population balance + self.mutation_scale = 0.05 # Increased mutation scale for broader exploratory moves + self.crossover_prob = 0.9 # Slightly reduced to promote genetic diversity + self.mutation_prob = 0.2 # Increased to encourage exploration + self.quantum_probability = 0.1 # Adjusted to control excessive randomness + self.adaptive_scale = 0.1 # Adaptive scaling factor for mutation scale adjustment + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_prob: + cross_point = np.random.randint(1, self.dim) + child = np.empty(self.dim) + child[:cross_point] = parent1[:cross_point] + child[cross_point:] = parent2[cross_point:] + return child + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + if np.random.rand() < self.mutation_prob: + mutation_points = np.random.randint(0, self.dim) + individual[mutation_points] += np.random.normal(0, self.mutation_scale) + individual = np.clip(individual, self.lower_bound, self.upper_bound) + return individual + + def quantum_jump(self, individual): + if np.random.rand() < self.quantum_probability: + quantum_shift = np.random.normal(0, self.adaptive_scale, self.dim) + individual += quantum_shift + individual = np.clip(individual, self.lower_bound, self.upper_bound) + return individual + + def reproduce(self, parents): + offspring = np.empty((self.offspring_size, self.dim)) + num_parents = len(parents) + for i in range(self.offspring_size): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + child = self.crossover(parents[p1], parents[p2]) + child = self.mutate(child) + child = self.quantum_jump(child) + offspring[i] = child + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_survivors(population, fitness) + + offspring = self.reproduce(elite_population) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumGuidedHybridDifferentialSwarm.py b/nevergrad/optimization/lama/QuantumGuidedHybridDifferentialSwarm.py new file mode 100644 index 000000000..34c76b6fd --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGuidedHybridDifferentialSwarm.py @@ -0,0 +1,61 @@ +import numpy as np + + +class QuantumGuidedHybridDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 750 # Further increased population size for enhanced exploration and exploitation + self.F_base = 0.6 # Slightly higher mutation rate for more aggressive search + self.CR_base = 0.85 # Slightly lower crossover rate to maintain diversity in the population + self.quantum_probability = 0.15 # Increased probability for quantum-driven mutation + self.vortex_effect = 0.2 # Introducing a new vortex effect factor for complex landscape navigation + + def __call__(self, func): + # Initialize population within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic factors adjusted with a sine-cosine modulation for adaptive behavior + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + np.sin(2 * np.pi * iteration_ratio) * self.vortex_effect + CR = self.CR_base + np.cos(2 * np.pi * iteration_ratio) * self.vortex_effect + + for j in range(self.pop_size): + if np.random.rand() < self.quantum_probability: + # Quantum-inspired mutation using a complex Gaussian distribution + mean_quantum_state = best_ind + (pop[j] - best_ind) / 2 + quantum_mutation = np.random.normal( + loc=mean_quantum_state, scale=np.abs(best_ind - pop[j]) + ) + quantum_mutation = np.clip(quantum_mutation, -5.0, 5.0) + mutant = quantum_mutation + else: + # Classical DE mutation: DE/rand-to-best/1 + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F * (best_ind - pop[j]) + F * (a - b) + mutant = np.clip(mutant, -5.0, 5.0) # Enforce boundary constraints + + # Crossover operation + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection process + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumGuidedLevyAdaptiveSwarm.py b/nevergrad/optimization/lama/QuantumGuidedLevyAdaptiveSwarm.py new file mode 100644 index 000000000..922ac81f7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumGuidedLevyAdaptiveSwarm.py @@ -0,0 +1,163 @@ +import numpy as np + + +class QuantumGuidedLevyAdaptiveSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step # Reduced step size for more precise exploitation + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.6 * progress # Enhanced dynamic range + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + 0.4 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.05 + 0.45 * progress # Increased max levy factor + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 # Reduced population size for more evaluations per individual + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + # DE Mutation and Crossover + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum Particle Update + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Levy Flight Local Search + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: # Reduced probability of local search to balance exploration + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHarmonicAdaptationStrategy.py b/nevergrad/optimization/lama/QuantumHarmonicAdaptationStrategy.py new file mode 100644 index 000000000..04725c26e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicAdaptationStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumHarmonicAdaptationStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 150 # Adjusted population size for targeted exploration + self.sigma_initial = 0.7 # Initial standard deviation for enhanced exploration + self.learning_rate = 0.08 # Fine-tuned learning rate for mutation influence + self.CR_base = 0.5 # Base crossover probability, enabling more diversity + self.q_impact_initial = 0.3 # Initial quantum impact for enhanced exploration + self.q_impact_decay = 0.99 # Slower decay rate for quantum impact + self.sigma_decay = 0.99 # Slower sigma decay rate + self.elitism_factor = 2 # Lowered elitism factor to maintain strong candidates + self.CR_adaptive_increment = 0.005 # Increment for adaptive crossover + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Preliminary setup for elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + current_CR = self.CR_base + self.CR_adaptive_increment * iteration + + for i in range(self.pop_size): + if i in elites: # Skip mutation for elite members + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c) + quantum_term + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = current_CR + self.learning_rate * (np.random.rand() - 0.5) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Constantly update elites + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumHarmonicAdaptiveFeedbackOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveFeedbackOptimizer.py new file mode 100644 index 000000000..a7a8507f2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveFeedbackOptimizer.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumHarmonicAdaptiveFeedbackOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=150, + elite_rate=0.2, + resonance_factor=0.1, + mutation_scale=0.1, + harmonic_frequency=0.05, + feedback_intensity=0.3, + damping_factor=0.95, + mutation_decay=0.95, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + # Enhanced dynamic quantum mutations with adaptive feedback + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + # Sorted population by fitness for selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with quantum-inspired adaptive variations + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + # Dynamically adjust mutation parameters + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicAdaptiveOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveOptimizer.py new file mode 100644 index 000000000..453468254 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumHarmonicAdaptiveOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=100, + elite_rate=0.1, + resonance_factor=0.05, + mutation_scale=0.05, + harmonic_frequency=0.1, + feedback_intensity=0.1, + damping_factor=0.98, + mutation_decay=0.98, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and perform selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with quantum-inspired variations + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + + self.population[idx] = elite_sample + harmonic_influence + quantum_resonance + mutation_effect + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + # Decay mutation parameters to stabilize convergence over time + self.mutation_scale *= self.mutation_decay + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicAdaptiveRefinementOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveRefinementOptimizer.py new file mode 100644 index 000000000..5f8808be9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicAdaptiveRefinementOptimizer.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumHarmonicAdaptiveRefinementOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=200, + elite_rate=0.25, + resonance_factor=0.15, + mutation_scale=0.2, + harmonic_frequency=0.08, + feedback_intensity=0.4, + damping_factor=0.9, + mutation_decay=0.9, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + # Enhanced dynamic quantum mutations with structured adaptive feedback + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + # Sorted population by fitness for selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with quantum-inspired adaptive variations + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + # Dynamically adjust mutation parameters + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicDynamicAdaptation.py b/nevergrad/optimization/lama/QuantumHarmonicDynamicAdaptation.py new file mode 100644 index 000000000..b5a26d065 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicDynamicAdaptation.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumHarmonicDynamicAdaptation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Increased population size for more diverse initial search + self.sigma_initial = 1.2 # Increased initial standard deviation for global exploration + self.learning_rate = 0.05 # Reduced learning rate for finer adaptation + self.CR_base = 0.5 # Base crossover probability + self.q_impact_initial = 0.5 # Initial quantum impact + self.q_impact_decay = 0.95 # Decay rate for the quantum impact + self.sigma_decay = 0.95 # Decay for sigma to focus on exploitation over time + self.elitism_factor = 10 # Percentage of the population to retain as elites + self.CR_adaptive_increment = 0.005 # Incremental increase in crossover probability + + def __call__(self, func): + # Initialize population within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Setup for elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + current_CR = self.CR_base + self.CR_adaptive_increment * iteration + + for i in range(self.pop_size): + if i in elites: # Avoid disturbing elite members + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b + c + quantum_term) + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = current_CR + self.learning_rate * (np.random.rand() - 0.5) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites regularly + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumHarmonicDynamicOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicDynamicOptimizer.py new file mode 100644 index 000000000..da366cf24 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicDynamicOptimizer.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumHarmonicDynamicOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=100, + elite_rate=0.1, + resonance_factor=0.08, + mutation_scale=0.08, + harmonic_frequency=0.03, + feedback_intensity=0.25, + damping_factor=0.98, + mutation_decay=0.98, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + # Quantum-inspired mutations enhanced with dynamic feedback mechanism + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + # Sort population by fitness and perform selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with quantum-inspired variations + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + # Adjust mutation parameters dynamically + self.mutation_scale *= self.mutation_decay * self.damping_factor + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumHarmonicEvolutionStrategy.py new file mode 100644 index 000000000..6e4a990bb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicEvolutionStrategy.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumHarmonicEvolutionStrategy: + def __init__( + self, + budget, + dim=5, + pop_size=120, + elite_rate=0.25, + resonance_intensity=0.15, + mutation_intensity=0.05, + harmonic_depth=0.3, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_intensity = resonance_intensity + self.mutation_intensity = mutation_intensity + self.harmonic_depth = harmonic_depth + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and select elites + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with harmonic fluctuations and quantum resonance + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_depth * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_intensity * (np.random.uniform(-1, 1, self.dim) ** 3) + normal_disturbance = np.random.normal(0, self.mutation_intensity, self.dim) + + # Combine influences + self.population[idx] = elite_sample + harmonic_influence + quantum_resonance + normal_disturbance + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFeedbackOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicFeedbackOptimizer.py new file mode 100644 index 000000000..6cb9abaed --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFeedbackOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumHarmonicFeedbackOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=200, + elite_rate=0.15, + resonance_factor=0.10, + mutation_scale=0.04, + harmonic_frequency=0.30, + feedback_intensity=0.12, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.prev_best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.prev_best_fitness = self.best_fitness + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and perform a selective reproduction process + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Employ quantum-informed harmonic techniques with feedback-based adaptation + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + + if self.best_fitness >= self.prev_best_fitness: + # Apply feedback to intensify exploration when stagnation detected + feedback_adjustment = self.feedback_intensity * np.random.uniform(-1, 1, self.dim) + else: + feedback_adjustment = 0 + + self.population[idx] = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_adjustment + ) + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizer.py new file mode 100644 index 000000000..afa4205fe --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizer.py @@ -0,0 +1,81 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=50, + elite_rate=0.2, + resonance_factor=0.1, + mutation_scale=0.1, + harmonic_frequency=0.05, + feedback_intensity=0.2, + damping_factor=0.95, + mutation_decay=0.99, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + # Quantum-inspired mutations enhanced with feedback mechanism + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + # Sort population by fitness and perform selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with quantum-inspired variations + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + # Decay mutation parameters to stabilize convergence over time + self.mutation_scale *= self.mutation_decay + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV2.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV2.py new file mode 100644 index 000000000..bb782b9b3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV2.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV2: + def __init__( + self, + budget, + dim=5, + pop_size=250, + elite_rate=0.3, + resonance_factor=0.1, + mutation_scale=0.25, + harmonic_frequency=0.1, + feedback_intensity=0.5, + damping_factor=0.95, + mutation_decay=0.95, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV3.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV3.py new file mode 100644 index 000000000..a2b661d15 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV3.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV3: + def __init__( + self, + budget, + dim=5, + pop_size=300, + elite_rate=0.35, + resonance_factor=0.15, + mutation_scale=0.3, + harmonic_frequency=0.2, + feedback_intensity=0.6, + damping_factor=0.92, + mutation_decay=0.93, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV4.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV4.py new file mode 100644 index 000000000..b6c5c6c24 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV4.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV4: + def __init__( + self, + budget, + dim=5, + pop_size=500, + elite_rate=0.4, + resonance_factor_initial=0.3, + mutation_scale_initial=0.5, + harmonic_frequency=0.25, + feedback_intensity=0.7, + damping_factor=0.95, + mutation_decay=0.98, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor_initial + self.mutation_scale = mutation_scale_initial + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + harmonic_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV5.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV5.py new file mode 100644 index 000000000..122d2433e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV5.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV5: + def __init__( + self, + budget, + dim=5, + pop_size=1000, + elite_rate=0.2, + resonance_factor_initial=0.4, + mutation_scale_initial=0.7, + harmonic_frequency=0.1, + feedback_intensity=0.9, + damping_factor=0.9, + mutation_decay=0.95, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor_initial + self.mutation_scale = mutation_scale_initial + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + wave_influence = self.harmonic_frequency * np.cos(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + wave_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV6.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV6.py new file mode 100644 index 000000000..6644185c1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV6.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV6: + def __init__( + self, + budget, + dim=5, + pop_size=1200, + elite_rate=0.1, + resonance_factor_initial=0.5, + mutation_scale_initial=0.5, + harmonic_frequency=0.05, + feedback_intensity=0.95, + damping_factor=0.95, + mutation_decay=0.98, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor_initial + self.mutation_scale = mutation_scale_initial + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + wave_influence = self.harmonic_frequency * np.cos(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + wave_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV7.py b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV7.py new file mode 100644 index 000000000..706e720a9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicFocusedOptimizerV7.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumHarmonicFocusedOptimizerV7: + def __init__( + self, + budget, + dim=5, + pop_size=1500, + elite_rate=0.08, + resonance_factor_initial=0.6, + mutation_scale_initial=0.3, + harmonic_frequency=0.02, + feedback_intensity=0.98, + damping_factor=0.97, + mutation_decay=0.99, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor_initial + self.mutation_scale = mutation_scale_initial + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def quantum_mutations(self, elite_sample): + wave_influence = self.harmonic_frequency * np.cos(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + feedback_correction = self.feedback_intensity * (self.best_solution - elite_sample) + + new_solution = ( + elite_sample + wave_influence + quantum_resonance + mutation_effect + feedback_correction + ) + new_solution = np.clip(new_solution, self.lower_bound, self.upper_bound) + return new_solution + + def update_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + self.population[idx] = self.quantum_mutations(elite_sample) + + self.mutation_scale *= self.mutation_decay + self.resonance_factor *= self.damping_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicImpulseOptimizerV9.py b/nevergrad/optimization/lama/QuantumHarmonicImpulseOptimizerV9.py new file mode 100644 index 000000000..3c01924df --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicImpulseOptimizerV9.py @@ -0,0 +1,69 @@ +import numpy as np + + +class QuantumHarmonicImpulseOptimizerV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 2000 # Further increased population size + self.sigma_initial = 1.5 # Initial mutation spread + self.sigma_final = 0.001 # Even finer final mutation spread + self.elitism_factor = 0.01 # Reduced elitism to further increase diversity + self.CR_initial = 0.9 # High initial crossover probability + self.CR_final = 0.05 # Lower final crossover probability + self.q_impact_initial = 0.05 # Slightly higher initial quantum impact + self.q_impact_final = 0.7 # Increased final quantum impact + self.q_impact_increase_rate = 0.005 # Faster increase in quantum impact + self.harmonic_impulse_frequency = 0.05 # Frequency of harmonic impulse modulation + self.impulse_amplitude = 0.4 # Amplitude of the harmonic impulse + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation with quantum harmonic impulse adjustments + idxs = [j for j in range(self.pop_size) if j != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c + q_impact * np.cos(c + impulse)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumHarmonicPrecisionOptimizer.py b/nevergrad/optimization/lama/QuantumHarmonicPrecisionOptimizer.py new file mode 100644 index 000000000..00e8a10f6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicPrecisionOptimizer.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumHarmonicPrecisionOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=200, + elite_rate=0.2, + resonance_factor=0.08, + mutation_scale=0.01, + harmonic_frequency=0.15, + feedback_intensity=0.15, + damping_factor=0.95, + mutation_decay=0.99, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_factor = resonance_factor + self.mutation_scale = mutation_scale + self.harmonic_frequency = harmonic_frequency + self.feedback_intensity = feedback_intensity + self.damping_factor = damping_factor + self.mutation_decay = mutation_decay + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.prev_best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.prev_best_fitness = self.best_fitness + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and perform selective reproduction + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_frequency * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_factor * (np.random.uniform(-1, 1, self.dim) ** 3) + mutation_effect = np.random.normal(0, self.mutation_scale, self.dim) + + feedback_adjustment = ( + (self.best_fitness - self.prev_best_fitness) + * self.feedback_intensity + * np.random.uniform(-1, 1, self.dim) + ) + + self.population[idx] = ( + elite_sample + + (harmonic_influence + quantum_resonance + mutation_effect + feedback_adjustment) + * self.damping_factor + ) + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + self.mutation_scale *= self.mutation_decay + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonicResilientEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumHarmonicResilientEvolutionStrategy.py new file mode 100644 index 000000000..88aa9809e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonicResilientEvolutionStrategy.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumHarmonicResilientEvolutionStrategy: + def __init__( + self, + budget, + dim=5, + pop_size=150, + elite_rate=0.20, + resonance_intensity=0.12, + mutation_intensity=0.03, + harmonic_depth=0.25, + feedback_factor=0.1, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.resonance_intensity = resonance_intensity + self.mutation_intensity = mutation_intensity + self.harmonic_depth = harmonic_depth + self.feedback_factor = feedback_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.prev_best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.prev_best_fitness = self.best_fitness + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and select elites + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with harmonic fluctuations and quantum resonance + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + harmonic_influence = self.harmonic_depth * np.sin(np.random.uniform(0, 2 * np.pi, self.dim)) + quantum_resonance = self.resonance_intensity * (np.random.uniform(-1, 1, self.dim) ** 3) + normal_disturbance = np.random.normal(0, self.mutation_intensity, self.dim) + + # Feedback mechanism: adapt to stagnation + if self.best_fitness >= self.prev_best_fitness: + feedback_adjustment = self.feedback_factor * np.random.uniform(-1, 1, self.dim) + self.harmonic_depth *= 0.95 # Dampen harmonic depth to refocus search + else: + feedback_adjustment = 0 + + # Combine influences + self.population[idx] = ( + elite_sample + + harmonic_influence + + quantum_resonance + + normal_disturbance + + feedback_adjustment + ) + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumHarmonizedPSO.py b/nevergrad/optimization/lama/QuantumHarmonizedPSO.py new file mode 100644 index 000000000..1053ba9e3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonizedPSO.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumHarmonizedPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 100 # Adjusted population size for balance between exploration and exploitation + inertia_weight = 0.9 # Higher initial inertia for broader exploration + cognitive_coefficient = 2.0 # Encouraging stronger personal learning + social_coefficient = 2.0 # Encouraging influence from global best + final_inertia_weight = 0.4 # More gradual decrease in inertia to maintain momentum + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + w = inertia_weight - (inertia_weight - final_inertia_weight) * (current_budget / self.budget) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired stochastic component adjusted with a dampening factor + quantum_factor = np.random.normal( + 0, 0.1, self.dim + ) # Slightly increased variance for dynamic adjustments + + # Update velocity + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + quantum_factor + + # Update position + population[i] += velocity[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithm.py b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithm.py new file mode 100644 index 000000000..7cf3e8297 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithm.py @@ -0,0 +1,70 @@ +import numpy as np + + +class QuantumHarmonyMemeticAlgorithm: + def __init__(self, budget=10000, hmcr=0.95, par=0.45, bw=0.01, memetic_iter=10): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmImproved.py b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmImproved.py new file mode 100644 index 000000000..08e752a85 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmImproved.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumHarmonyMemeticAlgorithmImproved: + def __init__(self, budget=10000, hmcr=0.95, par=0.45, bw=0.01, memetic_iter=10, memetic_prob=0.2): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmRefined.py b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmRefined.py new file mode 100644 index 000000000..574f02cd0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonyMemeticAlgorithmRefined.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumHarmonyMemeticAlgorithmRefined: + def __init__(self, budget=10000, hmcr=0.9, par=0.4, bw=0.05, memetic_iter=15, memetic_prob=0.3): + self.budget = budget + self.dim = 5 + self.hmcr = hmcr + self.par = par + self.bw = bw + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.budget)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + if np.random.rand() < self.hmcr: + if np.random.rand() < self.par: + new_harmony[i] = harmony_memory[np.random.randint(len(harmony_memory))][i] + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + else: + new_harmony[i] = np.random.uniform(-5.0, 5.0) + + if np.random.rand() < self.bw: + new_harmony[i] += np.random.normal(0, 1) + + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, 0.1, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx] = self._memetic_local_search(harmony_memory[idx], func) + harmony_memory_costs[idx] = func(harmony_memory[idx]) + + return harmony_memory, harmony_memory_costs + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for _ in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + new_harmony = self._memetic_local_search(new_harmony, func) + new_cost = func(new_harmony) + + if new_cost < min(harmony_memory_costs): + idx = np.argmin(harmony_memory_costs) + harmony_memory[idx] = new_harmony + harmony_memory_costs[idx] = new_cost + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHarmonySearch.py b/nevergrad/optimization/lama/QuantumHarmonySearch.py new file mode 100644 index 000000000..127cd4ce2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHarmonySearch.py @@ -0,0 +1,42 @@ +import numpy as np + + +class QuantumHarmonySearch: + def __init__(self, budget, harmony_memory_size=10, pitch_adjustment_rate=0.1, bandwidth=0.01): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + self.bandwidth = bandwidth + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.harmony_memory_size, len(func.bounds.lb)) + ) + + for _ in range(self.budget): + new_harmony = self.generate_new_harmony(harmony_memory, func) + new_harmony_fitness = np.array([func(x) for x in new_harmony]) + + min_index = np.argmin(new_harmony_fitness) + if new_harmony_fitness[min_index] < self.f_opt: + self.f_opt = new_harmony_fitness[min_index] + self.x_opt = new_harmony[min_index] + + return self.f_opt, self.x_opt + + def generate_new_harmony(self, harmony_memory, func): + new_harmony = np.copy(harmony_memory) + for i in range(len(func.bounds.lb)): + if np.random.rand() < self.pitch_adjustment_rate: + index = np.random.choice(self.harmony_memory_size, size=2, replace=False) + new_value = np.clip( + np.random.normal( + (harmony_memory[index[0], i] + harmony_memory[index[1], i]) / 2, self.bandwidth + ), + func.bounds.lb[i], + func.bounds.ub[i], + ) + new_harmony[:, i] = new_value + return new_harmony diff --git a/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategy.py b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategy.py new file mode 100644 index 000000000..e15db5c11 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategy.py @@ -0,0 +1,66 @@ +import numpy as np + + +class QuantumHybridAdaptiveStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Population size + self.sigma_initial = 0.5 # Initial mutation spread + self.elitism_factor = 5 # Percentage of elite individuals to carry forward without mutation + self.sigma_decay = 0.99 # Decay factor for mutation spread + self.CR_base = 0.9 # Initial crossover probability + self.CR_decay = 0.995 # Decay rate for crossover probability + self.q_impact = 0.3 # Quantum impact factor on mutation vector + self.momentum = 0.8 # Momentum for the update mechanism + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + velocities = np.zeros_like(pop) # Initialize velocities for momentum-based updates + + # Evolutionary loop + for _ in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Mutation using DE-like strategy with added momentum and quantum effects + idxs = [idx for idx in range(self.pop_size) if idx != i and idx >= elite_size] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + self.q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Update with momentum + velocities[i] = self.momentum * velocities[i] + (1 - self.momentum) * (trial - pop[i]) + trial = pop[i] + velocities[i] + trial = np.clip(trial, -5.0, 5.0) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update sigma and CR + sigma *= self.sigma_decay + CR *= self.CR_decay + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV2.py b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV2.py new file mode 100644 index 000000000..48c007344 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV2.py @@ -0,0 +1,61 @@ +import numpy as np + + +class QuantumHybridAdaptiveStrategyV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Population size + self.sigma_initial = 0.1 # Initial mutation spread, reduced to tighten exploration + self.elitism_factor = 10 # Increased elite size to preserve good candidates + self.sigma_decay = 0.98 # Slower decay for mutation spread + self.CR_base = 0.8 # Lower initial crossover probability to allow more mutation effects + self.CR_decay = 0.99 # Slower decay rate for crossover probability + self.q_impact = 0.5 # Increased quantum impact factor on mutation vector for more diversity + self.adaptation_rate = 0.05 # Rate at which the quantum impact factor adapulates + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + + # Evolutionary loop + for _ in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Mutation using DE-like strategy with quantum effects + idxs = [idx for idx in range(self.pop_size) if idx != i and idx >= elite_size] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + self.q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Adaptive Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive updates to parameters + sigma *= self.sigma_decay + CR *= self.CR_decay + self.q_impact *= 1 + self.adaptation_rate if np.random.rand() < 0.5 else 1 - self.adaptation_rate + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV8.py b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV8.py new file mode 100644 index 000000000..b3b770c4b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV8.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumHybridAdaptiveStrategyV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given problem dimensionality + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 500 + elite_size = 50 + evaluations = 0 + mutation_factor = 0.7 + crossover_probability = 0.7 + quantum_probability = 0.2 + adaptive_scaling_factor = lambda t: 0.1 * np.exp(-0.05 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Hybrid differential evolution with adaptive mutation + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Differential mutation combining best and random individual + mutant = x1 + mutation_factor * ((self.x_opt - x1) + (x2 - x3)) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV9.py b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV9.py new file mode 100644 index 000000000..0500eec54 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridAdaptiveStrategyV9.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumHybridAdaptiveStrategyV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given problem dimensionality + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 800 + elite_size = 80 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.8 + quantum_probability = 0.15 + adaptive_scaling_factor = lambda t: 0.15 * np.exp(-0.07 * t) + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Hybrid differential evolution with adaptive mutation + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Differential mutation combining best and random individual + mutant = x1 + mutation_factor * ((self.x_opt - x1) + (x2 - x3)) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumHybridDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumHybridDifferentialEvolution.py new file mode 100644 index 000000000..4e8cba8ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridDifferentialEvolution.py @@ -0,0 +1,190 @@ +import numpy as np + + +class QuantumHybridDifferentialEvolution: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE.py b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE.py new file mode 100644 index 000000000..fe3b28e44 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE.py @@ -0,0 +1,179 @@ +import numpy as np + + +class QuantumHybridDynamicAdaptiveDE: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=30, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = 0.01 * np.random.randn() # adaptive step size with Gaussian perturbation + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v2.py b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v2.py new file mode 100644 index 000000000..cb1ffc144 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v2.py @@ -0,0 +1,181 @@ +import numpy as np + + +class QuantumHybridDynamicAdaptiveDE_v2: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=20, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + perturbation_intensity=0.05, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v3.py b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v3.py new file mode 100644 index 000000000..2a7090019 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridDynamicAdaptiveDE_v3.py @@ -0,0 +1,181 @@ +import numpy as np + + +class QuantumHybridDynamicAdaptiveDE_v3: + def __init__( + self, + budget=10000, + population_size=150, + elite_size=20, + local_search_steps=30, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.03, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE.py new file mode 100644 index 000000000..ed9d7e1fb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v2.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v2.py new file mode 100644 index 000000000..8d7d1e48e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v2.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v2: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = np.random.rand() * 0.01 # adaptive step size + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v3.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v3.py new file mode 100644 index 000000000..49509cc09 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v3.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v3: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = np.random.rand() * 0.01 # adaptive step size + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v4.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v4.py new file mode 100644 index 000000000..0813e1db3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v4.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v4: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=50, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = np.random.rand() * 0.01 # adaptive step size + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v5.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v5.py new file mode 100644 index 000000000..57fd61e4a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v5.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v5: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=30, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = np.random.rand() * 0.01 # adaptive step size + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v6.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v6.py new file mode 100644 index 000000000..7c6cbc2ce --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v6.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v6: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=30, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = np.random.rand() * 0.01 # adaptive step size + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v7.py b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v7.py new file mode 100644 index 000000000..5b2c799aa --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridEliteAdaptiveDE_v7.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridEliteAdaptiveDE_v7: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=30, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = 0.01 * np.random.randn() # adaptive step size with Gaussian perturbation + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridImprovedDE.py b/nevergrad/optimization/lama/QuantumHybridImprovedDE.py new file mode 100644 index 000000000..9b424f2af --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridImprovedDE.py @@ -0,0 +1,192 @@ +import numpy as np + + +class QuantumHybridImprovedDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/QuantumHybridParticleDifferentialSearch.py b/nevergrad/optimization/lama/QuantumHybridParticleDifferentialSearch.py new file mode 100644 index 000000000..9ce8ad375 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumHybridParticleDifferentialSearch.py @@ -0,0 +1,135 @@ +import numpy as np + + +class QuantumHybridParticleDifferentialSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 30 + inertia_weight = 0.6 + cognitive_coefficient = 1.4 + social_coefficient = 1.4 + differential_weight = 0.8 + crossover_rate = 0.9 + quantum_factor = 0.05 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory_size = 5 + memory = [] + + while evaluations < self.budget: + for i in range(population_size): + # Particle Swarm Optimization Part + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + # Differential Evolution Part with Adaptive Memory + indices = list(range(population_size)) + indices.remove(i) + + if len(memory) < memory_size: + memory.append(population[i]) + else: + memory[np.random.randint(memory_size)] = population[i] + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + if len(memory) > 2: + d = memory[np.random.choice(len(memory))] + else: + d = global_best_position + + mutant_vector = np.clip( + a + differential_weight * (b - c + d - global_best_position), self.lb, self.ub + ) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum behavior implementation + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + return self.f_opt, self.x_opt + + +# Example usage: +# def sample_func(x): +# return np.sum(x**2) + +# optimizer = QuantumHybridParticleDifferentialSearch(budget=10000) +# best_fitness, best_solution = optimizer(sample_func) +# print("Best fitness:", best_fitness) +# print("Best solution:", best_solution) diff --git a/nevergrad/optimization/lama/QuantumInfluenceCrossoverOptimizer.py b/nevergrad/optimization/lama/QuantumInfluenceCrossoverOptimizer.py new file mode 100644 index 000000000..ac657a83f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInfluenceCrossoverOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumInfluenceCrossoverOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 150 # Increase population size for broader sampling + mutation_factor = 0.8 # Adjusted mutation factor for controlled exploration + crossover_prob = 0.7 # Higher crossover probability for better information exchange + elite_factor = 0.2 # Increased elite fraction for enhanced quality propagation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + elite_size = int(population_size * elite_factor) + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Enhanced selection strategy + parent1 = elite_population[np.random.randint(0, elite_size)] + parent2 = elite_population[np.random.randint(0, elite_size)] + child = np.where(np.random.rand(self.dim) < crossover_prob, parent1, parent2) + + # Quantum-inspired mutation + quantum_mutation = mutation_factor * ( + np.random.randn(self.dim) * (1 - np.exp(-np.random.rand(self.dim))) + ) + child += quantum_mutation + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Dynamic adaptation adjustments + mutation_factor *= 0.95 # Gradual decrease + crossover_prob *= 1.02 # Gradual increase to prevent premature convergence + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumInfluencedAdaptiveDifferentialSwarm.py b/nevergrad/optimization/lama/QuantumInfluencedAdaptiveDifferentialSwarm.py new file mode 100644 index 000000000..4a84bad6e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInfluencedAdaptiveDifferentialSwarm.py @@ -0,0 +1,59 @@ +import numpy as np + + +class QuantumInfluencedAdaptiveDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 500 # Further increased population size for enhanced exploration + self.F_base = 0.5 # Adjusted base mutation factor for stability + self.CR_base = 0.9 # Increased base crossover probability for stronger exploration + self.adaptive_F_amplitude = 0.3 # Increased mutation amplitude for wider search capability + self.adaptive_CR_amplitude = 0.3 # Increased CR amplitude for dynamic exploration + self.quantum_probability = 0.1 # Probability of quantum-inspired mutation + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors with sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.adaptive_F_amplitude * np.sin(2 * np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.sin(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + if np.random.rand() < self.quantum_probability: + # Quantum-inspired mutation + quantum_mutation = np.random.normal(loc=best_ind, scale=np.abs(best_ind - pop[j]) / 2) + quantum_mutation = np.clip(quantum_mutation, -5.0, 5.0) + mutant = quantum_mutation + else: + # Mutation: DE/rand/1/bin with adaptive F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure boundaries are respected + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearch.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearch.py new file mode 100644 index 000000000..9030b65d4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearch.py @@ -0,0 +1,96 @@ +import numpy as np + + +class QuantumInformedAdaptiveHybridSearch: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_frac=0.2, + mutation_intensity=1.0, + crossover_prob=0.85, + quantum_prob=0.95, + gradient_prob=0.5, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_frac) + self.mutation_intensity = mutation_intensity + self.crossover_prob = crossover_prob + self.quantum_prob = quantum_prob + self.gradient_prob = gradient_prob + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + p1, p2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[p1], population[p2]) + else: + offspring = population[np.random.choice(elite_indices)] + + # Apply quantum state update probabilistically + if np.random.random() < self.quantum_prob: + offspring = self.quantum_state_update(offspring, best_individual) + + # Apply gradient boost probabilistically + if np.random.random() < self.gradient_prob: + offspring = self.gradient_boost(offspring, func) + + # Mutate the offspring + mutation_scale = self.adaptive_mutation_scale(evaluations) + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) + + new_population[i] = offspring + + # Evaluate the new population + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = population[current_best_idx] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + mask = np.random.rand(self.dimension) < 0.5 + child = mask * parent1 + ~mask * parent2 + return child + + def quantum_state_update(self, individual, best_individual): + return individual + np.random.normal(0, 0.1, self.dimension) * (best_individual - individual) + + def gradient_boost(self, individual, func): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = individual.copy() + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - 0.01 * grad_est + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_intensity * np.exp(-0.03 * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearchV4.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearchV4.py new file mode 100644 index 000000000..58cf3ce6a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveHybridSearchV4.py @@ -0,0 +1,82 @@ +import numpy as np + + +class QuantumInformedAdaptiveHybridSearchV4: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_ratio=0.1, + mutation_scale=1.0, + mutation_decay=0.01, + crossover_prob=0.7, + quantum_boost=0.95, + refinement_rate=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.ceil(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_boost = quantum_boost + self.refinement_rate = refinement_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Perform crossover and mutation + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parents = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parents[0]], population[parents[1]]) + else: + child = population[np.random.choice(elite_indices)] + + # Quantum boost decision + if np.random.random() < self.quantum_boost: + child = self.quantum_state_modification(child, best_individual) + + # Mutation with decaying scale + mutation_scale = self.mutation_scale * np.exp( + -self.mutation_decay * evaluations / self.budget + ) + child += np.random.normal(0, mutation_scale, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + # Evaluate new population + fitness = np.array([func(x) for x in new_population]) + evaluations += self.population_size + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = new_population[current_best_idx] + + population = new_population + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_modification(self, individual, best_individual): + perturbation = np.random.normal(0, self.refinement_rate, self.dimension) + return individual + perturbation * (best_individual - individual) diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveInertiaOptimizer.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveInertiaOptimizer.py new file mode 100644 index 000000000..618368a81 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveInertiaOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class QuantumInformedAdaptiveInertiaOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 40 # Adjusted population size + inertia_weight = 0.9 # Initial inertia + cognitive_coefficient = 1.5 # Scaled down personal learning effect + social_coefficient = 1.5 # Scaled down social influence + quantum_probability = 0.15 # Adjusted probability of quantum jumps + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + w = inertia_weight * (1 - (current_budget / self.budget) ** 2) # Adaptive inertia weight + + for i in range(population_size): + if current_budget >= self.budget: + break + + if np.random.rand() < quantum_probability: + # Quantum jump strategy + population[i] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + else: + # Standard PSO update + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Personal best update + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Global best update + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptivePSO.py b/nevergrad/optimization/lama/QuantumInformedAdaptivePSO.py new file mode 100644 index 000000000..fbf269539 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptivePSO.py @@ -0,0 +1,69 @@ +import numpy as np + + +class QuantumInformedAdaptivePSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 100 # Increased population size for broader exploration + inertia_weight = 0.9 # Initial higher inertia weight for better exploration + cognitive_coefficient = 2.0 # Higher cognitive coefficient + social_coefficient = 2.0 # Higher social coefficient + final_inertia_weight = 0.4 # Lower final inertia weight for increased exploitation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + w = inertia_weight - (inertia_weight - final_inertia_weight) * (current_budget / self.budget) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired stochastic component + quantum_factor = np.random.normal(0, 0.1, self.dim) + + # Update velocity + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + quantum_factor + + # Update position + population[i] += velocity[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV4.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV4.py new file mode 100644 index 000000000..e27c55dac --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV4.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumInformedAdaptiveSearchV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 250 + elite_size = 25 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.75 + quantum_probability = 0.18 + adaptive_rate = 0.05 + learning_period = 20 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum-Informed Mutation + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal( + loc=0, scale=0.5 / np.sqrt(fitness[i] + 1), size=self.dim + ) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Differential Evolution + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive adjustment of parameters + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-4: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.3) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV5.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV5.py new file mode 100644 index 000000000..c306f172f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV5.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumInformedAdaptiveSearchV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 300 + elite_size = 30 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.7 + quantum_probability = 0.15 + adaptive_rate = 0.03 + learning_period = 15 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum-Informed Mutation + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal( + loc=0, scale=0.3 / np.sqrt(fitness[i] + 1), size=self.dim + ) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Differential Evolution + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive adjustment of parameters + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-4: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.3) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV6.py b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV6.py new file mode 100644 index 000000000..bc20d496a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedAdaptiveSearchV6.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumInformedAdaptiveSearchV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 350 + elite_size = 35 + evaluations = 0 + mutation_factor = 0.85 + crossover_probability = 0.75 + quantum_probability = 0.12 + adaptive_rate = 0.05 + learning_period = 10 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum-Informed Mutation + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal( + loc=0, scale=0.25 / np.sqrt(fitness[i] + 1), size=self.dim + ) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Differential Evolution + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive adjustment of parameters + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-5: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.3) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedCooperativeSearchV1.py b/nevergrad/optimization/lama/QuantumInformedCooperativeSearchV1.py new file mode 100644 index 000000000..c90a20094 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedCooperativeSearchV1.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumInformedCooperativeSearchV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 400 + elite_size = 40 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.8 + quantum_probability = 0.1 + adaptive_rate = 0.07 + learning_period = 20 + + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum-Informed Mutation + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal( + loc=0, scale=0.2 / np.sqrt(fitness[i] + 1), size=self.dim + ) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Cooperative Crossover and Mutation + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adaptive adjustment based on recent performance + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-5: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.3) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedCrossoverEvolution.py b/nevergrad/optimization/lama/QuantumInformedCrossoverEvolution.py new file mode 100644 index 000000000..57e88c6f7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedCrossoverEvolution.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumInformedCrossoverEvolution: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 100 + self.elite_size = 20 + self.offspring_size = 80 + self.mutation_scale = 0.01 + self.crossover_prob = 0.95 + self.mutation_prob = 0.1 + self.quantum_probability = 0.15 # Probability to perform quantum-inspired jump + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_prob: + cross_point = np.random.randint(1, self.dim) + child = np.empty(self.dim) + child[:cross_point] = parent1[:cross_point] + child[cross_point:] = parent2[cross_point:] + return child + return parent1 if np.random.rand() < 0.5 else parent2 + + def mutate(self, individual): + if np.random.rand() < self.mutation_prob: + mutation_points = np.random.randint(0, self.dim) + individual[mutation_points] += np.random.normal(0, self.mutation_scale) + individual = np.clip(individual, self.lower_bound, self.upper_bound) + return individual + + def quantum_jump(self, individual): + if np.random.rand() < self.quantum_probability: + return np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + return individual + + def reproduce(self, parents): + offspring = np.empty((self.offspring_size, self.dim)) + num_parents = len(parents) + for i in range(self.offspring_size): + p1, p2 = np.random.choice(num_parents, 2, replace=False) + child = self.crossover(parents[p1], parents[p2]) + child = self.mutate(child) + child = self.quantum_jump(child) + offspring[i] = child + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_survivors(population, fitness) + + offspring = self.reproduce(elite_population) + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumInformedDifferentialStrategy.py b/nevergrad/optimization/lama/QuantumInformedDifferentialStrategy.py new file mode 100644 index 000000000..2b4d1867e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedDifferentialStrategy.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumInformedDifferentialStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 + elite_size = 20 + evaluations = 0 + mutation_scale = 0.4 # Higher initial mutation scale for even more exploration + recombination_prob = 0.85 + quantum_factor = 0.2 # Lower quantum factor to reduce randomness + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Quantum-inspired solution space exploration + num_quantum_individuals = int(population_size * quantum_factor) + quantum_population = np.random.uniform(self.lb, self.ub, (num_quantum_individuals, self.dim)) + quantum_fitness = np.array([func(ind) for ind in quantum_population]) + evaluations += len(quantum_population) + + combined_population = np.vstack((population, quantum_population)) + combined_fitness = np.hstack((fitness, quantum_fitness)) + + # Elite selection using tournament selection + elite_indices = np.argsort(combined_fitness)[:elite_size] + elite_individuals = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + + # Generate new candidates using a differential evolution strategy + new_population = [] + for _ in range(population_size - elite_size): + indices = np.random.choice(elite_size, 3, replace=False) + x1, x2, x3 = elite_individuals[indices] + mutant = x1 + mutation_scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + if np.random.rand() < recombination_prob: + cross_points = np.random.rand(self.dim) < 0.5 + child = np.where(cross_points, mutant, x1) + else: + child = mutant + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + + # Update population and fitness + population = np.vstack((elite_individuals, new_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += len(new_population) + + # Adaptive mutation scale update + mutation_scale *= 0.97 # Slower mutation scale decay for prolonged exploration capability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedDynamicSwarmOptimizer.py b/nevergrad/optimization/lama/QuantumInformedDynamicSwarmOptimizer.py new file mode 100644 index 000000000..e9a09172c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedDynamicSwarmOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class QuantumInformedDynamicSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 30 # Reduced population size for effective exploration + inertia_weight = 0.95 # Initial strong inertia for exploration + cognitive_coefficient = 2.0 # Enhanced personal learning effect + social_coefficient = 2.0 # Enhanced social influence + final_inertia_weight = 0.1 # Sharper final focus + quantum_probability = 0.1 # Probability of using quantum jumps instead of regular updates + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + w = inertia_weight - ((inertia_weight - final_inertia_weight) * (current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + if np.random.rand() < quantum_probability: + # Quantum jump: Generate new position with a random quantum leap + population[i] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + else: + # Standard PSO update + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInformedEvolutionStrategy.py b/nevergrad/optimization/lama/QuantumInformedEvolutionStrategy.py new file mode 100644 index 000000000..e47215528 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedEvolutionStrategy.py @@ -0,0 +1,71 @@ +import numpy as np + + +class QuantumInformedEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 + elite_size = 50 + evaluations = 0 + mutation_scale = 0.2 # Increased initial mutation scale for better exploration + recombination_prob = 0.7 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Quantum-inspired solution space exploration + quantum_population = np.random.uniform(self.lb, self.ub, (int(population_size * 0.2), self.dim)) + quantum_fitness = np.array([func(ind) for ind in quantum_population]) + evaluations += len(quantum_population) + + combined_population = np.vstack((population, quantum_population)) + combined_fitness = np.hstack((fitness, quantum_fitness)) + + # Elite selection using tournament selection + elite_indices = np.argsort(combined_fitness)[:elite_size] + elite_individuals = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + + # Generate new candidates using differential evolution strategy + new_population = [] + for _ in range(population_size - elite_size): + indices = np.random.choice(elite_size, 3, replace=False) + x1, x2, x3 = elite_individuals[indices] + mutant = x1 + mutation_scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + if np.random.rand() < recombination_prob: + cross_points = np.random.rand(self.dim) < 0.5 + child = np.where(cross_points, mutant, x1) + else: + child = mutant + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + + # Update population and fitness + population = np.vstack((elite_individuals, new_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += len(new_population) + + # Adaptive mutation scale update + mutation_scale *= 0.98 # Slow decay to maintain explorative capabilities longer + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInformedGradientOptimizer.py b/nevergrad/optimization/lama/QuantumInformedGradientOptimizer.py new file mode 100644 index 000000000..65623796a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedGradientOptimizer.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumInformedGradientOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5. + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 200 # Adjusted population size for effective exploration and exploitation + mutation_factor = 0.8 # Mutation factor for controlled exploration + crossover_prob = 0.7 # Crossover probability to maintain diversity + learning_rate = 0.01 # Learning rate for gradient descent steps + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + gradients = np.zeros_like(population) + + # Perform a quasi-gradient estimation using finite differences + for i in range(population_size): + if current_budget >= self.budget: + break + + base_ind = population[i] + for d in range(self.dim): + perturbed_ind = np.array(base_ind) + perturbed_ind[d] += learning_rate + if current_budget < self.budget: + perturbed_fitness = func(perturbed_ind) + current_budget += 1 + gradient = (perturbed_fitness - fitness[i]) / learning_rate + gradients[i, d] = gradient + + # Update steps based on gradients and mutation + for i in range(population_size): + if current_budget >= self.budget: + break + + # Apply mutation and crossover + if np.random.rand() < crossover_prob: + partner_idx = np.random.randint(population_size) + crossover_mask = np.random.rand(self.dim) < 0.5 + child = population[i] * crossover_mask + population[partner_idx] * (1 - crossover_mask) + else: + child = population[i] + + mutation = np.random.randn(self.dim) * mutation_factor + child -= learning_rate * gradients[i] + mutation + child = np.clip(child, self.lower_bound, self.upper_bound) + + child_fitness = func(child) + current_budget += 1 + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive updates to parameters + mutation_factor *= 0.99 # Slowly decrease mutation factor + crossover_prob = max(0.5, crossover_prob - 0.01) # Gradually decrease crossover probability + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumInformedHyperStrategicOptimizer.py b/nevergrad/optimization/lama/QuantumInformedHyperStrategicOptimizer.py new file mode 100644 index 000000000..d13994a8f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedHyperStrategicOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumInformedHyperStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 150 # Increased population for wider initial exploration + inertia_weight = 0.9 # Initial higher inertia for broader exploration + cognitive_coefficient = 1.2 # Slightly reduced to prevent local traps + social_coefficient = 1.2 # Reduced to emphasize on individual learning + velocity_limit = 0.25 # Increased limit to allow more dynamic movements + quantum_momentum = 0.1 # Increased quantum influences for better global search + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Dynamic inertia weight adjustment for strategic exploration-exploitation balance + w = inertia_weight * (0.8 + 0.2 * np.sin(2 * np.pi * current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics incorporated more adaptively + if np.random.rand() < 0.05 * (1 - np.cos(2 * np.pi * current_budget / self.budget)): + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions with adaptive strategic constraints + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and personal and global best updates + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInformedOptimizer.py b/nevergrad/optimization/lama/QuantumInformedOptimizer.py new file mode 100644 index 000000000..da2947289 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedOptimizer.py @@ -0,0 +1,89 @@ +import numpy as np + + +class QuantumInformedOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 50 # Smaller population to manage computational resources + mutation_factor = 0.8 # Initial mutation factor for exploration + crossover_prob = 0.7 # Initial crossover probability for exploration + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Quantum-inspired phase to enhance exploration and exploitation + quantum_beta = 1.0 # Quantum behavior parameter + quantum_alpha = 0.01 # Quantum learning rate + quantum_population = quantum_beta * np.random.randn(population_size, self.dim) + quantum_population = np.clip(quantum_population + population, self.lower_bound, self.upper_bound) + + while current_budget < self.budget: + new_population = np.empty_like(population) + new_quantum_population = np.empty_like(quantum_population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Mutation and crossover phases for both classical and quantum populations + indices = np.arange(population_size) + indices = np.delete(indices, i) + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + q1, q2, q3 = quantum_population[random_indices] + + mutant = population[i] + mutation_factor * (x1 - x2 + x3 - population[i]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + quantum_mutant = quantum_population[i] + quantum_alpha * ( + q1 - q2 + q3 - quantum_population[i] + ) + quantum_mutant = np.clip(quantum_mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + quantum_trial = np.where( + np.random.rand(self.dim) < crossover_prob, quantum_mutant, quantum_population[i] + ) + + trial_fitness = func(trial) + quantum_trial_fitness = func(quantum_trial) + current_budget += 2 # Incrementing for both classical and quantum evaluations + + # Selection + if quantum_trial_fitness < trial_fitness: + trial_fitness = quantum_trial_fitness + trial = quantum_trial + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + new_quantum_population[i] = quantum_trial + + population = new_population + quantum_population = new_quantum_population + + # Adaptively adjust mutation and crossover parameters + mutation_factor *= 0.995 # Gradual decrease + crossover_prob *= 1.005 # Gradual increase + quantum_alpha *= 0.99 # Reduce quantum impact over time + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/QuantumInformedPSO.py b/nevergrad/optimization/lama/QuantumInformedPSO.py new file mode 100644 index 000000000..4272bbb95 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedPSO.py @@ -0,0 +1,79 @@ +import numpy as np + + +class QuantumInformedPSO: + def __init__( + self, + budget=10000, + population_size=300, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.5, + social_weight=1.8, + quantum_prob=0.1, + quantum_radius=0.1, + gradient_factor=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_prob = quantum_prob + self.quantum_radius = quantum_radius + self.gradient_factor = gradient_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_reduction = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * self.inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.gradient_factor + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component + gradient_component + ) + + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal(0, self.quantum_radius, self.dim) + particles[i] = global_best + quantum_jump + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/QuantumInformedParticleSwarmOptimizer.py b/nevergrad/optimization/lama/QuantumInformedParticleSwarmOptimizer.py new file mode 100644 index 000000000..2b20ada97 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedParticleSwarmOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class QuantumInformedParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # Adjusted population size for broader exploration + inertia_weight = 0.7 # Inertia weight for momentum + cognitive_coefficient = 1.5 # Coefficient for particle's best known position + social_coefficient = 1.5 # Coefficient for swarm's best known position + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired stochastic component + quantum_factor = np.random.normal(0, 0.1, self.dim) + + # Update velocity + inertia = inertia_weight * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + quantum_factor + + # Update position + population[i] += velocity[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + # Adaptive update of inertia weight to encourage exploitation as iterations proceed + inertia_weight *= 0.99 + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInformedStrategicOptimizer.py b/nevergrad/optimization/lama/QuantumInformedStrategicOptimizer.py new file mode 100644 index 000000000..518e2e61f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInformedStrategicOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class QuantumInformedStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 100 # Enhanced population for broader initial search + inertia_weight = 0.7 # Optimized for balanced initial exploration + cognitive_coefficient = 1.5 # Decreased to reduce local premature convergence + social_coefficient = 1.5 # Decreased for the same reason as above + velocity_limit = 0.2 # Optimized for controlled explorative moves + quantum_momentum = 0.05 # Stronger quantum influences for better global search + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Dynamic inertia weight adjustment for exploration to exploitation transition + w = inertia_weight * (0.5 + 0.5 * np.exp(-4 * current_budget / self.budget)) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics incorporated strategically + if np.random.rand() < 0.02 * np.exp(-10 * current_budget / self.budget): + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # Update velocities and positions with strategic constraints + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and personal and global best updates + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumInfusedAdaptiveStrategy.py b/nevergrad/optimization/lama/QuantumInfusedAdaptiveStrategy.py new file mode 100644 index 000000000..4f4a6f814 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInfusedAdaptiveStrategy.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumInfusedAdaptiveStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 150 + elite_size = 15 + evaluations = 0 + mutation_factor = 0.6 + crossover_probability = 0.8 + quantum_probability = 0.1 + adaptive_rate = 0.05 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Adaptive mutation and crossover strategy + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Acceptance of new candidate + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best found solution + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Quantum mutation step + if np.random.rand() < quantum_probability: + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Adaptation of strategy parameters + if np.abs(previous_best - self.f_opt) < 1e-5: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.1) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEElitistLocalSearch.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEElitistLocalSearch.py new file mode 100644 index 000000000..1e74592eb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEElitistLocalSearch.py @@ -0,0 +1,109 @@ +import numpy as np + + +class QuantumInspiredAdaptiveDEElitistLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.pop_size)] + for idx in elite_indices: + new_population[idx], new_fitness[idx] = self.local_search(new_population[idx], bounds, func) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEHybridLocalSearch.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEHybridLocalSearch.py new file mode 100644 index 000000000..690ebfa8f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDEHybridLocalSearch.py @@ -0,0 +1,123 @@ +import numpy as np + + +class QuantumInspiredAdaptiveDEHybridLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.2 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = np.random.randn(self.dim) * 0.1 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + return ( + (trial, trial_fitness) if trial_fitness < func(individual) else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.pop_size)] + for idx in elite_indices: + new_population[idx], new_fitness[idx] = self.hybrid_local_search( + new_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning.py new file mode 100644 index 000000000..e46e2ade2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning.py @@ -0,0 +1,165 @@ +import numpy as np + + +class QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.1 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + self.learning_rate = 0.1 # Learning rate for elite learning + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def quantum_jump(self, individual, global_best): + return np.clip( + individual + self.alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0 + ) + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + quantum_trial = self.quantum_jump(new_population[i], global_best_position) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + + if evaluations >= self.budget: + break + + # Apply elite learning to a fraction of the elite population + for i in elite_indices: + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch.py new file mode 100644 index 000000000..3be9040c1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch.py @@ -0,0 +1,143 @@ +import numpy as np + + +class QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.1 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def quantum_jump(self, individual, global_best): + return np.clip( + individual + self.alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0 + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + quantum_trial = self.quantum_jump(new_population[i], global_best_position) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..4bf39a953 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridDEPSO.py @@ -0,0 +1,162 @@ +import numpy as np + + +class QuantumInspiredAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best): + alpha = 0.2 # Quantum-inspired parameter controlling the attraction to the global best + beta = 0.8 # Quantum-inspired diffusion parameter + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..bf580fcfb --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveHybridOptimizer.py @@ -0,0 +1,175 @@ +import numpy as np + + +class QuantumInspiredAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature for annealing + self.cooling_rate = 0.98 # Cooling rate for simulated annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.05 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + self.momentum_weight = 0.9 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def quantum_jump(self, individual, global_best): + return np.clip( + individual + self.alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0 + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + velocities = np.zeros((self.pop_size, self.dim)) # Initializing velocities for PSO hybrid component + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(new_population[i], global_best_position) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + + if evaluations >= self.budget: + break + + # Update velocities and positions for PSO component + velocities = ( + self.momentum_weight * velocities + + np.random.uniform(size=(self.pop_size, self.dim)) * (personal_best_positions - population) + + np.random.uniform(size=(self.pop_size, self.dim)) * (global_best_position - population) + ) + new_population = np.clip(new_population + velocities, -5.0, 5.0) + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredAdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/QuantumInspiredAdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..eb22b1d0b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredAdaptiveMemeticOptimizer.py @@ -0,0 +1,181 @@ +import numpy as np + + +class QuantumInspiredAdaptiveMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + self.phase_switch_ratio = 0.3 + self.local_search_iters = 5 + self.adaptive_switch_threshold = 0.2 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx, pop_size): + indices = np.delete(np.arange(pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def mutate_quantum(self, current, best, F): + return np.clip(current + F * np.tanh(best - current), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + phase_switch_evals = int(self.phase_switch_ratio * self.budget) + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i, self.initial_pop_size) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + if evaluations < phase_switch_evals: + if np.random.rand() < 0.5: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + else: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + else: + if np.random.rand() < self.adaptive_switch_threshold: + mutant = self.mutate_quantum(population[i], global_best_position, F) + else: + if np.random.rand() < 0.5: + mutant = self.mutate_best_1(global_best_position, population[i], parent1, F) + else: + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_iters + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumInspiredDifferentialEvolution.py new file mode 100644 index 000000000..2382ab929 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredDifferentialEvolution.py @@ -0,0 +1,98 @@ +import numpy as np + + +class QuantumInspiredDifferentialEvolution: + def __init__(self, budget=10000, population_size=30): + self.budget = budget + self.dim = 5 # as given in the problem statement + self.bounds = [-5.0, 5.0] + self.population_size = population_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + base_F = 0.8 + base_Cr = 0.9 + F = base_F + Cr = base_Cr + + stagnation_threshold = 50 # Number of generations to consider for stagnation + stagnation_counter = 0 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Select indices for mutation + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = np.random.choice(indices, 3, replace=False) + + # Quantum-inspired mutation strategy + centroid = np.mean(population[[a, b, c]], axis=0) + mutant = centroid + F * (population[a] - population[b]) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) # Boundary handling + + # Crossover strategy + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + # Update best solution found + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + stagnation_counter = 0 # Reset stagnation counter + else: + stagnation_counter += 1 + + population = new_population + + # Adaptive parameters based on success rate + if success_count > self.population_size * 0.2: + F = min(1.0, F + 0.1) + Cr = max(0.1, Cr - 0.1) + else: + F = max(0.4, F - 0.1) + Cr = min(1.0, Cr + 0.1) + + # Quantum-inspired restart mechanism + if stagnation_counter > stagnation_threshold: + # Re-initialize a portion of the population based on distance to the best solution + distances = np.linalg.norm(population - self.x_opt, axis=1) + reinit_indices = distances.argsort()[-int(self.population_size / 2) :] + population[reinit_indices] = np.random.uniform( + self.bounds[0], self.bounds[1], (len(reinit_indices), self.dim) + ) + fitness[reinit_indices] = np.array([func(ind) for ind in population[reinit_indices]]) + evaluations += len(reinit_indices) + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + stagnation_counter = 0 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/QuantumInspiredDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..1efdedbb8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.optimize import minimize + + +class QuantumInspiredDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 100 + self.init_num_niches = 10 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.2 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, p_best, g_best, beta): + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - g_best) * np.log(1 / u) + return x + Q * v + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Quantum inspired update + quantum_trial = self.quantum_update(trial, local_bests[n], global_best, self.beta) + quantum_trial = np.clip(quantum_trial, self.bounds[0], self.bounds[1]) + f_quantum_trial = func(quantum_trial) + evaluations += 1 + + if f_quantum_trial < f_trial: + trial, f_trial = quantum_trial, f_quantum_trial + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredHybridOptimizer.py b/nevergrad/optimization/lama/QuantumInspiredHybridOptimizer.py new file mode 100644 index 000000000..18b619acc --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredHybridOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumInspiredHybridOptimizer: + def __init__(self, budget, dim=5, population_size=50, elite_size=10): + self.budget = budget + self.dim = dim + self.population_size = population_size + self.elite_size = elite_size + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.tournament_size = 5 + self.mutation_prob = 0.2 + self.learning_rate = 0.1 + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(x) for x in population]) + + def tournament_selection(self, population, fitnesses): + selected_indices = np.random.randint( + 0, self.population_size, (self.population_size, self.tournament_size) + ) + selected_fitnesses = fitnesses[selected_indices] + winners_indices = selected_indices[ + np.arange(self.population_size), np.argmin(selected_fitnesses, axis=1) + ] + return population[winners_indices] + + def mutate(self, population): + mutation_mask = np.random.rand(self.population_size, self.dim) < self.mutation_prob + gaussian_perturbations = np.random.normal(0, self.learning_rate, (self.population_size, self.dim)) + mutated_population = population + mutation_mask * gaussian_perturbations + return np.clip(mutated_population, self.lower_bound, self.upper_bound) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + iterations = self.population_size + while iterations < self.budget: + selected = self.tournament_selection(population, fitness) + mutated = self.mutate(selected) + mutated_fitness = self.evaluate_population(func, mutated) + + combined_population = np.vstack((population, mutated)) + combined_fitness = np.concatenate((fitness, mutated_fitness)) + + top_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[top_indices] + fitness = combined_fitness[top_indices] + + if np.min(fitness) < best_fitness: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + iterations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/QuantumInspiredMetaheuristic.py b/nevergrad/optimization/lama/QuantumInspiredMetaheuristic.py new file mode 100644 index 000000000..acb2ae56d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredMetaheuristic.py @@ -0,0 +1,51 @@ +import numpy as np + + +class QuantumInspiredMetaheuristic: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + + def __call__(self, func): + # Initialize parameters + population_size = 50 + quantum_size = 10 + initial_position = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + best_position = None + best_value = np.Inf + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-1, 1, position.shape) * (best_position - position) / 2 + + eval_count = 0 + + while eval_count < self.budget: + for i in range(population_size): + if eval_count >= self.budget: + break + for q in range(quantum_size): + if eval_count >= self.budget: + break + # Quantum-inspired position update + candidate = quantum_position_update( + initial_position[i], + best_position if best_position is not None else initial_position[i], + ) + # Ensure candidate is within bounds + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + eval_count += 1 + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + initial_position[i] = candidate + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = QuantumInspiredMetaheuristic(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/QuantumInspiredOptimization.py b/nevergrad/optimization/lama/QuantumInspiredOptimization.py new file mode 100644 index 000000000..873f43515 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredOptimization.py @@ -0,0 +1,49 @@ +import numpy as np + + +class QuantumInspiredOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.alpha = 0.1 # Step size for update + self.gamma = 0.05 # Step size for perturbation + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize a population of solutions + population_size = 20 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + for t in range(1, self.budget // population_size + 1): + # Select the best solution + idx_best = np.argmin(fitness) + x_best = population[idx_best].copy() + + # Update personal bests + for i in range(population_size): + if fitness[i] < self.f_opt: + self.f_opt = fitness[i] + self.x_opt = population[i].copy() + + # Quantum-inspired perturbation step + for i in range(population_size): + if np.random.rand() < 0.5: + direction = np.sign(np.random.randn(self.dim)) + perturbation = self.gamma * direction * np.abs(x_best - population[i]) + population[i] += perturbation + else: + perturbation = self.alpha * (x_best - population[i]) + population[i] += perturbation + + # Ensure solutions remain within bounds + population = np.clip(population, self.lb, self.ub) + + # Evaluate fitness + fitness = np.array([func(ind) for ind in population]) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumInspiredSpiralOptimizer.py b/nevergrad/optimization/lama/QuantumInspiredSpiralOptimizer.py new file mode 100644 index 000000000..8f5f21784 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumInspiredSpiralOptimizer.py @@ -0,0 +1,83 @@ +import numpy as np + + +class QuantumInspiredSpiralOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set as constant + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population + population_size = 30 # Reduced population size for more focused search + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Quantum-inspired parameters + beta_min = 0.1 # Minimum beta value for quantum behavior + beta_max = 1.0 # Maximum beta value for classical behavior + beta_decay = 0.995 # Decay rate for beta to transition from quantum to classical + + mutation_factor = 0.75 # Mutation factor for differential evolution + crossover_probability = 0.7 # Crossover probability + + # Adaptive step sizes for local search + initial_step_size = 0.1 + step_decay = 0.98 + + evaluations_left = self.budget - population_size + beta = beta_max + + while evaluations_left > 0: + # Update beta towards more classical behavior + beta = max(beta_min, beta * beta_decay) + + for i in range(population_size): + # Differential evolution strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Quantum-inspired perturbation + quantum_jitter = np.random.normal(0, beta, self.dim) + trial += quantum_jitter + trial = np.clip(trial, -5.0, 5.0) + + # Evaluate trial solution + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Adaptive local search with diminishing step size + step_size = initial_step_size * step_decay ** (self.budget - evaluations_left) + for _ in range(5): # Limited number of local search steps + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < fitness[i]: + trial = new_trial + fitness[i] = f_new_trial + if f_new_trial < self.f_opt: + self.f_opt = f_new_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumIterativeDeepeningHybridSearch.py b/nevergrad/optimization/lama/QuantumIterativeDeepeningHybridSearch.py new file mode 100644 index 000000000..84a140601 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumIterativeDeepeningHybridSearch.py @@ -0,0 +1,109 @@ +import numpy as np + + +class QuantumIterativeDeepeningHybridSearch: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_ratio=0.1, + mutation_scale=0.5, + mutation_decay=0.0005, + crossover_prob=0.85, + quantum_intensity=0.55, + local_search_prob=0.4, + local_search_intensity=0.05, + deepening_factor=0.2, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_intensity = quantum_intensity + self.local_search_prob = local_search_prob + self.local_search_intensity = local_search_intensity + self.deepening_factor = deepening_factor + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + child = population[np.random.choice(elite_indices)] + + if np.random.random() < self.local_search_prob: + child = self.local_search(child, func) + + mutation_scale_adjusted = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale_adjusted, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Quantum deepening step + if np.random.random() < self.deepening_factor: + new_population, new_fitness = self.quantum_deepening(new_population, new_fitness, func) + evaluations += len(new_population) + + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def local_search(self, individual, func): + local_step_size = self.local_search_intensity + for _ in range(int(self.dimension * 0.8)): # More aggressive local search + perturbation = np.random.uniform(-local_step_size, local_step_size, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, -5, 5) + if func(new_individual) < func(individual): + individual = new_individual + return individual + + def quantum_deepening(self, population, fitness, func): + # Enhance exploration in promising regions + elite_indices = np.argsort(fitness)[: int(len(population) * 0.2)] + updated_pop = population[elite_indices].copy() + quantum_perturbations = self.quantum_intensity * np.random.normal( + size=(len(updated_pop), self.dimension) + ) + updated_pop += quantum_perturbations + updated_pop = np.clip(updated_pop, -5, 5) + updated_fitness = np.array([func(ind) for ind in updated_pop]) + return updated_pop, updated_fitness diff --git a/nevergrad/optimization/lama/QuantumIterativeRefinementOptimizer.py b/nevergrad/optimization/lama/QuantumIterativeRefinementOptimizer.py new file mode 100644 index 000000000..b57a73d1b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumIterativeRefinementOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class QuantumIterativeRefinementOptimizer: + def __init__(self, budget, dim=5, pop_size=20, elite_rate=0.2, refinement_rate=0.95, quantum_prob=0.1): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.refinement_rate = refinement_rate + self.quantum_prob = quantum_prob + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = self.population[i] + + def refine_population(self): + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Refinement and reproduction from elites + for idx in non_elite_indices: + if np.random.rand() < self.quantum_prob: + # Quantum jump inspired by best solution + self.population[idx] = ( + self.best_solution + np.random.normal(0, 0.1, self.dim) * self.refinement_rate + ) + else: + # Crossover and mutation + parent1 = self.population[np.random.choice(elite_indices)] + parent2 = self.population[np.random.choice(elite_indices)] + crossover_point = np.random.randint(self.dim) + child = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + mutation = np.random.normal(0, 0.1, self.dim) * (1 - self.refinement_rate) + self.population[idx] = child + mutation + + # Ensure boundaries are respected + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = self.pop_size + + while evaluations < self.budget: + self.evaluate_fitness(func) + self.refine_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumLeapOptimizer.py b/nevergrad/optimization/lama/QuantumLeapOptimizer.py new file mode 100644 index 000000000..212ae5bd6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLeapOptimizer.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumLeapOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 50 # Reduced population size for more focused exploration + gamma_initial = 0.5 # Initial gamma for exploration/exploitation balance + gamma_final = 0.01 # Final gamma for refined exploitation + gamma_decay = (gamma_final / gamma_initial) ** (1 / self.budget) # Exponential decay + elite_count = 5 # Increased elite count for diversity + mutation_strength = 0.05 # Moderate mutation strength + crossover_probability = 0.7 # Adjusted crossover probability + tunneling_frequency = 0.5 # Adjusted tunneling frequency to avoid excessive randomness + + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + + # Elite selection + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLeapOptimizerV2.py b/nevergrad/optimization/lama/QuantumLeapOptimizerV2.py new file mode 100644 index 000000000..3542c770a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLeapOptimizerV2.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumLeapOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Strategy parameters + population_size = 30 # Further reduced population size for localized search + gamma_initial = 0.8 # Higher initial gamma for stronger initial exploration + gamma_final = 0.01 # Lower final gamma for more fine-grained exploitation + gamma_decay = (gamma_final / gamma_initial) ** (1 / self.budget) # Exponential decay + elite_count = 3 # Reduced elite count to focus on highly promising candidates + mutation_strength = 0.01 # Reduced mutation strength for finer mutations + crossover_probability = 0.8 # Higher crossover probability for more frequent genetic mixing + tunneling_frequency = 0.3 # Reduced tunneling frequency to prevent excessive randomness + + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = gamma_initial * (gamma_decay ** (self.budget - evaluations_left)) + + # Elite selection + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDEHybridLocalSearch.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDEHybridLocalSearch.py new file mode 100644 index 000000000..fcbce02ec --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDEHybridLocalSearch.py @@ -0,0 +1,130 @@ +import numpy as np + + +class QuantumLevyAdaptiveDEHybridLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.2 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds, alpha=0.01): + u = np.random.normal(0, 1, self.dim) * alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.pop_size)] + for idx in elite_indices: + new_population[idx], new_fitness[idx] = self.hybrid_local_search( + new_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV2.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV2.py new file mode 100644 index 000000000..6974cb4be --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV2.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyAdaptiveDifferentialOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.4 - 1.0 * progress + social_coefficient = 1.4 + 0.6 * progress + differential_weight = 0.6 + 0.4 * progress + crossover_rate = 0.9 - 0.6 * progress + quantum_factor = 0.3 - 0.2 * progress + levy_factor = 0.4 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.3: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV3.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV3.py new file mode 100644 index 000000000..f7e89d6e8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV3.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyAdaptiveDifferentialOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.6 - 0.3 * progress + cognitive_coefficient = 1.5 - 1.0 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.8 - 0.5 * progress + quantum_factor = 0.2 - 0.1 * progress + levy_factor = 0.5 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.4: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV4.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV4.py new file mode 100644 index 000000000..671eb21ac --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV4.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyAdaptiveDifferentialOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.5 * progress + cognitive_coefficient = 1.7 - 1.0 * progress + social_coefficient = 1.7 + 0.5 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.6 * progress + quantum_factor = 0.3 - 0.2 * progress + levy_factor = 0.6 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV5.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV5.py new file mode 100644 index 000000000..ca331bbf7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV5.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyAdaptiveDifferentialOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.8 - 0.4 * progress + quantum_factor = 0.4 - 0.3 * progress + levy_factor = 0.7 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 # Reduced population size for faster convergence + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV6.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV6.py new file mode 100644 index 000000000..df1d4e016 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveDifferentialOptimizerV6.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyAdaptiveDifferentialOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 - 0.5 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.6 + 0.4 * progress + crossover_rate = 0.8 - 0.4 * progress + quantum_factor = 0.7 - 0.4 * progress + levy_factor = 0.9 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 100 # Balanced population size + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.8: # Balanced probability for Levy flight + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyAdaptiveMemeticOptimizerV3.py b/nevergrad/optimization/lama/QuantumLevyAdaptiveMemeticOptimizerV3.py new file mode 100644 index 000000000..cd3c0675f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyAdaptiveMemeticOptimizerV3.py @@ -0,0 +1,139 @@ +import numpy as np + + +class QuantumLevyAdaptiveMemeticOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 10 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 25 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.7 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizer.py b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizer.py new file mode 100644 index 000000000..c6b306104 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyDifferentialDynamicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.5 - 0.4 * progress + levy_factor = 0.7 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 # Increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV2.py b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV2.py new file mode 100644 index 000000000..cab6f8653 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV2.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyDifferentialDynamicOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.5 * progress + social_coefficient = 1.7 + 0.5 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.6 - 0.5 * progress + levy_factor = 0.8 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 100 # Increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.7: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV3.py b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV3.py new file mode 100644 index 000000000..1cba17c11 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialDynamicOptimizerV3.py @@ -0,0 +1,156 @@ +import numpy as np + + +class QuantumLevyDifferentialDynamicOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.5 * progress + social_coefficient = 1.7 + 0.5 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.6 - 0.5 * progress + levy_factor = 0.8 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 150 # Increased population size for better exploration + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.8: # Increased probability for Levy flight + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizer.py b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizer.py new file mode 100644 index 000000000..520a80d5e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizer.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumLevyDifferentialHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.6 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.4 * progress + levy_factor = 0.1 + 0.5 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.7: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizerV2.py b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizerV2.py new file mode 100644 index 000000000..7cb05ba42 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridOptimizerV2.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumLevyDifferentialHybridOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.4 * progress + levy_factor = 0.1 + 0.5 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.7: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDifferentialHybridSearch.py b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridSearch.py new file mode 100644 index 000000000..60766975b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDifferentialHybridSearch.py @@ -0,0 +1,158 @@ +import numpy as np + + +class QuantumLevyDifferentialHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.9 + 0.1 * progress + crossover_rate = 0.8 - 0.3 * progress + quantum_factor = 0.6 - 0.2 * progress + levy_factor = 0.1 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 80 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmOptimizerV3.py b/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmOptimizerV3.py new file mode 100644 index 000000000..9829554ad --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmOptimizerV3.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumLevyDynamicDifferentialSwarmOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.3 * progress + social_coefficient = 1.5 - 0.3 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.4 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmV5.py b/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmV5.py new file mode 100644 index 000000000..88a4f50f8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDynamicDifferentialSwarmV5.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumLevyDynamicDifferentialSwarmV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step # Reduced step size for more precise exploitation + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.7 - 0.4 * progress + cognitive_coefficient = 1.4 + 0.4 * progress + social_coefficient = 1.4 - 0.4 * progress + differential_weight = 0.7 + 0.3 * progress + crossover_rate = 0.8 - 0.4 * progress + quantum_factor = 0.4 - 0.2 * progress + levy_factor = 0.05 + 0.35 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 # Reduced population size for faster convergence + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 3 # Reduced local search iterations for efficiency + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDynamicParticleSwarm.py b/nevergrad/optimization/lama/QuantumLevyDynamicParticleSwarm.py new file mode 100644 index 000000000..9e022e3a1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDynamicParticleSwarm.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumLevyDynamicParticleSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.4 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 30 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 2 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyDynamicSwarmOptimization.py b/nevergrad/optimization/lama/QuantumLevyDynamicSwarmOptimization.py new file mode 100644 index 000000000..941a7c6a8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyDynamicSwarmOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class QuantumLevyDynamicSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 60 + inertia_weight_max = 0.9 + inertia_weight_min = 0.4 + cognitive_coefficient = 1.8 + social_coefficient = 2.2 + differential_weight = 0.6 + crossover_rate = 0.9 + quantum_factor = 0.1 + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.2: # Select about 20% of the population for local search + local_search_iters = 5 # Reduce local search iterations for faster execution + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEliteMemeticDEHybridOptimizer.py b/nevergrad/optimization/lama/QuantumLevyEliteMemeticDEHybridOptimizer.py new file mode 100644 index 000000000..8cb7bf1e6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEliteMemeticDEHybridOptimizer.py @@ -0,0 +1,131 @@ +import numpy as np + + +class QuantumLevyEliteMemeticDEHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.2 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.pop_size)] + for idx in elite_indices: + new_population[idx], new_fitness[idx] = self.hybrid_local_search( + new_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEliteMemeticOptimizer.py b/nevergrad/optimization/lama/QuantumLevyEliteMemeticOptimizer.py new file mode 100644 index 000000000..2c1b4f86b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEliteMemeticOptimizer.py @@ -0,0 +1,139 @@ +import numpy as np + + +class QuantumLevyEliteMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.2 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) # Refined adaptation for F + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) # Refined adaptation for CR + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveDifferentialOptimizer.py b/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveDifferentialOptimizer.py new file mode 100644 index 000000000..9d35325c9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveDifferentialOptimizer.py @@ -0,0 +1,177 @@ +import numpy as np + + +class QuantumLevyEnhancedAdaptiveDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.5 - 0.5 * progress + social_coefficient = 1.5 + 0.5 * progress + differential_weight = 0.6 + 0.4 * progress + crossover_rate = 0.8 - 0.4 * progress + quantum_factor = 0.7 - 0.4 * progress + levy_factor = 0.9 + 0.2 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 120 # Increased population size for better diversity + elite_size = 10 # Elite preservation + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + # Elite preservation + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.8: # Balanced probability for Levy flight + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + # Diversity promotion by reinitializing some worst individuals + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveOptimizerV2.py b/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveOptimizerV2.py new file mode 100644 index 000000000..0a0cc4a9f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEnhancedAdaptiveOptimizerV2.py @@ -0,0 +1,158 @@ +import numpy as np + + +class QuantumLevyEnhancedAdaptiveOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 # Balanced population size + elite_size = 5 # Smaller elite size to adapt quicker + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.8: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEnhancedDifferentialOptimizer.py b/nevergrad/optimization/lama/QuantumLevyEnhancedDifferentialOptimizer.py new file mode 100644 index 000000000..335b1acc7 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEnhancedDifferentialOptimizer.py @@ -0,0 +1,158 @@ +import numpy as np + + +class QuantumLevyEnhancedDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress + cognitive_coefficient = 1.7 - 0.7 * progress + social_coefficient = 1.7 * progress + differential_weight = 0.8 + 0.2 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.1: + local_search_iters = 5 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(personal_best_positions[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < personal_best_fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyEnhancedMemeticOptimizerV2.py b/nevergrad/optimization/lama/QuantumLevyEnhancedMemeticOptimizerV2.py new file mode 100644 index 000000000..949d5bd94 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyEnhancedMemeticOptimizerV2.py @@ -0,0 +1,139 @@ +import numpy as np + + +class QuantumLevyEnhancedMemeticOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 20 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.5 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.01 # Smaller step size for finer local search + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Elite population update + sorted_indices = np.argsort(new_fitness) + elite_population = new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] + elite_fitness = new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] + + for idx in range(len(elite_population)): + elite_population[idx], elite_fitness[idx] = self.hybrid_local_search( + elite_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + new_population[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_population + new_fitness[sorted_indices][: int(self.elitism_rate * self.pop_size)] = elite_fitness + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyImprovedDifferentialSwarmOptimization.py b/nevergrad/optimization/lama/QuantumLevyImprovedDifferentialSwarmOptimization.py new file mode 100644 index 000000000..483df88f2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyImprovedDifferentialSwarmOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class QuantumLevyImprovedDifferentialSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 60 # Slightly increased population size for better diversity + inertia_weight_max = 0.8 # Reduced for fine control of exploration + inertia_weight_min = 0.3 + cognitive_coefficient = 1.5 # Increased for stronger local search + social_coefficient = 1.2 # Slightly reduced for balanced global search + differential_weight = 0.9 # Increased differential weight for stronger mutation impact + crossover_rate = 0.6 # Slightly reduced for exploration-exploitation balance + quantum_factor = 0.1 # Increased quantum factor for enhanced global exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Enhanced Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.4: # Increased local search probability + local_search_iters = 15 # Increased iterations for more intensive local search + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevyParticleAdaptiveOptimization.py b/nevergrad/optimization/lama/QuantumLevyParticleAdaptiveOptimization.py new file mode 100644 index 000000000..cbe4c68aa --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevyParticleAdaptiveOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class QuantumLevyParticleAdaptiveOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step # Reduced step size for more precise exploitation + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.5 * progress # More gradual decrease + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + 0.3 * progress + crossover_rate = 0.9 - 0.4 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.05 + 0.35 * progress # Slightly reduced max levy factor + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 50 # Slightly reduced population size for more evaluations per individual + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + # DE Mutation and Crossover + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum Particle Update + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Levy Flight Local Search + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: # Reduced probability of local search to balance exploration + local_search_iters = 5 # Reduced local search iterations + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLevySwarmOptimizationV3.py b/nevergrad/optimization/lama/QuantumLevySwarmOptimizationV3.py new file mode 100644 index 000000000..7b8180742 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLevySwarmOptimizationV3.py @@ -0,0 +1,145 @@ +import numpy as np + + +class QuantumLevySwarmOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def __call__(self, func): + population_size = 80 # Adjusted population size for balance + inertia_weight_max = 0.9 + inertia_weight_min = 0.3 + cognitive_coefficient = 1.5 # Balanced for global and local search + social_coefficient = 1.5 # Balanced for global and local search + differential_weight = 0.8 # Balanced to moderate mutation strength + crossover_rate = 0.9 # Balanced for recombination + quantum_factor = 0.05 # Slightly decreased to moderate exploration + + # Initialize population and velocities + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = inertia_weight_max - (inertia_weight_max - inertia_weight_min) * ( + evaluations / self.budget + ) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Dynamic local search with Levy Flights for further refinement + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.15: # More selective local search probability + local_search_iters = 10 # Reduced for efficiency + for _ in range(local_search_iters): + levy_step = self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLocustSearch.py b/nevergrad/optimization/lama/QuantumLocustSearch.py new file mode 100644 index 000000000..816400635 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLocustSearch.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumLocustSearch: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + adaptive_local_search=True, + local_search_range=0.1, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + self.adaptive_local_search = adaptive_local_search + self.local_search_range = local_search_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = ( + self.local_search_range * np.exp(-_ / self.local_search_iters) + if self.adaptive_local_search + else self.local_search_range + ) + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumLocustSearchV2.py b/nevergrad/optimization/lama/QuantumLocustSearchV2.py new file mode 100644 index 000000000..661c50b76 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumLocustSearchV2.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumLocustSearchV2: + def __init__( + self, + budget=10000, + initial_temp=1.0, + cooling_rate=0.999, + explore_ratio=0.1, + perturb_range=0.1, + local_search_iters=10, + adaptive_local_search=True, + local_search_range=0.1, + ): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + self.perturb_range = perturb_range + self.local_search_iters = local_search_iters + self.adaptive_local_search = adaptive_local_search + self.local_search_range = local_search_range + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _local_search_step(self, x, func): + candidate_x = x + candidate_f = func(candidate_x) + + for _ in range(self.local_search_iters): + perturb_range = ( + self.local_search_range * np.exp(-_ / self.local_search_iters) + if self.adaptive_local_search + else self.local_search_range + ) + new_candidate_x = candidate_x + np.random.uniform(-perturb_range, perturb_range, size=self.dim) + new_candidate_x = np.clip(new_candidate_x, -5.0, 5.0) + new_candidate_f = func(new_candidate_x) + if new_candidate_f < candidate_f: + candidate_x = new_candidate_x + candidate_f = new_candidate_f + + return candidate_x, candidate_f + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_x, candidate_f = self._local_search_step(candidate_x, func) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumOrbitalAdaptiveCrossoverOptimizerV20.py b/nevergrad/optimization/lama/QuantumOrbitalAdaptiveCrossoverOptimizerV20.py new file mode 100644 index 000000000..7c15bf3e5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalAdaptiveCrossoverOptimizerV20.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalAdaptiveCrossoverOptimizerV20: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 500 # Adjusted population size for balance + self.sigma_initial = 1.0 # Reduced initial mutation spread + self.sigma_final = 0.001 # Lower final mutation spread for detailed fine-tuning + self.CR_initial = 0.9 # Initial high crossover probability + self.CR_final = 0.1 # Final crossover probability to maintain diversity at later stages + self.elitism_factor = 0.1 # Increased elitism to retain the top 10% of candidates + self.q_impact_initial = 0.01 # Initial quantum impact + self.q_impact_final = 1.5 # Final quantum impact for enhanced exploitation + self.q_impact_increase_rate = 0.01 # Gradual increase in quantum impact + self.harmonic_impulse_frequency = 0.1 # Lower frequency for less frequent dynamic shifts + self.impulse_amplitude = 1.0 # Reduced amplitude for more controlled perturbations + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite pass-through + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.sin(c + impulse) * (b - a)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover process + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updating + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV12.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV12.py new file mode 100644 index 000000000..b8501f2b9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV12.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV12: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality remains constant across problems + self.pop_size = 150 # Adjust population size for better convergence + self.sigma_initial = 3.5 # Enhance initial mutation variability + self.sigma_final = 0.0005 # Lower final mutation spread for fine-tuning + self.CR_initial = 0.95 # Start with higher crossover probability + self.CR_final = 0.1 # Lower minimal crossover to maintain genetic diversity + self.elitism_factor = 0.2 # Increase elitism to foster top performers + self.q_impact_initial = 0.01 # Start with a minimal quantum impact + self.q_impact_final = 0.95 # End with a significant quantum influence + self.q_impact_increase_rate = 0.002 # Gradual increase in quantum impact + self.harmonic_impulse_frequency = 0.05 # Reduce frequency to stabilize mutation + self.impulse_amplitude = 0.7 # Increase amplitude for stronger periodic effects + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members undergo less drastic changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + (q_impact * np.cos(c - impulse) * (b - a)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update parameters adaptively + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV13.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV13.py new file mode 100644 index 000000000..d4be33e51 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV13.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV13: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 100 # Adjusted population size for optimized exploration-exploitation balance + self.sigma_initial = 2.5 # Initial mutation spread + self.sigma_final = 0.001 # Final mutation spread for fine-tuning + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.2 # Final minimum crossover probability + self.elitism_factor = 0.1 # Reduced elitism to allow greater diversity among non-elite individuals + self.q_impact_initial = 0.05 # Initial quantum impact + self.q_impact_final = 0.99 # Enhanced final quantum impact for aggressive convergence in late stages + self.q_impact_increase_rate = 0.005 # Faster increase in quantum impact + self.harmonic_impulse_frequency = 0.1 # Increased frequency to induce more frequent dynamic changes + self.impulse_amplitude = 0.8 # Higher amplitude to provide stronger dynamic shifts + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members undergo less drastic changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.cos(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update parameters adaptively + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV14.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV14.py new file mode 100644 index 000000000..dd8e93000 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV14.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV14: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.pop_size = 150 # Adjusted population size for broader exploration + self.sigma_initial = 3.0 # Initial mutation spread increased + self.sigma_final = 0.0005 # Further reduced final mutation spread for finer tuning + self.CR_initial = 0.95 # Increased initial crossover probability + self.CR_final = 0.1 # Reduced final crossover probability for maintaining diversity + self.elitism_factor = 0.05 # Significantly reduced elitism to foster diversity + self.q_impact_initial = 0.1 # Higher initial quantum impact + self.q_impact_final = 0.99 # Maintained high final quantum impact + self.q_impact_increase_rate = 0.01 # Increased rate of quantum impact growth + self.harmonic_impulse_frequency = 0.15 # Increased frequency of harmonic impulse + self.impulse_amplitude = 1.0 # Increased amplitude to enforce stronger dynamic shifts + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members undergo less drastic changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.cos(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update parameters adaptively + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV15.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV15.py new file mode 100644 index 000000000..9d0c06035 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV15.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV15: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 + self.pop_size = 200 # Increased population size for enhanced exploration + self.sigma_initial = 2.5 # Slightly decreased initial mutation spread + self.sigma_final = 0.001 # Further reduced final mutation spread for precise fine-tuning + self.CR_initial = 0.90 # Decreased initial crossover probability + self.CR_final = 0.05 # Lowered final crossover probability to maintain diversity + self.elitism_factor = 0.02 # Further reduced elitism to foster diversity + self.q_impact_initial = 0.05 # Lower initial quantum impact + self.q_impact_final = 0.95 # Slightly reduced final quantum impact + self.q_impact_increase_rate = 0.005 # Slower increase rate of quantum impact growth + self.harmonic_impulse_frequency = 0.1 # Lower frequency of harmonic impulse + self.impulse_amplitude = 0.8 # Lower amplitude to enforce moderate dynamic shifts + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members undergo less drastic changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.sin(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update parameters adaptively + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV16.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV16.py new file mode 100644 index 000000000..b2fea69cf --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV16.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV16: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.pop_size = 250 # Further increased population size for better exploration + self.sigma_initial = 3.0 # Increased initial mutation spread to explore more aggressively + self.sigma_final = 0.0005 # Reduced final mutation spread for precise fine-tuning + self.CR_initial = 0.95 # Higher initial crossover probability for promoting early diversity + self.CR_final = 0.01 # Lower final crossover probability to maintain important features + self.elitism_factor = 0.01 # Reducing elitism to increase diversity and avoid local minima + self.q_impact_initial = 0.01 # Reduced initial quantum impact to ensure gentle start + self.q_impact_final = 0.99 # Increased maximum quantum impact for stronger exploitation later + self.q_impact_increase_rate = 0.01 # Increased rate for a smoother transition to exploitation + self.harmonic_impulse_frequency = 0.05 # Reduced frequency for less frequent but impactful shifts + self.impulse_amplitude = 1.0 # Increased amplitude for more significant dynamic shifts + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals undergo smaller changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.sin(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover process + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updating + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV17.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV17.py new file mode 100644 index 000000000..ec764d096 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV17.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV17: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.pop_size = 300 # Increased population size for even broader exploration + self.sigma_initial = 3.5 # Slightly higher initial mutation spread + self.sigma_final = 0.0001 # Very low final mutation spread for ultra-fine adjustments + self.CR_initial = 0.99 # Slightly higher initial crossover probability + self.CR_final = 0.005 # Even lower final crossover probability for strict feature preservation + self.elitism_factor = 0.005 # Reduced elitism factor to further avoid local minima + self.q_impact_initial = 0.005 # Further reduced initial quantum impact + self.q_impact_final = 1.0 # Maximum quantum impact for maximum exploitation at end + self.q_impact_increase_rate = 0.02 # Faster transition to high quantum impact + self.harmonic_impulse_frequency = 0.1 # Increased frequency for periodic dynamic shifts + self.impulse_amplitude = 1.5 # Increase amplitude for greater impact + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals undergo smaller changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.sin(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover process + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updating + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV18.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV18.py new file mode 100644 index 000000000..d1fb9e2e5 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV18.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV18: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.pop_size = 500 # Further increased population size for broader exploration + self.sigma_initial = 2.8 # Reduced initial mutation spread for less randomness + self.sigma_final = 0.00005 # Smaller final mutation spread for precision + self.CR_initial = 0.95 # Reduced initial crossover probability + self.CR_final = 0.01 # Slightly higher final crossover probability for better feature propagation + self.elitism_factor = 0.01 # Increased elitism to preserve good candidates + self.q_impact_initial = 0.001 # Start with a very small quantum impact + self.q_impact_final = 1.5 # Higher max quantum impact for aggressive final exploitation + self.q_impact_increase_rate = 0.03 # Faster quantum impact increase for quicker adaptation + self.harmonic_impulse_frequency = 0.2 # Higher frequency for more frequent dynamic shifts + self.impulse_amplitude = 2.0 # Increased amplitude to ensure larger perturbations + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals undergo smaller changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.sin(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover process + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updating + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV24.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV24.py new file mode 100644 index 000000000..3b2515e7e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV24.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV24: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 80 # Further reduced population size for more in-depth optimization per individual + self.sigma_initial = 0.4 # Slightly reduced mutation spread + self.sigma_final = 0.01 # Lower final mutation spread to allow finer exploitation + self.CR_initial = 0.9 # Maintaining a high initial crossover probability + self.CR_final = 0.1 # Lower final crossover rate for increased exploitation + self.elitism_factor = 0.3 # Increased elitism factor to 30% + self.q_impact_initial = 0.2 # Increased initial quantum impact for stronger exploration at the start + self.q_impact_final = 2.0 # Reduced final quantum impact for controlled final exploration + self.q_impact_increase_rate = 0.1 # Increased rate of quantum impact growth + self.harmonic_impulse_frequency = 0.1 # Increased frequency for harmonic impulse + self.impulse_amplitude = 0.8 # Increased impulse amplitude for a more significant effect + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution parameters initialization + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals are passed unchanged + continue + + # Mutation and Crossover + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.cos(c) * (b - a) + impulse) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV25.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV25.py new file mode 100644 index 000000000..d28ff78d9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV25.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV25: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Increased population size for better exploration + self.sigma_initial = 0.5 # Starting mutation spread slightly increased for broader initial search + self.sigma_final = 0.005 # Reduced final mutation spread for finer exploitation + self.CR_initial = 0.95 # High initial crossover probability for diverse gene mixing + self.CR_final = 0.05 # Reduced crossover rate for increased exploitation in later stages + self.elitism_factor = 0.1 # Reduced elitism to ensure diversity in population + self.q_impact_initial = 0.1 # Lower initial quantum impact to stabilize early exploration + self.q_impact_final = 1.5 # Reduced final quantum impact to focus on refinement in final phase + self.q_impact_increase_rate = ( + 0.05 # Gradual increase in quantum impact to balance exploration and exploitation + ) + self.harmonic_impulse_frequency = ( + 0.2 # Increased frequency for harmonic impulse for more frequent perturbations + ) + self.impulse_amplitude = 0.9 # Enhanced impulse amplitude for stronger diversification effects + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution parameters initialization + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Pass elite individuals unchanged + continue + + # Mutation and Crossover + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.cos(c) * (b - a) + impulse) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV26.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV26.py new file mode 100644 index 000000000..33c3e9d31 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV26.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV26: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Further increased population size for enhanced initial exploration + self.sigma_initial = 0.7 # Increasing the initial mutation spread for broader initial search + self.sigma_final = 0.001 # Further reduced final mutation spread for finer exploitation + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.01 # Minimal final crossover rate for increased exploitation in later stages + self.harmonic_impulse_frequency = 0.1 # Adjusted frequency for harmonic impulse + self.impulse_amplitude = 0.95 # Slightly adjusted impulse amplitude + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Adaptive parameters + sigma = self.sigma_initial + CR = self.CR_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + a + + sigma * (b - c) + + self.impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * (b - a) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = self.sigma_final + (self.sigma_initial - self.sigma_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + CR = self.CR_final + (self.CR_initial - self.CR_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV27.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV27.py new file mode 100644 index 000000000..0e8cc66cf --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV27.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV27: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Increased population size for even more extended exploration + self.sigma_initial = 0.9 # Increased initial mutation spread for broader initial search + self.sigma_final = 0.0005 # Further reduced final mutation spread for finer exploitation + self.CR_initial = 0.95 # Slightly increased initial crossover probability + self.CR_final = 0.005 # Minimal final crossover rate for increased exploitation in late stages + self.harmonic_impulse_frequency = 0.05 # Slower frequency for harmonic impulse + self.impulse_amplitude = 1.0 # Slightly increased impulse amplitude + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Adaptive parameters + sigma = self.sigma_initial + CR = self.CR_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + a + + sigma * (b - c) + + self.impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * (b - a) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = self.sigma_final + (self.sigma_initial - self.sigma_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + CR = self.CR_final + (self.CR_initial - self.CR_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV28.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV28.py new file mode 100644 index 000000000..4c84e7d49 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV28.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV28: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 250 # Further increased population size for heightened exploration + self.sigma_initial = 1.2 # Expanded initial mutation spread to explore more aggressively + self.sigma_final = 0.0001 # Reduced final mutation spread for finer exploitation + self.CR_initial = 0.98 # Increased initial crossover probability for more diversity + self.CR_final = 0.001 # Minimal final crossover rate for precise exploitation + self.harmonic_impulse_frequency = 0.025 # Reduced frequency for less frequent yet impactful impulses + self.impulse_amplitude = 1.2 # Increased amplitude for stronger perturbations + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Adaptive parameters + sigma = self.sigma_initial + CR = self.CR_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + a + + sigma * (b - c) + + self.impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * (b - c) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = self.sigma_final + (self.sigma_initial - self.sigma_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + CR = self.CR_final + (self.CR_initial - self.CR_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV29.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV29.py new file mode 100644 index 000000000..084d71fae --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV29.py @@ -0,0 +1,78 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV29: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 300 # Increased population size for further exploration + self.sigma_initial = 1.5 # Increased initial mutation spread + self.sigma_final = 0.00001 # Smaller final mutation spread for finer exploitation + self.CR_initial = 0.95 # High initial crossover probability + self.CR_final = 0.01 # Higher final crossover rate for better exploitation + self.harmonic_impulse_frequency = 0.01 # Lower frequency for significant impacts at fewer intervals + self.impulse_amplitude = 1.5 # Increased amplitude for stronger perturbations + self.adaptive_impulse = True # Enable adaptive impulse adjustments based on performance stagnation + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Adaptive parameters + sigma = self.sigma_initial + CR = self.CR_initial + stagnation_counter = 0 # Track performance stagnation + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) # Adding randomness + mutant = ( + a + + sigma * (b - c) + + self.impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + stagnation_counter = 0 # Reset counter on improvement + else: + stagnation_counter += 1 + + # Adaptive impulse based on performance stagnation + if self.adaptive_impulse and stagnation_counter > self.pop_size * 10: + self.impulse_amplitude += 0.5 + stagnation_counter = 0 # Reset after adaptation + + # Adaptive parameter updates + sigma = self.sigma_final + (self.sigma_initial - self.sigma_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + CR = self.CR_final + (self.CR_initial - self.CR_final) * ( + 1 - iteration / (self.budget / self.pop_size) + ) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV30.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV30.py new file mode 100644 index 000000000..d95b9e976 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV30.py @@ -0,0 +1,67 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV30: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 400 # Further increased population size for more exploration + self.sigma_initial = 1.0 # Refined initial mutation spread + self.sigma_final = 0.00001 # Finer final mutation spread for deep exploitation + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = ( + 0.05 # Final crossover rate to maintain a balance between exploration and exploitation + ) + self.harmonic_impulse_frequency = 0.02 # Adjust frequency based on previous results + self.impulse_amplitude_initial = 1.0 # Start with a moderate impulse amplitude + self.impulse_amplitude_final = 0.1 # Reduce the amplitude over iterations + self.adaptive_impulse = True # Enable adaptive impulse adjustments based on performance + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) # Adding randomness + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < ( + self.CR_initial + - (self.CR_initial - self.CR_final) * (iteration / (self.budget / self.pop_size)) + ) + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV31.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV31.py new file mode 100644 index 000000000..a4b1d9d1a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV31.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV31: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 500 # Increased population size for more exploration potential + self.sigma_initial = 0.9 # Initial mutation spread + self.sigma_final = 0.001 # Final mutation spread for deeper exploitation + self.CR_initial = 0.95 # Initial crossover probability + self.CR_final = 0.1 # Final crossover rate to maintain a balance between exploration and exploitation + self.harmonic_impulse_frequency = 0.025 # Adjust frequency based on previous results + self.impulse_amplitude_initial = 1.2 # Start with a higher impulse amplitude + self.impulse_amplitude_final = 0.05 # Reduce the amplitude over iterations + self.adaptive_impulse = True # Enable adaptive impulse adjustments based on performance + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin with enhanced harmonic impulse + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) # Adding randomness + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.cos(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial with adaptive crossover rate + cr_current = self.CR_initial - (self.CR_initial - self.CR_final) * ( + iteration / (self.budget / self.pop_size) + ) + cross_points = np.random.rand(self.dim) < cr_current + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV32.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV32.py new file mode 100644 index 000000000..80720e1e0 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV32.py @@ -0,0 +1,64 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV32: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of optimization problem + self.pop_size = 600 # Further increased population size for robust exploration + self.sigma_initial = 1.0 # Initial mutation spread adjusted for enhanced exploration + self.sigma_final = 0.001 # Fine-grained final mutation spread for precision exploitation + self.CR_initial = 1.0 # High initial crossover probability to encourage diverse exploration + self.CR_final = 0.05 # Lower final crossover rate to stabilize exploitation + self.harmonic_impulse_frequency = 0.03 # Slightly increased frequency for impulse + self.impulse_amplitude_initial = 1.5 # Higher initial amplitude for stronger exploration impact + self.impulse_amplitude_final = 0.01 # Very low final amplitude aimed at fine-tuning solutions + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation: DE/rand/1/bin with enhanced harmonic impulse + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) # Additional randomness + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.cos(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover: Binomial with adaptive crossover rate + cr_current = self.CR_initial - (self.CR_initial - self.CR_final) * ( + iteration / (self.budget / self.pop_size) + ) + cross_points = np.random.rand(self.dim) < cr_current + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV33.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV33.py new file mode 100644 index 000000000..325e09302 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV33.py @@ -0,0 +1,66 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV33: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the optimization problem + self.pop_size = 500 # Adjusted population size for better balance + self.sigma_initial = 0.9 # Slightly reduced mutation spread for improved control + self.sigma_final = 0.005 # Finer final mutation spread for detailed exploitation + self.CR_initial = 0.9 # Adjusted initial crossover probability + self.CR_final = 0.1 # Slightly increased final crossover rate for better genetic mixing + self.harmonic_impulse_frequency = 0.02 # Moderately set frequency for impulse + self.impulse_amplitude_initial = 1.2 # Moderately high initial amplitude for exploration + self.impulse_amplitude_final = ( + 0.02 # Increased final amplitude for more effective late-stage optimization + ) + + def __call__(self, func): + # Initialize population and fitness array + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution process + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation strategy with impulse modification + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover strategy with dynamic rate + cr_current = self.CR_initial - (self.CR_initial - self.CR_final) * ( + iteration / (self.budget / self.pop_size) + ) + cross_points = np.random.rand(self.dim) < cr_current + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection step + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV34.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV34.py new file mode 100644 index 000000000..168e21b0e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicEnhancerV34.py @@ -0,0 +1,64 @@ +import numpy as np + + +class QuantumOrbitalDynamicEnhancerV34: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the optimization problem + self.pop_size = 300 # Reduced population size to improve convergence speed + self.sigma_initial = 0.5 # Lower mutation spread starting point + self.sigma_final = 0.001 # Very fine final mutation spread for precise exploitation + self.CR_initial = 0.7 # Initial crossover probability more conservative + self.CR_final = 0.05 # Reduced final crossover rate to maintain closer genetic traits + self.harmonic_impulse_frequency = 0.05 # Increased frequency for faster impulse responses + self.impulse_amplitude_initial = 1.5 # Higher amplitude for initial exploration boost + self.impulse_amplitude_final = 0.01 # Lower final amplitude for precise exploitation + + def __call__(self, func): + # Initialize population and fitness array + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution process + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation strategy with impulse modification + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.sin(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover strategy with dynamic rate + cr_current = self.CR_initial - (self.CR_initial - self.CR_final) * ( + iteration / (self.budget / self.pop_size) + ) + cross_points = np.random.rand(self.dim) < cr_current + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection step + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalDynamicOptimizerV11.py b/nevergrad/optimization/lama/QuantumOrbitalDynamicOptimizerV11.py new file mode 100644 index 000000000..95ff782d1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalDynamicOptimizerV11.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalDynamicOptimizerV11: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality + self.pop_size = 300 # Reduced population size for more iterations per individual + self.sigma_initial = 3.0 # Increased initial mutation spread + self.sigma_final = 0.001 # Further reduced final mutation spread + self.CR_initial = 0.9 # High initial crossover probability + self.CR_final = 0.2 # Increased minimum final crossover probability + self.elitism_factor = 0.1 # Increased elitism factor + self.q_impact_initial = 0.05 # Lower initial quantum impact + self.q_impact_final = 0.9 # Increased maximum quantum impact + self.q_impact_increase_rate = 0.005 # Slower increase rate for quantum impact + self.harmonic_impulse_frequency = 0.1 # Adjusted frequency of harmonic impulse modulation + self.impulse_amplitude = 0.6 # Increased amplitude of the harmonic impulse + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + idxs = [j for j in range(self.pop_size) if j != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.cos( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c + q_impact * np.sin(c + impulse)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalEnhancedCrossoverOptimizerV22.py b/nevergrad/optimization/lama/QuantumOrbitalEnhancedCrossoverOptimizerV22.py new file mode 100644 index 000000000..dc8ad4f21 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalEnhancedCrossoverOptimizerV22.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumOrbitalEnhancedCrossoverOptimizerV22: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 150 # Further refining population size + self.sigma_initial = 0.5 # Initial mutation spread + self.sigma_final = 0.001 # Further reduced final mutation spread + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.1 # Lower final crossover rate to ensure diversity + self.elitism_factor = 0.1 # Increased elitism factor + self.q_impact_initial = 0.05 # Initial quantum impact + self.q_impact_final = 2.5 # Increased final quantum impact for robust exploitation + self.q_impact_increase_rate = 0.03 # Increased rate of quantum impact growth + self.harmonic_impulse_frequency = 0.025 # Reduced frequency for harmonic impulse + self.impulse_amplitude = 0.3 # Reduced impulse amplitude for stability + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution parameters initialization + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals are passed unchanged + continue + + # Mutation and Crossover + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.cos(c) * (b - a) + impulse) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalEnhancedDynamicEnhancerV19.py b/nevergrad/optimization/lama/QuantumOrbitalEnhancedDynamicEnhancerV19.py new file mode 100644 index 000000000..af49a1295 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalEnhancedDynamicEnhancerV19.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumOrbitalEnhancedDynamicEnhancerV19: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 1000 # Increased population size for broader exploration + self.sigma_initial = 3.0 # Slightly higher initial mutation spread to explore more aggressively + self.sigma_final = 0.0001 # Reduced final mutation spread for fine-tuning solutions + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.05 # Final crossover probability for maintaining diversity + self.elitism_factor = 0.05 # Increased elitism to preserve more good candidates + self.q_impact_initial = 0.005 # Initial quantum impact + self.q_impact_final = 2.0 # Increased quantum impact for stronger final exploitation + self.q_impact_increase_rate = 0.05 # Accelerated rate of quantum impact increase + self.harmonic_impulse_frequency = 0.25 # Higher frequency for more frequent dynamic shifts + self.impulse_amplitude = 2.5 # Increased amplitude for stronger perturbations + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals undergo smaller changes + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * np.sin(c + impulse) * (b - a) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover process + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updating + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalHarmonicOptimizerV10.py b/nevergrad/optimization/lama/QuantumOrbitalHarmonicOptimizerV10.py new file mode 100644 index 000000000..84e629bc9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalHarmonicOptimizerV10.py @@ -0,0 +1,69 @@ +import numpy as np + + +class QuantumOrbitalHarmonicOptimizerV10: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 500 # Adjusted population size for better quality-solution focus + self.sigma_initial = 2.0 # Increased initial mutation spread for wider exploration + self.sigma_final = 0.0005 # Extremely fine final mutation spread + self.elitism_factor = 0.05 # Slightly increased elitism to preserve best candidates + self.CR_initial = 0.95 # Even higher initial crossover probability + self.CR_final = 0.1 # Slightly increased final crossover probability to maintain diversity + self.q_impact_initial = 0.1 # Higher initial quantum impact + self.q_impact_final = 0.8 # Further increased final quantum impact + self.q_impact_increase_rate = 0.007 # Accelerated increase in quantum impact + self.harmonic_impulse_frequency = 0.07 # Adjusted frequency of harmonic impulse modulation + self.impulse_amplitude = 0.5 # Increased amplitude of the harmonic impulse + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation with quantum orbital harmonic adjustments + idxs = [j for j in range(self.pop_size) if j != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c + q_impact * np.cos(c + impulse)) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalPrecisionOptimizerV34.py b/nevergrad/optimization/lama/QuantumOrbitalPrecisionOptimizerV34.py new file mode 100644 index 000000000..6982014c9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalPrecisionOptimizerV34.py @@ -0,0 +1,66 @@ +import numpy as np + + +class QuantumOrbitalPrecisionOptimizerV34: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.pop_size = 150 # Further reduced population size for quicker evaluations + self.sigma_initial = 0.3 # Starting mutation spread + self.sigma_final = 0.0005 # Final mutation spread for precise tweaking + self.CR_initial = 0.9 # High initial crossover probability for diverse exploration + self.CR_final = 0.1 # Ending crossover probability for maintaining elite traits + + # Impulse strategy parameters + self.harmonic_impulse_frequency = 0.1 # Increase in frequency for dynamic adaptation + self.impulse_amplitude_initial = 2.0 # Larger initial amplitude for strong early global search + self.impulse_amplitude_final = 0.005 # Minimized final amplitude for local area exploitation + + def __call__(self, func): + # Initialize population within the bounds and evaluate fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + impulse_amplitude = self.impulse_amplitude_final + ( + self.impulse_amplitude_initial - self.impulse_amplitude_final + ) * (1 - iteration / (self.budget / self.pop_size)) + + for i in range(self.pop_size): + # Mutation with dynamic impulse + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + normal_vector = np.random.normal(0, 1, self.dim) + mutant = ( + a + + impulse_amplitude * (b - c) + + impulse_amplitude + * np.cos(2 * np.pi * self.harmonic_impulse_frequency * iteration) + * normal_vector + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover with dynamic rate adjustment + cr_current = self.CR_initial - (self.CR_initial - self.CR_final) * ( + iteration / (self.budget / self.pop_size) + ) + cross_points = np.random.rand(self.dim) < cr_current + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection step + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV21.py b/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV21.py new file mode 100644 index 000000000..06cc7a532 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV21.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumOrbitalRefinedCrossoverOptimizerV21: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 200 # Smaller population for focused search + self.sigma_initial = 0.5 # Initial mutation spread + self.sigma_final = 0.005 # Final mutation spread + self.CR_initial = 0.95 # High initial crossover probability + self.CR_final = 0.2 # More pronounced final crossover rate to maintain diversity + self.elitism_factor = 0.05 # Reduced elitism factor + self.q_impact_initial = 0.05 # Slight increase in initial quantum impact + self.q_impact_final = 2.0 # Higher final quantum impact for robust exploitation + self.q_impact_increase_rate = 0.02 # Faster increase in quantum impact + self.harmonic_impulse_frequency = 0.05 # Reduced frequency for impulse + self.impulse_amplitude = 0.5 # Reduced amplitude for more controlled perturbations + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution parameters initialization + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals are passed unchanged + continue + + # Mutation and Crossover + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.cos(c) * (b - a) + impulse) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV23.py b/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV23.py new file mode 100644 index 000000000..f25231bb9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumOrbitalRefinedCrossoverOptimizerV23.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumOrbitalRefinedCrossoverOptimizerV23: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Reduced population size for more targeted search + self.sigma_initial = 0.5 # Initial mutation spread + self.sigma_final = 0.001 # Reduced final mutation spread + self.CR_initial = 0.9 # High initial crossover probability + self.CR_final = 0.2 # Slightly higher final crossover rate to maintain diversity + self.elitism_factor = 0.2 # Increased elitism factor for better retention of good solutions + self.q_impact_initial = 0.1 # Increased initial quantum impact + self.q_impact_final = 3.0 # Increased final quantum impact for stronger exploration + self.q_impact_increase_rate = 0.05 # Faster increase in quantum impact + self.harmonic_impulse_frequency = 0.05 # Increased frequency for harmonic impulse + self.impulse_amplitude = 0.5 # Increased impulse amplitude for more noticeable effect + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Identify initial best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution parameters initialization + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite individuals are passed unchanged + continue + + # Mutation and Crossover + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + impulse = self.impulse_amplitude * np.sin( + 2 * np.pi * self.harmonic_impulse_frequency * iteration + ) + mutant = a + sigma * (b - c) + q_impact * (np.cos(c) * (b - a) + impulse) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/QuantumParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..e936fc745 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumParticleSwarmDifferentialEvolution.py @@ -0,0 +1,164 @@ +import numpy as np + + +class QuantumParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.7 + self.elite_fraction = 0.2 + self.diversity_threshold = 1e-3 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-0.1, 0.1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func, budget): + for _ in range(budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + if budget <= 0: + break + return individual + + def quantum_jump(self, individual, global_best, alpha): + return np.clip(individual + alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(20, self.budget - evaluations) + elite_population[idx] = self.local_search( + elite_population[idx], bounds, func, local_search_budget + ) + evaluations += local_search_budget + + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + population[:elite_count] = elite_population + + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + if evaluations < self.budget: + mutation_strategy = np.random.uniform(0, 1, self.pop_size) + for i in range(self.pop_size): + if mutation_strategy[i] < 0.5: + mutant = self.mutate( + global_best_position, *self.select_parents(population)[:2], F + ) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha) + quantum_fitness = func(quantum_trial) + evaluations += 1 + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumParticleSwarmOptimization.py b/nevergrad/optimization/lama/QuantumParticleSwarmOptimization.py new file mode 100644 index 000000000..68f9bc50e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumParticleSwarmOptimization.py @@ -0,0 +1,107 @@ +import numpy as np + + +class QuantumParticleSwarmOptimization: + def __init__( + self, + budget, + population_size=50, + w=0.5, + c1=2, + c2=2, + local_search_budget_ratio=0.1, + adaptivity_factor=0.7, + quantum_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.w = w # inertia weight + self.c1 = c1 # cognitive coefficient + self.c2 = c2 # social coefficient + self.local_search_budget_ratio = local_search_budget_ratio + self.adaptivity_factor = adaptivity_factor # adaptive factor for tuning parameters dynamically + self.quantum_factor = quantum_factor # factor for quantum behavior complement + + def local_search(self, func, x, search_budget): + best_score = func(x) + best_x = np.copy(x) + dim = len(x) + + for _ in range(search_budget): + new_x = x + np.random.uniform(-0.1, 0.1, dim) + new_x = np.clip(new_x, -5.0, 5.0) + new_score = func(new_x) + if new_score < best_score: + best_score = new_score + best_x = np.copy(new_x) + + return best_x, best_score + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize the swarm + population = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + velocities = np.random.uniform(-1, 1, (self.population_size, dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in personal_best_positions]) + + best_idx = np.argmin(personal_best_scores) + global_best_position = personal_best_positions[best_idx] + global_best_score = personal_best_scores[best_idx] + + evaluations = self.population_size + local_search_budget = int(self.budget * self.local_search_budget_ratio) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Quantum behavior component + quantum_jump = np.random.normal(0, self.quantum_factor, dim) + + # Update velocity + r1, r2 = np.random.rand(dim), np.random.rand(dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (global_best_position - population[i]) + + quantum_jump + ) + + # Update position + population[i] = np.clip(population[i] + velocities[i], lower_bound, upper_bound) + + # Evaluate fitness + score = func(population[i]) + evaluations += 1 + + # Update personal best + if score < personal_best_scores[i]: + personal_best_scores[i] = score + personal_best_positions[i] = population[i] + + # Update global best + if score < global_best_score: + global_best_score = score + global_best_position = population[i] + + # Apply local search on global best position for further refinement + if evaluations + local_search_budget <= self.budget: + global_best_position, global_best_score = self.local_search( + func, global_best_position, local_search_budget + ) + evaluations += local_search_budget + + # Dynamically adapt parameters based on current best performance + self.w *= self.adaptivity_factor + self.c1 *= self.adaptivity_factor + self.c2 *= self.adaptivity_factor + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumReactiveCooperativeStrategy.py b/nevergrad/optimization/lama/QuantumReactiveCooperativeStrategy.py new file mode 100644 index 000000000..2a164983a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumReactiveCooperativeStrategy.py @@ -0,0 +1,77 @@ +import numpy as np + + +class QuantumReactiveCooperativeStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 150 # Increased population size for wider exploration + self.elite_size = 20 # Further increased elite size for better exploitation + self.crossover_fraction = 0.9 # Higher crossover fraction to encourage genetic diversity + self.mutation_scale = 0.05 # Lower mutation scale for fine-grained search + self.quantum_mutation_scale = 0.2 # Adjustment for specific quantum mutation effects + self.quantum_probability = 0.05 # Slightly higher quantum probability for exploration + self.reactivity_factor = 0.1 # New: Reactivity factor to adapt mutation scales dynamically + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover_and_mutate(self, parents, num_offspring, iteration): + offspring = np.empty((num_offspring, self.dim)) + for i in range(num_offspring): + if np.random.rand() < self.crossover_fraction: + p1, p2 = np.random.choice(len(parents), 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = parents[p1][:cross_point] + offspring[i][cross_point:] = parents[p2][cross_point:] + else: + offspring[i] = parents[np.random.randint(len(parents))] + + # Dynamic mutation strategy adapting to the stage of optimization + dynamic_scale = self.mutation_scale / (1 + iteration * self.reactivity_factor) + dynamic_quantum_scale = self.quantum_mutation_scale / (1 + iteration * self.reactivity_factor) + + if np.random.rand() < self.quantum_probability: + mutation_shift = np.random.normal(0, dynamic_quantum_scale, self.dim) + else: + mutation_shift = np.random.normal(0, dynamic_scale, self.dim) + offspring[i] += mutation_shift + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + return offspring + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + iteration = 0 + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + num_offspring = self.population_size - self.elite_size + offspring = self.crossover_and_mutate(elite_population, num_offspring, iteration) + + population = np.vstack((elite_population, offspring)) + iteration += 1 + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/QuantumRefinedAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/QuantumRefinedAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..4961ffa16 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumRefinedAdaptiveExplorationOptimization.py @@ -0,0 +1,210 @@ +import numpy as np + + +class QuantumRefinedAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 + + # Exploration improvement parameters + exploration_factor = 0.3 + max_exploration_cycles = 30 + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.1 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 + else: + alpha *= 0.8 + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 150 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = QuantumRefinedAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/QuantumRefinedAdaptiveHybridStrategyV5.py b/nevergrad/optimization/lama/QuantumRefinedAdaptiveHybridStrategyV5.py new file mode 100644 index 000000000..25e9b8729 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumRefinedAdaptiveHybridStrategyV5.py @@ -0,0 +1,64 @@ +import numpy as np + + +class QuantumRefinedAdaptiveHybridStrategyV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 400 # Further increased population size for better exploration + self.sigma_initial = 0.2 # Initial mutation spread increased for wider searches early on + self.elitism_factor = 2 # More limited elite size to encourage diversity + self.sigma_decay = 0.97 # Faster decay for sigma to stabilize mutations as convergence approaches + self.CR_base = 0.9 # Starting crossover probability slightly reduced to improve exploration + self.CR_decay = 0.995 # More gradual crossover decay for sustained exploratory capability + self.q_impact = 0.15 # Initial quantum impact increased + self.q_impact_increase = 0.1 # Faster increase rate for quantum impact + self.q_impact_limit = 1.0 # Higher limit for quantum impact to maximize the non-classical effects + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + q_impact = self.q_impact + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Mutation: Adjusted strategy with quantum effects + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= self.sigma_decay + CR *= self.CR_decay + if iteration % (self.budget // (5 * self.pop_size)) == 0 and q_impact < self.q_impact_limit: + q_impact += self.q_impact_increase # Dynamically increase quantum impact more frequently + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumRefinedAdaptiveStrategicOptimizer.py b/nevergrad/optimization/lama/QuantumRefinedAdaptiveStrategicOptimizer.py new file mode 100644 index 000000000..8be8e9512 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumRefinedAdaptiveStrategicOptimizer.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumRefinedAdaptiveStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed problem dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 300 # Further increased population size for more diversity + inertia_weight = 0.75 # Further reduction in initial inertia for quicker exploitation + cognitive_coefficient = 2.1 # Slightly increased cognitive learning factor + social_coefficient = 2.1 # Slightly increased social learning factor + quantum_momentum = 0.25 # Increased quantum influence for better global search + exploration_factor = 0.6 # Adjusted control parameter for exploration phase duration + + # Initialize population, velocities, and personal bests + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + # Adaptive inertia weight adjustment for improved strategic balance + w = inertia_weight * ( + 0.3 + 0.7 * np.exp(-4 * current_budget / (self.budget * exploration_factor)) + ) + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum jump dynamics with adaptive momentum + if np.random.rand() < 0.1 * (1 - w): + quantum_jump = np.random.normal(0, quantum_momentum * (1 - w), self.dim) + population[i] += quantum_jump + + # Update velocities and positions with refined strategic constraints + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia_component + cognitive_component + social_component + velocity[i] = np.clip(velocity[i], -2, 2) # Adjusted clamping on velocities + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Fitness evaluation and personal and global best updates + fitness = func(population[i]) + current_budget += 1 + + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/QuantumRefinedDynamicAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/QuantumRefinedDynamicAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..efdc75cf2 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumRefinedDynamicAdaptiveHybridDEPSO.py @@ -0,0 +1,160 @@ +import numpy as np + + +class QuantumRefinedDynamicAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.5 # Inertia weight for PSO + c1 = 0.9 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # More frequent restarts + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.2, beta=0.8): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumReinforcedNesterovAccelerator.py b/nevergrad/optimization/lama/QuantumReinforcedNesterovAccelerator.py new file mode 100644 index 000000000..e0cfa9f9d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumReinforcedNesterovAccelerator.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumReinforcedNesterovAccelerator: + def __init__( + self, + budget, + dim=5, + learning_rate=0.08, + momentum=0.98, + quantum_prob=0.3, + adaptive_lr_decay=0.98, + elite_rate=0.4, + noise_intensity=0.1, + perturbation_scale=0.2, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.adaptive_lr_decay = adaptive_lr_decay + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal(0, self.perturbation_scale, self.dim) + self.population[i] += quantum_jump * (global_best - self.population[i]) + + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = self.momentum * self.velocities[i] + self.learning_rate * noise + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + self.learning_rate *= self.adaptive_lr_decay + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumResonanceEvolutionaryStrategy.py b/nevergrad/optimization/lama/QuantumResonanceEvolutionaryStrategy.py new file mode 100644 index 000000000..3bdb9236e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumResonanceEvolutionaryStrategy.py @@ -0,0 +1,62 @@ +import numpy as np + + +class QuantumResonanceEvolutionaryStrategy: + def __init__( + self, + budget, + dim=5, + pop_size=100, + learning_rate=0.2, + elite_rate=0.2, + resonance_depth=0.1, + mutation_scale=0.1, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.learning_rate = learning_rate + self.resonance_depth = resonance_depth + self.mutation_scale = mutation_scale + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and select elites + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites with a resonance depth and mutation + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + normal_disturbance = np.random.normal(0, self.mutation_scale, self.dim) + quantum_resonance = self.resonance_depth * (np.random.uniform(-1, 1, self.dim) ** 3) + self.population[idx] = elite_sample + normal_disturbance + self.learning_rate * quantum_resonance + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/QuantumSearch.py b/nevergrad/optimization/lama/QuantumSearch.py new file mode 100644 index 000000000..a17fb3f44 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSearch.py @@ -0,0 +1,28 @@ +import numpy as np + + +class QuantumSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.alpha = 0.1 # Step size parameter + + def _quantum_step(self, x): + return x + np.random.uniform(-self.alpha, self.alpha, size=self.dim) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + x = np.random.uniform(-5.0, 5.0, size=(self.budget, self.dim)) + + for i in range(self.budget): + x[i] = self._quantum_step(x[i]) + x[i] = np.clip(x[i], -5.0, 5.0) # Ensure within bounds + + f = func(x[i]) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x[i] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumSimulatedAnnealing.py b/nevergrad/optimization/lama/QuantumSimulatedAnnealing.py new file mode 100644 index 000000000..3d675439d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSimulatedAnnealing.py @@ -0,0 +1,40 @@ +import numpy as np + + +class QuantumSimulatedAnnealing: + def __init__(self, budget=10000, initial_temp=1.0, cooling_rate=0.999): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + + def _quantum_step(self, x): + return x + np.random.uniform(-0.1 * self.temp, 0.1 * self.temp, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumSimulatedAnnealingHybridOptimizer.py b/nevergrad/optimization/lama/QuantumSimulatedAnnealingHybridOptimizer.py new file mode 100644 index 000000000..ae143c6e4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSimulatedAnnealingHybridOptimizer.py @@ -0,0 +1,150 @@ +import numpy as np + + +class QuantumSimulatedAnnealingHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature + self.cooling_rate = 0.9 # Cooling rate for annealing + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, idx)) + list(range(idx + 1, self.pop_size)) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Perform local search on the elite half of the population + elite_indices = np.argsort(fitness)[: self.pop_size // 2] + elite_population = new_population[elite_indices] + + for i in range(len(elite_indices)): + elite_population[i] = self.local_search(elite_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + population = np.copy(new_population) + for i in range(self.pop_size): + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha, T) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumSimulatedAnnealingImproved.py b/nevergrad/optimization/lama/QuantumSimulatedAnnealingImproved.py new file mode 100644 index 000000000..e850bc18c --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSimulatedAnnealingImproved.py @@ -0,0 +1,42 @@ +import numpy as np + + +class QuantumSimulatedAnnealingImproved: + def __init__(self, budget=10000, initial_temp=1.0, cooling_rate=0.999, explore_ratio=0.1): + self.budget = budget + self.dim = 5 + self.temp = initial_temp + self.cooling_rate = cooling_rate + self.explore_ratio = explore_ratio + + def _quantum_step(self, x): + explore_range = self.explore_ratio * (5.0 - (-5.0)) + return x + np.random.uniform(-explore_range, explore_range, size=self.dim) + + def _acceptance_probability(self, candidate_f, current_f): + return np.exp((current_f - candidate_f) / self.temp) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + current_x = np.random.uniform(-5.0, 5.0, size=self.dim) + current_f = func(current_x) + + for i in range(self.budget): + candidate_x = self._quantum_step(current_x) + candidate_x = np.clip(candidate_x, -5.0, 5.0) + candidate_f = func(candidate_x) + + if candidate_f < current_f or np.random.rand() < self._acceptance_probability( + candidate_f, current_f + ): + current_x = candidate_x + current_f = candidate_f + + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_x + + self.temp *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumSpectralAdaptiveHybridStrategy.py b/nevergrad/optimization/lama/QuantumSpectralAdaptiveHybridStrategy.py new file mode 100644 index 000000000..bedde6cde --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralAdaptiveHybridStrategy.py @@ -0,0 +1,70 @@ +import numpy as np + + +class QuantumSpectralAdaptiveHybridStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of 5 + self.pop_size = 500 # Increased population size for broader exploration + self.sigma_initial = 0.25 # Increased initial mutation spread + self.elitism_factor = 1 # Reduced elite size to enhance competitive evolution + self.sigma_decay = ( + 0.95 # Accelerated decay for mutation radius to stabilize towards optimal solutions + ) + self.CR_base = 0.85 # Adjusted initial crossover probability + self.CR_decay = 0.99 # Slightly slower decay, maintaining crossover effectiveness longer + self.q_impact = 0.05 # Reduced quantum impact starting value + self.q_impact_increase = 0.05 # Gradual increase in quantum impact + self.q_impact_limit = 0.5 # Reduced upper limit for quantum impact to control non-classical effects + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + q_impact = self.q_impact + + # Main evolutionary loop + for iteration in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation: Quantum-inspired differential mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + + sigma * (a - b + c * np.random.normal()) + + q_impact * np.random.standard_cauchy(self.dim) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma *= self.sigma_decay + CR *= self.CR_decay + if q_impact < self.q_impact_limit: + q_impact += self.q_impact_increase # Gradually increase quantum impact + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV2.py b/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV2.py new file mode 100644 index 000000000..ebbd7c7d3 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV2.py @@ -0,0 +1,66 @@ +import numpy as np + + +class QuantumSpectralAdaptiveOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 800 # Population size adjusted for exploration vs. computation trade-off + self.sigma_initial = 0.3 # Wider initial mutation spread + self.sigma_final = 0.005 # Final reduced mutation spread + self.elitism_factor = 0.1 # Reduced elitism factor for increased competition + self.CR_initial = 0.95 # High initial crossover probability + self.CR_final = 0.6 # Higher final crossover rate to allow for meaningful late convergence + self.q_impact_initial = 0.005 # Lower initial quantum impact + self.q_impact_final = 0.05 # Lesser final quantum impact + self.q_impact_increase_rate = 0.0005 # Slower rate of quantum impact increase + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Dynamic adjustment of parameters + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Quantum-inspired trigonometric mutation with differential mutation components + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + sigma * (a - b + np.sin(c)) + q_impact * np.random.standard_cauchy(self.dim) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV3.py b/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV3.py new file mode 100644 index 000000000..224e6bbe8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralAdaptiveOptimizerV3.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumSpectralAdaptiveOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 500 # Population size adjusted for further exploration + self.sigma_initial = 0.5 # Wider initial mutation spread + self.sigma_final = 0.001 # Further reduced mutation spread at the end + self.elitism_factor = 0.05 # Further reduced elitism factor + self.CR_initial = 0.9 # Initial crossover probability + self.CR_final = 0.7 # Higher final crossover rate + self.q_impact_initial = 0.01 # Increased initial quantum impact + self.q_impact_final = 0.1 # Increased final quantum impact + self.q_impact_increase_rate = 0.001 # Increased rate of quantum impact increase + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Quantum-inspired trigonometric mutation with differential mutation components + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + + sigma * (a - b + np.cos(c)) + + q_impact * np.tan(np.random.standard_cauchy(self.dim)) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumSpectralDynamicOptimizer.py b/nevergrad/optimization/lama/QuantumSpectralDynamicOptimizer.py new file mode 100644 index 000000000..7dd1afa37 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralDynamicOptimizer.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumSpectralDynamicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of 5 + self.pop_size = 1000 # Further increased population size for even broader exploration + self.sigma_initial = 0.2 # Initial mutation spread + self.sigma_final = 0.01 # Final minimal mutation spread + self.elitism_factor = 0.5 # Enhanced elitism factor for promoting top performers + self.CR_initial = 0.9 # Starting crossover probability to encourage gene mixing + self.CR_final = 0.5 # Final crossover rate to maintain genetic diversity longer + self.q_impact_initial = 0.01 # Start with a lower quantum impact + self.q_impact_final = 0.1 # End with a moderate quantum impact + self.q_impact_increase_rate = 0.001 # Rate at which quantum impact increases + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Dynamic adjustment of parameters + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Mutation using Quantum-inspired differential mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + + sigma * (a - b + np.random.normal() * c) + + q_impact * np.random.standard_cauchy(self.dim) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumSpectralEnhancedOptimizerV5.py b/nevergrad/optimization/lama/QuantumSpectralEnhancedOptimizerV5.py new file mode 100644 index 000000000..9f8bcdc75 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralEnhancedOptimizerV5.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumSpectralEnhancedOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 500 # Increase population size for better exploration + self.sigma_initial = 0.8 # Adjust initial mutation spread + self.sigma_final = 0.0005 # Tighter control at the end of the search + self.elitism_factor = 0.2 # Higher elitism to ensure retention of best found solutions + self.CR_initial = 0.9 # Starting crossover probability + self.CR_final = 0.5 # Lower final crossover probability to allow detailed local search + self.q_impact_initial = 0.1 # Adjusted initial quantum impact for stronger early exploration + self.q_impact_final = 0.2 # Higher final quantum impact for intensive exploitation + self.q_impact_increase_rate = 0.003 # Faster increase in quantum impact through generations + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Hybrid trigonometric mutation strategy enhanced with quantum theory influence + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + + sigma * (a - b + np.cos(c)) + + q_impact * np.cos(np.pi * np.random.standard_cauchy(self.dim)) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptively update parameters + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumSpectralRefinedOptimizerV4.py b/nevergrad/optimization/lama/QuantumSpectralRefinedOptimizerV4.py new file mode 100644 index 000000000..62ca0cc8b --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSpectralRefinedOptimizerV4.py @@ -0,0 +1,68 @@ +import numpy as np + + +class QuantumSpectralRefinedOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension of the problem + self.pop_size = 300 # Refined population size for more effective exploration/exploitation + self.sigma_initial = 0.7 # Initial mutation spread + self.sigma_final = 0.001 # Fine control over mutation spread by end + self.elitism_factor = 0.1 # Increased elitism to ensure survival of best solutions + self.CR_initial = 0.95 # Enhanced initial crossover probability + self.CR_final = 0.6 # Reduced final crossover rate to encourage deeper local search + self.q_impact_initial = 0.05 # Enhanced initial quantum impact + self.q_impact_final = 0.15 # Increased final quantum impact + self.q_impact_increase_rate = 0.002 # Accelerated increase in quantum impact + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_initial + q_impact = self.q_impact_initial + + # Evolutionary loop + for iteration in range(self.budget // self.pop_size): + elite_size = int(self.elitism_factor * self.pop_size) + + for i in range(self.pop_size): + if i < elite_size: # Elite members skip mutation and crossover + continue + + # Combined trigonometric and differential mutation strategy + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = ( + best_ind + + sigma * (a - b + np.sin(c)) + + q_impact * np.tan(np.random.standard_cauchy(self.dim)) + ) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive parameter updates + sigma = sigma * (self.sigma_final / self.sigma_initial) ** (1 / (self.budget / self.pop_size)) + CR = max(self.CR_final, CR - (self.CR_initial - self.CR_final) / (self.budget / self.pop_size)) + q_impact = min(self.q_impact_final, q_impact + self.q_impact_increase_rate) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumStabilizedDynamicBalanceOptimizer.py b/nevergrad/optimization/lama/QuantumStabilizedDynamicBalanceOptimizer.py new file mode 100644 index 000000000..899cbe755 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStabilizedDynamicBalanceOptimizer.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumStabilizedDynamicBalanceOptimizer: + def __init__( + self, + budget, + dim=5, + learning_rate=0.05, + momentum=0.9, + quantum_prob=0.1, + elite_rate=0.2, + noise_intensity=0.1, + perturbation_scale=0.1, + stability_factor=0.8, + decay_rate=0.005, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_prob = quantum_prob + self.elite_rate = elite_rate + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_intensity = noise_intensity + self.perturbation_scale = perturbation_scale + self.stability_factor = stability_factor # Increased stability in the updating mechanism + self.decay_rate = decay_rate # Decay rate for adaptive learning rate + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_rate), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_rate), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_rate), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal( + 0.0, + self.perturbation_scale * np.exp(-self.stability_factor * self.decay_rate * i), + self.dim, + ) + self.population[i] += quantum_jump + else: + lr = self.learning_rate * np.exp(-self.decay_rate * i) # Exponentially decaying learning rate + noise = np.random.normal(0, self.noise_intensity, self.dim) + self.velocities[i] = ( + self.momentum * self.velocities[i] + lr * (global_best - self.population[i]) + noise + ) + future_position = self.population[i] + self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/QuantumStateConvergenceOptimizer.py b/nevergrad/optimization/lama/QuantumStateConvergenceOptimizer.py new file mode 100644 index 000000000..e9f1f062d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStateConvergenceOptimizer.py @@ -0,0 +1,50 @@ +import numpy as np + + +class QuantumStateConvergenceOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 100 # Population size for better manageability + self.F = 0.8 # Differential weight + self.CR = 0.9 # Crossover probability + self.quantum_influence = 0.1 # Probability of quantum mutation + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main optimization loop + for _ in range(int(self.budget / self.pop_size)): + for i in range(self.pop_size): + # Quantum Mutation influenced by best solution + if np.random.rand() < self.quantum_influence: + mutation = best_ind + np.random.normal(0, 1, self.dim) * 0.1 + else: + # DE/rand/1 mutation strategy + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutation = pop[i] + self.F * (a - b + c - pop[i]) + + mutation = np.clip(mutation, -5.0, 5.0) + + # Binomial crossover + trial = np.where(np.random.rand(self.dim) < self.CR, mutation, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumStateCrossoverOptimization.py b/nevergrad/optimization/lama/QuantumStateCrossoverOptimization.py new file mode 100644 index 000000000..20736da6a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStateCrossoverOptimization.py @@ -0,0 +1,84 @@ +import numpy as np + + +class QuantumStateCrossoverOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.2, + mutation_intensity=0.05, + crossover_rate=0.9, + quantum_prob=0.1, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1) + + # Quantum-inspired updates + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual): + return individual + np.random.normal(0, self.mutation_intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Quantum inspired state update to allow exploration around the best solution found""" + perturbation = np.random.uniform(-1, 1, self.dimension) * (best_individual - individual) + return individual + perturbation diff --git a/nevergrad/optimization/lama/QuantumStateHybridStrategy.py b/nevergrad/optimization/lama/QuantumStateHybridStrategy.py new file mode 100644 index 000000000..136b77691 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStateHybridStrategy.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumStateHybridStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 300 # Further increased population size + self.sigma_initial = 0.25 # Adjusted initial standard deviation + self.learning_rate = 0.1 # Adjusted learning rate + self.CR = 0.85 # Adjusted crossover probability + self.q_impact_initial = 0.15 # Adjusted initial quantum impact for stronger exploration + self.q_impact_decay = 0.98 # Adjusted decay rate for quantum impact + self.sigma_decay = 0.98 # Adjusted decay rate for sigma + self.elitism_factor = 5 # Introducing elitism to ensure the best solutions propagate + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Maintain a set of elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + + for i in range(self.pop_size): + if i in elites: # Skip mutation for elites + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b) + quantum_term + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = self.CR + self.learning_rate * (np.random.randn()) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumStateRefinedHybridStrategy.py b/nevergrad/optimization/lama/QuantumStateRefinedHybridStrategy.py new file mode 100644 index 000000000..e47f4a7b4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStateRefinedHybridStrategy.py @@ -0,0 +1,65 @@ +import numpy as np + + +class QuantumStateRefinedHybridStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 300 # Population size + self.sigma_initial = 0.3 # Initial standard deviation for exploration + self.learning_rate = 0.08 # Learning rate for adaptiveness in crossover + self.CR = 0.9 # Crossover probability + self.q_impact_initial = 0.2 # Initial quantum impact for exploration + self.q_impact_decay = 0.99 # Decay rate for quantum impact + self.sigma_decay = 0.99 # Decay rate for sigma + self.elitism_factor = 10 # Increased elitism factor + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Maintain a set of elite solutions + elite_size = max(1, int(self.elitism_factor * self.pop_size / 100)) + elites = np.argsort(fitness)[:elite_size] + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + + for i in range(self.pop_size): + if i in elites: # Skip mutation for elites + continue + + idxs = [idx for idx in range(self.pop_size) if idx != i and idx not in elites] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b) + quantum_term + mutant = np.clip(mutant, -5.0, 5.0) + + CRi = self.CR + self.learning_rate * (np.random.randn()) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Update elites + elites = np.argsort(fitness)[:elite_size] + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/QuantumStochasticGradientDescentFireworks.py b/nevergrad/optimization/lama/QuantumStochasticGradientDescentFireworks.py new file mode 100644 index 000000000..8b8a36093 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStochasticGradientDescentFireworks.py @@ -0,0 +1,41 @@ +import numpy as np + + +class QuantumStochasticGradientDescentFireworks: + def __init__(self, budget=1000, num_sparks=10, num_iterations=100, learning_rate=0.1, momentum=0.9): + self.budget = budget + self.num_sparks = num_sparks + self.num_iterations = num_iterations + self.learning_rate = learning_rate + self.momentum = momentum + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dimensions = 5 + bounds = func.bounds + fireworks = np.random.uniform(bounds.lb, bounds.ub, size=(self.num_sparks, dimensions)) + best_firework = fireworks[0] + velocities = np.zeros_like(fireworks) + + for _ in range(self.num_iterations): + for firework in fireworks: + f = func(firework) + if f < func(best_firework): + best_firework = firework + + for i, firework in enumerate(fireworks): + gradient = np.zeros(dimensions) + for _ in range(self.num_sparks): + spark = firework + np.random.normal(0, 1, size=dimensions) + spark = np.clip(spark, bounds.lb, bounds.ub) + gradient += (func(spark) - func(firework)) * (spark - firework) + + velocities[i] = self.momentum * velocities[i] + self.learning_rate * gradient + fireworks[i] += velocities[i] + fireworks[i] = np.clip(fireworks[i], bounds.lb, bounds.ub) + + self.f_opt = func(best_firework) + self.x_opt = best_firework + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumStochasticGradientOptimizer.py b/nevergrad/optimization/lama/QuantumStochasticGradientOptimizer.py new file mode 100644 index 000000000..914265d45 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumStochasticGradientOptimizer.py @@ -0,0 +1,45 @@ +import numpy as np + + +class QuantumStochasticGradientOptimizer: + def __init__(self, budget, dim=5, learning_rate=0.1, momentum=0.9, quantum_boost=False): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_boost = quantum_boost + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.position = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + self.velocity = np.zeros(self.dim) + self.best_position = np.copy(self.position) + self.best_fitness = np.inf + + def evaluate(self, func, position): + return func(position) + + def update_position(self): + self.velocity = self.momentum * self.velocity + self.learning_rate * np.random.normal(0, 1, self.dim) + self.position += self.velocity + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def quantum_influence(self): + if self.quantum_boost: + self.position += np.random.normal(0, 0.1 * (self.upper_bound - self.lower_bound), self.dim) + self.position = np.clip(self.position, self.lower_bound, self.upper_bound) + + def __call__(self, func): + self.initialize() + + for _ in range(self.budget): + self.update_position() + self.quantum_influence() + fitness = self.evaluate(func, self.position) + + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_position = np.copy(self.position) + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/QuantumSwarmOptimization.py b/nevergrad/optimization/lama/QuantumSwarmOptimization.py new file mode 100644 index 000000000..c475351c8 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSwarmOptimization.py @@ -0,0 +1,54 @@ +import numpy as np + + +class QuantumSwarmOptimization: + def __init__( + self, budget=10000, num_particles=10, inertia_weight=0.5, cognitive_weight=1.5, social_weight=2.0 + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.Inf) + self.global_best = None + self.global_best_value = np.Inf + + def __call__(self, func): + for _ in range(self.budget): + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/QuantumSwarmOptimizationImproved.py b/nevergrad/optimization/lama/QuantumSwarmOptimizationImproved.py new file mode 100644 index 000000000..9a2c6efc1 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSwarmOptimizationImproved.py @@ -0,0 +1,62 @@ +import numpy as np + + +class QuantumSwarmOptimizationImproved: + def __init__( + self, + budget=10000, + num_particles=20, + inertia_weight=0.6, + cognitive_weight=1.7, + social_weight=2.2, + boundary_handling=True, + ): + self.budget = budget + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + + def __call__(self, func): + for _ in range(self.budget): + for i in range(self.num_particles): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = current_position + new_velocity + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = new_velocity + self.particles[i] = new_position + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/QuantumSymbioticEnhancedStrategyV3.py b/nevergrad/optimization/lama/QuantumSymbioticEnhancedStrategyV3.py new file mode 100644 index 000000000..570f71d50 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumSymbioticEnhancedStrategyV3.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumSymbioticEnhancedStrategyV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 300 + elite_size = 60 + evaluations = 0 + mutation_factor = 0.9 + crossover_probability = 0.8 + quantum_probability = 0.2 + adaptive_scaling_factor = lambda t: 0.2 * np.exp(-0.2 * t) # Improved decay rate for better precision + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step with increased impact + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Enhanced symbiotic mutation and crossover with more strategic selection + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection with a focus on stronger replacements + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunedGradientSearchV2.py b/nevergrad/optimization/lama/QuantumTunedGradientSearchV2.py new file mode 100644 index 000000000..2055c7a25 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunedGradientSearchV2.py @@ -0,0 +1,90 @@ +import numpy as np + + +class QuantumTunedGradientSearchV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 500 # Increased population size for broader initial exploration + elite_size = 50 # Larger elite size to maintain diversity in solutions + evaluations = 0 + mutation_factor = 0.8 # Adjusted mutation factor + crossover_probability = 0.85 # High crossover probability + quantum_probability = 0.2 # Increased quantum probability at start + learning_rate = 0.01 # Initial learning rate + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + if abs(previous_best - self.f_opt) < 1e-6: # Sensitive convergence detection + mutation_factor *= 0.95 # Gradual reduction of mutation factor + learning_rate *= 0.95 # Gradual reduction of learning rate + else: + mutation_factor *= 1.05 # Increment mutation factor for potential escapes from local minima + learning_rate *= 1.05 # Increment learning rate for aggressive gradient steps + previous_best = self.f_opt + + # Quantum exploration step + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Gradient-based refinement for elites + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) + population[idx] += learning_rate * gradient + population[idx] = np.clip(population[idx], self.lb, self.ub) + new_fitness = func(population[idx]) + evaluations += 1 + + if new_fitness < fitness[idx]: + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = population[idx] + + # Crossover and mutation for diversity + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability *= 1.1 # Increase quantum probability to maintain diversity + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizer.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizer.py new file mode 100644 index 000000000..078cdc351 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizer.py @@ -0,0 +1,72 @@ +import numpy as np + + +class QuantumTunnelingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem statement + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize parameters + population_size = 20 # Modest population size for balance + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Quantum parameters + gamma = 0.1 # Initial quantum fluctuation magnitude + gamma_decay = 0.99 # Decay rate for gamma + + # Evolutionary parameters + crossover_rate = 0.85 + mutation_strength = 0.5 + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update quantum fluctuation magnitude + gamma *= gamma_decay + + new_population = [] + new_fitness = [] + + for i in range(population_size): + # Selection of parents for crossover + parents_indices = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parents_indices] + + # Crossover + mask = np.random.rand(self.dim) < crossover_rate + offspring = np.where(mask, parent1, parent2) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling effect + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + # Evaluate offspring + f_offspring = func(offspring) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + # Insert offspring into new population + new_population.append(offspring) + new_fitness.append(f_offspring) + + if f_offspring < self.f_opt: + self.f_opt = f_offspring + self.x_opt = offspring + + # Update the population + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV10.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV10.py new file mode 100644 index 000000000..9190061d9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV10.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV10: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment for further refined behavior + population_size = 2000 # Further increased population for wider initial exploration + gamma = 0.15 # Higher initial quantum fluctuation to enhance exploratory steps + gamma_min = 0.00001 # Maintain low minimum for fine exploitation + gamma_decay = 0.98 # Slower decay to maintain higher exploratory capability longer + elite_count = 200 # Increased elite count for robust convergence + mutation_strength = 0.001 # Reduced mutation for finer adjustment + crossover_probability = 0.9 # Slightly reduced crossover to maintain more original traits + tunneling_frequency = 0.25 # Increased frequency to enhance escaping from local minima + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV11.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV11.py new file mode 100644 index 000000000..8f85a9c9e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV11.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV11: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment for further refined behavior + population_size = 2500 # Increased population for wider initial exploration + gamma = 0.12 # Adjusted initial quantum fluctuation to balance exploration + gamma_min = 0.00005 # Maintain a slightly higher low minimum for fine exploitations + gamma_decay = 0.95 # Adjusted decay to retain exploratory capability for a longer duration + elite_count = 300 # Increased elite count for more robust convergence + mutation_strength = 0.0005 # Further reduced mutation for finer adjustment + crossover_probability = 0.85 # Adjusted crossover to maintain more original traits with some mixing + tunneling_frequency = 0.3 # Increased frequency to enhance escaping from local minima + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV12.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV12.py new file mode 100644 index 000000000..9fa337d4e --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV12.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV12: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters refinement for enhanced performance + population_size = 3000 # Further increased for wider exploration + gamma = 0.1 # Lower starting gamma for more focused local search + gamma_min = 0.00001 # Lower minimum gamma to allow finer tuning in late stages + gamma_decay = 0.98 # Slower gamma decay to sustain exploration + elite_count = 350 # More elitism to ensure good traits persist + mutation_strength = 0.0003 # Finer mutation for more precise adjustments + crossover_probability = 0.9 # Higher crossover probability for better gene mixing + tunneling_frequency = 0.35 # Increased tunneling frequency to escape local optima more often + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV13.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV13.py new file mode 100644 index 000000000..46caa8df4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV13.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV13: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters refinement for enhanced performance + population_size = 5000 # Further increased for wider exploration + gamma = 0.05 # Lower starting gamma for more focused local search + gamma_min = 0.00001 # Lower minimum gamma to allow finer tuning in late stages + gamma_decay = 0.95 # Slower gamma decay to sustain exploration + elite_count = 500 # More elitism to ensure good traits persist + mutation_strength = 0.0001 # Finer mutation for more precise adjustments + crossover_probability = 0.95 # Higher crossover probability for better gene mixing + tunneling_frequency = 0.40 # Increased tunneling frequency to escape local optima more often + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV14.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV14.py new file mode 100644 index 000000000..fd8bcc0c6 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV14.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV14: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters refinement for enhanced performance + population_size = 500 # Scaled-down population for more iterations per individual + gamma = 0.08 # Starting gamma slightly increased for broader early exploration + gamma_min = 0.00001 # Minimum gamma remains same for fine-tuning in late stages + gamma_decay = 0.99 # Slower gamma decay to sustain exploration capabilities longer + elite_count = 50 # Reduced elitism count, focusing on top 10% of the population + mutation_strength = 0.01 # Increased mutation strength for broader search + crossover_probability = 0.85 # Slightly reduced to promote more diverse mutations + tunneling_frequency = 0.50 # Increased tunneling frequency for enhanced exploration + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # No crossover, clone parent + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV15.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV15.py new file mode 100644 index 000000000..516fe753d --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV15.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV15: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Further refined hyperparameters + population_size = 300 # Reduced population for increased iteration per individual + gamma = 0.1 # Initial tunneling coefficient, increased to enhance exploration initially + gamma_min = 0.0001 # Lower minimum gamma for finer exploration in later stages + gamma_decay = 0.995 # Slower decay to maintain a higher exploration longer + elite_count = 30 # Increased elitism for better exploitation of good solutions + mutation_strength = 0.015 # Increased mutation for broader search + crossover_probability = 0.7 # Lower crossover probability to maintain individual diversity + tunneling_frequency = 0.6 # Increased tunneling frequency for robust exploration + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # No crossover, clone parent + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV16.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV16.py new file mode 100644 index 000000000..5a2f634ec --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV16.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV16: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Tuning hyperparameters based on previous performance + population_size = 250 # Slightly reduced population for more iterations per individual + gamma = 0.15 # Increased initial tunneling coefficient for more aggressive exploration + gamma_min = 0.00005 # Lower minimum for very fine exploration during late optimization + gamma_decay = 0.990 # Slower decay maintains a higher degree of exploration for longer + elite_count = 20 # Reduced elite count to focus on fewer, higher quality individuals + mutation_strength = 0.02 # More aggressive mutation for increased exploration + crossover_probability = 0.8 # Increased probability for strong recombination + tunneling_frequency = 0.65 # Increased frequency for enhanced tunneling effect + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # No crossover, clone parent + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV17.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV17.py new file mode 100644 index 000000000..0c59b4b4f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV17.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV17: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Tuning hyperparameters based on previous performance and feedback + population_size = 200 # Further reduced population for increased individual iteration + gamma = 0.2 # Increased initial tunneling coefficient for more pronounced explorative jumps + gamma_min = 0.0001 # Lower minimum for finer late-stage exploration + gamma_decay = 0.992 # Slower decay to preserve exploration capabilities longer + elite_count = 10 # Reduced elite count to focus on top-performing individuals + mutation_strength = 0.025 # Higher mutation strength to encourage diversity + crossover_probability = 0.85 # Higher probability for effective recombination + tunneling_frequency = 0.7 # Increased tunneling frequency for better local optima escaping + + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to exponential decay + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the top performers + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Reproduction process: crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # If crossover does not occur, clone parent1 + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within the problem bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV18.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV18.py new file mode 100644 index 000000000..1d12981b9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV18.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV18: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Adjusting strategy based on feedback + population_size = 150 # Slightly reducing population size for more frequent updates + gamma = 0.25 # Slightly increased initial gamma for more aggressive initial exploration + gamma_min = 0.00005 # Further reduced minimum gamma for finer tuning in late optimization stages + gamma_decay = 0.995 # Slowing decay for preservation of exploration capabilities + elite_count = 5 # Sharply focusing on fewer elites to drive convergence + mutation_strength = 0.015 # Reduced mutation strength for finer mutations + crossover_probability = 0.9 # Increased crossover probability + tunneling_frequency = 0.75 # Increased tunneling frequency for better escaping of local optima + + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to exponential decay + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: retain the top performers + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Reproduction process: crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # If crossover does not occur, clone parent1 + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within the problem bounds + + # Quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV2.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV2.py new file mode 100644 index 000000000..8a6f2347f --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV2.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality given in the problem statement + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters + population_size = 100 # Increased population size for more diversity + gamma = 0.1 # Quantum fluctuation magnitude + gamma_min = 0.01 # Minimum fluctuation magnitude to ensure continued exploration + gamma_decay = 0.995 # Slower decay rate + elite_count = 10 # Greater number of elites to stabilize performance + mutation_strength = 0.2 # Finer mutations for precise local exploration + crossover_probability = 0.95 # High probability for crossover to encourage mixing + + # Initialize population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update quantum fluctuation magnitude with floor + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: carry forward best individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Generate new individuals from existing population + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = parent1 if np.random.random() < 0.5 else parent2 + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling effect + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV3.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV3.py new file mode 100644 index 000000000..8799c5a6a --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV3.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality given in the problem statement + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters + population_size = 150 # Increased population size for more diversity + gamma = 0.15 # Initial quantum fluctuation magnitude + gamma_min = 0.005 # Lower minimum fluctuation magnitude + gamma_decay = 0.99 # Slower decay rate to preserve exploration + elite_count = 15 # Increased number of elites for stability + mutation_strength = 0.15 # Reduced mutation strength for finer local exploration + crossover_probability = 0.9 # Adjusted crossover probability + + # Initialize population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update quantum fluctuation magnitude with floor + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: carry forward best individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Generate new individuals from existing population + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = parent1 if np.random.random() < 0.5 else parent2 + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling effect + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV4.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV4.py new file mode 100644 index 000000000..acad73fc4 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV4.py @@ -0,0 +1,74 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality given in the problem statement + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters + population_size = 200 # Increased population size for enhanced diversity + gamma = 0.2 # Increased initial quantum fluctuation magnitude + gamma_min = 0.01 # Slightly increased minimum quantum fluctuation + gamma_decay = 0.95 # Reduced decay rate for longer exploration maintenance + elite_count = 20 # Higher number of elites for better performance retention + mutation_strength = 0.1 # Lower mutation strength for more precise adjustments + crossover_probability = 0.85 # Slightly adjusted crossover probability + + # Initialize population + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update quantum fluctuation magnitude + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: carry forward best individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Generate new individuals from existing population + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = parent1 if np.random.random() < 0.5 else parent2 + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling effect + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV5.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV5.py new file mode 100644 index 000000000..2169be1af --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV5.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment + population_size = 250 # Increased population for more diversity + gamma = 0.25 # Start with a higher quantum fluctuation magnitude + gamma_min = 0.005 # Lower minimum quantum fluctuation + gamma_decay = 0.98 # Slower decay to maintain exploration longer + elite_count = 25 # Increased elite count for better elite retention + mutation_strength = 0.05 # Reduced mutation strength for finer adjustments + crossover_probability = 0.9 # Increased crossover probability for more mixing + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: keep the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Introduce quantum tunneling effect with updated dynamics + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV6.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV6.py new file mode 100644 index 000000000..dc175f314 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV6.py @@ -0,0 +1,75 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment + population_size = 300 # Further increased population for more diversity + gamma = 0.3 # Starting quantum fluctuation magnitude + gamma_min = 0.001 # Lower minimum quantum fluctuation + gamma_decay = 0.95 # Slower decay to maintain exploration longer + elite_count = 30 # Increased elite count for better elite retention + mutation_strength = 0.02 # Reduced mutation strength for finer adjustments + crossover_probability = 0.92 # Increased crossover probability for more mixing + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: keep the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Introduce quantum tunneling effect with updated dynamics + if np.random.rand() < gamma: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV7.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV7.py new file mode 100644 index 000000000..dda090d09 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV7.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment + population_size = 400 # Increased population size for more diversity + gamma = 0.1 # Initial quantum fluctuation magnitude + gamma_min = 0.0001 # Lower minimum quantum fluctuation + gamma_decay = 0.99 # Slower decay to maintain exploration longer + elite_count = 40 # Increased elite count for better elite retention + mutation_strength = 0.01 # Reduced mutation strength for finer adjustments + crossover_probability = 0.95 # Increased crossover probability for more mixing + tunneling_frequency = 0.07 # Slightly increased quantum tunneling frequency + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: keep the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Introduce quantum tunneling effect with updated dynamics + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV8.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV8.py new file mode 100644 index 000000000..6807c47a9 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV8.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV8: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Hyperparameters adjustment + population_size = 500 # Further increased population size for more diversity + gamma = 0.07 # Reduced initial quantum fluctuation magnitude + gamma_min = 0.00005 # Lower minimum quantum fluctuation + gamma_decay = 0.995 # Slower decay to maintain exploration longer + elite_count = 50 # Further increased elite count for better elite retention + mutation_strength = 0.005 # Reduced mutation strength for finer adjustments + crossover_probability = 0.98 # Increased crossover probability for more mixing + tunneling_frequency = 0.1 # Increased quantum tunneling frequency + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: keep the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Introduce quantum tunneling effect with updated dynamics + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/QuantumTunnelingOptimizerV9.py b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV9.py new file mode 100644 index 000000000..07f2d5498 --- /dev/null +++ b/nevergrad/optimization/lama/QuantumTunnelingOptimizerV9.py @@ -0,0 +1,76 @@ +import numpy as np + + +class QuantumTunnelingOptimizerV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Updated hyperparameters + population_size = 1000 # Increased population size further for greater exploration + gamma = 0.1 # Increased initial quantum fluctuation magnitude for more robust movement + gamma_min = 0.00001 # Lower minimum quantum fluctuation for finer exploitation + gamma_decay = 0.99 # Slower decay rate to sustain a higher level of exploration + elite_count = 100 # Increased elite count for better convergence behavior + mutation_strength = 0.003 # Finer mutation adjustments + crossover_probability = 0.95 # High crossover probability for aggressive mixing + tunneling_frequency = 0.2 # More frequent tunneling to improve escape from local optima + + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + # Update gamma according to decay schedule + gamma = max(gamma * gamma_decay, gamma_min) + + # Elitism: keep the best performing individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Breed new individuals via crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) # Clone one parent if no crossover + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) # Ensure offspring remain within bounds + + # Introduce quantum tunneling effect + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RADE.py b/nevergrad/optimization/lama/RADE.py new file mode 100644 index 000000000..5892977fd --- /dev/null +++ b/nevergrad/optimization/lama/RADE.py @@ -0,0 +1,62 @@ +import numpy as np + + +class RADE: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + + def __call__(self, func): + # Initialization of population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Differential Evolution parameters + mutation_factor = 0.8 + crossover_probability = 0.9 + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Mutation strategy: "rand/1/bin" + indices = [index for index in range(self.population_size) if index != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_probability else population[i][j] + for j in range(self.dimension) + ] + ) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + # Adaptive strategy adjustment + mutation_factor = max(0.5, min(1.0, mutation_factor + 0.02 * (best_fitness - np.mean(fitness)))) + crossover_probability = max( + 0.5, min(1.0, crossover_probability - 0.02 * (best_fitness - np.mean(fitness))) + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RADEA.py b/nevergrad/optimization/lama/RADEA.py new file mode 100644 index 000000000..529978a9f --- /dev/null +++ b/nevergrad/optimization/lama/RADEA.py @@ -0,0 +1,74 @@ +import numpy as np + +"""### Redesigned Algorithm Name: Robust Adaptive Differential Evolution with Archive (RADEA) +### Key Adjustments: +1. **Archive Initialization and Usage**: Introduce checks to handle cases where the archive is initially empty and update its usage logic in mutation. +2. **Parameter Tuning and Control**: Introduce adaptive parameters that can change based on the optimization progress. +3. **Improved Mutation Strategy**: Use a more robust strategy that ensures diversity and avoids premature convergence. +""" + + +class RADEA: + def __init__(self, budget): + self.budget = budget + self.population_size = 20 + self.dimension = 5 + self.low = -5.0 + self.high = 5.0 + self.archive = [] + self.archive_max_size = 50 + + def initialize(self): + return np.random.uniform(self.low, self.high, (self.population_size, self.dimension)) + + def evaluate(self, population, func): + fitness = np.array([func(ind) for ind in population]) + return fitness + + def mutation(self, population, archive, F): + mutant = np.zeros_like(population) + combined = np.vstack([population] + [archive]) if archive else population + num_candidates = len(combined) + + for i in range(self.population_size): + indices = np.random.choice(num_candidates, 3, replace=False) + x1, x2, x3 = combined[indices] + mutant_vector = x1 + F * (x2 - x3) + mutant[i] = np.clip(mutant_vector, self.low, self.high) + return mutant + + def crossover(self, population, mutant, CR): + crossover = np.where(np.random.rand(self.population_size, self.dimension) < CR, mutant, population) + return crossover + + def select(self, population, fitness, trial_population, trial_fitness): + better_idx = trial_fitness < fitness + population[better_idx] = trial_population[better_idx] + fitness[better_idx] = trial_fitness[better_idx] + updated_indices = np.where(better_idx)[0] + return population, fitness, updated_indices + + def __call__(self, func): + population = self.initialize() + fitness = self.evaluate(population, func) + iterations = self.budget // self.population_size + F, CR = 0.8, 0.9 # Fixed F and CR for simplification + + for _ in range(iterations): + mutant = self.mutation(population, self.archive, F) + trial_population = self.crossover(population, mutant, CR) + trial_fitness = self.evaluate(trial_population, func) + population, fitness, updated_indices = self.select( + population, fitness, trial_population, trial_fitness + ) + + # Archive management: add only improved solutions + for idx in updated_indices: + self.archive.append(population[idx]) + if len(self.archive) > self.archive_max_size: + self.archive.pop(0) + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RADECM.py b/nevergrad/optimization/lama/RADECM.py new file mode 100644 index 000000000..87f1413be --- /dev/null +++ b/nevergrad/optimization/lama/RADECM.py @@ -0,0 +1,65 @@ +import numpy as np + + +class RADECM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + # Adaptive F scaling based on the linear progression from initial to end value + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + # Compute the overall best index once for use in all mutations this generation + best_idx = np.argmin(fitness) + + for i in range(self.population_size): + # Select three random distinct indices + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Utilize a blend of mutation strategies: DE/current-to-best/1 and DE/rand-to-best/2 + best = population[best_idx] + mutant = x1 + F_current * (best - x1 + x2 - x3) + mutant_rand_best = x1 + F_current * (best - x1) + F_current * (x2 - x3) + + # Select mutation based on a strategic choice + if np.random.rand() < 0.5: + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + else: + mutant = np.clip(mutant_rand_best, self.bounds[0], self.bounds[1]) + + # Binomial crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + + # Evaluate the new candidate + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RADEDM.py b/nevergrad/optimization/lama/RADEDM.py new file mode 100644 index 000000000..e886fd6e5 --- /dev/null +++ b/nevergrad/optimization/lama/RADEDM.py @@ -0,0 +1,59 @@ +import numpy as np + + +class RADEDM: + def __init__(self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.2): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Weight given to memory in mutation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + memory = np.zeros(self.dimension) # A single vector holds the cumulative memory + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[np.argmin(fitness)] + + # Mutant vector incorporating memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with the successful mutation direction scaled by F + memory = (1 - self.memory_factor) * memory + self.memory_factor * F_current * ( + mutant - population[i] + ) + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RADEEM.py b/nevergrad/optimization/lama/RADEEM.py new file mode 100644 index 000000000..778968ab2 --- /dev/null +++ b/nevergrad/optimization/lama/RADEEM.py @@ -0,0 +1,71 @@ +import numpy as np + + +class RADEEM: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.2, CR=0.9, memory_size=10, memory_factor=0.1 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight for linear adaptation + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_size = memory_size # Number of memory slots + self.memory_factor = memory_factor # Proportion of memory influence + self.memory = [] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + best = population[np.argmin(fitness)] + + # Memory influence in mutation + memory_effect = ( + np.sum(self.memory, axis=0) * self.memory_factor + if self.memory + else np.zeros(self.dimension) + ) + + # Mutation strategy incorporating memory + mutant = x1 + F_current * (best - x1 + x2 - x3 + memory_effect) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + # Selection and memory update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory with successful mutation vectors + if len(self.memory) < self.memory_size: + self.memory.append(mutant - population[i]) + else: + # Replace a random memory component + self.memory[np.random.randint(len(self.memory))] = mutant - population[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RADEPM.py b/nevergrad/optimization/lama/RADEPM.py new file mode 100644 index 000000000..65cbb8bff --- /dev/null +++ b/nevergrad/optimization/lama/RADEPM.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RADEPM: + def __init__( + self, budget, population_size=50, F_init=0.5, F_end=0.8, CR=0.9, memory_factor=0.3, memory_decay=0.99 + ): + self.budget = budget + self.population_size = population_size + self.F_init = F_init # Initial differential weight + self.F_end = F_end # Final differential weight adapted linearly + self.CR = CR # Crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # Search space bounds + self.memory_factor = memory_factor # Fraction of memory used in mutation + self.memory_decay = memory_decay # Decay factor for memory's impact + self.memory = [] + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + F_current = self.F_init + (self.F_end - self.F_init) * (evaluations / self.budget) + + for i in range(self.population_size): + indices = np.random.choice( + [j for j in range(self.population_size) if j != i], 3, replace=False + ) + x1, x2, x3 = population[indices] + + # Use weighted sum of difference vectors to mutate, including memory + memory_component = np.zeros(self.dimension) + if self.memory: + memory_vectors = np.array(self.memory) + memory_weights = np.random.dirichlet(np.ones(len(self.memory))) + memory_component = np.sum(memory_vectors.T * memory_weights, axis=1) + + # Mutant vector calculation incorporating memory + best = population[np.argmin(fitness)] + mutant = x1 + F_current * (best - x1 + x2 - x3 + self.memory_factor * memory_component) + mutant = np.clip(mutant, self.bounds[0], self.bounds[1]) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < self.CR, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Update memory dynamically with a decay factor + if len(self.memory) < self.population_size: + self.memory.append(mutant - population[i]) + else: + self.memory[int(np.random.rand() * len(self.memory))] = mutant - population[i] + self.memory = [v * self.memory_decay for v in self.memory] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RADSDiffEvo.py b/nevergrad/optimization/lama/RADSDiffEvo.py new file mode 100644 index 000000000..8953f59d6 --- /dev/null +++ b/nevergrad/optimization/lama/RADSDiffEvo.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RADSDiffEvo: + def __init__(self, budget, population_size=100, F_base=0.5, CR_base=0.8, adaptive=True): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base mutation factor + self.CR_base = CR_base # Base crossover probability + self.adaptive = adaptive # Toggle adaptive behavior + + def __call__(self, func): + # Initialize population and fitness evaluations + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation strategy selector that considers progress + if self.adaptive: + progress = num_evals / self.budget + F = self.F_base + 0.3 * np.random.randn() * ( + 1 - progress + ) # Decreasing F variation over time + CR = self.CR_base + 0.1 * np.random.randn() * ( + 1 - progress + ) # Decreasing CR variation over time + strategy_type = "best1bin" if np.random.rand() < 0.5 * (1 + progress) else "rand1bin" + else: + F = self.F_base + CR = self.CR_base + strategy_type = "best1bin" if num_evals % 2 == 0 else "rand1bin" + + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 3, replace=False) + a, b, c = population[chosen] + + # Mutation strategies + if strategy_type == "rand1bin": + mutant = a + F * (b - c) + elif strategy_type == "best1bin": + mutant = best_individual + F * (b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RAGCES.py b/nevergrad/optimization/lama/RAGCES.py new file mode 100644 index 000000000..63bcf07d6 --- /dev/null +++ b/nevergrad/optimization/lama/RAGCES.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RAGCES: + def __init__( + self, budget, population_size=50, F_base=0.8, CR_base=0.5, adapt_rate=0.05, gradient_weight=0.1 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base scaling factor for differential evolution + self.CR_base = CR_base # Base crossover rate + self.adapt_rate = adapt_rate # Rate of adaptation for F and CR + self.gradient_weight = gradient_weight # Weighting for gradient influence in mutation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while num_evals < self.budget: + # Adapt F and CR adaptively + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.0, 1.0) + + # Mutation, Crossover and Selection + for i in range(self.population_size): + if num_evals >= self.budget: + break + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b = np.random.choice(indices, 2, replace=False) + mutant = population[i] + Fs[i] * (population[a] - population[b]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Gradient correction + gradient_step = self.gradient_weight * (np.random.rand(self.dimension) * 2 - 1) + trial += gradient_step + + # Boundary correction + trial = np.clip(trial, self.lb, self.ub) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RAGEA.py b/nevergrad/optimization/lama/RAGEA.py new file mode 100644 index 000000000..ea7bef331 --- /dev/null +++ b/nevergrad/optimization/lama/RAGEA.py @@ -0,0 +1,63 @@ +import numpy as np + + +class RAGEA: + def __init__(self, budget, population_size=50, alpha=0.1, mutation_scaling=0.1, elite_fraction=0.2): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.alpha = alpha # Adaptive step scaling factor + self.mutation_scaling = mutation_scaling + self.elite_count = int(population_size * elite_fraction) + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[: self.elite_count]] + elite_fitnesses = fitness[sorted_indices[: self.elite_count]] + + new_population = np.zeros_like(population) + new_population[: self.elite_count] = elites + + # Generate new candidates + for i in range(self.elite_count, self.population_size): + parent_idx = np.random.choice(self.elite_count) + parent = elites[parent_idx] + + # Adaptive mutation based on elite performance + relative_fitness = (elite_fitnesses[parent_idx] - np.min(elite_fitnesses)) / ( + np.max(elite_fitnesses) - np.min(elite_fitnesses) + 1e-8 + ) + step_size = self.mutation_scaling * (1 - relative_fitness) + mutation = np.random.normal(0, max(step_size, 1e-8), self.dimension) + + # Adaptive crossover between best individual and elite + crossover_weight = np.random.rand() + child = (crossover_weight * best_individual) + ((1 - crossover_weight) * parent) + mutation + child = np.clip(child, self.lb, self.ub) # Ensure bounds are respected + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + num_evals += self.population_size - self.elite_count + + # Update the best individual if necessary + current_best_idx = np.argmin(fitness) + current_best_fitness = fitness[current_best_idx] + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[current_best_idx].copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RAHDEMI.py b/nevergrad/optimization/lama/RAHDEMI.py new file mode 100644 index 000000000..7bca71abe --- /dev/null +++ b/nevergrad/optimization/lama/RAHDEMI.py @@ -0,0 +1,87 @@ +import numpy as np + + +class RAHDEMI: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=100, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb = -5.0 + ub = 5.0 + dimension = 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((0, dimension)) + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions periodically + if evaluations % (self.budget // 20) == 0: + elite_idx = np.argsort(fitness)[: self.elite_size] + elite = population[elite_idx] + + for i in range(self.population_size): + # Adaptive mutation factor that changes dynamically + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/rand-to-best/1 strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = ( + best_solution if np.random.rand() < 0.8 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best - a + b - c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with the old good solutions + if memory.shape[0] < self.memory_size: + memory = np.vstack([memory, population[i]]) + elif np.random.rand() < 0.1: # Occasionally replace memory entries + memory[np.random.randint(0, self.memory_size)] = population[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RALES.py b/nevergrad/optimization/lama/RALES.py new file mode 100644 index 000000000..d3677ee68 --- /dev/null +++ b/nevergrad/optimization/lama/RALES.py @@ -0,0 +1,61 @@ +import numpy as np + + +class RALES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 # Increased population size for better exploration + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Evolution parameters + learning_rate = 0.05 # Reduced learning rate for more stable convergence + global_learning_rate = 0.25 # Increased global learning rate for stronger elite attraction + mutation_strength = 0.7 # Increased initial mutation strength for wider initial search + mutation_decay = 0.95 # Slower mutation decay to maintain exploration longer + elite_fraction = 0.2 # Increased elite fraction for a stronger focus on best solutions + elite_size = int(population_size * elite_fraction) + + while num_evals < self.budget: + elite_indices = np.argsort(fitness)[:elite_size] + global_mean = np.mean(population[elite_indices], axis=0) + + for i in range(population_size): + if num_evals >= self.budget: + break + + step = mutation_strength * np.random.randn(self.dimension) + if i in elite_indices: + # Elites undergo less mutation + step *= 0.5 + + individual = population[i] + step + individual = np.clip(individual, self.lower_bound, self.upper_bound) + + # Global pull move towards the mean of elites + individual = individual + global_learning_rate * (global_mean - individual) + individual_fitness = func(individual) + num_evals += 1 + + # Selection process + if individual_fitness < fitness[i]: + population[i] = individual + fitness[i] = individual_fitness + if individual_fitness < best_fitness: + best_fitness = individual_fitness + best_individual = individual.copy() + + # Decay the mutation strength + mutation_strength *= mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RAMDE.py b/nevergrad/optimization/lama/RAMDE.py new file mode 100644 index 000000000..5a432eca7 --- /dev/null +++ b/nevergrad/optimization/lama/RAMDE.py @@ -0,0 +1,91 @@ +import numpy as np + + +class RAMDE: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=50, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory initialization + memory = population[: self.memory_size].copy() + memory_fitness = fitness[: self.memory_size].copy() + + # Elite solutions tracking + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adjust mutation factor with decaying amplitude and sine function + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + for i in range(self.population_size): + # Select individuals for mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + + # Mutation: Incorporate both best and random elite solutions + best_or_elite = ( + best_solution if np.random.rand() < 0.75 else elite[np.random.randint(self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - b + c - a), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory if better + worst_memory_idx = np.argmax(memory_fitness) + if trial_fitness < memory_fitness[worst_memory_idx]: + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update elite if better + worst_elite_idx = np.argmax(elite_fitness) + if trial_fitness < elite_fitness[worst_elite_idx]: + elite[worst_elite_idx] = trial + elite_fitness[worst_elite_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RAMEDS.py b/nevergrad/optimization/lama/RAMEDS.py new file mode 100644 index 000000000..14bf1fd74 --- /dev/null +++ b/nevergrad/optimization/lama/RAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.6 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RAMEDSPlus.py b/nevergrad/optimization/lama/RAMEDSPlus.py new file mode 100644 index 000000000..dcd9993cb --- /dev/null +++ b/nevergrad/optimization/lama/RAMEDSPlus.py @@ -0,0 +1,87 @@ +import numpy as np + + +class RAMEDSPlus: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=30, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites based on fitness and diversity + sorted_indices = np.argsort(fitness) + elite_indices = sorted_indices[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive Mutation Scale based on fitness spread + F_spread = np.std(fitness) + F = self.F_max - (self.F_max - self.F_min) * (np.tanh(3 * F_spread)) + + # Mutation: DE/current-to-best/1 with adaptive mutation scale + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.7 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RAMEDSPro.py b/nevergrad/optimization/lama/RAMEDSPro.py new file mode 100644 index 000000000..72aef0ad1 --- /dev/null +++ b/nevergrad/optimization/lama/RAMEDSPro.py @@ -0,0 +1,89 @@ +import numpy as np + + +class RAMEDSPro: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites based on hybrid strategy + sorted_indices = np.argsort(fitness) + elite_indices = sorted_indices[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Dynamic Mutation Tuning + F_var = np.var(fitness) + F = self.F_max - (self.F_max - self.F_min) * np.cos( + np.pi * evaluations / self.budget + ) * np.exp(-F_var) + + # Mutation: DE/current-to-best/1 with dynamic mutation scale + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.5 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory incrementally + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Incremental Memory Update + memory_idx_update = np.random.randint(0, self.memory_size) + memory[memory_idx_update] = memory[memory_idx_update] * 0.9 + trial * 0.1 + memory_fitness[memory_idx_update] = min(memory_fitness[memory_idx_update], trial_fitness) + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RAMSDiffEvo.py b/nevergrad/optimization/lama/RAMSDiffEvo.py new file mode 100644 index 000000000..bb4f1c6d7 --- /dev/null +++ b/nevergrad/optimization/lama/RAMSDiffEvo.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RAMSDiffEvo: + def __init__(self, budget, population_size=100, F_base=0.8, CR_base=0.5, perturbation=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Further increased base mutation factor for enhanced exploration + self.CR_base = CR_base # Lowered crossover probability to avoid premature convergence + self.perturbation = perturbation # Reduced perturbation for more stable adaptive parameters + + def __call__(self, func): + # Initialize population and fitness evaluations + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + for i in range(self.population_size): + if num_evals >= self.budget: + break + + # Mutation with dynamic strategy adaptation based on previous improvements + strategy_type = np.random.choice( + ["best1bin", "rand1bin", "rand2best1bin", "current2rand1"], p=[0.4, 0.2, 0.3, 0.1] + ) + F = np.clip(self.F_base + self.perturbation * np.random.randn(), 0.1, 1.0) + CR = np.clip(self.CR_base + self.perturbation * np.random.randn(), 0.0, 1.0) + + idxs = [idx for idx in range(self.population_size) if idx != i] + chosen = np.random.choice(idxs, 3, replace=False) + a, b, c = population[chosen] + + if strategy_type == "best1bin": + mutant = best_individual + F * (b - c) + elif strategy_type == "rand1bin": + mutant = a + F * (b - c) + elif strategy_type == "rand2best1bin": + mutant = a + F * (best_individual - a) + F * (b - c) + else: # 'current2rand1' + mutant = population[i] + F * (a - population[i] + b - c) + + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RAPDE.py b/nevergrad/optimization/lama/RAPDE.py new file mode 100644 index 000000000..699a144c6 --- /dev/null +++ b/nevergrad/optimization/lama/RAPDE.py @@ -0,0 +1,60 @@ +import numpy as np + + +class RAPDE: + def __init__( + self, budget, population_size=100, F_base=0.8, CR_base=0.9, adapt_rate=0.1, precision_base=0.1 + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base scaling factor for differential evolution + self.CR_base = CR_base # Base crossover rate + self.adapt_rate = adapt_rate # Rate of adaptation for F and CR + self.precision_base = precision_base # Base precision adjustment factor + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main loop + while num_evals < self.budget: + # Adapt F, CR, and precision adaptively + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.0, 1.0) + precision = self.precision_base * (1 - np.exp(-num_evals / self.budget)) + + # Mutation, Crossover, and Selection + for i in range(self.population_size): + if num_evals >= self.budget: + break + + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = population[a] + Fs[i] * (population[b] - population[c]) * precision + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RASES.py b/nevergrad/optimization/lama/RASES.py new file mode 100644 index 000000000..baae5c2d0 --- /dev/null +++ b/nevergrad/optimization/lama/RASES.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RASES: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 50 + self.elite_size = 5 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, sigma): + mutation = np.random.normal(0, sigma, (self.population_size, self.dimension)) + return np.clip(population + mutation, self.bounds[0], self.bounds[1]) + + def crossover(self, parent1, parent2): + alpha = np.random.uniform(0.3, 0.7) + return alpha * parent1 + (1 - alpha) * parent2 + + def select_elite(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_size] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_fitness = np.min(fitness) + best_solution = population[np.argmin(fitness)] + + sigma = 0.5 # Initial standard deviation for mutation + + while evaluations < self.budget: + sigma *= 0.99 # Decay of mutation rate + if evaluations % (self.budget // 10) == 0: # Mutation burst + sigma = 0.5 + + new_population = self.mutate(population, sigma) + new_fitness = self.evaluate(new_population, func) + evaluations += self.population_size + + # Elite preservation and crossover + elite_population, elite_fitness = self.select_elite(population, fitness) + for i in range(self.population_size): + if np.random.rand() < 0.5: # 50% chance to crossover with elite + elite_partner = elite_population[np.random.randint(self.elite_size)] + new_population[i] = self.crossover(new_population[i], elite_partner) + + # Re-evaluate after crossover + fitness = self.evaluate(new_population, func) + + # Update best solution + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_solution = new_population[np.argmin(fitness)] + + population = new_population + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RAVDE.py b/nevergrad/optimization/lama/RAVDE.py new file mode 100644 index 000000000..418427b44 --- /dev/null +++ b/nevergrad/optimization/lama/RAVDE.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RAVDE: + def __init__(self, budget, population_size=150, F_base=0.6, CR_base=0.8, adapt_rate=0.05): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.adapt_rate = adapt_rate # Adaptation rate for parameters + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Main evolutionary loop + while num_evals < self.budget: + # Adaptive F and CR + Fs = np.clip(np.random.normal(self.F_base, self.adapt_rate, self.population_size), 0.1, 1.0) + CRs = np.clip(np.random.normal(self.CR_base, self.adapt_rate, self.population_size), 0.1, 1.0) + + for i in range(self.population_size): + # Mutation using "current-to-best/2" strategy + indices = [idx for idx in range(self.population_size) if idx != i] + a, b = np.random.choice(indices, 2, replace=False) + mutant = population[i] + Fs[i] * ( + best_individual - population[i] + population[a] - population[b] + ) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + trial = np.where(np.random.rand(self.dimension) < CRs[i], mutant, population[i]) + + # Selection + trial_fitness = func(trial) + num_evals += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + + if num_evals >= self.budget: + break + + # Dynamic update of F_base and CR_base based on performance + self.F_base = ( + np.mean(Fs[fitness < np.array([func(ind) for ind in population])]) + if np.any(fitness < np.array([func(ind) for ind in population])) + else self.F_base + ) + self.CR_base = ( + np.mean(CRs[fitness < np.array([func(ind) for ind in population])]) + if np.any(fitness < np.array([func(ind) for ind in population])) + else self.CR_base + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RDACE.py b/nevergrad/optimization/lama/RDACE.py new file mode 100644 index 000000000..0694bfa46 --- /dev/null +++ b/nevergrad/optimization/lama/RDACE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class RDACE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.initial_population_size = 100 + self.mutation_factor = 0.5 + self.crossover_rate = 0.7 # Adjusted for better exploration + + def initialize_population(self): + return np.random.uniform( + self.bounds[0], self.bounds[1], (self.initial_population_size, self.dimension) + ) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, best_idx): + mutants = np.empty_like(population) + for i in range(len(population)): + idxs = [idx for idx in range(len(population)) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.mutation_factor * (population[b] - population[c]) + mutants[i] = np.clip(mutant, self.bounds[0], self.bounds[1]) + return mutants + + def crossover(self, target, mutant): + mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(mask, mutant, target) + + def select(self, population, fitness, mutants, func): + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + for i in range(len(population)): + trial = self.crossover(population[i], mutants[i]) + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + return new_population, new_fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = len(population) + best_idx = np.argmin(fitness) + + while evaluations < self.budget: + if evaluations + len(population) > self.budget: + # Reduce population size to fit within budget + excess = evaluations + len(population) - self.budget + population = population[:-excess] + fitness = fitness[:-excess] + + mutants = self.mutate(population, best_idx) + population, fitness = self.select(population, fitness, mutants, func) + evaluations += len(population) + best_idx = np.argmin(fitness) + + best_individual = population[best_idx] + best_fitness = func(best_individual) + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RDSAS.py b/nevergrad/optimization/lama/RDSAS.py new file mode 100644 index 000000000..92b1888b4 --- /dev/null +++ b/nevergrad/optimization/lama/RDSAS.py @@ -0,0 +1,59 @@ +import numpy as np + + +class RDSAS: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + + def initialize(self): + population_size = 50 + population = np.random.uniform(self.bounds[0], self.bounds[1], (population_size, self.dimension)) + return population, population_size + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def local_search(self, individual, func, base_fitness): + # Safe scale computation with minimum boundary + scale = max(abs(base_fitness) * 0.01, 0.1) # Ensuring scale is never zero or negative + perturbation = np.random.normal(0, scale, self.dimension) + new_individual = individual + perturbation + new_individual = np.clip(new_individual, self.bounds[0], self.bounds[1]) + new_fitness = func(new_individual) + return new_individual if new_fitness < base_fitness else individual + + def __call__(self, func): + population, population_size = self.initialize() + best_fitness = np.Inf + best_individual = None + evaluations = 0 + + while evaluations < self.budget: + fitness = self.evaluate(population, func) + evaluations += population_size + + # Update global best + min_idx = np.argmin(fitness) + if fitness[min_idx] < best_fitness: + best_fitness = fitness[min_idx] + best_individual = population[min_idx].copy() + + # Local search with dynamic scale adjustment + for i in range(population_size): + if np.random.rand() < 0.1: # 10% chance to perform local search + population[i] = self.local_search(population[i], func, fitness[i]) + + # Enhanced synchronization based on performance stagnation + if evaluations % (100 + int(1000 * (best_fitness / (np.mean(fitness) + 1e-6)))) == 0: + population[np.random.randint(population_size)] = best_individual.copy() + + # Continuous diversification + if np.random.rand() < 0.02: # 2% chance for diversification + idx_to_diversify = np.random.randint(population_size) + population[idx_to_diversify] = np.random.uniform( + self.bounds[0], self.bounds[1], self.dimension + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/READEPMC.py b/nevergrad/optimization/lama/READEPMC.py new file mode 100644 index 000000000..ca9675c32 --- /dev/null +++ b/nevergrad/optimization/lama/READEPMC.py @@ -0,0 +1,73 @@ +import numpy as np + + +class READEPMC: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + F_base=0.6, + CR_base=0.9, + learning_rate=0.05, + p=0.2, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.F_base = F_base # Initial mutation factor + self.CR_base = CR_base # Initial crossover probability + self.learning_rate = learning_rate # Learning rate for adaptive parameters + self.p = p # Probability of using best individual updates + + def __call__(self, func): + # Initialize population and fitness + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_index = np.argmin(fitness) + best_individual = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mutation and crossover probabilities + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + if evaluations >= self.budget: + break + + # Choose different indices for mutation + indices = np.random.choice(self.population_size, 3, replace=False) + a, b, c = population[indices] + + # Prefer best solutions based on probability p + if np.random.rand() < self.p: + a = best_individual # Using best individual to guide mutation + + # Mutation and crossover + mutant = np.clip(a + F_adaptive[i] * (b - c), self.lower_bound, self.upper_bound) + trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection and adaptivity update + if trial_fitness < fitness[i]: + population[i], fitness[i] = trial, trial_fitness + F_adaptive[i] = max(0.1, F_adaptive[i] + self.learning_rate * (1.0 - F_adaptive[i])) + CR_adaptive[i] = min(1.0, CR_adaptive[i] - self.learning_rate * CR_adaptive[i]) + if trial_fitness < best_fitness: + best_fitness, best_individual = trial_fitness, trial.copy() + else: + F_adaptive[i] = max(0.1, F_adaptive[i] - self.learning_rate * F_adaptive[i]) + CR_adaptive[i] = min(1.0, CR_adaptive[i] + self.learning_rate * (1.0 - CR_adaptive[i])) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/REAMSEA.py b/nevergrad/optimization/lama/REAMSEA.py new file mode 100644 index 000000000..96aa7b7d6 --- /dev/null +++ b/nevergrad/optimization/lama/REAMSEA.py @@ -0,0 +1,76 @@ +import numpy as np + + +class REAMSEA: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.bounds = np.array([-5.0, 5.0]) + self.population_size = 100 + self.archive = [] + self.archive_size = 50 + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def evaluate(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutate(self, population, archive, best_index): + F = np.random.normal(0.5, 0.1) + new_population = np.empty_like(population) + combined = np.vstack((population, archive)) + for i in range(self.population_size): + idxs = np.random.choice(np.arange(len(combined)), 3, replace=False) + a, b, c = combined[idxs] + mutant_vector = a + F * (b - c) + new_population[i] = np.clip(mutant_vector, self.bounds[0], self.bounds[1]) + return new_population + + def crossover(self, target, mutant): + CR = 0.1 + 0.4 * np.random.rand() + mask = np.random.rand(self.dimension) < CR + return np.where(mask, mutant, target) + + def local_search(self, best_candidate, func): + step_size = 0.1 + local_best = best_candidate + local_best_fitness = func(best_candidate) + for _ in range(10): + candidate = local_best + np.random.uniform(-step_size, step_size, self.dimension) + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < local_best_fitness: + local_best = candidate + local_best_fitness = candidate_fitness + return local_best + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate(population, func) + evaluations = self.population_size + best_index = np.argmin(fitness) + + while evaluations < self.budget: + if len(self.archive) > self.archive_size: + self.archive.pop(0) + self.archive.append(population[best_index]) + + mutants = self.mutate(population, self.archive, best_index) + trials = np.array( + [self.crossover(population[i], mutants[i]) for i in range(self.population_size)] + ) + fitness_trials = self.evaluate(trials, func) + evaluations += len(trials) + + for i in range(self.population_size): + if fitness_trials[i] < fitness[i]: + population[i] = trials[i] + fitness[i] = fitness_trials[i] + + best_index = np.argmin(fitness) + + if evaluations % 100 == 0: + population[best_index] = self.local_search(population[best_index], func) + + return fitness[best_index], population[best_index] diff --git a/nevergrad/optimization/lama/RE_ADMMMS.py b/nevergrad/optimization/lama/RE_ADMMMS.py new file mode 100644 index 000000000..45ab8f464 --- /dev/null +++ b/nevergrad/optimization/lama/RE_ADMMMS.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RE_ADMMMS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_base=0.6, + F_amp=0.4, + memory_size=20, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population uniformly within bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Setup memory and elite storage + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Initialize the best solution tracker + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % (self.budget // 20) == 0: + # Update elite and memory + sorted_indices = np.argsort(fitness) + elite[:] = population[sorted_indices[: self.elite_size]] + elite_fitness[:] = fitness[sorted_indices[: self.elite_size]] + memory[:] = population[sorted_indices[: self.memory_size]] + memory_fitness[:] = fitness[sorted_indices[: self.memory_size]] + + for i in range(self.population_size): + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.85 else elite[np.random.randint(self.elite_size)] + ) + memory_contrib = memory[np.random.randint(self.memory_size)] + mutant = np.clip(a + F * (best_or_elite - b + memory_contrib), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RPWDE.py b/nevergrad/optimization/lama/RPWDE.py new file mode 100644 index 000000000..14c80f425 --- /dev/null +++ b/nevergrad/optimization/lama/RPWDE.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RPWDE: + def __init__(self, budget, population_size=50, crossover_rate=0.9, F_base=0.5, F_amp=0.4, elite_size=5): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base + self.F_amp = F_amp + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population within the bounds + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Elite solutions tracking + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices] + elite_fitness = fitness[elite_indices] + + for i in range(self.population_size): + # Adaptive mutation factor with progressive wave pattern + F = self.F_base + self.F_amp * np.sin(2 * np.pi * evaluations / self.budget) + + # Select mutation candidates + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + best_or_elite = elite[np.random.randint(0, self.elite_size)] + + # Mutation: A combination of best or elite with random individuals + mutant = np.clip(best_or_elite + F * (a - b), lb, ub) + + # Crossover: Binomial + cross_points = np.random.rand(dimension) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RankingDifferentialEvolution.py b/nevergrad/optimization/lama/RankingDifferentialEvolution.py new file mode 100644 index 000000000..cc2364e7e --- /dev/null +++ b/nevergrad/optimization/lama/RankingDifferentialEvolution.py @@ -0,0 +1,44 @@ +import numpy as np + + +class RankingDifferentialEvolution: + def __init__(self, budget=10000, population_size=30, f_weight=0.8, cr=0.9): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.f_weight = f_weight + self.cr = cr + + def initialize_population(self): + self.population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dim)) + + def generate_trial_vector(self, target_idx): + candidates = np.random.choice( + np.delete(np.arange(self.population_size), target_idx), size=3, replace=False + ) + a, b, c = self.population[candidates] + trial_vector = self.population[target_idx] + self.f_weight * (a - b) + + return np.clip(trial_vector, -5.0, 5.0) + + def update_population(self, func): + for i in range(self.population_size): + trial_vector = self.generate_trial_vector(i) + crossover_mask = np.random.uniform(0, 1, self.dim) < self.cr + new_vector = crossover_mask * trial_vector + (1 - crossover_mask) * self.population[i] + + new_value = func(new_vector) + + if new_value < func(self.population[i]): + self.population[i] = new_vector + + def __call__(self, func): + self.initialize_population() + + for _ in range(self.budget // self.population_size): + self.update_population(func) + + best_idx = np.argmin([func(ind) for ind in self.population]) + best_solution = self.population[best_idx] + + return func(best_solution), best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveClusteredDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveClusteredDifferentialEvolution.py new file mode 100644 index 000000000..b483d5b0d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveClusteredDifferentialEvolution.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats.qmc import Sobol + + +class RefinedAdaptiveClusteredDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem statement + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.memory_size = 20 + self.elite_size = 5 + self.memory = [] + self.elite = [] + self.mutation_strategies = [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2] + self.strategy_weights = np.ones(len(self.mutation_strategies)) + self._dynamic_parameters() + + def _initialize_population(self): + sobol_engine = Sobol(d=self.dim, scramble=False) + sobol_samples = sobol_engine.random_base2(m=int(np.log2(self.pop_size // 2))) + sobol_samples = self.lb + (self.ub - self.lb) * sobol_samples + + random_samples = np.random.uniform(self.lb, self.ub, (self.pop_size - len(sobol_samples), self.dim)) + return np.vstack((sobol_samples, random_samples)) + + def _local_search(self, x, func): + res = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim, options={"disp": False} + ) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.random.uniform(0.5, 1.0) + self.CR = np.random.uniform(0.4, 0.9) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _select_strategy(self): + return np.random.choice( + self.mutation_strategies, p=self.strategy_weights / self.strategy_weights.sum() + ) + + def _opposition_based_learning(self, population): + opp_population = self.lb + self.ub - population + return opp_population + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + else: # strategy == self._mutation_rand_2 + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + elite_indices = np.argsort(fitness)[: self.elite_size] + self.elite = [population[idx] for idx in elite_indices] + + if self.evaluations < self.budget: + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + self._dynamic_parameters() + + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + population = np.concatenate((population, opp_population), axis=0) + fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(fitness)[: self.pop_size] + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixAdaptation.py b/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixAdaptation.py new file mode 100644 index 000000000..fb565ee4a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixAdaptation.py @@ -0,0 +1,114 @@ +import numpy as np + + +class RefinedAdaptiveCovarianceMatrixAdaptation: + def __init__( + self, + budget, + population_size=50, + elite_fraction=0.2, + initial_sigma=0.3, + c_c=0.1, + c_s=0.3, + c_1=0.2, + c_mu=0.3, + damps=1.0, + ): + self.budget = budget + self.population_size = population_size + self.elite_fraction = elite_fraction + self.initial_sigma = initial_sigma + self.c_c = c_c # cumulation for C + self.c_s = c_s # cumulation for sigma control + self.c_1 = c_1 # learning rate for rank-one update + self.c_mu = c_mu # learning rate for rank-mu update + self.damps = damps # damping for step-size + + def __adaptive_covariance_matrix_adaptation(self, func, pop, mean, C, sigma, weights, pc, ps): + n_samples = self.population_size + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean, covariance matrix, and sigma + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + sigma = self.initial_sigma + + # Evolution path + pc = np.zeros(dim) + ps = np.zeros(dim) + chi_n = np.sqrt(dim) * (1.0 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim**2)) + + for iteration in range(max_iterations): + # Perform adaptive covariance matrix adaptation step + pop, scores = self.__adaptive_covariance_matrix_adaptation( + func, pop, mean, C, sigma, np.ones(self.population_size) / self.population_size, pc, ps + ) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean, covariance matrix, and sigma + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean_new = np.dot(np.ones(elite_count) / elite_count, elite_pop) + + ps = (1 - self.c_s) * ps + np.sqrt(self.c_s * (2 - self.c_s)) * (mean_new - mean) / sigma + hsig = ( + np.linalg.norm(ps) / np.sqrt(1 - (1 - self.c_s) ** (2 * evaluations / self.population_size)) + ) < (1.4 + 2 / (dim + 1)) + pc = (1 - self.c_c) * pc + hsig * np.sqrt(self.c_c * (2 - self.c_c)) * (mean_new - mean) / sigma + + artmp = (elite_pop - mean) / sigma + C = ( + (1 - self.c_1 - self.c_mu) * C + + self.c_1 * np.outer(pc, pc) + + self.c_mu * np.dot(artmp.T, artmp) / elite_count + ) + sigma *= np.exp((np.linalg.norm(ps) / chi_n - 1) * self.damps) + + mean = mean_new + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixEvolution.py new file mode 100644 index 000000000..222e902ba --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveCovarianceMatrixEvolution.py @@ -0,0 +1,96 @@ +import numpy as np + + +class RefinedAdaptiveCovarianceMatrixEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increase population size for better exploration + self.sigma = 0.3 # Decrease initial step size for more fine-tuned exploration + self.c1 = 0.02 # Learning rate for rank-one update + self.cmu = 0.03 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) # Damping factor for step size + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) # Number of parents for recombination + self.adaptive_learning_rate = 0.1 # Reduced learning rate for adaptive self-adaptive mutation + self.eval_count = 0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def dynamic_crossover(parent1, parent2): + alpha = np.random.uniform(0.25, 0.75, self.dim) # More balanced crossover + return alpha * parent1 + (1 - alpha) * parent2 + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + for i in range(self.population_size // 2): + parent1, parent2 = offspring[i], offspring[self.population_size // 2 + i] + offspring[i] = dynamic_crossover(parent1, parent2) + + new_fitness = np.array([func(ind) for ind in offspring]) + self.eval_count += self.population_size + + population = offspring + fitness = new_fitness + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedAdaptiveCovarianceMatrixEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveCrossoverElitistStrategyV7.py b/nevergrad/optimization/lama/RefinedAdaptiveCrossoverElitistStrategyV7.py new file mode 100644 index 000000000..2be25a939 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveCrossoverElitistStrategyV7.py @@ -0,0 +1,88 @@ +import numpy as np + + +class RefinedAdaptiveCrossoverElitistStrategyV7: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_fraction=0.2, + mutation_intensity=0.1, + crossover_rate=0.9, + adaptive_crossover_depth=0.8, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_crossover_depth = adaptive_crossover_depth + + def __call__(self, func): + # Initialize the population within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Perform adaptive crossover + parent1, parent2 = self.select_parents(elites, population, evaluations) + child = self.recombine(parent1, parent2, evaluations) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation intensity + scale = self.mutation_intensity * np.exp(-evaluations / self.budget * 10) + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2, evaluations): + # Adaptive recombination based on the stage of optimization + alpha = np.random.uniform(0.4, 0.6) + if evaluations < self.budget * self.adaptive_crossover_depth: + alpha *= np.exp(-evaluations / (self.budget * self.adaptive_crossover_depth)) + return alpha * parent1 + (1 - alpha) * parent2 + + def select_parents(self, elites, population, evaluations): + # Enhanced selection strategy based on optimization progress + if evaluations < self.budget * self.adaptive_crossover_depth: + parent1 = elites[np.random.choice(len(elites))] + parent2 = population[np.random.randint(0, self.population_size)] + else: + parent1 = elites[np.random.choice(len(elites))] + parent2 = elites[np.random.choice(len(elites))] + return parent1, parent2 diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..822bf8e31 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolution.py @@ -0,0 +1,160 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats.qmc import Sobol + + +class RefinedAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.memory_size = 20 + self.elite_size = 5 + self.memory = [] + self.elite = [] + self.mutation_strategies = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ] + self.strategy_weights = np.ones(len(self.mutation_strategies)) + self.strategy_success = np.zeros(len(self.mutation_strategies)) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.F = 0.5 # Initialize mutation factor + self.CR = 0.9 # Initialize crossover rate + + def _initialize_population(self): + sobol_engine = Sobol(d=self.dim, scramble=False) + sobol_samples = sobol_engine.random_base2(m=int(np.log2(self.pop_size // 2))) + sobol_samples = self.lb + (self.ub - self.lb) * sobol_samples + + random_samples = np.random.uniform(self.lb, self.ub, (self.pop_size - len(sobol_samples), self.dim)) + return np.vstack((sobol_samples, random_samples)) + + def _local_search(self, x, func): + res = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim, options={"disp": False} + ) + return res.x, res.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.0) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.4, 0.9) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + self.mutation_strategies, p=self.strategy_weights / self.strategy_weights.sum() + ) + + def _opposition_based_learning(self, population): + opp_population = self.lb + self.ub - population + return opp_population + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = self.mutation_strategies.index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if self.no_improvement_count >= 5: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + elite_indices = np.argsort(fitness)[: self.elite_size] + self.elite = [population[idx] for idx in elite_indices] + + if self.evaluations < self.budget: + if len(self.memory) < self.memory_size: + self.memory.append(self.x_opt) + else: + worst_mem_idx = np.argmax([func(mem) for mem in self.memory]) + self.memory[worst_mem_idx] = self.x_opt + + self._dynamic_parameters() + + if self.evaluations < self.budget: + opp_population = self._opposition_based_learning(population) + opp_fitness = np.array([func(ind) for ind in opp_population]) + self.evaluations += len(opp_population) + population = np.concatenate((population, opp_population), axis=0) + fitness = np.concatenate((fitness, opp_fitness), axis=0) + sorted_indices = np.argsort(fitness)[: self.pop_size] + population = population[sorted_indices] + fitness = fitness[sorted_indices] + + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionStrategy.py new file mode 100644 index 000000000..f76918038 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionStrategy.py @@ -0,0 +1,54 @@ +import numpy as np + + +class RefinedAdaptiveDifferentialEvolutionStrategy: + def __init__(self, budget, dim=5, pop_size=50, F=0.8, CR=0.9): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.F = F # Mutation factor + self.CR = CR # Crossover rate + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, population, idx): + indices = [i for i in range(self.pop_size) if i != idx] + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = np.clip( + population[a] + self.F * (population[b] - population[c]), self.bounds[0], self.bounds[1] + ) + return mutant + + def crossover(self, target, mutant): + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, f_values, trial, trial_f, trial_idx): + if trial_f < f_values[trial_idx]: + population[trial_idx] = trial + f_values[trial_idx] = trial_f + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + + while n_evals < self.budget: + for trial_idx in range(self.pop_size): + mutant = self.mutate(population, trial_idx) + trial = self.crossover(population[trial_idx], mutant) + trial_f = func(trial) + n_evals += 1 + self.select(population, f_values, trial, trial_f, trial_idx) + if n_evals >= self.budget: + break + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation.py new file mode 100644 index 000000000..c2f178391 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation.py @@ -0,0 +1,112 @@ +import numpy as np + + +class RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.3: + self.base_lr *= 1.1 + elif success_count / self.population_size < 0.1: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithGradientBoost.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithGradientBoost.py new file mode 100644 index 000000000..1407bdcb4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialEvolutionWithGradientBoost.py @@ -0,0 +1,112 @@ +import numpy as np + + +class RefinedAdaptiveDifferentialEvolutionWithGradientBoost: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + elif success_count / self.population_size < 0.1: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveDifferentialEvolutionWithGradientBoost(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSearch.py new file mode 100644 index 000000000..5d3d2068e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSearch.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RefinedAdaptiveDifferentialSearch: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.mutation_factor = 0.8 + self.crossover_rate = 0.7 + self.adaptive_factor = 0.05 # Smaller adaptive factor to fine-tune exploration + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, population, idx): + idxs = [i for i in range(self.population_size) if i != idx] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = np.clip( + population[a] + self.mutation_factor * (population[b] - population[c]), + self.bounds[0], + self.bounds[1], + ) + return mutant + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + return np.where(crossover_mask, mutant, target) + + def select(self, current, candidate, func): + if func(candidate) < func(current): + return candidate + else: + return current + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + population[i] = self.select(population[i], trial, func) + + # Update best solution found so far + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + if trial_fitness < fitness[best_idx]: + best_individual = trial + best_idx = i + + evaluations += 1 + if evaluations >= self.budget: + break + + return fitness[best_idx], best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSpiralSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSpiralSearch.py new file mode 100644 index 000000000..a44aa2afd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDifferentialSpiralSearch.py @@ -0,0 +1,63 @@ +import numpy as np + + +class RefinedAdaptiveDifferentialSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize a population around the search space + population_size = 100 # Increased population for better exploration + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Initialize spiral dynamics with adaptive parameters + radius = 5.0 + angle_increment = 2 * np.pi / population_size + evaluations_left = self.budget - population_size + radius_decay = 0.95 # Slower decay to maintain exploration longer + angle_speed_increase = 1.05 # Slightly more aggressive angular increment + + while evaluations_left > 0: + # Select indices for differential mutation using tournament selection + tournament_size = 3 + indices = np.random.choice(population_size, tournament_size, replace=False) + tournament_fitness = fitness[indices] + best_index = indices[np.argmin(tournament_fitness)] + rest_indices = np.delete(indices, np.argmin(tournament_fitness)) + + # Mutation and crossover using best of tournament and random others + a, b, c = population[best_index], population[rest_indices[0]], population[rest_indices[1]] + mutant = a + 0.9 * (b - c) # Increased weight for more aggressive mutations + mutant = np.clip(mutant, -5.0, 5.0) + + # Spiral movement on the mutant + angle = angle_increment * best_index + offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + candidate = mutant + offset + candidate = np.clip(candidate, -5.0, 5.0) + + # Evaluate candidate + f_candidate = func(candidate) + evaluations_left -= 1 + + # Selection based on better fitness + worst_index = np.argmax(fitness) + if f_candidate < fitness[worst_index]: + population[worst_index] = candidate + fitness[worst_index] = f_candidate + + # Update the optimal solution found + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = candidate + + # Adaptive update of spiral dynamics parameters + radius *= radius_decay + angle_increment *= angle_speed_increase + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDimensionalClimbingStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveDimensionalClimbingStrategy.py new file mode 100644 index 000000000..3d2d146da --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDimensionalClimbingStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedAdaptiveDimensionalClimbingStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + # Population settings + population_size = 200 + elite_size = 20 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.1 + adaptive_factor = 0.95 + recombination_prob = 0.7 + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + # Recombination of three parents for better exploration + parents_indices = np.random.choice(population_size, 3, replace=False) + parent1, parent2, parent3 = population[parents_indices] + child = (parent1 + parent2 + parent3) / 3 + else: + # Clone with a reduced mutation effect + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + # Adaptive mutation considering distance to global best + distance_to_best = np.linalg.norm(population[best_idx] - child) + individual_mutation_scale = mutation_scale * adaptive_factor ** (distance_to_best) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Incorporate elitism more effectively + if evaluations % 250 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in elite_indices: + population[idx] = elite_individuals[np.random.choice(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDimensionalCrossoverEvolver.py b/nevergrad/optimization/lama/RefinedAdaptiveDimensionalCrossoverEvolver.py new file mode 100644 index 000000000..13a2c9882 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDimensionalCrossoverEvolver.py @@ -0,0 +1,87 @@ +import numpy as np + + +class RefinedAdaptiveDimensionalCrossoverEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.2, + mutation_intensity=0.05, + crossover_rate=0.85, + momentum=0.1, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.momentum = momentum + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + crossover_mask = np.random.rand(self.dimension) < self.crossover_rate + child = np.where(crossover_mask, parent1, parent2) + return child + + def momentum_update(self, current, previous): + return current + self.momentum * (current - previous) + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + child = self.mutate(child) + if previous_population is not None: + child = self.momentum_update(child, previous_population[i]) + new_population[i] = child + + # Introduce the best of the previous generation to the new population + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDirectionalBiasQuorumOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveDirectionalBiasQuorumOptimization.py new file mode 100644 index 000000000..567e04ac1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDirectionalBiasQuorumOptimization.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedAdaptiveDirectionalBiasQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_scale=0.3, + momentum=0.9, + learning_rate=0.01, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(max(1, population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.learning_rate = learning_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Track best solution + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + # Optimization loop + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select elite indices including the best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Determine the local best + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Mutation and update strategy + direction = best_individual - local_best + random_noise = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = direction * random_noise + self.momentum * velocity + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution and velocity + if child_fitness < best_fitness: + velocity = self.learning_rate * (child - best_individual) + self.momentum * velocity + best_fitness = child_fitness + best_individual = child + + new_population[i, :] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adapt mutation scale and elite count dynamically + adaptive_ratio = np.random.uniform(-0.05, 0.05) + self.mutation_scale *= 1 + adaptive_ratio + self.elite_count = int(max(1, self.elite_count * (1 + adaptive_ratio))) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDivergenceClusteringSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveDivergenceClusteringSearch.py new file mode 100644 index 000000000..96634de11 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDivergenceClusteringSearch.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedAdaptiveDivergenceClusteringSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # given as per problem statement + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population_size = 20 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + + # Initialize tracking for adaptive mechanism + last_improvement = 0 + no_improvement_stretch = 0 + + iteration = 0 + while iteration < self.budget: + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + + if fitness[best_idx] < self.f_opt: + self.f_opt = fitness[best_idx] + self.x_opt = best_individual + last_improvement = iteration + + no_improvement_stretch = iteration - last_improvement + + # Calculate adaptive exploration rate + exploration_rate = 0.5 * np.exp(-no_improvement_stretch / 50) + + new_population = [] + for i in range(population_size): + if np.random.rand() < exploration_rate: + # Divergence logic + random_direction = np.random.normal(0, 1, self.dimension) + new_individual = best_individual + random_direction * np.random.rand() + else: + # Convergence logic + convergence_factor = np.random.uniform(0.1, 0.5) + new_individual = best_individual + convergence_factor * (population[i] - best_individual) + + # Local search mechanism + local_search_step = 0.1 * (self.upper_bound - self.lower_bound) + local_individual = new_individual + np.random.uniform( + -local_search_step, local_search_step, self.dimension + ) + local_individual = np.clip(local_individual, self.lower_bound, self.upper_bound) + + # Selection between new individual and local search result based on fitness + new_individual_fitness = func(new_individual) + local_individual_fitness = func(local_individual) + + if local_individual_fitness < new_individual_fitness: + new_population.append(local_individual) + new_fitness = local_individual_fitness + else: + new_population.append(new_individual) + new_fitness = new_individual_fitness + + # Update best found solution + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_population[-1] + + population = np.array(new_population) + fitness = np.array([func(individual) for individual in population]) + + iteration += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDiversityPSO.py b/nevergrad/optimization/lama/RefinedAdaptiveDiversityPSO.py new file mode 100644 index 000000000..989d4a012 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDiversityPSO.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedAdaptiveDiversityPSO: + def __init__( + self, + budget=10000, + population_size=120, + omega_start=0.95, + omega_end=0.35, + phi_p=0.12, + phi_g=0.12, + beta=0.25, + gamma=0.05, + ): + self.budget = budget + self.population_size = population_size + # Inertia weight decreases linearly from omega_start to omega_end + self.omega_start = omega_start + self.omega_end = omega_end + # Personal and global acceleration coefficients + self.phi_p = phi_p + self.phi_g = phi_g + # Diversity control parameter + self.beta = beta + # Global attraction factor to improve convergence on high conditioning functions + self.gamma = gamma + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + omega = self.omega_start - ((self.omega_start - self.omega_end) * evaluations / self.budget) + mean_position = np.mean(particles, axis=0) + diversity = np.mean(np.linalg.norm(particles - mean_position, axis=1)) + + for i in range(self.population_size): + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + r_b = np.random.random(self.dim) + r_c = np.random.random() + + # Update velocities with added global attraction term + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + + self.beta * r_b * (mean_position - particles[i]) + + self.gamma * r_c * (global_best - mean_position) + ) + + # Update positions + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate new solutions + current_score = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_score < personal_best_scores[i]: + personal_best[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best = particles[i] + global_best_score = current_score + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategy.py new file mode 100644 index 000000000..2e23de5a1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategy.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedAdaptiveDualPhaseStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[a] + self.F * (population[b] - population[c]) + else: + # Enhancing mutation strategy for phase 2 by using current best and another differential vector + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[best_idx] + self.F * ( + population[d] - population[e] + population[a] - population[b] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Fine-tuning of parameter adaptation to enhance exploration in the early stages and exploitation later + scale = iteration / total_iterations + self.F = np.clip(0.75 * np.sin(2 * np.pi * scale) + 0.75, 0.1, 1) + self.CR = np.clip(0.75 * np.cos(2 * np.pi * scale) + 0.75, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategyV3.py b/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategyV3.py new file mode 100644 index 000000000..ae4b57e85 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDualPhaseStrategyV3.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedAdaptiveDualPhaseStrategyV3: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Mutant vector calculation using best individual for faster convergence + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using two differential vectors for increased diversity + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Sigmoid adjustment for smoother transition + scale = iteration / total_iterations + self.F = np.clip(0.9 / (1 + np.exp(-10 * (scale - 0.5))) + 0.1, 0.1, 1) + self.CR = np.clip(0.9 / (1 + np.exp(10 * (scale - 0.5))) + 0.1, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicDE.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDE.py new file mode 100644 index 000000000..52e241c48 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class RefinedAdaptiveDynamicDE: + def __init__( + self, budget=10000, population_size=150, F_base=0.5, F_range=0.3, CR=0.9, strategy="dynamic" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity and adaptation + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation based on the learning phase + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive strategy for mutation: switches based on phase of optimization + if evaluations < self.budget * 0.5: + # Exploration phase: use random best from top 10% + best_samples = np.argsort(fitness)[: max(1, self.population_size // 10)] + base = population[np.random.choice(best_samples)] + else: + # Exploitation phase: focus more on the current best + base = population[best_idx] + + # Dynamic adjustment of F based on the optimization phase + F = self.F_base + (np.sin(evaluations / self.budget * np.pi) * self.F_range) + + # Mutation using DE/rand/1/bin strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV14.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV14.py new file mode 100644 index 000000000..e66d7dfd2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV14.py @@ -0,0 +1,87 @@ +import numpy as np + + +class RefinedAdaptiveDynamicDualPhaseStrategyV14: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + # Standard mutation strategy for phase 1 + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using more vectors + candidates = np.random.choice(idxs, 4, replace=False) + mutant = population[candidates[0]] + self.F * ( + population[candidates[1]] + - population[candidates[2]] + + 0.5 * (population[candidates[3]] - population[best_idx]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic parameter adjustment using a sigmoid-based function for a more aggressive middle-phase + scale = iteration / total_iterations + scale = 1 / ( + 1 + np.exp(-12 * (scale - 0.5)) + ) # Sharper sigmoid function for a more pronounced transition + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV17.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV17.py new file mode 100644 index 000000000..107f073eb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV17.py @@ -0,0 +1,88 @@ +import numpy as np + + +class RefinedAdaptiveDynamicDualPhaseStrategyV17: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + candidates = np.random.choice(idxs, 5, replace=False) + if phase == 1: + # Focusing more on the best individual and perturbations by two differences + mutant = ( + population[best_idx] + + self.F * (population[candidates[0]] - population[candidates[1]]) + + 0.5 * self.F * (population[candidates[2]] - population[candidates[3]]) + ) + else: + # Enhanced mutation strategy using more vectors for robust exploration + mutant = ( + population[best_idx] + + self.F * (population[candidates[0]] - population[candidates[1]]) + + self.F * (population[candidates[2]] - population[best_idx]) + + 0.5 * self.F * (population[candidates[3]] - population[candidates[4]]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adjusting parameters using a time-dependent adaptive strategy + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * scale, 0.1, 1) # Linearly increase mutation factor + self.CR = np.clip(0.9 - 0.4 * scale, 0.1, 1) # Linearly decrease crossover rate + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV20.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV20.py new file mode 100644 index 000000000..1b7aa7bd6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicDualPhaseStrategyV20.py @@ -0,0 +1,84 @@ +import numpy as np + + +class RefinedAdaptiveDynamicDualPhaseStrategyV20: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant, individual_CR): + crossover_mask = np.random.rand(self.dimension) < individual_CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + scale = 1 / (1 + np.exp(-10 * (scale - 0.5))) # Sigmoid function for smooth transition + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = 0.5 + 0.5 * np.cos(2 * np.pi * scale) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + C_individual = np.full(self.pop_size, self.CR) # Individual crossover probabilities + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant, C_individual[i]) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + C_individual[i] = max(0.1, C_individual[i] - 0.05) # Decrease CR if improvement + if trial_fitness < fitnesses[best_idx]: + best_idx = i + else: + C_individual[i] = min(0.9, C_individual[i] + 0.05) # Increase CR if no improvement + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicExplorationOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicExplorationOptimization.py new file mode 100644 index 000000000..4507c9cdc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicExplorationOptimization.py @@ -0,0 +1,166 @@ +import numpy as np + + +class RefinedAdaptiveDynamicExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 15 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.1 # Exploration factor to enhance exploration phase + max_exploration_cycles = 50 # Maximum exploration cycles + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedAdaptiveDynamicExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..dc66839f5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=50): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.5 + np.random.rand() * 0.3 # Narrowed mutation factor range + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.4 * ((iteration / max_iterations) ** 0.5) + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + return crossover_rate, learning_rate, memetic_probability + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveDynamicStrategyV25.py b/nevergrad/optimization/lama/RefinedAdaptiveDynamicStrategyV25.py new file mode 100644 index 000000000..fb34a08cb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveDynamicStrategyV25.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedAdaptiveDynamicStrategyV25: + def __init__( + self, budget, dimension=5, population_size=100, initial_F=0.5, initial_CR=0.9, switch_ratio=0.5 + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.initial_F = initial_F + self.initial_CR = initial_CR + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adapt_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + sigmoid = 1 / (1 + np.exp(-12 * (scale - 0.5))) + self.F = np.clip(self.initial_F + 0.4 * np.sin(np.pi * sigmoid), 0.1, 1) + self.CR = np.clip(self.initial_CR + 0.4 * np.cos(np.pi * sigmoid), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adapt_parameters(evaluations, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedDE.py b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedDE.py new file mode 100644 index 000000000..fdc3a62eb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedDE.py @@ -0,0 +1,122 @@ +import numpy as np + + +class RefinedAdaptiveEliteGuidedDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.restart_threshold = 0.01 + self.max_generations = int(self.budget / self.pop_size) + self.diversity_threshold = 0.1 + + def __call__(self, func): + def initialize_population(): + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + return pop, fitness + + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + pop, fitness = initialize_population() + self.budget -= self.pop_size + + generation = 0 + best_fitness_history = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / self.max_generations) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Track the best fitness value over generations + best_fitness_history.append(np.min(fitness)) + + # Check if a restart is needed + if len(best_fitness_history) > 10: + recent_improvement = np.abs(best_fitness_history[-10] - best_fitness_history[-1]) + if recent_improvement < self.restart_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + continue + + # Diversity check to avoid premature convergence + diversity = np.mean(np.std(pop, axis=0)) + if diversity < self.diversity_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE.py b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE.py new file mode 100644 index 000000000..b0b63d798 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE.py @@ -0,0 +1,117 @@ +import numpy as np + + +class RefinedAdaptiveEliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.archive = [] + self.local_search_rate = 0.3 + self.diversity_threshold = 1e-5 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Local Search on Elite Solutions + for idx in elite_indices: + if np.random.rand() < self.local_search_rate: + local_search_ind = pop[idx] + np.random.normal(0, 0.1, self.dim) + local_search_ind = np.clip(local_search_ind, lower_bound, upper_bound) + f_local = func(local_search_ind) + self.budget -= 1 + if f_local < fitness[idx]: + pop[idx] = local_search_ind + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = local_search_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + # Diversity preservation mechanism + diversity = np.mean(np.std(combined_pop, axis=0)) + if diversity < self.diversity_threshold: + combined_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + combined_fitness = np.array([func(ind) for ind in combined_pop]) + self.budget -= self.pop_size + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE_v5.py b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE_v5.py new file mode 100644 index 000000000..246107edc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEliteGuidedMutationDE_v5.py @@ -0,0 +1,105 @@ +import numpy as np + + +class RefinedAdaptiveEliteGuidedMutationDE_v5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation with enhanced crossover strategy + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + + if np.random.rand() < 0.5: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Mutation + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + # Enhanced crossover with additional elitist guidance + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + if np.random.rand() < 0.5: + trial = trial + np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveElitistDE_v4.py b/nevergrad/optimization/lama/RefinedAdaptiveElitistDE_v4.py new file mode 100644 index 000000000..ff539fa1d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveElitistDE_v4.py @@ -0,0 +1,126 @@ +import numpy as np + + +class RefinedAdaptiveElitistDE_v4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.local_search_prob = 0.3 + self.stagnation_threshold = 20 + self.stagnation_counter = 0 + self.best_fitness_history = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + new_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in new_pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, 0.01, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py new file mode 100644 index 000000000..9958f7a05 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch.py @@ -0,0 +1,111 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + local_search_rate=0.2, + local_search_budget=10, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.local_search_rate = local_search_rate + self.local_search_budget = local_search_budget + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k): + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + + def local_search(self, x, func): + res = minimize( + func, + x, + bounds=[(func.bounds.lb[i], func.bounds.ub[i]) for i in range(self.dim)], + options={"maxiter": self.local_search_budget}, + ) + return res.x + + def run_firework_algorithm(self, func): + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + if np.random.rand() < self.local_search_rate: + new_spark = self.local_search(new_spark, func) + + if func(new_spark) < func(x): + x = np.copy(new_spark) + self.update_parameters(i) + + self.fireworks[i] = (np.copy(x), 0) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.best_individual = x + self.best_fitness = func(self.best_individual) + + def adaptive_local_search(self, func): + improved = False + for i in range(self.population_size): + current_fitness = func(self.fireworks[i][0]) + new_individual = self.local_search(self.fireworks[i][0], func) + new_fitness = func(new_individual) + if new_fitness < current_fitness: + self.fireworks[i] = (np.copy(new_individual), 0) + improved = True + + return improved + + def __call__(self, func): + self.run_firework_algorithm(func) + + improved = self.adaptive_local_search(func) + if improved: + self.run_firework_algorithm(func) + + self.f_opt = self.best_fitness + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEnhancedGradientGuidedHybridPSO.py b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedGradientGuidedHybridPSO.py new file mode 100644 index 000000000..32fc23199 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedGradientGuidedHybridPSO.py @@ -0,0 +1,69 @@ +import numpy as np + + +class RefinedAdaptiveEnhancedGradientGuidedHybridPSO: + def __init__( + self, + budget=10000, + population_size=30, + initial_inertia=0.95, + final_inertia=0.3, + cognitive_weight=2.5, + social_weight=2.2, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.inertia_weight - self.evolution_rate, self.final_inertia + ) # Adaptive inertia weight + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + # Enhanced gradient-guided component with a dynamic adjustment factor + distance_factor = np.linalg.norm(global_best_position - particles[i]) + gradient_guided_component = ( + 0.1 * (global_best_position - particles[i]) / (1 + np.exp(-distance_factor)) + ) + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + gradient_guided_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2.py b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2.py new file mode 100644 index 000000000..730dbd094 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2.py @@ -0,0 +1,108 @@ +import numpy as np + + +class RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.5 + self.step_size = 0.1 + self.max_local_search_attempts = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.1, social_weight + 0.1 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 30 == 0: + for i in range(self.num_particles): + attempts = 0 + while attempts < self.max_local_search_attempts: + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + attempts += 1 + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..7ba5a1fbc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveEvolutionStrategy.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedAdaptiveEvolutionStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 # Adapted population size + mutation_rate = 0.1 # Adjusted mutation rate + mutation_scale = 0.3 # Increased mutation scale + crossover_rate = 0.7 # Adjusted crossover rate + elite_size = 10 # Reduced elite size + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Selection: Roulette Wheel Selection + fitness_scores = fitness.max() - fitness # Convert fitness to a maximization problem + if fitness_scores.sum() > 0: + probabilities = fitness_scores / fitness_scores.sum() + else: + probabilities = np.ones_like(fitness_scores) / len(fitness_scores) + selected_indices = np.random.choice( + np.arange(population_size), size=population_size - elite_size, p=probabilities, replace=True + ) + mating_pool = population[selected_indices] + + # Crossover: Uniform crossover + children = [] + for i in range(0, len(mating_pool) - 1, 2): + child1, child2 = np.copy(mating_pool[i]), np.copy(mating_pool[i + 1]) + for d in range(self.dim): + if np.random.rand() < crossover_rate: + child1[d], child2[d] = child2[d], child1[d] + children.append(child1) + children.append(child2) + + # Mutation + children = np.array(children) + mutation_mask = np.random.rand(children.shape[0], self.dim) < mutation_rate + mutations = np.random.normal(0, mutation_scale, children.shape) + children = np.clip(children + mutation_mask * mutations, self.lb, self.ub) + + # Evaluate new individuals + new_fitness = np.array([func(x) for x in children]) + evaluations += len(children) + + # Elitism and replacement + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + elite_fitness = fitness[elites_indices] + + combined_population = np.vstack([elites, children]) + combined_fitness = np.concatenate([elite_fitness, new_fitness]) + + # Select the next generation + sorted_indices = np.argsort(combined_fitness) + population = combined_population[sorted_indices[:population_size]] + fitness = combined_fitness[sorted_indices[:population_size]] + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveExplorationOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveExplorationOptimizer.py new file mode 100644 index 000000000..416f30e75 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveExplorationOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedAdaptiveExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Search space dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 300 # Increased population size for better exploration + mutation_base = 0.5 # Lower base mutation factor for finer grained exploration + mutation_adaptiveness = 0.1 # Adaptiveness factor for mutation control + crossover_base = 0.8 # Base crossover rate + elite_size = 30 # Increased elite size for better retention of solutions + + # Initialize population and evaluate + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Elite retention + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Adaptive mutation with noise reduction as evaluations increase + noise_reduction_factor = 1 - (evaluations / self.budget) + mutation_factor = ( + mutation_base + mutation_adaptiveness * (np.random.rand() - 0.5) * noise_reduction_factor + ) + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_base + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingOptimizerV5.py b/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingOptimizerV5.py new file mode 100644 index 000000000..a44c61c30 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingOptimizerV5.py @@ -0,0 +1,73 @@ +import numpy as np + + +class RefinedAdaptiveGlobalClimbingOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 120 # Adjusted population size for enhanced exploration + elite_size = 20 # Increased elite size for better quality solutions + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.25 # Increased mutation for stronger exploration + adaptive_factor = 0.9 # More aggressive scale down for mutation + recombination_prob = 0.75 # Slightly higher recombination for better exploration + + # Main evolutionary loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if evaluations % 250 == 0: # Increased check intervals + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size - elite_size): + if np.random.rand() < 0.2: # Increased regeneration rate + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingStrategy.py new file mode 100644 index 000000000..fb1a2fbee --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGlobalClimbingStrategy.py @@ -0,0 +1,89 @@ +import numpy as np + + +class RefinedAdaptiveGlobalClimbingStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 + elite_size = 50 + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Strategy parameters + mutation_scale = 0.1 + adaptive_factor = 0.90 + recombination_prob = 0.90 # Increase recombination probability + + # Enhancing exploration and exploitation + last_best_fitness = np.inf + + while evaluations < self.budget: + success_count = 0 + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + parents_indices = np.random.choice(population_size, 5, replace=False) # Use 5 parents + parents = population[parents_indices] + child = np.mean(parents, axis=0) # Mean recombination + else: + parent_idx = np.random.choice(population_size) + child = population[parent_idx].copy() + + # Adaptive mutation based on distance from the best + distance_to_best = np.linalg.norm(population[best_idx] - child) + individual_mutation_scale = mutation_scale * adaptive_factor ** (distance_to_best) + mutation = np.random.normal(0, individual_mutation_scale, self.dim) + child += mutation + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + success_count += 1 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + if fitness[current_best_idx] < last_best_fitness: + last_best_fitness = fitness[current_best_idx] + success_rate = success_count / population_size + adaptive_factor = max(0.75, adaptive_factor - 0.05 * success_rate) + mutation_scale = mutation_scale + 0.03 * (1 - success_rate) + + # Elite reinforcement with global polling + if evaluations % 300 == 0: + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size): + if idx not in elite_indices and np.random.rand() < 0.1: + population[idx] = elite_individuals[np.random.choice(elite_size)] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientCrossover.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientCrossover.py new file mode 100644 index 000000000..d82516d6b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientCrossover.py @@ -0,0 +1,89 @@ +import numpy as np + + +class RefinedAdaptiveGradientCrossover: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + mutation_rate = 0.1 + mutation_scale = 0.2 + crossover_rate = 0.85 + elite_size = 15 + + # Initialize population and calculate fitness + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Tournament Selection + tournament_size = 3 + parents = [] + for _ in range(population_size): + idx = np.random.choice(range(population_size), size=tournament_size, replace=False) + parents.append(population[idx[np.argmin(fitness[idx])]]) + parents = np.array(parents) + + # Crossover: Blend Crossover (BLX-alpha) + alpha = 0.5 + children = [] + for i in range(0, parents.shape[0], 2): + if i + 1 >= parents.shape[0]: + continue + p1, p2 = parents[i], parents[i + 1] + gamma = (1 + 2 * alpha) * np.random.random(self.dim) - alpha + child1 = gamma * p1 + (1 - gamma) * p2 + child2 = gamma * p2 + (1 - gamma) * p1 + children.extend([child1, child2]) + children = np.clip(children, self.lb, self.ub) + + # Mutation: Uniform mutation + for child in children: + if np.random.rand() < mutation_rate: + mutate_dims = np.random.randint(0, self.dim) + child[mutate_dims] += mutation_scale * np.random.randn() + + # Ensure all mutations are within bounds + children = np.clip(children, self.lb, self.ub) + + # Evaluate children + children_fitness = np.array([func(child) for child in children]) + evaluations += len(children) + + # Elitism and new population formation + combined_population = np.vstack([population, children]) + combined_fitness = np.concatenate([fitness, children_fitness]) + + # Select the best to form the next generation + elite_indices = np.argsort(combined_fitness)[:elite_size] + non_elite_indices = np.argsort(combined_fitness)[elite_size:population_size] + + population = np.vstack( + [ + combined_population[elite_indices], + combined_population[non_elite_indices][: population_size - elite_size], + ] + ) + fitness = np.concatenate( + [ + combined_fitness[elite_indices], + combined_fitness[non_elite_indices][: population_size - elite_size], + ] + ) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientDifferentialEvolution.py new file mode 100644 index 000000000..a45e1ccfb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientDifferentialEvolution.py @@ -0,0 +1,112 @@ +import numpy as np + + +class RefinedAdaptiveGradientDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adapt learning rate based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + else: + self.base_lr *= 0.95 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveGradientDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientEnhancedRAMEDS.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientEnhancedRAMEDS.py new file mode 100644 index 000000000..d5885c8e2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientEnhancedRAMEDS.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedAdaptiveGradientEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_base=0.5, + F_range=0.4, + memory_size=50, + elite_size=10, + alpha=0.1, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_base = F_base # Base level for mutation factor + self.F_range = F_range # Range for mutation factor adjustment + self.memory_size = memory_size + self.elite_size = elite_size + self.alpha = alpha # Smoothing factor for adaptive mutation adjustments + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + # Initialize adaptive mutation factor + F_current = self.F_base + + evaluations = self.population_size + while evaluations < self.budget: + # Calculate adaptive mutation factor + historical_fitness_improvements = fitness - np.roll(fitness, 1) + mean_improvement = np.mean(historical_fitness_improvements[1:]) # ignore the first incorrect diff + F_current = F_current * (1 - self.alpha) + self.alpha * ( + self.F_base + self.F_range * np.sign(mean_improvement) + ) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F_current * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update with better solutions + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientEvolverV2.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientEvolverV2.py new file mode 100644 index 000000000..6eaa2e655 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientEvolverV2.py @@ -0,0 +1,95 @@ +import numpy as np + + +class RefinedAdaptiveGradientEvolverV2: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + elite_fraction=0.1, + mutation_intensity=0.2, + crossover_probability=0.8, + gradient_step=0.1, + mutation_decay=0.99, + gradient_enhancement=True, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement = gradient_enhancement + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return parent1 if np.random.rand() < 0.5 else parent2 + + def adaptive_gradient(self, individual, func, best_individual): + if self.gradient_enhancement: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.linalg.norm(gradient_direction)) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = np.zeros_like(population) + for i in range(self.population_size): + if i < len(elites): + new_population[i] = self.adaptive_gradient(elites[i], func, best_individual) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population[i] = child + + fitness = self.evaluate_fitness(func, new_population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = new_population[min_idx] + + population = new_population + evaluations += self.population_size + + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientGuidedEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientGuidedEvolution.py new file mode 100644 index 000000000..c7ec64f92 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientGuidedEvolution.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedAdaptiveGradientGuidedEvolution: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + mutation_intensity=0.15, + gradient_sampling=15, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.mutation_intensity = mutation_intensity + self.gradient_sampling = gradient_sampling # Number of points to estimate gradient + self.sigma = 0.1 # Standard deviation for mutations, reduced for finer gradient approximations + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation operation with decreased sigma for more precise movements + return np.clip( + individual + np.random.normal(0, self.sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def approximate_gradient(self, individual, func): + # Enhanced gradient approximation by central difference method + gradients = [] + initial_fitness = func(individual) + for _ in range(self.gradient_sampling): + perturbation = np.random.normal(0, self.sigma, self.dimension) + forward = np.clip(individual + perturbation, self.bounds[0], self.bounds[1]) + backward = np.clip(individual - perturbation, self.bounds[0], self.bounds[1]) + forward_fitness = func(forward) + backward_fitness = func(backward) + gradient = ( + (forward_fitness - backward_fitness) + / (2 * np.linalg.norm(perturbation) + 1e-6) + * perturbation + ) + gradients.append(gradient) + return np.mean(gradients, axis=0) + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + gradient = self.approximate_gradient(population[i], func) + individual = ( + population[i] - self.mutation_intensity * gradient + ) # Adaptive gradient descent step + individual = self.mutate(individual) # Controlled mutation step + individual_fitness = func(individual) + evaluations += 1 + + if individual_fitness < fitness[i]: + population[i] = individual + fitness[i] = individual_fitness + + if individual_fitness < best_fitness: + best_individual = individual + best_fitness = individual_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGradientHybridOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveGradientHybridOptimizer.py new file mode 100644 index 000000000..1f7910183 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGradientHybridOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class RefinedAdaptiveGradientHybridOptimizer: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.5, + F_range=0.4, + CR=0.9, + elite_fraction=0.05, + mutation_strategy="adaptive", + crossover_strategy="exponential", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.crossover_strategy = crossover_strategy # Type of crossover 'binomial' or 'exponential' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Adaptive mutation using an elite individual + if self.mutation_strategy == "adaptive": + base = population[np.random.choice(elite_indices)] + else: + base = best_individual + + # Dynamic adjustment of F + F = self.F_base + np.random.normal(0, self.F_range) + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Exponential crossover + if self.crossover_strategy == "exponential": + trial = population[i].copy() + j = np.random.randint(self.dim) + L = 0 + while (np.random.rand() < self.CR) and (L < self.dim): + trial[j] = mutant[j] + j = (j + 1) % self.dim + L += 1 + else: # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveGuidedEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveGuidedEvolutionStrategy.py new file mode 100644 index 000000000..27d0bd200 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveGuidedEvolutionStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class RefinedAdaptiveGuidedEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + initial_step_size=0.5, + min_step_size=0.01, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.min_step_size = min_step_size + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual): + # Mutation with dynamic step size control + mutation = np.random.normal(0, self.step_size, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def update_step_size(self, generation): + # Exponential decay with a lower limit + decay_factor = 0.98 + self.step_size = max(self.min_step_size, self.step_size * decay_factor**generation) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness): + best_index = np.argmin(fitness) + return population[best_index], fitness[best_index] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual, best_fitness = self.select_best(population, fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + self.update_step_size(generation) + new_population = np.array([self.mutate(ind) for ind in population]) + new_fitness = self.evaluate_population(func, new_population) + + # Replace only if the new individual is better + for i in range(self.population_size): + if new_fitness[i] < fitness[i]: + population[i] = new_population[i] + fitness[i] = new_fitness[i] + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_individual = new_population[i] + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridDE.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridDE.py new file mode 100644 index 000000000..7ca50381b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridDE.py @@ -0,0 +1,64 @@ +import numpy as np + + +class RefinedAdaptiveHybridDE: + def __init__(self, budget=10000, population_size=100, F_base=0.8, CR_base=0.9, strategy_switch=0.1): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.strategy_switch = strategy_switch # Threshold to switch strategies + self.dim = 5 # Dimensionality fixed to 5 according to the problem statement + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly across the search space + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Initialize adaptive strategy components + F_adaptive = np.full(self.population_size, self.F_base) + CR_adaptive = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + best_idx = np.argmin(fitness) + for i in range(self.population_size): + # Adaptive strategy: update F and CR per individual + F_adaptive[i] = max(0.1, self.F_base * np.exp(-4.0 * evaluations / self.budget)) + CR_adaptive[i] = self.CR_base / (1 + np.exp(-10 * (evaluations / self.budget - 0.5))) + + # Select mutation strategy dynamically + if np.random.rand() < self.strategy_switch: + idxs = np.delete(np.arange(self.population_size), i) + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = np.clip( + population[c] + F_adaptive[i] * (population[a] - population[b]), self.lb, self.ub + ) + else: + a, b = np.random.choice(np.delete(np.arange(self.population_size), i), 2, replace=False) + mutant = np.clip( + population[i] + + F_adaptive[i] * (population[best_idx] - population[i]) + + F_adaptive[i] * (population[a] - population[b]), + self.lb, + self.ub, + ) + + # Crossover + trial = np.where(np.random.rand(self.dim) < CR_adaptive[i], mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < fitness[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + return fitness[best_idx], population[best_idx] diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridEvolutionStrategyV6.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridEvolutionStrategyV6.py new file mode 100644 index 000000000..8496836c7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridEvolutionStrategyV6.py @@ -0,0 +1,62 @@ +import numpy as np +import scipy.stats as stats + + +class RefinedAdaptiveHybridEvolutionStrategyV6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + mutation_rate = 0.15 # Slightly increased mutation rate + mutation_scale = lambda t: 0.1 * np.exp(-0.0001 * t) # Gradual decrease in mutation scale + crossover_rate = 0.9 # Adjusted crossover rate for balance + elite_size = int(0.2 * population_size) # Increased elite size + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + # Using tournament selection for parent selection + for _ in range(population_size - elite_size): + tournament = np.random.choice(population_size, 5, replace=False) + t1, t2 = np.argmin(fitness[tournament][:3]), np.argmin(fitness[tournament][3:]) + parent1, parent2 = population[tournament[t1]], population[tournament[t2]] + + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + new_population.append(child) + + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimization.py new file mode 100644 index 000000000..95c10f412 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class RefinedAdaptiveHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_min = 0.4 + w_max = 0.9 + w_decay = 0.995 + + # Differential Evolution parameters + F_base = 0.8 + CR_base = 0.9 + + # Gradient-based search parameters + alpha_base = 0.1 + beta_base = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = CR_base + adaptive_F = F_base + adaptive_alpha = alpha_base + adaptive_beta = beta_base + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = CR_base - 0.5 * (i / self.budget) + adaptive_F = F_base + 0.2 * (i / self.budget) + adaptive_alpha = alpha_base + 0.1 * (i / self.budget) + adaptive_beta = beta_base - 0.3 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.05 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(w_min, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedAdaptiveHybridOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..a9c022672 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridOptimizer.py @@ -0,0 +1,61 @@ +import numpy as np + + +class RefinedAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given dimensionality. + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 200 + mutation_factor = 0.8 # Increased initial mutation to explore more broadly + crossover_prob = 0.7 # Lowered crossover to prevent premature convergence + adaptive_factor = 0.01 # Smoother adaptation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + num_iterations = self.budget // population_size + + for iteration in range(num_iterations): + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + # Dynamic mutation factor adjustment + dynamic_mutation = mutation_factor + adaptive_factor * np.random.randn() + mutant = a + dynamic_mutation * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Dynamic crossover probability adjustment + dynamic_crossover = crossover_prob + adaptive_factor * np.random.randn() + trial = np.where(np.random.rand(self.dim) < dynamic_crossover, mutant, population[i]) + trial_fitness = func(trial) + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + + # Adaptive mutation and crossover adjustments + if best_value < np.mean(fitness): + mutation_factor += 2 * adaptive_factor # Faster increase in mutation factor + crossover_prob -= adaptive_factor / 2 # Slower decrease in crossover probability + else: + mutation_factor -= adaptive_factor / 2 # Slower decrease in mutation factor + crossover_prob += 2 * adaptive_factor # Faster increase in crossover probability + + mutation_factor = np.clip(mutation_factor, 0.1, 1.0) + crossover_prob = np.clip(crossover_prob, 0.1, 1.0) + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..f29c5120b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridParticleSwarmDifferentialEvolution.py @@ -0,0 +1,136 @@ +import numpy as np + + +class RefinedAdaptiveHybridParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 # Differential weight + self.initial_CR = 0.9 # Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.local_search_rate = 0.2 # Probability for local search + self.memory_size = 5 # Memory size for self-adaptation + self.w = 0.7 # Inertia weight for PSO + self.c1 = 1.5 # Cognitive coefficient for PSO + self.c2 = 2.0 # Social coefficient for PSO + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal bests + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + # Simple local search strategy + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with memory + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.6, 1.0), np.clip(adaptive_CR, 0.6, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + # Update memory + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # PSO update for non-elite particles + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + # Update personal bests + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + # Update global best + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + # Update population and fitness + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedAdaptiveHybridParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridQuasiRandomGradientDE.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridQuasiRandomGradientDE.py new file mode 100644 index 000000000..d72dcab0e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridQuasiRandomGradientDE.py @@ -0,0 +1,129 @@ +import numpy as np +from scipy.stats import qmc + + +class RefinedAdaptiveHybridQuasiRandomGradientDE: + def __init__(self, budget, population_size=30, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j])) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveHybridQuasiRandomGradientDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveHybridSwarmEvolutionOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveHybridSwarmEvolutionOptimization.py new file mode 100644 index 000000000..f3c6a0f57 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveHybridSwarmEvolutionOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class RefinedAdaptiveHybridSwarmEvolutionOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Increased population size for better exploration + self.initial_F = 0.7 # Slightly lower mutation factor + self.initial_CR = 0.9 # Higher crossover rate for better diversity + self.elite_rate = 0.05 # Reduced elite rate to balance exploration + self.local_search_rate = 0.2 # Increased local search rate for better exploitation + self.memory_size = 25 # Larger memory size for better parameter adaptation + self.w = 0.4 # Lower inertia weight for better convergence + self.c1 = 1.7 # Increased cognitive component + self.c2 = 1.3 # Decreased social component + self.phase_switch_ratio = 0.5 # More balanced budget allocation between phases + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Slightly larger local search step + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedAdaptiveHybridSwarmEvolutionOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveIncrementalCrossover.py b/nevergrad/optimization/lama/RefinedAdaptiveIncrementalCrossover.py new file mode 100644 index 000000000..39ef1b203 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveIncrementalCrossover.py @@ -0,0 +1,71 @@ +import numpy as np + + +class RefinedAdaptiveIncrementalCrossover: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 150 + self.elite_size = 30 + self.offspring_size = 120 + self.mutation_scale = 0.03 + self.crossover_rate = 0.7 + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_survivors(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover(self, parents): + num_parents = len(parents) + offspring = np.empty((self.offspring_size, self.dim)) + for i in range(self.offspring_size): + if np.random.rand() < self.crossover_rate: + p1, p2 = np.random.choice(num_parents, 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i, :cross_point] = parents[p1, :cross_point] + offspring[i, cross_point:] = parents[p2, cross_point:] + else: + offspring[i, :] = parents[np.random.randint(num_parents)] + return offspring + + def mutate(self, population): + perturbation = np.random.normal(0, self.mutation_scale, size=population.shape) + mutated = np.clip(population + perturbation, self.lower_bound, self.upper_bound) + return mutated + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_survivors(population, fitness) + + offspring = self.crossover(elite_population) + offspring = self.mutate(offspring) + + elite_population[0] = best_solution.copy() + + population = np.vstack((elite_population, offspring)) + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveIslandEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedAdaptiveIslandEvolutionStrategy.py new file mode 100644 index 000000000..adca37b7e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveIslandEvolutionStrategy.py @@ -0,0 +1,94 @@ +import numpy as np + + +class RefinedAdaptiveIslandEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=5, + population_per_island=20, + migration_rate=0.1, + mutation_intensity=0.5, + mutation_decay=0.98, + elite_ratio=0.1, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + # Fill the rest of the island population + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + np.random.shuffle(population) + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveMemeticDifferentialEvolution.py new file mode 100644 index 000000000..177ade889 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMemeticDifferentialEvolution.py @@ -0,0 +1,167 @@ +import numpy as np + + +class RefinedAdaptiveMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.F = 0.8 + self.CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.F] * self.memory_size + self.memory_CR = [self.CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + self.min_pop_size = 30 + self.max_pop_size = 100 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate_rand_1(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def mutate_best_1(self, best, target, parent, F): + return np.clip(target + F * (best - target) + F * (parent - target), -5.0, 5.0) + + def mutate_current_to_best_1(self, best, current, parent1, parent2, F): + return np.clip(current + F * (best - current) + F * (parent1 - parent2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(10): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self): + return np.random.randint(self.min_pop_size, self.max_pop_size + 1) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + + strategy = np.random.choice(["rand_1", "best_1", "curr_to_best_1"]) + if strategy == "rand_1": + mutant = self.mutate_rand_1(parent1, parent2, parent3, F) + elif strategy == "best_1": + mutant = self.mutate_best_1(global_best_position, population[i], parent1, F) + else: + mutant = self.mutate_current_to_best_1( + global_best_position, population[i], parent1, parent2, F + ) + + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size() + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMemeticDiverseOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveMemeticDiverseOptimizer.py new file mode 100644 index 000000000..a632c6893 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMemeticDiverseOptimizer.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdaptiveMemeticDiverseOptimizer: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=15): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedSearch.py new file mode 100644 index 000000000..5fb8801d6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedSearch.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedAdaptiveMemoryEnhancedSearch: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite structures + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track the best solution and its fitness + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Dynamic mutation factor using sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + + # Mutation: DE/rand-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(population[i] + F * (best_solution - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Attempt to find a better solution in memory or update it + if trial_fitness < np.max(memory_fitness): + worst_idx = np.argmax(memory_fitness) + memory[worst_idx] = trial.copy() + memory_fitness[worst_idx] = trial_fitness + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedStrategyV55.py b/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedStrategyV55.py new file mode 100644 index 000000000..e6e42ba8f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMemoryEnhancedStrategyV55.py @@ -0,0 +1,73 @@ +import numpy as np + + +class RefinedAdaptiveMemoryEnhancedStrategyV55: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=10): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + # Dynamically adjust the scale factor based on progress + scale_factor = self.F * (0.5 + 0.5 * np.sin(2 * np.pi * (len(self.memory) / self.memory_size))) + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + mutant = population[a] + scale_factor * (population[b] - population[c]) + + if self.memory: + # Apply differential memory effect + memory_effect = np.mean(self.memory, axis=0) + mutant += 0.1 * memory_effect # Scaled memory influence + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + return trial, f_trial + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMemoryStrategyV67.py b/nevergrad/optimization/lama/RefinedAdaptiveMemoryStrategyV67.py new file mode 100644 index 000000000..75ec8370e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMemoryStrategyV67.py @@ -0,0 +1,93 @@ +import numpy as np + + +class RefinedAdaptiveMemoryStrategyV67: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover rate + self.switch_ratio = switch_ratio + self.memory_size = memory_size + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Use memory to guide mutation in phase 2 + memory_effect = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory.pop(0) + self.memory.append(trial - target) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + # Gradual and non-linear parameter adaptation + self.F = 0.5 + 0.5 * np.sin(np.pi * scale) + self.CR = 0.5 + 0.5 * np.cos(np.pi * scale) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + total_iterations = self.budget // self.pop_size + switch_point = int(self.switch_ratio * total_iterations) + + for iteration in range(total_iterations): + phase = 1 if iteration < switch_point else 2 + self.adjust_parameters(iteration, total_iterations) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..3a184b6eb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMultiOperatorSearch.py @@ -0,0 +1,146 @@ +import numpy as np + + +class RefinedAdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w_max = 0.9 # Max inertia weight + w_min = 0.4 # Min inertia weight + w_decay = (w_max - w_min) / self.budget + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + w = max(w_min, w_max - w_decay * i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedAdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE.py b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE.py new file mode 100644 index 000000000..91601c4d7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE.py @@ -0,0 +1,162 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdaptiveMultiStrategyDE: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.4 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 + self.stagnation_threshold = 10 + self.restart_threshold = 20 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = np.inf + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + # Restart population if stagnation or budget threshold reached + if stagnation_count >= self.stagnation_threshold or self.budget < self.restart_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation and crossover factors + success_rate = max(0, (self.budget - self.pop_size * generation) / self.budget) + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) * success_rate + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) * success_rate + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + # Enhanced selection strategy + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Hybrid mutation strategy based on success rate + if success_rate < 0.3: + mutant = x1 + mutation_factor * (x2 - x3) + elif success_rate < 0.6: + mutant = x1 + mutation_factor * (x2 - pop[np.random.randint(self.pop_size)]) + else: + mutant = x1 + mutation_factor * (elite_pop[np.random.randint(elite_count)] - x3) + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism with diversity preservation + self.update_archive(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = np.array(self.archive[archive_idx]) + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + if np.random.rand() < 0.5: + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + else: + # Gradient-based adjustment + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) + result = minimize(func, best_x + perturbation, method="BFGS", options={"maxiter": 10}) + + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x + + def update_archive(self, new_pop): + unique_archive = [] + for ind in new_pop: + if not any(np.array_equal(ind, arch) for arch in self.archive): + unique_archive.append(ind) + self.archive.extend(unique_archive) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE_v2.py b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE_v2.py new file mode 100644 index 000000000..1cd49886f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDE_v2.py @@ -0,0 +1,140 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdaptiveMultiStrategyDE_v2: + def __init__(self, budget=10000, dim=5): + self.budget = budget + self.dim = dim + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + self.stagnation_threshold = 10 + self.max_archive_size = 100 # Limit archive size to maintain diversity + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + stagnation_count = 0 + + while self.budget > 0: + # Check for stagnation + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + else: + stagnation_count = 0 + + if stagnation_count >= self.stagnation_threshold: + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + stagnation_count = 0 + + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.max_archive_size: + self.archive = self.archive[-self.max_archive_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + stagnation_count += 1 + + last_best_fitness = self.f_opt + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Gradient-based adjustment + result = minimize(func, best_x, method="BFGS", options={"maxiter": 20}) + self.budget -= result.nfev + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + return best_x diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..270fab3be --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolution.py @@ -0,0 +1,135 @@ +import numpy as np + + +class RefinedAdaptiveMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolutionV2.py b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolutionV2.py new file mode 100644 index 000000000..0437afa8d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveMultiStrategyDifferentialEvolutionV2.py @@ -0,0 +1,154 @@ +import numpy as np + + +class RefinedAdaptiveMultiStrategyDifferentialEvolutionV2: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + def differential_mutation(a, b, c): + return a + self.mutation_factor * (b - c) + + def gaussian_mutation(x): + return x + np.random.randn(self.dim) * 0.1 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(differential_mutation(a, b, c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + # Apply Gaussian mutation randomly to maintain diversity + if np.random.rand() < 0.1: + mutated_individual = gaussian_mutation(population[j]) + mutated_individual = np.clip(mutated_individual, self.bounds[0], self.bounds[1]) + mutated_fitness = func(mutated_individual) + evaluations += 1 + if mutated_fitness < fitness[j]: + population[j] = mutated_individual + fitness[j] = mutated_fitness + if mutated_fitness < self.f_opt: + self.f_opt = mutated_fitness + self.x_opt = mutated_individual + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveMultiStrategyDifferentialEvolutionV2(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveParameterStrategyV38.py b/nevergrad/optimization/lama/RefinedAdaptiveParameterStrategyV38.py new file mode 100644 index 000000000..5943902fa --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveParameterStrategyV38.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedAdaptiveParameterStrategyV38: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + # Adaptive mutation strategy with self-adjusting F + mutant = population[best_idx] + self.F * ( + population[a] - population[b] + population[c] - population[best_idx] + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adjustment of F and CR using nonlinear scaling + scale = iteration / total_iterations + self.F = 0.5 * (1 + np.sin(np.pi * scale - np.pi / 2)) # Sinusoidal adjustment for F + self.CR = 0.9 - 0.4 * np.sin(np.pi * scale) # Sinusoidal adjustment for CR + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py b/nevergrad/optimization/lama/RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py new file mode 100644 index 000000000..72aaea5ae --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch.py @@ -0,0 +1,157 @@ +import numpy as np + + +class RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-5 + self.learning_rate = 0.1 + + # For adaptive population sizing + self.min_pop_size = 20 + self.max_pop_size = 70 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + # Use memory to adapt parameters F and CR + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(5): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self, current_pop_size): + new_pop_size = np.random.randint(self.min_pop_size, self.max_pop_size + 1) + return new_pop_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size( + self.initial_pop_size + ) # Adapt population size here + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size # Update population size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py b/nevergrad/optimization/lama/RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py new file mode 100644 index 000000000..ac0170140 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch.py @@ -0,0 +1,157 @@ +import numpy as np + + +class RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.elitism_rate = 0.2 + self.memory_size = 5 + self.memory_F = [self.initial_F] * self.memory_size + self.memory_CR = [self.initial_CR] * self.memory_size + self.memory_index = 0 + self.diversity_threshold = 1e-4 + self.learning_rate = 0.2 + + # For adaptive population sizing + self.min_pop_size = 30 + self.max_pop_size = 100 + + def initialize_population(self, bounds, pop_size): + return np.random.uniform(bounds.lb, bounds.ub, (pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(len(population)), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self): + # Use memory to adapt parameters F and CR + F = np.random.choice(self.memory_F) + CR = np.random.choice(self.memory_CR) + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(10): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def elite_learning(self, elite, global_best): + return np.clip( + elite + self.learning_rate * np.random.randn(self.dim) * (global_best - elite), -5.0, 5.0 + ) + + def restart_population(self, bounds, pop_size): + return self.initialize_population(bounds, pop_size) + + def adaptive_population_size(self, current_pop_size): + new_pop_size = np.random.randint(self.min_pop_size, self.max_pop_size + 1) + return new_pop_size + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds, self.initial_pop_size) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + while evaluations < self.budget: + new_population = np.zeros((self.initial_pop_size, self.dim)) + fitness = np.zeros(self.initial_pop_size) + + for i in range(self.initial_pop_size): + if evaluations >= self.budget: + break + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.initial_pop_size)] + for i in elite_indices: + if evaluations >= self.budget: + break + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += 5 + + elite_population = new_population[elite_indices] + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.initial_pop_size) :] + for i in non_elite_indices: + if evaluations >= self.budget: + break + learned_trial = self.elite_learning(new_population[i], global_best_position) + learned_fitness = func(learned_trial) + evaluations += 1 + + if learned_fitness < fitness[i]: + new_population[i] = learned_trial + fitness[i] = learned_fitness + if learned_fitness < self.f_opt: + self.f_opt = learned_fitness + self.x_opt = learned_trial + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_pop_size = self.adaptive_population_size( + self.initial_pop_size + ) # Adapt population size here + new_population = self.restart_population(bounds, new_pop_size) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += new_pop_size + self.initial_pop_size = new_pop_size # Update population size + + population = np.copy(new_population) + + # Update memory + self.memory_F[self.memory_index] = F + self.memory_CR[self.memory_index] = CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionBalanceStrategy.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionBalanceStrategy.py new file mode 100644 index 000000000..d372181d7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionBalanceStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class RefinedAdaptivePrecisionBalanceStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + population_size = 100 + elite_size = int(0.2 * population_size) + mutation_rate = 0.6 + mutation_scale = 0.1 + crossover_rate = 0.7 + + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + new_population = [] + + # Select elites to carry over to next generation + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + + # Generate the rest of the new population + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:cross_point], parent2[cross_point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + new_population.append(child) + + new_population = np.array(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + # Combine new population with elites + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elites_indices], new_fitness]) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt + + +# Example usage: +# optimizer = RefinedAdaptivePrecisionBalanceStrategy(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV4.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV4.py new file mode 100644 index 000000000..5d59c50c9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV4.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RefinedAdaptivePrecisionCohortOptimizationV4: + def __init__(self, budget, dimension=5, population_size=200, elite_fraction=0.2, mutation_intensity=0.8): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Base intensity for mutation + + def __call__(self, func): + # Initialize the population uniformly within the search space [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select the elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population members + for i in range(self.population_size): + if np.random.rand() < self.adaptive_mutation_rate(evaluations): + # Mutation: pick a random elite, apply Gaussian noise + parent_idx = np.random.choice(elite_indices) + mutation = np.random.normal(0, self.adaptive_mutation_scale(evaluations), self.dimension) + child = np.clip(population[parent_idx] + mutation, -5.0, 5.0) + else: + # Crossover: pick two different elites, combine their features + parents = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + (population[parents[0], :crossover_point], population[parents[1], crossover_point:]) + ) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def adaptive_mutation_rate(self, evaluations): + # Gradually decrease mutation rate to allow more exploration initially + return max(0.1, 1 - (evaluations / self.budget)) + + def adaptive_mutation_scale(self, evaluations): + # Decay mutation scale logarithmically to fine-tune search in later stages + return self.mutation_intensity * (1 / (1 + np.log(1 + evaluations))) diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV6.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV6.py new file mode 100644 index 000000000..e2a488f15 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionCohortOptimizationV6.py @@ -0,0 +1,71 @@ +import numpy as np + + +class RefinedAdaptivePrecisionCohortOptimizationV6: + def __init__(self, budget, dimension=5, population_size=300, elite_fraction=0.2, mutation_intensity=1.2): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial intensity for mutation + + def __call__(self, func): + # Initialize the population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + + # Select elites based on current fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new candidates + for i in range(self.population_size): + if np.random.rand() < self.adaptive_mutation_rate(evaluations): + # Mutation: random elite perturbation + parent_idx = np.random.choice(elite_indices) + mutation = np.random.normal(0, self.adaptive_mutation_scale(evaluations), self.dimension) + child = np.clip(population[parent_idx] + mutation, -5.0, 5.0) + else: + # Crossover and mutation combined + parents = np.random.choice(elite_indices, 2, replace=False) + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate( + (population[parents[0], :crossover_point], population[parents[1], crossover_point:]) + ) + mutation = np.random.normal( + 0, self.adaptive_mutation_scale(evaluations) * 0.5, self.dimension + ) + child = np.clip(child + mutation, -5.0, 5.0) + + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + # Update the population + population = new_population + fitness = np.array([func(x) for x in population]) + + return best_fitness, best_individual + + def adaptive_mutation_rate(self, evaluations): + # Gradually reduce mutation rate for balanced exploration and exploitation + return max(0.1, 1 - (evaluations / self.budget * 1.2)) + + def adaptive_mutation_scale(self, evaluations): + # Gradual decay of mutation scale to refine search as evaluations progress + return self.mutation_intensity * (1 / (1 + np.log(1 + 0.5 * evaluations))) diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionDifferentialEvolution.py new file mode 100644 index 000000000..796b38881 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionDifferentialEvolution.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RefinedAdaptivePrecisionDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # The given dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = ( + 100 # Adjusted population size for balance between performance and computational cost + ) + mutation_factor = 0.9 # Initial high mutation factor to promote diverse exploration + crossover_prob = 0.85 # Starting crossover probability + adaptive_threshold = 0.2 # Modified threshold for adapting mutation and crossover + + # Initialize population randomly + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + # Main optimization loop + for generation in range(self.budget // population_size): + # Adapt mutation factor and crossover probability based on progress and fitness improvements + progress = generation / (self.budget // population_size) + if progress > adaptive_threshold and progress < 0.5: + mutation_factor *= 0.98 # Gradually decrease mutation factor + crossover_prob *= 0.99 # Gradually decrease crossover probability + elif progress >= 0.5: + mutation_factor *= 0.97 # Further decrease mutation factor for fine-tuning + crossover_prob *= 0.97 # Further decrease for stable convergence + + for i in range(population_size): + # Mutation using "current-to-best/1/bin" strategy for faster convergence on promising solutions + indices = [j for j in range(population_size) if j != i] + a, b = population[np.random.choice(indices, 2, replace=False)] + best = population[best_idx] + mutant = population[i] + mutation_factor * (best - population[i]) + mutation_factor * (a - b) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover - Binomial + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update the best solution found + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionDivideSearch.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionDivideSearch.py new file mode 100644 index 000000000..e419c534c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionDivideSearch.py @@ -0,0 +1,55 @@ +import numpy as np + + +class RefinedAdaptivePrecisionDivideSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize center point + center_point = np.random.uniform(-5.0, 5.0, self.dim) + center_f = func(center_point) + if center_f < self.f_opt: + self.f_opt = center_f + self.x_opt = center_point + + # Division strategy parameters + num_divisions = 5 # Reduce the number of initial divisions to focus more on refinement + division_size = 10.0 / num_divisions + refine_factor = 0.75 # Increase refinement factor for more aggressive focusing + initial_exploration_steps = max( + 1, self.budget // (num_divisions**self.dim) // 2 + ) # Balance between initial exploration and refinement + + # Generate a grid around the center point and explore each grid division + grid_offsets = np.linspace(-5.0, 5.0, num_divisions) + for offset_dims in np.ndindex(*(num_divisions,) * self.dim): + local_center = center_point + np.array([grid_offsets[dim] for dim in offset_dims]) + local_center = np.clip(local_center, -5.0, 5.0) # Ensure it is within bounds + local_scale = division_size / 2 + + # Local search within the grid division + for _ in range(initial_exploration_steps): + candidate = local_center + np.random.uniform(-local_scale, local_scale, self.dim) + candidate_f = func(candidate) + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Refinement phase + refinement_budget = (self.budget - initial_exploration_steps * num_divisions**self.dim) // ( + num_divisions**self.dim + ) + for _ in range(refinement_budget): + refined_scale = local_scale * refine_factor + refined_candidate = self.x_opt + np.random.uniform(-refined_scale, refined_scale, self.dim) + refined_candidate_f = func(refined_candidate) + if refined_candidate_f < self.f_opt: + self.f_opt = refined_candidate_f + self.x_opt = refined_candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionEvolutionStrategy.py new file mode 100644 index 000000000..5e310cb80 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionEvolutionStrategy.py @@ -0,0 +1,83 @@ +import numpy as np + + +class RefinedAdaptivePrecisionEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=40, + elite_fraction=0.1, + mutation_factor=0.8, + crossover_prob=0.7, + ): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.elite_fraction = elite_fraction + self.sigma = 0.5 # Initial standard deviation for Gaussian mutation + self.learning_rate = 0.2 # Learning rate for sigma adaptation + self.mutation_factor = mutation_factor # Factor for differential mutation + self.crossover_prob = crossover_prob # Probability of crossover + + def mutate(self, individual, best_individual): + """Differential evolution mutation""" + mutation = ( + np.random.normal(0, self.sigma, self.dimension) + * self.mutation_factor + * (best_individual - individual) + ) + return np.clip(individual + mutation, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent, donor): + """Uniform crossover""" + mask = np.random.rand(self.dimension) < self.crossover_prob + return np.where(mask, donor, parent) + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_fitness = np.min(fitness) + best_individual = population[np.argmin(fitness)] + evaluations = self.population_size + + while evaluations < self.budget: + num_elites = int(self.population_size * self.elite_fraction) + elites = population[np.argsort(fitness)[:num_elites]] + + # Create offspring using differential mutation and crossover + offspring = np.zeros_like(population) + for i in range(self.population_size): + donor = self.mutate(population[i], best_individual) + offspring[i] = self.crossover(population[i], donor) + + offspring_fitness = np.array([func(ind) for ind in offspring]) + evaluations += self.population_size + + # Combine elites and offspring, then select the next generation + combined_population = np.vstack((elites, offspring[num_elites:])) + combined_fitness = np.concatenate((fitness[:num_elites], offspring_fitness[num_elites:])) + indices = np.argsort(combined_fitness) + population = combined_population[indices] + fitness = combined_fitness[indices] + + # Update best found solution + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + # Adapt mutation step size using the success rate of mutations + successful_mutations = (offspring_fitness < fitness).mean() + self.sigma *= np.exp(self.learning_rate * (successful_mutations - 0.2) / (1 - 0.2)) + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionFocalHybrid.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionFocalHybrid.py new file mode 100644 index 000000000..92b7bf32a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionFocalHybrid.py @@ -0,0 +1,100 @@ +import numpy as np + + +class RefinedAdaptivePrecisionFocalHybrid: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + focal_ratio=0.1, + elite_ratio=0.05, + mutation_intensity=0.2, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.focal_population_size = int(population_size * focal_ratio) + self.elite_population_size = int(population_size * elite_ratio) + self.sigma = 0.3 # Initial standard deviation for mutations + self.learning_rate = 0.1 # Learning rate for self-adaptation of sigma + self.mutation_intensity = mutation_intensity # Intensity of mutation + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, local_sigma): + # Mutation with individual sigma + return np.clip( + individual + np.random.normal(0, local_sigma, self.dimension), self.bounds[0], self.bounds[1] + ) + + def select_focal_group(self, population, fitness): + # Select a smaller focal group based on the best fitness values + sorted_indices = np.argsort(fitness) + return population[sorted_indices[: self.focal_population_size]] + + def select_elite_group(self, population, fitness): + # Select the elite group for intense exploitation + sorted_indices = np.argsort(fitness) + return population[sorted_indices[: self.elite_population_size]] + + def recombine(self, focal_group): + # Global intermediate recombination from a focal group + return np.mean(focal_group, axis=0) + + def adapt_sigma(self, success_rate): + # Dynamically adjust sigma based on observed mutation success + if success_rate > 0.2: + self.sigma /= self.learning_rate + elif success_rate < 0.2: + self.sigma *= self.learning_rate + + def optimize(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + successful_mutations = 0 + + while evaluations < self.budget: + focal_group = self.select_focal_group(population, fitness) + elite_group = self.select_elite_group(population, fitness) + recombined_individual = self.recombine(focal_group) + + local_sigma = self.sigma * self.mutation_intensity + + for i in range(self.population_size): + if i < self.elite_population_size: + mutant = self.mutate(elite_group[i % self.elite_population_size], local_sigma) + else: + mutant = self.mutate(recombined_individual, local_sigma) + + mutant_fitness = func(mutant) + + if mutant_fitness < fitness[i]: + population[i] = mutant + fitness[i] = mutant_fitness + successful_mutations += 1 + + if mutant_fitness < best_fitness: + best_individual = mutant + best_fitness = mutant_fitness + + evaluations += 1 + if evaluations >= self.budget: + break + + # Adjust mutation strategy based on success + success_rate = successful_mutations / self.population_size + self.adapt_sigma(success_rate) + successful_mutations = 0 # Reset for next generation + + return best_fitness, best_individual + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionHybridSearch.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionHybridSearch.py new file mode 100644 index 000000000..05621ec01 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionHybridSearch.py @@ -0,0 +1,73 @@ +import numpy as np + + +class RefinedAdaptivePrecisionHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 400 + elite_size = int(0.25 * population_size) + mutation_rate = 0.15 + mutation_scale = lambda t: 0.1 * np.exp(-0.0004 * t) + crossover_rate = 0.90 + + local_search_prob_base = 0.20 + local_search_decay = 0.0001 + local_search_step_scale = lambda t: 0.02 * np.exp(-0.00005 * t) + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + local_search_prob = local_search_prob_base * np.exp(-local_search_decay * evaluations) + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptivePrecisionStrategicOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptivePrecisionStrategicOptimizer.py new file mode 100644 index 000000000..f29686b04 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptivePrecisionStrategicOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedAdaptivePrecisionStrategicOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + mutation_factor = 0.9 # Slightly increased base mutation factor + crossover_probability = 0.8 # Slightly increased crossover probability + elite_size = 10 # Increased elite size + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptive mechanism for mutation and crossover + success_rate = np.zeros(population_size) # Track success for each individual + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with adaptive factor + adaptive_mutation_factor = mutation_factor + 0.2 * (2 * success_rate[i] - 1) + mutated = a + adaptive_mutation_factor * (b - c) + mutated = np.clip(mutated, self.lower_bound, self.upper_bound) + + # Adaptive crossover probability + adaptive_crossover_probability = crossover_probability + 0.2 * (2 * success_rate[i] - 1) + + # Crossover + trial_vector = np.where( + np.random.rand(self.dim) < adaptive_crossover_probability, mutated, population[i] + ) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + success_rate[i] += 1 # Increment success count for this individual + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + success_rate[i] = max(0, success_rate[i] - 1) # Decrement or maintain success rate + + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumCrossoverStrategyV3.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumCrossoverStrategyV3.py new file mode 100644 index 000000000..2ed0a58e3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumCrossoverStrategyV3.py @@ -0,0 +1,91 @@ +import numpy as np + + +class RefinedAdaptiveQuantumCrossoverStrategyV3: + def __init__( + self, + budget, + dimension=5, + population_size=500, + elite_fraction=0.03, + mutation_intensity=0.1, + crossover_rate=0.95, + quantum_prob=0.3, + gamma=0.15, + beta=0.4, + epsilon=0.01, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + self.beta = beta # Coefficient for dynamic mutation intensity adjustment + self.epsilon = epsilon # Minimum mutation intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Apply a controlled quantum state update to explore potential better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolution.py new file mode 100644 index 000000000..612014ab2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolution.py @@ -0,0 +1,138 @@ +import numpy as np + + +class RefinedAdaptiveQuantumDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.alpha = 0.1 # Scale for quantum jumps + self.local_search_budget = 5 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, idx)) + list(range(idx + 1, self.pop_size)) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < func(individual): + individual = trial + return individual + + def quantum_jump(self, individual, global_best, alpha): + return np.clip(individual + alpha * np.random.randn(self.dim) * (global_best - individual), -5.0, 5.0) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + elite_indices = np.argsort(fitness)[: self.pop_size // 2] + elite_population = new_population[elite_indices] + + for i in range(len(elite_indices)): + elite_population[i] = self.local_search(elite_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + population = np.copy(new_population) + for i in range(self.pop_size): + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump(population[i], global_best_position, self.alpha) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolutionPlus.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolutionPlus.py new file mode 100644 index 000000000..713dda8cc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumDifferentialEvolutionPlus.py @@ -0,0 +1,89 @@ +import numpy as np + + +class RefinedAdaptiveQuantumDifferentialEvolutionPlus: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.7 # Initial Differential weight + self.initial_CR = 0.8 # Initial Crossover probability + self.elite_rate = 0.2 # Elite rate to maintain a portion of elites + self.amplitude = 0.1 # Quantum amplitude + self.eval_count = 0 + + def __call__(self, func): + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + def quantum_position_update(position, best_position): + return position + np.random.uniform(-self.amplitude, self.amplitude, position.shape) * ( + best_position - position + ) + + def adapt_parameters(): + # Self-adaptive strategy for F and CR with random components + adaptive_F = self.initial_F + (0.1 * np.random.rand() - 0.05) + adaptive_CR = self.initial_CR + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + # Sort population by fitness and maintain elites + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = np.clip(a + F * (b - c), self.lower_bound, self.upper_bound) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + candidate = trial + if self.eval_count % 2 == 0: # Apply quantum every second step for balance + candidate = quantum_position_update( + trial, best_position if best_position is not None else trial + ) + + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + # Update population for the next iteration + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumEliteDE.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumEliteDE.py new file mode 100644 index 000000000..c4f1d59fe --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumEliteDE.py @@ -0,0 +1,186 @@ +import numpy as np + + +class RefinedAdaptiveQuantumEliteDE: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumEntropyDE.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumEntropyDE.py new file mode 100644 index 000000000..dcd12e64f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumEntropyDE.py @@ -0,0 +1,153 @@ +import numpy as np + + +class RefinedAdaptiveQuantumEntropyDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = self.entropy_based_selection(population, fitness) + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientBoostedMemeticSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientBoostedMemeticSearch.py new file mode 100644 index 000000000..bc13eddea --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientBoostedMemeticSearch.py @@ -0,0 +1,151 @@ +import numpy as np + + +class RefinedAdaptiveQuantumGradientBoostedMemeticSearch: + def __init__( + self, + budget, + population_size=50, + tau1=0.1, + tau2=0.1, + memetic_rate=0.6, + alpha=0.2, + learning_rate=0.01, + elite_fraction=0.1, + ): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.alpha = alpha + self.learning_rate = learning_rate + self.elite_fraction = elite_fraction + + def gradient_estimation(self, func, x, h=1e-7): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best): + return np.clip(x + self.alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def quantum_boosted_search(self, func, pop, scores, global_best): + boosted_pop = np.copy(pop) + boosted_scores = np.copy(scores) + + for i in range(self.population_size): + boosted_pop[i] = self.quantum_walk(boosted_pop[i], global_best) + boosted_scores[i] = func(boosted_pop[i]) + + best_idx = np.argmin(boosted_scores) + if boosted_scores[best_idx] < scores[best_idx]: + pop[best_idx] = boosted_pop[best_idx] + scores[best_idx] = boosted_scores[best_idx] + + return pop, scores + + def elite_preservation(self, pop, scores): + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + return pop[elite_idx], scores[elite_idx] + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + for iteration in range(max_iterations): + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform quantum boosted search + pop, scores = self.quantum_boosted_search(func, pop, scores, global_best_position) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + + # Perform elite preservation + elite_pop, elite_scores = self.elite_preservation(pop, scores) + pop[: len(elite_pop)] = elite_pop + scores[: len(elite_scores)] = elite_scores + + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientExplorationOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientExplorationOptimization.py new file mode 100644 index 000000000..45176fcaf --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientExplorationOptimization.py @@ -0,0 +1,221 @@ +import numpy as np + + +class RefinedAdaptiveQuantumGradientExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.6 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + + # Differential Evolution parameters + F_init = 0.5 # Initial Differential weight + F = F_init # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + thetas = [np.pi / 4, np.pi / 6, np.pi / 8] # Multiple rotation angles + rotation_matrices = [ + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) for theta in thetas + ] + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrices + if i % 100 == 0 and i > 0: + for rotation_matrix in rotation_matrices: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx][:2]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Adjust Differential Evolution parameter F dynamically + if i % 50 == 0 and i > 0: + if self.f_opt < global_best_score: + F *= 1.05 # Increase F if the global best is improving + else: + F = F_init # Reset F if no improvement + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedAdaptiveQuantumGradientExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientHybridOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientHybridOptimizer.py new file mode 100644 index 000000000..c327f7649 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumGradientHybridOptimizer.py @@ -0,0 +1,92 @@ +import numpy as np + + +class RefinedAdaptiveQuantumGradientHybridOptimizer: + def __init__( + self, + budget, + dimension=5, + population_size=250, + elite_ratio=0.2, + mutation_intensity=1.2, + crossover_rate=0.8, + quantum_prob=0.85, + gradient_boost_prob=0.35, + adaptive_factor=0.08, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob + self.gradient_boost_prob = gradient_boost_prob + self.adaptive_factor = adaptive_factor + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + parent_indices = np.random.choice(elite_indices, 2, replace=False) + child = self.crossover(population[parent_indices[0]], population[parent_indices[1]]) + else: + parent_idx = np.random.choice(elite_indices) + child = population[parent_idx].copy() + + if np.random.random() < self.gradient_boost_prob: + child = self.gradient_boost(child, func) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + mutation_scale = self.adaptive_mutation_scale(evaluations) + child = np.clip(child + np.random.normal(0, mutation_scale, self.dimension), -5, 5) + + new_population[i] = child + + population = new_population + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + new_best_idx = np.argmin(fitness) + if fitness[new_best_idx] < best_fitness: + best_fitness = fitness[new_best_idx] + best_individual = population[new_best_idx] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def gradient_boost(self, individual, func, lr=0.01): + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = np.array(individual) + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - lr * grad_est + + def quantum_state_update(self, individual, best_individual): + return individual + np.random.normal(0, self.adaptive_factor, self.dimension) * ( + best_individual - individual + ) + + def adaptive_mutation_scale(self, evaluations): + return self.mutation_intensity * np.exp(-self.adaptive_factor * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumPSO.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumPSO.py new file mode 100644 index 000000000..c8d2f6344 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumPSO.py @@ -0,0 +1,111 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdaptiveQuantumPSO: + def __init__(self, budget=10000, population_size=100): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.inertia_weight = 0.729 + self.cognitive_weight = 1.49445 + self.social_weight = 1.49445 + self.quantum_weight = 0.3 + self.adaptive_threshold = 0.1 + self.elite_fraction = 0.25 + self.memory_size = 5 # Memory size for tracking performance + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = np.copy(population) + personal_best_scores = np.copy(fitness) + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + last_best_fitness = best_fitness + + performance_memory = [best_fitness] * self.memory_size + adaptive_factor = 1.0 + + while eval_count < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * r1 * (personal_best_positions[i] - population[i]) + + self.social_weight * r2 * (best_individual - population[i]) + ) + + # Quantum behavior + if np.random.rand() < self.quantum_weight: + quantum_step = np.random.normal(0, 1, self.dim) + population[i] = best_individual + 0.5 * quantum_step + else: + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + trial_fitness = evaluate(population[i]) + eval_count += 1 + + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + personal_best_positions[i] = population[i] + personal_best_scores[i] = trial_fitness + + if trial_fitness < best_fitness: + best_individual = population[i] + best_fitness = trial_fitness + + if eval_count >= self.budget: + break + + performance_memory.append(best_fitness) + if len(performance_memory) > self.memory_size: + performance_memory.pop(0) + + mean_recent_performance = np.mean(performance_memory) + if best_fitness < mean_recent_performance * (1 - self.adaptive_threshold): + adaptive_factor *= 0.9 + self.quantum_weight *= adaptive_factor + else: + adaptive_factor *= 1.1 + self.quantum_weight *= adaptive_factor + + if eval_count < self.budget: + elite_indices = np.argsort(fitness)[: int(self.population_size * self.elite_fraction)] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + eval_count += res[2]["nit"] + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + personal_best_positions[idx] = res[0] + personal_best_scores[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + if eval_count >= self.budget: + break + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start, tol=1e-6, max_iter=100): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=tol, + options={"maxiter": max_iter}, + ) + return res.x, res.fun, res diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuantumSwarmOptimizerV3.py b/nevergrad/optimization/lama/RefinedAdaptiveQuantumSwarmOptimizerV3.py new file mode 100644 index 000000000..55ca8df2b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuantumSwarmOptimizerV3.py @@ -0,0 +1,88 @@ +import numpy as np + + +class RefinedAdaptiveQuantumSwarmOptimizerV3: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coefficient=2.0, + social_coefficient=2.0, + inertia_decay=0.98, + quantum_jump_rate=0.2, + quantum_scale=0.2, + adaptive_depth=20, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_depth = ( + adaptive_depth # Depth of historical performance to adapt parameters dynamically + ) + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + performance_history = [] + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhanced Quantum jump for better exploration + particles[i] = global_best + np.random.normal(0, self.quantum_scale, self.dim) * ( + self.ub - self.lb + ) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Classical PSO update for better exploitation + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + performance_history.append(global_best_score) + + # More robust adaptive parameter tuning based on deeper historical performance + if len(performance_history) > self.adaptive_depth: + recent_progress = np.mean(np.diff(performance_history[-self.adaptive_depth :])) + if recent_progress > 0: + # Increase quantum jump rate slightly if improvements are observed + self.quantum_jump_rate = min(self.quantum_jump_rate * 1.05, 0.9) + else: + # Reduce quantum jump rate to stabilize and focus on exploitation + self.quantum_jump_rate = max(self.quantum_jump_rate * 0.95, 0.1) + self.inertia_weight *= self.inertia_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomDEGradientAnnealing.py b/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomDEGradientAnnealing.py new file mode 100644 index 000000000..3470195c1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomDEGradientAnnealing.py @@ -0,0 +1,152 @@ +import numpy as np +from scipy.stats import qmc + + +class RefinedAdaptiveQuasiRandomDEGradientAnnealing: + def __init__(self, budget, population_size=30, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.elitism_rate = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + threshold = 1e-3 + diversity_enhanced = False + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < threshold: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + diversity_enhanced = True + return diversity_enhanced + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + crossover_rate = self.crossover_rate + mutation_factor = self.mutation_factor + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution with Elitism + elite_count = int(self.elitism_rate * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + # Cool down the temperature + self.temperature *= self.cooling_rate + + # Maintain diversity + diversity_enhanced = maintain_diversity(population, fitness) + + # Adaptive parameter control + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + crossover_rate *= 1.05 + mutation_factor = min(1.0, mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + crossover_rate *= 0.95 + mutation_factor = max(0.5, mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + crossover_rate = np.clip(crossover_rate, 0.1, 0.9) + + # Enhanced diversity strategy: Reduce learning rate if diversity was enforced + if diversity_enhanced: + self.base_lr *= 0.9 + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveQuasiRandomDEGradientAnnealing(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution.py new file mode 100644 index 000000000..c120ce90c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution.py @@ -0,0 +1,120 @@ +import numpy as np +from scipy.stats import qmc + + +class RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution: + def __init__( + self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8, base_lr=0.1, epsilon=1e-8 + ): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = base_lr + self.epsilon = epsilon + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + self.crossover_rate *= 1.1 + else: + self.base_lr *= 0.9 + self.crossover_rate *= 0.9 + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAdaptiveRefinementPSO.py b/nevergrad/optimization/lama/RefinedAdaptiveRefinementPSO.py new file mode 100644 index 000000000..4458e7362 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveRefinementPSO.py @@ -0,0 +1,65 @@ +import numpy as np + + +class RefinedAdaptiveRefinementPSO: + def __init__( + self, budget=10000, population_size=50, omega_start=0.9, omega_end=0.4, phi_p=0.5, phi_g=0.8 + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start # Initial inertia weight + self.omega_end = omega_end # Final inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_fitness = np.array([func(p) for p in particles]) + + global_best = particles[np.argmin(personal_best_fitness)] + global_best_fitness = min(personal_best_fitness) + + evaluations = self.population_size + + # Optimization loop + while evaluations < self.budget: + for i in range(self.population_size): + # Linearly decreasing inertia weight + dynamic_omega = self.omega_start - (self.omega_start - self.omega_end) * ( + evaluations / self.budget + ) + + # Update velocity and position + r_p = np.random.random(self.dim) + r_g = np.random.random(self.dim) + velocity[i] = ( + dynamic_omega * velocity[i] + + self.phi_p * r_p * (personal_best[i] - particles[i]) + + self.phi_g * r_g * (global_best - particles[i]) + ) + + particles[i] += velocity[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate particle's fitness + current_fitness = func(particles[i]) + evaluations += 1 + + if evaluations >= self.budget: + break + + # Update personal and global bests + if current_fitness < personal_best_fitness[i]: + personal_best[i] = particles[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best = particles[i] + global_best_fitness = current_fitness + + return global_best_fitness, global_best diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSimulatedAnnealingWithSmartMemory.py b/nevergrad/optimization/lama/RefinedAdaptiveSimulatedAnnealingWithSmartMemory.py new file mode 100644 index 000000000..b08aa3e1e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSimulatedAnnealingWithSmartMemory.py @@ -0,0 +1,157 @@ +import numpy as np + + +class RefinedAdaptiveSimulatedAnnealingWithSmartMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Smart Memory Reinforcement + if evaluations % (self.budget // 10) == 0: + best_idx = np.argmin(memory_scores) + for _ in range(memory_size // 4): + x_candidate = memory[best_idx] + np.random.normal(0, T, self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=150, step_size=0.008): # Refined local refinement + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSpatialExplorationOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveSpatialExplorationOptimizer.py new file mode 100644 index 000000000..ca0bc9336 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSpatialExplorationOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedAdaptiveSpatialExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 150 # Increased population size for better exploration + mutation_factor = 0.8 # More aggressive initial mutation + crossover_rate = 0.7 # Initial crossover rate + elite_size = 10 # Increased elite size + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main loop + while evaluations < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Elitism: carry forward best solutions + sorted_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[sorted_indices] + new_fitness[:elite_size] = fitness[sorted_indices] + + # Generate new candidates for the rest of the population + for i in range(elite_size, population_size): + # Tournament selection + idxs = np.random.choice(population_size, 3, replace=False) + if fitness[idxs[0]] < fitness[idxs[1]]: + better_idx = idxs[0] + else: + better_idx = idxs[1] + + target = population[better_idx] + a, b, c = population[np.random.choice(population_size, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < crossover_rate + trial = np.where(cross_points, mutant, target) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + new_population[i] = trial if trial_fitness < fitness[better_idx] else population[better_idx] + new_fitness[i] = trial_fitness if trial_fitness < fitness[better_idx] else fitness[better_idx] + + population = new_population + fitness = new_fitness + + # Update best solution if found + current_best_index = np.argmin(fitness) + if fitness[current_best_index] < best_fitness: + best_fitness = fitness[current_best_index] + best_solution = population[current_best_index] + + # Update parameters dynamically + if evaluations % (self.budget // 10) == 0: + mutation_factor *= 0.95 # Gradually reduce mutation to stabilize convergence + crossover_rate *= 0.98 # Slowly reduce the crossover rate + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSpatialOptimizer.py b/nevergrad/optimization/lama/RefinedAdaptiveSpatialOptimizer.py new file mode 100644 index 000000000..ac6a1d8fd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSpatialOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class RefinedAdaptiveSpatialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 150 # Increased population size for better exploration + mutation_factor = 0.5 # Starting lower for a careful exploration + crossover_prob = 0.8 # Higher crossover for increased diversity + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Select three random individuals different from i, including best for bias + indices = np.arange(population_size) + indices = np.delete(indices, i) + x1, x2, x3 = population[np.random.choice(indices, 3, replace=False)] + + # Mutation incorporating elite solution influence + elite = population[best_index] + mutant = x1 + mutation_factor * (elite - x1 + x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + best_index = np.argmin(fitness) + + # Adaptive parameter tuning based on statistical feedback + mutation_factor = max( + 0.1, min(0.9, mutation_factor - 0.05 * (np.std(fitness) / np.mean(fitness))) + ) + crossover_prob = min(0.9, crossover_prob + 0.05 * (1 - (np.std(fitness) / np.mean(fitness)))) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSpectralEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveSpectralEvolution.py new file mode 100644 index 000000000..1abb115d9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSpectralEvolution.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedAdaptiveSpectralEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 + elite_size = 10 + mutation_factor = 0.9 + crossover_probability = 0.9 + spectral_radius = 0.5 + catastrophe_frequency = 1000 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + for i in range(population_size): + # Spectral mutation + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = a + mutation_factor * np.random.normal() * (b - c) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Adaptive spectral recombination + direction = self.x_opt - population[i] + spectral_mutation = population[i] + spectral_radius * np.random.normal() * direction + spectral_mutation = np.clip(spectral_mutation, self.lb, self.ub) + + # Crossover + trial_vector = np.array( + [ + ( + mutant_vector[j] + if np.random.rand() < crossover_probability or j == np.random.randint(self.dim) + else spectral_mutation[j] + ) + for j in range(self.dim) + ] + ) + + # Fitness evaluation and selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + else: + new_population.append(population[i]) + + population = np.array(new_population) + + # Catastrophic mutation after fixed intervals + if evaluations % catastrophe_frequency == 0: + for j in range(int(population_size * 0.1)): # Affect 10% of the population + catastrophic_idx = np.random.randint(population_size) + population[catastrophic_idx] = np.random.uniform(self.lb, self.ub, self.dim) + + # Dynamic elite preservation + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in np.random.choice(range(population_size), elite_size, replace=False): + population[idx] = elite_individuals[np.random.randint(elite_size)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSpiralGradientSearch.py b/nevergrad/optimization/lama/RefinedAdaptiveSpiralGradientSearch.py new file mode 100644 index 000000000..6854448ac --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSpiralGradientSearch.py @@ -0,0 +1,62 @@ +import numpy as np + + +class RefinedAdaptiveSpiralGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial setup + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Start with a full range + angle_increment = np.pi / 4 # Broader angle for initial exploration + + # Adaptive parameters + radius_decay = 0.92 # Slowly decrease radius + angle_refinement = 0.85 # Refine angles for closer exploration + evaluations_left = self.budget + min_radius = 0.005 # Prevent the radius from becoming too small + + # This array holds the last few best points to calculate a moving centroid + historical_best = centroid.copy() + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max(int(2 * np.pi / angle_increment), 5) # Ensure at least 5 points + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + displacement = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Update the centroid towards the best found point in this iteration + if points: + best_index = np.argmin(function_values) + historical_best = 0.7 * historical_best + 0.3 * points[best_index] + centroid = historical_best + + # Dynamically update radius and angle increment + radius *= radius_decay + radius = max(radius, min_radius) + angle_increment *= angle_refinement + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdaptiveStochasticGradientQuorumOptimization.py b/nevergrad/optimization/lama/RefinedAdaptiveStochasticGradientQuorumOptimization.py new file mode 100644 index 000000000..cfc06d8fb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveStochasticGradientQuorumOptimization.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedAdaptiveStochasticGradientQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.3, + mutation_scale=0.1, + momentum=0.5, + learning_rate=0.05, + decay_rate=0.99, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(max(1, population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.learning_rate = learning_rate + self.decay_rate = decay_rate # Adaptive decay rate for learning rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best solution + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select elite indices including the best individual + elite_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + elite_indices = np.append(elite_indices, best_idx) + elite_individuals = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Determine the local best among the elites + local_best_idx = np.argmin(elite_fitness) + local_best = elite_individuals[local_best_idx] + + # Modified update strategy using a weighted gradient and momentum + gradient = best_individual - local_best + random_noise = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = (gradient * random_noise + self.momentum * velocity) * self.learning_rate + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution if necessary + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + velocity = child - local_best + + new_population[i, :] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptive parameters modifications + self.learning_rate *= self.decay_rate + self.mutation_scale *= np.random.uniform(0.9, 1.1) + self.elite_count = int(max(1, self.population_size * np.random.uniform(0.25, 0.35))) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveStochasticHybridEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveStochasticHybridEvolution.py new file mode 100644 index 000000000..fa6abb786 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveStochasticHybridEvolution.py @@ -0,0 +1,62 @@ +import numpy as np + + +class RefinedAdaptiveStochasticHybridEvolution: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=200): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_top_individuals(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def adapt_mutation_strength(self, best_score, current_score, base_strength=0.3, scale_factor=0.85): + if current_score < best_score: + return base_strength * scale_factor + else: + return base_strength / scale_factor + + def mutate_population(self, population, strength): + mutations = np.random.normal(0, strength, population.shape) + return np.clip(population + mutations, self.lower_bound, self.upper_bound) + + def recombine_population(self, best_individuals, population_size): + num_top = len(best_individuals) + extended_population = np.repeat( + best_individuals, np.ceil(population_size / num_top).astype(int), axis=0 + )[:population_size] + random_indices = np.random.randint(0, num_top, size=(population_size, self.dim)) + for i in range(self.dim): + extended_population[:, i] = best_individuals[random_indices[:, i], i] + return extended_population + + def __call__(self, func): + population_size = 200 + num_generations = max(1, self.budget // population_size) + num_best = 10 # Increased top individuals to focus on + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_individuals, best_fitness = self.select_top_individuals(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_individuals[0] + + strength = self.adapt_mutation_strength(best_score, best_fitness[0]) + new_population = self.recombine_population(best_individuals, population_size) + population = self.mutate_population(new_population, strength) + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/RefinedAdaptiveSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdaptiveSwarmDifferentialEvolution.py new file mode 100644 index 000000000..deac1c0d7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdaptiveSwarmDifferentialEvolution.py @@ -0,0 +1,50 @@ +import numpy as np + + +class RefinedAdaptiveSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 300 # Population size maintained as 300 + self.F_base = 0.5 # Base mutation factor + self.CR = 0.9 # Crossover probability + self.adapt_rate = 0.1 # Adaptation rate for mutation factor + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + F_adaptive = self.F_base + self.adapt_rate * np.sin(i / (self.budget / self.pop_size) * np.pi) + + for j in range(self.pop_size): + # Mutation strategy: DE/rand/1/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = pop[j] + F_adaptive * (a - b) + F_adaptive * (c - pop[j]) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..e050bc4ed --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution.py @@ -0,0 +1,159 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 30 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + self.history = [] + self.dynamic_adjustment_period = 10 + self.dynamic_parameters_adjustment_threshold = 10 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + bounds = [(self.lb, self.ub)] * self.dim + result = minimize(func, x, method="L-BFGS-B", bounds=bounds) + return result.x, result.fun + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.dynamic_parameters_adjustment_threshold: + self.strategy_weights = (self.strategy_success + 1) / (self.strategy_success.sum() + 4) + self.strategy_success.fill(0) + self.no_improvement_count = 0 + self.dynamic_parameters() + + # Dynamic population resizing based on performance + if self.no_improvement_count >= self.dynamic_adjustment_period: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self.history.append(self.f_opt) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..0b49c3385 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory.py @@ -0,0 +1,160 @@ +import numpy as np + + +class RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.7 # Adjusted inertia weight for PSO + c1 = 1.5 # Adjusted cognitive coefficient for PSO + c2 = 1.5 # Adjusted social coefficient for PSO + initial_F = 0.8 # Adjusted initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 10 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.3 + 0.7 * np.random.rand() # Smaller range for F to avoid too large jumps + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedArchiveEnhancedAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedArchiveEnhancedAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..e87533702 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedArchiveEnhancedAdaptiveDifferentialEvolution.py @@ -0,0 +1,166 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class RefinedArchiveEnhancedAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, mutation_factor=0.7, crossover_rate=0.9, cluster_size=5): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.mutation_factor = mutation_factor + self.crossover_rate = crossover_rate + self.cluster_size = cluster_size + self.epsilon = 1e-8 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + kmeans = KMeans(n_clusters=self.cluster_size, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for i in range(len(population)): + if np.linalg.norm(population[i] - cluster_centers[kmeans.labels_[i]]) < 1e-1: + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_params(success_rate): + if success_rate > 0.2: + new_mutation_factor = self.mutation_factor * 1.1 + new_crossover_rate = self.crossover_rate * 1.05 + else: + new_mutation_factor = self.mutation_factor * 0.9 + new_crossover_rate = self.crossover_rate * 0.95 + return np.clip(new_mutation_factor, 0.4, 1.0), np.clip(new_crossover_rate, 0.5, 1.0) + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.epsilon + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + success_count_history = [] + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + self.archive.append(self.x_opt) + + maintain_diversity(population, fitness) + success_rate = success_count / self.population_size + self.mutation_factor, self.crossover_rate = adaptive_params(success_rate) + + success_count_history.append(success_rate) + if len(success_count_history) > 10: + success_count_history.pop(0) + + avg_success_rate = np.mean(success_count_history) + + if avg_success_rate > 0.2: + self.mutation_factor *= 1.1 + self.crossover_rate *= 1.05 + else: + self.mutation_factor *= 0.9 + self.crossover_rate *= 0.95 + + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.5, 1.0) + + if len(self.archive) > 0: + archive_selection = np.random.choice(len(self.archive)) + archive_mutant = np.clip( + self.archive[archive_selection] + self.mutation_factor * np.random.randn(self.dim), + self.bounds[0], + self.bounds[1], + ) + archive_mutant = np.clip(archive_mutant, self.bounds[0], self.bounds[1]) + archive_fitness = func(archive_mutant) + evaluations += 1 + if archive_fitness < self.f_opt: + self.f_opt = archive_fitness + self.x_opt = archive_mutant + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedArchiveEnhancedAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedAttenuatedAdaptiveEvolver.py b/nevergrad/optimization/lama/RefinedAttenuatedAdaptiveEvolver.py new file mode 100644 index 000000000..ee7e5b028 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedAttenuatedAdaptiveEvolver.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedAttenuatedAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=120, + initial_step_size=0.3, + step_decay=0.95, + elite_ratio=0.25, + mutation_probability=0.1, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_probability = mutation_probability + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + if np.random.rand() < self.mutation_probability: + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + return individual + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * ( + self.step_decay**generation + ) # Adjusted step size decay for better exploration + + new_population = np.array([self.mutate(ind, scale) for ind in population]) + new_fitness = self.evaluate_population(func, new_population) + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + indices = np.argsort(combined_fitness) + population = combined_population[indices[: self.population_size]] + fitness = combined_fitness[indices[: self.population_size]] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedBalancedAdaptiveElitistStrategy.py b/nevergrad/optimization/lama/RefinedBalancedAdaptiveElitistStrategy.py new file mode 100644 index 000000000..1da830553 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedBalancedAdaptiveElitistStrategy.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedBalancedAdaptiveElitistStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=50, + elite_fraction=0.2, + mutation_intensity=0.1, + crossover_rate=0.7, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity # Initial mutation intensity factor + self.crossover_rate = crossover_rate # Probability of crossover + + def __call__(self, func): + # Initialize the population within bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Identify elite individuals + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if i < self.elite_count: + # Elite individuals are carried over unchanged + new_population[i] = population[elite_indices[i]] + else: + # Generate new individuals by mutation and crossover + if np.random.random() < self.crossover_rate: + # Perform crossover + parent1, parent2 = elites[np.random.choice(len(elites), 2, replace=False)] + child = self.crossover(parent1, parent2) + else: + # Directly mutate an elite + parent = elites[np.random.randint(0, self.elite_count)] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + + # Evaluate new individual's fitness + new_fitness = func(new_population[i]) + if new_fitness < fitness[i]: + fitness[i] = new_fitness + population[i] = new_population[i] + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation scale decreases over time + scale = self.mutation_intensity * (1 - (evaluations / self.budget)) + return individual + np.random.normal(0, scale, self.dimension) + + def crossover(self, parent1, parent2): + # Uniform crossover + mask = np.random.rand(self.dimension) < 0.5 + return np.where(mask, parent1, parent2) diff --git a/nevergrad/optimization/lama/RefinedBalancedExplorationOptimizer.py b/nevergrad/optimization/lama/RefinedBalancedExplorationOptimizer.py new file mode 100644 index 000000000..27e8924b2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedBalancedExplorationOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedBalancedExplorationOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + mutation_factor = 0.9 + crossover_probability = 0.9 + elite_size = 10 + + # Initialize population and evaluate + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Adaptation factors + adaptive_mutation = np.full(population_size, mutation_factor) + adaptive_crossover = np.full(population_size, crossover_probability) + success_tracker = np.zeros(population_size) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Elite retention + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + for i in range(elite_size, population_size): + # Mutation and Crossover + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Mutation with adaptive factors + mutant = a + adaptive_mutation[i] * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.where(np.random.rand(self.dim) < adaptive_crossover[i], mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + success_tracker[i] += 1 + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + success_tracker[i] = max(0, success_tracker[i] - 1) + + # Update adaptive mutation and crossover probabilities + if success_tracker[i] > 2: + adaptive_mutation[i] = min(1.0, adaptive_mutation[i] + 0.02) + adaptive_crossover[i] = min(1.0, adaptive_crossover[i] + 0.02) + elif success_tracker[i] == 0: + adaptive_mutation[i] = max(0.5, adaptive_mutation[i] - 0.02) + adaptive_crossover[i] = max(0.5, adaptive_crossover[i] - 0.02) + + # Update best solution + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedCMADiffEvoPSO.py b/nevergrad/optimization/lama/RefinedCMADiffEvoPSO.py new file mode 100644 index 000000000..5425a31ed --- /dev/null +++ b/nevergrad/optimization/lama/RefinedCMADiffEvoPSO.py @@ -0,0 +1,127 @@ +import numpy as np + + +class RefinedCMADiffEvoPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.min_pop_size = 20 + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 2.0 + self.c2 = 2.0 + self.w = 0.7 + self.restart_threshold = 50 + self.sigma = 0.3 + self.diversity_threshold = 0.1 # Threshold for population diversity + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.initial_pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return parent1 + F * (parent2 - parent3) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def cma_update(self, population, mean, cov_matrix): + new_samples = np.random.multivariate_normal(mean, cov_matrix, size=population.shape[0]) + return np.clip(new_samples, -5.0, 5.0) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + no_improvement_counter = 0 + + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.initial_pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F = np.random.uniform(0.4, 0.9) + CR = np.random.uniform(0.6, 1.0) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Check for diversity + if ( + self.diversity(population) < self.diversity_threshold + or no_improvement_counter >= self.restart_threshold + ): + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + evaluations += self.initial_pop_size + + # CMA Update + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population, rowvar=False) + population = self.cma_update(population, mean, cov_matrix) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedConcentricDiversityStrategy.py b/nevergrad/optimization/lama/RefinedConcentricDiversityStrategy.py new file mode 100644 index 000000000..e46041685 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedConcentricDiversityStrategy.py @@ -0,0 +1,113 @@ +import numpy as np + + +class RefinedConcentricDiversityStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=100, + population_per_island=50, + migration_interval=10, + migration_rate=0.2, + mutation_intensity=2.0, + mutation_decay=0.95, + elite_ratio=0.25, + crossover_probability=0.9, + tournament_size=3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_interval = migration_interval + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + generation = 0 + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if generation % self.migration_interval == 0: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + generation += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedConcentricQuantumCrossoverStrategyV5.py b/nevergrad/optimization/lama/RefinedConcentricQuantumCrossoverStrategyV5.py new file mode 100644 index 000000000..c5a32f26a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedConcentricQuantumCrossoverStrategyV5.py @@ -0,0 +1,91 @@ +import numpy as np + + +class RefinedConcentricQuantumCrossoverStrategyV5: + def __init__( + self, + budget, + dimension=5, + population_size=800, + elite_fraction=0.05, + mutation_intensity=0.06, + crossover_rate=0.85, + quantum_prob=0.4, + gamma=0.2, + beta=0.6, + epsilon=0.003, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.quantum_prob = quantum_prob # Probability to perform quantum-inspired state update + self.gamma = gamma # Scaling factor for quantum perturbation + self.beta = beta # Coefficient for dynamic mutation intensity adjustment + self.epsilon = epsilon # Minimum mutation intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Main optimization loop + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + parent2 = elites[np.random.choice(len(elites))] + child = self.crossover(parent1, parent2) + else: + child = self.mutate(parent1, evaluations) + + if np.random.random() < self.quantum_prob: + child = self.quantum_state_update(child, best_individual) + + new_population[i] = np.clip(child, -5, 5) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + intensity = max( + self.epsilon, self.mutation_intensity * np.exp(-self.beta * evaluations / self.budget) + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, best_individual): + """Apply a controlled quantum state update to explore potential better solutions""" + perturbation = ( + np.random.uniform(-1, 1, self.dimension) * self.gamma * np.abs(best_individual - individual) + ) + return individual + perturbation diff --git a/nevergrad/optimization/lama/RefinedConvergenceAdaptiveOptimizer.py b/nevergrad/optimization/lama/RefinedConvergenceAdaptiveOptimizer.py new file mode 100644 index 000000000..424a31500 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedConvergenceAdaptiveOptimizer.py @@ -0,0 +1,96 @@ +import numpy as np + + +class RefinedConvergenceAdaptiveOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=200, + elite_fraction=0.1, + mutation_intensity=0.2, + crossover_probability=0.7, + gradient_step=0.05, + mutation_decay=0.98, + gradient_enhancement_cycle=5, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = np.full(dimension, lower_bound) + self.upper_bound = np.full(dimension, upper_bound) + self.population_size = population_size + self.elite_fraction = elite_fraction + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.gradient_step = gradient_step + self.mutation_decay = mutation_decay + self.gradient_enhancement_cycle = gradient_enhancement_cycle + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(individual) for individual in population]) + + def select_elites(self, population, fitness): + elite_count = int(self.population_size * self.elite_fraction) + elite_indices = np.argsort(fitness)[:elite_count] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + return np.clip(alpha * parent1 + (1 - alpha) * parent2, self.lower_bound, self.upper_bound) + return np.copy(parent1 if np.random.rand() < 0.5 else parent2) + + def adaptive_gradient(self, individual, func, best_individual, iteration): + if iteration % self.gradient_enhancement_cycle == 0: + gradient_direction = best_individual - individual + step_size = self.gradient_step / (1 + np.sqrt(np.dot(gradient_direction, gradient_direction))) + new_individual = individual + step_size * gradient_direction + return np.clip(new_individual, self.lower_bound, self.upper_bound) + return individual + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + evaluations = self.population_size + + iteration = 0 + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + new_population = [] + for i in range(self.population_size): + if i < len(elites): + new_population.append(self.adaptive_gradient(elites[i], func, best_individual, iteration)) + else: + parents_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parents_indices[0]], elites[parents_indices[1]] + child = self.crossover(parent1, parent2) + child = self.mutate(child) + new_population.append(child) + + population = np.array(new_population) + fitness = self.evaluate_fitness(func, population) + + min_idx = np.argmin(fitness) + min_fitness = fitness[min_idx] + + if min_fitness < best_fitness: + best_fitness = min_fitness + best_individual = population[min_idx] + + evaluations += self.population_size + iteration += 1 + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedConvergenceDE.py b/nevergrad/optimization/lama/RefinedConvergenceDE.py new file mode 100644 index 000000000..38aa6b9a2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedConvergenceDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class RefinedConvergenceDE: + def __init__( + self, budget=10000, population_size=150, F_base=0.48, F_range=0.42, CR=0.85, strategy="best" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for increased diversity and adaptation + self.CR = CR # Crossover probability, slightly reduced to enhance exploitation + self.strategy = strategy # Strategy for mutation and selection, focusing on 'best' individual + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Selection strategy based on 'best' + if self.strategy == "best": + base = population[best_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjusting F for more exploration and exploitation + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using a more diverse differential mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(base + F * (b - c + a - base), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedConvergentAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedConvergentAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..02a3e8d37 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedConvergentAdaptiveEvolutionStrategy.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedConvergentAdaptiveEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + elite_fraction=0.2, + mutation_rate=0.1, + mutation_decrease=0.99, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_rate = mutation_rate + self.mutation_decrease = mutation_decrease + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def reproduce(self, elites, elite_fitness): + new_population = elites.copy() + while len(new_population) < self.population_size: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.crossover(elites[parents[0]], elites[parents[1]]) + child = self.mutate(child, self.mutation_scale * (np.random.rand() * 2)) # Varying mutation scale + new_population = np.vstack([new_population, child]) + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + self.mutation_scale = (self.upper_bound - self.lower_bound) / 2 + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + evaluations += len(population) + self.mutation_scale *= self.mutation_decrease # Decrease mutation scale adaptively + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedCooperativeDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedCooperativeDifferentialEvolution.py new file mode 100644 index 000000000..d7a7e39e3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedCooperativeDifferentialEvolution.py @@ -0,0 +1,121 @@ +import numpy as np + + +class RefinedCooperativeDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.9): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.95): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def guided_local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 # Further reduced step size for finer local adjustments + for _ in range(max_iter): + gradient = self.estimate_gradient(best_x, func) + new_x = np.clip(best_x - step_size * gradient, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def estimate_gradient(self, x, func, epsilon=1e-8): + gradient = np.zeros(self.dim) + f_x = func(x) + for i in range(self.dim): + x_step = np.copy(x) + x_step[i] += epsilon + gradient[i] = (func(x_step) - f_x) / epsilon + return gradient + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Cultural knowledge + knowledge_base = { + "best_solution": population[np.argmin(fitness)], + "best_fitness": np.min(fitness), + "mean_position": np.mean(population, axis=0), + } + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update cultural knowledge + if trial_fitness < knowledge_base["best_fitness"]: + knowledge_base["best_solution"] = trial_vector + knowledge_base["best_fitness"] = trial_fitness + knowledge_base["mean_position"] = np.mean(population, axis=0) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply guided local search on selected individuals + if np.random.rand() < 0.05: # Even lower probability for local search + guided_best_x, guided_best_f = self.guided_local_search(population[i], func) + evaluations += 5 + if guided_best_f < fitness[i]: + population[i] = guided_best_x + fitness[i] = guided_best_f + if guided_best_f < self.f_opt: + self.f_opt = guided_best_f + self.x_opt = guided_best_x + + # Cultural-based guidance: periodically update population based on cultural knowledge + if evaluations % (population_size * 2) == 0: + if knowledge_base["best_solution"] is None: + continue # Skip if no best solution has been found yet + + # Adjust cultural shift influence dynamically based on fitness diversity + fitness_std = np.std(fitness) + cultural_influence = 0.5 + (0.2 * fitness_std / (np.mean(fitness) + 1e-9)) + cultural_shift = ( + knowledge_base["best_solution"] - knowledge_base["mean_position"] + ) * cultural_influence + + # Cooperative cultural influence updates with mean position + for i in range(population_size): + cooperation_factor = np.random.normal(0.5, 0.1) # Adjusted influence factors + shift = cooperation_factor * cultural_shift + (1 - cooperation_factor) * ( + knowledge_base["best_solution"] - population[i] + ) * np.random.normal(0, 0.05, self.dim) + population[i] = np.clip(population[i] + shift, self.lb, self.ub) + + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedCosineAdaptiveDifferentialSwarm.py b/nevergrad/optimization/lama/RefinedCosineAdaptiveDifferentialSwarm.py new file mode 100644 index 000000000..732f01158 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedCosineAdaptiveDifferentialSwarm.py @@ -0,0 +1,54 @@ +import numpy as np + + +class RefinedCosineAdaptiveDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.pop_size = 300 # Optimal population size after tuning + self.F_base = 0.5 # Base mutation factor + self.CR = 0.7 # Crossover probability + self.adapt_rate = 0.1 # Adaptation rate for mutation factor + self.top_percentile = 0.1 # Using top 10% of individuals for mutation + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Cosine adaptive mutation factor + F_adaptive = self.F_base + self.adapt_rate * np.cos(i / (self.budget / self.pop_size) * np.pi) + + for j in range(self.pop_size): + # Mutation strategy: DE/current-to-pbest/1 + idxs = np.argsort(fitness)[: int(self.top_percentile * self.pop_size)] # p-best individuals + pbest = pop[np.random.choice(idxs)] + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = pop[j] + F_adaptive * (pbest - pop[j]) + F_adaptive * (a - b) + + # Clip to ensure staying within bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedDifferentialEvolutionWithAdaptiveLearningRate.py b/nevergrad/optimization/lama/RefinedDifferentialEvolutionWithAdaptiveLearningRate.py new file mode 100644 index 000000000..e7b9f84dd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDifferentialEvolutionWithAdaptiveLearningRate.py @@ -0,0 +1,143 @@ +import numpy as np + + +class RefinedDifferentialEvolutionWithAdaptiveLearningRate: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Population size + self.initial_F = 0.5 # Differential evolution mutation factor + self.initial_CR = 0.9 # Differential evolution crossover rate + self.self_adaptive_rate = 0.1 # Self-adaptive rate for parameters + self.elite_rate = 0.1 # Elite retention rate + self.memory_size = 20 # Memory size for adaptive parameters + self.adaptive_phase_ratio = 0.7 # Budget ratio for evolutionary phase + self.local_search_rate = 0.2 # Probability for local search + self.alpha = 0.6 # Differential weight for local search + self.adaptive_learning_rate = 0.01 # Learning rate for adaptive mutation factor and crossover rate + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 # Step size for local search + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + self.adaptive_learning_rate * np.random.randn() + adaptive_CR = memory_CR[idx] + self.adaptive_learning_rate * np.random.randn() + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + a, b, c = population[np.random.choice(self.population_size, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + 0.5 * velocities[i] + + 1.5 * r1 * (personal_best_positions[i] - population[i]) + + 1.5 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedDifferentialEvolutionWithAdaptiveLearningRate(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedDifferentialParticleSwarmOptimization.py b/nevergrad/optimization/lama/RefinedDifferentialParticleSwarmOptimization.py new file mode 100644 index 000000000..0448ee410 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDifferentialParticleSwarmOptimization.py @@ -0,0 +1,158 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedDifferentialParticleSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 20 + self.init_num_niches = 5 + self.alpha = 0.5 # Weight for DE contribution + self.beta = 0.5 # Weight for PSO contribution + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize niches + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + # Combined DE and PSO trial + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Local Search + if np.random.rand() < 0.5 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + # Update local bests for the niche + local_bests[n] = new_niches[n][np.argmin(new_fitness[n])] + local_best_fits[n] = min(new_fitness[n]) + + # Update niches and fitness + niches = new_niches + fitness = new_fitness + + # Diversity Maintenance: Re-initialize if the population converges too tightly + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + # Dynamic niching and regrouping + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + # Adaptive parameter adjustment + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + # Adding a restart mechanism based on diversity and performance + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDimensionalCyclicCrossoverEvolver.py b/nevergrad/optimization/lama/RefinedDimensionalCyclicCrossoverEvolver.py new file mode 100644 index 000000000..6e8df0332 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDimensionalCyclicCrossoverEvolver.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedDimensionalCyclicCrossoverEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=200, + elite_fraction=0.3, + mutation_intensity=0.005, + crossover_probability=0.95, + momentum=0.3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.momentum = momentum + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def cyclic_crossover(self, parent1, parent2): + start = np.random.randint(self.dimension) + cycle_length = np.random.randint(1, self.dimension) + indices = np.arange(start, start + cycle_length) % self.dimension + child = parent1.copy() + child[indices] = parent2[indices] + return child + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.cyclic_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + child += self.momentum * (child - previous_population[i]) + new_population[i] = child + + # Ensuring the best previous individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV2.py b/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV2.py new file mode 100644 index 000000000..3dc42e242 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV2.py @@ -0,0 +1,87 @@ +import numpy as np + + +class RefinedDimensionalFeedbackEvolverV2: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=300, + elite_fraction=0.15, + mutation_intensity=0.02, + crossover_probability=0.7, + feedback_factor=0.5, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.feedback_factor = feedback_factor + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + feedback_vector = self.feedback_factor * (previous_best - previous_population[i]) + child = np.clip(child + feedback_vector, self.lower_bound, self.upper_bound) + new_population[i] = child + + # Ensuring the best previous individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV4.py b/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV4.py new file mode 100644 index 000000000..2c84c38d3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDimensionalFeedbackEvolverV4.py @@ -0,0 +1,95 @@ +import numpy as np + + +class RefinedDimensionalFeedbackEvolverV4: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=400, + elite_fraction=0.05, + mutation_intensity=0.02, + crossover_probability=0.7, + feedback_factor=0.7, + exploration_increment=0.04, + mutation_decay=0.98, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.population_size = population_size + self.num_elites = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_probability = crossover_probability + self.feedback_factor = feedback_factor + self.exploration_increment = exploration_increment + self.mutation_decay = mutation_decay + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_fitness(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.num_elites] + return population[elite_indices], fitness[elite_indices] + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def adaptive_crossover(self, parent1, parent2): + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def reproduce(self, elites, elite_fitness, previous_population=None): + new_population = np.empty((self.population_size, self.dimension)) + previous_best = elites[np.argmin(elite_fitness)] + + for i in range(self.population_size): + if np.random.rand() < self.crossover_probability: + parents = np.random.choice(self.num_elites, 2, replace=False) + child = self.adaptive_crossover(elites[parents[0]], elites[parents[1]]) + else: + child = elites[np.random.choice(self.num_elites)] + child = self.mutate(child) + if previous_population is not None: + feedback_vector = self.feedback_factor * (previous_best - previous_population[i]) + child = np.clip(child + feedback_vector, self.lower_bound, self.upper_bound) + new_population[i] = child + + # Ensure the best individual is maintained + new_population[0] = previous_best + return new_population + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_fitness(func, population) + previous_population = None + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + evaluations = self.population_size + + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + population = self.reproduce(elites, elite_fitness, previous_population) + fitness = self.evaluate_fitness(func, population) + + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = population[np.argmin(fitness)] + + previous_population = population + evaluations += self.population_size + + # Adaptive mutation intensity to balance exploration and exploitation + self.mutation_intensity *= self.mutation_decay + self.mutation_intensity += self.exploration_increment * (evaluations / self.budget) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedDualConvergenceEvolutiveStrategy.py b/nevergrad/optimization/lama/RefinedDualConvergenceEvolutiveStrategy.py new file mode 100644 index 000000000..db6c30636 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDualConvergenceEvolutiveStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class RefinedDualConvergenceEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate, mutation_strength): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + for _ in range(num_children): + if np.random.rand() < 0.95: # Increased crossover probability + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + population_size = 250 # Increased initial population size for diversity + num_generations = self.budget // population_size + elitism_size = population_size // 5 # Increased elitism to 20% + mutation_rate = 0.15 # More aggressive initial mutation + mutation_strength = 1.2 # Higher mutation strength + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, elitism_size) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + non_elite_size = population_size - elitism_size + offspring = self.crossover(best_population, non_elite_size) + offspring = self.mutate(offspring, mutation_rate, mutation_strength) + population = np.vstack((best_population, offspring)) + + # Dynamically adapt mutation rate and strength + mutation_rate *= 0.95 # Slower rate of mutation decrease + mutation_strength *= 0.95 # Slower strength decrease + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/RefinedDualPhaseADPSO_DE_V3_Enhanced.py b/nevergrad/optimization/lama/RefinedDualPhaseADPSO_DE_V3_Enhanced.py new file mode 100644 index 000000000..b9f8e1ae2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDualPhaseADPSO_DE_V3_Enhanced.py @@ -0,0 +1,144 @@ +import numpy as np + + +class RefinedDualPhaseADPSO_DE_V3_Enhanced: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.5 # Increased local search rate + self.memory_size = 5 + self.w = 0.7 # Adjusted inertia weight for better exploration + self.c1 = 1.3 # Further reduced cognitive component + self.c2 = 2.2 # Further increased social component + self.phase_switch_ratio = 0.25 # Earlier phase switch + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal best positions and fitness + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best position and fitness + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + # Track the number of function evaluations + self.eval_count = self.population_size + + # Initialize memory for adaptive parameters + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedDualPhaseADPSO_DE_V3_Enhanced(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedDualPhaseOptimization.py b/nevergrad/optimization/lama/RefinedDualPhaseOptimization.py new file mode 100644 index 000000000..eb13d28ae --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDualPhaseOptimization.py @@ -0,0 +1,59 @@ +import numpy as np + + +class RefinedDualPhaseOptimization: + def __init__(self, budget, dim=5, initial_exploration_ratio=0.6): + self.budget = budget + self.dim = dim + self.bounds = np.array([-5.0, 5.0]) + self.initial_exploration_budget = int(budget * initial_exploration_ratio) + self.exploitation_phase_budget = budget - self.initial_exploration_budget + + def initialize_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def mutate(self, individual, mutation_strength): + mutant = individual + mutation_strength * np.random.randn(self.dim) + return np.clip(mutant, self.bounds[0], self.bounds[1]) + + def __call__(self, func): + # Exploration Phase with Adaptive Mutation Strength + population_size = 30 + mutation_strength = 0.8 + population = self.initialize_population(population_size) + f_values = self.evaluate_population(func, population) + evaluations = population_size + best_score = np.min(f_values) + best_individual = population[np.argmin(f_values)] + + while evaluations < self.initial_exploration_budget: + new_population = [] + for individual in population: + new_individual = self.mutate(individual, mutation_strength) + new_population.append(new_individual) + new_f_values = self.evaluate_population(func, new_population) + evaluations += population_size + + combined_f_values = np.concatenate((f_values, new_f_values)) + combined_population = np.vstack((population, new_population)) + + best_indices = np.argsort(combined_f_values)[:population_size] + population = combined_population[best_indices] + f_values = combined_f_values[best_indices] + mutation_strength *= 0.95 # Reduce mutation strength gradually + + # Exploitation Phase with Local Search + for _ in range(self.exploitation_phase_budget): + perturbations = np.random.randn(self.dim) * 0.1 + candidate = np.clip(best_individual + perturbations, self.bounds[0], self.bounds[1]) + candidate_score = func(candidate) + evaluations += 1 + + if candidate_score < best_score: + best_score = candidate_score + best_individual = candidate + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/RefinedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/RefinedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..ab8073cbf --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDualStrategyAdaptiveDE.py @@ -0,0 +1,118 @@ +import numpy as np + + +class RefinedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 # Increased population size for better exploration + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveDE.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveDE.py new file mode 100644 index 000000000..2f0aeddce --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveDE.py @@ -0,0 +1,149 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedDynamicAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.25 + self.local_search_prob = 0.25 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + # Apply multiple mutation strategies + mutant1 = x1 + mutation_factor * (x2 - x3) + mutant2 = elite_pop[np.random.randint(elite_count)] + mutation_factor * ( + x1 - elite_pop[np.random.randint(elite_count)] + ) + if func(mutant1) < func(mutant2): + mutant = mutant1 + else: + mutant = mutant2 + + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.hybrid_local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def hybrid_local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + + # Nelder-Mead local search + result = minimize(func, best_x, method="Nelder-Mead", options={"maxiter": 10, "xatol": 1e-6}) + self.budget -= result.nfev # Account for the function evaluations + if result.fun < best_f: + best_x = result.x + best_f = result.fun + + # Simulated Annealing + T = 1.0 + T_min = 0.0001 + alpha = 0.9 + while T > T_min and self.budget > 0: + new_x = best_x + np.random.uniform(-0.5, 0.5, self.dim) + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 + if new_f < best_f or np.exp((best_f - new_f) / T) > np.random.rand(): + best_x = new_x + best_f = new_f + T *= alpha + + return best_x diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDE.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDE.py new file mode 100644 index 000000000..1119eca73 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDE.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedDynamicAdaptiveHybridDE: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.5, + F_range=0.4, + CR=0.85, + elite_fraction=0.1, + mutation_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Dynamic range for the mutation factor F + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_factor = mutation_factor # Proportion of randomization in mutation + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy using random elite selection + if np.random.rand() < self.mutation_factor: + base = population[elite_indices[np.random.randint(elite_size)]] + else: + # More likely to use the best individual + base = best_individual + + # Adjust F dynamically + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory.py new file mode 100644 index 000000000..b68ed5a03 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory.py @@ -0,0 +1,152 @@ +import numpy as np + + +class RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.6 # Adaptive inertia weight for PSO + c1 = 1.2 # Increased cognitive coefficient for PSO + c2 = 1.2 # Increased social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..74613d44f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizer.py @@ -0,0 +1,142 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedDynamicAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def maintain_diversity(self, population): + unique_individuals = np.unique(population, axis=0) + if len(unique_individuals) < self.init_pop_size: + additional_individuals = np.random.uniform( + self.bounds[0], self.bounds[1], (self.init_pop_size - len(unique_individuals), self.dim) + ) + population = np.vstack((unique_individuals, additional_individuals)) + else: + population = unique_individuals + return population + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(self.init_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.init_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Maintain diversity + population = self.maintain_diversity(population) + + # Perform local search on the best individuals + for i in range(self.init_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizerV2.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizerV2.py new file mode 100644 index 000000000..46ad5f127 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveHybridOptimizerV2.py @@ -0,0 +1,144 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedDynamicAdaptiveHybridOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicAdaptiveStrategyV23.py b/nevergrad/optimization/lama/RefinedDynamicAdaptiveStrategyV23.py new file mode 100644 index 000000000..6e82f8342 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicAdaptiveStrategyV23.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedDynamicAdaptiveStrategyV23: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, performance_ratio): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if performance_ratio < 0.5: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + performance_ratio = fitnesses[best_idx] / np.mean(fitnesses) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, performance_ratio) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV3.py b/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV3.py new file mode 100644 index 000000000..f7a081826 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV3.py @@ -0,0 +1,108 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class RefinedDynamicClusterHybridOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def __call__(self, func): + population_size = 50 + + # Enhanced Initialization using Sobol Sequence + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size) + population = qmc.scale(sample, self.lb, self.ub) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations < self.budget / 2: + # Apply PSO Strategy + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: # 10% chance to reintroduce + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV4.py b/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV4.py new file mode 100644 index 000000000..1143f0de2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicClusterHybridOptimizationV4.py @@ -0,0 +1,109 @@ +import numpy as np +from sklearn.cluster import KMeans +from scipy.stats import qmc + + +class RefinedDynamicClusterHybridOptimizationV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, param_range): + progress = evaluations / max_evaluations + return param_range[0] + (param_range[1] - param_range[0]) * progress + + def __call__(self, func): + population_size = 60 + + # Enhanced Initialization using Sobol Sequence + sampler = qmc.Sobol(d=self.dim, scramble=True) + sample = sampler.random(population_size) + population = qmc.scale(sample, self.lb, self.ub) + + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + memory = [] + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.4)) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, (2.0, 1.0)) + differential_weight = self.adaptive_parameters(evaluations, self.budget, (0.8, 0.2)) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, (0.9, 0.3)) + + # Adaptive Clustering Strategy with KMeans + num_clusters = max(2, int(np.sqrt(population_size))) + kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(population) + cluster_centers = kmeans.cluster_centers_ + + for i in range(population_size): + if evaluations >= self.budget: + break + + if evaluations < self.budget / 2: + # Apply PSO Strategy + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + cluster_index = kmeans.predict([population[i]])[0] + social = social_coefficient * r2 * (cluster_centers[cluster_index] - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + else: + # Apply DE Strategy with Adaptive Scaling Factor + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + scaling_factor = 0.5 + np.random.rand() * 0.5 # Adaptive scaling + mutant_vector = np.clip(a + scaling_factor * (b - c), self.lb, self.ub) + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + new_position = np.where(crossover_mask, mutant_vector, population[i]) + + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + # Reintroduce promising individuals from memory + if len(memory) > 0 and evaluations < self.budget: + for mem_pos, mem_fit in memory: + if np.random.rand() < 0.1: # 10% chance to reintroduce + index = np.random.randint(0, population_size) + population[index] = mem_pos + fitness[index] = mem_fit + evaluations += 1 + + # Update memory with top individuals + sorted_indices = np.argsort(fitness) + top_individuals = sorted_indices[: max(1, population_size // 10)] + memory.extend([(population[idx], fitness[idx]) for idx in top_individuals]) + if len(memory) > population_size: + memory = memory[:population_size] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicClusteringPSO.py b/nevergrad/optimization/lama/RefinedDynamicClusteringPSO.py new file mode 100644 index 000000000..b37a6f222 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicClusteringPSO.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedDynamicClusteringPSO: + def __init__( + self, budget=10000, population_size=50, omega=0.7, phi_p=0.15, phi_g=0.25, cluster_ratio=0.1 + ): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia weight + self.phi_p = phi_p # Personal coefficient + self.phi_g = phi_g # Global coefficient + self.dim = 5 # Dimension of the problem + self.cluster_ratio = cluster_ratio # Ratio of population to form clusters + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize particles + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = particles.copy() + personal_best_fitness = np.array([func(p) for p in particles]) + + global_best = particles[np.argmin(personal_best_fitness)] + global_best_fitness = min(personal_best_fitness) + + evaluations = self.population_size + cluster_best = np.copy(global_best) # Initialize cluster best + + # Optimization loop + while evaluations < self.budget: + # Clustering phase with proper initialization of cluster_best + if evaluations % int(self.budget * self.cluster_ratio) == 0: + # Use k-means clustering to identify clusters in the particle positions + from sklearn.cluster import KMeans + + num_clusters = int(self.population_size * self.cluster_ratio) + kmeans = KMeans(n_clusters=max(2, num_clusters)) + clusters = kmeans.fit_predict(particles) + cluster_bests = [ + particles[clusters == i][np.argmin(personal_best_fitness[clusters == i])] + for i in range(max(2, num_clusters)) + ] + + for i in range(self.population_size): + # Update velocity and position + velocity[i] = ( + self.omega * velocity[i] + + self.phi_p * np.random.rand(self.dim) * (personal_best[i] - particles[i]) + + self.phi_g * np.random.rand(self.dim) * (global_best - particles[i]) + ) + + # Use cluster best for the particle's specific cluster + if evaluations % int(self.budget * self.cluster_ratio) == 0: + cluster_id = clusters[i] + cluster_best = cluster_bests[cluster_id] + + velocity[i] += self.phi_g * np.random.rand(self.dim) * (cluster_best - particles[i]) + particles[i] += velocity[i] + particles[i] = np.clip(particles[i], lb, ub) + + # Evaluate particle's fitness + current_fitness = func(particles[i]) + evaluations += 1 + + # Update personal and global bests + if current_fitness < personal_best_fitness[i]: + personal_best[i] = particles[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best = particles[i] + global_best_fitness = current_fitness + + # Checkpoint to print or log the best found solution + if evaluations % 1000 == 0: + print(f"Evaluation: {evaluations}, Best Fitness: {global_best_fitness}") + + return global_best_fitness, global_best diff --git a/nevergrad/optimization/lama/RefinedDynamicCrowdingHybridOptimizer.py b/nevergrad/optimization/lama/RefinedDynamicCrowdingHybridOptimizer.py new file mode 100644 index 000000000..e463df41d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicCrowdingHybridOptimizer.py @@ -0,0 +1,189 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedDynamicCrowdingHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + crowding_factor=0.5, + restart_threshold=1e-5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.crowding_factor = crowding_factor + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.restart_threshold = restart_threshold + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_select(self, population, trial, fitness, f_trial): + distances = np.linalg.norm(population - trial, axis=1) + idx = np.argmin(distances) + if f_trial < fitness[idx]: + return idx + else: + return None + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + last_best_fitness = g_best_fitness + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + r = np.random.choice(3) + if r == 0: + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + elif r == 1: + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(a + F * (b - population[i]), self.bounds[0], self.bounds[1]) + else: + a = population[np.random.choice(idxs)] + mutant = np.clip(a + F * (g_best - population[i]), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection with crowding mechanism + f_trial = func(trial) + self.eval_count += 1 + selected_idx = self.crowding_select(population, trial, fitness, f_trial) + if selected_idx is not None and selected_idx < current_pop_size: + fitness[selected_idx] = f_trial + population[selected_idx] = trial + successful_steps.append((F, CR)) + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[selected_idx] = min(F * 1.1, 1.0) + CR_values[selected_idx] = min(CR * 1.1, 1.0) + + # Update personal best + if f_trial < p_best_fitness[selected_idx]: + p_best[selected_idx] = trial + p_best_fitness[selected_idx] = f_trial + + # Update global best + if f_trial < g_best_fitness: + g_best = trial + g_best_fitness = f_trial + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Restart mechanism based on convergence criterion + if abs(last_best_fitness - g_best_fitness) < self.restart_threshold: + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + successful_steps = [] + last_best_fitness = g_best_fitness + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicEliteAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/RefinedDynamicEliteAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..9022318ec --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicEliteAdaptiveHybridOptimizer.py @@ -0,0 +1,168 @@ +import numpy as np + + +class RefinedDynamicEliteAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_budget = 5 + self.T_init = 1.0 # Initial temperature for annealing + self.cooling_rate = 0.98 # Cooling rate for simulated annealing + self.elitism_rate = 0.2 # Fraction of elite individuals to retain + self.alpha = 0.05 # Scale for quantum jumps + self.diversity_threshold = 1e-5 # Threshold to restart the population + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = list(range(0, self.pop_size)) + indices.remove(idx) + idxs = np.random.choice(indices, 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_budget): + mutation = np.random.randn(self.dim) * 0.01 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def quantum_jump(self, individual, global_best, alpha, T): + return np.clip( + individual + alpha * np.random.randn(self.dim) * np.exp(-T) * (global_best - individual), + -5.0, + 5.0, + ) + + def restart_population(self, bounds): + return self.initialize_population(bounds) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + T = self.T_init # Initial temperature for annealing + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + new_population[i] = trial + fitness[i] = trial_fitness + + if evaluations >= self.budget: + break + + # Perform local search on the top fraction of the population + elite_indices = np.argsort(fitness)[: int(self.elitism_rate * self.pop_size)] + for i in elite_indices: + new_population[i], fitness[i] = self.local_search(new_population[i], bounds, func) + evaluations += self.local_search_budget + + if evaluations < self.budget: + # Apply elitism: retain the top performing individuals + non_elite_indices = np.argsort(fitness)[int(self.elitism_rate * self.pop_size) :] + for i in non_elite_indices: + if np.random.rand() < 0.5: + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + mutant = self.mutate(global_best_position, parent1, parent2, F) + trial = self.crossover(new_population[i], mutant, CR) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + else: + quantum_trial = self.quantum_jump( + new_population[i], global_best_position, self.alpha, T + ) + quantum_fitness = func(quantum_trial) + evaluations += 1 + + if quantum_fitness < fitness[i]: + new_population[i] = quantum_trial + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_trial + if evaluations >= self.budget: + break + + # Check for diversity and restart if too low + if self.diversity(new_population) < self.diversity_threshold: + new_population = self.restart_population(bounds) + personal_best_positions = np.copy(new_population) + personal_best_scores = np.array([func(ind) for ind in new_population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + + # Update population with new candidates + population = np.copy(new_population) + + # Gradually decrease temperature + T *= self.cooling_rate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicEnhancedHybridOptimizer.py b/nevergrad/optimization/lama/RefinedDynamicEnhancedHybridOptimizer.py new file mode 100644 index 000000000..e5ba66810 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicEnhancedHybridOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np + + +class RefinedDynamicEnhancedHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.7 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + # Exploration improvement parameters + exploration_factor = 0.1 + max_exploration_cycles = 50 + + # New Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.05 # Increase learning rate if improvement is significant + else: + alpha *= 0.7 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + prev_f = self.f_opt + + # Enhanced exploration using adaptive exploration factor + if i % 100 == 0 and i > 0: # Every 100 iterations, enhance exploration + exploration_factor = min( + 0.5, exploration_factor * 1.1 + ) # Gradually increase exploration factor + for idx in range(swarm_size): + new_position = positions[idx] + exploration_factor * np.random.uniform(-1, 1, self.dim) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedDynamicEnhancedHybridOptimizer(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedDynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/RefinedDynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..e21eb0d9e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class RefinedDynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.99 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.97 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.95 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedDynamicHybridDEPSOWithEliteMemoryV2.py b/nevergrad/optimization/lama/RefinedDynamicHybridDEPSOWithEliteMemoryV2.py new file mode 100644 index 000000000..edd73bc9e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicHybridDEPSOWithEliteMemoryV2.py @@ -0,0 +1,168 @@ +import numpy as np + + +class RefinedDynamicHybridDEPSOWithEliteMemoryV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.4 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + if elite_solutions.shape[0] > elite_size: + elite_solutions = elite_solutions[:elite_size] + new_population[:elite_size] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/RefinedDynamicHybridOptimizer.py new file mode 100644 index 000000000..9583bf0b4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicHybridOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedDynamicHybridOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=40): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.velocity_coeff = 0.7 + self.global_coeff = 0.9 + self.local_coeff = 0.9 + self.inertia_weighting_strategy = self.dynamic_inertia_weighting + self.exploration_phase_ratio = 0.6 + + def dynamic_inertia_weighting(self, current_eval, eval_cutoff): + """ + Dynamically adjusts the inertia weight based on the phase of the optimisation process. + """ + if current_eval < eval_cutoff: + return 0.9 - 0.5 * (current_eval / eval_cutoff) + else: + return 0.4 + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + phase_cutoff = int(self.budget * self.exploration_phase_ratio) + + while evaluations < self.budget: + inertia = self.inertia_weighting_strategy(evaluations, phase_cutoff) + + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + inertia * velocities[i] + + self.local_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.global_coeff * r2 * (global_best_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + current_fitness = func(positions[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = current_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedDynamicQuantumEvolution.py b/nevergrad/optimization/lama/RefinedDynamicQuantumEvolution.py new file mode 100644 index 000000000..966b39eff --- /dev/null +++ b/nevergrad/optimization/lama/RefinedDynamicQuantumEvolution.py @@ -0,0 +1,186 @@ +import numpy as np + + +class RefinedDynamicQuantumEvolution: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=20, + local_search_steps=20, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + alpha=0.5, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + self.alpha = alpha # Weight for combining strategies + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F, current_idx): + """Perform differential mutation.""" + pop_size, dim = population.shape + indices = [idx for idx in range(pop_size) if idx != current_idx] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(fitness)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(fitness), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F, i) + else: + intensity = self.perturbation_intensity * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..3434bddd8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveHybridDEPSO.py @@ -0,0 +1,152 @@ +import numpy as np + + +class RefinedEliteAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + elite_size = 5 + w = 0.7 # Increased inertia weight for PSO to improve exploration + c1 = 1.2 # Increased cognitive coefficient for PSO + c2 = 1.2 # Increased social coefficient for PSO + initial_F = 0.6 # Reduced differential weight for DE + initial_CR = 0.7 # Reduced crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with elite handling + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py new file mode 100644 index 000000000..4d482827b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3.py new file mode 100644 index 000000000..6364a63e5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + adaptive_memory=True, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + self.adaptive_memory = adaptive_memory + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..38e3f87f5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV3.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV3.py new file mode 100644 index 000000000..b96ac7245 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV3.py @@ -0,0 +1,206 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.neighbors import NearestNeighbors + + +class RefinedEliteAdaptiveMemoryHybridOptimizerV3: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.diversity_threshold = diversity_threshold + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def measure_diversity(self, population): + if len(population) < 2: + return np.inf # Maximum diversity when population size is very small + nbrs = NearestNeighbors(n_neighbors=2, algorithm="ball_tree").fit(population) + distances, _ = nbrs.kneighbors(population) + avg_distance = np.mean(distances[:, 1]) # Average distance to the nearest neighbor + return avg_distance + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enforce population diversity if it falls below a threshold + avg_distance = self.measure_diversity(population) + if avg_distance < self.diversity_threshold: + # Introduce new individuals to increase diversity + for _ in range(self.init_pop_size - current_pop_size): + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV4.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV4.py new file mode 100644 index 000000000..2e519b05b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV4.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteAdaptiveMemoryHybridOptimizerV4: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + mem_size=50, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.mem_size = mem_size + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population): + dist = np.zeros(len(population)) + for i in range(len(population)): + for j in range(len(population)): + if i != j: + dist[i] += np.linalg.norm(population[i] - population[j]) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if ( + np.random.rand() < 0.2 + ): # Reduced to 20% chance to apply blend crossover for more exploitation + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.mem_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Reinforce diversity using crowding distance + if no_improvement_count == 0 and current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV5.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV5.py new file mode 100644 index 000000000..64a25cd9b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveMemoryHybridOptimizerV5.py @@ -0,0 +1,206 @@ +import numpy as np +from scipy.optimize import minimize +from sklearn.neighbors import NearestNeighbors + + +class RefinedEliteAdaptiveMemoryHybridOptimizerV5: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + diversity_threshold=0.1, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.diversity_threshold = diversity_threshold + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def measure_diversity(self, population): + if len(population) < 2: + return np.inf # Maximum diversity when population size is very small + nbrs = NearestNeighbors(n_neighbors=2, algorithm="ball_tree").fit(population) + distances, _ = nbrs.kneighbors(population) + avg_distance = np.mean(distances[:, 1]) # Average distance to the nearest neighbor + return avg_distance + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enforce population diversity if it falls below a threshold + avg_distance = self.measure_diversity(population) + if avg_distance < self.diversity_threshold: + # Introduce new individuals to increase diversity + for _ in range(self.init_pop_size - current_pop_size): + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch.py b/nevergrad/optimization/lama/RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch.py new file mode 100644 index 000000000..dc0958419 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.7 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + self.learning_rate = 0.5 + self.num_learning_agents = 10 + self.adaptive_memory_rate = 0.5 + self.diversity_tracking_interval = 50 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def enhanced_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC", "SLSQP"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def elitist_learning(self, population, elites, func): + new_population = np.copy(population) + for i in range(self.num_learning_agents): + elite = elites[np.random.randint(len(elites))] + learner = np.copy(elite) + perturbation = np.random.uniform(-self.learning_rate, self.learning_rate, self.dim) + learner = np.clip(learner + perturbation, self.bounds[0], self.bounds[1]) + f_learner = func(learner) + + if f_learner < func(elite): + new_population[i] = learner + else: + new_population[i] = elite + + return new_population + + def adaptive_memory_update(self, population, memory, fitness, memory_fitness, func): + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = ( + self.adaptive_memory_rate * memory[i] + (1 - self.adaptive_memory_rate) * population[i] + ) + f_trial = func(trial) + if f_trial < memory_fitness[i]: + memory[i] = trial + memory_fitness[i] = f_trial + return memory, memory_fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.enhanced_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = population[np.argsort(fitness)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + # Adaptive Memory Update + memory, memory_fitness = self.adaptive_memory_update( + population, memory, fitness, memory_fitness, func + ) + + # Elitist Learning Phase + learned_population = self.elitist_learning(population, elite_particles, func) + learned_fitness = np.array([func(ind) for ind in learned_population]) + evaluations += self.num_learning_agents + + for i in range(self.num_learning_agents): + if learned_fitness[i] < self.f_opt: + self.f_opt = learned_fitness[i] + self.x_opt = learned_population[i] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteDynamicHybridOptimizer.py b/nevergrad/optimization/lama/RefinedEliteDynamicHybridOptimizer.py new file mode 100644 index 000000000..81ee46d3b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteDynamicHybridOptimizer.py @@ -0,0 +1,136 @@ +import numpy as np + + +class RefinedEliteDynamicHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 # Increased population size for better diversity + self.initial_F = 0.8 # Adjusted to promote higher mutation step + self.initial_CR = 0.9 # High crossover rate to maintain genetic diversity + self.c1 = 1.5 # Increased cognitive coefficient for personal best attraction + self.c2 = 1.5 # Increased social coefficient for global best attraction + self.w = 0.7 # Increased inertia weight for maintaining momentum + self.elite_fraction = 0.2 # Reduced to focus on more varied solutions + self.diversity_threshold = 1e-5 # Higher threshold to reinitialize earlier + self.tau1 = 0.1 # Parameter adaptation probability + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min( + 20, self.budget - evaluations + ) # Increased iterations for better local search + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.01, bounds.lb, bounds.ub + ) # Reduced perturbation for precision + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + # Replace worst individuals with random samples for maintaining diversity + worst_indices = np.argsort(fitness)[-elite_count:] + population[worst_indices] = np.random.uniform(bounds.lb, bounds.ub, (elite_count, self.dim)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteDynamicMemoryHybridOptimizer.py b/nevergrad/optimization/lama/RefinedEliteDynamicMemoryHybridOptimizer.py new file mode 100644 index 000000000..f51e8616f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteDynamicMemoryHybridOptimizer.py @@ -0,0 +1,197 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEliteDynamicMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteGuidedAdaptiveDE.py b/nevergrad/optimization/lama/RefinedEliteGuidedAdaptiveDE.py new file mode 100644 index 000000000..c55362b41 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteGuidedAdaptiveDE.py @@ -0,0 +1,99 @@ +import numpy as np + + +class RefinedEliteGuidedAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive_max_size = 100 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + archive = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation with better selection mechanism + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + if archive and np.random.rand() < 0.5: + archive_idx = np.random.choice(len(archive), 3, replace=False) + x1, x2, x3 = np.array(archive)[archive_idx] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + archive.extend(new_pop) + if len(archive) > self.archive_max_size: + archive = archive[-self.archive_max_size :] + + if self.budget % 50 == 0 and archive: + archive_idx = np.random.choice(len(archive)) + archive_ind = archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[: self.pop_size - elite_count])) + combined_fitness = np.hstack((elite_fitness, fitness[: self.pop_size - elite_count])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE.py b/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE.py new file mode 100644 index 000000000..eef2d0c3b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class RefinedEliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.9 + self.elitism_rate = 0.25 + self.archive_size = 50 + self.stagnation_threshold = 30 + self.local_search_prob = 0.3 + self.local_search_radius = 0.01 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + archive = [] + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + archive.extend(new_pop) + if len(archive) > self.archive_size: + archive = archive[-self.archive_size :] + + if self.budget % 50 == 0 and archive: + archive_idx = np.random.choice(len(archive)) + archive_ind = archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + # Stagnation handling + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + new_pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in new_pop]) + self.budget -= self.pop_size + self.stagnation_counter = 0 + + # Local search on elite individuals + if np.random.rand() < self.local_search_prob and elite_count > 0: + for j in range(elite_count): + perturbed = elite_pop[j] + np.random.normal(0, self.local_search_radius, self.dim) + perturbed = np.clip(perturbed, lower_bound, upper_bound) + f_perturbed = func(perturbed) + self.budget -= 1 + if f_perturbed < elite_fitness[j]: + elite_pop[j] = perturbed + elite_fitness[j] = f_perturbed + if f_perturbed < self.f_opt: + self.f_opt = f_perturbed + self.x_opt = perturbed + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE_v3.py b/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE_v3.py new file mode 100644 index 000000000..a0beac011 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEliteGuidedMutationDE_v3.py @@ -0,0 +1,97 @@ +import numpy as np + + +class RefinedEliteGuidedMutationDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.stagnation_threshold = 30 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + best_fitness = self.f_opt + self.stagnation_counter = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * min(generation / (self.budget / self.pop_size), 1.0) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + else: + idxs = np.random.choice(elite_count, 3, replace=False) + + x1, x2, x3 = pop[idxs[0]], pop[idxs[1]], pop[idxs[2]] + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + pop = np.array(new_pop) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + if best_fitness == self.f_opt: + self.stagnation_counter += 1 + else: + self.stagnation_counter = 0 + best_fitness = self.f_opt + + if self.stagnation_counter >= self.stagnation_threshold: + reinit_count = self.pop_size // 2 + reinit_pop = np.random.uniform(lower_bound, upper_bound, (reinit_count, self.dim)) + reinit_fitness = np.array([func(ind) for ind in reinit_pop]) + self.budget -= reinit_count + + pop = np.vstack((elite_pop, reinit_pop)) + fitness = np.hstack((elite_fitness, reinit_fitness)) + + self.stagnation_counter = 0 + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py b/nevergrad/optimization/lama/RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py new file mode 100644 index 000000000..12d5a7861 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined.py @@ -0,0 +1,84 @@ +import numpy as np + + +class RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): # Keep the same local search iterations + x_new = x + 0.1 * np.random.randn(self.dim) # Adjusted the local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.7 - 0.5 * t / self.budget # Adjusted inertia weight update + + def update_parameters(self, t): + return 1.8 - t / (2 * self.budget), 2.2 - t / ( + 2 * self.budget + ) # Adjusted cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.8 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5.py new file mode 100644 index 000000000..d5f9fecad --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5.py @@ -0,0 +1,122 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Adjusted population size for better balance + self.sigma = 0.3 # Adjusted step size for better exploration + self.c1 = 0.1 # Increased learning rate + self.cmu = 0.05 # Increased learning rate for covariance matrix update + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 # Slightly increased learning rate + self.elitism_rate = 0.15 # Adjusted elitism rate to balance exploration and exploitation + self.eval_count = 0 + self.F = 0.7 # Tuned differential weight + self.CR = 0.85 # Tuned crossover probability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost.py new file mode 100644 index 000000000..8424adc2e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost.py @@ -0,0 +1,106 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + probabilities = 1 / (fitness - np.min(fitness) + 1e-8) + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.1 + else: + self.base_lr *= 0.9 + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDualPhaseStrategyV9.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDualPhaseStrategyV9.py new file mode 100644 index 000000000..e317a7eeb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveDualPhaseStrategyV9.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveDualPhaseStrategyV9: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhanced mutation strategy for phase 2 using additional differential vectors + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * (population[b] - population[c] + population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Dynamic adaptation of F and CR based on the iteration count using a sigmoid function + scale = iteration / total_iterations + self.F = 0.9 / (1 + np.exp((scale - 0.5) * 10)) + 0.1 + self.CR = 0.9 / (1 + np.exp(-(scale - 0.5) * 10)) + 0.1 + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, self.budget) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO.py new file mode 100644 index 000000000..c2043695b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO: + def __init__( + self, + budget=10000, + population_size=300, + initial_inertia=0.95, + final_inertia=0.35, + cognitive_weight=2.5, + social_weight=2.3, + crossover_rate=0.2, + mutation_rate=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.crossover_rate = crossover_rate + self.mutation_rate = mutation_rate + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + velocities[i] = self.inertia_weight * velocities[i] + personal_component + social_component + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + # Crossover mechanism + if np.random.rand() < self.crossover_rate: + j = np.random.choice([x for x in range(self.population_size) if x != i]) + crossover_point = np.random.randint(self.dim) + particles[i][:crossover_point], particles[j][:crossover_point] = ( + particles[j][:crossover_point].copy(), + particles[i][:crossover_point].copy(), + ) + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation_indices = np.random.choice( + self.dim, size=int(np.ceil(self.dim * 0.3)), replace=False + ) + particles[i][mutation_indices] += np.random.normal(0, 0.5, size=len(mutation_indices)) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9.py new file mode 100644 index 000000000..b9bbf44b2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9.py @@ -0,0 +1,95 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9: + def __init__( + self, + budget=10000, + memetic_iter=100, + memetic_prob=0.9, + memetic_step=0.1, + memory_size=50, + pitch_adjustment_rate=0.9, + ): + self.budget = budget + self.dim = 5 + self.memetic_iter = memetic_iter + self.memetic_prob = memetic_prob + self.memetic_step = memetic_step + self.memory_size = memory_size + self.pitch_adjustment_rate = pitch_adjustment_rate + + def _initialize_harmony_memory(self, func): + harmony_memory = [np.random.uniform(-5.0, 5.0, size=self.dim) for _ in range(self.memory_size)] + harmony_memory_costs = [func(hm) for hm in harmony_memory] + return harmony_memory, harmony_memory_costs + + def _improvise_new_harmony(self, harmony_memory): + new_harmony = np.empty(self.dim) + for i in range(self.dim): + new_harmony[i] = np.random.choice([hm[i] for hm in harmony_memory]) + if np.random.rand() < self.pitch_adjustment_rate: + new_harmony[i] += np.random.normal(0, 0.5) + new_harmony[i] = np.clip(new_harmony[i], -5.0, 5.0) + return new_harmony + + def _memetic_local_search(self, harmony, func): + best_harmony = harmony.copy() + best_cost = func(harmony) + + for _ in range(self.memetic_iter): + mutated_harmony = harmony + np.random.normal(0, self.memetic_step, size=self.dim) + mutated_harmony = np.clip(mutated_harmony, -5.0, 5.0) + cost = func(mutated_harmony) + + if cost < best_cost: + best_harmony = mutated_harmony + best_cost = cost + + return best_harmony, best_cost + + def _apply_memetic_search(self, harmony_memory, harmony_memory_costs, func): + for idx in range(len(harmony_memory)): + if np.random.rand() < self.memetic_prob: + harmony_memory[idx], harmony_memory_costs[idx] = self._memetic_local_search( + harmony_memory[idx], func + ) + + return harmony_memory, harmony_memory_costs + + def _harmony_selection(self, harmony_memory, harmony_memory_costs): + idx = np.argsort(harmony_memory_costs)[: self.memory_size] + return [harmony_memory[i] for i in idx], [harmony_memory_costs[i] for i in idx] + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + harmony_memory, harmony_memory_costs = self._initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self._improvise_new_harmony(harmony_memory) + + if np.random.rand() < 0.1: + new_harmony = np.random.uniform(-5.0, 5.0, size=self.dim) + + if np.random.rand() < 0.8: + new_harmony, new_cost = self._memetic_local_search(new_harmony, func) + else: + new_cost = func(new_harmony) + + harmony_memory.append(new_harmony) + harmony_memory_costs.append(new_cost) + + harmony_memory, harmony_memory_costs = self._apply_memetic_search( + harmony_memory, harmony_memory_costs, func + ) + + harmony_memory, harmony_memory_costs = self._harmony_selection( + harmony_memory, harmony_memory_costs + ) + + if new_cost < self.f_opt: + self.f_opt = new_cost + self.x_opt = new_harmony + + return 1.0 - np.mean(np.array(harmony_memory_costs)), np.std(np.array(harmony_memory_costs)) diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonySearch.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonySearch.py new file mode 100644 index 000000000..b6e7394e1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHarmonySearch.py @@ -0,0 +1,81 @@ +import numpy as np +from scipy.stats import cauchy + + +class RefinedEnhancedAdaptiveHarmonySearch: + def __init__(self, budget=1000, hmcr=0.7, par=0.3, init_bw=0.1, bw_range=[0.01, 0.2], bw_decay=0.95): + self.budget = budget + self.hmcr = hmcr + self.par = par + self.init_bw = init_bw + self.bw_range = bw_range + self.bw_decay = bw_decay + + def cauchy_mutation(self, value, lb, ub, scale=0.1): + mutated_value = value + cauchy.rvs(loc=0, scale=scale) + mutated_value = np.clip(mutated_value, lb, ub) + return mutated_value + + def adaptive_bandwidth(self, iteration): + return max(self.init_bw * (self.bw_decay**iteration), self.bw_range[0]) + + def explore(self, func_bounds): + return np.random.uniform(func_bounds.lb, func_bounds.ub) + + def exploit(self, harmony_memory, func, func_bounds, bandwidth): + new_harmony = np.zeros(len(func_bounds.lb)) + for j in range(len(func_bounds.lb)): + if np.random.rand() < self.hmcr: + idx = np.random.randint(0, len(harmony_memory)) + new_harmony[j] = harmony_memory[idx][j] + else: + new_harmony[j] = np.random.uniform(func_bounds.lb[j], func_bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] = self.cauchy_mutation( + new_harmony[j], func_bounds.lb[j], func_bounds.ub[j], scale=bandwidth + ) + + return new_harmony + + def local_search(self, func, initial_solution, bandwidth): + current_solution = initial_solution.copy() + for _ in range(5): + new_solution = self.exploit([current_solution], func, func.bounds, bandwidth) + if func(new_solution) < func(current_solution): + current_solution = new_solution + return current_solution + + def global_best_update(self, harmony_memory, func): + return harmony_memory[np.argmin([func(h) for h in harmony_memory])] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + harmony_memory = [np.random.uniform(func.bounds.lb, func.bounds.ub) for _ in range(self.budget)] + global_best = self.global_best_update(harmony_memory, func) + bandwidth = self.init_bw + + for i in range(self.budget): + new_harmony = self.exploit(harmony_memory, func, func.bounds, bandwidth) + new_harmony = self.local_search(func, new_harmony, bandwidth) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + global_best = self.global_best_update(harmony_memory, func) + + if np.random.rand() < 0.1: + new_harmony = self.explore(func.bounds) + f = func(new_harmony) + if f < self.f_opt: + self.f_opt = f + self.x_opt = new_harmony + + if np.random.rand() < 0.05: + global_best = self.global_best_update(harmony_memory, func) + + bandwidth = self.adaptive_bandwidth(i) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2.py new file mode 100644 index 000000000..60e06ddaf --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.2 + self.local_search_rate = 0.3 + self.memory_size = 10 + self.w = 0.5 + self.c1 = 2.0 + self.c2 = 2.0 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.05 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + while self.eval_count < self.budget: + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + for i in range(elite_count, self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + population = new_population + fitness = new_fitness + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..b59025272 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=30): + self.budget = budget + self.population_size = population_size + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rate): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = 0.5 + np.random.rand() * 0.3 + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations): + crossover_rate = 0.9 - 0.5 * (iteration / max_iterations) + learning_rate = 0.01 * (1 - iteration / max_iterations) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + return crossover_rate, learning_rate, memetic_probability + + def hybrid_step(self, func, pop, scores, crossover_rate, learning_rate, memetic_probability): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rate) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + crossover_rate, learning_rate, memetic_probability = self.adaptive_parameters( + iteration, max_iterations + ) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rate, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiOperatorSearch.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiOperatorSearch.py new file mode 100644 index 000000000..6646dc638 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiOperatorSearch.py @@ -0,0 +1,141 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveMultiOperatorSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 2.0 # Increased Cognitive constant + c2 = 2.0 # Increased Social constant + w = 0.5 # Reduced Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.5 # Increased the diversity threshold + stagnation_counter = 0 + max_stagnation = 50 + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedEnhancedAdaptiveMultiOperatorSearch(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiStrategyDE.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiStrategyDE.py new file mode 100644 index 000000000..34c0222d8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveMultiStrategyDE.py @@ -0,0 +1,128 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveMultiStrategyDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_min, F_max = 0.5, 0.9 + CR_min, CR_max = 0.1, 1.0 + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + memory_size = 5 # Memory size for adaptive parameters + memory_F = np.full(memory_size, 0.5) + memory_CR = np.full(memory_size, 0.5) + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(memory_F, memory_CR, k): + idx = k % memory_size + F = np.clip(np.random.normal(memory_F[idx], 0.1), F_min, F_max) + CR = np.clip(np.random.normal(memory_CR[idx], 0.1), CR_min, CR_max) + return F, CR + + def update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness): + idx = np.argmax(delta_fitness) + fidx = np.argmin(delta_fitness) + memory_F[fidx % memory_size] = F_values[idx] + memory_CR[fidx % memory_size] = CR_values[idx] + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + + last_improvement = evaluations + k = 0 + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, 0.8) + CR_values = np.full(population_size, 0.9) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + delta_fitness = np.zeros(population_size) + + for i in range(population_size): + F_values[i], CR_values[i] = adaptive_parameters(memory_F, memory_CR, k) + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + delta_fitness[i] = fitness[i] - f_trial + + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + delta_fitness[i] = 0.0 + + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + update_memory(memory_F, memory_CR, F_values, CR_values, delta_fitness) + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + k += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v45.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v45.py new file mode 100644 index 000000000..d6a01a6cb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v45.py @@ -0,0 +1,93 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveQGSA_v45: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.iter_count = 0 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: # Include equality for better exploration + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + self.iter_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v46.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v46.py new file mode 100644 index 000000000..0395d3577 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v46.py @@ -0,0 +1,93 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveQGSA_v46: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.iter_count = 0 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + noise = np.random.normal(0, self.step_size, size=self.dimension) + return np.clip(agent + noise + self.delta * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + self.iter_count += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v48.py b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v48.py new file mode 100644 index 000000000..46c32eb5e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedAdaptiveQGSA_v48.py @@ -0,0 +1,91 @@ +import numpy as np + + +class RefinedEnhancedAdaptiveQGSA_v48: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, delta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.delta = delta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.best_fitness_history = [] + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.delta = min(0.2, self.delta * 1.03) + else: + self.delta = max(0.05, self.delta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + return best_agent, best_agent_idx + + def _adjust_agent_position(self, agent, best_agent): + noise = np.random.normal(0, self.step_size, size=self.dimension) + return np.clip(agent + noise + self.delta * (best_agent - agent), self.lb, self.ub) + + def _update_best_fitness_history(self): + self.best_fitness_history.append(self.f_opt) + + def __call__(self, func): + agents = self._initialize_agents() + + for _ in range(self.budget): + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + best_agent, best_agent_idx = self._update_best_agent(agents, fitness_values) + masses = self._calculate_masses(fitness_values) + + for i in range(self.num_agents): + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + new_agent = self._update_agent_position(agents[i], force) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness <= fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + self._update_best_fitness_history() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedBalancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/RefinedEnhancedBalancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..576ed18c8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedBalancedDualStrategyAdaptiveDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class RefinedEnhancedBalancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.85 + self.elitism_rate = 0.20 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage with balanced influence + trial = trial + 0.5 * np.random.rand(self.dim) * ( + elite_pop[np.random.randint(elite_count)] - trial + ) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedEnhancedCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..f84287d22 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,161 @@ +import numpy as np + + +class RefinedEnhancedCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.elitism_rate = 0.20 + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + self.alpha_levy = 0.01 + self.k = 0.3 + self.strategy_switches = [0.33, 0.66] # Points where strategy switches + self.levy_prob = 0.2 # Probability to perform Levy flight + self.adaptive_learning_rate = 0.02 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + else: + return "exploitative" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + for i in range(self.population_size): + if np.random.rand() < self.k: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + population = hybridization(population, cov_matrix) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDifferentialEvolutionLocalSearch_v42.py b/nevergrad/optimization/lama/RefinedEnhancedDifferentialEvolutionLocalSearch_v42.py new file mode 100644 index 000000000..c3500970e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDifferentialEvolutionLocalSearch_v42.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedEnhancedDifferentialEvolutionLocalSearch_v42: + def __init__( + self, budget=10000, p_best=0.3, f_min=0.5, f_max=0.9, cr_min=0.2, cr_max=0.8, local_search_iters=1000 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def refined_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val <= target_val: + population[idx] = new_trial + if new_trial_val <= trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) <= func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.refined_de_local_search(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3.py b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3.py new file mode 100644 index 000000000..63f1dfeb5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3.py @@ -0,0 +1,146 @@ +import numpy as np + + +class RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 300 # Increased population size for better exploration + self.initial_F = 0.7 # Adjusted mutation factor for more gradual changes + self.initial_CR = 0.8 # Adjusted crossover rate for controlled diversity + self.elite_rate = 0.1 # Increased elite rate for better exploitation + self.local_search_rate = 0.4 # Increased for more local refinement + self.memory_size = 25 # Increased memory size for adaptive parameter tuning + self.w = 0.4 # Lower inertia weight for better convergence + self.c1 = 1.2 # Adjusted cognitive component + self.c2 = 1.8 # Adjusted social component + self.adaptive_phase_ratio = 0.6 # Balanced between DE and PSO phases + self.alpha = 0.2 # Differential weight for stronger exploration-exploitation balance + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Adjusted local search step for finer granularity + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.05 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.05 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimization.py b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimization.py new file mode 100644 index 000000000..f2aae0e17 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimization.py @@ -0,0 +1,145 @@ +import numpy as np + + +class RefinedEnhancedDualPhaseHybridOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 300 # Increased population size for better exploration + self.initial_F = 0.7 # Balanced mutation factor + self.initial_CR = 0.9 # Slightly higher crossover rate + self.elite_rate = 0.15 # Slightly increased elite rate for more robust performance + self.local_search_rate = 0.25 # Balanced local search rate + self.memory_size = 40 # Further increased memory size for better parameter adaptation + self.w = 0.6 # Increased inertia weight for better exploration + self.c1 = 1.2 # Slightly reduced cognitive component + self.c2 = 1.7 # Increased social component + self.phase_switch_ratio = 0.5 # Balanced allocation between DE and PSO phases + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Balanced local search step + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedDualPhaseHybridOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimizationV3.py b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimizationV3.py new file mode 100644 index 000000000..62332ac28 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualPhaseHybridOptimizationV3.py @@ -0,0 +1,145 @@ +import numpy as np + + +class RefinedEnhancedDualPhaseHybridOptimizationV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 200 # Reduced population size for faster convergence + self.initial_F = 0.7 # Slightly reduced mutation factor for better stability + self.initial_CR = 0.8 # Reduced crossover rate + self.elite_rate = 0.1 # Reduced elite rate for better diversity + self.local_search_rate = 0.5 # Increased local search rate for better exploitation + self.memory_size = 30 # Reduced memory size for faster parameter adaptation + self.w = 0.6 # Reduced inertia weight for improved convergence speed + self.c1 = 1.4 # Balanced cognitive component + self.c2 = 1.6 # Balanced social component + self.adaptive_phase_ratio = 0.6 # Allocate more budget to the DE phase initially + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.02 # Increased local search step for better local exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.randn()) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedDualPhaseHybridOptimizationV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v2.py b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v2.py new file mode 100644 index 000000000..f2363e819 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class RefinedEnhancedDualStrategyAdaptiveDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 100 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.35 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v3.py b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v3.py new file mode 100644 index 000000000..0c578eeb2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyAdaptiveDE_v3.py @@ -0,0 +1,125 @@ +import numpy as np + + +class RefinedEnhancedDualStrategyAdaptiveDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.9 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.85 + self.elitism_rate = 0.3 + self.local_search_prob = 0.20 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.03 * (np.random.rand(self.dim) - 0.5) # Adjust the perturbation for local search + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualStrategyDynamicDE.py b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyDynamicDE.py new file mode 100644 index 000000000..d0dd9fc55 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyDynamicDE.py @@ -0,0 +1,145 @@ +import numpy as np + + +class RefinedEnhancedDualStrategyDynamicDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.final_pop_size = 20 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.6 + self.elitism_rate = 0.25 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop_size = self.initial_pop_size + pop = np.random.uniform(lower_bound, upper_bound, (pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor and crossover probability + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.initial_pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.initial_pop_size)) + ) + + # Adaptive population size + pop_size = max( + self.final_pop_size, + int( + self.initial_pop_size + - (self.initial_pop_size - self.final_pop_size) + * (generation / (self.budget / self.initial_pop_size)) + ), + ) + elite_count = max(1, int(self.elitism_rate * pop_size)) + + # Elitism: preserve top individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(range(elite_count), 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + elite_target_idx = np.random.randint(elite_count) + trial = trial + np.random.rand(self.dim) * (elite_pop[elite_target_idx] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > pop_size: + self.archive = self.archive[-pop_size:] + + if self.budget % int(pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack( + (elite_fitness, fitness[elite_count:] if len(fitness) > elite_count else np.array([])) + ) + + pop = combined_pop[:pop_size] # Ensure pop size consistency + fitness = combined_fitness[:pop_size] # Ensure fitness size consistency + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.01 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedDualStrategyElitistDE_v2.py b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyElitistDE_v2.py new file mode 100644 index 000000000..15b22ab16 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDualStrategyElitistDE_v2.py @@ -0,0 +1,125 @@ +import numpy as np + + +class RefinedEnhancedDualStrategyElitistDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.local_search_prob = 0.15 + self.archive = [] # Initialize the archive + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.02 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedDynamicAdaptiveHybridOptimization.py b/nevergrad/optimization/lama/RefinedEnhancedDynamicAdaptiveHybridOptimization.py new file mode 100644 index 000000000..72a29e047 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDynamicAdaptiveHybridOptimization.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.stats import qmc + + +class RefinedEnhancedDynamicAdaptiveHybridOptimization: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + self.diversity_threshold = 1e-3 + self.diversity_factor = 0.1 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < self.diversity_threshold: + perturbation = np.random.uniform( + -self.diversity_factor, self.diversity_factor, self.dim + ) + if fitness[i] > fitness[j]: + population[i] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[i] = func(population[i]) + else: + population[j] = np.clip( + random_vector() + perturbation, self.bounds[0], self.bounds[1] + ) + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + return qmc.scale(samples, self.bounds[0], self.bounds[1]) + + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + elite_count = int(0.1 * self.population_size) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = [population[i] for i in elite_indices] + new_population = elite_population.copy() + new_fitness = [fitness[i] for i in elite_indices] + + for j in range(elite_count, self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + new_population.append(new_x) + new_fitness.append(new_f) + success_count += 1 + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + else: + new_population.append(target) + new_fitness.append(fitness[j]) + + population = new_population + fitness = new_fitness + + self.temperature *= self.cooling_rate + maintain_diversity(population, fitness) + + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedEnhancedDynamicAdaptiveHybridOptimization(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedEnhancedDynamicDualStrategyHybridDE.py b/nevergrad/optimization/lama/RefinedEnhancedDynamicDualStrategyHybridDE.py new file mode 100644 index 000000000..af0706432 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedDynamicDualStrategyHybridDE.py @@ -0,0 +1,145 @@ +import numpy as np + + +class RefinedEnhancedDynamicDualStrategyHybridDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.final_pop_size = 20 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.initial_crossover_prob = 0.9 + self.final_crossover_prob = 0.6 + self.elitism_rate = 0.3 + self.local_search_prob = 0.3 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop_size = self.initial_pop_size + pop = np.random.uniform(lower_bound, upper_bound, (pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor and crossover probability + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.initial_pop_size)) + ) + crossover_prob = self.initial_crossover_prob - ( + (self.initial_crossover_prob - self.final_crossover_prob) + * (generation / (self.budget / self.initial_pop_size)) + ) + + # Adaptive population size + pop_size = max( + self.final_pop_size, + int( + self.initial_pop_size + - (self.initial_pop_size - self.final_pop_size) + * (generation / (self.budget / self.initial_pop_size)) + ), + ) + elite_count = max(1, int(self.elitism_rate * pop_size)) + + # Elitism: preserve top individuals + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(elite_count), 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + else: + idxs = np.random.choice(range(pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + elite_target_idx = np.random.randint(elite_count) + trial = trial + np.random.rand(self.dim) * (elite_pop[elite_target_idx] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.initial_pop_size: + self.archive = self.archive[-self.initial_pop_size :] + + if self.budget % int(pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack( + (elite_fitness, fitness[elite_count:] if len(fitness) > elite_count else np.array([])) + ) + + pop = combined_pop[:pop_size] # Ensure pop size consistency + fitness = combined_fitness[:pop_size] # Ensure fitness size consistency + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.01 * (np.random.rand(self.dim) - 0.5) # Smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedAdaptiveRestartDE.py b/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedAdaptiveRestartDE.py new file mode 100644 index 000000000..5cefd09e9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedAdaptiveRestartDE.py @@ -0,0 +1,115 @@ +import numpy as np + + +class RefinedEnhancedEliteGuidedAdaptiveRestartDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.8 + self.elitism_rate = 0.2 + self.archive = [] + self.restart_threshold = 0.01 + self.max_generations = int(self.budget / self.pop_size) + self.diversity_threshold = 0.1 + + def __call__(self, func): + def initialize_population(): + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + return pop, fitness + + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + pop, fitness = initialize_population() + self.budget -= self.pop_size + + generation = 0 + best_fitness_history = [] + + while self.budget > 0: + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / self.max_generations) + ) + + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + best_fitness_history.append(np.min(fitness)) + + if len(best_fitness_history) > 10: + recent_improvement = np.abs(best_fitness_history[-10] - best_fitness_history[-1]) + if recent_improvement < self.restart_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + continue + + diversity = np.mean(np.std(pop, axis=0)) + if diversity < self.diversity_threshold: + pop, fitness = initialize_population() + self.budget -= self.pop_size + generation = 0 + best_fitness_history = [] + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedMassQGSA_v87.py b/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedMassQGSA_v87.py new file mode 100644 index 000000000..91174a9f0 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedEliteGuidedMassQGSA_v87.py @@ -0,0 +1,129 @@ +import numpy as np + + +class RefinedEnhancedEliteGuidedMassQGSA_v87: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.05 + self.crossover_rate = 0.8 + self.explore_rate = 0.2 + self.inertia_weight = 0.8 + self.social_weight = 1.2 + self.cognitive_weight = 1.2 + self.elite_weight = 0.6 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_elite_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + elite_agent_idx = np.argmin(fitness_values) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[elite_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], + force + self.elite_weight * guide_force, + best_agent, + personal_best_values[i], + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_elite_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/RefinedEnhancedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..20ca7e65a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class RefinedEnhancedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 150 + self.initial_F = 0.5 + self.initial_CR = 0.8 + self.elite_rate = 0.2 + self.local_search_rate = 0.3 + self.memory_size = 50 + self.w = 0.6 + self.c1 = 1.7 + self.c2 = 1.7 + self.phase_switch_ratio = 0.5 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3.py b/nevergrad/optimization/lama/RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3.py new file mode 100644 index 000000000..98b2472e4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3.py @@ -0,0 +1,177 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.8 + self.CR = 0.9 + self.elitism_rate = 0.15 + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.25 + self.adaptive_learning_rate = 0.02 + self.strategy_switches = [0.2, 0.5, 0.8] + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < 0.1: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2.py b/nevergrad/optimization/lama/RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2.py new file mode 100644 index 000000000..1b6914a79 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2.py @@ -0,0 +1,172 @@ +import numpy as np +import scipy.stats as st + + +class RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def levy_flight(Lambda=1.5): + sigma = ( + st.gamma(1 + Lambda).mean() + * np.sin(np.pi * Lambda / 2) + / (st.gamma((1 + Lambda) / 2).mean() * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.normal(0, sigma, self.dim) + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / Lambda) + return step + + def quantum_behavior(population, global_best, alpha=0.25, beta=0.75): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior and Levy flight + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity + levy_flight(), bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedHybridExplorationOptimization.py b/nevergrad/optimization/lama/RefinedEnhancedHybridExplorationOptimization.py new file mode 100644 index 000000000..071cc78cb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHybridExplorationOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class RefinedEnhancedHybridExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.6 # Inertia weight reduced for better local search + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 30 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Increased exploration factor to enhance exploration phase + max_exploration_cycles = 25 # Reduced maximum exploration cycles for quicker reaction + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + prev_f = self.f_opt + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.85 # Decrease learning rate if improvement is not significant + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedEnhancedHybridExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedEnhancedHyperAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/RefinedEnhancedHyperAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..967b2155e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHyperAdaptiveHybridDEPSO.py @@ -0,0 +1,149 @@ +import numpy as np + + +class RefinedEnhancedHyperAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Increased population size for better exploration + w = 0.7 # Slightly higher inertia weight for PSO + c1 = 1.0 # Increased cognitive coefficient for PSO + c2 = 1.2 # Increased social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.6 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63.py b/nevergrad/optimization/lama/RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63.py new file mode 100644 index 000000000..9e6f456fa --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.98, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Increased base mutation factor for more aggressive exploration + self.F_range = F_range # Slightly narrowed mutation range for controlled diversity + self.CR = CR # Increased crossover probability for enhanced exploratory capabilities + self.elite_fraction = ( + elite_fraction # Slightly increased elite fraction to focus more on the best candidates + ) + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy maintains dynamic adaptation to fitness landscape + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Select base individual from elite with a modified strategy for dynamic adaptation + if np.random.rand() < 0.8: + base = best_individual # Prefer the current best individual frequently + else: + base = population[np.random.choice(elite_indices)] + else: + # Use random elite as base + base = population[np.random.choice(elite_indices)] + + # Mutation factor F dynamically adjusted + F = self.F_base + (np.random.rand() * 2 - 1) * self.F_range + + # Mutation (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover (binomial) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedEnhancedHyperStrategicOptimizerV57.py b/nevergrad/optimization/lama/RefinedEnhancedHyperStrategicOptimizerV57.py new file mode 100644 index 000000000..247da5afd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedHyperStrategicOptimizerV57.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedEnhancedHyperStrategicOptimizerV57: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Optimized base mutation factor for balanced mutation + self.F_range = F_range # Reduced mutation range for improved solution stability + self.CR = CR # Adjusted crossover probability for better exploration-exploitation trade-off + self.elite_fraction = ( + elite_fraction # Increased elite fraction for more focused search on promising areas + ) + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy for dynamic adaptation to problem landscape + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Decide base vector for mutation + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.8: # Adjusted probability for selecting the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjust mutation factor dynamically + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate and select + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if the budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedEnhancedMetaNetAQAPSOv7.py b/nevergrad/optimization/lama/RefinedEnhancedMetaNetAQAPSOv7.py new file mode 100644 index 000000000..165b4a4c7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedMetaNetAQAPSOv7.py @@ -0,0 +1,123 @@ +import numpy as np + + +class RefinedEnhancedMetaNetAQAPSOv7: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.05 + self.max_local_search_attempts = 3 + self.meta_net_iters = 1500 + self.meta_net_lr = 0.2 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedOptimizedEvolutiveStrategy.py b/nevergrad/optimization/lama/RefinedEnhancedOptimizedEvolutiveStrategy.py new file mode 100644 index 000000000..7792ee0b8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedOptimizedEvolutiveStrategy.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedEnhancedOptimizedEvolutiveStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=50): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate=0.1, mutation_strength=1.0): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2, crossover_rate=0.9): + if np.random.rand() < crossover_rate: + alpha = np.random.rand(self.dim) + return alpha * parent1 + (1 - alpha) * parent2 + else: + return parent1 if np.random.rand() > 0.5 else parent2 + + def __call__(self, func): + # Parameters + population_size = 50 + num_generations = self.budget // population_size + num_best = 5 + mutation_rate = 0.1 + mutation_strength = 1.0 + crossover_rate = 0.9 + decay_factor = 0.98 + + # Initialize + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + # Evolution loop + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + # Generate new population using crossover and mutation + new_population = [] + while len(new_population) < population_size: + parents = np.random.choice(num_best, 2, replace=False) + child = self.crossover( + best_population[parents[0]], best_population[parents[1]], crossover_rate + ) + new_population.append(child) + population = np.array(new_population) + population = self.mutate(population, mutation_rate, mutation_strength) + mutation_strength *= decay_factor + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/RefinedEnhancedPrecisionEvolutionaryOptimizerV40.py b/nevergrad/optimization/lama/RefinedEnhancedPrecisionEvolutionaryOptimizerV40.py new file mode 100644 index 000000000..9cce271d4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedPrecisionEvolutionaryOptimizerV40.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedEnhancedPrecisionEvolutionaryOptimizerV40: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.6, + F_range=0.35, + CR=0.97, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Adjusted base mutation factor for enhanced exploration + self.F_range = F_range # Refined range for mutation factor to prioritize stable convergence + self.CR = CR # Increased crossover probability to enhance offspring quality + self.elite_fraction = elite_fraction # Increased elite fraction for more robust elitism + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy with refined parameters + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Enhanced adaptive mutation strategy with adjusted base selection probability + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.8: # Higher usage of the leading individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + F = self.F_base + (np.random.rand() - 0.5) * 2 * self.F_range # Dynamically adjusted F + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Refined binomial crossover technique + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedEnhancedQAPSOAIRVCHRLS.py b/nevergrad/optimization/lama/RefinedEnhancedQAPSOAIRVCHRLS.py new file mode 100644 index 000000000..1184b7a8d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedQAPSOAIRVCHRLS.py @@ -0,0 +1,114 @@ +import numpy as np + + +class RefinedEnhancedQAPSOAIRVCHRLS: + def __init__( + self, + budget=1000, + num_particles=30, + cognitive_weight=1.5, + social_weight=2.0, + acceleration_coeff=1.1, + restart_threshold=50, + restart_prob=0.1, + velocity_clamp=0.5, + hybrid_restart_interval=100, + local_search_radius=0.05, + local_search_samples=20, + ): + self.budget = budget + self.num_particles = num_particles + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.acceleration_coeff = acceleration_coeff + self.restart_threshold = restart_threshold + self.restart_prob = restart_prob + self.velocity_clamp = velocity_clamp + self.hybrid_restart_interval = hybrid_restart_interval + self.local_search_radius = local_search_radius + self.local_search_samples = local_search_samples + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_samples): + x_new = x + np.random.uniform(-self.local_search_radius, self.local_search_radius, size=self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def __call__(self, func): + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + restart_counter = 0 + + for t in range(1, self.budget + 1): + inertia_weight = 0.5 + 0.5 * (1 - t / self.budget) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + self.cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + self.social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = self.acceleration_coeff * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += accel + + # Velocity clamping + particles_vel[i] = np.clip(particles_vel[i], -self.velocity_clamp, self.velocity_clamp) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if np.random.rand() < self.restart_prob: # Random restart with probability restart_prob + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + restart_counter += 1 + + if restart_counter >= self.restart_threshold or t % self.hybrid_restart_interval == 0: + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + restart_counter = 0 + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2.py b/nevergrad/optimization/lama/RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2.py new file mode 100644 index 000000000..ae6812b39 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 60 # Adjusted population size + self.sigma = 0.2 # Reduced sigma for better precision + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.6 # Reduced differential weight for better stability + self.CR = 0.85 # Slightly reduced crossover rate for more controlled diversity + self.elitism_rate = 0.2 # Increased elitism rate + self.eval_count = 0 + self.alpha_levy = 0.01 + self.levy_prob = 0.2 # Reduced levy probability to avoid excessive randomness + self.adaptive_learning_rate = 0.01 # Reduced adaptive learning rate for stability + self.strategy_switches = [0.2, 0.5, 0.8] + self.local_opt_prob = 0.25 # Increased probability of local optimization + self.learning_rate_decay = 0.98 # Increased learning rate decay + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.2 # probability to apply hybridization + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedEnhancedRAMEDSProV3.py b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSProV3.py new file mode 100644 index 000000000..bee2684e0 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSProV3.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedEnhancedRAMEDSProV3: + def __init__( + self, + budget, + population_size=100, + crossover_rate=0.9, + F_min=0.5, + F_max=0.9, + memory_size=30, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite storage + memory = population[: self.memory_size].copy() + memory_fitness = fitness[: self.memory_size].copy() + elite = population[: self.elite_size].copy() + elite_fitness = fitness[: self.elite_size].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite and memory based on current best + for i in range(self.population_size): + # Mutation: DE/rand-to-best/1 with elite adaptation + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + a, b, c = population[np.random.choice(self.population_size, 3, replace=False)] + mutant = np.clip(a + F * (best_solution - b + c), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory and elite + if trial_fitness < np.max(memory_fitness): + worst_mem_idx = np.argmax(memory_fitness) + memory[worst_mem_idx] = trial + memory_fitness[worst_mem_idx] = trial_fitness + + if trial_fitness < np.max(elite_fitness): + worst_elite_idx = np.argmax(elite_fitness) + elite[worst_elite_idx] = trial + elite_fitness[worst_elite_idx] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv3.py b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv3.py new file mode 100644 index 000000000..32ae6ba65 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv3.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedEnhancedRAMEDSv3: + def __init__(self, budget, population_size=50, initial_crossover_rate=0.9, memory_size=50, elite_size=10): + self.budget = budget + self.population_size = population_size + self.initial_crossover_rate = initial_crossover_rate + self.memory_size = memory_size + self.elite_size = elite_size + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal modulation + F = 0.5 + 0.5 * np.sin(2 * np.pi * evaluations / self.budget) + # Dynamic crossover rate adjustment + cr = self.initial_crossover_rate * (1 - evaluations / self.budget) + + # Mutation: DE/rand-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip( + population[i] + F * (best_solution - population[i] + a - b), self.lb, self.ub + ) + + # Crossover + cross_points = np.random.rand(self.dimension) < cr + trial = np.where(cross_points, mutant, population[i]) + + # Selection and adaptive memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with a probability + if evaluations % 10 == 0: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv4.py b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv4.py new file mode 100644 index 000000000..4f7c31194 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedRAMEDSv4.py @@ -0,0 +1,95 @@ +import numpy as np + + +class RefinedEnhancedRAMEDSv4: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + reinit_cycle=100, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.reinit_cycle = reinit_cycle + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + if evaluations % self.reinit_cycle == 0 and evaluations != 0: + # Reinitialize portion of the population + reinit_indices = np.random.choice( + range(self.population_size), size=self.population_size // 5, replace=False + ) + population[reinit_indices] = self.lb + (self.ub - self.lb) * np.random.rand( + len(reinit_indices), self.dimension + ) + fitness[reinit_indices] = np.array( + [func(individual) for individual in population[reinit_indices]] + ) + evaluations += len(reinit_indices) + + # Update elite solutions + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Evolution steps + for i in range(self.population_size): + F = self.F_max * np.sin(np.pi * evaluations / self.budget) # Sinusoidal mutation factor + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + elite_or_best = ( + best_solution if np.random.rand() < 0.5 else elite[np.random.randint(self.elite_size)] + ) + mutant = np.clip( + population[i] + F * (elite_or_best - population[i] + a - b), self.lb, self.ub + ) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedEnhancedStrategyDE.py b/nevergrad/optimization/lama/RefinedEnhancedStrategyDE.py new file mode 100644 index 000000000..68006801f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedStrategyDE.py @@ -0,0 +1,75 @@ +import numpy as np + + +class RefinedEnhancedStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_range=0.3, CR=0.8, strategy="adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically based on adaptive strategy + if self.strategy == "adaptive": + if np.random.rand() < 0.5: + # Choose among the top performers + idxs = np.argsort(fitness)[:3] # Select three best for diversity + base = population[idxs[np.random.randint(3)]] + else: + # Randomly choose base from remaining population + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + + # Mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Enhanced Crossover using more aggressive CR updates + cross_points = np.random.rand(self.dim) < self.CR + 0.1 * np.random.randn() + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedEnhancedUltraRefinedRAMEDS.py b/nevergrad/optimization/lama/RefinedEnhancedUltraRefinedRAMEDS.py new file mode 100644 index 000000000..0730747c6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnhancedUltraRefinedRAMEDS.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedEnhancedUltraRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor using a sigmoid function + F = self.F_min + (self.F_max - self.F_min) / (1 + np.exp(-10 * (evaluations / self.budget - 0.5))) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedEnsembleAdaptiveQuantumDE.py b/nevergrad/optimization/lama/RefinedEnsembleAdaptiveQuantumDE.py new file mode 100644 index 000000000..363aecfd9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEnsembleAdaptiveQuantumDE.py @@ -0,0 +1,130 @@ +import numpy as np + + +class RefinedEnsembleAdaptiveQuantumDE: + def __init__(self, budget=10000, population_size=100, elite_size=10, local_search_steps=100): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + + def local_search(self, elite_individual, func): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + F, Cr = 0.8, 0.9 + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(population, F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(population[i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitness)[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = population[elite_idx] - population[worst_idx] + new_candidate = population[worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitness[worst_idx]: + population[worst_idx] = new_candidate + fitness[worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedEvolutionaryGradientHybridOptimizerV3.py b/nevergrad/optimization/lama/RefinedEvolutionaryGradientHybridOptimizerV3.py new file mode 100644 index 000000000..d131515ab --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEvolutionaryGradientHybridOptimizerV3.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedEvolutionaryGradientHybridOptimizerV3: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.58, + F_range=0.42, + CR=0.9, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.7: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedEvolutionaryTuningStrategy.py b/nevergrad/optimization/lama/RefinedEvolutionaryTuningStrategy.py new file mode 100644 index 000000000..8473c7bcc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedEvolutionaryTuningStrategy.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedEvolutionaryTuningStrategy: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 100 # Adjusted population size for better handling in various landscapes + mutation_rate = 0.1 # Adjusted mutation rate for maintaining diversity + mutation_scale = 0.1 # Mutation scale maintained for precise mutations + crossover_rate = 0.7 # Adjusted crossover rate for optimal mixing + elite_size = 10 # Percentage of population to maintain as elite + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Selection via tournament selection + tournament_size = 5 + selected_indices = [] + for _ in range(population_size - elite_size): + participants = np.random.choice(population_size, tournament_size, replace=False) + best_participant = participants[np.argmin(fitness[participants])] + selected_indices.append(best_participant) + + mating_pool = population[selected_indices] + + # Crossover + np.random.shuffle(mating_pool) + children = [] + for i in range(0, len(selected_indices) - 1, 2): + if np.random.random() < crossover_rate: + cross_point = np.random.randint(1, self.dim) + child1 = np.concatenate((mating_pool[i][:cross_point], mating_pool[i + 1][cross_point:])) + child2 = np.concatenate((mating_pool[i + 1][:cross_point], mating_pool[i][cross_point:])) + else: + child1, child2 = mating_pool[i], mating_pool[i + 1] + children.append(child1) + children.append(child2) + + # Mutation + children = np.array(children) + mutation_mask = np.random.rand(children.shape[0], self.dim) < mutation_rate + mutations = np.random.normal(0, mutation_scale, children.shape) + children = np.clip(children + mutation_mask * mutations, self.lb, self.ub) + + # Evaluate new individuals + new_fitness = np.array([func(x) for x in children]) + evaluations += len(children) + + # Elitism and replacement + elites_indices = np.argsort(fitness)[:elite_size] + elites = population[elites_indices] + elite_fitness = fitness[elites_indices] + + combined_population = np.vstack([elites, children]) + combined_fitness = np.concatenate([elite_fitness, new_fitness]) + + # Select the next generation + sorted_indices = np.argsort(combined_fitness) + population = combined_population[sorted_indices[:population_size]] + fitness = combined_fitness[sorted_indices[:population_size]] + + # Track the best found solution + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedGlobalClimbingOptimizerV2.py b/nevergrad/optimization/lama/RefinedGlobalClimbingOptimizerV2.py new file mode 100644 index 000000000..ab376594f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGlobalClimbingOptimizerV2.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedGlobalClimbingOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 200 # Reduced population size for faster convergence + elite_size = 30 # Reduced elite size to intensify the search in promising areas + evaluations = 0 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.05 # Reduced mutation scale for finer exploration + adaptive_factor = 0.95 # Slower adaptation rate + recombination_prob = 0.7 # Lower recombination rate to favor exploration + + # Evolution loop + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + # Differential evolution inspired crossover strategy + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) # Differential mutation + child = np.clip(child, self.lb, self.ub) + else: + # Mutation and selection + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Update the best solution found + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + # Adaptive mutation scaling and replacement strategy + if evaluations % 300 == 0: + mutation_scale *= adaptive_factor # Decrement to stabilize as optimization progresses + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + for idx in range(population_size - elite_size): + if np.random.rand() < 0.15: # Introducing fresh blood more conservatively + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedGlobalLocalBalancingOptimizer.py b/nevergrad/optimization/lama/RefinedGlobalLocalBalancingOptimizer.py new file mode 100644 index 000000000..0558379d7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGlobalLocalBalancingOptimizer.py @@ -0,0 +1,69 @@ +import numpy as np + + +class RefinedGlobalLocalBalancingOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=150): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.global_influence = 0.8 # Refined influence setting for global best + self.local_influence = 0.2 # Increased local influence for better fine-tuning + self.vel_scale = 0.1 # Fine-tuning velocity scaling + self.learning_rate = 0.5 # Optimized learning rate for balanced convergence + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = ( + np.random.uniform(-1, 1, (self.particles, self.dimension)) + * (self.bounds[1] - self.bounds[0]) + * 0.1 + ) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + + personal_best_positions = positions.copy() + personal_best_fitness = fitness.copy() + + best_global_position = positions[np.argmin(fitness)] + best_global_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.vel_scale * velocities[i] + + self.global_influence * r1 * (personal_best_positions[i] - positions[i]) + + self.local_influence * r2 * (best_global_position - positions[i]) + ) + + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < best_global_fitness: + best_global_position = positions[i] + best_global_fitness = new_fitness + + if evaluations >= self.budget: + break + + return best_global_fitness, best_global_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedGlobalStructureAdaptiveEvolverV2.py b/nevergrad/optimization/lama/RefinedGlobalStructureAdaptiveEvolverV2.py new file mode 100644 index 000000000..9bc2b5274 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGlobalStructureAdaptiveEvolverV2.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedGlobalStructureAdaptiveEvolverV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 + elite_size = 50 + evaluations = 0 + mutation_scale = 0.15 + adaptive_factor = 0.9 + recombination_prob = 0.65 + innovators_factor = 0.15 # Increase to improve exploration + + # Initialize population more strategically + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + # Select elites based on a roulette wheel selection mechanism for diversity + fitness_prob = 1 / (1 + fitness - fitness.min()) + fitness_prob /= fitness_prob.sum() + elite_indices = np.random.choice(population_size, elite_size, replace=False, p=fitness_prob) + elite_individuals = population[elite_indices] + elite_fitness = fitness[elite_indices] + + # Generate new candidates + new_population = [] + new_fitness = [] + for _ in range(population_size - elite_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(elite_size, 3, replace=False) + x0, x1, x2 = elite_individuals[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + idx = np.random.choice(elite_size) + child = elite_individuals[idx] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + new_fitness.append(child_fitness) + + # Add innovators to explore more of the search space + innovators = np.random.uniform( + self.lb, self.ub, (int(population_size * innovators_factor), self.dim) + ) + innovator_fitness = np.array([func(ind) for ind in innovators]) + evaluations += len(innovators) + + # Form the new population from elite, new candidates, and innovators + population = np.vstack((elite_individuals, new_population, innovators)) + fitness = np.hstack((elite_fitness, new_fitness, innovator_fitness)) + + # Adaptive mutation scale adjustment + mutation_scale *= adaptive_factor + if mutation_scale < 0.05: + mutation_scale = 0.15 # Reset mutation scale + + # Retain best solutions based on fitness + best_indices = np.argsort(fitness)[:population_size] + population = population[best_indices] + fitness = fitness[best_indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV2.py b/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV2.py new file mode 100644 index 000000000..0d9ff8eaa --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV2.py @@ -0,0 +1,91 @@ +import numpy as np + + +class RefinedGlobalStructureAwareOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 100 + elite_size = 20 + evaluations = 0 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + mutation_scale = 0.08 + adaptive_factor = 0.95 + recombination_prob = 0.9 + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + for idx in range(population_size - elite_size): + if np.random.rand() < 0.25: + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + fitness[idx] = func(population[idx]) + evaluations += 1 + + # Enhanced global structure-aware mutation for better exploration + if evaluations % 400 == 0: + structure_scale = 0.6 + structure_population = np.random.normal(0, structure_scale, (population_size // 4, self.dim)) + structure_population = np.clip( + structure_population + + population[np.random.choice(population_size, population_size // 4)], + self.lb, + self.ub, + ) + structure_fitness = np.array([func(ind) for ind in structure_population]) + evaluations += population_size // 4 + + combined_population = np.concatenate((population, structure_population), axis=0) + combined_fitness = np.concatenate((fitness, structure_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV3.py b/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV3.py new file mode 100644 index 000000000..879273036 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGlobalStructureAwareOptimizerV3.py @@ -0,0 +1,92 @@ +import numpy as np + + +class RefinedGlobalStructureAwareOptimizerV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 150 + elite_size = 45 + evaluations = 0 + mutation_scale = 0.08 + adaptive_factor = 0.95 + recombination_prob = 0.95 + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(population_size): + if np.random.rand() < recombination_prob: + indices = np.random.choice(population_size, 3, replace=False) + x0, x1, x2 = population[indices] + child = x0 + mutation_scale * (x1 - x2) + child = np.clip(child, self.lb, self.ub) + else: + child = population[i] + np.random.normal(0, mutation_scale, self.dim) + child = np.clip(child, self.lb, self.ub) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < fitness[i]: + new_population.append(child) + new_fitness.append(child_fitness) + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < self.f_opt: + self.f_opt = fitness[current_best_idx] + self.x_opt = population[current_best_idx] + + mutation_scale *= adaptive_factor + elite_indices = np.argsort(fitness)[:elite_size] + elite_individuals = population[elite_indices] + + for idx in range(population_size - elite_size): + if np.random.rand() < 0.4: # Enhanced mutation within elites + replacement_idx = np.random.choice(elite_size) + population[idx] = elite_individuals[replacement_idx] + np.random.normal( + 0, mutation_scale, self.dim + ) + population[idx] = np.clip(population[idx], self.lb, self.ub) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if evaluations % 250 == 0: # More frequent and varied global structure mutations + structure_scale = 0.3 + structure_population = np.random.normal(0, structure_scale, (population_size // 2, self.dim)) + structure_population = np.clip( + structure_population + + population[np.random.choice(population_size, population_size // 2)], + self.lb, + self.ub, + ) + structure_fitness = np.array([func(ind) for ind in structure_population]) + evaluations += population_size // 2 + + combined_population = np.concatenate((population, structure_population), axis=0) + combined_fitness = np.concatenate((fitness, structure_fitness), axis=0) + + indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[indices] + fitness = combined_fitness[indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedGradientBalancedExplorationPSO.py b/nevergrad/optimization/lama/RefinedGradientBalancedExplorationPSO.py new file mode 100644 index 000000000..6757bad4b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBalancedExplorationPSO.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedGradientBalancedExplorationPSO: + def __init__( + self, + budget=10000, + population_size=50, + initial_inertia=0.95, + final_inertia=0.5, + cognitive_weight=2.0, + social_weight=1.8, + exploration_factor=0.1, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.exploration_factor = exploration_factor + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits + self.evolution_rate = (initial_inertia - final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + for i in range(self.population_size): + r1, r2 = np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + random_exploration = self.exploration_factor * np.random.normal( + 0, 1, self.dim + ) # Random exploration factor + + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + random_exploration + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration.py b/nevergrad/optimization/lama/RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration.py new file mode 100644 index 000000000..067ee77f8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration.py @@ -0,0 +1,180 @@ +import numpy as np + + +class RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Initial cooling rate + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive beta and alpha adjustments based on phases + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Introducing crossover mechanism to create new candidates + if evaluations % (self.budget // 5) == 0: + for _ in range(memory_size // 2): + parent1 = memory[np.random.randint(memory_size)] + parent2 = memory[np.random.randint(memory_size)] + x_crossover = self._crossover(parent1, parent2) + f_crossover = func(x_crossover) + evaluations += 1 + if f_crossover < self.f_opt: + self.f_opt = f_crossover + self.x_opt = x_crossover + + worst_idx = np.argmax(memory_scores) + if f_crossover < memory_scores[worst_idx]: + memory[worst_idx] = x_crossover + memory_scores[worst_idx] = f_crossover + + # Introducing mutation mechanism to create new candidates + if evaluations % (self.budget // 3) == 0: + for i in range(memory_size // 3): + x_mut = memory[np.random.randint(memory_size)] + x_mut += np.random.normal(0, 0.1, self.dim) + x_mut = np.clip(x_mut, func.bounds.lb, func.bounds.ub) + f_mut = func(x_mut) + evaluations += 1 + if f_mut < self.f_opt: + self.f_opt = f_mut + self.x_opt = x_mut + + worst_idx = np.argmax(memory_scores) + if f_mut < memory_scores[worst_idx]: + memory[worst_idx] = x_mut + memory_scores[worst_idx] = f_mut + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x + + def _crossover(self, parent1, parent2): + crossover_point = np.random.randint(1, self.dim - 1) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + return np.clip(child, -5.0, 5.0) diff --git a/nevergrad/optimization/lama/RefinedGradientBoostedMemoryAnnealing.py b/nevergrad/optimization/lama/RefinedGradientBoostedMemoryAnnealing.py new file mode 100644 index 000000000..e5924a478 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBoostedMemoryAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class RefinedGradientBoostedMemoryAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.97 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 15 # Increased memory size for better diversity + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + # Define phases for dynamic adaptation + phase1 = self.budget // 4 # Initial exploration phase + phase2 = self.budget // 2 # Intensive search phase + phase3 = 3 * self.budget // 4 # Exploitation phase + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + # Disturbance around current best memory solution + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + # Random memory selection + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Dynamic adjustment of beta and alpha for better exploration-exploitation balance + if evaluations < phase1: + beta = 2.0 # Higher exploration phase + alpha = 0.99 # Slower cooling for thorough exploration + elif evaluations < phase2: + beta = 1.5 # Balanced phase + alpha = 0.97 # Standard cooling rate + elif evaluations < phase3: + beta = 1.0 # Transition to exploitation + alpha = 0.95 # Faster cooling for convergence + else: + beta = 2.5 # Higher acceptance for local search refinement + alpha = 0.92 # Even faster cooling for final convergence + + # Gradient-based local search refinement occasionally + if evaluations % (self.budget // 10) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment to escape local minima and diversify search + if evaluations % (self.budget // 5) == 0 and evaluations < self.budget: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Periodic exploration boost + if evaluations % (self.budget // 4) == 0: + for _ in range(memory_size // 2): # Half the memory size for exploration boost + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + # Update memory with better solutions + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient # Gradient descent step + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..2acfe6dd2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,140 @@ +import numpy as np + + +class RefinedGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealingPlus.py b/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealingPlus.py new file mode 100644 index 000000000..a81574a30 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBoostedMemorySimulatedAnnealingPlus.py @@ -0,0 +1,174 @@ +import numpy as np + + +class RefinedGradientBoostedMemorySimulatedAnnealingPlus: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-6 # Minimum temperature + alpha_initial = 0.96 # Cooling rate for initial phase + beta_initial = 1.5 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + if evaluations < phase1: + beta = 2.0 + alpha = 0.98 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.96 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.94 + else: + beta = 2.5 + alpha = 0.92 + + # Enhanced gradient-based local search refinement + if evaluations % (self.budget // 10) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 6) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.25: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Improved adaptive exploration control + if evaluations % (self.budget // 5) == 0: + adaptive_exploration_radius = 0.2 + 0.8 * (1 - T / T_initial) + for _ in range(memory_size // 3): + x_candidate = memory[ + np.random.randint(memory_size) + ] + adaptive_exploration_radius * np.random.randn(self.dim) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + # Control periodic exploration + if evaluations % (self.budget // 6) == 0: + for _ in range(memory_size // 3): + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=50, step_size=0.005): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedGradientBoostedOptimizer.py b/nevergrad/optimization/lama/RefinedGradientBoostedOptimizer.py new file mode 100644 index 000000000..baae3e0b6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientBoostedOptimizer.py @@ -0,0 +1,63 @@ +import numpy as np + + +class RefinedGradientBoostedOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 100 + mutation_factor = 0.5 # Lower mutation factor to start with finer mutations + crossover_rate = 0.9 # High crossover rate for stronger exploitation + grad_step_size = 0.01 # Step size for gradient approximation + adaptive_rate = 0.02 # More conservative adaptive rate + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Using a hybrid gradient and evolutionary strategy + while evaluations < self.budget: + for i in range(population_size): + # Gradient mutation and differential mutation combined + grad_mutant = population[i] + np.random.randn(self.dim) * grad_step_size + grad_mutant = np.clip(grad_mutant, self.lower_bound, self.upper_bound) + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + diff_mutant = a + mutation_factor * (b - c) + diff_mutant = np.clip(diff_mutant, self.lower_bound, self.upper_bound) + + # Construct trial vector using both mutations + trial_vector = np.where(np.random.rand(self.dim) < 0.5, grad_mutant, diff_mutant) + trial_vector = np.clip(trial_vector, self.lower_bound, self.upper_bound) + + # Crossover + crossover_mask = np.random.rand(self.dim) < crossover_rate + trial_vector = np.where(crossover_mask, trial_vector, population[i]) + + trial_fitness = func(trial_vector) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial_vector + + # Adaptive updates for mutation and crossover rates + mutation_factor = max(0.1, mutation_factor - adaptive_rate * np.random.randn()) + crossover_rate = min(1.0, max(0.5, crossover_rate + adaptive_rate * np.random.randn())) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedGradientGuidedEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedGradientGuidedEvolutionStrategy.py new file mode 100644 index 000000000..f3bd457a9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedGradientGuidedEvolutionStrategy.py @@ -0,0 +1,66 @@ +import numpy as np + + +class RefinedGradientGuidedEvolutionStrategy: + def __init__(self, budget, dim=5, pop_size=100, tau=0.15, sigma_init=0.5, beta=0.1): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.tau = tau # Learning rate for step size adaptation + self.sigma_init = sigma_init # Initial step size + self.beta = beta # Gradient estimation perturbation magnitude + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, individual, sigma): + return np.clip(individual + sigma * np.random.randn(self.dim), self.bounds[0], self.bounds[1]) + + def estimate_gradient(self, func, individual, sigma): + grad = np.zeros(self.dim) + for i in range(self.dim): + perturb = np.zeros(self.dim) + perturb[i] = self.beta * sigma + f_plus = func(individual + perturb) + f_minus = func(individual - perturb) + grad[i] = (f_plus - f_minus) / (2 * self.beta * sigma) + return grad + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + sigma_values = np.full(self.pop_size, self.sigma_init) + + while n_evals < self.budget: + candidates = [] + candidate_f_values = [] + + for idx in range(self.pop_size): + individual = population[idx] + sigma = sigma_values[idx] + gradient = self.estimate_gradient(func, individual, sigma) + individual_new = np.clip(individual - sigma * gradient, self.bounds[0], self.bounds[1]) + f_new = func(individual_new) + n_evals += 1 + + # Collect candidates for selection + candidates.append((individual_new, f_new, sigma)) + + if n_evals >= self.budget: + break + + # Select next generation + sorted_candidates = sorted(candidates, key=lambda x: x[1]) + for i, (ind, f_val, sig) in enumerate(sorted_candidates[: self.pop_size]): + population[i] = ind + f_values[i] = f_val + # Adapt sigma based on ranking in the population + rank = i / self.pop_size + sigma_values[i] = sig * np.exp(self.tau * (rank - 0.5)) + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..1c20569b2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,122 @@ +import numpy as np + + +class RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 50 # Adjusted population size to balance exploration and exploitation + self.sigma = 0.15 # Adjusted step size for further refinement + self.c1 = 0.02 # Refined learning rate for rank-one update + self.cmu = 0.01 # Learning rate for rank-mu update + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.01 # Reduced learning rate for mutation adaptability + self.elitism_rate = 0.2 # Increased elitism rate to retain more top solutions + self.eval_count = 0 + self.F = 0.7 # Increased differential weight + self.CR = 0.85 # Adjusted crossover probability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedHybridAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedHybridAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..8e67f40bc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridAdaptiveDifferentialEvolution.py @@ -0,0 +1,63 @@ +import numpy as np + + +class RefinedHybridAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dimension = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 200 # Further increased population for greater exploratory potential + self.crossover_rate = 0.9 # Higher crossover rate for better gene mixing + self.differential_weight = ( + 0.75 # Fine-tuned differential weight for robust exploration-exploitation balance + ) + self.patience = 30 # Reduced patience for more dynamic adaptation + self.p_adaptive_mutation = 0.2 # Mutation probability starts lower + self.adaptation_factor = 1.05 # Controlled adaptation rate for mutation probability + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + evaluations = self.population_size + generations_since_last_improvement = 0 + + while evaluations < self.budget: + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.differential_weight * (b - c), self.lower_bound, self.upper_bound) + + # Cross-over operation + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + generations_since_last_improvement = 0 + else: + generations_since_last_improvement += 1 + + if evaluations >= self.budget: + break + + if generations_since_last_improvement > self.patience: + # Adapt mutation probability dynamically to enhance exploration + generations_since_last_improvement = 0 + self.p_adaptive_mutation = min(1, self.p_adaptive_mutation * self.adaptation_factor) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridAdaptiveGradientPSO.py b/nevergrad/optimization/lama/RefinedHybridAdaptiveGradientPSO.py new file mode 100644 index 000000000..a06568d04 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridAdaptiveGradientPSO.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedHybridAdaptiveGradientPSO: + def __init__( + self, + budget=10000, + population_size=200, + initial_inertia=0.9, + final_inertia=0.4, + cognitive_weight=2.0, + social_weight=2.0, + gradient_weight=0.1, + mutation_rate=0.15, + mutation_intensity=0.05, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.gradient_weight = gradient_weight + self.mutation_rate = mutation_rate + self.mutation_intensity = mutation_intensity + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.evolution_rate = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + self.inertia_weight = max( + self.initial_inertia - (self.evolution_rate * evaluation_counter), self.final_inertia + ) + + for i in range(self.population_size): + r1, r2, r3 = np.random.rand(), np.random.rand(), np.random.rand() + personal_component = r1 * self.cognitive_weight * (personal_best_positions[i] - particles[i]) + social_component = r2 * self.social_weight * (global_best_position - particles[i]) + gradient_step = ( + r3 + * self.gradient_weight + * (particles[i] - global_best_position) + / np.linalg.norm(particles[i] - global_best_position + 1e-8) + ) + + # Mutation with a certain probability + if np.random.rand() < self.mutation_rate: + mutation_vector = np.random.normal(0, self.mutation_intensity, self.dim) + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + + mutation_vector + ) + else: + velocities[i] = ( + self.inertia_weight * velocities[i] + + personal_component + + social_component + - gradient_step + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/RefinedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/RefinedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..9f3319bab --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class RefinedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 120 # Increased for better search space coverage + self.initial_F = 0.8 # Tuned mutation factor for balanced search + self.initial_CR = 0.8 # Tuned crossover rate for better recombination + self.elite_rate = 0.2 # Elite rate for strong convergence + self.local_search_rate = 0.5 # Local search rate for intensive local searches + self.memory_size = 20 # Larger memory size for better adaptive parameters + self.w = 0.6 # Lower inertia weight for better convergence + self.c1 = 2.0 # Stronger cognitive component for better individual best search + self.c2 = 2.0 # Stronger social component for better global best search + self.phase_switch_ratio = 0.5 # Adjusted phase switch ratio for balance + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # More precise local search for exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) # Fixed typo here + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedHybridCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedHybridCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..00ffe5c7e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,156 @@ +import numpy as np + + +class RefinedHybridCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.sigma = 0.3 + self.c1 = 0.1 + self.cmu = 0.05 + self.damping = 1 + (self.dim / (2 * self.population_size)) + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.adaptive_learning_rate = 0.02 + self.elitism_rate = 0.20 # Increased elitism rate + self.eval_count = 0 + self.F = 0.7 + self.CR = 0.85 + self.alpha_levy = 0.01 # Levy flight parameter + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(population, fitness): + """Switch strategy based on current performance.""" + strategy = "default" + if self.eval_count < self.budget * 0.33: + strategy = "explorative" + self.F = 0.9 + self.CR = 0.9 + elif self.eval_count < self.budget * 0.66: + strategy = "balanced" + self.F = 0.7 + self.CR = 0.85 + else: + strategy = "exploitative" + self.F = 0.5 + self.CR = 0.75 + return strategy + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < 0.2: + population[i] = levy_flight_step(population[i]) + return population + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching(population, fitness) + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedHybridCovarianceMatrixDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedHybridDEPSO.py b/nevergrad/optimization/lama/RefinedHybridDEPSO.py new file mode 100644 index 000000000..688dd5c52 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridDEPSO.py @@ -0,0 +1,160 @@ +import numpy as np + + +class RefinedHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros_like(population) + return population, fitness, velocities + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + new_velocities = np.zeros_like(new_population) + return new_population, new_fitness, new_velocities + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def lévy_flight(Lambda=1.5): + sigma1 = ( + (np.math.gamma(1 + Lambda) * np.sin(np.pi * Lambda / 2)) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + sigma2 = 1 + u = np.random.normal(0, sigma1, size=self.dim) + v = np.random.normal(0, sigma2, size=self.dim) + step = u / abs(v) ** (1 / Lambda) + return step + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_velocity(velocities, population, pbest, gbest, w, c1=1.5, c2=1.5): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + new_velocities = w * velocities + c1 * r1 * (pbest - population) + c2 * r2 * (gbest - population) + return new_velocities + + def local_search(x): + perturbation = np.random.normal(scale=0.1, size=x.shape) + return np.clip(x + perturbation, bounds[0], bounds[1]) + + population, fitness, velocities = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + pbest = population.copy() + pbest_fitness = fitness.copy() + gbest = population[np.argmin(fitness)] + + last_improvement = evaluations + w = 0.9 # Initial inertia weight + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness, velocities = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + new_velocities = np.zeros_like(velocities) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + if np.random.rand() < 0.3: + # Use Lévy flight mutation with a probability of 30% + mutant = population[i] + lévy_flight() + else: + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < pbest_fitness[i]: + pbest[i] = trial + pbest_fitness[i] = f_trial + + if f_trial < func(gbest): + gbest = trial + + if evaluations >= self.budget: + break + + velocities = update_velocity(velocities, population, pbest, gbest, w) + population = new_population + velocities + population = np.clip(population, bounds[0], bounds[1]) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + w = 0.4 + 0.5 * (1 - evaluations / self.budget) # Adaptive inertia weight + + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridDEPSOWithAdaptiveMemoryV4.py b/nevergrad/optimization/lama/RefinedHybridDEPSOWithAdaptiveMemoryV4.py new file mode 100644 index 000000000..93aa9362f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridDEPSOWithAdaptiveMemoryV4.py @@ -0,0 +1,152 @@ +import numpy as np + + +class RefinedHybridDEPSOWithAdaptiveMemoryV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + elite_size = 5 # Number of elite individuals to maintain diversity + w = 0.7 # Adaptive inertia weight for PSO + c1 = 1.2 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.2: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.2: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + elite_indices = np.argsort(fitness)[:elite_size] + elite_population = population[elite_indices] + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridDEPSOWithDynamicAdaptationV3.py b/nevergrad/optimization/lama/RefinedHybridDEPSOWithDynamicAdaptationV3.py new file mode 100644 index 000000000..b5af2eb08 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridDEPSOWithDynamicAdaptationV3.py @@ -0,0 +1,149 @@ +import numpy as np + + +class RefinedHybridDEPSOWithDynamicAdaptationV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py new file mode 100644 index 000000000..72fd56316 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridDualPhaseParticleSwarmDifferentialEvolution.py @@ -0,0 +1,144 @@ +import numpy as np + + +class RefinedHybridDualPhaseParticleSwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 60 # Increased for better diversity + self.initial_F = 0.9 # Slightly higher for better mutation + self.initial_CR = 0.8 # Slightly lower for better crossover + self.elite_rate = 0.15 # Increased elite rate + self.local_search_rate = 0.4 # Increased for more local refinements + self.memory_size = 5 + self.w = 0.7 # Adjusted inertia weight for better balance + self.c1 = 1.2 # Reduced cognitive component + self.c2 = 1.8 # Increased social component + self.phase_switch_ratio = 0.25 # Adjusted phase switch + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + # Initialize personal best positions and fitness + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + # Initialize global best position and fitness + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + # Track the number of function evaluations + self.eval_count = self.population_size + + # Initialize memory for adaptive parameters + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 # Adjusted for better local search exploitation + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedHybridDualPhaseParticleSwarmDifferentialEvolution(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedHybridDynamicClusterOptimization.py b/nevergrad/optimization/lama/RefinedHybridDynamicClusterOptimization.py new file mode 100644 index 000000000..d06f01d2d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridDynamicClusterOptimization.py @@ -0,0 +1,153 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class RefinedHybridDynamicClusterOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5, alpha=0.01): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return alpha * step + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 100 + elite_size = 5 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + quantum_factor = self.adaptive_parameters(evaluations, self.budget, 0.5, 0.1) + levy_factor = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cluster_count = int(self.adaptive_parameters(evaluations, self.budget, 2, 10)) + + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + kmeans = KMeans(n_clusters=cluster_count) + clusters = kmeans.fit_predict(population) + cluster_centers = kmeans.cluster_centers_ + + for cluster_center in cluster_centers: + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(cluster_center + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + worst_indices = np.argsort(fitness)[-elite_size:] + for idx in worst_indices: + if evaluations < self.budget: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if fitness[idx] < personal_best_fitness[idx]: + personal_best_positions[idx] = population[idx] + personal_best_fitness[idx] = fitness[idx] + + if fitness[idx] < self.f_opt: + self.f_opt = fitness[idx] + self.x_opt = population[idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE.py b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE.py new file mode 100644 index 000000000..6ea111306 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE.py @@ -0,0 +1,88 @@ +import numpy as np + + +class RefinedHybridEliteGuidedMutationDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.3 + self.crossover_prob = 0.9 + self.elitism_rate = 0.2 + self.recombination_prob = 0.7 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation and recombination + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Recombination + if np.random.rand() < self.recombination_prob: + recombination_idx = np.random.choice(range(self.pop_size), 1) + trial = 0.5 * (trial + pop[recombination_idx][0]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v2.py b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v2.py new file mode 100644 index 000000000..85cbfc130 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v2.py @@ -0,0 +1,99 @@ +import numpy as np + + +class RefinedHybridEliteGuidedMutationDE_v2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.4 # Increased final mutation factor for better exploration + self.crossover_prob = 0.9 + self.elitism_rate = 0.25 # Increased elitism rate + self.recombination_prob = 0.7 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation and recombination + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Recombination + if np.random.rand() < self.recombination_prob: + recombination_idx = np.random.choice(range(self.pop_size), 1) + trial = 0.5 * (trial + pop[recombination_idx][0]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Re-evaluate elite individuals periodically for robustness against noisy fitness + if generation % 20 == 0: + elite_fitness = np.array([func(ind) for ind in elite_pop]) + self.budget -= elite_count + elite_indices = np.argsort(elite_fitness)[:elite_count] + elite_pop = elite_pop[elite_indices] + elite_fitness = elite_fitness[elite_indices] + + pop[:elite_count] = elite_pop + fitness[:elite_count] = elite_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v3.py b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v3.py new file mode 100644 index 000000000..f5766dc24 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridEliteGuidedMutationDE_v3.py @@ -0,0 +1,111 @@ +import numpy as np + + +class RefinedHybridEliteGuidedMutationDE_v3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 + self.initial_mutation_factor = 0.8 + self.final_mutation_factor = 0.4 + self.crossover_prob = 0.8 + self.elitism_rate = 0.25 + self.recombination_prob = 0.7 + self.archive = [] + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Incorporate elite-guided mutation and recombination + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Recombination + if np.random.rand() < self.recombination_prob: + recombination_idx = np.random.choice(range(self.pop_size), 1) + trial = 0.5 * (trial + pop[recombination_idx][0]) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % 50 == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + if func(archive_ind) < self.f_opt: + self.f_opt = func(archive_ind) + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Re-evaluate elite individuals periodically for robustness against noisy fitness + if generation % 20 == 0: + elite_fitness = np.array([func(ind) for ind in elite_pop]) + self.budget -= elite_count + elite_indices = np.argsort(elite_fitness)[:elite_count] + elite_pop = elite_pop[elite_indices] + elite_fitness = elite_fitness[elite_indices] + + pop[:elite_count] = elite_pop + fitness[:elite_count] = elite_fitness + + generation += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridEvolutionStrategyV4.py b/nevergrad/optimization/lama/RefinedHybridEvolutionStrategyV4.py new file mode 100644 index 000000000..fd4b22adf --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridEvolutionStrategyV4.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedHybridEvolutionStrategyV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 500 + elite_size = int(0.1 * population_size) # Reduced elite size for more diversity + mutation_rate = 0.07 # Slightly increased mutation rate + mutation_scale = lambda t: 0.1 * np.exp(-0.0001 * t) # Slower mutation scale decay + crossover_rate = 0.9 # Increased crossover rate for more exploration + + local_search_prob = 0.35 # Increased local search probability + local_search_step_scale = lambda t: 0.02 * np.exp(-0.00005 * t) # Slower decay for local search step + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridEvolutionaryAnnealingOptimizer.py b/nevergrad/optimization/lama/RefinedHybridEvolutionaryAnnealingOptimizer.py new file mode 100644 index 000000000..9a898547b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridEvolutionaryAnnealingOptimizer.py @@ -0,0 +1,54 @@ +import numpy as np + + +class RefinedHybridEvolutionaryAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initial temperature for simulated annealing with a more conservative start + T = 1.5 + T_min = 0.001 # Lower minimum temperature for finer control at late stages + alpha = 0.92 # Slower cooling rate to allow more exploration at higher temperatures + F = 0.7 # Mutation factor adjusted for more robust search behavior + CR = 0.9 # Higher crossover probability to encourage more information sharing + + # Increased population size for a broader search space coverage + population_size = 50 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced exploration and exploitation phases + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Introduce a dynamic mutation factor adjusted by temperature + dynamic_F = F * (1 + 0.1 * np.log(1 + T)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Simulated annealing acceptance with a dynamic criterion + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adjust cooling rate dynamically based on progress + adaptive_cooling = alpha + 0.01 * (1 - evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridOptimizer.py b/nevergrad/optimization/lama/RefinedHybridOptimizer.py new file mode 100644 index 000000000..e57953021 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridOptimizer.py @@ -0,0 +1,127 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedHybridOptimizer: + def __init__( + self, + budget=10000, + pop_size=50, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.1, + ): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + return result.x, result.fun + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.pop_size + + while self.eval_count < global_search_budget: + for i in range(self.pop_size): + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice([idx for idx in range(self.pop_size) if idx != i]) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Perform local search on the best individuals + for i in range(self.pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + self.eval_count += local_budget + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridPSODEOptimizer.py b/nevergrad/optimization/lama/RefinedHybridPSODEOptimizer.py new file mode 100644 index 000000000..92618ece0 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridPSODEOptimizer.py @@ -0,0 +1,95 @@ +import numpy as np + + +class RefinedHybridPSODEOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + global_best_position = trial_vector + global_best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridPSODESimulatedAnnealing.py b/nevergrad/optimization/lama/RefinedHybridPSODESimulatedAnnealing.py new file mode 100644 index 000000000..85d735efb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridPSODESimulatedAnnealing.py @@ -0,0 +1,118 @@ +import numpy as np + + +class RefinedHybridPSODESimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def adaptive_parameters(self, evaluations, max_evaluations, start_param, end_param): + progress = evaluations / max_evaluations + return start_param + (end_param - start_param) * progress + + def simulated_annealing(self, current_position, current_fitness, func, temp): + new_position = current_position + np.random.uniform(-0.1, 0.1, self.dim) + new_position = np.clip(new_position, self.lb, self.ub) + new_fitness = func(new_position) + if new_fitness < current_fitness or np.exp((current_fitness - new_fitness) / temp) > np.random.rand(): + return new_position, new_fitness + return current_position, current_fitness + + def __call__(self, func): + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-0.1, 0.1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + inertia_weight = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.4) + cognitive_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + social_coefficient = self.adaptive_parameters(evaluations, self.budget, 2.0, 1.0) + differential_weight = self.adaptive_parameters(evaluations, self.budget, 0.8, 0.2) + crossover_rate = self.adaptive_parameters(evaluations, self.budget, 0.9, 0.3) + temperature = self.adaptive_parameters(evaluations, self.budget, 1.0, 0.01) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + global_best_position = new_position + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + global_best_position = trial_vector + global_best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + population[i], fitness[i] = self.simulated_annealing( + population[i], fitness[i], func, temperature + ) + evaluations += 1 + + if fitness[i] < self.f_opt: + self.f_opt = fitness[i] + self.x_opt = population[i] + global_best_position = population[i] + global_best_fitness = fitness[i] + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridPSO_DE.py b/nevergrad/optimization/lama/RefinedHybridPSO_DE.py new file mode 100644 index 000000000..62938ef94 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridPSO_DE.py @@ -0,0 +1,109 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedHybridPSO_DE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.initial_pop_size = 20 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.initial_pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.initial_pop_size + + # PSO parameters + w = 0.5 # Inertia weight + c1 = 1.5 # Cognitive coefficient + c2 = 1.5 # Social coefficient + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + + while evaluations < self.budget: + new_population = [] + new_fitness = [] + pop_size = len(population) + + for i in range(pop_size): + # PSO update + r1, r2 = np.random.rand(), np.random.rand() + if self.x_opt is not None: + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (self.x_opt - population[i]) + + c2 * r2 * (population[np.argmin(fitness)] - population[i]) + ) + else: + velocities[i] = w * velocities[i] + c2 * r2 * ( + population[np.argmin(fitness)] - population[i] + ) + + trial_pso = population[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + # Mutation strategy from DE + F = 0.8 + CR = 0.9 + indices = np.arange(pop_size) + indices = np.delete(indices, i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, trial_pso) + + # Local Search + if np.random.rand() < 0.25 and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + # Selection + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + if evaluations >= self.budget: + break + + # Elitism: Keep the best individual + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + if best_fitness < self.f_opt: + self.f_opt = best_fitness + self.x_opt = best_individual + + population = np.array(new_population) + fitness = np.array(new_fitness) + + # Diversity Maintenance: Re-initialize if the population converges too tightly + if np.std(fitness) < 1e-5 and evaluations < self.budget: + population = np.array([self.random_bounds() for _ in range(self.initial_pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.initial_pop_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridPrecisionSearch.py b/nevergrad/optimization/lama/RefinedHybridPrecisionSearch.py new file mode 100644 index 000000000..a4ec8940a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridPrecisionSearch.py @@ -0,0 +1,71 @@ +import numpy as np + + +class RefinedHybridPrecisionSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + population_size = 600 + elite_size = int(0.15 * population_size) + mutation_rate = 0.08 + mutation_scale = lambda t: 0.08 * np.exp(-0.0003 * t) + crossover_rate = 0.75 + + local_search_prob = 0.20 # Increased probability for local search + local_search_step_scale = lambda t: 0.02 * np.exp(-0.00002 * t) + + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + evaluations = population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + new_population = [] + elite_indices = np.argsort(fitness)[:elite_size] + elites = population[elite_indices] + + while len(new_population) < population_size - elite_size: + idx1, idx2 = np.random.choice(population_size, 2, replace=False) + parent1, parent2 = population[idx1], population[idx2] + + if np.random.random() < crossover_rate: + point = np.random.randint(1, self.dim) + child = np.concatenate([parent1[:point], parent2[point:]]) + else: + child = parent1.copy() + + if np.random.random() < mutation_rate: + mutation = np.random.normal(0, mutation_scale(evaluations), self.dim) + child = np.clip(child + mutation, self.lb, self.ub) + + if np.random.random() < local_search_prob: + direction = np.random.randn(self.dim) + step = local_search_step_scale(evaluations) + candidate = child + step * direction + candidate = np.clip(candidate, self.lb, self.ub) + if func(candidate) < func(child): + child = candidate + + new_population.append(child) + + new_population = np.vstack(new_population) + new_fitness = np.array([func(x) for x in new_population]) + evaluations += len(new_population) + + population = np.vstack((elites, new_population)) + fitness = np.concatenate([fitness[elite_indices], new_fitness]) + + current_best_idx = np.argmin(fitness) + current_best_f = fitness[current_best_idx] + if current_best_f < self.f_opt: + self.f_opt = current_best_f + self.x_opt = population[current_best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridQuantumAdaptiveDE.py b/nevergrad/optimization/lama/RefinedHybridQuantumAdaptiveDE.py new file mode 100644 index 000000000..f69d911b3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridQuantumAdaptiveDE.py @@ -0,0 +1,136 @@ +import numpy as np + + +class RefinedHybridQuantumAdaptiveDE: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning.""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(50): # increased number of local steps for better refinement + perturbation = np.random.uniform(-0.01, 0.01, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridQuantumLevyAdaptiveSwarm.py b/nevergrad/optimization/lama/RefinedHybridQuantumLevyAdaptiveSwarm.py new file mode 100644 index 000000000..011782815 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridQuantumLevyAdaptiveSwarm.py @@ -0,0 +1,163 @@ +import numpy as np + + +class RefinedHybridQuantumLevyAdaptiveSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return 0.01 * step # Reduced step size for more precise exploitation + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.6 * progress # Enhanced dynamic range + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 + 0.4 * progress + crossover_rate = 0.9 - 0.5 * progress + quantum_factor = 0.5 - 0.3 * progress + levy_factor = 0.05 + 0.45 * progress # Increased max levy factor + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 60 # Reduced population size for more evaluations per individual + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + # DE Mutation and Crossover + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Quantum Particle Update + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + # Levy Flight Local Search + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: # Reduced probability of local search to balance exploration + local_search_iters = 10 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedHybridQuasiRandomDEGradientAnnealing.py b/nevergrad/optimization/lama/RefinedHybridQuasiRandomDEGradientAnnealing.py new file mode 100644 index 000000000..a0bf117c7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHybridQuasiRandomDEGradientAnnealing.py @@ -0,0 +1,142 @@ +import numpy as np +from scipy.stats import qmc + + +class RefinedHybridQuasiRandomDEGradientAnnealing: + def __init__(self, budget, population_size=30, initial_crossover_rate=0.7, initial_mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.mutation_factor = initial_mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.temperature = 1.0 + self.cooling_rate = 0.99 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def quasi_random_sequence(size): + sampler = qmc.Sobol(d=self.dim, scramble=True) + samples = sampler.random(size) + samples = qmc.scale(samples, self.bounds[0], self.bounds[1]) + return samples + + # Initialize population + population = quasi_random_sequence(self.population_size) + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + + while evaluations < self.budget: + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + evaluations += 1 + + # Simulated Annealing acceptance criterion + if new_f < fitness[j] or np.exp(-(new_f - fitness[j]) / self.temperature) > np.random.rand(): + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Cool down the temperature + self.temperature *= self.cooling_rate + + # Maintain diversity + maintain_diversity(population, fitness) + + # Adaptive mutation and crossover strategies based on success count + if success_count / self.population_size > 0.2: + self.base_lr *= 1.05 + self.crossover_rate *= 1.05 + self.mutation_factor = min(1.0, self.mutation_factor * 1.05) + else: + self.base_lr *= 0.95 + self.crossover_rate *= 0.95 + self.mutation_factor = max(0.5, self.mutation_factor * 0.95) + + # Additional perturbation to improve exploration + if evaluations % 100 == 0: + for l in range(self.population_size): + population[l] += np.random.randn(self.dim) * self.base_lr * 0.1 + population[l] = np.clip(population[l], self.bounds[0], self.bounds[1]) + fitness[l] = func(population[l]) + evaluations += 1 + if fitness[l] < self.f_opt: + self.f_opt = fitness[l] + self.x_opt = population[l] + + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.1, 0.9) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedHybridQuasiRandomDEGradientAnnealing(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2.py b/nevergrad/optimization/lama/RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2.py new file mode 100644 index 000000000..b539777e5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2.py @@ -0,0 +1,55 @@ +import numpy as np + + +class RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 300 # Increased population size for enhanced exploration + self.F_base = 0.5 # Lowered base mutation factor for more cautious exploration + self.CR_base = 0.7 # Lower base crossover probability for focused exploitation + self.adaptive_F_amplitude = 0.2 # Reduced mutation factor amplitude for stability + self.adaptive_CR_amplitude = 0.2 # Increased crossover rate amplitude for dynamic adaptation + self.phase_shift = np.pi / 3 # Adjusted phase shift for better phase diversity + + def __call__(self, func): + # Initialize population within the bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main loop over the budget + for i in range(int(self.budget / self.pop_size)): + # Dynamic mutation and crossover factors with phase-shifted sinusoidal modulation + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.adaptive_F_amplitude * np.sin(2 * np.pi * iteration_ratio) + CR = self.CR_base + self.adaptive_CR_amplitude * np.sin( + 2 * np.pi * iteration_ratio + self.phase_shift + ) + + for j in range(self.pop_size): + # Mutation: DE/rand/1/bin with adaptive F + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure boundaries are respected + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedHyperEvolvedDynamicRAMEDS.py b/nevergrad/optimization/lama/RefinedHyperEvolvedDynamicRAMEDS.py new file mode 100644 index 000000000..f9d39c094 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperEvolvedDynamicRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedHyperEvolvedDynamicRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_base=0.8, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_base = crossover_base # Base rate for crossover + self.F_min = F_min # Minimum mutation factor + self.F_max = F_max # Maximum mutation factor + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Tracking best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamically adjust mutation factor based on evaluations and variance of fitness + fitness_variance = np.var(fitness) + F = self.F_min + (self.F_max - self.F_min) * (1 - np.exp(-fitness_variance)) * np.random.rand() + + if evaluations % (self.budget // 10) == 0: + # Periodically update elite repository + sorted_indices = np.argsort(fitness) + elite = population[sorted_indices[: self.elite_size]].copy() + elite_fitness = fitness[sorted_indices[: self.elite_size]].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(best_solution + F * (b - c), lb, ub) + + # Adaptive crossover rate based on the fitness improvement rate + improv_rate = np.abs(fitness[i] - np.mean(fitness)) / (np.std(fitness) + 1e-8) + cross_rate = self.crossover_base + 0.2 * np.tanh(improv_rate) + cross_points = np.random.rand(dimension) < cross_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Memory update with better solutions + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedHyperOptimizedDynamicPrecisionOptimizer.py b/nevergrad/optimization/lama/RefinedHyperOptimizedDynamicPrecisionOptimizer.py new file mode 100644 index 000000000..9e3fb8dd6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperOptimizedDynamicPrecisionOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class RefinedHyperOptimizedDynamicPrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound + self.ub = 5.0 # Upper bound + + def __call__(self, func): + # Initialize advanced temperature and cooling parameters + T = 1.3 # Slightly higher initial temperature for more aggressive early exploration + T_min = 0.0001 # Lower minimum temperature for fine-grained search near the end + alpha = 0.90 # Slower cooling rate to maintain exploration capabilities longer + + population_size = 100 # Increased population size for better coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Adaptive mutation strategy and temperate-dependent dynamic acceptance criteria + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutant vector calculation with dynamic mutation factor based on T and evaluations + dynamic_F = ( + 0.85 + * np.exp(-0.12 * T) + * (0.65 + 0.35 * np.sin(1.5 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR = 0.80 + 0.15 * np.cos( + 1.8 * np.pi * evaluation_count / self.budget + ) # Dynamic crossover probability + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Modified acceptance criterion, more reactive at lower temperatures with enhanced exploration + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.06 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cooling strategy: include a modulated dynamic non-linear adjustment based on the search progress + adaptive_cooling = alpha - 0.02 * np.cos(2.0 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedHyperOptimizedThermalEvolutionaryOptimizer.py b/nevergrad/optimization/lama/RefinedHyperOptimizedThermalEvolutionaryOptimizer.py new file mode 100644 index 000000000..b4b31a21c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperOptimizedThermalEvolutionaryOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class RefinedHyperOptimizedThermalEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters for refined control + T = 1.1 # Optimized starting temperature for better initial exploration + T_min = 0.0005 # Lower minimum temperature for extended fine-tuning phase + alpha = 0.95 # Slower cooling rate to enhance exploration over iterations + + # Mutation and crossover parameters further optimized + F_base = 0.7 # Base mutation factor adjusted for a balance between exploration and exploitation + CR = 0.92 # Crossover probability finely tuned for better solution diversity and quality + + population_size = 85 # Adjusted population size for optimal usage of the budget + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation dynamics with temperature-dependent mutation scaling + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influences by both temperature and evaluation progress + dynamic_F = ( + F_base * np.exp(-0.1 * T) * (0.5 + 0.5 * np.tanh(2 * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion incorporating both temperature and delta fitness + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.06 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling strategy with periodic modulation to prevent stagnation + adaptive_cooling = alpha - 0.01 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedHyperRefinedDynamicPrecisionOptimizerV50.py b/nevergrad/optimization/lama/RefinedHyperRefinedDynamicPrecisionOptimizerV50.py new file mode 100644 index 000000000..ff95e3880 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperRefinedDynamicPrecisionOptimizerV50.py @@ -0,0 +1,58 @@ +import numpy as np + + +class RefinedHyperRefinedDynamicPrecisionOptimizerV50: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of 5 + self.lb = -5.0 # Lower bound is -5.0 + self.ub = 5.0 # Upper bound is 5.0 + + def __call__(self, func): + # Initiate temperature and cooling parameters with refined values + T = 1.15 # Slightly increased starting temperature for better early exploration + T_min = 0.0005 # Lower minimal temperature for deep late-stage search + alpha = 0.92 # Slow cooling rate to enhance search persistence + + # Mutation and crossover parameters finely-tuned for this problem set + F = 0.75 # Mutation factor adjusted for a good balance between exploration and exploitation + CR = 0.87 # Crossover probability adjusted to maintain genetic diversity + + population_size = 80 # Population size optimized for individual evaluations + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with sigmoid adaptation for mutation factor + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.exp(-0.07 * T) * (0.7 + 0.3 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Acceptance criteria with refined temperature dependence + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / T * 1.05 + ): # Adjusted acceptance probability + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy incorporating sinusoidal modulation + adaptive_cooling = alpha - 0.008 * np.cos(2.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV52.py b/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV52.py new file mode 100644 index 000000000..4bf62c668 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV52.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedHyperStrategicOptimizerV52: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.55, + F_range=0.45, + CR=0.95, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased base mutation factor to enhance global exploration + self.F_range = F_range # Maintaining a moderate mutation factor range for balanced dynamics + self.CR = CR # High crossover probability to ensure good trait mixing + self.elite_fraction = ( + elite_fraction # Reducing elite fraction to concentrate more on the very best solutions + ) + self.mutation_strategy = mutation_strategy # Adaptive strategy to dynamically select mutation base + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the search space bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Enhance probability selection for the best individual + if np.random.rand() < 0.75: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjusting F dynamically within a controlled range + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using binomial method + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection step to potentially update population + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Break the loop if the budget is exceeded + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV55.py b/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV55.py new file mode 100644 index 000000000..af6250f11 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedHyperStrategicOptimizerV55.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedHyperStrategicOptimizerV55: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.12, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Refined base mutation factor + self.F_range = F_range # Slightly adjusted mutation range for flexible adaptation + self.CR = CR # Tuned crossover probability for optimal information exchange + self.elite_fraction = ( + elite_fraction # Increased elite fraction for a more focused search on top performers + ) + self.mutation_strategy = ( + mutation_strategy # Maintains an adaptive mutation strategy for dynamic adaptation + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Slightly higher probability to select current best to enhance focus on promising regions + if np.random.rand() < 0.8: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjust mutation factor + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..cc8a04af4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution.py @@ -0,0 +1,173 @@ +import numpy as np +from sklearn.cluster import KMeans + + +class RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7, cluster_size=5): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + self.cluster_size = cluster_size + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + kmeans = KMeans(n_clusters=self.cluster_size, random_state=0).fit(population) + cluster_centers = kmeans.cluster_centers_ + for i in range(len(population)): + if np.linalg.norm(population[i] - cluster_centers[kmeans.labels_[i]]) < 1e-1: + population[i] = random_vector() + fitness[i] = func(population[i]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + return strategy_1, strategy_2 + + def local_search(x): + best_local = x + best_local_fitness = func(x) + step_size = 0.01 + for i in range(10): + new_x = best_local + step_size * np.random.randn(self.dim) + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_fitness = func(new_x) + if new_fitness < best_local_fitness: + best_local = new_x + best_local_fitness = new_fitness + return best_local, best_local_fitness + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + success_count_history = [] + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + # Perform local search on top individuals + for i in range(min(self.cluster_size, self.population_size)): + if evaluations >= self.budget: + break + local_best, local_best_fitness = local_search(population[i]) + if local_best_fitness < fitness[i]: + population[i] = local_best + fitness[i] = local_best_fitness + success_count += 1 + evaluations += 10 + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + success_count_history.append(success_rate) + if len(success_count_history) > 10: + success_count_history.pop(0) + + avg_success_rate = np.mean(success_count_history) + if avg_success_rate > 0.2: + self.mutation_factor *= 1.1 + self.crossover_rate *= 1.05 + else: + self.mutation_factor *= 0.9 + self.crossover_rate *= 0.95 + + self.mutation_factor = np.clip(self.mutation_factor, 0.4, 1.0) + self.crossover_rate = np.clip(self.crossover_rate, 0.5, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2.py b/nevergrad/optimization/lama/RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2.py new file mode 100644 index 000000000..7a98f41d5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2.py @@ -0,0 +1,139 @@ +import numpy as np + + +class RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.3 + self.memory_size = 5 + self.w = 0.6 + self.c1 = 1.7 + self.c2 = 1.7 + self.phase_switch_ratio = 0.4 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.zeros((self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.5, 1.0), np.clip(adaptive_CR, 0.5, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4.py b/nevergrad/optimization/lama/RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4.py new file mode 100644 index 000000000..cfce0519c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4.py @@ -0,0 +1,167 @@ +import numpy as np + + +class RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Increased population size for better exploration + w = 0.6 # Inertia weight for PSO + c1 = 0.9 # Cognitive coefficient for PSO + c2 = 0.8 # Social coefficient for PSO + initial_F = 0.7 # Differential weight for DE + initial_CR = 0.8 # Crossover probability for DE + restart_threshold = 0.15 * self.budget # Restart after 15% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into the population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + replace_indices = np.random.choice(range(population_size), elite_size, replace=False) + new_population[replace_indices] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedInertiaFocalOptimizer.py b/nevergrad/optimization/lama/RefinedInertiaFocalOptimizer.py new file mode 100644 index 000000000..199ac80f3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedInertiaFocalOptimizer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedInertiaFocalOptimizer: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, particles=30): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.particles = particles + self.inertia_weight = 0.9 + self.cognitive_coeff = 2.0 + self.social_coeff = 2.0 + self.focus_factor = 0.1 # Intensity of focusing on better regions over time + + def initialize_particles(self): + positions = np.random.uniform(self.bounds[0], self.bounds[1], (self.particles, self.dimension)) + velocities = np.zeros_like(positions) + return positions, velocities + + def evaluate_particles(self, func, positions): + fitness = np.array([func(pos) for pos in positions]) + return fitness + + def optimize(self, func): + positions, velocities = self.initialize_particles() + fitness = self.evaluate_particles(func, positions) + personal_best_positions = np.copy(positions) + personal_best_fitness = np.copy(fitness) + global_best_position = positions[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + evaluations = self.particles + + while evaluations < self.budget: + for i in range(self.particles): + r1, r2 = np.random.rand(2) + + # Dynamic adjustment of inertia weight + inertia_decay = (1 - (evaluations / self.budget)) ** self.focus_factor + inertia_weight = self.inertia_weight * inertia_decay + + # Velocity update formula + velocities[i] = ( + inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_best_positions[i] - positions[i]) + + self.social_coeff * r2 * (global_best_position - positions[i]) + ) + + # Position update + positions[i] += velocities[i] + positions[i] = np.clip(positions[i], self.bounds[0], self.bounds[1]) + + new_fitness = func(positions[i]) + evaluations += 1 + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = positions[i] + personal_best_fitness[i] = new_fitness + + if new_fitness < global_best_fitness: + global_best_position = positions[i] + global_best_fitness = new_fitness + + if evaluations >= self.budget: + break + + return global_best_fitness, global_best_position + + def __call__(self, func): + return self.optimize(func) diff --git a/nevergrad/optimization/lama/RefinedIntelligentEvolvingAdaptiveStrategyV35.py b/nevergrad/optimization/lama/RefinedIntelligentEvolvingAdaptiveStrategyV35.py new file mode 100644 index 000000000..3ade83d77 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedIntelligentEvolvingAdaptiveStrategyV35.py @@ -0,0 +1,75 @@ +import numpy as np + + +class RefinedIntelligentEvolvingAdaptiveStrategyV35: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.8, CR_init=0.9): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.history = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b = np.random.choice(idxs, 2, replace=False) + # Using a simpler, more stable mutation strategy + mutant = population[a] + self.F * (population[b] - population[best_idx]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration): + # Simplifying parameter adjustment to focus on decaying F and stable CR + self.F = 0.8 * np.exp(-2 * iteration / self.budget) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + iteration = 0 + while evaluations < self.budget: + self.adjust_parameters(iteration) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i], fitnesses[i] = trial, trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + self.history.append((trial, trial_fitness)) + + if evaluations >= self.budget: + break + iteration += 1 + + # Optional: Retrieve the best found solution in history instead of the current population + if self.history: + best_solution, best_fitness = min(self.history, key=lambda x: x[1]) + else: + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV10Plus.py b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV10Plus.py new file mode 100644 index 000000000..2d0afdb7c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV10Plus.py @@ -0,0 +1,109 @@ +import numpy as np + + +class RefinedIslandEvolutionStrategyV10Plus: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=30, + population_per_island=100, + migration_rate=0.15, + mutation_intensity=1.2, + mutation_decay=0.98, + elite_ratio=0.2, + crossover_probability=0.95, + tournament_size=3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.uniform(0.3, 0.7, self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV2.py b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV2.py new file mode 100644 index 000000000..0943f79d3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV2.py @@ -0,0 +1,98 @@ +import numpy as np + + +class RefinedIslandEvolutionStrategyV2: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=6, + population_per_island=25, + migration_rate=0.2, + mutation_intensity=0.7, + mutation_decay=0.98, + elite_ratio=0.25, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + # Fill the rest of the island population + for _ in range(self.population_per_island - len(elites)): + parents = np.random.choice(island_pop.shape[0], 2, replace=False) + child = self.crossover(island_pop[parents[0]], island_pop[parents[1]]) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + # Introduce new genetic material by shuffling some individuals between islands + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) # Shuffle the migration indices to mix individuals + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV6.py b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV6.py new file mode 100644 index 000000000..5e67e689f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV6.py @@ -0,0 +1,109 @@ +import numpy as np + + +class RefinedIslandEvolutionStrategyV6: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=15, + population_per_island=60, + migration_rate=0.2, + mutation_intensity=1.0, + mutation_decay=0.98, + elite_ratio=0.1, + crossover_probability=0.8, + tournament_size=3, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV9.py b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV9.py new file mode 100644 index 000000000..fa2016941 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedIslandEvolutionStrategyV9.py @@ -0,0 +1,109 @@ +import numpy as np + + +class RefinedIslandEvolutionStrategyV9: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + islands=30, + population_per_island=100, + migration_rate=0.15, + mutation_intensity=0.75, + mutation_decay=0.95, + elite_ratio=0.1, + crossover_probability=0.95, + tournament_size=7, + ): + self.budget = budget + self.dimension = dimension + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.islands = islands + self.population_per_island = population_per_island + self.migration_rate = migration_rate + self.mutation_intensity = mutation_intensity + self.mutation_decay = mutation_decay + self.elite_ratio = elite_ratio + self.elite_count = int(self.population_per_island * self.elite_ratio) + self.crossover_probability = crossover_probability + self.tournament_size = tournament_size + self.total_population_size = self.islands * self.population_per_island + + def initialize_population(self): + return np.random.uniform( + self.lower_bound, self.upper_bound, (self.total_population_size, self.dimension) + ) + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.lower_bound, self.upper_bound) + + def crossover(self, parent1, parent2): + if np.random.rand() < self.crossover_probability: + alpha = np.random.rand(self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.lower_bound, self.upper_bound) + else: + return parent1.copy() + + def tournament_selection(self, population, fitness): + indices = np.random.randint(0, population.shape[0], self.tournament_size) + best_index = indices[np.argmin(fitness[indices])] + return population[best_index] + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = np.array([func(ind) for ind in population]) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.total_population_size + + while evaluations < self.budget: + new_population = [] + for i in range(self.islands): + start_idx = i * self.population_per_island + end_idx = start_idx + self.population_per_island + island_pop = population[start_idx:end_idx] + island_fit = fitness[start_idx:end_idx] + + elites = self.select_elites(island_pop, island_fit) + + for _ in range(self.population_per_island - len(elites)): + parent1 = self.tournament_selection(island_pop, island_fit) + parent2 = self.tournament_selection(island_pop, island_fit) + child = self.crossover(parent1, parent2) + mutated_child = self.mutate(child) + new_population.append(mutated_child) + + new_population.extend(elites) + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + if evaluations + self.total_population_size > self.budget: + break + + if np.random.rand() < self.migration_rate: + migrants = int(self.migration_rate * self.total_population_size) + migration_indices = np.random.permutation(self.total_population_size)[:migrants] + np.random.shuffle(migration_indices) + population[migration_indices] = population[np.random.permutation(migration_indices)] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.total_population_size + self.mutation_intensity *= self.mutation_decay + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedMemeticDifferentialEvolution.py new file mode 100644 index 000000000..4d83ee32d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemeticDifferentialEvolution.py @@ -0,0 +1,84 @@ +import numpy as np + + +class RefinedMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F=0.8): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR=0.9): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter=5): + best_x = x.copy() + best_f = func(x) + step_size = 0.01 + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Differential Evolution mutation and crossover + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b) + trial_vector = self.crossover(population[i], mutant_vector) + + # Evaluate trial vector + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + # Apply local search on selected individuals + if np.random.rand() < 0.2: + local_best_x, local_best_f = self.local_search(population[i], func) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Periodically re-initialize worst individuals to enhance exploration + if evaluations % (population_size * 2) == 0: + worst_indices = np.argsort(fitness)[-int(0.2 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizer.py b/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizer.py new file mode 100644 index 000000000..e7733eec7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizer.py @@ -0,0 +1,186 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedMemeticDiverseOptimizer: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + self.diversity_check_interval = 50 + self.diversity_threshold = 0.1 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness / sharing_factor + + def calculate_diversity(population): + pairwise_distances = np.sum((population[:, None] - population[None, :]) ** 2, axis=-1) + mean_distance = np.sum(pairwise_distances) / (self.population_size * (self.population_size - 1)) + return mean_distance + + def enhanced_local_search(func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < evaluate(archive[worst_index]): + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = enhanced_local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Check and adapt diversity + if eval_count % self.diversity_check_interval == 0: + diversity = calculate_diversity(population) + if diversity < self.diversity_threshold: + new_population = self.rng.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + new_fitness = np.array([evaluate(ind) for ind in new_population]) + eval_count += self.population_size + population = np.vstack((population, new_population)) + fitness = np.hstack((fitness, new_fitness)) + top_indices = np.argsort(fitness)[: self.population_size] + population = population[top_indices] + fitness = fitness[top_indices] + + # Elite update using archive members + if self.rng.random() < self.elite_update_rate: + archive_index = self.rng.choice(len(archive)) + elite_index = self.rng.choice(elite_count) + population[elite_index] = archive[archive_index] + fitness[elite_index] = evaluate(population[elite_index]) + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizerV4.py b/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizerV4.py new file mode 100644 index 000000000..fe53bd26e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemeticDiverseOptimizerV4.py @@ -0,0 +1,186 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedMemeticDiverseOptimizerV4: + def __init__(self, budget=10000, population_size=200, memetic_search_iters=20): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.7 + self.cognitive_coeff = 1.4 + self.social_coeff = 1.6 + self.strategy_switch_threshold = 0.01 + self.rng = np.random.default_rng() + self.num_strategies = 3 + self.tol = 1e-6 + self.memetic_search_iters = memetic_search_iters + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + self.elite_update_rate = 0.05 + self.fitness_sharing_radius = 0.05 + self.diversity_check_interval = 50 + self.diversity_threshold = 0.1 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + def fitness_sharing(fitness): + sharing_factor = np.ones(self.population_size) + for i in range(self.population_size): + for j in range(self.population_size): + if i != j and np.linalg.norm(population[i] - population[j]) < self.fitness_sharing_radius: + sharing_factor[i] += 1 + return fitness * sharing_factor + + def calculate_diversity(population): + pairwise_distances = np.sum((population[:, None] - population[None, :]) ** 2, axis=-1) + mean_distance = np.sum(pairwise_distances) / (self.population_size * (self.population_size - 1)) + return mean_distance + + def enhanced_local_search(func, x_start): + res = minimize( + func, + x_start, + method="Nelder-Mead", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.memetic_search_iters}, + ) + if res.success: + return res.x, res.fun + return None + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + else: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Apply fitness sharing to maintain diversity + shared_fitness = fitness_sharing(fitness) + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = enhanced_local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + # Check and adapt diversity + if eval_count % self.diversity_check_interval == 0: + diversity = calculate_diversity(population) + if diversity < self.diversity_threshold: + new_population = self.rng.uniform( + self.bounds[0], self.bounds[1], (self.population_size, self.dim) + ) + new_fitness = np.array([evaluate(ind) for ind in new_population]) + eval_count += self.population_size + population = np.vstack((population, new_population)) + fitness = np.hstack((fitness, new_fitness)) + top_indices = np.argsort(fitness)[: self.population_size] + population = population[top_indices] + fitness = fitness[top_indices] + + # Elite update using archive members + if self.rng.random() < self.elite_update_rate: + archive_index = self.rng.choice(len(archive)) + elite_index = self.rng.choice(elite_count) + population[elite_index] = archive[archive_index] + fitness[elite_index] = evaluate(population[elite_index]) + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py b/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py new file mode 100644 index 000000000..c75f081bb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py @@ -0,0 +1,157 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedMemeticQuantumDifferentialOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.c1 = 1.5 + self.c2 = 1.5 + self.epsilon = 1e-6 # Convergence threshold + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < self.epsilon or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + self.c1 * r1 * (personal_bests[i] - particles[i]) + + self.c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + self.c1 = np.random.uniform(1.0, 2.5) + self.c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemoryAdaptiveDynamicHybridOptimizer.py b/nevergrad/optimization/lama/RefinedMemoryAdaptiveDynamicHybridOptimizer.py new file mode 100644 index 000000000..13324e0d8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryAdaptiveDynamicHybridOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import pdist + + +class RefinedMemoryAdaptiveDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + stagnation_threshold=10, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.diversity_threshold = diversity_threshold + self.stagnation_threshold = stagnation_threshold + self.global_best_history = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def population_diversity(self, population): + if len(population) < 2: + return 0.0 + distances = pdist(population) + return np.mean(distances) + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Check population diversity and restart if necessary + if self.population_diversity(population) < self.diversity_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Restart mechanism based on stagnation + if len(self.global_best_history) > self.stagnation_threshold: + if np.std(self.global_best_history[-self.stagnation_threshold :]) < 1e-5: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemoryAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/RefinedMemoryAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..efaf411b2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryAdaptiveHybridOptimizer.py @@ -0,0 +1,158 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedMemoryAdaptiveHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemoryEnhancedDynamicHybridOptimizer.py b/nevergrad/optimization/lama/RefinedMemoryEnhancedDynamicHybridOptimizer.py new file mode 100644 index 000000000..45f9e2320 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryEnhancedDynamicHybridOptimizer.py @@ -0,0 +1,199 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.spatial.distance import pdist, squareform + + +class RefinedMemoryEnhancedDynamicHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + diversity_threshold=0.1, + stagnation_threshold=10, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.diversity_threshold = diversity_threshold + self.stagnation_threshold = stagnation_threshold + self.global_best_history = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def restart_population(self, size): + return np.random.uniform(self.bounds[0], self.bounds[1], (size, self.dim)) + + def population_diversity(self, population): + if len(population) < 2: + return 0.0 + distances = pdist(population) + return np.mean(distances) + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history.append(g_best_fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Check population diversity and restart if necessary + if self.population_diversity(population) < self.diversity_threshold: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Restart mechanism based on stagnation + if len(self.global_best_history) > self.stagnation_threshold: + if np.std(self.global_best_history[-self.stagnation_threshold :]) < 1e-5: + population = self.restart_population(current_pop_size) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += current_pop_size + velocities = np.random.uniform(-1, 1, (current_pop_size, self.dim)) + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + self.global_best_history = [] + + # Perform local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemoryEnhancedHybridOptimizerV2.py b/nevergrad/optimization/lama/RefinedMemoryEnhancedHybridOptimizerV2.py new file mode 100644 index 000000000..8346c4bc3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryEnhancedHybridOptimizerV2.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize +from scipy.stats import levy_stable + + +class RefinedMemoryEnhancedHybridOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + levy_alpha=1.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.levy_alpha = levy_alpha + + def levy_flight(self, size): + return levy_stable.rvs(self.levy_alpha, 0, size=size) + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // self.init_pop_size + + current_pop_size = self.init_pop_size + successful_steps = [] + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + if len(idxs) < 3: + continue # Skip mutation if less than 3 distinct individuals + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing + if self.eval_count < global_search_budget / 2: + current_pop_size = int(self.init_pop_size * (1 - self.eval_count / global_search_budget)) + current_pop_size = max(current_pop_size, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + + # Perform diverse local search on the best individuals + for i in range(current_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(population[i], func, local_budget) + if new_f < fitness[i]: + fitness[i] = new_f + population[i] = new_x + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72.py b/nevergrad/optimization/lama/RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72.py new file mode 100644 index 000000000..0cdc1acbc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72.py @@ -0,0 +1,84 @@ +import numpy as np + + +class RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + a, b, c = np.random.choice(size, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Weighted effect based on memory utility + memory_effect = ( + np.average(self.memory, axis=0, weights=np.linspace(1, 0.1, len(self.memory))) + if self.memory + else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target or np.random.rand() < 0.05: # Probabilistic acceptance + self.memory.append(trial - target) + if len(self.memory) > 10: + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + scale = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(2 * np.pi * scale), 0.1, 1) + self.CR = np.clip(0.5 + 0.5 * np.cos(2 * np.pi * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedMemoryGuidedHybridStrategyV63.py b/nevergrad/optimization/lama/RefinedMemoryGuidedHybridStrategyV63.py new file mode 100644 index 000000000..c6d8967ac --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMemoryGuidedHybridStrategyV63.py @@ -0,0 +1,69 @@ +import numpy as np + + +class RefinedMemoryGuidedHybridStrategyV63: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, memory_size=20): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init # Mutation factor + self.CR = CR_init # Crossover probability + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + memory_effect = ( + self.memory[np.random.randint(len(self.memory))] if self.memory else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + memory_effect + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + if len(self.memory) < self.memory_size: + self.memory.append(trial - target) + else: + self.memory[np.random.randint(self.memory_size)] = trial - target + return trial, f_trial + else: + return target, f_target + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedMetaNetAQAPSO.py b/nevergrad/optimization/lama/RefinedMetaNetAQAPSO.py new file mode 100644 index 000000000..2dd60ab0f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMetaNetAQAPSO.py @@ -0,0 +1,123 @@ +import numpy as np + + +class RefinedMetaNetAQAPSO: + def __init__(self, budget=1000, num_particles=50, local_search_iters=200): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + self.local_search_iters = local_search_iters + self.adaptive_iters = 200 + self.explore_prob = 0.1 + self.early_stopping = budget // 2 + self.vel_limit = 1.0 + self.step_size = 0.1 + self.max_local_search_attempts = 3 + self.meta_net_iters = 800 + self.meta_net_lr = 0.2 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(self.local_search_iters): + x_new = x + self.step_size * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.9 - 0.6 * t / self.budget + + def update_parameters(self, t, cognitive_weight, social_weight): + if t < self.adaptive_iters: + return cognitive_weight + 0.05, social_weight + 0.05 + else: + return cognitive_weight - 0.05, social_weight - 0.05 + + def meta_network(self, x, func): + for _ in range(self.meta_net_iters): + gradient = np.zeros_like(x) + for _ in range(5): + perturbation = np.random.randn(self.dim) * self.meta_net_lr + f_plus = func(x + perturbation) + f_minus = func(x - perturbation) + gradient += (f_plus - f_minus) * perturbation + + x -= self.meta_net_lr * gradient + + return x + + def __call__(self, func, cognitive_weight=2.0, social_weight=2.5): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t, cognitive_weight, social_weight) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = np.clip( + ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ), + -self.vel_limit, + self.vel_limit, + ) + + accel = 0.6 * r3 * (global_best_pos - particles_pos[i]) + particles_vel[i] += np.clip(accel, -self.vel_limit, self.vel_limit) + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + particles_pos[i] = self.meta_network(particles_pos[i], func) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + if np.random.rand() < self.explore_prob: + particles_pos = self.random_restart() + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + if t % 20 == 0: + for i in range(self.num_particles): + for _ in range(self.max_local_search_attempts): + particles_pos[i], f_val = self.local_search(particles_pos[i], func) + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if t > self.early_stopping and self.f_opt == personal_best_val[global_best_idx]: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMultiFocalAdaptiveElitistStrategyV4.py b/nevergrad/optimization/lama/RefinedMultiFocalAdaptiveElitistStrategyV4.py new file mode 100644 index 000000000..ab897f5f8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiFocalAdaptiveElitistStrategyV4.py @@ -0,0 +1,83 @@ +import numpy as np + + +class RefinedMultiFocalAdaptiveElitistStrategyV4: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.2, + mutation_intensity=0.1, + crossover_rate=0.7, + multi_focus=False, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.multi_focus = multi_focus # Enable/disable multi-focus feature + + def __call__(self, func): + # Initialize the population uniformly within the bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_rate: + # Crossover between two elites or elite and random individual + if self.multi_focus and np.random.random() < 0.5: + parent1 = elites[np.random.choice(len(elites))] + parent2 = population[np.random.randint(0, self.population_size)] + else: + parent_indices = np.random.choice(len(elites), 2, replace=False) + parent1, parent2 = elites[parent_indices[0]], elites[parent_indices[1]] + child = self.recombine(parent1, parent2) + else: + # Mutation of an elite + parent = elites[np.random.choice(len(elites))] + child = self.mutate(parent, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def mutate(self, individual, evaluations): + # Adaptive mutation + scale = self.mutation_intensity * np.exp( + -evaluations / self.budget * 3 + ) # Increased decay rate for mutation scale + return individual + np.random.normal(0, scale, self.dimension) + + def recombine(self, parent1, parent2): + # Linear combination of parents with randomized weight to increase diversity + alpha = np.random.uniform(0.3, 0.7) + return alpha * parent1 + (1 - alpha) * parent2 diff --git a/nevergrad/optimization/lama/RefinedMultiOperatorAdaptiveOptimization.py b/nevergrad/optimization/lama/RefinedMultiOperatorAdaptiveOptimization.py new file mode 100644 index 000000000..da1cbaab5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiOperatorAdaptiveOptimization.py @@ -0,0 +1,163 @@ +import numpy as np + + +class RefinedMultiOperatorAdaptiveOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1, c2 = 1.5, 1.5 + w = 0.7 + w_min = 0.4 + w_max = 0.9 + w_decay = 0.995 + + # Differential Evolution parameters + F_base = 0.8 + CR_base = 0.9 + + # Gradient-based search parameters + alpha_base = 0.1 + beta_base = 0.9 + epsilon = 1e-8 + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 50 + + prev_f = np.inf + + # Adaptive parameters + adaptive_CR = CR_base + adaptive_F = F_base + adaptive_alpha = alpha_base + adaptive_beta = beta_base + + def adapt_params(i): + # Dynamically adjust parameters based on progress + nonlocal adaptive_CR, adaptive_F, adaptive_alpha, adaptive_beta + adaptive_CR = CR_base - 0.5 * (i / self.budget) + adaptive_F = F_base + 0.2 * (i / self.budget) + adaptive_alpha = alpha_base + 0.1 * (i / self.budget) + adaptive_beta = beta_base - 0.3 * (i / self.budget) + + # Hybrid loop (combining PSO, Gradient-based search, and Differential Evolution) + for i in range(self.budget): + adapt_params(i) + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = adaptive_beta * v - adaptive_alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < adaptive_CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + adaptive_F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < adaptive_CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / (abs(prev_f) + epsilon) > 0.01: + adaptive_alpha *= 1.05 # Increase learning rate if improvement is significant + else: + adaptive_alpha *= 0.95 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Decay inertia weight + w = max(w_min, w * w_decay) + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedMultiOperatorAdaptiveOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedMultiPhaseAdaptiveHybridDEPSO.py b/nevergrad/optimization/lama/RefinedMultiPhaseAdaptiveHybridDEPSO.py new file mode 100644 index 000000000..dd42f97a5 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiPhaseAdaptiveHybridDEPSO.py @@ -0,0 +1,187 @@ +import numpy as np + + +class RefinedMultiPhaseAdaptiveHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 # Increased population size for better exploration + w = 0.5 # Inertia weight for PSO + c1 = 0.8 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Differential weight for DE + initial_CR = 0.9 # Crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + elite_size = 5 # Number of elite solutions to maintain in memory + local_search_prob = 0.3 # Probability of performing local search + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def update_elite_memory(elite_memory, new_solution, new_fitness): + if len(elite_memory) < elite_size: + elite_memory.append((new_solution, new_fitness)) + else: + elite_memory.sort(key=lambda x: x[1]) + if new_fitness < elite_memory[-1][1]: + elite_memory[-1] = (new_solution, new_fitness) + + def local_search(solution): + # Randomly perturb the solution + perturbation = np.random.normal(0, 0.1, size=self.dim) + new_solution = np.clip(solution + perturbation, bounds[0], bounds[1]) + new_fitness = func(new_solution) + return new_solution, new_fitness + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + elite_memory = [] + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + update_elite_memory(elite_memory, trial, f_trial) + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # Incorporate elite memory solutions into the population + if elite_memory: + elite_solutions, _ = zip(*elite_memory) + elite_solutions = np.array(elite_solutions) + replace_indices = np.random.choice(range(population_size), elite_size, replace=False) + new_population[replace_indices] = elite_solutions + + # PSO update + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + # Local search phase + if np.random.rand() < local_search_prob: + best_ind = population[np.argmin(fitness)] + new_solution, new_fitness_val = local_search(best_ind) + evaluations += 1 + + if new_fitness_val < self.f_opt: + self.f_opt = new_fitness_val + self.x_opt = new_solution + last_improvement = evaluations + update_elite_memory(elite_memory, new_solution, new_fitness_val) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMultiStageAdaptiveSearch.py b/nevergrad/optimization/lama/RefinedMultiStageAdaptiveSearch.py new file mode 100644 index 000000000..a27339163 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiStageAdaptiveSearch.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedMultiStageAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Start with a random point in the search space + current_point = np.random.uniform(-5.0, 5.0, self.dim) + current_f = func(current_point) + + # Update if the initial guess is better + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Set initial scale and adaptive rates + scale = 0.5 + global_scale = 0.1 + local_scale = 0.01 + + # Adaptive scale factors + exploration_scale_factor = 1.1 + exploitation_scale_factor = 0.9 + + # Temperature for simulated annealing like probability acceptance + temperature = 1.0 + min_temperature = 0.01 + temperature_decay = 0.95 + + for i in range(1, self.budget): + # Calculate current temperature + temperature = max(min_temperature, temperature * temperature_decay) + + # Choose strategy: global or local search + if np.random.rand() < 0.5: + # Global search with larger mutations + candidate = current_point + np.random.normal(0, global_scale, self.dim) + else: + # Local search with small mutations + candidate = current_point + np.random.normal(0, local_scale, self.dim) + + candidate = np.clip(candidate, -5.0, 5.0) # Ensure candidate remains within bounds + candidate_f = func(candidate) + + # Apply acceptance criteria + if candidate_f < current_f or np.exp((current_f - candidate_f) / temperature) > np.random.rand(): + current_point = candidate + current_f = candidate_f + + # Update optimal solution found + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate + + # Adjust scale based on the search type used + if np.random.rand() < 0.5: + global_scale *= exploration_scale_factor + else: + local_scale *= exploration_scale_factor + else: + global_scale *= exploitation_scale_factor + local_scale *= exploitation_scale_factor + + # Clamp scale values to prevent them from becoming too large or too small + global_scale = np.clip(global_scale, 0.05, 1.0) + local_scale = np.clip(local_scale, 0.001, 0.1) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMultiStrategyDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedMultiStrategyDifferentialEvolution.py new file mode 100644 index 000000000..27ce0aa20 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiStrategyDifferentialEvolution.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedMultiStrategyDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + self.pop_size = 100 + self.F = 0.8 + self.CR = 0.9 + self.local_search_prob = 0.3 + self.restart_threshold = 20 + self.strategy_weights = np.ones(4) + self.strategy_success = np.zeros(4) + self.learning_rate = 0.1 + self.no_improvement_count = 0 + self.elite_fraction = 0.2 + + def _initialize_population(self): + return np.random.uniform(self.lb, self.ub, (self.pop_size, self.dim)) + + def _local_search(self, x, func): + x_local, f_local = minimize( + func, x, method="L-BFGS-B", bounds=[(self.lb, self.ub)] * self.dim + ).x, func(x) + return x_local, f_local + + def _dynamic_parameters(self): + self.F = np.clip(self.F + np.random.normal(0, self.learning_rate), 0.5, 1.5) + self.CR = np.clip(self.CR + np.random.normal(0, self.learning_rate), 0.2, 1.0) + + def _mutation_best_1(self, population, best_idx, r1, r2): + return population[best_idx] + self.F * (population[r1] - population[r2]) + + def _mutation_rand_1(self, population, r1, r2, r3): + return population[r1] + self.F * (population[r2] - population[r3]) + + def _mutation_rand_2(self, population, r1, r2, r3, r4, r5): + return ( + population[r1] + + self.F * (population[r2] - population[r3]) + + self.F * (population[r4] - population[r5]) + ) + + def _mutation_best_2(self, population, best_idx, r1, r2, r3, r4): + return ( + population[best_idx] + + self.F * (population[r1] - population[r2]) + + self.F * (population[r3] - population[r4]) + ) + + def _select_strategy(self): + return np.random.choice( + [self._mutation_best_1, self._mutation_rand_1, self._mutation_rand_2, self._mutation_best_2], + p=self.strategy_weights / self.strategy_weights.sum(), + ) + + def __call__(self, func): + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations = len(population) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)].copy() + + while self.evaluations < self.budget: + new_population = [] + new_fitness = [] + + for i in range(self.pop_size): + if self.evaluations >= self.budget: + break + + strategy = self._select_strategy() + indices = [idx for idx in range(self.pop_size) if idx != i] + r1, r2, r3, r4, r5 = np.random.choice(indices, 5, replace=False) + best_idx = np.argmin(fitness) + + if strategy == self._mutation_best_1: + donor = self._mutation_best_1(population, best_idx, r1, r2) + elif strategy == self._mutation_rand_1: + donor = self._mutation_rand_1(population, r1, r2, r3) + elif strategy == self._mutation_rand_2: + donor = self._mutation_rand_2(population, r1, r2, r3, r4, r5) + else: # strategy == self._mutation_best_2 + donor = self._mutation_best_2(population, best_idx, r1, r2, r3, r4) + + trial = np.clip(donor, self.lb, self.ub) + if np.random.rand() < self.CR: + trial = np.where(np.random.rand(self.dim) < self.CR, trial, population[i]) + else: + trial = population[i] + + f_trial = func(trial) + self.evaluations += 1 + + if f_trial < fitness[i]: + new_population.append(trial) + new_fitness.append(f_trial) + strategy_idx = [ + self._mutation_best_1, + self._mutation_rand_1, + self._mutation_rand_2, + self._mutation_best_2, + ].index(strategy) + self.strategy_success[strategy_idx] += 1 + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + self.no_improvement_count = 0 + else: + new_population.append(population[i]) + new_fitness.append(fitness[i]) + + population = np.array(new_population) + fitness = np.array(new_fitness) + + if np.random.rand() < self.local_search_prob: + elite_indices = np.argsort(fitness)[: int(self.elite_fraction * self.pop_size)] + for idx in elite_indices: + if self.evaluations >= self.budget: + break + x_local, f_local = self._local_search(population[idx], func) + self.evaluations += 1 + if f_local < fitness[idx]: + population[idx] = x_local + fitness[idx] = f_local + if f_local < self.f_opt: + self.f_opt = f_local + self.x_opt = x_local + self.no_improvement_count = 0 + + if self.no_improvement_count >= self.restart_threshold: + population = self._initialize_population() + fitness = np.array([func(ind) for ind in population]) + self.evaluations += len(population) + self.no_improvement_count = 0 + + # Adaptive strategy selection + self.strategy_weights = self.strategy_success + 1 + self.strategy_success.fill(0) + self.no_improvement_count += 1 + + # Dynamic population resizing based on performance + if self.no_improvement_count >= 10: + self.pop_size = max(20, self.pop_size - 10) + population = population[: self.pop_size] + fitness = fitness[: self.pop_size] + self.no_improvement_count = 0 + + self._dynamic_parameters() + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMultiStrategySelfAdaptiveDE.py b/nevergrad/optimization/lama/RefinedMultiStrategySelfAdaptiveDE.py new file mode 100644 index 000000000..b868de408 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiStrategySelfAdaptiveDE.py @@ -0,0 +1,114 @@ +import numpy as np + + +class RefinedMultiStrategySelfAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F, fitness): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F, fitness): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i], fitness) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedMultiStrategySwarmDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedMultiStrategySwarmDifferentialEvolution.py new file mode 100644 index 000000000..a359ef355 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedMultiStrategySwarmDifferentialEvolution.py @@ -0,0 +1,58 @@ +import numpy as np + + +class RefinedMultiStrategySwarmDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality + self.pop_size = 250 # Increased population size for enhanced exploration + self.F_base = 0.6 # Base mutation factor + self.CR = 0.8 # Crossover probability + self.adapt_rate = 0.2 # Rate at which F adapts dynamically + self.lambd = 0.85 # Control parameter for mutation strategy switching + + def __call__(self, func): + # Initialize population within search space bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + for i in range(int(self.budget / self.pop_size)): + # Adaptive mutation factor influenced by a cosine curve for non-linear adaptation + F_adapted = self.F_base + self.adapt_rate * np.cos(2 * np.pi * i / (self.budget / self.pop_size)) + + for j in range(self.pop_size): + # Dual mutation strategy controlled by dynamic parameter lambda + if np.random.rand() < self.lambd: + # Strategy 1: DE/rand/2/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c, d = pop[np.random.choice(idxs, 4, replace=False)] + mutant = a + F_adapted * (b - c) + F_adapted * (c - d) + else: + # Strategy 2: DE/best/2/bin + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b = pop[np.random.choice(idxs, 2, replace=False)] + mutant = best_ind + F_adapted * (a - b) + F_adapted * (b - pop[j]) + + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedNicheDifferentialParticleSwarmOptimizer.py b/nevergrad/optimization/lama/RefinedNicheDifferentialParticleSwarmOptimizer.py new file mode 100644 index 000000000..5b0e3535a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedNicheDifferentialParticleSwarmOptimizer.py @@ -0,0 +1,149 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedNicheDifferentialParticleSwarmOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.init_num_niches = 6 + self.alpha = 0.5 + self.beta = 0.5 + self.local_search_prob = 0.1 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niche]) for niche in niches] + evaluations = self.swarm_size * self.init_num_niches + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + velocities = [ + np.random.uniform(-1, 1, (self.swarm_size, self.dim)) for _ in range(self.init_num_niches) + ] + local_bests = [niches[i][np.argmin(fitness[i])] for i in range(len(niches))] + local_best_fits = [min(fitness[i]) for i in range(len(niches))] + global_best = local_bests[np.argmin(local_best_fits)] + global_best_fit = min(local_best_fits) + + while evaluations < self.budget: + new_niches = [] + new_fitness = [] + + for n in range(len(niches)): + new_niche = [] + new_fit = [] + + for i in range(len(niches[n])): + r1, r2 = np.random.rand(), np.random.rand() + velocities[n][i] = ( + w * velocities[n][i] + + c1 * r1 * (local_bests[n] - niches[n][i]) + + c2 * r2 * (global_best - niches[n][i]) + ) + + trial_pso = niches[n][i] + velocities[n][i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(len(niches[n])) + indices = np.delete(indices, i) + a, b, c = niches[n][np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, niches[n][i]) + + trial = self.alpha * trial_de + self.beta * trial_pso + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial, func) + evaluations += 1 + else: + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[n][i]: + new_niche.append(trial) + new_fit.append(f_trial) + if f_trial < local_best_fits[n]: + local_best_fits[n] = f_trial + local_bests[n] = trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_niche.append(niches[n][i]) + new_fit.append(fitness[n][i]) + + if evaluations >= self.budget: + break + + new_niches.append(np.array(new_niche)) + new_fitness.append(np.array(new_fit)) + + niches = new_niches + fitness = new_fitness + + for n in range(len(niches)): + if np.std(fitness[n]) < 1e-5 and evaluations < self.budget: + niches[n] = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness[n] = np.array([func(ind) for ind in niches[n]]) + evaluations += self.swarm_size + + if evaluations % (self.swarm_size * self.init_num_niches) == 0: + all_particles = np.concatenate(niches) + all_fitness = np.concatenate(fitness) + sorted_indices = np.argsort(all_fitness) + num_niches = max(2, len(niches) // 2) + niches = [all_particles[sorted_indices[i::num_niches]] for i in range(num_niches)] + fitness = [all_fitness[sorted_indices[i::num_niches]] for i in range(num_niches)] + velocities = [np.random.uniform(-1, 1, (len(niche), self.dim)) for niche in niches] + local_bests = [niches[i][0] for i in range(num_niches)] + local_best_fits = [fitness[i][0] for i in range(num_niches)] + + w = np.random.uniform(0.4, 0.9) + c1 = np.random.uniform(1.0, 2.0) + c2 = np.random.uniform(1.0, 2.0) + + if evaluations % (self.swarm_size * self.init_num_niches * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * self.init_num_niches * 10) == 0: + diversity = np.mean([np.std(fit) for fit in fitness]) + if diversity < 1e-3: + niches = [ + np.array([self.random_bounds() for _ in range(self.swarm_size)]) + for _ in range(self.init_num_niches) + ] + fitness = [np.array([func(ind) for ind in niches[n]]) for n in range(len(niches))] + evaluations += self.swarm_size * self.init_num_niches + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedOptimalDynamicPrecisionOptimizerV15.py b/nevergrad/optimization/lama/RefinedOptimalDynamicPrecisionOptimizerV15.py new file mode 100644 index 000000000..186a7a4e4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimalDynamicPrecisionOptimizerV15.py @@ -0,0 +1,62 @@ +import numpy as np + + +class RefinedOptimalDynamicPrecisionOptimizerV15: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is consistently set to 5 + self.lb = -5.0 # Lower boundary of the search space + self.ub = 5.0 # Upper boundary of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters with slight refinements + T = 1.1 # Slightly reduced starting temperature for a more balanced exploration + T_min = 0.0004 # Optimized minimum temperature for deeper late-stage precision + alpha = 0.91 # Adjusted cooling rate for more gradual transition in phases + + # Mutation and crossover parameters are finely tuned for optimal performance + F = 0.78 # Fine-tuned Mutation factor + CR = 0.88 # Fine-tuned Crossover probability + + population_size = 82 # Optimized population size for efficient search + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation with sigmoid-based adaptation for responsive mutation control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor using an improved sigmoid for more responsive control + dynamic_F = ( + F + * np.exp(-0.065 * T) + * (0.65 + 0.35 * np.tanh(3.5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria with adjusted sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Upgraded adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.0075 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedOptimalEnhancedRAMEDS.py b/nevergrad/optimization/lama/RefinedOptimalEnhancedRAMEDS.py new file mode 100644 index 000000000..f4bd9166c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimalEnhancedRAMEDS.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedOptimalEnhancedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.93, + F_min=0.45, + F_max=0.85, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Periodically shuffle elites to prevent local minima stagnation + if evaluations % (self.budget // 10) == 0: + np.random.shuffle(elite) + + for i in range(self.population_size): + # Adaptive mutation factor with an improved modulation strategy + F = self.F_max - (self.F_max - self.F_min) * np.sin(2 * np.pi * evaluations / self.budget) + + # Mutation: DE/current-to-best/1 with improved selection for mutation basis + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.65 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory strategically + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedOptimalEvolutionaryGradientOptimizerV12.py b/nevergrad/optimization/lama/RefinedOptimalEvolutionaryGradientOptimizerV12.py new file mode 100644 index 000000000..5ec9a4581 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimalEvolutionaryGradientOptimizerV12.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedOptimalEvolutionaryGradientOptimizerV12: + def __init__( + self, + budget=10000, + population_size=125, + F_base=0.6, + F_range=0.44, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.8: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5.py b/nevergrad/optimization/lama/RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5.py new file mode 100644 index 000000000..996fa7ecd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5.py @@ -0,0 +1,146 @@ +import numpy as np + + +class RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Reduced for more rapid convergence + self.initial_F = 0.5 # Refined mutation factor + self.initial_CR = 0.8 # Refined crossover rate + self.elite_rate = 0.1 # Refined elite rate + self.local_search_rate = 0.3 # Refined local search rate + self.memory_size = 10 # Smaller memory for quicker adaptations + self.w = 0.5 # Balanced inertia weight + self.c1 = 1.3 # Slightly reduced cognitive component + self.c2 = 1.5 # Reduced social component for better balance + self.adaptive_phase_ratio = 0.5 # Balance between DE and PSO phases + self.alpha = 0.2 # Refined differential weight + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + population, fitness = initialize_population() + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.01 # Controlled local search step size + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.05 * np.random.randn()) + adaptive_CR = memory_CR[idx] + (0.05 * np.random.randn()) + return np.clip(adaptive_F, 0.1, 1.0), np.clip(adaptive_CR, 0.1, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.adaptive_phase_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing.py b/nevergrad/optimization/lama/RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing.py new file mode 100644 index 000000000..9fb05d1c0 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing.py @@ -0,0 +1,141 @@ +import numpy as np + + +class RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing: + def __init__(self, budget=10000): + self.budget = budget + self.dim = None + + def __call__(self, func): + self.dim = len(func.bounds.lb) + self.f_opt = np.Inf + self.x_opt = None + evaluations = 0 + + T_initial = 1.0 # Initial temperature + T_min = 1e-5 # Minimum temperature + alpha_initial = 0.9 # Cooling rate for initial phase + beta_initial = 1.2 # Initial control parameter for acceptance probability + + x_current = np.random.uniform(func.bounds.lb, func.bounds.ub) # Initial solution + f_current = func(x_current) + evaluations += 1 + + # Memory for storing best solutions + memory_size = 20 + memory = np.zeros((memory_size, self.dim)) + memory_scores = np.full(memory_size, np.Inf) + memory[0] = x_current + memory_scores[0] = f_current + + T = T_initial + beta = beta_initial + + phase1 = self.budget // 4 + phase2 = self.budget // 2 + phase3 = 3 * self.budget // 4 + + while evaluations < self.budget and T > T_min: + for _ in range(memory_size): + if np.random.rand() < 0.5: + x_candidate = memory[np.argmin(memory_scores)] + T * np.random.randn(self.dim) + else: + x_candidate = memory[np.random.randint(memory_size)] + T * np.random.randn(self.dim) + + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + + if f_candidate < f_current or np.exp(beta * (f_current - f_candidate) / T) > np.random.rand(): + x_current = x_candidate + f_current = f_candidate + + worst_idx = np.argmax(memory_scores) + if f_current < memory_scores[worst_idx]: + memory[worst_idx] = x_current + memory_scores[worst_idx] = f_current + + if f_current < self.f_opt: + self.f_opt = f_current + self.x_opt = x_current + + T *= alpha_initial + + # Adaptive adjustments for beta and alpha + if evaluations < phase1: + beta = 2.0 + alpha = 0.95 + elif evaluations < phase2: + beta = 1.5 + alpha = 0.93 + elif evaluations < phase3: + beta = 1.0 + alpha = 0.92 + else: + beta = 2.5 + alpha = 0.90 + + # Refined enhanced gradient-based local search refinement + if evaluations % (self.budget // 8) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._local_refinement(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Dimensional adjustment with adaptive step size + if evaluations % (self.budget // 6) == 0: + x_best_memory = memory[np.argmin(memory_scores)] + x_best_memory = self._dimensional_adjustment(func, x_best_memory) + f_best_memory = func(x_best_memory) + evaluations += 1 + if f_best_memory < self.f_opt: + self.f_opt = f_best_memory + self.x_opt = x_best_memory + + # Improved periodic exploration boost + if evaluations % (self.budget // 5) == 0: + best_memory_idx = np.argmin(memory_scores) + for _ in range(memory_size // 2): + if np.random.rand() < 0.2: + x_candidate = memory[best_memory_idx] + np.random.uniform(-1, 1, self.dim) + else: + x_candidate = np.random.uniform(func.bounds.lb, func.bounds.ub) + x_candidate = np.clip(x_candidate, func.bounds.lb, func.bounds.ub) + f_candidate = func(x_candidate) + evaluations += 1 + if f_candidate < self.f_opt: + self.f_opt = f_candidate + self.x_opt = x_candidate + + worst_idx = np.argmax(memory_scores) + if f_candidate < memory_scores[worst_idx]: + memory[worst_idx] = x_candidate + memory_scores[worst_idx] = f_candidate + + return self.f_opt, self.x_opt + + def _local_refinement(self, func, x, iters=100, step_size=0.01): + for _ in range(iters): + gradient = self._approximate_gradient(func, x) + x -= step_size * gradient + x = np.clip(x, func.bounds.lb, func.bounds.ub) + return x + + def _approximate_gradient(self, func, x, epsilon=1e-8): + grad = np.zeros_like(x) + fx = func(x) + for i in range(self.dim): + x_eps = np.copy(x) + x_eps[i] += epsilon + grad[i] = (func(x_eps) - fx) / epsilon + return grad + + def _dimensional_adjustment(self, func, x, step_factor=0.1): + new_x = np.copy(x) + for i in range(self.dim): + new_x[i] += step_factor * (np.random.uniform(-1, 1) * (func.bounds.ub[i] - func.bounds.lb[i])) + new_x = np.clip(new_x, func.bounds.lb, func.bounds.ub) + return new_x diff --git a/nevergrad/optimization/lama/RefinedOptimizedEnhancedDualStrategyAdaptiveDE.py b/nevergrad/optimization/lama/RefinedOptimizedEnhancedDualStrategyAdaptiveDE.py new file mode 100644 index 000000000..95153f6b7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimizedEnhancedDualStrategyAdaptiveDE.py @@ -0,0 +1,127 @@ +import numpy as np + + +class RefinedOptimizedEnhancedDualStrategyAdaptiveDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 60 + self.initial_mutation_factor = 0.85 + self.final_mutation_factor = 0.35 + self.crossover_prob = 0.85 + self.elitism_rate = 0.3 + self.local_search_prob = 0.2 + self.archive = [] + self.tol = 1e-6 # Tolerance for convergence check + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + self.budget -= self.pop_size + + generation = 0 + last_best_fitness = self.f_opt + + while self.budget > 0: + # Adaptive mutation factor + mutation_factor = self.initial_mutation_factor - ( + (self.initial_mutation_factor - self.final_mutation_factor) + * (generation / (self.budget / self.pop_size)) + ) + + # Elitism: preserve top individuals + elite_count = max(1, int(self.elitism_rate * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_pop = pop[elite_indices] + elite_fitness = fitness[elite_indices] + + # Dual-strategy evolution + new_pop = [] + for i in range(self.pop_size): + if self.budget <= 0: + break + + if np.random.rand() < 0.5: + idxs = np.random.choice(range(self.pop_size), 3, replace=False) + x1, x2, x3 = pop[idxs] + else: + idxs = np.random.choice(elite_count, 3, replace=False) + x1, x2, x3 = elite_pop[idxs] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, lower_bound, upper_bound) + + cross_points = np.random.rand(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Introduce elitist guidance in crossover stage + trial = trial + np.random.rand(self.dim) * (elite_pop[np.random.randint(elite_count)] - trial) + trial = np.clip(trial, lower_bound, upper_bound) + + # Local search phase with some probability + if np.random.rand() < self.local_search_prob: + trial = self.local_search(trial, func) + + f_trial = func(trial) + self.budget -= 1 + if f_trial < fitness[i]: + new_pop.append(trial) + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_pop.append(pop[i]) + + # Archive mechanism + self.archive.extend(new_pop) + if len(self.archive) > self.pop_size: + self.archive = self.archive[-self.pop_size :] + + if self.budget % int(self.pop_size * 0.1) == 0 and self.archive: + archive_idx = np.random.choice(len(self.archive)) + archive_ind = self.archive[archive_idx] + f_archive = func(archive_ind) + self.budget -= 1 + if f_archive < self.f_opt: + self.f_opt = f_archive + self.x_opt = archive_ind + + new_pop = np.array(new_pop) + combined_pop = np.vstack((elite_pop, new_pop[elite_count:])) + combined_fitness = np.hstack((elite_fitness, fitness[elite_count:])) + + pop = combined_pop + fitness = combined_fitness + + # Convergence check + if np.abs(self.f_opt - last_best_fitness) < self.tol: + break # Stop if the improvement is below the tolerance level + last_best_fitness = self.f_opt + + generation += 1 + + return self.f_opt, self.x_opt + + def local_search(self, x, func): + best_x = x.copy() + best_f = func(x) + perturbation = 0.01 * ( + np.random.rand(self.dim) - 0.5 + ) # Even smaller perturbation for finer adjustments + new_x = x + perturbation + new_x = np.clip(new_x, -5.0, 5.0) + new_f = func(new_x) + self.budget -= 1 # Account for the local search function evaluation + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x diff --git a/nevergrad/optimization/lama/RefinedOptimizedHybridAdaptiveMultiStageOptimization.py b/nevergrad/optimization/lama/RefinedOptimizedHybridAdaptiveMultiStageOptimization.py new file mode 100644 index 000000000..d29e6902a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedOptimizedHybridAdaptiveMultiStageOptimization.py @@ -0,0 +1,139 @@ +import numpy as np + + +class RefinedOptimizedHybridAdaptiveMultiStageOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 120 + self.initial_F = 0.8 + self.initial_CR = 0.9 + self.elite_rate = 0.1 + self.local_search_rate = 0.3 + self.memory_size = 20 + self.w = 0.8 + self.c1 = 1.5 + self.c2 = 1.5 + self.phase_switch_ratio = 0.5 + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + population = np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + velocities = np.random.uniform(-1, 1, (self.population_size, self.dim)) + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + self.eval_count = self.population_size + + memory_F = np.full(self.memory_size, self.initial_F) + memory_CR = np.full(self.memory_size, self.initial_CR) + memory_idx = 0 + + def local_search(position): + step_size = 0.1 + candidate = position + np.random.uniform(-step_size, step_size, position.shape) + return clip_bounds(candidate) + + def adapt_parameters(): + idx = np.random.randint(0, self.memory_size) + adaptive_F = memory_F[idx] + (0.1 * np.random.rand() - 0.05) + adaptive_CR = memory_CR[idx] + (0.1 * np.random.rand() - 0.05) + return np.clip(adaptive_F, 0.4, 1.0), np.clip(adaptive_CR, 0.4, 1.0) + + def evolutionary_phase(): + nonlocal best_value, best_position, memory_idx + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + elite_count = int(self.elite_rate * self.population_size) + sorted_indices = np.argsort(fitness) + elites = population[sorted_indices[:elite_count]] + new_population[:elite_count] = elites + new_fitness[:elite_count] = fitness[sorted_indices[:elite_count]] + + for i in range(elite_count, self.population_size): + if self.eval_count >= self.budget: + break + indices = list(range(self.population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + F, CR = adapt_parameters() + mutant = clip_bounds(a + F * (b - c)) + trial = np.copy(population[i]) + for d in range(self.dim): + if np.random.rand() < CR: + trial[d] = mutant[d] + + if np.random.rand() < self.local_search_rate: + candidate = local_search(trial) + else: + candidate = trial + + candidate = clip_bounds(candidate) + candidate_value = func(candidate) + self.eval_count += 1 + + if candidate_value < fitness[i]: + new_population[i] = candidate + new_fitness[i] = candidate_value + + memory_F[memory_idx] = F + memory_CR[memory_idx] = CR + memory_idx = (memory_idx + 1) % self.memory_size + + if candidate_value < best_value: + best_value = candidate_value + best_position = candidate + + return new_population, new_fitness + + def swarm_phase(): + nonlocal best_value, best_position + new_population = np.copy(population) + new_fitness = np.copy(fitness) + + for i in range(self.population_size): + r1 = np.random.rand(self.dim) + r2 = np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (personal_best_positions[i] - population[i]) + + self.c2 * r2 * (best_position - population[i]) + ) + new_population[i] = clip_bounds(population[i] + velocities[i]) + new_fitness[i] = func(new_population[i]) + self.eval_count += 1 + + if new_fitness[i] < personal_best_fitness[i]: + personal_best_fitness[i] = new_fitness[i] + personal_best_positions[i] = new_population[i] + + if new_fitness[i] < best_value: + best_value = new_fitness[i] + best_position = new_population[i] + + return new_population, new_fitness + + while self.eval_count < self.budget: + if self.eval_count < self.phase_switch_ratio * self.budget: + population, fitness = evolutionary_phase() + else: + population, fitness = swarm_phase() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedOptimizedHybridAdaptiveMultiStageOptimization(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedPrecisionAdaptivePSO.py b/nevergrad/optimization/lama/RefinedPrecisionAdaptivePSO.py new file mode 100644 index 000000000..5fb1550e2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedPrecisionAdaptivePSO.py @@ -0,0 +1,52 @@ +import numpy as np + + +class RefinedPrecisionAdaptivePSO: + def __init__( + self, budget=10000, population_size=200, inertia_weight=0.9, cognitive_weight=0.5, social_weight=0.5 + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight # How much to weigh the previous velocity + self.cognitive_weight = cognitive_weight # How much to consider personal best + self.social_weight = social_weight # How much to consider global best + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Boundary limits of the search space + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + + while evaluation_counter < self.budget: + for i in range(self.population_size): + personal_component = np.random.rand(self.dim) * (personal_best_positions[i] - particles[i]) + social_component = np.random.rand(self.dim) * (global_best_position - particles[i]) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_weight * personal_component + + self.social_weight * social_component + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/RefinedPrecisionEnhancedDualStrategyOptimizer.py b/nevergrad/optimization/lama/RefinedPrecisionEnhancedDualStrategyOptimizer.py new file mode 100644 index 000000000..d4bb3f88a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedPrecisionEnhancedDualStrategyOptimizer.py @@ -0,0 +1,73 @@ +import numpy as np + + +class RefinedPrecisionEnhancedDualStrategyOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Optimizer parameters + population_size = 200 # Further increased population size for broader exploration + mutation_factor = 0.9 # Slightly increased mutation factor for enhanced exploration + crossover_rate = 0.8 # Increased crossover rate for better gene mixing + elite_size = 15 # Slightly increased number of elite individuals to preserve more good solutions + hybridization_frequency = 10 # Frequency at which hybridization occurs + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + # Preserve elite solutions + elite_indices = np.argsort(fitness)[:elite_size] + new_population[:elite_size] = population[elite_indices] + new_fitness[:elite_size] = fitness[elite_indices] + + # Generate the rest of the new population + for i in range(elite_size, population_size): + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + + # Hybridization of mutation strategies + if evaluations % hybridization_frequency == 0: + # Combining best with random strategy + d, e = population[np.random.choice(population_size, 2, replace=False)] + mutant = best_solution + mutation_factor * (d - e) + else: + mutant = a + mutation_factor * (b - c) + + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial_vector = np.where(np.random.rand(self.dim) < crossover_rate, mutant, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + # Update best solution found + if new_fitness[i] < best_fitness: + best_fitness = new_fitness[i] + best_solution = new_population[i] + + population = new_population + fitness = new_fitness + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedPrecisionEnhancedSpatialAdaptiveEvolver.py b/nevergrad/optimization/lama/RefinedPrecisionEnhancedSpatialAdaptiveEvolver.py new file mode 100644 index 000000000..928b875d4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedPrecisionEnhancedSpatialAdaptiveEvolver.py @@ -0,0 +1,86 @@ +import numpy as np + + +class RefinedPrecisionEnhancedSpatialAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + initial_step_size=1.0, + step_decay=0.98, + elite_ratio=0.02, + mutation_intensity=0.03, + local_search_prob=0.2, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, individual): + tweaks = np.random.normal( + 0, self.step_size * 0.05, self.dimension + ) # Reduced the tweak scale for finer local search + return np.clip(individual + tweaks, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * ( + self.step_decay**generation + ) # Slower decay for more sustained exploration + + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: + for idx in range(self.population_size): + candidate = self.local_search(new_population[idx]) + candidate_fitness = func(candidate) + if candidate_fitness < new_fitness[idx]: + new_population[idx] = candidate + new_fitness[idx] = candidate_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedPrecisionEvolutionaryThermalOptimizer.py b/nevergrad/optimization/lama/RefinedPrecisionEvolutionaryThermalOptimizer.py new file mode 100644 index 000000000..8602f0273 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedPrecisionEvolutionaryThermalOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class RefinedPrecisionEvolutionaryThermalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Enhanced initial temperature and adjusted cooling rate for improved exploration-exploitation balance + T = 1.5 + T_min = 0.001 + alpha = 0.98 + + # Optimal mutation and crossover parameters derived from prior performance analysis + F = 0.75 # Dynamically adjusted mutation factor + CR = 0.88 # Crossover probability to maintain genetic diversity + + population_size = 80 # Adjusted population size for better initial search scope + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce dynamic mutation and sophisticated simulated annealing acceptance conditions + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * (0.5 + 0.5 * np.sin(np.pi * T)) * (0.6 + 0.4 * (evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion considering the magnitude of fitness improvements + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.log(1 + np.abs(delta_fitness)))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling rate based on the optimization stage + adaptive_cooling = alpha - 0.015 * (evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedPrecisionTunedCrossoverElitistStrategyV12.py b/nevergrad/optimization/lama/RefinedPrecisionTunedCrossoverElitistStrategyV12.py new file mode 100644 index 000000000..927287e88 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedPrecisionTunedCrossoverElitistStrategyV12.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedPrecisionTunedCrossoverElitistStrategyV12: + def __init__( + self, + budget, + dimension=5, + population_size=400, + elite_fraction=0.2, + mutation_intensity=0.015, + crossover_rate=0.85, + adaptive_intensity=0.9, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_intensity = mutation_intensity + self.crossover_rate = crossover_rate + self.adaptive_intensity = adaptive_intensity + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population + new_population = np.empty_like(population) + for i in range(self.population_size): + parent1 = elites[np.random.choice(len(elites))] + if np.random.random() < self.crossover_rate: + # Perform crossover + parent2 = elites[np.random.choice(len(elites))] + child = self.dynamic_crossover(parent1, parent2) + else: + # Mutation of an elite + child = self.adaptive_mutate(parent1, evaluations) + + new_population[i] = np.clip(child, -5.0, 5.0) + + # Evaluate new population + for i in range(self.population_size): + new_fitness = func(new_population[i]) + evaluations += 1 + + # Update the best solution found + if new_fitness < best_fitness: + best_fitness = new_fitness + best_individual = new_population[i] + + if evaluations >= self.budget: + break + + # Replace old population + population = new_population + fitness = np.array([func(individual) for individual in population]) + + return best_fitness, best_individual + + def adaptive_mutate(self, individual, evaluations): + # Adaptive mutation intensity based on normalized evaluations + normalized_time = evaluations / self.budget + intensity = ( + self.mutation_intensity * (1 - normalized_time) + self.mutation_intensity * normalized_time / 2 + ) + return individual + np.random.normal(0, intensity, self.dimension) + + def dynamic_crossover(self, parent1, parent2): + # Blend between parents with adaptive weighting + weight = np.random.beta(2.5, 2.5) # Beta distribution for a balance + return weight * parent1 + (1 - weight) * parent2 diff --git a/nevergrad/optimization/lama/RefinedProgressiveParticleSwarmOptimization.py b/nevergrad/optimization/lama/RefinedProgressiveParticleSwarmOptimization.py new file mode 100644 index 000000000..473c18bf4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedProgressiveParticleSwarmOptimization.py @@ -0,0 +1,65 @@ +import numpy as np + + +class RefinedProgressiveParticleSwarmOptimization: + def __init__( + self, budget=10000, population_size=50, omega_start=0.9, omega_end=0.4, phi_p=0.2, phi_g=0.5 + ): + self.budget = budget + self.population_size = population_size + self.omega_start = omega_start # Initial inertia weight + self.omega_end = omega_end # Final inertia weight + self.phi_p = phi_p # Personal learning coefficient + self.phi_g = phi_g # Global learning coefficient + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + # Initialize population + pop = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocity = np.zeros((self.population_size, self.dim)) + personal_best = pop.copy() + fitness = np.array([func(ind) for ind in pop]) + personal_best_fitness = fitness.copy() + + # Initialize global best + global_best_idx = np.argmin(fitness) + global_best = pop[global_best_idx] + + evaluations = self.population_size + + # Main optimization loop + while evaluations < self.budget: + r_p = np.random.uniform(0, 1, (self.population_size, self.dim)) + r_g = np.random.uniform(0, 1, (self.population_size, self.dim)) + + # Calculate dynamic inertia weight based on iteration progress + progress_ratio = evaluations / self.budget + omega = self.omega_start - (self.omega_start - self.omega_end) * progress_ratio + + # Update velocity and positions + velocity = ( + omega * velocity + + self.phi_p * r_p * (personal_best - pop) + + self.phi_g * r_g * (global_best - pop) + ) + pop = np.clip(pop + velocity, lb, ub) + + # Evaluate new positions + for i in range(self.population_size): + current_fitness = func(pop[i]) + evaluations += 1 + + if current_fitness < personal_best_fitness[i]: + personal_best[i] = pop[i] + personal_best_fitness[i] = current_fitness + + if current_fitness < fitness[global_best_idx]: + global_best_idx = i + global_best = pop[i] + + # Adaptive learning coefficients based on progress + self.phi_p = max(0.1, self.phi_p - progress_ratio * 0.05) + self.phi_g = min(0.6, self.phi_g + progress_ratio * 0.05) + + return fitness[global_best_idx], global_best diff --git a/nevergrad/optimization/lama/RefinedProgressiveQuorumEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedProgressiveQuorumEvolutionStrategy.py new file mode 100644 index 000000000..0cca7be50 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedProgressiveQuorumEvolutionStrategy.py @@ -0,0 +1,67 @@ +import numpy as np + + +class RefinedProgressiveQuorumEvolutionStrategy: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_scale=0.1, + quorum_size=5, + adaptive_mutation=True, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_scale = mutation_scale + self.quorum_size = quorum_size + self.adaptive_mutation = adaptive_mutation + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Adaptive mutation scale factor + scale_factor = self.mutation_scale + + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select a quorum randomly and choose the best among them + quorum_indices = np.random.choice(self.population_size, self.quorum_size, replace=False) + elite_idx = quorum_indices[np.argmin(fitness[quorum_indices])] + elite = population[elite_idx] + + # Mutation based on Gaussian noise + mutation = np.random.normal(0, scale_factor, self.dimension) + child = np.clip(elite + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + # Update population and fitness + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adaptively adjust mutation scale if enabled + if self.adaptive_mutation: + if i % 20 == 0 and scale_factor > 0.01: # Reduce mutation scale periodically + scale_factor *= 0.95 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedQuadraticAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/RefinedQuadraticAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..6f31561c1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuadraticAdaptiveEvolutionStrategy.py @@ -0,0 +1,72 @@ +import numpy as np + + +class RefinedQuadraticAdaptiveEvolutionStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.8): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.alpha = 0.01 # Gradual adjustment rate for F and CR + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b = np.random.choice(idxs, 2, replace=False) + mutant = population[best_idx] + self.F * (population[a] - population[b]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration): + # Dynamically adjusting F and CR based on iteration progression + scale = iteration / self.budget + self.F = np.clip(self.F * (1 - self.alpha * scale), 0.1, 1) + self.CR = np.clip(self.CR * (1 + self.alpha * scale), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..49e248e8b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveExplorationOptimization.py @@ -0,0 +1,212 @@ +import numpy as np + + +class RefinedQuantumAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 30 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # Particle Swarm Optimization constants + c1 = 1.5 # Cognitive constant + c2 = 1.5 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.05 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Reduced max stagnation to trigger diversity enforcement more frequently + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + max_exploration_cycles = 20 # Maximum exploration cycles + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_exploration_cycles: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedQuantumAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridOptimizerV4.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridOptimizerV4.py new file mode 100644 index 000000000..11ae87e8a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridOptimizerV4.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedQuantumAdaptiveHybridOptimizerV4: + def __init__( + self, + budget=10000, + population_size=150, + inertia_weight=0.9, + cognitive_coef=2.5, + social_coef=2.5, + quantum_probability=0.1, + damping_factor=0.99, + adaptive_quantum_shift=0.01, + division_factor=10, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.adaptive_quantum_shift = adaptive_quantum_shift + self.division_factor = division_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + # Gradually decreasing inertia weight for better convergence + inertia = self.inertia_weight * ( + self.damping_factor ** (evaluations / (self.budget / self.division_factor)) + ) + + velocities[i] = ( + inertia * velocities[i] + + self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + + self.social_coef * r2 * (global_best - particles[i]) + ) + + if np.random.rand() < self.quantum_probability: + # Quantum leap with adaptive step size + step_size = self.dim**-0.5 * (1 - evaluations / self.budget) # Decaying step size + quantum_leap = global_best + np.random.normal(0, step_size, self.dim) + particles[i] = np.clip(quantum_leap, self.lb, self.ub) + else: + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Increase quantum probability and decrease inertia weight dynamically + self.quantum_probability += self.adaptive_quantum_shift + self.inertia_weight *= self.damping_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridSearchV3.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridSearchV3.py new file mode 100644 index 000000000..eaa20e62a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveHybridSearchV3.py @@ -0,0 +1,104 @@ +import numpy as np + + +class RefinedQuantumAdaptiveHybridSearchV3: + def __init__( + self, + budget, + dimension=5, + population_size=120, + elite_frac=0.25, + mutation_intensity=0.7, + crossover_prob=0.75, + quantum_prob=0.85, + gradient_prob=0.65, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_frac) + self.mutation_intensity = mutation_intensity + self.crossover_prob = crossover_prob + self.quantum_prob = quantum_prob + self.gradient_prob = gradient_prob + + def __call__(self, func): + # Initialize the population uniformly within the bounds. + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Select elites based on fitness + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + # Generate new population members + new_population = np.empty_like(population) + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + # Perform crossover from two randomly selected elites + p1, p2 = np.random.choice(elite_indices, 2, replace=False) + offspring = self.crossover(population[p1], population[p2]) + else: + # Directly copy an elite + offspring = population[np.random.choice(elite_indices)] + + # Apply quantum state update with a probability + if np.random.random() < self.quantum_prob: + offspring = self.quantum_state_update(offspring, best_individual) + + # Apply gradient enhancement with a probability + if np.random.random() < self.gradient_prob: + offspring = self.gradient_boost(offspring, func) + + # Mutate the offspring + mutation_scale = self.adaptive_mutation_scale(evaluations) + offspring += np.random.normal(0, mutation_scale, self.dimension) + offspring = np.clip(offspring, -5, 5) # Ensure bounds are respected + + new_population[i] = offspring + + # Evaluate the new population + fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Update best individual if improved + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_fitness: + best_fitness = fitness[current_best_idx] + best_individual = new_population[current_best_idx] + + # Update the population + population = new_population + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + # Perform an alpha-blended crossover + alpha = np.random.rand() + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_state_update(self, individual, global_best): + # Apply a quantum-inspired perturbation + perturbation = np.random.normal(0, 0.1, self.dimension) + return global_best + perturbation * (global_best - individual) + + def gradient_boost(self, individual, func): + # Apply a simple numerical gradient approximation + grad_est = np.zeros(self.dimension) + fx = func(individual) + h = 1e-5 + for i in range(self.dimension): + x_new = individual.copy() + x_new[i] += h + grad_est[i] = (func(x_new) - fx) / h + return individual - 0.01 * grad_est # Update using a small learning rate + + def adaptive_mutation_scale(self, evaluations): + # Reduce mutation scale as the number of evaluations increases + return self.mutation_intensity * np.exp(-0.1 * evaluations / self.budget) diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveLevySwarmOptimization.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveLevySwarmOptimization.py new file mode 100644 index 000000000..207a152a4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveLevySwarmOptimization.py @@ -0,0 +1,160 @@ +import numpy as np + + +class RefinedQuantumAdaptiveLevySwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def levy_flight(self, dim, beta=1.5): + sigma_u = ( + np.math.gamma(1 + beta) + * np.sin(np.pi * beta / 2) + / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)) + ) ** (1 / beta) + u = np.random.normal(0, sigma_u, dim) + v = np.random.normal(0, 1, dim) + step = u / np.abs(v) ** (1 / beta) + return step + + def adaptive_parameters(self, evaluations, max_evaluations): + progress = evaluations / max_evaluations + inertia_weight = 0.9 - 0.7 * progress + cognitive_coefficient = 1.5 + 0.5 * progress + social_coefficient = 1.5 - 0.5 * progress + differential_weight = 0.8 - 0.4 * progress + crossover_rate = 0.9 - 0.3 * progress + quantum_factor = 0.5 - 0.2 * progress + levy_factor = 0.1 + 0.3 * progress + return ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) + + def __call__(self, func): + population_size = 40 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = population_size + + personal_best_positions = np.copy(population) + personal_best_fitness = np.copy(fitness) + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + self.f_opt = global_best_fitness + self.x_opt = global_best_position + + while evaluations < self.budget: + ( + inertia_weight, + cognitive_coefficient, + social_coefficient, + differential_weight, + crossover_rate, + quantum_factor, + levy_factor, + ) = self.adaptive_parameters(evaluations, self.budget) + + for i in range(population_size): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + inertia = inertia_weight * velocity[i] + cognitive = cognitive_coefficient * r1 * (personal_best_positions[i] - population[i]) + social = social_coefficient * r2 * (global_best_position - population[i]) + velocity[i] = inertia + cognitive + social + new_position = np.clip(population[i] + velocity[i], self.lb, self.ub) + new_fitness = func(new_position) + evaluations += 1 + + if new_fitness < fitness[i]: + population[i] = new_position + fitness[i] = new_fitness + + if new_fitness < personal_best_fitness[i]: + personal_best_positions[i] = new_position + personal_best_fitness[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_position + + indices = list(range(population_size)) + indices.remove(i) + + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant_vector = np.clip(a + differential_weight * (b - c), self.lb, self.ub) + + crossover_mask = np.random.rand(self.dim) < crossover_rate + if not np.any(crossover_mask): + crossover_mask[np.random.randint(0, self.dim)] = True + + trial_vector = np.where(crossover_mask, mutant_vector, population[i]) + trial_fitness = func(trial_vector) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < personal_best_fitness[i]: + personal_best_positions[i] = trial_vector + personal_best_fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + quantum_particles = population + quantum_factor * np.random.uniform( + -1, 1, (population_size, self.dim) + ) + quantum_particles = np.clip(quantum_particles, self.lb, self.ub) + quantum_fitness = np.array([func(ind) for ind in quantum_particles]) + evaluations += population_size + + for i in range(population_size): + if quantum_fitness[i] < fitness[i]: + population[i] = quantum_particles[i] + fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < personal_best_fitness[i]: + personal_best_positions[i] = quantum_particles[i] + personal_best_fitness[i] = quantum_fitness[i] + + if quantum_fitness[i] < self.f_opt: + self.f_opt = quantum_fitness[i] + self.x_opt = quantum_particles[i] + + global_best_position = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + if evaluations + population_size <= self.budget: + for i in range(population_size): + if np.random.rand() < 0.5: + local_search_iters = 3 + for _ in range(local_search_iters): + levy_step = levy_factor * self.levy_flight(self.dim) + candidate = np.clip(population[i] + levy_step, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations += 1 + + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + + if candidate_fitness < personal_best_fitness[i]: + personal_best_positions[i] = candidate + personal_best_fitness[i] = candidate_fitness + + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveMultiPopulationDE.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveMultiPopulationDE.py new file mode 100644 index 000000000..68e5fb5c8 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveMultiPopulationDE.py @@ -0,0 +1,178 @@ +import numpy as np + + +class RefinedQuantumAdaptiveMultiPopulationDE: + def __init__( + self, + budget=10000, + population_size=100, + elite_size=10, + local_search_steps=100, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + step_size = 0.01 + for _ in range(self.local_search_steps): + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + strategy = np.random.choice(strategies) + if strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + elif strategy == self.quantum_jolt: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveOptimizerV2.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveOptimizerV2.py new file mode 100644 index 000000000..d6775743a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveOptimizerV2.py @@ -0,0 +1,86 @@ +import numpy as np + + +class RefinedQuantumAdaptiveOptimizerV2: + def __init__( + self, + budget=10000, + population_size=80, + inertia_weight=0.9, + cognitive_coef=2.0, + social_coef=2.0, + quantum_probability=0.10, + damping_factor=0.98, + adaptive_quantum_shift=0.01, + elite_strategy=True, + ): + self.budget = budget + self.population_size = population_size + self.inertia_weight = inertia_weight + self.cognitive_coef = cognitive_coef + self.social_coef = social_coef + self.quantum_probability = quantum_probability + self.damping_factor = damping_factor + self.adaptive_quantum_shift = adaptive_quantum_shift + self.elite_strategy = elite_strategy + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coef * r1 * (personal_bests[i] - particles[i]) + + self.social_coef * r2 * (global_best - particles[i]) + ) + + if np.random.rand() < self.quantum_probability: + # Enhanced Quantum movement + quantum_leap = global_best + np.random.normal(0, 1, self.dim) * ( + global_best - personal_bests[i] + ) + particles[i] = np.clip(quantum_leap, self.lb, self.ub) + else: + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + self.inertia_weight *= self.damping_factor + self.quantum_probability += self.adaptive_quantum_shift + + # Elite strategy: include random re-initialization of worst performers + if self.elite_strategy: + worst_indices = np.argsort(-personal_best_scores)[: self.population_size // 10] + for idx in worst_indices: + particles[idx] = np.random.uniform(self.lb, self.ub, self.dim) + velocities[idx] = np.zeros(self.dim) + personal_best_scores[idx] = func(particles[idx]) + personal_bests[idx] = particles[idx] + evaluations += 1 + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumAdaptiveVelocityOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumAdaptiveVelocityOptimizer.py new file mode 100644 index 000000000..832341345 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumAdaptiveVelocityOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedQuantumAdaptiveVelocityOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 40 # Optimized smaller population size for enhanced exploration + inertia_weight = 0.85 # Slightly reduced inertia for better adaptation + cognitive_coefficient = ( + 2.2 # Increased personal learning for better convergence on individual experience + ) + social_coefficient = 2.2 # Increased social learning for stronger global convergence + velocity_limit = 0.15 # Reduced velocity limit for more delicate adjustments + quantum_momentum = 0.03 # Slightly reduced momentum for finer quantum jumps + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + inertia_decay = np.power((1 - (current_budget / self.budget)), 2) # Exponential decay for inertia + w = inertia_weight * inertia_decay + + for i in range(population_size): + if current_budget >= self.budget: + break + + # Adaptive quantum jump + quantum_probability = 0.1 * np.exp(-5 * (current_budget / self.budget)) + if np.random.rand() < quantum_probability: + quantum_jump = np.random.normal(0, quantum_momentum, self.dim) + population[i] += quantum_jump + + # PSO velocity updates with clamping + inertia_component = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia_component + cognitive_component + social_component, + -velocity_limit, + velocity_limit, + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Personal and global best updates + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/RefinedQuantumCognitionAdaptiveTuningOptimizerV15.py b/nevergrad/optimization/lama/RefinedQuantumCognitionAdaptiveTuningOptimizerV15.py new file mode 100644 index 000000000..a6f28fcdf --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumCognitionAdaptiveTuningOptimizerV15.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedQuantumCognitionAdaptiveTuningOptimizerV15: + def __init__( + self, + budget=10000, + population_size=50, + inertia_weight=0.9, + cognitive_coeff=2.1, + social_coeff=2.1, + inertia_decay=0.995, + quantum_jump_rate=0.015, + min_quantum_scale=0.005, + max_quantum_scale=0.03, + quantum_decay=0.99, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.quantum_decay = quantum_decay + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (personal_bests[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = candidate_position + personal_best_scores[i] = score + + if score < global_best_score: + global_best = candidate_position + global_best_score = score + + # Adjust decay rates and scaling factors based on progress + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumCognitionHybridOptimizerV22.py b/nevergrad/optimization/lama/RefinedQuantumCognitionHybridOptimizerV22.py new file mode 100644 index 000000000..b283e68e2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumCognitionHybridOptimizerV22.py @@ -0,0 +1,84 @@ +import numpy as np + + +class RefinedQuantumCognitionHybridOptimizerV22: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coeff=2.8, + social_coeff=2.8, + inertia_decay=0.99, + quantum_jump_rate=0.2, + quantum_scale=0.03, + quantum_decay=0.97, + mutation_rate=0.1, + mutation_scale=0.2, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coeff = cognitive_coeff + self.social_coeff = social_coeff + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.quantum_decay = quantum_decay + self.mutation_rate = mutation_rate + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + best_particles = particles.copy() + best_values = np.array([func(p) for p in particles]) + global_best = best_particles[np.argmin(best_values)] + global_best_value = min(best_values) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + quantum_deviation = np.random.normal( + 0, self.quantum_scale * (self.ub - self.lb), self.dim + ) + candidate_position = global_best + quantum_deviation + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coeff * r1 * (best_particles[i] - particles[i]) + + self.social_coeff * r2 * (global_best - particles[i]) + ) + candidate_position = particles[i] + velocities[i] + + # Mutation mechanism + if np.random.rand() < self.mutation_rate: + mutation = np.random.normal(0, self.mutation_scale, self.dim) + candidate_position += mutation + + candidate_position = np.clip(candidate_position, self.lb, self.ub) + score = func(candidate_position) + evaluations += 1 + + if score < best_values[i]: + best_particles[i] = candidate_position + best_values[i] = score + + if score < global_best_value: + global_best = candidate_position + global_best_value = score + + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= self.quantum_decay + self.quantum_scale *= self.quantum_decay + + if evaluations >= self.budget: + break + + return global_best_value, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV13.py b/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV13.py new file mode 100644 index 000000000..133cc7216 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV13.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedQuantumCognitionOptimizerV13: + def __init__( + self, + budget=10000, + population_size=30, + inertia_weight=0.8, + cognitive_coefficient=2.3, + social_coefficient=2.3, + inertia_decay=0.95, + quantum_jump_rate=0.01, + min_quantum_scale=0.03, + max_quantum_scale=0.1, + adaptive_scale_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with refined adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Adjust decay rates and scaling factors based on progress + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= 1 - self.adaptive_scale_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV4.py b/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV4.py new file mode 100644 index 000000000..8d0e8184c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumCognitionOptimizerV4.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedQuantumCognitionOptimizerV4: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.9, + cognitive_coefficient=2.0, + social_coefficient=2.0, + inertia_decay=0.95, + quantum_jump_rate=0.25, + quantum_scale=0.1, + adaptive_scale_factor=0.2, + multimodal_enhancement=True, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + self.multimodal_enhancement = multimodal_enhancement + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Enhanced quantum jump with improved adaptive scale for complex landscapes + quantum_deviation = np.random.normal( + 0, + self.quantum_scale + * (1 + self.adaptive_scale_factor * np.log(1 + abs(global_best_score))), + self.dim, + ) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Improved velocity update with increased coefficients for cognitive and social terms + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + self.inertia_weight *= ( + self.inertia_decay + ) # Gradual reduction in inertia weight to favor exploitation over time + + # Multimodal enhancement: occasional random reinitialization of a subset of particles to diversify the search + if self.multimodal_enhancement and evaluations % 1000 == 0: + idx_to_reset = np.random.choice( + np.arange(self.population_size), size=int(0.1 * self.population_size), replace=False + ) + particles[idx_to_reset] = np.random.uniform(self.lb, self.ub, (len(idx_to_reset), self.dim)) + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumCovarianceMatrixDifferentialEvolutionV4.py b/nevergrad/optimization/lama/RefinedQuantumCovarianceMatrixDifferentialEvolutionV4.py new file mode 100644 index 000000000..b19dfe3e2 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumCovarianceMatrixDifferentialEvolutionV4.py @@ -0,0 +1,194 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumCovarianceMatrixDifferentialEvolutionV4: + def __init__(self, budget=10000): + self.budget = budget + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.dim = 5 + self.population_size = 100 # Increased population size for better exploration + self.sigma = 0.1 # Further reduced sigma for better precision + self.c1 = 0.1 + self.cmu = 0.05 + self.weights = np.log(self.population_size / 2 + 1) - np.log( + np.arange(1, self.population_size // 2 + 1) + ) + self.weights /= np.sum(self.weights) + self.mu = len(self.weights) + self.F = 0.6 # Adjusted differential weight for balanced exploration/exploitation + self.CR = 0.8 # Adjusted crossover rate for more diversity + self.elitism_rate = 0.2 # Adjusted elitism rate to retain more diversity + self.eval_count = 0 + self.alpha_levy = 0.01 # Adjusted Levy flight step size for better precision + self.levy_prob = 0.05 # Adjusted Levy flight probability to avoid excessive randomness + self.adaptive_learning_rate = 0.01 # Adjusted adaptive learning rate for stability + self.strategy_switches = [0.25, 0.5, 0.75] + self.local_opt_prob = 0.4 # Increased probability of local optimization + self.learning_rate_decay = 0.9 # Adjusted learning rate decay for stability + + def __call__(self, func): + def clip_bounds(candidate): + return np.clip(candidate, self.lower_bound, self.upper_bound) + + def initialize_population(): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dim) + ) + fitness = np.array([func(ind) for ind in population]) + self.eval_count += self.population_size + return population, fitness + + def adapt_sigma(): + self.sigma *= np.exp(self.adaptive_learning_rate * (np.random.randn() - 0.5)) + self.adaptive_learning_rate *= self.learning_rate_decay + + def recombination(population, fitness): + sorted_indices = np.argsort(fitness) + selected_population = population[sorted_indices[: self.mu]] + recombined = np.dot(self.weights, selected_population) + return recombined, sorted_indices, selected_population + + def update_covariance_matrix(cov_matrix, selected_population, mean, recombined): + z = (selected_population - mean) / self.sigma + rank_one = np.outer(z[0], z[0]) + rank_mu = sum(self.weights[i] * np.outer(z[i], z[i]) for i in range(self.mu)) + cov_matrix = (1 - self.c1 - self.cmu) * cov_matrix + self.c1 * rank_one + self.cmu * rank_mu + return cov_matrix + + def sample_offspring(recombined, cov_matrix): + offspring = np.random.multivariate_normal( + recombined, self.sigma**2 * cov_matrix, self.population_size + ) + return clip_bounds(offspring) + + def levy_flight_step(x): + u = np.random.normal(0, 1, self.dim) * self.alpha_levy + v = np.random.normal(0, 1, self.dim) + step = u / (np.abs(v) ** (1 / 3)) + return x + step + + def differential_evolution(population, fitness): + new_population = np.copy(population) + for i in range(self.population_size): + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices[0]], population[indices[1]], population[indices[2]] + mutant_vector = clip_bounds(x1 + self.F * (x2 - x3)) + crossover = np.random.rand(self.dim) < self.CR + if not np.any(crossover): + crossover[np.random.randint(self.dim)] = True + trial_vector = np.where(crossover, mutant_vector, population[i]) + trial_vector = clip_bounds(trial_vector) + trial_fitness = func(trial_vector) + self.eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial_vector + fitness[i] = trial_fitness + return new_population, fitness + + def retain_elite(population, fitness, new_population, new_fitness): + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness) + elite_count = int(self.elitism_rate * self.population_size) + retained_indices = sorted_indices[: self.population_size - elite_count] + retained_population = combined_population[retained_indices] + retained_fitness = combined_fitness[retained_indices] + elite_indices = sorted_indices[:elite_count] + elite_population = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + return np.vstack((retained_population, elite_population)), np.hstack( + (retained_fitness, elite_fitness) + ) + + def dynamic_strategy_switching(): + """Switch strategy based on current performance.""" + if self.eval_count < self.budget * self.strategy_switches[0]: + return "explorative" + elif self.eval_count < self.budget * self.strategy_switches[1]: + return "balanced" + elif self.eval_count < self.budget * self.strategy_switches[2]: + return "exploitative" + else: + return "converging" + + def levy_flight_optimization(population): + for i in range(self.population_size): + if np.random.rand() < self.levy_prob: + population[i] = levy_flight_step(population[i]) + return population + + def hybridization(population, cov_matrix): + prob_hybrid = 0.1 # Reduced hybridization probability for stability + for i in range(self.population_size): + if np.random.rand() < prob_hybrid: + population[i] = population[i] + np.random.multivariate_normal( + np.zeros(self.dim), cov_matrix + ) + return clip_bounds(population) + + def local_refinement(population, fitness): + """Local refinement using Nelder-Mead or similar method.""" + for i in range(self.population_size): + if np.random.rand() < self.local_opt_prob: + result = minimize(func, population[i], method="nelder-mead", options={"maxiter": 50}) + if result.fun < fitness[i]: + population[i] = result.x + fitness[i] = result.fun + return population, fitness + + def adapt_parameters_based_on_performance(): + """Adapt parameters like CR, F dynamically based on performance metrics.""" + if np.std(fitness) < 1e-5: # Indicating convergence + self.CR = min(1, self.CR + 0.1) + self.F = min(1, self.F + 0.1) + else: + self.CR = max(0.1, self.CR - 0.1) + self.F = max(0.1, self.F - 0.1) + + population, fitness = initialize_population() + cov_matrix = np.identity(self.dim) + + best_index = np.argmin(fitness) + best_position = population[best_index] + best_value = fitness[best_index] + + mean = np.mean(population, axis=0) + + while self.eval_count < self.budget: + strategy = dynamic_strategy_switching() + adapt_sigma() + recombined, sorted_indices, selected_population = recombination(population, fitness) + cov_matrix = update_covariance_matrix(cov_matrix, selected_population, mean, recombined) + offspring = sample_offspring(recombined, cov_matrix) + + new_population, new_fitness = differential_evolution(offspring, fitness.copy()) + + population, fitness = retain_elite(population, fitness, new_population, new_fitness) + + if strategy == "explorative": + population = levy_flight_optimization(population) + + if strategy == "balanced": + population = hybridization(population, cov_matrix) + + if strategy == "converging": + population, fitness = local_refinement(population, fitness) + + best_index = np.argmin(fitness) + if fitness[best_index] < best_value: + best_value = fitness[best_index] + best_position = population[best_index] + + mean = np.mean(population, axis=0) + + adapt_parameters_based_on_performance() + + return best_value, best_position + + +# Example usage: +# func = SomeBlackBoxFunction() # The black box function to be optimized +# optimizer = RefinedQuantumCovarianceMatrixDifferentialEvolutionV4(budget=10000) +# best_value, best_position = optimizer(func) diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism.py new file mode 100644 index 000000000..ff3b74c8b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism.py @@ -0,0 +1,152 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 15 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + self.memorized_individuals = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def hybrid_search(self, x, func): + candidate_positions = [ + np.clip(x + np.random.randn(self.dim) * 0.1, self.bounds[0], self.bounds[1]) for _ in range(10) + ] + candidate_fitness = [func(pos) for pos in candidate_positions] + best_candidate = candidate_positions[np.argmin(candidate_fitness)] + return self.local_search(best_candidate, func) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + self.memorized_individuals = self.memory.copy() + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveLearning.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveLearning.py new file mode 100644 index 000000000..1bfb2ad53 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveLearning.py @@ -0,0 +1,166 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialEvolutionWithAdaptiveLearning: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 50 + self.elite_size = 10 + self.alpha = 0.7 + self.beta = 0.5 + self.local_search_prob = 0.3 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.restart_threshold = 50 + self.memory_update_interval = 20 + self.memory_size = 10 + self.memory = [] + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + res = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return (res.x, res.fun) if res.success else (x, func(x)) + + def quantum_update(self, x, elites): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = self.beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func, evaluations): + std_dev = np.std(fitness) + if std_dev < self.diversity_threshold: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + return population, fitness, evaluations + + def update_memory(self, memory, population, fitness): + combined = sorted(list(memory) + list(zip(population, fitness)), key=lambda x: x[1]) + return combined[: self.memory_size] + + def enhanced_elitist_learning(self, population, fitness): + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + step_size = self.alpha * np.random.randn(self.dim) + new_individual = best_individual + step_size + return np.clip(new_individual, self.bounds[0], self.bounds[1]) + + def multiple_strategy_search(self, x, func): + strategy = np.random.choice(["perturbation", "local_search", "random_restart"]) + if strategy == "perturbation": + perturbed = x + np.random.randn(self.dim) * 0.1 + perturbed = np.clip(perturbed, self.bounds[0], self.bounds[1]) + return (perturbed, func(perturbed)) + elif strategy == "local_search": + return self.local_search(x, func) + elif strategy == "random_restart": + random_restart = self.random_bounds() + return (random_restart, func(random_restart)) + + def enhanced_hybrid_search(self, population, fitness, func, evaluations): + if evaluations % self.memory_update_interval == 0: + for mem_ind in self.memory: + refined_mem, f_refined_mem = self.local_search(mem_ind[0], func) + if f_refined_mem < mem_ind[1]: + mem_ind = (refined_mem, f_refined_mem) + if f_refined_mem < self.f_opt: + self.f_opt = f_refined_mem + self.x_opt = refined_mem + evaluations += 1 + return evaluations + + def adaptive_learning(self, population, fitness, elites, func): + for i in range(len(population)): + trial = self.quantum_update(population[i], elites) + f_trial = func(trial) + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + return population, fitness + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + self.memory = [(population[i], fitness[i]) for i in range(self.memory_size)] + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + self.memory = self.update_memory(self.memory, population, fitness) + elite_particles = np.array([mem[0] for mem in self.memory]) + + population, fitness = self.adaptive_learning(population, fitness, elite_particles, func) + + if evaluations % self.restart_threshold == 0: + population, fitness, evaluations = self.adaptive_restart( + population, fitness, func, evaluations + ) + + if evaluations % self.memory_update_interval == 0: + self.memory = self.update_memory(self.memory, population, fitness) + + new_individual = self.enhanced_elitist_learning(population, fitness) + f_new_individual = func(new_individual) + evaluations += 1 + if f_new_individual < self.f_opt: + self.f_opt = f_new_individual + self.x_opt = new_individual + + evaluations = self.enhanced_hybrid_search(population, fitness, func, evaluations) + + if evaluations < self.budget: + for i in range(self.elite_size): + strategy_individual, f_strategy_individual = self.multiple_strategy_search( + population[i], func + ) + evaluations += 1 + if f_strategy_individual < self.f_opt: + self.f_opt = f_strategy_individual + self.x_opt = strategy_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py new file mode 100644 index 000000000..f997fda3e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch.py @@ -0,0 +1,183 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 100 + self.initial_num_elites = 5 + self.alpha = 0.5 + self.beta = 0.3 + self.local_search_prob = 0.6 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + self.diversity_threshold = 1e-3 + self.adaptive_restart_interval = 100 + self.memory_rate = 0.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_num_elites(self, diversity): + if diversity < self.diversity_threshold: + return max(2, int(self.population_size / 20)) + else: + return self.initial_num_elites + + def hybrid_local_search(self, x, func): + methods = ["L-BFGS-B", "TNC"] + f_best = np.inf + x_best = None + for method in methods: + result = minimize(func, x, method=method, bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + if result.fun < f_best: + f_best = result.fun + x_best = result.x + return x_best, f_best + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + memory = np.copy(population) + memory_fitness = np.copy(fitness) + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.hybrid_local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + diversity = np.std(fitness) + num_elites = self.adaptive_num_elites(diversity) + elite_particles = personal_bests[np.argsort(personal_best_fits)[:num_elites]] + + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations % self.adaptive_restart_interval == 0: + population, fitness = self.adaptive_restart(population, fitness, func) + + if evaluations % (self.population_size * 10) == 0: + if diversity < self.diversity_threshold: + for j in range(num_elites): + elite, elite_fit = self.hybrid_local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + # Memory update + for i in range(self.population_size): + if fitness[i] < memory_fitness[i]: + memory[i] = population[i] + memory_fitness[i] = fitness[i] + else: + trial = self.memory_rate * memory[i] + (1 - self.memory_rate) * population[i] + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism.py new file mode 100644 index 000000000..4185f323b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism.py @@ -0,0 +1,138 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.population_size = 80 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.epsilon = 1e-6 + self.CR = 0.9 + self.F = 0.8 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return np.clip(x + Q * v, self.bounds[0], self.bounds[1]) + + def adaptive_restart(self, population, fitness, global_best, global_best_fit, func): + std_dev = np.std(fitness) + if std_dev < self.epsilon: + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + return population, fitness, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + population = np.array([self.random_bounds() for _ in range(self.population_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + personal_bests = np.copy(population) + personal_best_fits = np.copy(fitness) + global_best = population[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + while evaluations < self.budget: + for i in range(self.population_size): + a, b, c = population[ + np.random.choice(np.delete(np.arange(self.population_size), i), 3, replace=False) + ] + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + refined_trial, f_refined_trial = self.local_search(trial, func) + evaluations += 1 + if f_refined_trial < fitness[i]: + population[i] = refined_trial + fitness[i] = f_refined_trial + if f_refined_trial < personal_best_fits[i]: + personal_bests[i] = refined_trial + personal_best_fits[i] = f_refined_trial + if f_refined_trial < global_best_fit: + global_best_fit = f_refined_trial + global_best = refined_trial + if f_refined_trial < self.f_opt: + self.f_opt = f_refined_trial + self.x_opt = refined_trial + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + for i in range(self.population_size): + trial = self.quantum_update(population[i], elite_particles, self.beta) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + population, fitness, global_best, global_best_fit = self.adaptive_restart( + population, fitness, global_best, global_best_fit, func + ) + + if evaluations % (self.population_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py new file mode 100644 index 000000000..d22497b19 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py @@ -0,0 +1,156 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialMemeticOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + self.c1 = 1.5 + self.c2 = 1.5 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elites, beta): + p_best = elites[np.random.randint(len(elites))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def enhanced_adaptive_restart( + self, particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ): + std_dev = np.std(personal_best_fits) + mean_fit = np.mean(personal_best_fits) + + if std_dev < 1e-3 or mean_fit < global_best_fit * 1.01: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + else: + global_best = global_best + global_best_fit = global_best_fit + + return particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + self.c1 * r1 * (personal_bests[i] - particles[i]) + + self.c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + w = np.random.uniform(0.3, 0.9) + self.c1 = np.random.uniform(1.0, 2.5) + self.c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( + self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func + ) + ) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + for j in range(self.num_elites): + elite, elite_fit = self.local_search(personal_bests[j], func) + evaluations += 1 + if elite_fit < personal_best_fits[j]: + personal_bests[j] = elite + personal_best_fits[j] = elite_fit + if elite_fit < global_best_fit: + global_best_fit = elite_fit + global_best = elite + if elite_fit < self.f_opt: + self.f_opt = elite_fit + self.x_opt = elite + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialParticleOptimizerWithElitism.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialParticleOptimizerWithElitism.py new file mode 100644 index 000000000..99ccddaa6 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialParticleOptimizerWithElitism.py @@ -0,0 +1,119 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedQuantumDifferentialParticleOptimizerWithElitism: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.swarm_size = 60 + self.num_elites = 5 + self.alpha = 0.6 + self.beta = 0.4 + self.local_search_prob = 0.4 + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + result = minimize(func, x, method="L-BFGS-B", bounds=[(self.bounds[0], self.bounds[1])] * self.dim) + return result.x, result.fun + + def quantum_update(self, x, elits, beta): + p_best = elits[np.random.randint(len(elits))] + u = np.random.uniform(0, 1, self.dim) + v = np.random.uniform(-1, 1, self.dim) + Q = beta * (p_best - x) * np.log(1 / u) + return x + Q * v + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations = self.swarm_size + + velocities = np.random.uniform(-1, 1, (self.swarm_size, self.dim)) + personal_bests = np.copy(particles) + personal_best_fits = np.copy(fitness) + global_best = particles[np.argmin(fitness)] + global_best_fit = np.min(fitness) + + w = 0.5 + c1 = 1.5 + c2 = 1.5 + + while evaluations < self.budget: + for i in range(self.swarm_size): + r1, r2 = np.random.rand(), np.random.rand() + velocities[i] = ( + w * velocities[i] + + c1 * r1 * (personal_bests[i] - particles[i]) + + c2 * r2 * (global_best - particles[i]) + ) + + trial_pso = particles[i] + velocities[i] + trial_pso = np.clip(trial_pso, self.bounds[0], self.bounds[1]) + + F = 0.8 + CR = 0.9 + indices = np.arange(self.swarm_size) + indices = np.delete(indices, i) + a, b, c = particles[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial_de = np.where(cross_points, mutant, particles[i]) + + if np.random.rand() < self.local_search_prob and evaluations < self.budget: + trial, f_trial = self.local_search(trial_de, func) + evaluations += 1 + else: + f_trial = func(trial_de) + evaluations += 1 + + elite_particles = personal_bests[np.argsort(personal_best_fits)[: self.num_elites]] + trial = self.quantum_update(trial_de, elite_particles, self.beta) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + particles[i] = trial + fitness[i] = f_trial + if f_trial < personal_best_fits[i]: + personal_bests[i] = trial + personal_best_fits[i] = f_trial + if f_trial < global_best_fit: + global_best_fit = f_trial + global_best = trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + if evaluations >= self.budget: + break + + w = np.random.uniform(0.3, 0.9) + c1 = np.random.uniform(1.0, 2.5) + c2 = np.random.uniform(1.0, 2.5) + + if evaluations % (self.swarm_size * 2) == 0: + improvement = (self.f_opt - global_best_fit) / self.f_opt if self.f_opt != 0 else 0 + if improvement < 0.01: + self.local_search_prob = min(1.0, self.local_search_prob + 0.1) + else: + self.local_search_prob = max(0.1, self.local_search_prob - 0.1) + + if evaluations % (self.swarm_size * 10) == 0: + diversity = np.std(fitness) + if diversity < 1e-3: + particles = np.array([self.random_bounds() for _ in range(self.swarm_size)]) + fitness = np.array([func(ind) for ind in particles]) + evaluations += self.swarm_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE.py b/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE.py new file mode 100644 index 000000000..e2da21840 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE.py @@ -0,0 +1,136 @@ +import numpy as np + + +class RefinedQuantumEnhancedAdaptiveMultiPhaseDE: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(5): # small fixed number of local steps + perturbation = np.random.normal(0, 0.1, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2.py b/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2.py new file mode 100644 index 000000000..be6803b3e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2.py @@ -0,0 +1,136 @@ +import numpy as np + + +class RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2: + def __init__(self, budget=10000, population_size=100, elite_size=10): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + + def local_search(self, elite_individual, func, bounds): + """Local search around elite individual for fine-tuning""" + best_local = elite_individual.copy() + best_fitness = func(elite_individual) + for _ in range(10): # increased number of local steps + perturbation = np.random.uniform(-0.05, 0.05, len(elite_individual)) + candidate = elite_individual + perturbation + candidate = np.clip(candidate, bounds[0], bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, bounds, F): + """Perform differential mutation""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), bounds[0], bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity, bounds): + """Apply quantum-inspired jolt to an individual""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, bounds[0], bounds[1]) + return jolted_individual + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + dim = 5 # given dimensionality + bounds = np.array([-5.0, 5.0]) + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (self.population_size, dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track best individual + best_idx = np.argmin(fitness) + self.x_opt = population[best_idx] + self.f_opt = fitness[best_idx] + + # Differential Evolution parameters + F = 0.8 + Cr = 0.9 + + # Adaptive parameter ranges + F_min, F_max = 0.5, 1.0 + Cr_min, Cr_max = 0.2, 0.9 + + while evaluations < self.budget: + new_population = np.zeros_like(population) + success_count = 0 + for i in range(self.population_size): + # Enhanced Differential Mutation + mutant = self.differential_mutation(population, bounds, F) + + # Quantum-inspired jolt to escape local optima + if np.random.rand() < 0.1: + jolt_intensity = 0.1 * (1 - success_count / self.population_size) + mutant = self.quantum_jolt(mutant, jolt_intensity, bounds) + + # Crossover strategy + cross_points = np.random.rand(dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, dim)] = True + + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + success_count += 1 + else: + new_population[i] = population[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + population = new_population + + # Adaptive Population Size + if success_count / self.population_size > 0.2: + self.population_size = min(self.population_size + 10, self.population_size * 2) + elif success_count / self.population_size < 0.1: + self.population_size = max(self.population_size - 10, self.population_size // 2) + + # Ensure the population size is within bounds + self.population_size = np.clip(self.population_size, 10, self.population_size * 2) + + # Resize population arrays if necessary + if self.population_size > population.shape[0]: + new_pop = np.random.uniform( + bounds[0], bounds[1], (self.population_size - population.shape[0], dim) + ) + population = np.vstack((population, new_pop)) + fitness = np.hstack((fitness, np.array([func(ind) for ind in new_pop]))) + elif self.population_size < population.shape[0]: + population = population[: self.population_size] + fitness = fitness[: self.population_size] + + # Perform local search on elite individuals + elite_indices = np.argsort(fitness)[: self.elite_size] + elite_population = population[elite_indices] + elite_fitness = fitness[elite_indices] + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func, bounds) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + # Self-Adaptive Control Parameters + success_rate = success_count / len(population) + if success_rate > 0.2: + F = min(F_max, F + 0.1 * (success_rate - 0.2)) + Cr = max(Cr_min, Cr - 0.1 * (0.2 - success_rate)) + else: + F = max(F_min, F - 0.1 * (0.2 - success_rate)) + Cr = min(Cr_max, Cr + 0.1 * (success_rate - 0.2)) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6.py b/nevergrad/optimization/lama/RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6.py new file mode 100644 index 000000000..c447d0160 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6.py @@ -0,0 +1,160 @@ +import numpy as np + + +class RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 25 # Increased population size for more diversity + w = 0.6 # Adjusted inertia weight for PSO for better balance + c1 = 0.9 # Cognitive coefficient for PSO + c2 = 1.2 # Social coefficient for PSO + initial_F = 0.7 # Differential weight for DE + initial_CR = 0.85 # Crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart threshold for dynamic restart + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best, alpha=0.3, beta=0.7): + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumEnhancedHybridDEPSO.py b/nevergrad/optimization/lama/RefinedQuantumEnhancedHybridDEPSO.py new file mode 100644 index 000000000..1b7e3a80b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEnhancedHybridDEPSO.py @@ -0,0 +1,162 @@ +import numpy as np + + +class RefinedQuantumEnhancedHybridDEPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 30 + w = 0.6 # Inertia weight for PSO + c1 = 0.7 # Cognitive coefficient for PSO + c2 = 0.9 # Social coefficient for PSO + initial_F = 0.8 # Initial differential weight for DE + initial_CR = 0.9 # Initial crossover probability for DE + restart_threshold = 0.1 * self.budget # Restart after 10% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + velocity = np.random.uniform(-1, 1, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, velocity, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + def mutation_strategy_1(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + return np.clip(a + F * (b - c), bounds[0], bounds[1]) + + def mutation_strategy_2(population, i, F): + indices = list(range(population_size)) + indices.remove(i) + a, b = population[np.random.choice(indices, 2, replace=False)] + global_best = population[np.argmin(fitness)] + return np.clip(a + F * (global_best - a) + F * (b - population[i]), bounds[0], bounds[1]) + + def select_mutation_strategy(): + return mutation_strategy_1 if np.random.rand() < 0.5 else mutation_strategy_2 + + def quantum_behavior(population, global_best): + alpha = 0.1 # Quantum-inspired parameter controlling the attraction to the global best + beta = 0.9 # Quantum-inspired diffusion parameter + for i in range(population_size): + direction = global_best - population[i] + step_size = alpha * np.random.normal(size=self.dim) + diffusion = beta * np.random.normal(size=self.dim) + population[i] = np.clip( + population[i] + direction * step_size + diffusion, bounds[0], bounds[1] + ) + return population + + population, velocity, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + + global_best = population[np.argmin(fitness)] + global_best_fitness = np.min(fitness) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + mutation_strategy = select_mutation_strategy() + mutant = mutation_strategy(population, i, F_values[i]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + # PSO update with quantum behavior + r1, r2 = np.random.rand(2) + velocity = ( + w * velocity + c1 * r1 * (personal_best - population) + c2 * r2 * (global_best - population) + ) + population = np.clip(population + velocity, bounds[0], bounds[1]) + population = quantum_behavior(population, global_best) + + # Update personal bests + for i in range(population_size): + if new_fitness[i] < personal_best_fitness[i]: + personal_best[i] = new_population[i] + personal_best_fitness[i] = new_fitness[i] + + # Update global best + if np.min(new_fitness) < global_best_fitness: + global_best = new_population[np.argmin(new_fitness)] + global_best_fitness = np.min(new_fitness) + last_improvement = evaluations + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Dynamic restart based on fitness stagnation + if evaluations - last_improvement > restart_threshold: + population, velocity, fitness = initialize_population() + F_values = np.full(population_size, initial_F) + CR_values = np.full(population_size, initial_CR) + last_improvement = evaluations + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptation.py b/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptation.py new file mode 100644 index 000000000..a19e71db1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptation.py @@ -0,0 +1,59 @@ +import numpy as np + + +class RefinedQuantumEvolutionaryAdaptation: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.pop_size = 250 # Increased population size for better exploration + self.sigma_initial = 0.3 # Reduced initial standard deviation for more precise mutations + self.learning_rate = 0.05 # Lower learning rate for gradual adaptive changes + self.CR = 0.9 # Increased crossover probability for stronger gene mixing + self.q_impact_initial = 0.1 # Increased initial quantum impact for robust global search + self.q_impact_decay = 0.99 # Slower decay rate for quantum impact to sustain influence + self.sigma_decay = 0.99 # Slower decay rate for sigma to maintain a valuable exploration range longer + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + q_impact = self.q_impact_initial + + # Evolution loop + for iteration in range(int(self.budget / self.pop_size)): + # Adapt sigma and quantum impact with refined rates + sigma *= self.sigma_decay + q_impact *= self.q_impact_decay + + # Generate new trial vectors + for i in range(self.pop_size): + # Mutation using differential evolution strategy with enhanced quantum impact + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + quantum_term = q_impact * np.random.standard_cauchy(self.dim) + mutant = best_ind + sigma * (a - b) + quantum_term + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover using an adaptively modified rate + CRi = self.CR + self.learning_rate * (np.random.randn()) + cross_points = np.random.rand(self.dim) < CRi + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Fitness evaluation and selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptiveOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptiveOptimizer.py new file mode 100644 index 000000000..4d2022ef4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumEvolutionaryAdaptiveOptimizer.py @@ -0,0 +1,80 @@ +import numpy as np + + +class RefinedQuantumEvolutionaryAdaptiveOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initial setup + current_budget = 0 + population_size = 50 # Increased population for better exploration + mutation_factor = 0.8 # Less aggressive mutation for stability + crossover_prob = 0.7 # Lower crossover probability for maintaining diversity + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Modifying quantum behavior for refined control + quantum_beta = 0.6 # Reduced quantum randomness + quantum_alpha = 0.01 # Smaller quantum step size + + quantum_population = quantum_beta * np.random.randn(population_size, self.dim) + population + quantum_population = np.clip(quantum_population, self.lower_bound, self.upper_bound) + + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + indices = np.delete(np.arange(population_size), i) + random_indices = np.random.choice(indices, 3, replace=False) + x1, x2, x3 = population[random_indices] + q1, q2, q3 = quantum_population[random_indices] + + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + quantum_mutant = q1 + quantum_alpha * (q2 - q3) + quantum_mutant = np.clip(quantum_mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < crossover_prob, mutant, population[i]) + quantum_trial = np.where( + np.random.rand(self.dim) < crossover_prob, quantum_mutant, quantum_population[i] + ) + + trial_fitness = func(trial) + quantum_trial_fitness = func(quantum_trial) + current_budget += 2 # Two function evaluations per iteration + + if quantum_trial_fitness < trial_fitness: + trial_fitness = quantum_trial_fitness + trial = quantum_trial + + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + + # Dynamic adaptation of mutation factor and crossover probability + mutation_factor *= 0.985 # Gradual decrease to fine-tune exploration + crossover_prob *= 1.015 # Incremental increase to enhance exploring crossover possibilities + quantum_alpha *= 0.97 # Reduce quantum steps gradually, focusing on exploitation + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedQuantumFluxDifferentialSwarm.py b/nevergrad/optimization/lama/RefinedQuantumFluxDifferentialSwarm.py new file mode 100644 index 000000000..96edc3226 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumFluxDifferentialSwarm.py @@ -0,0 +1,60 @@ +import numpy as np + + +class RefinedQuantumFluxDifferentialSwarm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 1200 # Adjusted population size for more exploration + self.F_base = 0.8 # Adjusted base factor for mutation for stronger mutations + self.CR_base = 0.85 # Adjusted base crossover probability + self.quantum_probability = 0.25 # Increased quantum-driven mutation probability + self.vortex_factor = 0.25 # Modified vortex factor for dynamic strategy modulation + self.epsilon = 1e-6 # Stability constant for quantum mutation + + def __call__(self, func): + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main optimization loop + for i in range(int(self.budget / self.pop_size)): + # Update dynamic parameters + iteration_ratio = i / (self.budget / self.pop_size) + F = self.F_base + self.vortex_factor * np.sin(2 * np.pi * iteration_ratio) + CR = self.CR_base - self.vortex_factor * np.cos(2 * np.pi * iteration_ratio) + + for j in range(self.pop_size): + # Quantum-inspired mutation with variable probability + if np.random.rand() < self.quantum_probability: + mean_quantum_state = best_ind + (pop[j] - best_ind) / 2 + scale = (np.abs(best_ind - pop[j]) + self.epsilon) / 2 + quantum_mutation = np.random.normal(mean_quantum_state, scale) + quantum_mutation = np.clip(quantum_mutation, -5.0, 5.0) + mutant = quantum_mutation + else: + # DE mutation: DE/rand/1/bin with best influence + idxs = [idx for idx in range(self.pop_size) if idx != j] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = a + F * (b - c) + F * (best_ind - pop[j]) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[j]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[j]: + pop[j] = trial + fitness[j] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedQuantumGradientAdaptiveExplorationOptimization.py b/nevergrad/optimization/lama/RefinedQuantumGradientAdaptiveExplorationOptimization.py new file mode 100644 index 000000000..9b302041a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumGradientAdaptiveExplorationOptimization.py @@ -0,0 +1,222 @@ +import numpy as np + + +class RefinedQuantumGradientAdaptiveExplorationOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Initialize parameters + self.f_opt = np.inf + self.x_opt = None + + # Initialize swarm + swarm_size = 20 # Increased swarm size to improve exploration + positions = np.random.uniform(self.lower_bound, self.upper_bound, (swarm_size, self.dim)) + velocities = np.zeros_like(positions) + personal_bests = positions.copy() + personal_best_scores = np.array([np.inf] * swarm_size) + + # Global best + global_best_position = None + global_best_score = np.inf + + # PSO constants + c1 = 2.0 # Cognitive constant + c2 = 2.0 # Social constant + w = 0.5 # Inertia weight + + # Learning rate adaptation parameters + alpha = 0.1 # Initial learning rate + beta = 0.9 # Momentum term + epsilon = 1e-8 # Small term to avoid division by zero + + # Differential Evolution parameters + F_min = 0.4 # Min differential weight + F_max = 0.9 # Max differential weight + CR = 0.9 # Crossover probability + + # Diversity enforcement parameters + diversity_threshold = 0.1 + stagnation_counter = 0 + max_stagnation = 20 # Max stagnation to trigger diversity enforcement + + # Exploration improvement parameters + exploration_factor = 0.2 # Exploration factor to enhance exploration phase + + # Quantum-inspired rotation matrix + theta = np.pi / 4 # Rotation angle + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + # Mutation factor for mutation-based exploration + mutation_factor = 0.2 + + # Adaptive threshold for learning rate tuning + improvement_threshold = 0.005 + + # Historical memory with annealing + historical_bests = [] + annealing_factor = 0.99 # Annealing factor for historical memory + + prev_f = np.inf + + for i in range(self.budget): + for idx in range(swarm_size): + x = positions[idx] + v = velocities[idx] + + # Evaluate the function at the current point + f = func(x) + if f < personal_best_scores[idx]: + personal_best_scores[idx] = f + personal_bests[idx] = x.copy() + + if f < global_best_score: + global_best_score = f + global_best_position = x.copy() + + if f < self.f_opt: + self.f_opt = f + self.x_opt = x.copy() + + # Update velocity and position using PSO + r1, r2 = np.random.random(self.dim), np.random.random(self.dim) + cognitive_component = c1 * r1 * (personal_bests[idx] - x) + social_component = c2 * r2 * (global_best_position - x) + velocities[idx] = w * v + cognitive_component + social_component + positions[idx] = x + velocities[idx] + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Gradient-based update + grad = np.zeros_like(x) + perturbation = 1e-5 + for j in range(self.dim): + x_perturb = x.copy() + x_perturb[j] += perturbation + grad[j] = (func(x_perturb) - f) / perturbation + + # Update the velocity and position using gradient + velocity = beta * v - alpha * grad + positions[idx] = x + velocity + positions[idx] = np.clip(positions[idx], self.lower_bound, self.upper_bound) + + # Apply Differential Evolution mutation and crossover + if np.random.rand() < CR: + indices = list(range(swarm_size)) + indices.remove(idx) + a, b, c = np.random.choice(indices, 3, replace=False) + F = F_min + (F_max - F_min) * np.random.rand() + mutant = personal_bests[a] + F * (personal_bests[b] - personal_bests[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + trial = np.where(np.random.rand(self.dim) < CR, mutant, x) + trial_f = func(trial) + + if trial_f < f: + positions[idx] = trial + f = trial_f + + if trial_f < personal_best_scores[idx]: + personal_best_scores[idx] = trial_f + personal_bests[idx] = trial.copy() + + if trial_f < global_best_score: + global_best_score = trial_f + global_best_position = trial.copy() + + if trial_f < self.f_opt: + self.f_opt = trial_f + self.x_opt = trial.copy() + + # Adapt the learning rate based on the improvement + if i > 0 and (prev_f - f) / abs(prev_f) > improvement_threshold: + alpha *= 1.1 # Increase learning rate if improvement is significant + else: + alpha *= 0.9 # Decrease learning rate if improvement is not significant + + prev_f = f + + # Check for stagnation and enforce diversity if needed + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + if np.linalg.norm(positions[idx] - global_best_position) < diversity_threshold: + positions[idx] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + stagnation_counter = 0 + + if i > 0 and prev_f == self.f_opt: + stagnation_counter += 1 + else: + stagnation_counter = 0 + + # Dynamic exploration phase + if stagnation_counter >= max_stagnation: + for idx in range(swarm_size): + new_position = global_best_position + exploration_factor * np.random.uniform( + -1, 1, self.dim + ) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + stagnation_counter = 0 + + # Quantum-inspired exploration using rotation matrix + if i % 100 == 0 and i > 0: + for idx in range(swarm_size): + new_position = np.dot(rotation_matrix, positions[idx]) + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Mutation-based exploration + if i % 200 == 0 and i > 0: + for idx in range(swarm_size): + mutation = mutation_factor * np.random.uniform(-1, 1, self.dim) + new_position = positions[idx] + mutation + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_f = func(new_position) + if new_f < personal_best_scores[idx]: + personal_best_scores[idx] = new_f + personal_bests[idx] = new_position + positions[idx] = new_position + if new_f < global_best_score: + global_best_score = new_f + global_best_position = new_position + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_position + + # Anneal historical best positions + if i % 50 == 0: + historical_bests = [pos * annealing_factor for pos in historical_bests] + historical_bests.append(global_best_position) + + prev_f = self.f_opt + + return self.f_opt, self.x_opt + + +# Usage example: +# optimizer = RefinedQuantumGradientAdaptiveExplorationOptimization(budget=10000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RefinedQuantumGradientSearch.py b/nevergrad/optimization/lama/RefinedQuantumGradientSearch.py new file mode 100644 index 000000000..2386501ac --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumGradientSearch.py @@ -0,0 +1,88 @@ +import numpy as np + + +class RefinedQuantumGradientSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 300 # Further increased population size for more exploration + elite_size = 30 # Slightly increased elite size for better exploitation + evaluations = 0 + mutation_factor = 0.75 # Adjusted mutation factor + crossover_probability = 0.95 # Very high crossover probability to better mix genetic information + quantum_probability = 0.15 # Increased initial quantum probability for aggressive exploration + learning_rate = 0.008 # Slightly decreased learning rate for more stable gradient descent + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + if abs(previous_best - self.f_opt) < 1e-7: # More sensitive threshold for convergence detection + mutation_factor *= 0.9 # More controlled mutation factor decrement + learning_rate *= 0.9 # Reduce learning rate to stabilize near minima + else: + mutation_factor *= 1.1 # Increment mutation factor to explore aggressively + learning_rate *= 1.1 # Increase learning rate for faster convergence on hills + previous_best = self.f_opt + + for _ in range(int(quantum_probability * population_size)): + quantum_individual = np.random.uniform(self.lb, self.ub, self.dim) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + elite_indices = np.argsort(fitness)[:elite_size] + for idx in elite_indices: + gradient = np.random.normal(0, 1, self.dim) # Simulated gradient representation + population[idx] += learning_rate * gradient + population[idx] = np.clip(population[idx], self.lb, self.ub) + new_fitness = func(population[idx]) + evaluations += 1 + + if new_fitness < fitness[idx]: + fitness[idx] = new_fitness + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = population[idx] + + new_population = [] + for i in range(population_size): + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial) + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + else: + new_population.append(population[i]) + + population = np.array(new_population) + quantum_probability = min(0.25, quantum_probability * 1.1) # Gently increase quantum probability + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV6.py b/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV6.py new file mode 100644 index 000000000..e051ba99f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV6.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedQuantumGuidedHybridSearchV6: + def __init__( + self, + budget, + dimension=5, + population_size=150, + elite_ratio=0.2, + mutation_scale=0.5, + mutation_decay=0.005, + crossover_prob=0.8, + quantum_factor=0.9, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.ceil(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_factor = quantum_factor + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Generate a new population + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + # Crossover from elite individuals + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + # Directly copy an elite with a probability + child = population[np.random.choice(elite_indices)] + + # Apply quantum tuning on a probability + if np.random.random() < self.quantum_factor: + child = self.quantum_tuning(child, best_individual) + + # Mutation with decreasing scale + mutation_scale = self.mutation_scale * np.exp( + -self.mutation_decay * evaluations / self.budget + ) + child += np.random.normal(0, mutation_scale, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Select the best from the new population + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + # Combine and sort populations based on fitness + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_tuning(self, individual, best_individual): + perturbation = np.random.normal(-0.1, 0.1, self.dimension) + return individual + perturbation * (best_individual - individual) diff --git a/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV8.py b/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV8.py new file mode 100644 index 000000000..1115e55b0 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumGuidedHybridSearchV8.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedQuantumGuidedHybridSearchV8: + def __init__( + self, + budget, + dimension=5, + population_size=250, + elite_ratio=0.1, + mutation_scale=0.5, + mutation_decay=0.005, + crossover_prob=0.9, + quantum_enhancement=True, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(np.floor(population_size * elite_ratio)) + self.mutation_scale = mutation_scale + self.mutation_decay = mutation_decay + self.crossover_prob = crossover_prob + self.quantum_enhancement = quantum_enhancement + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + + # Track the best individual + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + # Generate a new population + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elites = population[elite_indices] + + for i in range(self.population_size): + if np.random.random() < self.crossover_prob: + # Perform Crossover from elite individuals + parent1 = population[np.random.choice(elite_indices)] + parent2 = population[np.random.choice(elite_indices)] + child = self.crossover(parent1, parent2) + else: + # Directly copy an elite + child = population[np.random.choice(elite_indices)] + + # Apply quantum enhancement dynamically + if ( + self.quantum_enhancement and np.random.random() < 0.2 + ): # Lowered probability for quantum enhancement + child = self.quantum_tuning(child, best_individual) + + # Mutation with dynamic scale adjustment + mutation_scale = self.mutation_scale * np.exp(-self.mutation_decay * evaluations) + child += np.random.normal(0, mutation_scale, self.dimension) + child = np.clip(child, -5, 5) + + new_population[i] = child + + # Evaluate new population + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.population_size + + # Select the best from the new population + best_new_idx = np.argmin(new_fitness) + if new_fitness[best_new_idx] < best_fitness: + best_fitness = new_fitness[best_new_idx] + best_individual = new_population[best_new_idx] + + # Combine and sort populations based on fitness + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + sorted_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[sorted_indices] + fitness = combined_fitness[sorted_indices] + + return best_fitness, best_individual + + def crossover(self, parent1, parent2): + alpha = np.random.rand(self.dimension) + return alpha * parent1 + (1 - alpha) * parent2 + + def quantum_tuning(self, individual, best_individual): + perturbation = np.random.uniform(-1, 1, self.dimension) * 0.05 # Added uniform perturbation + return individual + perturbation * (best_individual - individual) diff --git a/nevergrad/optimization/lama/RefinedQuantumHybridAdaptiveStrategyV3.py b/nevergrad/optimization/lama/RefinedQuantumHybridAdaptiveStrategyV3.py new file mode 100644 index 000000000..e13f6c10c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumHybridAdaptiveStrategyV3.py @@ -0,0 +1,67 @@ +import numpy as np + + +class RefinedQuantumHybridAdaptiveStrategyV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 250 # Increased Population size for broader search + self.sigma_initial = 0.05 # Further reduced mutation spread + self.elitism_factor = 5 # Reduced elite size to increase diversity + self.sigma_decay = 0.99 # Reduced decay for slower convergence + self.CR_base = 0.9 # Increased crossover probability for more exploration + self.CR_decay = 0.995 # Slower decay rate for crossover probability + self.q_impact = 0.8 # Increased base quantum impact + self.q_impact_decay = 0.99 # Added decay to quantum impact to stabilize late convergence + self.adaptation_rate = 0.1 # Increased adaptation rate for dynamic response + + def __call__(self, func): + # Initialize population + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(x) for x in pop]) + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + sigma = self.sigma_initial + CR = self.CR_base + elite_size = int(self.elitism_factor * self.pop_size / 100) + q_impact = self.q_impact + + # Evolutionary loop + for _ in range(self.budget // self.pop_size): + for i in range(self.pop_size): + if i < elite_size: # Elite members are carried forward + continue + + # Mutation using a DE-like strategy with enhanced quantum effects + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = best_ind + sigma * (a - b + c) + q_impact * np.random.standard_cauchy(self.dim) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + # Adaptive updates to parameters + sigma *= self.sigma_decay + CR *= self.CR_decay + q_impact *= self.q_impact_decay # Decaying quantum impact based on adaptation rate + if np.random.rand() < 0.5: + q_impact += self.adaptation_rate * q_impact + else: + q_impact -= self.adaptation_rate * q_impact + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedQuantumHybridDynamicAdaptiveDE.py b/nevergrad/optimization/lama/RefinedQuantumHybridDynamicAdaptiveDE.py new file mode 100644 index 000000000..bacab4108 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumHybridDynamicAdaptiveDE.py @@ -0,0 +1,184 @@ +import numpy as np + + +class RefinedQuantumHybridDynamicAdaptiveDE: + def __init__( + self, + budget=10000, + population_size=150, + elite_size=20, + local_search_steps=30, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.03, + perturbation_decay=0.99, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedQuantumHybridEliteAdaptiveDE.py b/nevergrad/optimization/lama/RefinedQuantumHybridEliteAdaptiveDE.py new file mode 100644 index 000000000..0b7af6eb4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumHybridEliteAdaptiveDE.py @@ -0,0 +1,192 @@ +import numpy as np + + +class RefinedQuantumHybridEliteAdaptiveDE: + def __init__( + self, + budget=10000, + population_size=120, + elite_size=15, + local_search_steps=30, + F_min=0.5, + F_max=1.0, + Cr_min=0.2, + Cr_max=0.9, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.strategy_proportions = [0.5, 0.5] # Starting with equal proportions for both strategies + + def local_search(self, individual, func): + """Local search around an elite individual for fine-tuning using gradient estimation.""" + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + step_size = 0.01 * np.random.randn() # adaptive step size with Gaussian perturbation + perturbation = np.random.uniform(-step_size, step_size, len(individual)) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.8, 0.9 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies, p=self.strategy_proportions) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.1 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + # Adjust strategy proportions based on success rates + if selected_strategy == self.differential_mutation: + self.strategy_proportions[0] = min( + 1.0, self.strategy_proportions[0] + 0.05 * success_rate + ) + else: + self.strategy_proportions[1] = min( + 1.0, self.strategy_proportions[1] + 0.05 * success_rate + ) + total = sum(self.strategy_proportions) + self.strategy_proportions = [x / total for x in self.strategy_proportions] + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedQuantumInfluenceLocalSearchOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumInfluenceLocalSearchOptimizer.py new file mode 100644 index 000000000..cee7056dc --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInfluenceLocalSearchOptimizer.py @@ -0,0 +1,86 @@ +import numpy as np + + +class RefinedQuantumInfluenceLocalSearchOptimizer: + def __init__( + self, + budget, + dim=5, + population_size=50, + elite_size=5, + mutation_intensity=0.1, + local_search_intensity=0.01, + ): + self.budget = budget + self.dim = dim + self.population_size = population_size + self.elite_size = elite_size + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.mutation_intensity = mutation_intensity + self.local_search_intensity = local_search_intensity + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(x) for x in population]) + + def select_elites(self, population, fitnesses): + elite_indices = np.argsort(fitnesses)[: self.elite_size] + return population[elite_indices], fitnesses[elite_indices] + + def crossover(self, parent1, parent2): + mask = np.random.rand(self.dim) < 0.5 + offspring = np.where(mask, parent1, parent2) + return offspring + + def mutate(self, individual): + mutation = np.random.normal(0, self.mutation_intensity, self.dim) + mutated = individual + mutation + return np.clip(mutated, self.lower_bound, self.upper_bound) + + def local_search(self, func, candidate): + for _ in range(10): # perform 10 local search steps + perturbation = np.random.uniform( + -self.local_search_intensity, self.local_search_intensity, self.dim + ) + new_candidate = candidate + perturbation + new_candidate = np.clip(new_candidate, self.lower_bound, self.upper_bound) + if func(new_candidate) < func(candidate): + candidate = new_candidate + return candidate + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + elites, elite_fitness = self.select_elites(population, fitness) + + new_population = elites.copy() # start new population with elites + for _ in range(self.population_size - self.elite_size): + parents = np.random.choice(elites.shape[0], 2, replace=False) + parent1, parent2 = elites[parents[0]], elites[parents[1]] + offspring = self.crossover(parent1, parent2) + offspring = self.mutate(offspring) + offspring = self.local_search(func, offspring) # Perform local search on offspring + new_population = np.vstack((new_population, offspring)) + + new_fitness = self.evaluate_population(func, new_population) + + if np.min(new_fitness) < best_fitness: + best_idx = np.argmin(new_fitness) + best_individual = new_population[best_idx] + best_fitness = new_fitness[best_idx] + + population = new_population + fitness = new_fitness + + evaluations += self.population_size + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedQuantumInformedAdaptiveInertiaOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumInformedAdaptiveInertiaOptimizer.py new file mode 100644 index 000000000..3d06f6abb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInformedAdaptiveInertiaOptimizer.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedQuantumInformedAdaptiveInertiaOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 50 # Increased population size for better exploration + inertia_weight = 0.9 # Initial inertia + cognitive_coefficient = 2.0 # Increased personal learning effect + social_coefficient = 2.0 # Increased social influence + quantum_probability = 0.2 # Increased probability of quantum jumps + max_velocity = 1.0 # Added max velocity cap to stabilize updates + + # Initialize population and velocities + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Main optimization loop + while current_budget < self.budget: + w = inertia_weight * ( + 1 - (current_budget / self.budget) ** 1.5 + ) # More aggressive adaptive inertia + + for i in range(population_size): + if current_budget >= self.budget: + break + + if np.random.rand() < quantum_probability: + # Quantum jump strategy + population[i] = np.random.uniform(self.lower_bound, self.upper_bound, self.dim) + else: + # Standard PSO update strategy with velocity clamping + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = np.clip( + inertia + cognitive_component + social_component, -max_velocity, max_velocity + ) + + population[i] = np.clip(population[i] + velocity[i], self.lower_bound, self.upper_bound) + + # Function evaluation + fitness = func(population[i]) + current_budget += 1 + + # Personal best update + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Global best update + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/RefinedQuantumInformedAdaptivePSO.py b/nevergrad/optimization/lama/RefinedQuantumInformedAdaptivePSO.py new file mode 100644 index 000000000..d19b08b61 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInformedAdaptivePSO.py @@ -0,0 +1,71 @@ +import numpy as np + + +class RefinedQuantumInformedAdaptivePSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 150 # Slightly increased population for better exploration + inertia_weight = 0.95 # Slightly higher initial inertia for exploration + cognitive_coefficient = 1.5 # Reduced to prevent excessive local search + social_coefficient = 1.5 # Reduced to prevent excessive global search + final_inertia_weight = 0.3 # Reduced final inertia for fine-tuning exploitation + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + velocity = np.zeros((population_size, self.dim)) + personal_best_position = np.copy(population) + personal_best_fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + global_best_position = personal_best_position[np.argmin(personal_best_fitness)] + global_best_fitness = np.min(personal_best_fitness) + + # Optimization loop + while current_budget < self.budget: + w = inertia_weight - (inertia_weight - final_inertia_weight) * (current_budget / self.budget) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Quantum-inspired stochastic component adjusted with a dampening factor + quantum_factor = np.random.normal( + 0, 0.05, self.dim + ) # Reduced variance for more controlled exploration + + # Update velocity + inertia = w * velocity[i] + cognitive_component = ( + cognitive_coefficient + * np.random.rand(self.dim) + * (personal_best_position[i] - population[i]) + ) + social_component = ( + social_coefficient * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + velocity[i] = inertia + cognitive_component + social_component + quantum_factor + + # Update position + population[i] += velocity[i] + population[i] = np.clip(population[i], self.lower_bound, self.upper_bound) + + # Evaluate new position + fitness = func(population[i]) + current_budget += 1 + + # Update personal best + if fitness < personal_best_fitness[i]: + personal_best_position[i] = population[i] + personal_best_fitness[i] = fitness + + # Update global best + if fitness < global_best_fitness: + global_best_position = population[i] + global_best_fitness = fitness + + return global_best_fitness, global_best_position diff --git a/nevergrad/optimization/lama/RefinedQuantumInformedDifferentialStrategyV2.py b/nevergrad/optimization/lama/RefinedQuantumInformedDifferentialStrategyV2.py new file mode 100644 index 000000000..7cdee3035 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInformedDifferentialStrategyV2.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedQuantumInformedDifferentialStrategyV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = np.full(self.dim, -5.0) + self.ub = np.full(self.dim, 5.0) + + def __call__(self, func): + population_size = 100 + elite_size = 10 + evaluations = 0 + mutation_scale = 0.8 # Increased initial mutation scale + recombination_prob = 0.95 # Higher recombination probability + quantum_factor = 0.05 # Initial quantum factor + convergence_threshold = 1e-5 # Threshold to enhance convergence monitoring + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = np.inf + + while evaluations < self.budget: + # Check for convergence improvement + if abs(previous_best - self.f_opt) < convergence_threshold: + mutation_scale *= 0.9 # Reduce the mutation scale to fine-tune the search + previous_best = self.f_opt + + # Quantum-inspired solution space exploration + num_quantum_individuals = int(population_size * quantum_factor) + quantum_population = np.random.uniform(self.lb, self.ub, (num_quantum_individuals, self.dim)) + quantum_fitness = np.array([func(ind) for ind in quantum_population]) + evaluations += num_quantum_individuals + + combined_population = np.vstack((population, quantum_population)) + combined_fitness = np.hstack((fitness, quantum_fitness)) + + # Select the top-performing individuals as elite + elite_indices = np.argsort(combined_fitness)[:elite_size] + elite_individuals = combined_population[elite_indices] + elite_fitness = combined_fitness[elite_indices] + + # Differential evolution mutation and recombination + new_population = [] + for _ in range(population_size - elite_size): + indices = np.random.choice(elite_size, 3, replace=False) + x1, x2, x3 = elite_individuals[indices] + mutant = x1 + mutation_scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + child = np.where(np.random.rand(self.dim) < recombination_prob, mutant, x1) + + child_fitness = func(child) + evaluations += 1 + + if child_fitness < self.f_opt: + self.f_opt = child_fitness + self.x_opt = child + + new_population.append(child) + + # Update population and fitness + population = np.vstack((elite_individuals, new_population)) + fitness = np.array([func(ind) for ind in population]) + evaluations += len(new_population) + + # Dynamically adapt quantum factor based on convergence + if evaluations % 1000 == 0: + quantum_factor = min(0.5, quantum_factor + 0.05) # Gradually increase the quantum factor + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumInformedGradientOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumInformedGradientOptimizer.py new file mode 100644 index 000000000..de83896f3 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInformedGradientOptimizer.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedQuantumInformedGradientOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + current_budget = 0 + population_size = 150 # A smaller population for more focused search + mutation_factor = 0.5 # Reduced mutation for more controlled exploration + crossover_prob = 0.5 # Reduced crossover probability for more stable descent + learning_rate = 0.01 # Initial learning rate for gradient-based steps + + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + # Main optimization loop + while current_budget < self.budget: + new_population = np.empty_like(population) + gradients = np.zeros_like(population) + + # Estimating gradients using central difference method + for i in range(population_size): + if current_budget >= self.budget: + break + + base_ind = population[i] + for d in range(self.dim): + perturbed_ind_plus = np.array(base_ind) + perturbed_ind_minus = np.array(base_ind) + perturbed_ind_plus[d] += learning_rate + perturbed_ind_minus[d] -= learning_rate + + if current_budget + 2 <= self.budget: + fitness_plus = func(perturbed_ind_plus) + fitness_minus = func(perturbed_ind_minus) + current_budget += 2 + gradient = (fitness_plus - fitness_minus) / (2 * learning_rate) + gradients[i, d] = gradient + + # Apply learned gradients, mutation, and crossover to form new population + for i in range(population_size): + if current_budget >= self.budget: + break + + child = population[i] - learning_rate * gradients[i] # Applying gradient + child += np.random.randn(self.dim) * mutation_factor # Applying mutation + + # Crossover operation + if np.random.rand() < crossover_prob: + partner_idx = np.random.randint(population_size) + crossover_mask = np.random.rand(self.dim) < 0.5 + child = child * crossover_mask + population[partner_idx] * (1 - crossover_mask) + + child = np.clip(child, self.lower_bound, self.upper_bound) + child_fitness = func(child) + current_budget += 1 + + if child_fitness < fitness[i]: + new_population[i] = child + fitness[i] = child_fitness + else: + new_population[i] = population[i] + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_solution = child + + population = new_population + + # Adaptive adjustments to learning rate and mutation factor + mutation_factor *= 0.95 # Gradual decay + learning_rate *= 0.99 # Decreasing learning rate to refine convergence + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedQuantumInformedPSO.py b/nevergrad/optimization/lama/RefinedQuantumInformedPSO.py new file mode 100644 index 000000000..19c4dd24e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInformedPSO.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedQuantumInformedPSO: + def __init__( + self, + budget=10000, + population_size=250, + initial_inertia=0.95, + final_inertia=0.3, + cognitive_weight=2.2, + social_weight=2.0, + quantum_prob=0.15, + quantum_radius=0.15, + gradient_factor=0.02, + ): + self.budget = budget + self.population_size = population_size + self.initial_inertia = initial_inertia + self.final_inertia = final_inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.quantum_prob = quantum_prob + self.quantum_radius = quantum_radius + self.gradient_factor = gradient_factor + self.dim = 5 + self.lb, self.ub = -5.0, 5.0 + self.inertia_reduction = (self.initial_inertia - self.final_inertia) / budget + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + inertia = max(self.initial_inertia - evaluations * self.inertia_reduction, self.final_inertia) + + for i in range(self.population_size): + r1, r2 = np.random.rand(2) + + cognitive_component = self.cognitive_weight * r1 * (personal_bests[i] - particles[i]) + social_component = self.social_weight * r2 * (global_best - particles[i]) + gradient_component = ( + self.gradient_factor + * (global_best - particles[i]) + / (np.linalg.norm(global_best - particles[i]) + 1e-10) + ) + + velocities[i] = ( + inertia * velocities[i] + cognitive_component + social_component - gradient_component + ) + + if np.random.rand() < self.quantum_prob: + quantum_jump = np.random.normal(0, self.quantum_radius, self.dim) + particles[i] = global_best + quantum_jump + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumInfusedAdaptiveStrategyV2.py b/nevergrad/optimization/lama/RefinedQuantumInfusedAdaptiveStrategyV2.py new file mode 100644 index 000000000..7be05f065 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumInfusedAdaptiveStrategyV2.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedQuantumInfusedAdaptiveStrategyV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 * np.ones(self.dim) + self.ub = 5.0 * np.ones(self.dim) + + def __call__(self, func): + population_size = 150 + elite_size = 15 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.8 + quantum_probability = 0.1 + adaptive_rate = 0.05 + learning_period = 50 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + previous_best = self.f_opt + + while evaluations < self.budget: + # Quantum mutation incorporated with gradient information + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + quantum_individual = population[i] + np.random.normal(0, 1, self.dim) + quantum_individual = np.clip(quantum_individual, self.lb, self.ub) + quantum_fitness = func(quantum_individual) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_individual + fitness[i] = quantum_fitness + + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_individual + + # Evolve population with mutation and crossover + indices = np.random.permutation(population_size) + for i in indices: + if evaluations >= self.budget: + break + idxs = [idx for idx in indices if idx != i][:3] + a, b, c = population[idxs] + mutant = np.clip(a + mutation_factor * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < crossover_probability + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + # Adjust strategy parameters based on recent performance improvements + if evaluations % learning_period == 0: + if np.abs(previous_best - self.f_opt) < 1e-5: + mutation_factor *= 1 - adaptive_rate + crossover_probability *= 1 + adaptive_rate + else: + mutation_factor = min(mutation_factor * (1 + adaptive_rate), 1.0) + crossover_probability = max(crossover_probability * (1 - adaptive_rate), 0.4) + previous_best = self.f_opt + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumLevyMemeticDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedQuantumLevyMemeticDifferentialEvolution.py new file mode 100644 index 000000000..62f67e2fb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumLevyMemeticDifferentialEvolution.py @@ -0,0 +1,132 @@ +import numpy as np + + +class RefinedQuantumLevyMemeticDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 50 + self.memory_size = 5 + self.memory_index = 0 + self.memory_F = [0.5] * self.memory_size + self.memory_CR = [0.5] * self.memory_size + self.tau1 = 0.1 + self.tau2 = 0.1 + self.local_search_iters = 5 + self.elitism_rate = 0.2 + self.diversity_threshold = 1e-4 + self.local_search_prob = 0.2 + self.alpha = 0.01 + + def initialize_population(self, bounds): + return np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + + def select_parents(self, population, idx): + indices = np.delete(np.arange(self.pop_size), idx) + return population[np.random.choice(indices, 3, replace=False)] + + def mutate(self, base, diff1, diff2, F): + return np.clip(base + F * (diff1 - diff2), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def adapt_parameters(self): + F = self.memory_F[self.memory_index] + CR = self.memory_CR[self.memory_index] + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(0.5, 0.3), 0, 1) # Refined adaptation for F + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(0.5, 0.1), 0, 1) # Refined adaptation for CR + return F, CR + + def local_search(self, individual, bounds, func): + best_individual = np.copy(individual) + best_fitness = func(best_individual) + for _ in range(self.local_search_iters): + mutation = np.random.randn(self.dim) * 0.05 + trial = np.clip(individual + mutation, bounds.lb, bounds.ub) + trial_fitness = func(trial) + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + return best_individual, best_fitness + + def levy_flight(self, individual, bounds): + u = np.random.normal(0, 1, self.dim) * self.alpha + v = np.random.normal(0, 1, self.dim) + step = u / np.abs(v) ** (1 / 3) + return np.clip(individual + step, bounds.lb, bounds.ub) + + def hybrid_local_search(self, individual, bounds, func): + if np.random.rand() < self.local_search_prob: + return self.local_search(individual, bounds, func) + else: + mutation = self.levy_flight(individual, bounds) + trial_fitness = func(mutation) + return ( + (mutation, trial_fitness) + if trial_fitness < func(individual) + else (individual, func(individual)) + ) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population = self.initialize_population(bounds) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros_like(fitness) + + for i in range(self.pop_size): + parents = self.select_parents(population, i) + parent1, parent2, parent3 = parents + + F, CR = self.adapt_parameters() + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + elite_indices = np.argsort(new_fitness)[: int(self.elitism_rate * self.pop_size)] + for idx in elite_indices: + new_population[idx], new_fitness[idx] = self.hybrid_local_search( + new_population[idx], bounds, func + ) + evaluations += self.local_search_iters + + if self.diversity(new_population) < self.diversity_threshold and evaluations < self.budget: + new_population = self.initialize_population(bounds) + new_fitness = np.array([func(ind) for ind in new_population]) + evaluations += self.pop_size + + population = new_population + fitness = new_fitness + + # Update the memory with successful parameters + self.memory_F[self.memory_index] = 0.9 * self.memory_F[self.memory_index] + 0.1 * F + self.memory_CR[self.memory_index] = 0.9 * self.memory_CR[self.memory_index] + 0.1 * CR + self.memory_index = (self.memory_index + 1) % self.memory_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumMultiStrategyOptimization.py b/nevergrad/optimization/lama/RefinedQuantumMultiStrategyOptimization.py new file mode 100644 index 000000000..05dace86a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumMultiStrategyOptimization.py @@ -0,0 +1,184 @@ +import numpy as np + + +class RefinedQuantumMultiStrategyOptimization: + def __init__( + self, + budget=10000, + population_size=200, + elite_size=30, + local_search_steps=50, + F_min=0.4, + F_max=0.9, + Cr_min=0.3, + Cr_max=0.8, + perturbation_intensity=0.02, + perturbation_decay=0.98, + ): + self.budget = budget + self.population_size = population_size + self.elite_size = elite_size + self.local_search_steps = local_search_steps + self.dim = 5 # given dimensionality + self.bounds = np.array([-5.0, 5.0]) + self.F_min = F_min + self.F_max = F_max + self.Cr_min = Cr_min + self.Cr_max = Cr_max + self.perturbation_intensity = perturbation_intensity + self.perturbation_decay = perturbation_decay + + def local_search(self, individual, func): + best_local = individual.copy() + best_fitness = func(individual) + for _ in range(self.local_search_steps): + perturbation = np.random.uniform( + -self.perturbation_intensity, self.perturbation_intensity, len(individual) + ) + candidate = individual + perturbation + candidate = np.clip(candidate, self.bounds[0], self.bounds[1]) + candidate_fitness = func(candidate) + if candidate_fitness < best_fitness: + best_local = candidate + best_fitness = candidate_fitness + return best_local, best_fitness + + def differential_mutation(self, population, F): + """Perform differential mutation.""" + pop_size, dim = population.shape + idxs = np.random.choice(pop_size, 3, replace=False) + a, b, c = population[idxs[0]], population[idxs[1]], population[idxs[2]] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + return mutant + + def quantum_jolt(self, individual, intensity): + """Apply quantum-inspired jolt to an individual.""" + jolt = np.random.uniform(-intensity, intensity, len(individual)) + jolted_individual = np.clip(individual + jolt, self.bounds[0], self.bounds[1]) + return jolted_individual + + def entropy_based_selection(self, population, fitness): + """Select individuals based on entropy measure to maintain diversity.""" + probabilities = fitness / np.sum(fitness) + entropy = -np.sum(probabilities * np.log(probabilities + 1e-10)) + if entropy < np.log(len(population)) / 2: + selected_indices = np.argsort(fitness)[: self.elite_size] + else: + selected_indices = np.random.choice(len(population), self.elite_size, replace=False) + return selected_indices + + def multi_population_management(self, populations, fitnesses): + """Manage multiple sub-populations to enhance exploration and exploitation.""" + all_individuals = np.vstack(populations) + all_fitnesses = np.hstack(fitnesses) + best_idx = np.argmin(all_fitnesses) + overall_best = all_individuals[best_idx] + overall_best_fitness = all_fitnesses[best_idx] + + for i in range(len(populations)): + if overall_best_fitness < np.min(fitnesses[i]): + worst_idx = np.argmax(fitnesses[i]) + populations[i][worst_idx] = overall_best + fitnesses[i][worst_idx] = overall_best_fitness + + return populations, fitnesses + + def ensemble_optimization(self, func): + """Ensemble of multiple strategies to enhance exploration and exploitation.""" + strategies = [self.differential_mutation, self.quantum_jolt] + self.f_opt = np.Inf + self.x_opt = None + + sub_population_size = self.population_size // 2 + populations = [ + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + np.random.uniform(self.bounds[0], self.bounds[1], (sub_population_size, self.dim)), + ] + + fitnesses = [np.array([func(ind) for ind in pop]) for pop in populations] + evaluations = 2 * sub_population_size + + best_idx = np.argmin([np.min(fit) for fit in fitnesses]) + self.x_opt = populations[best_idx][np.argmin(fitnesses[best_idx])] + self.f_opt = np.min([np.min(fit) for fit in fitnesses]) + + F, Cr = 0.7, 0.8 + while evaluations < self.budget: + for k in range(2): + new_population = np.zeros_like(populations[k]) + success_count = 0 + for i in range(sub_population_size): + selected_strategy = np.random.choice(strategies) + if selected_strategy == self.differential_mutation: + mutant = self.differential_mutation(populations[k], F) + else: + intensity = 0.05 * (1 - success_count / sub_population_size) + mutant = self.quantum_jolt(populations[k][i], intensity) + + cross_points = np.random.rand(self.dim) < Cr + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + + trial = np.where(cross_points, mutant, populations[k][i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitnesses[k][i]: + new_population[i] = trial + fitnesses[k][i] = trial_fitness + success_count += 1 + else: + new_population[i] = populations[k][i] + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + populations[k] = new_population + + elite_indices = self.entropy_based_selection(populations[k], fitnesses[k]) + elite_population = populations[k][elite_indices] + elite_fitness = fitnesses[k][elite_indices] + + # Performing local search on the elite population + for j in range(self.elite_size): + elite_population[j], elite_fitness[j] = self.local_search(elite_population[j], func) + if elite_fitness[j] < self.f_opt: + self.f_opt = elite_fitness[j] + self.x_opt = elite_population[j] + + worst_indices = np.argsort(fitnesses[k])[-self.elite_size :] + for idx in worst_indices: + if evaluations >= self.budget: + break + elite_idx = np.random.choice(elite_indices) + worst_idx = idx + + difference_vector = populations[k][elite_idx] - populations[k][worst_idx] + new_candidate = populations[k][worst_idx] + np.random.rand() * difference_vector + new_candidate = np.clip(new_candidate, self.bounds[0], self.bounds[1]) + new_candidate_fitness = func(new_candidate) + evaluations += 1 + + if new_candidate_fitness < fitnesses[k][worst_idx]: + populations[k][worst_idx] = new_candidate + fitnesses[k][worst_idx] = new_candidate_fitness + if new_candidate_fitness < self.f_opt: + self.f_opt = new_candidate_fitness + self.x_opt = new_candidate + + success_rate = success_count / sub_population_size + if success_rate > 0.2: + F = min(self.F_max, F + 0.05 * (success_rate - 0.2)) + Cr = max(self.Cr_min, Cr - 0.05 * (0.2 - success_rate)) + else: + F = max(self.F_min, F - 0.05 * (0.2 - success_rate)) + Cr = min(self.Cr_max, Cr + 0.05 * (success_rate - 0.2)) + + populations, fitnesses = self.multi_population_management(populations, fitnesses) + self.perturbation_intensity *= self.perturbation_decay + + return self.f_opt, self.x_opt + + def __call__(self, func): + return self.ensemble_optimization(func) diff --git a/nevergrad/optimization/lama/RefinedQuantumNesterovSynergyV2.py b/nevergrad/optimization/lama/RefinedQuantumNesterovSynergyV2.py new file mode 100644 index 000000000..6a418261f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumNesterovSynergyV2.py @@ -0,0 +1,68 @@ +import numpy as np + + +class RefinedQuantumNesterovSynergyV2: + def __init__( + self, + budget, + dim=5, + learning_rate=0.1, + momentum=0.95, + quantum_influence_rate=0.25, + adaptive_lr_factor=0.95, + elite_fraction=0.35, + noise_factor=0.25, + perturbation_intensity=0.1, + ): + self.budget = budget + self.dim = dim + self.learning_rate = learning_rate + self.momentum = momentum + self.quantum_influence_rate = quantum_influence_rate + self.adaptive_lr_factor = adaptive_lr_factor + self.elite_fraction = elite_fraction + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.noise_factor = noise_factor + self.perturbation_intensity = perturbation_intensity + + def initialize(self): + self.population = np.random.uniform( + self.lower_bound, self.upper_bound, (int(self.budget * self.elite_fraction), self.dim) + ) + self.velocities = np.zeros((int(self.budget * self.elite_fraction), self.dim)) + self.fitnesses = np.full(int(self.budget * self.elite_fraction), np.inf) + + def evaluate_population(self, func): + for i in range(len(self.population)): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + + def update_particles(self): + best_idx = np.argmin(self.fitnesses) + global_best = self.population[best_idx] + + for i in range(len(self.population)): + if np.random.rand() < self.quantum_influence_rate: + quantum_jump = np.random.normal(0, self.perturbation_intensity, self.dim) + self.population[i] += quantum_jump * (global_best - self.population[i]) + + noise = np.random.normal(0, self.noise_factor, self.dim) + self.velocities[i] = self.momentum * self.velocities[i] - self.learning_rate * noise + future_position = self.population[i] + self.momentum * self.velocities[i] + future_position = np.clip(future_position, self.lower_bound, self.upper_bound) + self.population[i] = future_position + + self.learning_rate *= self.adaptive_lr_factor + + def __call__(self, func): + self.initialize() + total_evaluations = len(self.population) + while total_evaluations < self.budget: + self.evaluate_population(func) + self.update_particles() + total_evaluations += len(self.population) + + best_idx = np.argmin(self.fitnesses) + return self.fitnesses[best_idx], self.population[best_idx] diff --git a/nevergrad/optimization/lama/RefinedQuantumResilientCrossoverEnhancer.py b/nevergrad/optimization/lama/RefinedQuantumResilientCrossoverEnhancer.py new file mode 100644 index 000000000..13341dc97 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumResilientCrossoverEnhancer.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedQuantumResilientCrossoverEnhancer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 100 + elite_size = 10 + evaluations = 0 + mutation_factor = 0.85 + crossover_probability = 0.8 + quantum_probability = 0.1 + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + # Evolution loop + while evaluations < self.budget: + # Quantum mutation step + if np.random.rand() < quantum_probability: + for i in range(elite_size): + if evaluations >= self.budget: + break + idx = np.random.choice(population_size) + quantum_mutant = population[idx] + np.random.normal(0, 1.0, self.dim) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[idx]: + population[idx] = quantum_mutant + fitness[idx] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Differential evolution operators + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 4, replace=False) + x1, x2, x3, x4 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3 + x4 - x1) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumSwarmOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumSwarmOptimizer.py new file mode 100644 index 000000000..6df26b766 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumSwarmOptimizer.py @@ -0,0 +1,76 @@ +import numpy as np + + +class RefinedQuantumSwarmOptimizer: + def __init__( + self, + budget=10000, + population_size=100, + inertia_weight=0.7, + cognitive_coefficient=2.0, + social_coefficient=2.0, + decay_rate=0.985, + quantum_jump_rate=0.08, + quantum_scale=0.08, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.decay_rate = decay_rate + self.quantum_jump_rate = quantum_jump_rate + self.quantum_scale = quantum_scale + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + if np.random.rand() < self.quantum_jump_rate: + # Quantum jump for exploration + particles[i] = global_best + np.random.normal(0, self.quantum_scale, self.dim) * ( + self.ub - self.lb + ) + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + # Classical PSO update for exploitation + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Adaptive decay of strategy parameters + self.quantum_jump_rate *= self.decay_rate + self.quantum_scale *= self.decay_rate + self.inertia_weight *= self.decay_rate + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV2.py b/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV2.py new file mode 100644 index 000000000..e3f5bc097 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV2.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedQuantumSymbioticStrategyV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 250 + elite_size = 50 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.7 + quantum_probability = 0.1 + adaptive_scaling_factor = lambda t: 0.3 * np.exp( + -0.1 * t + ) # Modified decay rate for enhanced precision + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Refined Symbiotic mutation and crossover + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV4.py b/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV4.py new file mode 100644 index 000000000..b87e1bc9c --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumSymbioticStrategyV4.py @@ -0,0 +1,75 @@ +import numpy as np + + +class RefinedQuantumSymbioticStrategyV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension of the problem + self.lb = -5.0 * np.ones(self.dim) # Lower bound of the search space + self.ub = 5.0 * np.ones(self.dim) # Upper bound of the search space + + def __call__(self, func): + population_size = 500 + elite_size = 100 + evaluations = 0 + mutation_factor = 0.8 + crossover_probability = 0.85 + quantum_probability = 0.15 + adaptive_scaling_factor = lambda t: 0.3 * np.exp(-0.3 * t) # Better control over mutation + + # Initialize population + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations += population_size + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + while evaluations < self.budget: + current_best_fitness = np.min(fitness) + + # Quantum mutation step with enhanced control + if np.random.rand() < quantum_probability: + elite_indices = np.argsort(fitness)[:elite_size] + for i in elite_indices: + if evaluations >= self.budget: + break + time_factor = evaluations / self.budget + quantum_mutant = population[i] + np.random.normal( + 0, adaptive_scaling_factor(time_factor), self.dim + ) + quantum_mutant = np.clip(quantum_mutant, self.lb, self.ub) + quantum_fitness = func(quantum_mutant) + evaluations += 1 + + if quantum_fitness < fitness[i]: + population[i] = quantum_mutant + fitness[i] = quantum_fitness + if quantum_fitness < self.f_opt: + self.f_opt = quantum_fitness + self.x_opt = quantum_mutant + + # Symbiotic mutation and crossover with refined population selection + for i in range(population_size): + if evaluations >= self.budget: + break + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation and Crossover + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + trial = np.where(np.random.rand(self.dim) < crossover_probability, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + # Selection with a focus on strong replacements + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedQuantumTunnelingOptimizerV19.py b/nevergrad/optimization/lama/RefinedQuantumTunnelingOptimizerV19.py new file mode 100644 index 000000000..24e824f93 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedQuantumTunnelingOptimizerV19.py @@ -0,0 +1,75 @@ +import numpy as np + + +class RefinedQuantumTunnelingOptimizerV19: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Refined strategy parameters + population_size = 100 # Further reduced population size + gamma = 0.2 # Lower initial gamma for more controlled exploration + gamma_min = 0.00001 # Even finer tuning in late optimization stages + gamma_decay = 0.999 # Slower gamma decay for sustained explorative capabilities + elite_count = 3 # Further focus on very best individuals + mutation_strength = 0.01 # Finer mutations for precise adjustments + crossover_probability = 0.95 # High crossover to enhance information exchange + tunneling_frequency = 0.8 # Enhanced tunneling to effectively escape local minima + + # Initialize population within bounds + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + gamma = max(gamma * gamma_decay, gamma_min) + + # Elite selection + elite_indices = np.argsort(fitness)[:elite_count] + elite_individuals = population[elite_indices] + new_population = [population[i] for i in elite_indices] + new_fitness = [fitness[i] for i in elite_indices] + + # Reproduction: crossover and mutation + while len(new_population) < population_size: + parent1_idx, parent2_idx = np.random.choice(elite_indices, 2, replace=False) + parent1, parent2 = population[parent1_idx], population[parent2_idx] + + # Crossover + if np.random.random() < crossover_probability: + crossover_point = np.random.randint(1, self.dim) + offspring = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) + else: + offspring = np.array(parent1) + + # Mutation + mutation = np.random.normal(0, mutation_strength, self.dim) + offspring += mutation + offspring = np.clip(offspring, -5.0, 5.0) + + # Quantum tunneling + if np.random.rand() < tunneling_frequency: + tunnel_point = np.random.uniform(-5.0, 5.0, self.dim) + offspring = gamma * offspring + (1 - gamma) * tunnel_point + + offspring_fitness = func(offspring) + evaluations_left -= 1 + + if evaluations_left <= 0: + break + + new_population.append(offspring) + new_fitness.append(offspring_fitness) + + if offspring_fitness < self.f_opt: + self.f_opt = offspring_fitness + self.x_opt = offspring + + population = np.array(new_population) + fitness = np.array(new_fitness) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedRAMEDSPro.py b/nevergrad/optimization/lama/RefinedRAMEDSPro.py new file mode 100644 index 000000000..cbcfea38a --- /dev/null +++ b/nevergrad/optimization/lama/RefinedRAMEDSPro.py @@ -0,0 +1,78 @@ +import numpy as np + + +class RefinedRAMEDSPro: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.8, + F_min=0.4, + F_max=1.0, + memory_size=20, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory with best initial individuals + memory_indices = np.argsort(fitness)[: self.memory_size] + memory = population[memory_indices].copy() + memory_fitness = fitness[memory_indices].copy() + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation factor with successful feedback modulation + F = np.interp(evaluations, [0, self.budget], [self.F_max, self.F_min]) + if evaluations % 20 == 0 and i == 0: + F += (0.5 - np.random.rand()) * 0.2 # Small random deviation every 20 evaluations + + # Mutation: DE/rand-to-best/1 + indices = np.random.choice(self.population_size, 2, replace=False) + r1, r2 = population[indices] + mutant = np.clip(best_solution + F * (r1 - r2), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + # Update memory if necessary + max_memory_f_idx = np.argmax(memory_fitness) + if fitness[i] < memory_fitness[max_memory_f_idx]: + memory[max_memory_f_idx] = population[i] + memory_fitness[max_memory_f_idx] = fitness[i] + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedRAMEDSv2.py b/nevergrad/optimization/lama/RefinedRAMEDSv2.py new file mode 100644 index 000000000..e7c2a6c78 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedRAMEDSv2.py @@ -0,0 +1,90 @@ +import numpy as np + + +class RefinedRAMEDSv2: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Adaptive mutation factor with sinusoidal modulation + F = self.F_max - (self.F_max - self.F_min) * np.sin(2 * np.pi * evaluations / self.budget) + + for i in range(self.population_size): + # Mutation: DE/current-to-best/1 + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.6 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(population[i] + F * (best_or_elite - population[i] + a - b), lb, ub) + + # Crossover adjustment based on diversity + if np.std(fitness) < np.mean(fitness) * 0.1: # Low diversity + self.crossover_rate *= 0.95 + else: + self.crossover_rate = min(self.crossover_rate / 0.95, 1.0) + + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and updating memory + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedSpatialAdaptiveOptimizer.py b/nevergrad/optimization/lama/RefinedSpatialAdaptiveOptimizer.py new file mode 100644 index 000000000..b603fc4e9 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedSpatialAdaptiveOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class RefinedSpatialAdaptiveOptimizer: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=100, + initial_step_size=1.5, + step_decay=0.95, + elite_ratio=0.2, + mutation_intensity=0.08, + local_search_prob=0.3, + refinement_steps=5, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.step_decay = step_decay + self.elite_count = int(population_size * elite_ratio) + self.mutation_intensity = mutation_intensity + self.local_search_prob = local_search_prob + self.refinement_steps = refinement_steps + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale * self.mutation_intensity, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def local_search(self, func, individual): + best_local = individual + best_fitness = func(individual) + for _ in range(self.refinement_steps): + candidate = np.clip( + individual + np.random.normal(0, self.step_size * 0.01, self.dimension), + self.bounds[0], + self.bounds[1], + ) + fitness = func(candidate) + if fitness < best_fitness: + best_fitness = fitness + best_local = candidate + return best_local, best_fitness + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = self.step_size * (self.step_decay**generation) + new_population = np.array( + [self.mutate(population[i], scale) for i in range(self.population_size)] + ) + new_fitness = self.evaluate_population(func, new_population) + + if np.random.rand() < self.local_search_prob: # Conduct local search on some individuals + for idx in range(self.population_size): + local_individual, local_fitness = self.local_search(func, new_population[idx]) + evaluations += self.refinement_steps # Account for the evaluations used in local search + if local_fitness < new_fitness[idx]: + new_population[idx] = local_individual + new_fitness[idx] = local_fitness + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + elite_indices = np.argsort(combined_fitness)[: self.population_size] + population = combined_population[elite_indices] + fitness = combined_fitness[elite_indices] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + if evaluations + self.population_size > self.budget: + break # Avoid exceeding the budget + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedSpiralSearchOptimizer.py b/nevergrad/optimization/lama/RefinedSpiralSearchOptimizer.py new file mode 100644 index 000000000..c1cca8298 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedSpiralSearchOptimizer.py @@ -0,0 +1,56 @@ +import numpy as np + + +class RefinedSpiralSearchOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial point in the center of the search space + initial_point = np.zeros(self.dim) + current_point = initial_point + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Parameters for spiral movement + radius = 5.0 # Maximum extent of the search space + angle_increment = np.pi / 8 # Smaller incremental angle for finer spiral + radius_decrement_factor = 0.9 # Slightly slower radius reduction to explore more thoroughly + spiral_budget = self.budget + + while spiral_budget > 0: + num_points = int(2 * np.pi / angle_increment) + for i in range(num_points): + if spiral_budget <= 0: + break + + angle = i * angle_increment + for dim in range(self.dim): # Spiral in all dimensions + dx = radius * np.cos(angle) + dy = radius * np.sin(angle) + delta = np.zeros(self.dim) + delta[dim % self.dim] = dx + delta[(dim + 1) % self.dim] = dy + + candidate_point = current_point + delta + candidate_point = np.clip( + candidate_point, -5.0, 5.0 + ) # Ensure the candidate is within bounds + candidate_f = func(candidate_point) + spiral_budget -= 1 + + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate_point + current_point = candidate_point # Move spiral center to new best location + + # Reduce the radius for the next spiral cycle + radius *= radius_decrement_factor + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedStochasticBalancingOptimizer.py b/nevergrad/optimization/lama/RefinedStochasticBalancingOptimizer.py new file mode 100644 index 000000000..ecfafa8cd --- /dev/null +++ b/nevergrad/optimization/lama/RefinedStochasticBalancingOptimizer.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedStochasticBalancingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the optimization problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.epsilon = 1e-8 # To prevent division by zero in adaptive mechanisms + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 100 + mutation_factor = 0.8 # Initial high mutation for diversity + crossover_prob = 0.9 # High crossover probability for better exploration + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + # Differential Evolution mutation strategy + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation: enhanced DE/current-to-best/1 strategy for faster convergence + best = population[np.argmin(fitness)] + mutant = ( + population[i] + mutation_factor * (best - population[i]) + mutation_factor * (x2 - x3) + ) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection step + if trial_fitness < fitness[i]: + new_population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + # Adapt mutation and crossover rates based on ongoing performance + diversity = np.std(population) + if diversity < 1e-1: + mutation_factor = max(0.1, mutation_factor - 0.05) + else: + mutation_factor = min(1.0, mutation_factor + 0.1) + + if np.std(fitness) / np.mean(fitness) < 0.1: # Low fitness variation + crossover_prob = max(0.7, crossover_prob - 0.05) + else: + crossover_prob = min(0.9, crossover_prob + 0.05) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinedStrategicAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedStrategicAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..b8403b279 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedStrategicAdaptiveDifferentialEvolution.py @@ -0,0 +1,65 @@ +import numpy as np + + +class RefinedStrategicAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set to 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 200 # Increased population size for more diverse solutions + mutation_factor = 0.5 # Start with a lower mutation factor for detailed initial exploration + crossover_prob = 0.9 # Higher crossover probability to promote better mixing of solutions + adaptive_factor = 0.98 # Slower reduction in mutation and crossover to maintain exploration + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + # Adaptation thresholds + no_improve_threshold = 0.1 * self.budget / population_size # More frequent adaptation checks + + for generation in range(int(self.budget / population_size)): + if generation % no_improve_threshold == 0 and generation != 0: + # Increase mutation and crossover if no improvement + mutation_factor = min(1.2 * mutation_factor, 1.0) + crossover_prob = min(1.1 * crossover_prob, 1.0) + + # Evolution strategy + for i in range(population_size): + indices = [j for j in range(population_size) if j != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover (binomial) + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + + # Adaptive mutation and crossover decrease + mutation_factor *= adaptive_factor + crossover_prob *= adaptive_factor + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/RefinedStrategicDiminishingEvolver.py b/nevergrad/optimization/lama/RefinedStrategicDiminishingEvolver.py new file mode 100644 index 000000000..77f30d0cb --- /dev/null +++ b/nevergrad/optimization/lama/RefinedStrategicDiminishingEvolver.py @@ -0,0 +1,70 @@ +import numpy as np + + +class RefinedStrategicDiminishingEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=50, + initial_step_size=1.0, + min_step_size=0.001, + elite_ratio=0.1, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.min_step_size = min_step_size + self.elite_count = int(population_size * elite_ratio) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = max( + self.min_step_size, self.step_size * np.exp(-generation / 20.0) + ) # Exponential decay of step size + + new_population = np.array([self.mutate(ind, scale) for ind in population]) + new_fitness = self.evaluate_population(func, new_population) + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + indices = np.argsort(combined_fitness) + population = combined_population[indices[: self.population_size]] + fitness = combined_fitness[indices[: self.population_size]] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedStrategicQuorumWithDirectionalBias.py b/nevergrad/optimization/lama/RefinedStrategicQuorumWithDirectionalBias.py new file mode 100644 index 000000000..6682b8c71 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedStrategicQuorumWithDirectionalBias.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RefinedStrategicQuorumWithDirectionalBias: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_scale=0.3, + elite_adaptation=0.02, + momentum=0.8, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = max(1, int(population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.elite_adaptation = elite_adaptation + self.momentum = momentum + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Initialize best solution and momentum vector + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + best_momentum = np.zeros(self.dimension) + + # Evolution loop + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Select a quorum randomly, including best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Find the local best in the quorum + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Mutation influenced by best, local best, and momentum + direction = best_individual - local_best + random_direction = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = random_direction * direction + self.momentum * best_momentum + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update best solution and momentum + if child_fitness < best_fitness: + best_momentum = child - best_individual + best_fitness = child_fitness + best_individual = child + + new_population.append(child) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Adapt the elite count based on progress + if self.elite_adaptation > 0: + self.elite_count = max( + 1, int(self.elite_count * (1 + self.elite_adaptation * np.random.uniform(-1, 1))) + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedSuperiorAdaptiveStrategyDE.py b/nevergrad/optimization/lama/RefinedSuperiorAdaptiveStrategyDE.py new file mode 100644 index 000000000..a44bb3d8b --- /dev/null +++ b/nevergrad/optimization/lama/RefinedSuperiorAdaptiveStrategyDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class RefinedSuperiorAdaptiveStrategyDE: + def __init__( + self, budget=10000, population_size=150, F_base=0.5, F_range=0.3, CR=0.95, strategy="refined_adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Refined adaptive mutation strategy + if self.strategy == "refined_adaptive": + # Use a mix of top performers and diversity + sorted_indices = np.argsort(fitness) + high_performers = sorted_indices[: max(2, self.population_size // 10)] + diverse_selection = np.random.choice(high_performers, size=1) + base = population[diverse_selection[0]] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Adjust F dynamically with a focus on convergence + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using three random distinct indices + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(base + F * (a - b + c), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedTemporalAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RefinedTemporalAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..905cb2a74 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedTemporalAdaptiveDifferentialEvolution.py @@ -0,0 +1,52 @@ +import numpy as np + + +class RefinedTemporalAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 80 # Adjusted population size for better population diversity + self.F_base = 0.8 # Initial mutation factor + self.CR = 0.9 # Crossover probability + self.F_min = 0.1 # Minimum mutation factor + self.F_decay = 0.985 # Adjusted decay factor for mutation rate + + def __call__(self, func): + # Initialize population uniformly within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + F = self.F_base + for iteration in range(n_iterations): + # Temporally decaying mutation factor + F = max(self.F_min, F * self.F_decay) # Ensure F does not go below F_min + + for i in range(self.pop_size): + # Differential evolution strategy: 'best/1/bin' + idxs = np.random.choice([idx for idx in range(self.pop_size) if idx != i], 2, replace=False) + b, c = pop[idxs] + mutant = pop[best_idx] + F * (b - c) + + # Clipping to bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < self.CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/RefinedUltimateEnhancedGuidedMassQGSA_v71.py b/nevergrad/optimization/lama/RefinedUltimateEnhancedGuidedMassQGSA_v71.py new file mode 100644 index 000000000..e4d2f8193 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimateEnhancedGuidedMassQGSA_v71.py @@ -0,0 +1,124 @@ +import numpy as np + + +class RefinedUltimateEnhancedGuidedMassQGSA_v71: + def __init__( + self, budget=1000, num_agents=30, G0=100.0, alpha=0.1, beta=0.1, lb=-5.0, ub=5.0, dimension=5 + ): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.beta = beta + self.lb = lb + self.ub = ub + self.dimension = dimension + self.f_opt = np.Inf + self.x_opt = None + self.prev_best_fitness = np.Inf + self.step_size = (ub - lb) * 0.1 + self.crossover_rate = 0.7 + self.explore_rate = 0.3 + self.inertia_weight = 0.9 + self.social_weight = 1.0 + self.cognitive_weight = 1.0 + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values - np.min(fitness_values) + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force, best_agent, personal_best): + r1 = np.random.rand(self.dimension) + r2 = np.random.rand(self.dimension) + velocity = ( + self.inertia_weight * force + + self.social_weight * r1 * (best_agent - agent) + + self.cognitive_weight * r2 * (personal_best - agent) + ) + new_pos = agent + self.alpha * velocity + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def _adaptive_parameters(self): + self.G0 *= 0.95 + self.alpha *= 0.95 + if self.f_opt < self.prev_best_fitness: + self.beta = min(0.2, self.beta * 1.03) + else: + self.beta = max(0.05, self.beta * 0.97) + self.prev_best_fitness = self.f_opt + + def _update_best_agent(self, agents, fitness_values, personal_best_values): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + best_fitness = fitness_values[best_agent_idx] + personal_best_values = np.minimum(fitness_values, personal_best_values) + return best_agent, best_agent_idx, best_fitness, personal_best_values + + def _adjust_agent_position(self, agent, best_agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + r * (best_agent - agent), self.lb, self.ub) + + def _crossover(self, agent, best_agent): + mask = np.random.choice([0, 1], size=self.dimension, p=[1 - self.crossover_rate, self.crossover_rate]) + new_agent = agent * mask + best_agent * (1 - mask) + return np.clip(new_agent, self.lb, self.ub) + + def _explore(self, agent): + r = np.random.uniform(-self.step_size, self.step_size, size=self.dimension) + return np.clip(agent + self.explore_rate * r, self.lb, self.ub) + + def _update_agents_with_ultimate_guided_mass(self, agents, fitness_values, masses, func): + personal_best_values = np.full(self.num_agents, np.Inf) + + for _ in range(self.budget): + for i in range(self.num_agents): + best_agent, best_agent_idx, best_fitness, personal_best_values = self._update_best_agent( + agents, fitness_values, personal_best_values + ) + + if i != best_agent_idx: + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for j in range(self.num_agents) + if j != best_agent_idx + ] + ) + guided_mass = ( + self.crossover_rate * agents[best_agent_idx] + (1 - self.crossover_rate) * agents[i] + ) + guide_force = self.G0 * masses[i] * (guided_mass - agents[i]) + new_agent = self._update_agent_position( + agents[i], force + guide_force, best_agent, personal_best_values[i] + ) + new_agent = self._adjust_agent_position(new_agent, best_agent) + new_agent = self._crossover(new_agent, best_agent) + new_agent = self._explore(new_agent) + new_fitness = self._objective_function(func, new_agent) + + if new_fitness < fitness_values[i]: + agents[i] = new_agent + fitness_values[i] = new_fitness + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_agent + + self._adaptive_parameters() + + def __call__(self, func): + agents = self._initialize_agents() + + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + self._update_agents_with_ultimate_guided_mass(agents, fitness_values, masses, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV16.py b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV16.py new file mode 100644 index 000000000..32302666f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV16.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedUltimateEvolutionaryGradientOptimizerV16: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.3, + CR=0.9, + elite_fraction=0.05, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'adaptive', 'random', 'balanced' + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy selection + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.8: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "random": + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "balanced": + if np.random.rand() < 0.5: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV17.py b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV17.py new file mode 100644 index 000000000..9ee61efe1 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV17.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RefinedUltimateEvolutionaryGradientOptimizerV17: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.07, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'adaptive', 'random', 'balanced' + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy selection + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.85: # Increased probability of selecting the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "random": + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "balanced": + if np.random.rand() < 0.5: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV34.py b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV34.py new file mode 100644 index 000000000..d11abe3e4 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryGradientOptimizerV34.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinedUltimateEvolutionaryGradientOptimizerV34: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.58, + F_range=0.42, + CR=0.93, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Adjusted base mutation factor for better balance + self.F_range = F_range # Reduced range to control mutation variability + self.CR = CR # Adjusted crossover probability for enhanced exploration within sub-spaces + self.elite_fraction = ( + elite_fraction # Modified elite fraction to focus on a slightly broader elite group + ) + self.mutation_strategy = mutation_strategy # Adaptive strategy prioritizes exploiting promising areas + self.dim = 5 # Dimensionality of the problem is fixed to 5 as given + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Adaptively choose base individual from either the best or a randomly selected elite + if np.random.rand() < 0.8: # Increased adaptive focus to exploit known good regions + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random selection of base from elite members + base = population[np.random.choice(elite_indices)] + + # Adjust F dynamically within a controlled range for better convergence + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation strategy DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with enhanced probability + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] is True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial individual and update if improvement + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check for budget exhaustion + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltimateEvolutionaryOptimizer.py b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryOptimizer.py new file mode 100644 index 000000000..1f7ea650d --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimateEvolutionaryOptimizer.py @@ -0,0 +1,62 @@ +import numpy as np + + +class RefinedUltimateEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature with a refined starting value and minimum threshold + T = 1.1 # Slightly decreased starting temperature for optimal initial exploration + T_min = 0.0005 # Lower minimum temperature threshold for fine-tuned exploitation + alpha = 0.92 # Modified cooling rate to allow extended search at each temperature level + + # Mutation and crossover parameters optimized based on previous results + F = 0.75 # Adjusted mutation factor to help balance exploration and exploitation + CR = 0.88 # Adjusted crossover probability to maintain diversity while improving offspring quality + + population_size = 80 # Slightly increased population size for more diverse initial solutions + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics with a focus on adaptive mutation factors + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor incorporating improved adaptive behavior + dynamic_F = ( + F + * np.exp(-0.12 * T) + * (0.65 + 0.35 * np.tanh(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion adapting better to changes in fitness landscape + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling with an updated modulation to account for search efficiency + adaptive_cooling = alpha - 0.012 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedUltimatePrecisionEvolutionaryOptimizerV42.py b/nevergrad/optimization/lama/RefinedUltimatePrecisionEvolutionaryOptimizerV42.py new file mode 100644 index 000000000..0e4487221 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltimatePrecisionEvolutionaryOptimizerV42.py @@ -0,0 +1,82 @@ +import numpy as np + + +class RefinedUltimatePrecisionEvolutionaryOptimizerV42: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.52, + F_range=0.48, + CR=0.92, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Reduced base mutation factor for more stable exploration + self.F_range = F_range # Increased range for mutation factor to enhance adaptive capabilities + self.CR = CR # Slightly lower crossover probability to maintain diversity + self.elite_fraction = ( + elite_fraction # Increased elite fraction to improve the influence of top performers + ) + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy for dynamic behavior based on fitness landscape + ) + self.dim = 5 # Problem dimensionality + self.lb = -5.0 # Search space lower bound + self.ub = 5.0 # Search space upper bound + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Adaptive base individual selection + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.80: # Higher preference for the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjusted mutation factor + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with refined CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..f1646c22f --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,174 @@ +import numpy as np +from scipy.optimize import minimize + + +class RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + blend_crossover_prob=0.3, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.blend_crossover_prob = blend_crossover_prob + self.max_no_improvement_ratio = max_no_improvement_ratio + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinedUltraEvolutionaryGradientOptimizerV28.py b/nevergrad/optimization/lama/RefinedUltraEvolutionaryGradientOptimizerV28.py new file mode 100644 index 000000000..96f4fa360 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltraEvolutionaryGradientOptimizerV28.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedUltraEvolutionaryGradientOptimizerV28: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.93, + elite_fraction=0.09, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.80: # Adjusted probability to balance the global and local search + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltraOptimizedDynamicPrecisionOptimizerV20.py b/nevergrad/optimization/lama/RefinedUltraOptimizedDynamicPrecisionOptimizerV20.py new file mode 100644 index 000000000..d63069621 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltraOptimizedDynamicPrecisionOptimizerV20.py @@ -0,0 +1,60 @@ +import numpy as np + + +class RefinedUltraOptimizedDynamicPrecisionOptimizerV20: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and refined cooling parameters + T = 1.2 # Slightly increased starting temperature for more aggressive exploration initially + T_min = 0.0003 # Lower minimum temperature to allow for detailed late-stage exploration + alpha = 0.91 # Slower cooling rate to extend effective search duration + + # Refined mutation and crossover parameters for improved performance + F = 0.77 # Mutation factor adjusted for a better balance of exploration and exploitation + CR = 0.89 # Crossover probability finely tuned for better genetic diversity maintenance + + population_size = 90 # Increased population size for better coverage and diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing an enhanced dynamic mutation strategy with adaptive sigmoid function + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation rate adapting with an advanced sigmoid model + dynamic_F = ( + F * np.exp(-0.055 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with temperature-sensitive decision making + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced cooling strategy integrating a periodic modulation for nuanced temperature control + adaptive_cooling = alpha - 0.007 * np.sin(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/RefinedUltraOptimizedEvolutionaryGradientOptimizerV31.py b/nevergrad/optimization/lama/RefinedUltraOptimizedEvolutionaryGradientOptimizerV31.py new file mode 100644 index 000000000..9e9c25af7 --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltraOptimizedEvolutionaryGradientOptimizerV31.py @@ -0,0 +1,79 @@ +import numpy as np + + +class RefinedUltraOptimizedEvolutionaryGradientOptimizerV31: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.60, + F_range=0.40, + CR=0.97, + elite_fraction=0.05, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.85: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinedUltraRefinedRAMEDS.py b/nevergrad/optimization/lama/RefinedUltraRefinedRAMEDS.py new file mode 100644 index 000000000..4b4db8a6e --- /dev/null +++ b/nevergrad/optimization/lama/RefinedUltraRefinedRAMEDS.py @@ -0,0 +1,74 @@ +import numpy as np + + +class RefinedUltraRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.3, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update mutation factor dynamically for precision as the optimization progresses + progress = evaluations / self.budget + F = self.F_max - (self.F_max - self.F_min) * progress + + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + # Introducing dynamic weighting for the best and random contribution + best_weight = 0.5 + 0.5 * np.sin(np.pi * progress) # Increases as progress goes + random_weight = 1 - best_weight + + mutant = np.clip( + a + F * (best_weight * (best_solution - a) + random_weight * (b - c)), lb, ub + ) + + # Crossover with decreased likelihood towards the end + cross_points = np.random.rand(dimension) < (self.crossover_rate * (1 - progress)) + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RefinementEnhancedHybridOptimizer.py b/nevergrad/optimization/lama/RefinementEnhancedHybridOptimizer.py new file mode 100644 index 000000000..73493bd89 --- /dev/null +++ b/nevergrad/optimization/lama/RefinementEnhancedHybridOptimizer.py @@ -0,0 +1,130 @@ +import numpy as np + + +class RefinementEnhancedHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 30 # Increased population size for better exploration + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 1.5 + self.c2 = 1.5 + self.w = 0.7 + self.elite_fraction = 0.2 # Increased elite fraction for better exploitation + self.diversity_threshold = 1e-6 # Reduced threshold for finer diversity control + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(5, self.budget - evaluations) + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.1, bounds.lb, bounds.ub + ) + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RefinementSelectiveCohortOptimization.py b/nevergrad/optimization/lama/RefinementSelectiveCohortOptimization.py new file mode 100644 index 000000000..a4997626e --- /dev/null +++ b/nevergrad/optimization/lama/RefinementSelectiveCohortOptimization.py @@ -0,0 +1,64 @@ +import numpy as np + + +class RefinementSelectiveCohortOptimization: + def __init__(self, budget, dimension=5, population_size=100, elite_fraction=0.1, mutation_scale=0.05): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(population_size * elite_fraction) + self.mutation_scale = mutation_scale + + def __call__(self, func): + # Initialize population within the bounds [-5, 5] + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(x) for x in population]) + evaluations = self.population_size + + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + while evaluations < self.budget: + new_population = np.empty_like(population) + elite_indices = np.argsort(fitness)[: self.elite_count] + elite_population = population[elite_indices] + + # Calculate weighted probabilities for selection based on inverse fitness rank + ranks = np.argsort(np.argsort(fitness)) + selection_probabilities = (self.population_size - ranks) / np.sum(self.population_size - ranks) + + for i in range(self.population_size): + # Select parents based on fitness-proportional selection + parents_indices = np.random.choice( + self.population_size, 2, p=selection_probabilities, replace=False + ) + parent1, parent2 = population[parents_indices] + + # Perform crossover and mutation + crossover_point = np.random.randint(1, self.dimension) + child = np.concatenate((parent1[:crossover_point], parent2[crossover_point:])) + mutation_vector = self.mutation_scale * np.random.randn(self.dimension) + child += mutation_vector + + # Ensure child stays within bounds + child = np.clip(child, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + new_population[i] = child + + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(x) for x in population]) + + # Adapt mutation scale by reducing it + self.mutation_scale *= 0.99 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/RefinementTunedPSO.py b/nevergrad/optimization/lama/RefinementTunedPSO.py new file mode 100644 index 000000000..77b2b6344 --- /dev/null +++ b/nevergrad/optimization/lama/RefinementTunedPSO.py @@ -0,0 +1,81 @@ +import numpy as np + + +class RefinementTunedPSO: + def __init__( + self, + budget=10000, + population_size=150, + omega_initial=0.9, + omega_final=0.4, + phi_p=0.2, + phi_g=0.8, + critical_depth=15, + adaptive_depth=5, + ): + self.budget = budget + self.population_size = population_size + self.omega_initial = omega_initial # Initial inertia coefficient + self.omega_final = omega_final # Final inertia coefficient + self.phi_p = phi_p # Personal preference influence + self.phi_g = phi_g # Global preference influence + self.dim = 5 # Problem dimensionality + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.critical_depth = critical_depth # Depth of performance evaluation for adaptive inertia + self.adaptive_depth = adaptive_depth # Depth used for quick adaptation checks + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + recent_scores = np.array([global_best_score]) + + while evaluation_counter < self.budget: + omega = self.adaptive_inertia(recent_scores, evaluation_counter) + + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + recent_scores = np.append(recent_scores, global_best_score)[-self.critical_depth :] + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position + + def adaptive_inertia(self, scores, evaluation_counter): + if len(scores) > self.adaptive_depth and np.std(scores[-self.adaptive_depth :]) < 0.01: + return max( + self.omega_final, + self.omega_initial + - (evaluation_counter / self.budget) * (self.omega_initial - self.omega_final) * 1.2, + ) + else: + return self.omega_initial - ( + (self.omega_initial - self.omega_final) * (evaluation_counter / self.budget) + ) diff --git a/nevergrad/optimization/lama/ResilientAdaptivePSO.py b/nevergrad/optimization/lama/ResilientAdaptivePSO.py new file mode 100644 index 000000000..64940e8af --- /dev/null +++ b/nevergrad/optimization/lama/ResilientAdaptivePSO.py @@ -0,0 +1,57 @@ +import numpy as np + + +class ResilientAdaptivePSO: + def __init__( + self, budget=10000, population_size=100, omega=0.7, phi_p=0.12, phi_g=0.25, precision_decay=0.95 + ): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia coefficient + self.phi_p = phi_p # Coefficient of personal best + self.phi_g = phi_g # Coefficient of global best + self.dim = 5 # Dimension of the problem + self.lb, self.ub = -5.0, 5.0 # Search space bounds + self.precision_decay = precision_decay + + def __call__(self, func): + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluation_counter = self.population_size + remaining_budget = self.budget - evaluation_counter + + while evaluation_counter < self.budget: + self.omega *= self.precision_decay # Gradually reduce inertia + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + self.omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + + current_score = func(particles[i]) + evaluation_counter += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluation_counter >= self.budget: + break + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/ResponsiveAdaptiveMemoryStrategyV52.py b/nevergrad/optimization/lama/ResponsiveAdaptiveMemoryStrategyV52.py new file mode 100644 index 000000000..94ac6f056 --- /dev/null +++ b/nevergrad/optimization/lama/ResponsiveAdaptiveMemoryStrategyV52.py @@ -0,0 +1,99 @@ +import numpy as np + + +class ResponsiveAdaptiveMemoryStrategyV52: + def __init__( + self, + budget, + dimension=5, + population_size=100, + F_init=0.5, + CR_init=0.9, + switch_ratio=0.5, + memory_size=20, + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.memory_size = memory_size + self.memory_weights = np.ones(memory_size) / memory_size # Initially equal weighting + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + memory_effect = ( + np.average(self.memory, axis=0, weights=self.memory_weights) + if self.memory + else np.zeros(self.dimension) + ) + mutant = population[a] + self.F * (population[b] - population[c]) + 0.1 * memory_effect + + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > self.memory_size: + self.memory.pop(0) + self.memory_weights = np.append(self.memory_weights[1:], 1.0) + self.memory_weights *= 0.95 # Decay older weights + self.memory_weights[-1] = 1.0 # Boost the latest memory + return trial, f_trial + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + progress = iteration / total_iterations + self.F = np.clip(0.5 + 0.5 * np.sin(np.pi * progress), 0.1, 1) + self.CR = np.clip(0.9 - 0.8 * progress, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ResponsiveAdaptiveStrategyV27.py b/nevergrad/optimization/lama/ResponsiveAdaptiveStrategyV27.py new file mode 100644 index 000000000..b849ca583 --- /dev/null +++ b/nevergrad/optimization/lama/ResponsiveAdaptiveStrategyV27.py @@ -0,0 +1,79 @@ +import numpy as np + + +class ResponsiveAdaptiveStrategyV27: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.prev_best_fitness = np.inf + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + d, e = np.random.choice(idxs, 2, replace=False) + mutant = population[a] + self.F * ( + population[b] - population[c] + 0.5 * (population[d] - population[e]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adapt_parameters(self, best_fitness): + improvement = self.prev_best_fitness - best_fitness + self.prev_best_fitness = best_fitness + # Adjust F and CR based on improvement rate + learning_rate = 0.1 + self.F += learning_rate * improvement + self.CR += learning_rate * (0.1 - improvement) + self.F = np.clip(self.F, 0.1, 1) + self.CR = np.clip(self.CR, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + best_idx = np.argmin(fitnesses) + self.prev_best_fitness = fitnesses[best_idx] + + while evaluations < self.budget: + phase = 1 if evaluations < self.switch_ratio * self.budget else 2 + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + self.adapt_parameters(trial_fitness) + + if evaluations >= self.budget: + break + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RestartAdaptiveDifferentialEvolutionPSO.py b/nevergrad/optimization/lama/RestartAdaptiveDifferentialEvolutionPSO.py new file mode 100644 index 000000000..8fb5531ac --- /dev/null +++ b/nevergrad/optimization/lama/RestartAdaptiveDifferentialEvolutionPSO.py @@ -0,0 +1,109 @@ +import numpy as np + + +class RestartAdaptiveDifferentialEvolutionPSO: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.initial_pop_size = 60 + self.min_pop_size = 20 + self.initial_F = 0.5 # Initial mutation factor + self.initial_CR = 0.9 # Initial crossover rate + self.c1 = 1.5 # Cognitive parameter + self.c2 = 1.5 # Social parameter + self.w = 0.5 # Inertia weight + self.restart_threshold = 100 # Stagnation threshold + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.initial_pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.initial_pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return parent1 + F * (parent2 - parent3) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + trial = np.array( + [mutant[j] if np.random.rand() < CR or j == j_rand else target[j] for j in range(self.dim)] + ) + return trial + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.initial_pop_size + + no_improvement_counter = 0 + stagnation_monitor = [] + + while evaluations < self.budget: + current_pop_size = max( + self.min_pop_size, int(self.initial_pop_size * ((self.budget - evaluations) / self.budget)) + ) + new_population = np.zeros_like(population[:current_pop_size]) + fitness = np.zeros(current_pop_size) + + for i in range(current_pop_size): + parent1, parent2, parent3 = self.select_parents(population) + F = np.random.uniform(0.4, 0.9) # Adaptive mutation factor + CR = np.random.uniform(0.6, 1.0) # Adaptive crossover rate + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + no_improvement_counter = 0 + else: + no_improvement_counter += 1 + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + personal_best_positions = personal_best_positions[:current_pop_size] + personal_best_scores = personal_best_scores[:current_pop_size] + velocities = velocities[:current_pop_size] + + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + stagnation_monitor.append(global_best_score) + + if no_improvement_counter >= self.restart_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + no_improvement_counter = 0 + evaluations += self.initial_pop_size + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RevisedEnhancedDifferentialEvolutionLSRefinement_v20.py b/nevergrad/optimization/lama/RevisedEnhancedDifferentialEvolutionLSRefinement_v20.py new file mode 100644 index 000000000..2d682c0af --- /dev/null +++ b/nevergrad/optimization/lama/RevisedEnhancedDifferentialEvolutionLSRefinement_v20.py @@ -0,0 +1,85 @@ +import numpy as np + + +class RevisedEnhancedDifferentialEvolutionLSRefinement_v20: + def __init__( + self, budget=10000, p_best=0.2, f_min=0.4, f_max=0.9, cr_min=0.2, cr_max=0.9, local_search_iters=10 + ): + self.budget = budget + self.dim = 5 + self.p_best = p_best + self.f_min = f_min + self.f_max = f_max + self.cr_min = cr_min + self.cr_max = cr_max + self.local_search_iters = local_search_iters + + def enhanced_de_local_search(self, func): + population = np.random.uniform(-5.0, 5.0, (10, self.dim)) + + def mutate(population, target_idx, f): + candidates = [idx for idx in range(len(population)) if idx != target_idx] + a, b, c = population[np.random.choice(candidates, 3, replace=False)] + return np.clip(a + f * (b - c), -5.0, 5.0) + + def crossover(trial, target, cr): + mask = np.random.rand(self.dim) < cr + if not np.any(mask): + mask[np.random.randint(0, self.dim)] = True + trial[mask] = target[mask] + return trial + + for _ in range(self.budget): + new_population = [] + for idx, target in enumerate(population): + f = np.clip(np.random.normal(np.mean([self.f_min, self.f_max]), 0.1), self.f_min, self.f_max) + cr = np.clip( + np.random.normal(np.mean([self.cr_min, self.cr_max]), 0.1), self.cr_min, self.cr_max + ) + + p_best_idxs = np.random.choice( + [i for i in range(10) if i != idx], int(self.p_best * 10), replace=False + ) + if idx in p_best_idxs: + p_best_idx = np.random.choice([i for i in range(10) if i != idx]) + p_best_target = population[p_best_idx] + trial = mutate( + [ + p_best_target, + target, + population[ + np.random.choice([i for i in range(10) if i not in [idx, p_best_idx]]) + ], + ], + 1, + f, + ) + else: + trial = mutate(population, idx, f) + + new_trial = crossover(trial.copy(), target, cr) + + target_val = func(target) + trial_val = func(trial) + new_trial_val = func(new_trial) + + if new_trial_val < target_val: + population[idx] = new_trial + if new_trial_val < trial_val: + population[idx] = trial + + for idx, target in enumerate(population): + for _ in range(self.local_search_iters): + perturbed = target + 0.1 * np.random.normal(0, 1, self.dim) + perturbed = np.clip(perturbed, -5.0, 5.0) + if func(perturbed) < func(target): + population[idx] = perturbed + + best_idx = np.argmin([func(sol) for sol in population]) + best_solution = population[best_idx] + best_fitness = func(best_solution) + + return best_fitness, best_solution + + def __call__(self, func): + return self.enhanced_de_local_search(func) diff --git a/nevergrad/optimization/lama/RevolutionaryFireworkAlgorithm.py b/nevergrad/optimization/lama/RevolutionaryFireworkAlgorithm.py new file mode 100644 index 000000000..0fc3271b0 --- /dev/null +++ b/nevergrad/optimization/lama/RevolutionaryFireworkAlgorithm.py @@ -0,0 +1,77 @@ +import numpy as np + + +class RevolutionaryFireworkAlgorithm: + def __init__(self, budget=10000, n_fireworks=30, n_sparks=10, alpha=0.1, beta=2.0, initial_sigma=1.0): + self.budget = budget + self.n_fireworks = n_fireworks + self.n_sparks = n_sparks + self.alpha = alpha + self.beta = beta + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.f_opt = np.inf + self.x_opt = None + self.sigma = initial_sigma + + def initialize_fireworks(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.n_fireworks, self.dim)) + + def explode_firework(self, firework): + sparks = np.random.uniform(firework - self.alpha, firework + self.alpha, (self.n_sparks, self.dim)) + return sparks + + def levy_flight(self, step_size=0.1): + beta = 1.5 + u = np.random.normal(0, self.sigma, size=self.dim) + v = np.random.normal(0, 1, size=self.dim) + step = u / abs(v) ** (1 / beta) + return step_size * step + + def clip_to_bounds(self, x): + return np.clip(x, self.bounds[0], self.bounds[1]) + + def enhance_fireworks(self, fireworks): + for i in range(self.n_fireworks): + fireworks[i] += self.levy_flight() * np.random.normal(0, 1, size=self.dim) + fireworks[i] = self.clip_to_bounds(fireworks[i]) + return fireworks + + def evolve_fireworks(self, fireworks, func): + for i in range(self.n_fireworks): + sparks = self.explode_firework(fireworks[i]) + + for spark in sparks: + if func(spark) < func(fireworks[i]): + fireworks[i] = spark + + for _ in range(self.n_sparks): + idx1, idx2 = np.random.choice(self.n_fireworks, 2, replace=False) + trial = fireworks[i] + self.beta * (fireworks[idx1] - fireworks[idx2]) + trial = self.clip_to_bounds(trial) + if func(trial) < func(fireworks[i]): + fireworks[i] = trial + + return fireworks + + def adapt_parameters(self, it): + self.sigma *= 0.9 # Adjusted sigma update rule for slower decrease + return max(0.1, self.sigma) + + def update_best_firework(self, fireworks, func): + best_idx = np.argmin([func(firework) for firework in fireworks]) + if func(fireworks[best_idx]) < self.f_opt: + self.f_opt = func(fireworks[best_idx]) + self.x_opt = fireworks[best_idx] + + def __call__(self, func): + fireworks = self.initialize_fireworks() + + for it in range(self.budget): + fireworks = self.enhance_fireworks(fireworks) + fireworks = self.evolve_fireworks(fireworks, func) + self.sigma = self.adapt_parameters(it) + + self.update_best_firework(fireworks, func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/RobustAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/RobustAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..00131ceff --- /dev/null +++ b/nevergrad/optimization/lama/RobustAdaptiveDifferentialEvolution.py @@ -0,0 +1,139 @@ +import numpy as np + + +class RobustAdaptiveDifferentialEvolution: + def __init__(self, budget, population_size=20, crossover_rate=0.9, mutation_factor=0.7): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + def adaptive_lr(success_rate): + if success_rate > 0.2: + return self.base_lr * 1.1 + else: + return self.base_lr * 0.9 + + def levy_flight(Lambda): + sigma = ( + np.math.gamma(1 + Lambda) + * np.sin(np.pi * Lambda / 2) + / (np.math.gamma((1 + Lambda) / 2) * Lambda * 2 ** ((Lambda - 1) / 2)) + ) ** (1 / Lambda) + u = np.random.randn() * sigma + v = np.random.randn() + step = u / abs(v) ** (1 / Lambda) + return 0.01 * step + + def dual_strategies(trial, grad): + perturbation = np.random.randn(self.dim) * self.base_lr + levy_step = levy_flight(1.5) * np.random.randn(self.dim) + strategy_1 = trial - self.epsilon * grad + perturbation + strategy_2 = trial + levy_step + + return strategy_1, strategy_2 + + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + evaluations = len(population) + success_rate = 0 + + while evaluations < self.budget: + success_count = 0 + + for j in range(self.population_size): + if evaluations >= self.budget: + break + + target = population[j] + try: + a, b, c = select_parents(population, fitness) + except ValueError: + continue # Skip if there is an error in parent selection + + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + strategy_1, strategy_2 = dual_strategies(trial, grad) + strategy_1 = np.clip(strategy_1, self.bounds[0], self.bounds[1]) + strategy_2 = np.clip(strategy_2, self.bounds[0], self.bounds[1]) + new_f1 = func(strategy_1) + new_f2 = func(strategy_2) + evaluations += 2 + + if new_f1 < fitness[j] or new_f2 < fitness[j]: + if new_f1 < new_f2: + population[j] = strategy_1 + fitness[j] = new_f1 + else: + population[j] = strategy_2 + fitness[j] = new_f2 + success_count += 1 + + if min(new_f1, new_f2) < self.f_opt: + self.f_opt = min(new_f1, new_f2) + self.x_opt = strategy_1 if new_f1 < new_f2 else strategy_2 + + maintain_diversity(population, fitness) + + success_rate = success_count / self.population_size + self.base_lr = adaptive_lr(success_rate) + self.base_lr = np.clip(self.base_lr, 1e-4, 1.0) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = RobustAdaptiveDifferentialEvolution(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/RobustAdaptiveMemoryLeveragedStrategyV43.py b/nevergrad/optimization/lama/RobustAdaptiveMemoryLeveragedStrategyV43.py new file mode 100644 index 000000000..31f43e649 --- /dev/null +++ b/nevergrad/optimization/lama/RobustAdaptiveMemoryLeveragedStrategyV43.py @@ -0,0 +1,83 @@ +import numpy as np + + +class RobustAdaptiveMemoryLeveragedStrategyV43: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + self.memory = [] + self.phase = 1 + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + if self.phase == 1: + mutant = population[best_idx] + self.F * (population[a] - population[b]) + else: + # Enhance mutation strategy by adapting based on memory depth + memory_factor = np.mean(self.memory, axis=0) if self.memory else np.zeros(self.dimension) + mutant = population[a] + self.F * (population[b] - population[c]) + 0.5 * memory_factor + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + self.memory.append(trial - target) + if len(self.memory) > 20: # Adjusted memory size + self.memory.pop(0) + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Adaptive parameter tuning based on sigmoid-based dynamic adjustment + scale = iteration / total_iterations + sigmoid_factor = 1 / (1 + np.exp(-12 * (scale - 0.5))) + self.F = np.clip(0.6 * sigmoid_factor, 0.1, 1) + self.CR = np.clip(0.8 * sigmoid_factor, 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget) + self.phase = 1 if evaluations < self.budget * self.switch_ratio else 2 + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/RobustCovarianceMatrixAdaptationMemeticSearch.py b/nevergrad/optimization/lama/RobustCovarianceMatrixAdaptationMemeticSearch.py new file mode 100644 index 000000000..2e3cf4f02 --- /dev/null +++ b/nevergrad/optimization/lama/RobustCovarianceMatrixAdaptationMemeticSearch.py @@ -0,0 +1,102 @@ +import numpy as np + + +class RobustCovarianceMatrixAdaptationMemeticSearch: + def __init__( + self, budget, population_size=50, memetic_rate=0.5, elite_fraction=0.2, learning_rate=0.01, sigma=0.3 + ): + self.budget = budget + self.population_size = population_size + self.memetic_rate = memetic_rate + self.elite_fraction = elite_fraction + self.learning_rate = learning_rate + self.sigma = sigma + + def gradient_estimation(self, func, x, h=1e-8): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def covariance_matrix_adaptation(self, func, pop, scores, mean, C): + n_samples = len(pop) + dim = pop.shape[1] + + new_pop = np.zeros_like(pop) + new_scores = np.zeros(n_samples) + + for i in range(n_samples): + z = np.random.randn(dim) + try: + y = np.dot(np.linalg.cholesky(C), z) + except np.linalg.LinAlgError: + y = np.dot(np.linalg.cholesky(C + 1e-8 * np.eye(dim)), z) + candidate = np.clip(mean + self.sigma * y, -5.0, 5.0) + new_pop[i] = candidate + new_scores[i] = func(candidate) + + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + # Initialize mean and covariance matrix + mean = np.mean(pop, axis=0) + C = np.cov(pop.T) + + for iteration in range(max_iterations): + # Perform covariance matrix adaptation step + pop, scores = self.covariance_matrix_adaptation(func, pop, scores, mean, C) + + # Perform memetic local search + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + pop[i], scores[i] = self.local_search(func, pop[i], scores[i]) + + # Update global best + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + # Update mean and covariance matrix + elite_count = int(self.population_size * self.elite_fraction) + elite_idx = np.argsort(scores)[:elite_count] + elite_pop = pop[elite_idx] + mean = np.mean(elite_pop, axis=0) + C = np.cov(elite_pop.T) + + evaluations += self.population_size + if evaluations >= self.budget: + break + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SADE.py b/nevergrad/optimization/lama/SADE.py new file mode 100644 index 000000000..482d45b60 --- /dev/null +++ b/nevergrad/optimization/lama/SADE.py @@ -0,0 +1,64 @@ +import numpy as np + + +class SADE: + def __init__(self, budget, population_size=50, F_base=0.5, CR_base=0.8): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base differential weight + self.CR_base = CR_base # Base crossover probability + self.dimension = 5 + self.bounds = (-5.0, 5.0) # As problem specification + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + evaluations = self.population_size + + # Initialize adaptive parameters + F = np.full(self.population_size, self.F_base) + CR = np.full(self.population_size, self.CR_base) + + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy adaptively based on fitness + if fitness[i] < np.median(fitness): + F[i] *= 1.1 + else: + F[i] *= 0.9 + + # Mutation and crossover + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F[i] * (b - c), self.bounds[0], self.bounds[1]) + + cross_points = np.random.rand(self.dimension) < CR[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + f_trial = func(trial) + evaluations += 1 + + # Selection: Accept the trial if it is better + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + CR[i] *= 1.05 # Increase CR to promote more exploration in future generations + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + CR[i] *= 0.95 # Decrease CR to reduce diversity + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SADEEM.py b/nevergrad/optimization/lama/SADEEM.py new file mode 100644 index 000000000..cf24de09b --- /dev/null +++ b/nevergrad/optimization/lama/SADEEM.py @@ -0,0 +1,64 @@ +import numpy as np + + +class SADEEM: + def __init__(self, budget, population_size=30, F=0.8, CR=0.9): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.F = F + self.CR = CR + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dimension)) + + def evaluate_population(self, population, func): + return np.array([func(ind) for ind in population]) + + def mutation(self, population, best_idx): + new_population = np.zeros_like(population) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + # Simple OBL + if np.random.rand() < 0.1: + mutant = self.lower_bound + self.upper_bound - mutant + new_population[i] = np.clip(mutant, self.lower_bound, self.upper_bound) + return new_population + + def crossover(self, population, mutant_population): + crossover_population = np.array( + [ + np.where(np.random.rand(self.dimension) < self.CR, mutant_population[i], population[i]) + for i in range(self.population_size) + ] + ) + return crossover_population + + def select(self, population, fitness, trial_population, func): + trial_fitness = self.evaluate_population(trial_population, func) + for i in range(self.population_size): + if trial_fitness[i] < fitness[i]: + fitness[i] = trial_fitness[i] + population[i] = trial_population[i] + return population, fitness + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(population, func) + best_idx = np.argmin(fitness) + + evaluations = self.population_size + while evaluations < self.budget: + mutant_population = self.mutation(population, best_idx) + crossover_population = self.crossover(population, mutant_population) + population, fitness = self.select(population, fitness, crossover_population, func) + evaluations += self.population_size + best_idx = np.argmin(fitness) + + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SADEIOL.py b/nevergrad/optimization/lama/SADEIOL.py new file mode 100644 index 000000000..e274d9753 --- /dev/null +++ b/nevergrad/optimization/lama/SADEIOL.py @@ -0,0 +1,79 @@ +import numpy as np + + +class SADEIOL: + def __init__(self, budget, population_size=50, F_init=0.5, CR_init=0.9): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.F_init = F_init + self.CR_init = CR_init + + def opposition_based_learning(self, population): + return self.lower_bound + self.upper_bound - population + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + success_history = [] + + # Strategic looping over the budget + while evaluations < self.budget: + F = np.clip(np.random.normal(self.F_init, 0.1), 0.1, 1) + CR = np.clip(np.random.normal(self.CR_init, 0.1), 0.1, 1) + + for i in range(self.population_size): + mutation_strategy = np.random.choice(["rand", "best", "current-to-best"]) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + + if mutation_strategy == "rand": + mutant = population[a] + F * (population[b] - population[c]) + elif mutation_strategy == "best": + mutant = population[best_idx] + F * (population[a] - population[b]) + elif mutation_strategy == "current-to-best": + mutant = ( + population[i] + + F * (population[best_idx] - population[i]) + + F * (population[a] - population[b]) + ) + + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + success_history.append(1) + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + else: + success_history.append(0) + + if len(success_history) > 50: # Update rates based on recent history + success_rate = np.mean(success_history[-50:]) + self.F_init = F * success_rate + self.CR_init = CR * success_rate + + if ( + evaluations % (self.population_size * 10) == 0 and np.std(fitness) < 1e-5 + ): # Stagnation detection + population = self.opposition_based_learning(population) + fitness = np.array([func(ind) for ind in population]) + evaluations += self.population_size + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SADEPF.py b/nevergrad/optimization/lama/SADEPF.py new file mode 100644 index 000000000..821940643 --- /dev/null +++ b/nevergrad/optimization/lama/SADEPF.py @@ -0,0 +1,61 @@ +import numpy as np + + +class SADEPF: + def __init__(self, budget, population_size=50, F_init=0.5, CR_init=0.9): + self.budget = budget + self.CR_init = CR_init + self.F_init = F_init + self.population_size = population_size + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population = np.random.uniform( + self.lower_bound, self.upper_bound, (self.population_size, self.dimension) + ) + fitness = np.array([func(ind) for ind in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + F = self.F_init + CR = self.CR_init + success_memory = [] + + evaluations = self.population_size + while evaluations < self.budget: + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + cross_points = np.random.rand(self.dimension) < CR + trial = np.where(cross_points, mutant, population[i]) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + success_memory.append(1) + else: + success_memory.append(0) + + if evaluations >= self.budget: + break + + # Adaptive Feedback Mechanism + if len(success_memory) > 20: # Using the last 20 steps to calculate success rate + success_rate = np.mean(success_memory[-20:]) + F = np.clip(F + 0.1 * (success_rate - 0.5), 0.1, 1.0) + CR = np.clip(CR + 0.1 * (success_rate - 0.5), 0.1, 1.0) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SAGEA.py b/nevergrad/optimization/lama/SAGEA.py new file mode 100644 index 000000000..2b3c9518a --- /dev/null +++ b/nevergrad/optimization/lama/SAGEA.py @@ -0,0 +1,59 @@ +import numpy as np + + +class SAGEA: + def __init__(self, budget, population_size=150, crossover_prob=0.8, mutation_factor=0.6): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.crossover_prob = crossover_prob + self.mutation_factor = mutation_factor + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + num_evals = self.population_size + + # Evolutionary loop + while num_evals < self.budget: + new_population = [] + for i in range(self.population_size): + # Mutation: DE/rand/1/bin with scaled mutation factor + indices = np.random.choice(self.population_size, 3, replace=False) + x1, x2, x3 = population[indices] + scale = 1.0 - (num_evals / self.budget) # Decrease scale over time + mutant = x1 + self.mutation_factor * scale * (x2 - x3) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_prob + trial_vector = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial_vector) + num_evals += 1 + + if trial_fitness < fitness[i]: + new_population.append(trial_vector) + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial_vector + else: + new_population.append(population[i]) + + if num_evals >= self.budget: + break + + population = np.array(new_population) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SGAE.py b/nevergrad/optimization/lama/SGAE.py new file mode 100644 index 000000000..a3bddf6e6 --- /dev/null +++ b/nevergrad/optimization/lama/SGAE.py @@ -0,0 +1,76 @@ +import numpy as np + + +class SGAE: + def __init__( + self, budget, population_size=100, F=0.8, CR=0.9, gradient_weight=0.1, mutation_strategy="best" + ): + self.budget = budget + self.population_size = population_size + self.dimension = 5 + self.lb = -5.0 + self.ub = 5.0 + self.F = F # Mutation factor + self.CR = CR # Crossover probability + self.gradient_weight = gradient_weight # Weight for gradient direction + self.mutation_strategy = mutation_strategy # Mutation strategy ('best' or 'random') + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = self.population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + while num_evals < self.budget: + new_population = np.empty_like(population) + + # Mutation and crossover + for i in range(self.population_size): + if num_evals >= self.budget: + break + # Select parents + indices = [idx for idx in range(self.population_size) if idx != i] + a, b, c = np.random.choice(indices, 3, replace=False) + + x_best = population[best_idx] if self.mutation_strategy == "best" else population[a] + + # Mutation + mutant = x_best + self.F * (population[b] - population[c]) + mutant = np.clip(mutant, self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dimension)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Calculate gradient approximation + perturbation = np.zeros(self.dimension) + perturbation[np.random.randint(0, self.dimension)] = 0.01 + grad_estimated = (func(population[i] + perturbation) - fitness[i]) / 0.01 + num_evals += 1 + + # Gradient exploitation + trial -= self.gradient_weight * grad_estimated * perturbation + + trial = np.clip(trial, self.lb, self.ub) + trial_fitness = func(trial) + num_evals += 1 + + # Selection + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_individual = trial.copy() + else: + new_population[i] = population[i] + + population = new_population.copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SGE.py b/nevergrad/optimization/lama/SGE.py new file mode 100644 index 000000000..533950bf3 --- /dev/null +++ b/nevergrad/optimization/lama/SGE.py @@ -0,0 +1,71 @@ +import numpy as np + + +class SGE: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + population_size = 100 + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dimension)) + fitness = np.array([func(ind) for ind in population]) + num_evals = population_size + + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + # Introducing learning rate and momentum for evolution strategy + learning_rate = 0.01 + momentum = 0.9 + velocity = np.zeros_like(population) + + while num_evals < self.budget: + new_population = np.empty_like(population) + new_fitness = np.empty_like(fitness) + + # Decay learning rate and adapt momentum + learning_rate *= 0.99 + current_momentum = momentum * (1 - 0.95 * (num_evals / self.budget)) + + for i in range(population_size): + if num_evals >= self.budget: + break + + # Gradient approximation via finite differences + perturbation = np.random.normal(0, 1, self.dimension) + candidate_plus = population[i] + learning_rate * perturbation + candidate_minus = population[i] - learning_rate * perturbation + + candidate_plus = np.clip(candidate_plus, self.lower_bound, self.upper_bound) + candidate_minus = np.clip(candidate_minus, self.lower_bound, self.upper_bound) + + fitness_plus = func(candidate_plus) + fitness_minus = func(candidate_minus) + num_evals += 2 + + # Compute approximate gradient + gradient = (fitness_minus - fitness_plus) / (2 * learning_rate * perturbation) + velocity[i] = current_momentum * velocity[i] - learning_rate * gradient + candidate = population[i] + velocity[i] + candidate = np.clip(candidate, self.lower_bound, self.upper_bound) + + trial_fitness = func(candidate) + num_evals += 1 + + # Keep track of new population and fitness + new_population[i] = candidate if trial_fitness < fitness[i] else population[i] + new_fitness[i] = trial_fitness if trial_fitness < fitness[i] else fitness[i] + + # Update population and best individual + population[:] = new_population + fitness[:] = new_fitness + best_idx = np.argmin(fitness) + if fitness[best_idx] < best_fitness: + best_fitness = fitness[best_idx] + best_individual = population[best_idx].copy() + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SORAMED.py b/nevergrad/optimization/lama/SORAMED.py new file mode 100644 index 000000000..87008be1b --- /dev/null +++ b/nevergrad/optimization/lama/SORAMED.py @@ -0,0 +1,85 @@ +import numpy as np + + +class SORAMED: + def __init__( + self, + budget, + population_size=100, + crossover_rate=0.85, + F_min=0.3, + F_max=0.8, + memory_size=100, + elite_size=3, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate # Further reduced to promote more diversity + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size # Further reduced to focus on a very small set of high-quality elites + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population randomly + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory for good solutions + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Elite solutions + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Track best solution + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elite pool + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor with sinusoidal decay + F = self.F_max - (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + + # Mutation with emphasis on elite and best solutions, and a new random component + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = population[np.random.choice(idxs, 4, replace=False)] + best_or_elite = ( + best_solution if np.random.rand() < 0.8 else elite[np.random.randint(0, self.elite_size)] + ) + mutant = np.clip(a + F * (best_or_elite - a + b - c + d), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate and possibly update population + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/ScaledHybridDifferentialEvolution.py b/nevergrad/optimization/lama/ScaledHybridDifferentialEvolution.py new file mode 100644 index 000000000..422ede7df --- /dev/null +++ b/nevergrad/optimization/lama/ScaledHybridDifferentialEvolution.py @@ -0,0 +1,56 @@ +import numpy as np + + +class ScaledHybridDifferentialEvolution: + def __init__(self, budget, dim=5, pop_size=200, F_start=0.9, F_end=0.5, CR=0.9): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.F_start = F_start # Initial mutation factor + self.F_end = F_end # Final mutation factor for later stages + self.CR = CR # Crossover probability + self.bounds = (-5.0, 5.0) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], size=(self.pop_size, self.dim)) + + def mutate(self, population, idx, n_evals): + indices = [i for i in range(self.pop_size) if i != idx] + a, b, c = np.random.choice(indices, 3, replace=False) + # Gradual adjustment of mutation factor from F_start to F_end + F = self.F_start - (self.F_start - self.F_end) * (n_evals / self.budget) + mutant = np.clip(population[a] + F * (population[b] - population[c]), self.bounds[0], self.bounds[1]) + return mutant + + def crossover(self, target, mutant): + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, target) + return trial + + def select(self, population, f_values, trial, trial_f, trial_idx): + if trial_f < f_values[trial_idx]: + population[trial_idx] = trial + f_values[trial_idx] = trial_f + + def __call__(self, func): + population = self.initialize_population() + f_values = np.array([func(ind) for ind in population]) + n_evals = self.pop_size + + while n_evals < self.budget: + for idx in range(self.pop_size): + mutant = self.mutate(population, idx, n_evals) + trial = self.crossover(population[idx], mutant) + trial_f = func(trial) + n_evals += 1 + self.select(population, f_values, trial, trial_f, idx) + if n_evals >= self.budget: + break + + self.f_opt = np.min(f_values) + self.x_opt = population[np.argmin(f_values)] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptingDifferentialEvolutionOptimizer.py b/nevergrad/optimization/lama/SelfAdaptingDifferentialEvolutionOptimizer.py new file mode 100644 index 000000000..1ffc1bbc6 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptingDifferentialEvolutionOptimizer.py @@ -0,0 +1,58 @@ +import numpy as np + + +class SelfAdaptingDifferentialEvolutionOptimizer: + def __init__(self, budget=10000, pop_size=50, init_F=0.8, init_CR=0.9): + self.budget = budget + self.pop_size = pop_size + self.init_F = init_F + self.init_CR = init_CR + self.dim = 5 # As stated, dimensionality is 5 + self.bounds = (-5.0, 5.0) # Bounds are given as [-5.0, 5.0] + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.pop_size + + # Differential weights and crossover probabilities for each individual + F_values = np.full(self.pop_size, self.init_F) + CR_values = np.full(self.pop_size, self.init_CR) + + while self.eval_count < self.budget: + for i in range(self.pop_size): + # Mutation + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + F = F_values[i] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + CR = CR_values[i] + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + # Self-adapting parameters + F_values[i] = F * 1.1 if F < 1 else F + CR_values[i] = CR * 1.1 if CR < 1 else CR + else: + F_values[i] = F * 0.9 if F > 0 else F + CR_values[i] = CR * 0.9 if CR > 0 else CR + + if self.eval_count >= self.budget: + break + + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveCovarianceMatrixDifferentialEvolution.py b/nevergrad/optimization/lama/SelfAdaptiveCovarianceMatrixDifferentialEvolution.py new file mode 100644 index 000000000..9e7c9186f --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveCovarianceMatrixDifferentialEvolution.py @@ -0,0 +1,115 @@ +import numpy as np + + +class SelfAdaptiveCovarianceMatrixDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F_base = 0.8 # Base differential weight + CR_base = 0.9 # Base crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F_base) + CR_values = np.full(population_size, CR_base) + + # Initialize covariance matrix + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 + np.eye( + self.dim + ) * 1e-6 # Ensure positive semi-definiteness + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation with covariance matrix adaptation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Update covariance matrix based on the new population + mean = np.mean(population, axis=0) + cov_matrix = np.cov(population.T) + cov_matrix = (cov_matrix + cov_matrix.T) / 2 + np.eye( + self.dim + ) * 1e-6 # Ensure positive semi-definiteness + + perturbation_population = np.zeros_like(population) + for i in range(population_size): + perturbation = np.random.multivariate_normal(mean, cov_matrix) + perturbation_population[i] = np.clip(perturbation, bounds[0], bounds[1]) + + f_trial = func(perturbation_population[i]) + evaluations += 1 + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = perturbation_population[i] + + if evaluations >= self.budget: + break + + # Combine the new population with perturbation population and select the best + combined_population = np.vstack((population, perturbation_population)) + combined_fitness = np.array([func(ind) for ind in combined_population]) + best_indices = np.argsort(combined_fitness)[:population_size] + population = combined_population[best_indices] + fitness = combined_fitness[best_indices] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..52e3d8f93 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolution.py @@ -0,0 +1,79 @@ +import numpy as np + + +class SelfAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithLocalRestart.py b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithLocalRestart.py new file mode 100644 index 000000000..a30bd5cb3 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithLocalRestart.py @@ -0,0 +1,101 @@ +import numpy as np + + +class SelfAdaptiveDifferentialEvolutionWithLocalRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + def local_restart(best_ind): + std_dev = np.std(population, axis=0) + new_population = best_ind + np.random.normal(scale=std_dev, size=(population_size, self.dim)) + new_population = np.clip(new_population, bounds[0], bounds[1]) + new_fitness = np.array([func(ind) for ind in new_population]) + return new_population, new_fitness + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + best_ind = population[np.argmin(fitness)] + population, fitness = local_restart(best_ind) + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithMemeticSearch.py b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithMemeticSearch.py new file mode 100644 index 000000000..fe3996e7a --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithMemeticSearch.py @@ -0,0 +1,128 @@ +import numpy as np + + +class SelfAdaptiveDifferentialEvolutionWithMemeticSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + + # Initialize population + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + evaluations = population_size + + # Initialize self-adaptive parameters + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + while evaluations < self.budget: + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + # Adaptation of F and CR + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + # Update the best found solution + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + # Apply memetic search on the best solution found so far + if evaluations < self.budget: + local_search_budget = int(self.budget * 0.05) # allocate 5% of the budget for local search + perturbation_std = 0.1 # adjust perturbation standard deviation + + for _ in range(local_search_budget): + perturbation = np.random.normal(0, perturbation_std, self.dim) + local_trial = np.clip(self.x_opt + perturbation, bounds[0], bounds[1]) + f_local_trial = func(local_trial) + evaluations += 1 + + if f_local_trial < self.f_opt: + self.f_opt = f_local_trial + self.x_opt = local_trial + + if evaluations >= self.budget: + break + + # Additional memetic search using a gradient-based method + gradient_search_budget = int( + self.budget * 0.05 + ) # allocate another 5% of the budget for gradient search + + for _ in range(gradient_search_budget): + gradient = self.compute_gradient(func, self.x_opt) + local_trial = np.clip(self.x_opt - 0.01 * gradient, bounds[0], bounds[1]) + f_local_trial = func(local_trial) + evaluations += 1 + + if f_local_trial < self.f_opt: + self.f_opt = f_local_trial + self.x_opt = local_trial + + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt + + def compute_gradient(self, func, x): + epsilon = 1e-8 + gradient = np.zeros(self.dim) + for i in range(self.dim): + x_upper = x.copy() + x_lower = x.copy() + x_upper[i] += epsilon + x_lower[i] -= epsilon + f_upper = func(x_upper) + f_lower = func(x_lower) + gradient[i] = (f_upper - f_lower) / (2 * epsilon) + return gradient diff --git a/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithRestart.py b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithRestart.py new file mode 100644 index 000000000..35f882feb --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveDifferentialEvolutionWithRestart.py @@ -0,0 +1,93 @@ +import numpy as np + + +class SelfAdaptiveDifferentialEvolutionWithRestart: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # fixed dimensionality + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + bounds = np.array([-5.0, 5.0]) + population_size = 20 + F = 0.8 # Differential weight + CR = 0.9 # Crossover probability + restart_threshold = 0.2 * self.budget # Restart after 20% of budget if no improvement + + def initialize_population(): + population = np.random.uniform(bounds[0], bounds[1], (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + return population, fitness + + def adaptive_parameters(F_values, CR_values): + for i in range(population_size): + if np.random.rand() < 0.1: + F_values[i] = 0.1 + 0.9 * np.random.rand() + if np.random.rand() < 0.1: + CR_values[i] = np.random.rand() + return F_values, CR_values + + population, fitness = initialize_population() + evaluations = population_size + + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + + last_improvement = evaluations + + while evaluations < self.budget: + if evaluations - last_improvement > restart_threshold: + population, fitness = initialize_population() + F_values = np.full(population_size, F) + CR_values = np.full(population_size, CR) + last_improvement = evaluations + + new_population = np.zeros_like(population) + new_fitness = np.zeros(population_size) + new_F_values = np.zeros(population_size) + new_CR_values = np.zeros(population_size) + + for i in range(population_size): + F_values, CR_values = adaptive_parameters(F_values, CR_values) + + # Mutation + indices = list(range(population_size)) + indices.remove(i) + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F_values[i] * (b - c), bounds[0], bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR_values[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + + if f_trial < fitness[i]: + new_population[i] = trial + new_fitness[i] = f_trial + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + last_improvement = evaluations + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + new_F_values[i] = F_values[i] + new_CR_values[i] = CR_values[i] + + if evaluations >= self.budget: + break + + population, fitness = new_population, new_fitness + F_values, CR_values = new_F_values, new_CR_values + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveDifferentialSwarmOptimization.py b/nevergrad/optimization/lama/SelfAdaptiveDifferentialSwarmOptimization.py new file mode 100644 index 000000000..ef349fd78 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveDifferentialSwarmOptimization.py @@ -0,0 +1,97 @@ +import numpy as np + + +class SelfAdaptiveDifferentialSwarmOptimization: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.3 * np.random.rand() + CR = 0.9 - 0.6 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.8 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.02 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + # Re-initialize half of the worst individuals to maintain diversity + worst_indices = np.argsort(fitness)[-int(0.5 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 3) == 0 and population_size > 20: + elite_indices = np.argsort(fitness)[: int(0.6 * population_size)] + population = population[elite_indices] + fitness = fitness[elite_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/SelfAdaptiveEvolutionaryAlgorithm.py new file mode 100644 index 000000000..365d0288c --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveEvolutionaryAlgorithm.py @@ -0,0 +1,60 @@ +import numpy as np + + +class SelfAdaptiveEvolutionaryAlgorithm: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 50 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + + # Initialize strategy parameters for each individual + F = np.random.uniform(0.5, 1.0, population_size) + CR = np.random.uniform(0.1, 0.9, population_size) + + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + # Mutation + indices = np.random.choice([j for j in range(population_size) if j != i], 3, replace=False) + x1, x2, x3 = population[indices] + mutant_vector = x1 + F[i] * (x2 - x3) + mutant_vector = np.clip(mutant_vector, self.lb, self.ub) + + # Crossover + trial_vector = np.copy(population[i]) + crossover_points = np.random.rand(self.dim) < CR[i] + if not np.any(crossover_points): + crossover_points[np.random.randint(0, self.dim)] = True + + trial_vector[crossover_points] = mutant_vector[crossover_points] + + # Selection + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + # Adapt strategy parameters + F[i] = F[i] + 0.1 * (np.random.rand() - 0.5) + F[i] = np.clip(F[i], 0.5, 1.0) + CR[i] = CR[i] + 0.1 * (np.random.rand() - 0.5) + CR[i] = np.clip(CR[i], 0.1, 0.9) + + # Update global best + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveHybridOptimizer.py b/nevergrad/optimization/lama/SelfAdaptiveHybridOptimizer.py new file mode 100644 index 000000000..772c51e35 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveHybridOptimizer.py @@ -0,0 +1,130 @@ +import numpy as np + + +class SelfAdaptiveHybridOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 40 # Increased population size for better exploration + self.initial_F = 0.5 + self.initial_CR = 0.9 + self.c1 = 1.2 # Reduced c1 for better balance + self.c2 = 1.2 # Reduced c2 for better balance + self.w = 0.6 # Adjusted inertia weight for better convergence + self.elite_fraction = 0.3 # Increased elite fraction for better local refinement + self.diversity_threshold = 1e-5 # Adjusted threshold for controlled diversity + self.tau1 = 0.1 + self.tau2 = 0.1 + + def initialize_population(self, bounds): + population = np.random.uniform(bounds.lb, bounds.ub, (self.pop_size, self.dim)) + velocities = np.random.uniform(-1, 1, (self.pop_size, self.dim)) + return population, velocities + + def select_parents(self, population): + idxs = np.random.choice(range(population.shape[0]), 3, replace=False) + return population[idxs] + + def mutate(self, parent1, parent2, parent3, F): + return np.clip(parent1 + F * (parent2 - parent3), -5.0, 5.0) + + def crossover(self, target, mutant, CR): + j_rand = np.random.randint(self.dim) + return np.where(np.random.rand(self.dim) < CR, mutant, target) + + def diversity(self, population): + return np.mean(np.std(population, axis=0)) + + def adapt_parameters(self, F, CR): + if np.random.rand() < self.tau1: + F = np.clip(np.random.normal(F, 0.1), 0, 1) + if np.random.rand() < self.tau2: + CR = np.clip(np.random.normal(CR, 0.1), 0, 1) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + bounds = func.bounds + + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations = self.pop_size + + F = self.initial_F + CR = self.initial_CR + + while evaluations < self.budget: + new_population = np.zeros((self.pop_size, self.dim)) + fitness = np.zeros(self.pop_size) + + for i in range(self.pop_size): + # Parent selection and mutation + parent1, parent2, parent3 = self.select_parents(population) + F, CR = self.adapt_parameters(F, CR) + mutant = self.mutate(parent1, parent2, parent3, F) + trial = self.crossover(population[i], mutant, CR) + + trial_fitness = func(trial) + evaluations += 1 + + if trial_fitness < personal_best_scores[i]: + personal_best_positions[i] = trial + personal_best_scores[i] = trial_fitness + + if personal_best_scores[i] < global_best_score: + global_best_position = personal_best_positions[i] + global_best_score = personal_best_scores[i] + + velocities[i] = ( + self.w * velocities[i] + + self.c1 * np.random.rand(self.dim) * (personal_best_positions[i] - population[i]) + + self.c2 * np.random.rand(self.dim) * (global_best_position - population[i]) + ) + new_population[i] = population[i] + velocities[i] + new_population[i] = np.clip(new_population[i], bounds.lb, bounds.ub) + fitness[i] = func(new_population[i]) + evaluations += 1 + + population = new_population + if np.min(fitness) < self.f_opt: + self.f_opt = np.min(fitness) + self.x_opt = population[np.argmin(fitness)] + + # Elite selection for local search + elite_count = max(1, int(self.elite_fraction * self.pop_size)) + elite_indices = np.argsort(fitness)[:elite_count] + elite_population = population[elite_indices] + elite_velocities = velocities[elite_indices] + + for idx in range(elite_count): + local_search_budget = min(10, self.budget - evaluations) # Increased local search iterations + for _ in range(local_search_budget): + trial = np.clip( + elite_population[idx] + np.random.randn(self.dim) * 0.05, bounds.lb, bounds.ub + ) # Reduced perturbation + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[elite_indices[idx]]: + elite_population[idx] = trial + fitness[elite_indices[idx]] = trial_fitness + if evaluations >= self.budget: + break + + # Reinitialization if diversity is too low + if self.diversity(population) < self.diversity_threshold: + population, velocities = self.initialize_population(bounds) + personal_best_positions = np.copy(population) + personal_best_scores = np.array([func(ind) for ind in population]) + global_best_position = personal_best_positions[np.argmin(personal_best_scores)] + global_best_score = np.min(personal_best_scores) + evaluations += self.pop_size + else: + # Update population with elite individuals + population[:elite_count] = elite_population + velocities[:elite_count] = elite_velocities + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveInterleavedOptimization.py b/nevergrad/optimization/lama/SelfAdaptiveInterleavedOptimization.py new file mode 100644 index 000000000..0e35829d5 --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveInterleavedOptimization.py @@ -0,0 +1,105 @@ +import numpy as np + + +class SelfAdaptiveInterleavedOptimization: + def __init__(self, budget, population_size=30, mutation_rate=0.8, crossover_rate=0.9, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x): + epsilon = 1e-8 + grad = np.zeros_like(x) + for i in range(len(x)): + x_pos = np.copy(x) + x_neg = np.copy(x) + x_pos[i] += epsilon + x_neg[i] -= epsilon + grad[i] = (func(x_pos) - func(x_neg)) / (2 * epsilon) + return grad + + def differential_evolution(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + self.mutation_rate * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < self.crossover_rate + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, pop, scores): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + grad = self.gradient_estimation(func, pop[i]) + candidate = np.clip(pop[i] - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = candidate + return new_pop, new_scores + + def adaptive_parameters(self, iteration, max_iterations): + self.mutation_rate = 0.5 + 0.5 * (iteration / max_iterations) + self.crossover_rate = 0.5 + 0.5 * (iteration / max_iterations) + self.learning_rate = 0.01 * (1 - (iteration / max_iterations)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = (self.budget // self.population_size) * 2 + + iteration = 0 + while evaluations < self.budget: + self.adaptive_parameters(iteration, max_iterations) + + # Perform differential evolution step + pop, scores = self.differential_evolution(func, pop, scores) + evaluations += self.population_size + + # Update global best from differential evolution + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + if evaluations >= self.budget: + break + + # Perform local search step + pop, scores = self.local_search(func, pop, scores) + evaluations += self.population_size + + # Update global best from local search + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveMemeticAlgorithmV2.py b/nevergrad/optimization/lama/SelfAdaptiveMemeticAlgorithmV2.py new file mode 100644 index 000000000..85c0434dc --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveMemeticAlgorithmV2.py @@ -0,0 +1,109 @@ +import numpy as np + + +class SelfAdaptiveMemeticAlgorithmV2: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, alpha=0.5, beta=0.5, memetic_rate=0.3): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.alpha = alpha # Exploration weight + self.beta = beta # Exploitation weight + self.memetic_rate = memetic_rate # Probability of applying local search + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def hybrid_step(self, func, pop, scores, crossover_rates, mutation_factors, learning_rate): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + learning_rate = self.alpha * ((1 - iteration / max_iterations) ** self.beta) + + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rates, mutation_factors, learning_rate + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveMemeticEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/SelfAdaptiveMemeticEvolutionaryAlgorithm.py new file mode 100644 index 000000000..a62e956ba --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveMemeticEvolutionaryAlgorithm.py @@ -0,0 +1,109 @@ +import numpy as np + + +class SelfAdaptiveMemeticEvolutionaryAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score, learning_rate): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, iteration, max_iterations, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def hybrid_step( + self, func, pop, scores, crossover_rates, mutation_factors, learning_rate, memetic_probability + ): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < memetic_probability: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i], learning_rate) + return new_pop, new_scores + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + learning_rate = 0.01 * ((1 - iteration / max_iterations) ** 0.5) + memetic_probability = 0.5 * (1 + np.cos(iteration / max_iterations * np.pi)) + + # Adapt parameters + self.adaptive_parameters(iteration, max_iterations, crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.hybrid_step( + func, pop, scores, crossover_rates, mutation_factors, learning_rate, memetic_probability + ) + evaluations += self.population_size + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveOppositionBasedHarmonySearchDE.py b/nevergrad/optimization/lama/SelfAdaptiveOppositionBasedHarmonySearchDE.py new file mode 100644 index 000000000..61759728c --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveOppositionBasedHarmonySearchDE.py @@ -0,0 +1,107 @@ +import numpy as np + + +class SelfAdaptiveOppositionBasedHarmonySearchDE: + def __init__( + self, budget=10000, harmony_memory_size=20, hmcr=0.7, par=0.4, de_sf=0.8, de_cr=0.5, de_step_size=0.1 + ): + self.budget = budget + self.harmony_memory_size = harmony_memory_size + self.hmcr = hmcr + self.par = par + self.de_sf = de_sf + self.de_cr = de_cr + self.de_step_size = de_step_size + + self.dim = 5 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_harmony_memory(self, func): + self.harmony_memory = np.random.uniform( + func.bounds.lb, func.bounds.ub, (self.harmony_memory_size, self.dim) + ) + self.harmony_memory_fitness = np.array([func(x) for x in self.harmony_memory]) + + def harmony_search(self, func): + new_harmony = np.zeros(self.dim) + for j in range(self.dim): + if np.random.rand() < self.hmcr: + idx = np.random.randint(self.harmony_memory_size) + new_harmony[j] = self.harmony_memory[idx, j] + else: + new_harmony[j] = np.random.uniform(func.bounds.lb[j], func.bounds.ub[j]) + + if np.random.rand() < self.par: + new_harmony[j] += np.random.uniform(-1, 1) * self.de_step_size + + new_harmony[j] = np.clip(new_harmony[j], func.bounds.lb[j], func.bounds.ub[j]) + + return new_harmony + + def opposition_based_learning(self, solution, bounds): + return 2 * bounds.lb - solution + 2 * (solution - bounds.lb) + + def differential_evolution(self, func, current_harmony, best_harmony): + mutant_harmony = current_harmony + self.de_sf * (best_harmony - current_harmony) + crossover_mask = np.random.rand(self.dim) < self.de_cr + trial_harmony = np.where(crossover_mask, mutant_harmony, current_harmony) + return np.clip(trial_harmony, func.bounds.lb, func.bounds.ub) + + def self_adaptive_parameter_update(self, success): + if success: + self.hmcr = min(1.0, self.hmcr * 1.05) # Increase HMCR if successful + self.par = max(0.0, self.par * 0.95) # Decrease PAR if successful + self.de_sf = max(0.5, self.de_sf * 1.05) # Increase DE scale factor if successful + self.de_cr = min(1.0, self.de_cr * 1.05) # Increase DE crossover rate if successful + else: + self.hmcr = max(0.0, self.hmcr * 0.95) # Decrease HMCR if not successful + self.par = min(1.0, self.par * 1.05) # Increase PAR if not successful + self.de_sf = max(0.5, self.de_sf * 0.95) # Decrease DE scale factor if not successful + self.de_cr = max(0.0, self.de_cr * 0.95) # Decrease DE crossover rate if not successful + + def __call__(self, func): + self.initialize_harmony_memory(func) + + for i in range(self.budget): + new_harmony = self.harmony_search(func) + new_fitness = func(new_harmony) + + if new_fitness < self.f_opt: + self.f_opt = new_fitness + self.x_opt = new_harmony + + idx_worst = np.argmax(self.harmony_memory_fitness) + if new_fitness < self.harmony_memory_fitness[idx_worst]: + self.harmony_memory[idx_worst] = new_harmony + self.harmony_memory_fitness[idx_worst] = new_fitness + + improved_harmony = self.opposition_based_learning(new_harmony, func.bounds) + improved_fitness = func(improved_harmony) + + if improved_fitness < self.f_opt: + self.f_opt = improved_fitness + self.x_opt = improved_harmony + + idx_worst_improved = np.argmax(self.harmony_memory_fitness) + if improved_fitness < self.harmony_memory_fitness[idx_worst_improved]: + self.harmony_memory[idx_worst_improved] = improved_harmony + self.harmony_memory_fitness[idx_worst_improved] = improved_fitness + + best_harmony = self.harmony_memory[np.argmin(self.harmony_memory_fitness)] + trial_harmony = self.differential_evolution(func, new_harmony, best_harmony) + trial_fitness = func(trial_harmony) + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_harmony + + idx_worst_trial = np.argmax(self.harmony_memory_fitness) + if trial_fitness < self.harmony_memory_fitness[idx_worst_trial]: + self.harmony_memory[idx_worst_trial] = trial_harmony + self.harmony_memory_fitness[idx_worst_trial] = trial_fitness + self.self_adaptive_parameter_update(True) + else: + self.self_adaptive_parameter_update(False) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SelfAdaptiveQuantumMemeticAlgorithm.py b/nevergrad/optimization/lama/SelfAdaptiveQuantumMemeticAlgorithm.py new file mode 100644 index 000000000..a57870eda --- /dev/null +++ b/nevergrad/optimization/lama/SelfAdaptiveQuantumMemeticAlgorithm.py @@ -0,0 +1,118 @@ +import numpy as np + + +class SelfAdaptiveQuantumMemeticAlgorithm: + def __init__(self, budget, population_size=50, tau1=0.1, tau2=0.1, memetic_rate=0.3, learning_rate=0.01): + self.budget = budget + self.population_size = population_size + self.tau1 = tau1 + self.tau2 = tau2 + self.memetic_rate = memetic_rate + self.learning_rate = learning_rate + + def gradient_estimation(self, func, x, h=1e-6): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def quantum_walk(self, x, global_best, alpha=0.1): + return np.clip(x + alpha * (global_best - x) * np.random.uniform(-1, 1, size=x.shape), -5.0, 5.0) + + def evolutionary_step(self, func, pop, scores, crossover_rates, mutation_factors): + new_pop = np.copy(pop) + new_scores = np.copy(scores) + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + F = mutation_factors[i] + mutant = np.clip(a + F * (b - c), -5.0, 5.0) + cross_points = np.random.rand(len(mutant)) < crossover_rates[i] + if not np.any(cross_points): + cross_points[np.random.randint(0, len(mutant))] = True + trial = np.where(cross_points, mutant, pop[i]) + f = func(trial) + if f < scores[i]: + new_scores[i] = f + new_pop[i] = trial + return new_pop, new_scores + + def local_search(self, func, x, score): + grad = self.gradient_estimation(func, x) + candidate = np.clip(x - self.learning_rate * grad, -5.0, 5.0) + f = func(candidate) + if f < score: + return candidate, f + return x, score + + def adaptive_parameters(self, crossover_rates, mutation_factors): + for i in range(self.population_size): + if np.random.rand() < self.tau1: + crossover_rates[i] = np.clip(crossover_rates[i] + np.random.normal(0, 0.1), 0, 1) + if np.random.rand() < self.tau2: + mutation_factors[i] = np.clip(mutation_factors[i] + np.random.normal(0, 0.1), 0, 2) + + def ensemble_step(self, func, pop, scores, crossover_rates, mutation_factors, global_best): + new_pop, new_scores = self.evolutionary_step(func, pop, scores, crossover_rates, mutation_factors) + for i in range(self.population_size): + if np.random.rand() < self.memetic_rate: + new_pop[i], new_scores[i] = self.local_search(func, new_pop[i], new_scores[i]) + else: + new_pop[i] = self.quantum_walk(new_pop[i], global_best) + new_scores[i] = func(new_pop[i]) + return new_pop, new_scores + + def temperature_schedule(self, current_iter, max_iter): + return max(0.5, (1 - current_iter / max_iter)) + + def __call__(self, func): + np.random.seed(0) + dim = 5 + lower_bound = -5.0 + upper_bound = 5.0 + + # Initialize population + pop = np.random.uniform(lower_bound, upper_bound, (self.population_size, dim)) + scores = np.array([func(ind) for ind in pop]) + + # Initialize crossover rates and mutation factors + crossover_rates = np.random.uniform(0.5, 1.0, self.population_size) + mutation_factors = np.random.uniform(0.5, 1.0, self.population_size) + + # Global best initialization + best_idx = np.argmin(scores) + global_best_position = pop[best_idx] + global_best_score = scores[best_idx] + + evaluations = self.population_size + max_iterations = self.budget // self.population_size + + iteration = 0 + while evaluations < self.budget: + # Adapt parameters + self.adaptive_parameters(crossover_rates, mutation_factors) + + # Perform hybrid step + pop, scores = self.ensemble_step( + func, pop, scores, crossover_rates, mutation_factors, global_best_position + ) + evaluations += self.population_size + + current_temp = self.temperature_schedule(iteration, max_iterations) + self.learning_rate *= current_temp + + # Update global best from population + best_idx = np.argmin(scores) + if scores[best_idx] < global_best_score: + global_best_score = scores[best_idx] + global_best_position = pop[best_idx] + + iteration += 1 + + self.f_opt = global_best_score + self.x_opt = global_best_position + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SequentialAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/SequentialAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..066456bbc --- /dev/null +++ b/nevergrad/optimization/lama/SequentialAdaptiveDifferentialEvolution.py @@ -0,0 +1,45 @@ +import numpy as np + + +class SequentialAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 150 # Population size + self.F_base = 0.5 # Base differential weight + self.CR = 0.8 # Crossover probability + + def __call__(self, func): + # Initialize population and fitness + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + # Adaptive F: decreases from 0.9 to 0.5 based on iteration count + F_dynamic = self.F_base + (0.9 - self.F_base) * (1 - iteration / n_iterations) + + for i in range(self.pop_size): + # Mutation and Crossover + indices = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = a + F_dynamic * (b - c) + mutant = np.clip(mutant, -5.0, 5.0) # Ensure mutant is within bounds + trial = np.where(np.random.rand(self.dim) < self.CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/SequentialQuadraticAdaptiveEvolutionStrategy.py b/nevergrad/optimization/lama/SequentialQuadraticAdaptiveEvolutionStrategy.py new file mode 100644 index 000000000..4926a8756 --- /dev/null +++ b/nevergrad/optimization/lama/SequentialQuadraticAdaptiveEvolutionStrategy.py @@ -0,0 +1,63 @@ +import numpy as np + + +class SequentialQuadraticAdaptiveEvolutionStrategy: + def __init__(self, budget, dimension=5, population_size=50, F_init=0.5, CR_init=0.8): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, index): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration): + # Adjusting F and CR based on iteration progression + scale = iteration / self.budget + self.F = np.clip(np.random.normal(0.5 * (1 - scale), 0.1), 0.1, 1) + self.CR = np.clip(np.random.normal(0.8 * (1 - scale), 0.05), 0.1, 1) + + def __call__(self, func): + population = self.initialize_population() + self.fitness = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + + while evaluations < self.budget: + self.adjust_parameters(iteration) + + for i in range(self.pop_size): + mutant = self.mutate(population, i) + trial = self.crossover(population[i], mutant) + population[i], self.fitness[i] = self.select(population[i], trial, func) + evaluations += 1 + if evaluations >= self.budget: + break + iteration += 1 + + f_opt = np.min(self.fitness) + x_opt = population[np.argmin(self.fitness)] + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SequentialQuadraticExploitationSearch.py b/nevergrad/optimization/lama/SequentialQuadraticExploitationSearch.py new file mode 100644 index 000000000..e64e3f913 --- /dev/null +++ b/nevergrad/optimization/lama/SequentialQuadraticExploitationSearch.py @@ -0,0 +1,73 @@ +import numpy as np + + +class SequentialQuadraticExploitationSearch: + def __init__(self, budget): + self.budget = budget + self.dimension = 5 # Dimensionality of the BBOB test suite + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Start with random initialization + current_position = np.random.uniform(self.lower_bound, self.upper_bound, self.dimension) + current_fitness = func(current_position) + self.update_optimum(current_position, current_fitness) + + # Control parameters + delta = 0.5 # Initial step size for exploratory moves + alpha = 0.5 # Step size reduction factor + epsilon = 1e-6 # Convergence criterion + + iteration = 1 + while iteration < self.budget: + # Create a set of points around the current position + points = np.array( + [current_position + delta * np.eye(self.dimension)[:, i] for i in range(self.dimension)] + + [current_position - delta * np.eye(self.dimension)[:, i] for i in range(self.dimension)] + ) + points = np.clip(points, self.lower_bound, self.upper_bound) + + # Evaluate the function at these points + fitnesses = np.array([func(point) for point in points]) + + # Approximate a quadratic model based on the current position and evaluated points + A, b = self.fit_quadratic(current_position, points, fitnesses) + if np.linalg.cond(A) < 1 / epsilon: # Check conditioning to prevent bad fits + # Update the position by moving opposite to the gradient implied by the quadratic model + try: + step_direction = -np.linalg.solve(A, b) + new_position = current_position + step_direction + new_position = np.clip(new_position, self.lower_bound, self.upper_bound) + new_fitness = func(new_position) + except np.linalg.LinAlgError: + new_position = current_position + new_fitness = current_fitness + + if new_fitness < current_fitness: + current_position, current_fitness = new_position, new_fitness + delta = min(delta / alpha, 1.0) # Increase step size upon success + else: + delta *= alpha # Reduce step size upon failure + + self.update_optimum(current_position, current_fitness) + iteration += 2 * self.dimension + 1 + + return self.f_opt, self.x_opt + + def update_optimum(self, x, f): + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + def fit_quadratic(self, center, points, fitnesses): + # Fit a quadratic model to the points and their fitness values + n = len(points) + X = np.hstack([np.ones((n, 1)), points - center, ((points - center) ** 2)]) + coeffs = np.linalg.lstsq(X, fitnesses, rcond=None)[0] + A = np.diag(coeffs[1 + self.dimension :]) + b = coeffs[1 : 1 + self.dimension] + return A, b diff --git a/nevergrad/optimization/lama/SimpleHybridDE.py b/nevergrad/optimization/lama/SimpleHybridDE.py new file mode 100644 index 000000000..acccb4570 --- /dev/null +++ b/nevergrad/optimization/lama/SimpleHybridDE.py @@ -0,0 +1,73 @@ +import numpy as np + + +class SimpleHybridDE: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.pop_size = 20 + self.F = 0.7 # Differential weight + self.CR = 0.9 # Crossover probability + + def random_bounds(self): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def local_search(self, x, func): + best = x + f_best = func(x) + perturbations = np.linspace(-0.1, 0.1, 5) + for d in range(self.dim): + for perturb in perturbations: + x_new = np.copy(x) + x_new[d] += perturb + x_new = np.clip(x_new, self.bounds[0], self.bounds[1]) + f_new = func(x_new) + if f_new < f_best: + best = x_new + f_best = f_new + return best + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population = np.array([self.random_bounds() for _ in range(self.pop_size)]) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.pop_size + + while evaluations < self.budget: + for i in range(self.pop_size): + # Select three distinct individuals (but different from i) + indices = np.arange(self.pop_size) + indices = indices[indices != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Differential Evolution mutation and crossover + mutant = np.clip(a + self.F * (b - c), self.bounds[0], self.bounds[1]) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Local Search with a small probability + if np.random.rand() < 0.2 and evaluations + 5 <= self.budget: + trial = self.local_search(trial, func) + evaluations += 5 + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + # Check if we've exhausted our budget + if evaluations >= self.budget: + break + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SimplifiedAdaptiveDynamicDualPhaseStrategyV18.py b/nevergrad/optimization/lama/SimplifiedAdaptiveDynamicDualPhaseStrategyV18.py new file mode 100644 index 000000000..b7097ace8 --- /dev/null +++ b/nevergrad/optimization/lama/SimplifiedAdaptiveDynamicDualPhaseStrategyV18.py @@ -0,0 +1,70 @@ +import numpy as np + + +class SimplifiedAdaptiveDynamicDualPhaseStrategyV18: + def __init__( + self, budget, dimension=5, population_size=100, F_min=0.5, F_max=0.8, CR_min=0.8, CR_max=0.9 + ): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F_min = F_min + self.F_max = F_max + self.CR_min = CR_min + self.CR_max = CR_max + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + a, b, c = np.random.choice(idxs, 3, replace=False) + mutant = population[a] + self.F * (population[b] - population[c]) + if phase == 2: + d, e = np.random.choice(idxs, 2, replace=False) + mutant += 0.5 * self.F * (population[d] - population[e]) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + return np.where(crossover_mask, mutant, target) + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + return (trial, f_trial) if f_trial < f_target else (target, f_target) + + def adjust_parameters(self, iteration, total_iterations): + # Using a linear schedule for parameter adaptation + t = iteration / total_iterations + self.F = self.F_min + t * (self.F_max - self.F_min) + self.CR = self.CR_max - t * (self.CR_max - self.CR_min) + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + + while evaluations < self.budget: + self.adjust_parameters(iteration, self.budget / self.pop_size) + + for i in range(self.pop_size): + phase = 2 if iteration > (self.budget / self.pop_size) / 2 else 1 + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + population[i], fitnesses[i] = self.select(population[i], trial, func) + evaluations += 1 + if fitnesses[i] < fitnesses[best_idx]: + best_idx = i + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SimulatedAnnealingOptimizer.py b/nevergrad/optimization/lama/SimulatedAnnealingOptimizer.py new file mode 100644 index 000000000..69f986214 --- /dev/null +++ b/nevergrad/optimization/lama/SimulatedAnnealingOptimizer.py @@ -0,0 +1,41 @@ +import numpy as np + + +class SimulatedAnnealingOptimizer: + def __init__(self, budget, initial_temperature=100.0, cooling_rate=0.99): + self.budget = budget + self.initial_temperature = initial_temperature + self.cooling_rate = cooling_rate + + def acceptance_probability(self, cost, new_cost, temperature): + if new_cost < cost: + return 1.0 + return np.exp((cost - new_cost) / temperature) + + def perturb_solution(self, current_solution, func, temperature): + candidate_solution = current_solution + np.random.normal(0, 0.1, size=current_solution.shape) + candidate_solution = np.clip(candidate_solution, func.bounds.lb, func.bounds.ub) + return candidate_solution + + def __call__(self, func): + temperature = self.initial_temperature + current_solution = np.random.uniform(func.bounds.lb, func.bounds.ub) + current_cost = func(current_solution) + best_solution = current_solution + best_cost = current_cost + + for _ in range(self.budget): + new_solution = self.perturb_solution(current_solution, func, temperature) + new_cost = func(new_solution) + + if self.acceptance_probability(current_cost, new_cost, temperature) > np.random.rand(): + current_solution = new_solution + current_cost = new_cost + + if new_cost < best_cost: + best_solution = new_solution + best_cost = new_cost + + temperature *= self.cooling_rate + + return best_cost, best_solution diff --git a/nevergrad/optimization/lama/SpiralSearchOptimizer.py b/nevergrad/optimization/lama/SpiralSearchOptimizer.py new file mode 100644 index 000000000..3011f6980 --- /dev/null +++ b/nevergrad/optimization/lama/SpiralSearchOptimizer.py @@ -0,0 +1,52 @@ +import numpy as np + + +class SpiralSearchOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initial point in the center of the search space + initial_point = np.zeros(self.dim) + current_point = initial_point + current_f = func(current_point) + if current_f < self.f_opt: + self.f_opt = current_f + self.x_opt = current_point + + # Parameters for spiral movement + radius = 5.0 # Maximum extent of the search space + angle_increment = np.pi / 4 # Incremental angle for spiral + radius_decrement_factor = 0.95 # Reduce radius after each full spiral + spiral_budget = self.budget + + while spiral_budget > 0: + num_points = int(2 * np.pi / angle_increment) + for i in range(num_points): + if spiral_budget <= 0: + break + + angle = i * angle_increment + dx = radius * np.cos(angle) + dy = radius * np.sin(angle) + candidate_point = current_point + np.array( + [dx, dy, 0, 0, 0] + ) # Spiral in 2D, constant in other dimensions + candidate_point = np.clip(candidate_point, -5.0, 5.0) # Ensure the candidate is within bounds + + candidate_f = func(candidate_point) + spiral_budget -= 1 + + if candidate_f < self.f_opt: + self.f_opt = candidate_f + self.x_opt = candidate_point + current_point = candidate_point # Move spiral center to new best location + + # Reduce the radius for the next spiral cycle + radius *= radius_decrement_factor + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/StabilizedQuantumCognitionOptimizerV11.py b/nevergrad/optimization/lama/StabilizedQuantumCognitionOptimizerV11.py new file mode 100644 index 000000000..66da8502a --- /dev/null +++ b/nevergrad/optimization/lama/StabilizedQuantumCognitionOptimizerV11.py @@ -0,0 +1,78 @@ +import numpy as np + + +class StabilizedQuantumCognitionOptimizerV11: + def __init__( + self, + budget=10000, + population_size=30, + inertia_weight=0.7, + cognitive_coefficient=1.8, + social_coefficient=1.8, + inertia_decay=0.99, + quantum_jump_rate=0.05, + min_quantum_scale=0.01, + max_quantum_scale=0.1, + adaptive_scale_factor=0.5, + ): + self.budget = budget + self.population_size = population_size + self.dim = 5 # Dimensionality of the problem + self.lb, self.ub = -5.0, 5.0 # Bounds of the search space + self.inertia_weight = inertia_weight + self.cognitive_coefficient = cognitive_coefficient + self.social_coefficient = social_coefficient + self.inertia_decay = inertia_decay + self.quantum_jump_rate = quantum_jump_rate + self.min_quantum_scale = min_quantum_scale + self.max_quantum_scale = max_quantum_scale + self.adaptive_scale_factor = adaptive_scale_factor + + def __call__(self, func): + # Initialize particle positions and velocities + particles = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + velocities = np.zeros_like(particles) + personal_bests = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best = personal_bests[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + # Quantum Jump Strategy with adaptive scaling + if np.random.rand() < self.quantum_jump_rate: + quantum_scale = np.random.uniform(self.min_quantum_scale, self.max_quantum_scale) + quantum_deviation = np.random.normal(0, quantum_scale, self.dim) + particles[i] = global_best + quantum_deviation + particles[i] = np.clip(particles[i], self.lb, self.ub) + else: + r1, r2 = np.random.rand(2) + velocities[i] = ( + self.inertia_weight * velocities[i] + + self.cognitive_coefficient * r1 * (personal_bests[i] - particles[i]) + + self.social_coefficient * r2 * (global_best - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], self.lb, self.ub) + score = func(particles[i]) + evaluations += 1 + + if score < personal_best_scores[i]: + personal_bests[i] = particles[i] + personal_best_scores[i] = score + + if score < global_best_score: + global_best = particles[i] + global_best_score = score + + # Decay inertia weight and quantum jump rate + self.inertia_weight *= self.inertia_decay + self.quantum_jump_rate *= 1 - self.adaptive_scale_factor + + if evaluations >= self.budget: + break + + return global_best_score, global_best diff --git a/nevergrad/optimization/lama/StabilizedQuantumConcentricOptimizer.py b/nevergrad/optimization/lama/StabilizedQuantumConcentricOptimizer.py new file mode 100644 index 000000000..1f87f61e4 --- /dev/null +++ b/nevergrad/optimization/lama/StabilizedQuantumConcentricOptimizer.py @@ -0,0 +1,65 @@ +import numpy as np + + +class StabilizedQuantumConcentricOptimizer: + def __init__( + self, + budget, + dim=5, + pop_size=100, + elite_rate=0.2, + initial_mutation_scale=0.5, + mutation_decay_factor=0.95, + ): + self.budget = budget + self.dim = dim + self.pop_size = pop_size + self.elite_count = int(pop_size * elite_rate) + self.initial_mutation_scale = initial_mutation_scale + self.mutation_decay_factor = mutation_decay_factor + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def initialize(self): + self.population = np.random.uniform(self.lower_bound, self.upper_bound, (self.pop_size, self.dim)) + self.fitnesses = np.full(self.pop_size, np.inf) + self.best_solution = None + self.best_fitness = np.inf + self.mutation_scale = self.initial_mutation_scale + + def evaluate_fitness(self, func): + for i in range(self.pop_size): + fitness = func(self.population[i]) + if fitness < self.fitnesses[i]: + self.fitnesses[i] = fitness + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_solution = np.copy(self.population[i]) + + def update_population(self): + # Sort population by fitness and select elites + sorted_indices = np.argsort(self.fitnesses) + elite_indices = sorted_indices[: self.elite_count] + non_elite_indices = sorted_indices[self.elite_count :] + + # Generate new solutions based on elites + for idx in non_elite_indices: + elite_sample = self.population[np.random.choice(elite_indices)] + deviation = np.random.normal( + 0, max(self.mutation_scale, 0.001), self.dim + ) # Ensuring non-negative scale + self.population[idx] = elite_sample + deviation + self.population[idx] = np.clip(self.population[idx], self.lower_bound, self.upper_bound) + + # Reduce mutation scale + self.mutation_scale *= self.mutation_decay_factor + + def __call__(self, func): + self.initialize() + evaluations = 0 + while evaluations < self.budget: + self.evaluate_fitness(func) + self.update_population() + evaluations += self.pop_size + + return self.best_fitness, self.best_solution diff --git a/nevergrad/optimization/lama/StabilizedRefinedEnhancedDynamicBalancingPSO.py b/nevergrad/optimization/lama/StabilizedRefinedEnhancedDynamicBalancingPSO.py new file mode 100644 index 000000000..1901572f6 --- /dev/null +++ b/nevergrad/optimization/lama/StabilizedRefinedEnhancedDynamicBalancingPSO.py @@ -0,0 +1,64 @@ +import numpy as np + + +class StabilizedRefinedEnhancedDynamicBalancingPSO: + def __init__( + self, budget=10000, population_size=200, omega=0.5, phi_p=0.2, phi_g=0.3, adaptive_threshold=0.1 + ): + self.budget = budget + self.population_size = population_size + self.omega = omega # Inertia coefficient + self.phi_p = phi_p # Coefficient of personal best + self.phi_g = phi_g # Coefficient of global best + self.adaptive_threshold = adaptive_threshold + self.dim = 5 # Dimension of the problem + + def __call__(self, func): + lb, ub = -5.0, 5.0 # Search space bounds + particles = np.random.uniform(lb, ub, (self.population_size, self.dim)) + velocities = np.zeros((self.population_size, self.dim)) + personal_best_positions = particles.copy() + personal_best_scores = np.array([func(p) for p in particles]) + global_best_position = particles[np.argmin(personal_best_scores)] + global_best_score = min(personal_best_scores) + + evaluations = self.population_size + + while evaluations < self.budget: + for i in range(self.population_size): + r_p = np.random.rand(self.dim) + r_g = np.random.rand(self.dim) + + velocities[i] = ( + self.omega * velocities[i] + + self.phi_p * r_p * (personal_best_positions[i] - particles[i]) + + self.phi_g * r_g * (global_best_position - particles[i]) + ) + + particles[i] += velocities[i] + particles[i] = np.clip(particles[i], lb, ub) + + current_score = func(particles[i]) + evaluations += 1 + + if current_score < personal_best_scores[i]: + personal_best_positions[i] = particles[i] + personal_best_scores[i] = current_score + + if current_score < global_best_score: + global_best_position = particles[i] + global_best_score = current_score + + if evaluations >= self.budget: + break + + # Adaptive diversity control + diversity = np.std(particles) + if diversity < self.adaptive_threshold: + self.phi_p += 0.01 # encourage exploration + self.phi_g -= 0.01 # reduce exploitation + else: + self.phi_p -= 0.01 # reduce exploration + self.phi_g += 0.01 # encourage exploitation + + return global_best_score, global_best_position diff --git a/nevergrad/optimization/lama/StochasticAdaptiveEvolutionaryOptimizer.py b/nevergrad/optimization/lama/StochasticAdaptiveEvolutionaryOptimizer.py new file mode 100644 index 000000000..f9089b9da --- /dev/null +++ b/nevergrad/optimization/lama/StochasticAdaptiveEvolutionaryOptimizer.py @@ -0,0 +1,97 @@ +import numpy as np + + +class StochasticAdaptiveEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.lb = -5.0 + self.ub = 5.0 + + def differential_mutation(self, target, best, r1, r2, F): + mutant = target + F * (best - target) + F * (r1 - r2) + return np.clip(mutant, self.lb, self.ub) + + def crossover(self, target, mutant, CR): + crossover_mask = np.random.rand(self.dim) < CR + offspring = np.where(crossover_mask, mutant, target) + return offspring + + def local_search(self, x, func, max_iter, step_size): + best_x = x.copy() + best_f = func(x) + for _ in range(max_iter): + perturbation = np.random.uniform(-step_size, step_size, self.dim) + new_x = np.clip(best_x + perturbation, self.lb, self.ub) + new_f = func(new_x) + if new_f < best_f: + best_x = new_x + best_f = new_f + return best_x, best_f + + def adaptive_parameters(self, iteration, max_iterations): + F = 0.5 + 0.5 * np.random.rand() + CR = 0.9 - 0.5 * (iteration / max_iterations) + return F, CR + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = len(fitness) + max_iterations = self.budget // population_size + + iteration = 0 + while evaluations < self.budget: + for i in range(population_size): + if evaluations >= self.budget: + break + + F, CR = self.adaptive_parameters(iteration, max_iterations) + + idxs = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best = population[np.argmin(fitness)] + mutant_vector = self.differential_mutation(population[i], best, a, b, F) + trial_vector = self.crossover(population[i], mutant_vector, CR) + + trial_fitness = func(trial_vector) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial_vector + fitness[i] = trial_fitness + + if trial_fitness < self.f_opt: + self.f_opt = trial_fitness + self.x_opt = trial_vector + + if np.random.rand() < 0.3 and evaluations + 5 <= self.budget: + local_best_x, local_best_f = self.local_search( + population[i], func, max_iter=10, step_size=0.05 + ) + evaluations += 5 + if local_best_f < fitness[i]: + population[i] = local_best_x + fitness[i] = local_best_f + if local_best_f < self.f_opt: + self.f_opt = local_best_f + self.x_opt = local_best_x + + if evaluations % (population_size * 3) == 0: + worst_indices = np.argsort(fitness)[-int(0.3 * population_size) :] + for idx in worst_indices: + population[idx] = np.random.uniform(self.lb, self.ub, self.dim) + fitness[idx] = func(population[idx]) + evaluations += 1 + + if iteration % (max_iterations // 3) == 0 and population_size > 20: + best_indices = np.argsort(fitness)[: int(0.7 * population_size)] + population = population[best_indices] + fitness = fitness[best_indices] + population_size = len(population) + + iteration += 1 + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/StochasticBalancingOptimizer.py b/nevergrad/optimization/lama/StochasticBalancingOptimizer.py new file mode 100644 index 000000000..4bde91ccf --- /dev/null +++ b/nevergrad/optimization/lama/StochasticBalancingOptimizer.py @@ -0,0 +1,78 @@ +import numpy as np + + +class StochasticBalancingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the optimization problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.epsilon = 1e-8 # To prevent division by zero in adaptive mechanisms + + def __call__(self, func): + # Optimization setup + current_budget = 0 + population_size = 100 + mutation_factor = 0.5 # Dynamic mutation factor initialization + crossover_prob = 0.7 # Crossover probability + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + best_index = np.argmin(fitness) + best_solution = population[best_index] + best_fitness = fitness[best_index] + + while current_budget < self.budget: + new_population = np.empty_like(population) + for i in range(population_size): + if current_budget >= self.budget: + break + + inds = np.random.choice(population_size, 3, replace=False) + x1, x2, x3 = population[inds] + + # Mutation step + mutant = x1 + mutation_factor * (x2 - x3) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover step + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + trial_fitness = func(trial) + current_budget += 1 + + # Selection step + if trial_fitness < fitness[i]: + new_population[i] = trial + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_solution = trial + else: + new_population[i] = population[i] + + population = new_population + fitness = np.array([func(ind) for ind in population]) + current_budget += population_size + + # Adapt mutation and crossover rates based on diversity and fitness improvements + diversity = np.std(population) + if diversity < 1e-1: # Low diversity triggers more exploration + mutation_factor = min(1.0, mutation_factor + 0.1) + else: + mutation_factor = max(0.1, mutation_factor - 0.02) + + crossover_prob = min( + 1.0, + crossover_prob + + 0.05 * (1 - diversity / (self.upper_bound - self.lower_bound + self.epsilon)), + ) + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/StochasticGradientEnhancedDE.py b/nevergrad/optimization/lama/StochasticGradientEnhancedDE.py new file mode 100644 index 000000000..fb64c4154 --- /dev/null +++ b/nevergrad/optimization/lama/StochasticGradientEnhancedDE.py @@ -0,0 +1,99 @@ +import numpy as np + + +class StochasticGradientEnhancedDE: + def __init__(self, budget, population_size=20, crossover_rate=0.7, mutation_factor=0.8): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_factor = mutation_factor + self.base_lr = 0.1 + self.epsilon = 1e-8 + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + def maintain_diversity(population, fitness): + for i in range(len(population)): + for j in range(i + 1, len(population)): + if np.linalg.norm(population[i] - population[j]) < 1e-3: + if fitness[i] > fitness[j]: + population[i] = random_vector() + fitness[i] = func(population[i]) + else: + population[j] = random_vector() + fitness[j] = func(population[j]) + + def select_parents(population, fitness): + # Normalize fitness values to select parents based on their inverse fitness + fitness = np.array(fitness) + fitness = fitness - np.min(fitness) + 1e-8 # Ensure all fitness values are positive + probabilities = 1 / fitness + probabilities /= probabilities.sum() + parents_idx = np.random.choice(np.arange(len(population)), size=3, p=probabilities, replace=False) + return population[parents_idx[0]], population[parents_idx[1]], population[parents_idx[2]] + + # Initialize population + population = [random_vector() for _ in range(self.population_size)] + fitness = [func(ind) for ind in population] + + for ind, fit in zip(population, fitness): + if fit < self.f_opt: + self.f_opt = fit + self.x_opt = ind + + for i in range(1, self.budget): + success_count = 0 + + # Differential Evolution + for j in range(self.population_size): + target = population[j] + a, b, c = select_parents(population, fitness) + mutant = np.clip(a + self.mutation_factor * (b - c), self.bounds[0], self.bounds[1]) + + trial = np.copy(target) + for k in range(self.dim): + if np.random.rand() < self.crossover_rate: + trial[k] = mutant[k] + + grad = gradient_estimate(trial) + perturbation = np.random.randn(self.dim) * self.base_lr + new_x = trial - self.epsilon * grad + perturbation + + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < fitness[j]: + population[j] = new_x + fitness[j] = new_f + success_count += 1 + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + + # Maintain diversity + maintain_diversity(population, fitness) + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = StochasticGradientEnhancedDE(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/StochasticGradientExploration.py b/nevergrad/optimization/lama/StochasticGradientExploration.py new file mode 100644 index 000000000..d3177aabd --- /dev/null +++ b/nevergrad/optimization/lama/StochasticGradientExploration.py @@ -0,0 +1,66 @@ +import numpy as np + + +class StochasticGradientExploration: + def __init__(self, budget): + self.budget = budget + self.dim = 5 + self.bounds = [-5.0, 5.0] + self.learning_rate = 0.1 + self.epsilon = 1e-8 + self.exploration_prob = 0.2 # Increase exploration probability to 20% + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + def random_vector(): + return np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + + def gradient_estimate(x, h=1e-5): + grad = np.zeros_like(x) + for i in range(len(x)): + x1 = np.copy(x) + x2 = np.copy(x) + x1[i] += h + x2[i] -= h + grad[i] = (func(x1) - func(x2)) / (2 * h) + return grad + + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + + for i in range(1, self.budget): + if np.random.rand() < self.exploration_prob: + # Perform random exploration + x = random_vector() + f = func(x) + if f < self.f_opt: + self.f_opt = f + self.x_opt = x + else: + # Perform gradient-based exploitation with stochastic perturbation + grad = gradient_estimate(x) + adapt_lr = self.learning_rate / (np.sqrt(i) + self.epsilon) + perturbation = np.random.randn(self.dim) * adapt_lr # Stochastic perturbation + + new_x = x - adapt_lr * grad + perturbation + new_x = np.clip(new_x, self.bounds[0], self.bounds[1]) + new_f = func(new_x) + + if new_f < self.f_opt: + self.f_opt = new_f + self.x_opt = new_x + x = new_x + else: + x = random_vector() # Restart exploration from random point + + return self.f_opt, self.x_opt + + +# Example of usage: +# optimizer = StochasticGradientExploration(budget=1000) +# best_value, best_solution = optimizer(some_black_box_function) diff --git a/nevergrad/optimization/lama/StochasticGradientHybridOptimization.py b/nevergrad/optimization/lama/StochasticGradientHybridOptimization.py new file mode 100644 index 000000000..929c28d22 --- /dev/null +++ b/nevergrad/optimization/lama/StochasticGradientHybridOptimization.py @@ -0,0 +1,54 @@ +import numpy as np + + +class StochasticGradientHybridOptimization: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=100): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_top_individuals(self, population, fitness, num_best): + indices = np.argsort(fitness)[:num_best] + return population[indices], fitness[indices] + + def adapt_mutation_strength(self, generation, base_strength=1.0, decay_rate=0.98): + return base_strength * (decay_rate**generation) + + def mutate_population(self, population, strength): + mutations = np.random.normal(0, strength, population.shape) + return np.clip(population + mutations, self.lower_bound, self.upper_bound) + + def hybridize(self, best_individuals, mutation_strength, population_size): + num_top = len(best_individuals) + new_population = np.tile(best_individuals, (population_size // num_top, 1)) + return self.mutate_population(new_population, mutation_strength) + + def __call__(self, func): + population_size = 200 + num_generations = self.budget // population_size + num_best = 10 # Top individuals to focus on + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_individuals, best_fitness = self.select_top_individuals(population, fitness, num_best) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_individuals[0] + + # Adaptively change mutation strength + strength = self.adapt_mutation_strength(gen) + population = self.hybridize(best_individuals, strength, population_size) + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/StochasticGradientQuorumOptimization.py b/nevergrad/optimization/lama/StochasticGradientQuorumOptimization.py new file mode 100644 index 000000000..282d3934f --- /dev/null +++ b/nevergrad/optimization/lama/StochasticGradientQuorumOptimization.py @@ -0,0 +1,75 @@ +import numpy as np + + +class StochasticGradientQuorumOptimization: + def __init__( + self, + budget, + dimension=5, + population_size=80, + elite_fraction=0.25, + mutation_scale=0.1, + momentum=0.95, + learning_rate=0.02, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = int(max(1, population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.momentum = momentum + self.learning_rate = learning_rate + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Track best solution + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + velocity = np.zeros(self.dimension) + + # Optimization loop + while evaluations < self.budget: + new_population = np.empty_like(population) + for i in range(self.population_size): + # Select elite indices including the best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Determine the local best + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Gradient-inspired mutation and update strategy + gradient = best_individual - local_best + random_noise = np.random.normal(0, self.mutation_scale, self.dimension) + mutation = gradient * random_noise * self.learning_rate + self.momentum * velocity + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update the best solution and velocity + if child_fitness < best_fitness: + velocity = child - best_individual + self.momentum * velocity + best_fitness = child_fitness + best_individual = child + + new_population[i, :] = child + + if evaluations >= self.budget: + break + + population = new_population + fitness = np.array([func(ind) for ind in population]) + + # Adapt mutation scale and elite count dynamically + self.mutation_scale *= 1 + np.random.uniform(-0.05, 0.05) + self.elite_count = int(max(1, self.elite_count * np.random.uniform(0.95, 1.05))) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/StrategicAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/StrategicAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..1a1e26580 --- /dev/null +++ b/nevergrad/optimization/lama/StrategicAdaptiveDifferentialEvolution.py @@ -0,0 +1,69 @@ +import numpy as np + + +class StrategicAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality as given + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Configuration + population_size = 150 # Increased population size for robust exploration of search space + mutation_factor = 0.8 # Moderately high initial mutation factor for broad exploration + crossover_prob = 0.7 # Relatively lower crossover probability to maintain diversity + adaptive_factor = 0.95 # Adaptive factor to modify mutation and crossover based on fitness trend + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_value = fitness[best_idx] + + # Analyze and adapt strategy parameters dynamically + last_improvement = 0 # Track last improvement generation + improvements = 0 # Track total improvements + + for generation in range(int(self.budget / population_size)): + if generation - last_improvement > 50: # If no improvement over 50 generations + mutation_factor *= 1.05 # Increase mutation factor to escape potential local minima + crossover_prob *= 1.05 # Increase crossover probability to encourage diversity + last_improvement = generation # Reset last improvement tracker + + # Generate new population + for i in range(population_size): + indices = [j for j in range(population_size) if j != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + mutant = a + mutation_factor * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover (binomial) + trial = np.array( + [ + mutant[j] if np.random.rand() < crossover_prob else population[i][j] + for j in range(self.dim) + ] + ) + + # Selection and update + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Check if this is the best solution found + if trial_fitness < best_value: + best_value = trial_fitness + best_solution = trial.copy() + improvements += 1 + last_improvement = generation + + # Adapt mutation and crossover probabilistically based on historical improvements + if improvements > 0: + mutation_factor *= adaptive_factor + crossover_prob *= adaptive_factor + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/StrategicDifferentialEvolution.py b/nevergrad/optimization/lama/StrategicDifferentialEvolution.py new file mode 100644 index 000000000..5929ec32a --- /dev/null +++ b/nevergrad/optimization/lama/StrategicDifferentialEvolution.py @@ -0,0 +1,57 @@ +import numpy as np + + +class StrategicDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 + self.pop_size = 150 # Population size has been adjusted for enhanced exploration + self.F_base = 0.5 # Base differential weight + self.CR_base = 0.5 # Base crossover probability + + def __call__(self, func): + # Initialize population randomly within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Main evolutionary loop + n_iterations = int(self.budget / self.pop_size) + for iteration in range(n_iterations): + for i in range(self.pop_size): + # Indices for mutation strategy (excluding current index i) + idxs = [idx for idx in range(self.pop_size) if idx != i] + a, b, c = pop[np.random.choice(idxs, 3, replace=False)] + + # Mutation strategy: DE/rand-to-best/2 + best = pop[best_idx] + mutant = np.clip( + pop[i] + self.F_base * (best - pop[i]) + self.F_base * (a - b + c - pop[i]), -5.0, 5.0 + ) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR_base + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + fitness[i] = trial_fitness + pop[i] = trial + # Update best solution if found + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + best_idx = i + + # Adaptive mutation and crossover rates + self.F_base = 0.1 + 0.7 * (1 - iteration / n_iterations) + self.CR_base = 0.1 + 0.8 * (iteration / n_iterations) + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/StrategicDiminishingAdaptiveEvolver.py b/nevergrad/optimization/lama/StrategicDiminishingAdaptiveEvolver.py new file mode 100644 index 000000000..7a6bbe688 --- /dev/null +++ b/nevergrad/optimization/lama/StrategicDiminishingAdaptiveEvolver.py @@ -0,0 +1,69 @@ +import numpy as np + + +class StrategicDiminishingAdaptiveEvolver: + def __init__( + self, + budget, + dimension=5, + lower_bound=-5.0, + upper_bound=5.0, + population_size=30, + initial_step_size=1.0, + min_step_size=0.001, + elite_ratio=0.2, + ): + self.budget = budget + self.dimension = dimension + self.bounds = np.array([lower_bound, upper_bound]) + self.population_size = population_size + self.step_size = initial_step_size + self.min_step_size = min_step_size + self.elite_count = int(population_size * elite_ratio) + + def initialize_population(self): + return np.random.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dimension)) + + def mutate(self, individual, scale): + mutation = np.random.normal(0, scale, self.dimension) + return np.clip(individual + mutation, self.bounds[0], self.bounds[1]) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_elites(self, population, fitness): + elite_indices = np.argsort(fitness)[: self.elite_count] + return population[elite_indices], fitness[elite_indices] + + def __call__(self, func): + population = self.initialize_population() + fitness = self.evaluate_population(func, population) + best_individual, best_fitness = population[np.argmin(fitness)], np.min(fitness) + + evaluations = self.population_size + generation = 0 + + while evaluations < self.budget: + scale = max( + self.min_step_size, self.step_size / (1 + generation / 10.0) + ) # Diminish step size strategically + + new_population = np.array([self.mutate(ind, scale) for ind in population]) + new_fitness = self.evaluate_population(func, new_population) + + combined_population = np.vstack((population, new_population)) + combined_fitness = np.hstack((fitness, new_fitness)) + indices = np.argsort(combined_fitness) + population = combined_population[indices[: self.population_size]] + fitness = combined_fitness[indices[: self.population_size]] + + current_best = population[np.argmin(fitness)] + current_best_fitness = np.min(fitness) + if current_best_fitness < best_fitness: + best_fitness = current_best_fitness + best_individual = current_best + + evaluations += self.population_size + generation += 1 + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/StrategicHybridDE.py b/nevergrad/optimization/lama/StrategicHybridDE.py new file mode 100644 index 000000000..30cc3baa6 --- /dev/null +++ b/nevergrad/optimization/lama/StrategicHybridDE.py @@ -0,0 +1,68 @@ +import numpy as np + + +class StrategicHybridDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_range=0.3, CR=0.9, hybridization_factor=0.2 + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.hybridization_factor = hybridization_factor # Factor for hybrid mutation strategy + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Hybrid mutation strategy + if np.random.rand() < self.hybridization_factor: + # Use best individual for base with a probability defined by hybridization_factor + base = best_individual + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust F + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using differential evolution strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using binomial method + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/StrategicMultiPhaseEvolutionaryAlgorithm.py b/nevergrad/optimization/lama/StrategicMultiPhaseEvolutionaryAlgorithm.py new file mode 100644 index 000000000..4e37bd25f --- /dev/null +++ b/nevergrad/optimization/lama/StrategicMultiPhaseEvolutionaryAlgorithm.py @@ -0,0 +1,93 @@ +import numpy as np + + +class StrategicMultiPhaseEvolutionaryAlgorithm: + def __init__(self, budget, dimension=5, lower_bound=-5.0, upper_bound=5.0, population_size=50): + self.budget = budget + self.dimension = dimension + self.bounds = {"lb": lower_bound, "ub": upper_bound} + self.population_size = population_size + self.mutation_rate = 0.1 # Initial mutation rate + self.crossover_probability = 0.8 # Probability of crossover + self.elitism = True # Enable elitism + + def mutate(self, individual, phase): + """Apply Gaussian mutation based on the phase of the algorithm.""" + if phase < 0.3: + scale = 0.5 # High exploration in the early phase + elif phase < 0.6: + scale = 0.2 # Focused exploration in the mid phase + else: + scale = 0.05 # Fine-tuning in the late phase + + mutation = np.random.normal(0, scale, self.dimension) + mutant = individual + mutation + return np.clip(mutant, self.bounds["lb"], self.bounds["ub"]) + + def crossover(self, parent1, parent2): + """Simulated binary crossover, for better offspring production.""" + alpha = np.random.uniform(-0.5, 1.5, self.dimension) + offspring = alpha * parent1 + (1 - alpha) * parent2 + return np.clip(offspring, self.bounds["lb"], self.bounds["ub"]) + + def select(self, population, fitness, offspring, offspring_fitness): + """Selects the next generation using elitism and tournament selection.""" + combined_population = np.vstack((population, offspring)) + combined_fitness = np.concatenate((fitness, offspring_fitness)) + indices = np.argsort(combined_fitness) + + if self.elitism: + best_indices = indices[: self.population_size] + else: + # Random selection with preference to lower fitness + probabilities = 1 / (1 + np.exp(combined_fitness - np.median(combined_fitness))) + best_indices = np.random.choice( + len(combined_population), size=self.population_size, p=probabilities / np.sum(probabilities) + ) + + return combined_population[best_indices], combined_fitness[best_indices] + + def __call__(self, func): + # Initialize population + population = np.random.uniform( + self.bounds["lb"], self.bounds["ub"], (self.population_size, self.dimension) + ) + fitness = np.array([func(individual) for individual in population]) + f_opt = np.min(fitness) + x_opt = population[np.argmin(fitness)] + + # Evolutionary loop + for iteration in range(self.budget // self.population_size): + phase = iteration / (self.budget / self.population_size) + offspring = [] + offspring_fitness = [] + + # Generate offspring + for idx in range(self.population_size): + # Mutation + mutant = self.mutate(population[idx], phase) + + # Crossover + if np.random.rand() < self.crossover_probability: + partner_idx = np.random.randint(self.population_size) + child = self.crossover(mutant, population[partner_idx]) + else: + child = mutant + + # Evaluate + child_fitness = func(child) + offspring.append(child) + offspring_fitness.append(child_fitness) + + # Selection + offspring = np.array(offspring) + offspring_fitness = np.array(offspring_fitness) + population, fitness = self.select(population, fitness, offspring, offspring_fitness) + + # Update best found solution + min_idx = np.argmin(fitness) + if fitness[min_idx] < f_opt: + f_opt = fitness[min_idx] + x_opt = population[min_idx] + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/StrategicQuorumMutationWithAdaptiveElites.py b/nevergrad/optimization/lama/StrategicQuorumMutationWithAdaptiveElites.py new file mode 100644 index 000000000..c90d85e0a --- /dev/null +++ b/nevergrad/optimization/lama/StrategicQuorumMutationWithAdaptiveElites.py @@ -0,0 +1,72 @@ +import numpy as np + + +class StrategicQuorumMutationWithAdaptiveElites: + def __init__( + self, + budget, + dimension=5, + population_size=100, + elite_fraction=0.1, + mutation_scale=0.5, + elite_adaptation=0.05, + ): + self.budget = budget + self.dimension = dimension + self.population_size = population_size + self.elite_count = max(1, int(population_size * elite_fraction)) + self.mutation_scale = mutation_scale + self.elite_adaptation = elite_adaptation + + def __call__(self, func): + # Initialize population + population = np.random.uniform(-5.0, 5.0, (self.population_size, self.dimension)) + fitness = np.array([func(individual) for individual in population]) + evaluations = self.population_size + + # Initialize best solution found + best_idx = np.argmin(fitness) + best_individual = population[best_idx] + best_fitness = fitness[best_idx] + + # Evolution loop + while evaluations < self.budget: + new_population = [] + for i in range(self.population_size): + # Select a quorum randomly, include best individual + quorum_indices = np.random.choice(self.population_size, self.elite_count - 1, replace=False) + quorum_indices = np.append(quorum_indices, best_idx) + quorum = population[quorum_indices] + quorum_fitness = fitness[quorum_indices] + + # Find the best in the quorum + local_best_idx = np.argmin(quorum_fitness) + local_best = quorum[local_best_idx] + + # Mutation influenced by both best individual and local best + direction = best_individual - local_best + mutation = np.random.normal(0, self.mutation_scale, self.dimension) * direction + child = np.clip(local_best + mutation, -5.0, 5.0) + child_fitness = func(child) + evaluations += 1 + + # Update best solution found + if child_fitness < best_fitness: + best_fitness = child_fitness + best_individual = child + + new_population.append(child) + + if evaluations >= self.budget: + break + + population = np.array(new_population) + fitness = np.array([func(ind) for ind in population]) + + # Adapt the elite count based on progress + if self.elite_adaptation > 0: + self.elite_count = max( + 1, int(self.elite_count * (1 + self.elite_adaptation * np.random.uniform(-1, 1))) + ) + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/StrategicResilienceAdaptiveSearch.py b/nevergrad/optimization/lama/StrategicResilienceAdaptiveSearch.py new file mode 100644 index 000000000..1eb716300 --- /dev/null +++ b/nevergrad/optimization/lama/StrategicResilienceAdaptiveSearch.py @@ -0,0 +1,72 @@ +import numpy as np + + +class StrategicResilienceAdaptiveSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound for each dimension + self.ub = 5.0 # Upper bound for each dimension + + def __call__(self, func): + # Initial solution and function value tracking + self.f_opt = np.Inf + self.x_opt = None + + # Initialize population + population_size = 100 + population = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(individual) for individual in population]) + + # Find initial best solution + best_idx = np.argmin(fitness) + self.f_opt = fitness[best_idx] + self.x_opt = population[best_idx].copy() + + # Stagnation counter to enable strategic diversity injections + stagnation_counter = 0 + + # Main optimization loop + evaluations_used = population_size + while evaluations_used < self.budget: + # Adjust mutation scale based on remaining budget and stagnation + remaining_budget = self.budget - evaluations_used + mutation_scale = 0.1 * (remaining_budget / self.budget) + 0.05 * (stagnation_counter / 10) + + # Generate new candidates + for i in range(population_size): + perturbation = np.random.normal(0, mutation_scale, self.dim) + candidate = population[i] + perturbation + candidate = np.clip(candidate, self.lb, self.ub) + candidate_fitness = func(candidate) + evaluations_used += 1 + + # Solution acceptance or rejection + if candidate_fitness < fitness[i]: + population[i] = candidate + fitness[i] = candidate_fitness + if candidate_fitness < self.f_opt: + self.f_opt = candidate_fitness + self.x_opt = candidate.copy() + stagnation_counter = 0 # Reset stagnation counter on improvement + else: + stagnation_counter += 1 + + # Strategic diversity injection mechanism + if stagnation_counter >= 50: + stagnation_counter = 0 # Reset counter + # Inject new random solutions + new_inds = np.random.uniform(self.lb, self.ub, (population_size // 4, self.dim)) + new_fitness = np.array([func(ind) for ind in new_inds]) + # Replace a quarter of the worst solutions + worst_indices = np.argsort(fitness)[-population_size // 4 :] + population[worst_indices] = new_inds + fitness[worst_indices] = new_fitness + evaluations_used += population_size // 4 + + return self.f_opt, self.x_opt + + +# Example of usage (requires a function `func` and bounds to run): +# optimizer = StrategicResilienceAdaptiveSearch(budget=10000) +# best_value, best_solution = optimizer(func) diff --git a/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimization.py b/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimization.py new file mode 100644 index 000000000..32aafd020 --- /dev/null +++ b/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimization.py @@ -0,0 +1,91 @@ +import numpy as np + + +class SuperDynamicQuantumSwarmOptimization: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + + def update_parameters(self, iteration): + self.inertia_weight = self.max_inertia_weight - ( + self.max_inertia_weight - self.min_inertia_weight + ) * ((iteration + 1) / self.budget) + self.cognitive_weight = self.max_cognitive_weight - ( + self.max_cognitive_weight - self.min_cognitive_weight + ) * ((iteration + 1) / self.budget) + self.social_weight = self.max_social_weight + (self.min_social_weight - self.max_social_weight) * ( + (iteration + 1) / self.budget + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(self.search_space[0], self.search_space[1]) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimizationImproved.py b/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimizationImproved.py new file mode 100644 index 000000000..250a7a727 --- /dev/null +++ b/nevergrad/optimization/lama/SuperDynamicQuantumSwarmOptimizationImproved.py @@ -0,0 +1,93 @@ +import numpy as np + + +class SuperDynamicQuantumSwarmOptimizationImproved: + def __init__( + self, + budget=10000, + num_particles=30, + max_inertia_weight=0.9, + min_inertia_weight=0.6, + max_cognitive_weight=2.5, + min_cognitive_weight=1.5, + max_social_weight=1.7, + min_social_weight=0.8, + boundary_handling=True, + alpha=0.6, + delta=0.1, + decay_rate=0.9, + ): + self.budget = budget + self.num_particles = num_particles + self.max_inertia_weight = max_inertia_weight + self.min_inertia_weight = min_inertia_weight + self.max_cognitive_weight = max_cognitive_weight + self.min_cognitive_weight = min_cognitive_weight + self.max_social_weight = max_social_weight + self.min_social_weight = min_social_weight + self.alpha = alpha + self.decay_rate = decay_rate + self.dim = 5 + self.search_space = (-5.0, 5.0) + self.particles = np.random.uniform( + self.search_space[0], self.search_space[1], (self.num_particles, self.dim) + ) + self.velocities = np.zeros((self.num_particles, self.dim)) + self.personal_bests = self.particles.copy() + self.personal_best_values = np.full(self.num_particles, np.inf) + self.global_best = None + self.global_best_value = np.inf + self.boundary_handling = boundary_handling + self.delta = delta + + def update_parameters(self, iteration): + self.inertia_weight = max( + self.min_inertia_weight, self.max_inertia_weight * (self.decay_rate**iteration) + ) + self.cognitive_weight = max( + self.min_cognitive_weight, self.max_cognitive_weight * (self.decay_rate**iteration) + ) + self.social_weight = max( + self.min_social_weight, self.max_social_weight * (self.decay_rate**iteration) + ) + + def update_velocity_position(self, i, func): + current_position = self.particles[i] + velocity_term1 = self.inertia_weight * self.velocities[i] + velocity_term2 = ( + self.cognitive_weight * np.random.rand() * (self.personal_bests[i] - current_position) + ) + velocity_term3 = ( + self.social_weight + * np.random.rand() + * (self.global_best - current_position if self.global_best is not None else 0) + ) + new_velocity = velocity_term1 + velocity_term2 + velocity_term3 + new_position = ( + current_position + + self.alpha * new_velocity + + (1 - self.alpha) * np.random.uniform(self.search_space[0], self.search_space[1]) + ) + + if self.boundary_handling: + new_position = np.clip(new_position, self.search_space[0], self.search_space[1]) + + f = func(new_position) + if f < self.personal_best_values[i]: + self.personal_bests[i] = new_position + self.personal_best_values[i] = f + + if f < self.global_best_value: + self.global_best = new_position + self.global_best_value = f + + self.velocities[i] = self.delta * new_velocity + self.particles[i] = new_position + + def __call__(self, func): + for i in range(self.budget): + self.update_parameters(i) + for i in range(self.num_particles): + self.update_velocity_position(i, func) + + return self.global_best_value, self.global_best diff --git a/nevergrad/optimization/lama/SuperOptimizedRAMEDS.py b/nevergrad/optimization/lama/SuperOptimizedRAMEDS.py new file mode 100644 index 000000000..3fe90c499 --- /dev/null +++ b/nevergrad/optimization/lama/SuperOptimizedRAMEDS.py @@ -0,0 +1,89 @@ +import numpy as np + + +class SuperOptimizedRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.5, + F_max=1.2, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Dynamic adjustment of mutation factor based on the convergence rate + dynamic_factor = evaluations / self.budget + F = self.F_min + (self.F_max - self.F_min) * np.exp(-4 * dynamic_factor) # Exponential decay + + # Crossover rate update based on a sinusoidal pattern + self.crossover_rate = 0.5 + 0.45 * np.sin(np.pi * dynamic_factor) + + # Update elites from both population and memory + combined_population = np.concatenate((population, memory[: np.argmin(memory_fitness)])) + combined_fitness = np.concatenate((fitness, memory_fitness[: np.argmin(memory_fitness)])) + elite_indices = np.argsort(combined_fitness)[: self.elite_size] + elite = combined_population[elite_indices].copy() + elite_fitness = combined_fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip( + a + F * (b - c + elite[np.random.randint(self.elite_size)] - population[i]), lb, ub + ) + + # Crossover operation + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory strategy: store replaced solutions if they are better than the worst in memory + if trial_fitness < np.max(memory_fitness): + replace_idx = np.argmax(memory_fitness) + memory[replace_idx] = population[i] + memory_fitness[replace_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SuperRefinedRAMEDSv5.py b/nevergrad/optimization/lama/SuperRefinedRAMEDSv5.py new file mode 100644 index 000000000..8cd69d65e --- /dev/null +++ b/nevergrad/optimization/lama/SuperRefinedRAMEDSv5.py @@ -0,0 +1,82 @@ +import numpy as np + + +class SuperRefinedRAMEDSv5: + def __init__( + self, + budget, + population_size=50, + base_crossover_rate=0.9, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.base_crossover_rate = base_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory and elite structures + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + # Adaptive mutation factor with dynamic modulation + F = self.F_min + (self.F_max - self.F_min) * np.sin(np.pi * evaluations / self.budget) + + for i in range(self.population_size): + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + best_or_elite = elite[np.random.randint(0, self.elite_size)] + mutant = np.clip( + population[i] + F * (best_or_elite - population[i] + a - b), self.lb, self.ub + ) + + # Adaptive crossover + crossover_rate = self.base_crossover_rate * (1 - evaluations / self.budget) + cross_points = np.random.rand(self.dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Selection and memory update + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + worst_idx = np.argmax(memory_fitness) + if memory_fitness[worst_idx] > fitness[i]: + memory[worst_idx] = population[i].copy() + memory_fitness[worst_idx] = fitness[i] + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5.py b/nevergrad/optimization/lama/SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5.py new file mode 100644 index 000000000..7976ddf1f --- /dev/null +++ b/nevergrad/optimization/lama/SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5.py @@ -0,0 +1,85 @@ +import numpy as np + + +class SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): + x_new = x + 0.1 * np.random.randn(self.dim) + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.6 - 0.45 * t / self.budget # Adjusted inertia weight update for better convergence + + def update_parameters(self, t): + return 1.8 - 1.6 * t / (1.8 * self.budget), 2.3 - 1.8 * t / ( + 1.8 * self.budget + ) # Refined cognitive and social weights update for improved exploration + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.8 * r3 * (global_best_pos - particles_pos[i]) # Adjusted acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search every 50 iterations + if t % 50 == 0: + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16.py b/nevergrad/optimization/lama/SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16.py new file mode 100644 index 000000000..93bba0daf --- /dev/null +++ b/nevergrad/optimization/lama/SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16.py @@ -0,0 +1,95 @@ +import numpy as np + + +class SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16: + def __init__( + self, + budget=10000, + num_particles=30, + inertia_weight=0.7, + cognitive_weight=1.0, + social_weight=1.0, + damping=0.9, + step_size=0.1, + boundary=5.0, + ): + self.budget = budget + self.dim = 5 + self.num_particles = num_particles + self.inertia_weight = inertia_weight + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.damping = damping + self.step_size = step_size + self.best_fitness = np.inf + self.best_position = None + self.boundary = boundary + + def initialize_particles(self): + self.particles_position = np.random.uniform( + -self.boundary, self.boundary, (self.num_particles, self.dim) + ) + self.particles_velocity = np.zeros((self.num_particles, self.dim)) + self.particles_best_position = self.particles_position.copy() + self.particles_best_fitness = np.full(self.num_particles, np.inf) + self.global_best_position = None + self.global_best_fitness = np.inf + + def update_particles(self, func): + for i in range(self.num_particles): + fitness = func(self.particles_position[i]) + if fitness < self.particles_best_fitness[i]: + self.particles_best_fitness[i] = fitness + self.particles_best_position[i] = self.particles_position[i].copy() + + if fitness < self.global_best_fitness: + self.global_best_fitness = fitness + self.global_best_position = self.particles_position[i].copy() + + inertia_term = self.inertia_weight * self.particles_velocity[i] + cognitive_term = ( + self.cognitive_weight + * np.random.rand() + * (self.particles_best_position[i] - self.particles_position[i]) + ) + social_term = ( + self.social_weight + * np.random.rand() + * (self.global_best_position - self.particles_position[i]) + ) + + self.particles_velocity[i] = self.damping * ( + self.particles_velocity[i] + self.step_size * (inertia_term + cognitive_term + social_term) + ) + self.particles_position[i] += self.particles_velocity[i] + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def adapt_parameters(self): + self.step_size = np.clip(self.step_size + 0.001 * np.random.randn(), 0.01, 0.2) + self.damping = np.clip(self.damping - 0.001 * np.random.randn(), 0.9, 0.98) + self.inertia_weight = np.clip(self.inertia_weight + 0.001 * np.random.randn(), 0.7, 0.8) + self.cognitive_weight = np.clip(self.cognitive_weight + 0.001 * np.random.randn(), 0.9, 1.1) + self.social_weight = np.clip(self.social_weight + 0.001 * np.random.randn(), 0.9, 1.1) + + def enhance_exploration(self): + for i in range(self.num_particles): + self.particles_position[i] += np.random.normal(0, 0.1, self.dim) + self.particles_position[i] = np.clip(self.particles_position[i], -self.boundary, self.boundary) + + def __call__(self, func): + self.best_fitness = np.inf + self.best_position = None + self.global_best_position = None + self.global_best_fitness = np.inf + + self.initialize_particles() + + for _ in range(1, self.budget): + self.update_particles(func) + self.adapt_parameters() + self.enhance_exploration() + + self.best_fitness = self.global_best_fitness + self.best_position = self.global_best_position + + return self.best_fitness, self.best_position diff --git a/nevergrad/optimization/lama/SuperiorAdaptiveStrategyDE.py b/nevergrad/optimization/lama/SuperiorAdaptiveStrategyDE.py new file mode 100644 index 000000000..2d765a58f --- /dev/null +++ b/nevergrad/optimization/lama/SuperiorAdaptiveStrategyDE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class SuperiorAdaptiveStrategyDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.48, F_range=0.22, CR=0.88, strategy="adaptive" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main evolutionary loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy + if self.strategy == "adaptive": + # Use a blend of best and random selection + best_idx = np.argmin(fitness) + random_idx = np.random.randint(self.population_size) + base = population[best_idx] if np.random.rand() < 0.5 else population[random_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Adjust F dynamically + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using two random distinct indices + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection and evaluation + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check budget exhaustion + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SuperiorEnhancedDynamicPrecisionOptimizerV1.py b/nevergrad/optimization/lama/SuperiorEnhancedDynamicPrecisionOptimizerV1.py new file mode 100644 index 000000000..392a484af --- /dev/null +++ b/nevergrad/optimization/lama/SuperiorEnhancedDynamicPrecisionOptimizerV1.py @@ -0,0 +1,61 @@ +import numpy as np + + +class SuperiorEnhancedDynamicPrecisionOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and advanced cooling parameters + T = 1.25 # Higher initial temperature for a more aggressive exploratory start + T_min = 0.0003 # Further lowered minimum temperature for deeper fine-tuning in late stages + alpha = 0.93 # Slower cooling rate to maximize the duration of effective search + + # Mutation and crossover parameters finely tuned for dynamic adaptability + F = 0.77 # Slightly increased Mutation factor for more robust exploration + CR = 0.89 # Higher Crossover probability to enhance solution diversity + + population_size = 78 # Slightly adjusted population size for more balanced computation + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation factor integrations with temperature and progress-driven modifications + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor adjusted for controlled exploration-exploitation balance + dynamic_F = ( + F + * np.exp(-0.05 * T) + * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced probabilistic acceptance condition with a more responsive temperature adaptation + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling with a modulated decay factor based on search phase + adaptive_cooling = alpha - 0.006 * np.sin(4 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SuperiorHybridEvolutionaryAnnealingOptimizer.py b/nevergrad/optimization/lama/SuperiorHybridEvolutionaryAnnealingOptimizer.py new file mode 100644 index 000000000..2738803d2 --- /dev/null +++ b/nevergrad/optimization/lama/SuperiorHybridEvolutionaryAnnealingOptimizer.py @@ -0,0 +1,56 @@ +import numpy as np + + +class SuperiorHybridEvolutionaryAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initial temperature for simulated annealing, starting slightly higher for broader exploration + T = 2.0 + T_min = 0.001 # Lower minimum temperature for finer control at late stages + alpha = 0.90 # Cooling rate, tuned for optimal exploration-exploitation balance + + # Differential evolution parameters adjusted for robustness and efficiency + F = 0.75 # Mutation factor adjusted for optimal mutation balance + CR = 0.88 # Crossover probability to ensure effective trait mixing + + # Increase population size for better initial coverage and diversity + population_size = 60 + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Introduce a dynamic mutation factor that decreases as temperature drops + dynamic_F = F * (1 - 0.05 * np.log10(1 + (1 / T))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Simulated annealing acceptance criterion, adjusted for a sharper probability curve + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / (T**1.5)): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cooling schedule that dynamically adjusts based on search progress + adaptive_cooling = alpha - 0.01 * (evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SuperiorOptimalEnhancedStrategyDE.py b/nevergrad/optimization/lama/SuperiorOptimalEnhancedStrategyDE.py new file mode 100644 index 000000000..e7a78fc2a --- /dev/null +++ b/nevergrad/optimization/lama/SuperiorOptimalEnhancedStrategyDE.py @@ -0,0 +1,65 @@ +import numpy as np + + +class SuperiorOptimalEnhancedStrategyDE: + def __init__( + self, budget=10000, population_size=150, F_base=0.5, F_range=0.5, CR=0.85, strategy="best-2-bin" + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy dynamically + if self.strategy == "best-2-bin": + best = population[best_idx] + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c, d = population[np.random.choice(idxs, 4, replace=False)] + F = self.F_base + np.random.rand() * self.F_range + mutant = np.clip(best + F * (a - b + c - d), self.lb, self.ub) + else: + idxs = [idx for idx in range(self.population_size) if idx != i] + base, a, b, c = population[np.random.choice(idxs, 4, replace=False)] + F = self.F_base + np.random.rand() * self.F_range + mutant = np.clip(base + F * (a - b + c), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SuperiorRefinedEvolutionaryGradientOptimizerV13.py b/nevergrad/optimization/lama/SuperiorRefinedEvolutionaryGradientOptimizerV13.py new file mode 100644 index 000000000..1aee76519 --- /dev/null +++ b/nevergrad/optimization/lama/SuperiorRefinedEvolutionaryGradientOptimizerV13.py @@ -0,0 +1,81 @@ +import numpy as np + + +class SuperiorRefinedEvolutionaryGradientOptimizerV13: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.55, + F_range=0.45, + CR=0.95, + elite_fraction=0.1, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if ( + np.random.rand() < 0.75 + ): # Modified probability to select the current best, enhancing global search + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SupremeDynamicAdaptiveOptimizerV5.py b/nevergrad/optimization/lama/SupremeDynamicAdaptiveOptimizerV5.py new file mode 100644 index 000000000..2694c58cf --- /dev/null +++ b/nevergrad/optimization/lama/SupremeDynamicAdaptiveOptimizerV5.py @@ -0,0 +1,59 @@ +import numpy as np + + +class SupremeDynamicAdaptiveOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Enhanced temperature and cooling schedule + T = 1.2 # Higher initial temperature for more aggressive early exploration + T_min = 0.0003 # Lower end temperature for extended fine-tuning stage + alpha = 0.95 # Less aggressive cooling to allow more thorough search + + # Refined mutation and crossover strategy + F = 0.8 # Slightly increased mutation factor + CR = 0.9 # Increased crossover probability to foster better information exchange + + population_size = 100 # Increased population size for better sampling of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing an adaptive mutation strategy with enhanced dynamic components + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # More aggressive dynamic mutation factor with revised formula + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More refined acceptance criteria with an aggressive threshold for accepting worse solutions + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Further enhanced adaptive cooling with modified modulation for precision + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV1.py b/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV1.py new file mode 100644 index 000000000..1d2047896 --- /dev/null +++ b/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV1.py @@ -0,0 +1,61 @@ +import numpy as np + + +class SupremeDynamicPrecisionOptimizerV1: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and annealing parameters + T = 1.1 # Optimized initial temperature to encourage early exploration + T_min = 0.0003 # Lower minimum temperature for prolonged deep exploration + alpha = 0.95 # Slower cooling rate to maintain a balance between exploration and exploitation + + # Mutation and crossover parameters fine-tuned for optimal search dynamics + F = 0.82 # Mutation factor adjusted for aggressive exploration + CR = 0.85 # High crossover probability to ensure diversity in solutions + + population_size = 90 # Optimized population size for effective coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Integrate advanced mutation strategy with dynamic control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor adapts based on the sigmoid function for refined control + dynamic_F = ( + F + * (1 - np.exp(-0.05 * T)) + * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criteria incorporating a dynamic temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy enhanced with sinusoidal modulation for longer effective search + adaptive_cooling = alpha - 0.007 * np.sin(4.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV2.py b/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV2.py new file mode 100644 index 000000000..cecd9ed96 --- /dev/null +++ b/nevergrad/optimization/lama/SupremeDynamicPrecisionOptimizerV2.py @@ -0,0 +1,57 @@ +import numpy as np + + +class SupremeDynamicPrecisionOptimizerV2: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Starting temperature + T_min = 0.001 # Minimum temperature threshold for annealing + alpha = 0.92 # Cooling rate, fine-tuned for a more gradual decrease + + # Mutation and crossover parameters optimized further + F = 0.75 # Mutation factor tuned for a better balance between exploration and exploitation + CR = 0.85 # Crossover probability adjusted to maintain diversity while promoting good traits + + population_size = 80 # Adjusted population size for better performance within the budget + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics and temperature-dependent acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and progress + dynamic_F = ( + F * np.exp(-0.06 * T) * (0.7 + 0.3 * np.cos(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criterion based on delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy that adjusts based on performance and remaining budget + adaptive_cooling = alpha - 0.008 * np.sin(1.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SupremeEvolutionaryGradientHybridOptimizerV6.py b/nevergrad/optimization/lama/SupremeEvolutionaryGradientHybridOptimizerV6.py new file mode 100644 index 000000000..81eec1a59 --- /dev/null +++ b/nevergrad/optimization/lama/SupremeEvolutionaryGradientHybridOptimizerV6.py @@ -0,0 +1,78 @@ +import numpy as np + + +class SupremeEvolutionaryGradientHybridOptimizerV6: + def __init__( + self, + budget=10000, + population_size=100, + F_base=0.5, + F_range=0.5, + CR=0.85, + elite_fraction=0.1, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive', 'best', 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Adaptively choose base individual based on performance + if np.random.rand() < 0.8: # High probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjust F according to a uniform distribution reflecting progress + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/SupremeOptimalPrecisionEvolutionaryThermalOptimizer.py b/nevergrad/optimization/lama/SupremeOptimalPrecisionEvolutionaryThermalOptimizer.py new file mode 100644 index 000000000..361f2cdca --- /dev/null +++ b/nevergrad/optimization/lama/SupremeOptimalPrecisionEvolutionaryThermalOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class SupremeOptimalPrecisionEvolutionaryThermalOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Starting temperature + T_min = 0.001 # Minimum temperature threshold for annealing + alpha = 0.93 # Cooling rate, selected for extended search + + # Mutation and crossover parameters optimized further + F = 0.7 # Mutation factor adjusted for a balance between exploration and exploitation + CR = 0.9 # Crossover probability set to ensure high diversity in solutions + + population_size = 75 # Population size optimized for the budget and problem complexity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics and temperature-dependent acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by temperature and progress + dynamic_F = F * np.exp(-0.1 * T) * (0.6 + 0.4 * np.tanh(evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Improved acceptance criterion based on delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy that adjusts based on current performance and remaining budget + adaptive_cooling = alpha - 0.01 * np.sin(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/SupremeUltraEnhancedEvolutionaryOptimizer.py b/nevergrad/optimization/lama/SupremeUltraEnhancedEvolutionaryOptimizer.py new file mode 100644 index 000000000..dfb328005 --- /dev/null +++ b/nevergrad/optimization/lama/SupremeUltraEnhancedEvolutionaryOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class SupremeUltraEnhancedEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and enhanced cooling parameters + T = 1.15 # Initial higher temperature for more aggressive exploration + T_min = 0.0005 # Lower temperature threshold for fine-tuned exploitation + alpha = 0.92 # Cooling rate, slightly adjusted for optimal cooling balance + + # Mutation and crossover parameters further refined + F = 0.75 # Higher mutation factor for aggressive search in early stages + CR = 0.92 # Increased crossover probability for maintaining high genetic diversity + + population_size = 80 # Increased population size for more diverse initial solutions + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation dynamics with temperature and evaluation adaptive mutation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor with enhanced exploration and exploitation balance + dynamic_F = ( + F * np.exp(-0.11 * T) * (0.7 + 0.3 * np.sin(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Superior acceptance criterion that considers advanced thermal effects + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling with enhanced periodic modulation + adaptive_cooling = alpha - 0.012 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/TemporalAdaptiveDifferentialEvolution.py b/nevergrad/optimization/lama/TemporalAdaptiveDifferentialEvolution.py new file mode 100644 index 000000000..93c3da598 --- /dev/null +++ b/nevergrad/optimization/lama/TemporalAdaptiveDifferentialEvolution.py @@ -0,0 +1,51 @@ +import numpy as np + + +class TemporalAdaptiveDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality of the problem + self.pop_size = 50 # Population size reduced for more focused search + self.F_base = 0.5 # Base mutation factor + self.CR = 0.9 # Crossover probability + self.F_decay = 0.99 # Decay factor for mutation rate + + def __call__(self, func): + # Initialize population uniformly within bounds + pop = np.random.uniform(-5.0, 5.0, (self.pop_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + + # Track the best solution found + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_ind = pop[best_idx].copy() + + # Evolution loop + n_iterations = int(self.budget / self.pop_size) + F = self.F_base + for iteration in range(n_iterations): + # Temporally decaying mutation factor + F *= self.F_decay + + for i in range(self.pop_size): + # Mutation strategy: 'rand/1/bin' + idxs = np.random.choice([idx for idx in range(self.pop_size) if idx != i], 3, replace=False) + a, b, c = pop[idxs] + mutant = pop[i] + F * (a - b + c - pop[i]) + + # Clipping to bounds + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + trial = np.where(np.random.rand(self.dim) < self.CR, mutant, pop[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_fitness = trial_fitness + best_ind = trial.copy() + + return best_fitness, best_ind diff --git a/nevergrad/optimization/lama/TurbochargedDifferentialEvolution.py b/nevergrad/optimization/lama/TurbochargedDifferentialEvolution.py new file mode 100644 index 000000000..27e228136 --- /dev/null +++ b/nevergrad/optimization/lama/TurbochargedDifferentialEvolution.py @@ -0,0 +1,72 @@ +import numpy as np + + +class TurbochargedDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension fixed as per the problem + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def __call__(self, func): + # Increased population size for better search space coverage + population_size = 150 + mutation_factor = 0.5 # Initializing with a lower mutation factor + crossover_prob = 0.7 # Higher crossover probability for increased diversity + + # Initialize population + population = np.random.uniform(self.lower_bound, self.upper_bound, (population_size, self.dim)) + fitness = np.array([func(x) for x in population]) + + best_idx = np.argmin(fitness) + best_value = fitness[best_idx] + best_solution = population[best_idx].copy() + + # Adaptive mutation and crossover approach with direct feedback from performance + performance_feedback = 0.1 + + for _ in range(self.budget // population_size): + new_population = np.empty_like(population) + new_fitness = np.zeros(population_size) + + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = population[np.random.choice(indices, 3, replace=False)] + + # Mutate: Dithering mutation strategy + local_mutation = mutation_factor + performance_feedback * (np.random.rand() - 0.5) + mutant = a + local_mutation * (b - c) + mutant = np.clip(mutant, self.lower_bound, self.upper_bound) + + # Crossover: Binomial + cross_points = np.random.rand(self.dim) < crossover_prob + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + trial_fitness = func(trial) + if trial_fitness < fitness[i]: + new_population[i] = trial + new_fitness[i] = trial_fitness + else: + new_population[i] = population[i] + new_fitness[i] = fitness[i] + + population = new_population + fitness = new_fitness + + # Update the best solution found + current_best_idx = np.argmin(fitness) + current_best_value = fitness[current_best_idx] + if current_best_value < best_value: + best_value = current_best_value + best_solution = population[current_best_idx].copy() + + # Dynamic adaptation based on feedback + if current_best_value < best_value: + performance_feedback *= 1.05 # Increase mutation if performance is improving + else: + performance_feedback *= 0.95 # Decrease mutation if performance stagnates + + return best_value, best_solution diff --git a/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithm.py b/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithm.py new file mode 100644 index 000000000..b3cab2364 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithm.py @@ -0,0 +1,98 @@ +import numpy as np + + +class UltimateDynamicFireworkAlgorithm: + def __init__( + self, + population_size=50, + max_sparks=10, + max_generations=1500, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.9, + p_dt=0.05, + exploration_range=0.6, + mutation_rate=0.20, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.exploration_range = exploration_range + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0, np.Inf) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, beta): + return x + np.random.uniform(-self.exploration_range, self.exploration_range, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def mutation_operator(self, x): + return x + np.random.normal(0, self.mutation_rate, size=self.dim) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.95 # Decrease alpha + self.beta[k] *= 1.05 # Increase beta + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark = self.mutation_operator(new_spark) + new_fitness = func(new_spark) + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0, new_fitness) + else: + self.fireworks[i] = ( + np.copy(self.fireworks[i][0]), + self.fireworks[i][1] + 1, + self.fireworks[i][2], + ) + + self.update_parameters(i, fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = ( + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), + 0, + np.Inf, + ) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithmImproved.py b/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithmImproved.py new file mode 100644 index 000000000..e370c09e4 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateDynamicFireworkAlgorithmImproved.py @@ -0,0 +1,96 @@ +import numpy as np + + +class UltimateDynamicFireworkAlgorithmImproved: + def __init__( + self, + population_size=30, + max_sparks=5, + max_generations=1000, + initial_alpha=0.1, + initial_beta=0.2, + p_ex=0.8, + p_dt=0.1, + mutation_rate=0.05, + ): + self.population_size = population_size + self.max_sparks = max_sparks + self.max_generations = max_generations + self.initial_alpha = initial_alpha + self.initial_beta = initial_beta + self.p_ex = p_ex + self.p_dt = p_dt + self.mutation_rate = mutation_rate + self.budget = 0 + self.f_opt = np.Inf + self.x_opt = None + + def initialize_population(self, func): + self.dim = func.bounds.ub.shape[0] + self.population = np.random.uniform( + func.bounds.lb, func.bounds.ub, size=(self.population_size, self.dim) + ) + self.fireworks = [(np.copy(x), 0) for x in self.population] + self.best_individual = None + self.best_fitness = np.Inf + self.alpha = np.full(self.population_size, self.initial_alpha) + self.beta = np.full(self.population_size, self.initial_beta) + + def explosion_operator(self, x, func, beta): + return x + np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim) * beta + + def attraction_operator(self, x, y, alpha): + return x + alpha * (y - x) + + def update_parameters(self, k, fitness_diff): + if fitness_diff < 0: + self.alpha[k] *= 0.9 # Decrease alpha + self.beta[k] *= 1.1 # Increase beta + else: + self.alpha[k] *= 1.1 # Increase alpha + self.beta[k] *= 0.9 # Decrease beta + + def adapt_mutation_rate(self, fitness_diff): + if fitness_diff < 0: + self.mutation_rate *= 0.95 # Decrease mutation rate + else: + self.mutation_rate *= 1.05 # Increase mutation rate + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + self.initialize_population(func) + + for _ in range(self.max_generations): + for i, (x, _) in enumerate(self.fireworks): + fitness = func(x) + if fitness < self.best_fitness: + self.best_fitness = fitness + self.best_individual = np.copy(x) + + for _ in range(self.max_sparks): + if np.random.rand() < self.p_ex: + new_spark = self.explosion_operator(x, func, self.beta[i]) + else: + j = np.random.randint(0, self.population_size) + new_spark = self.attraction_operator(x, self.fireworks[j][0], self.alpha[i]) + + new_spark += np.random.normal(0, self.mutation_rate, size=self.dim) + new_fitness = func(new_spark) + + fitness_diff = new_fitness - func(self.fireworks[i][0]) + if fitness_diff < 0: + self.fireworks[i] = (np.copy(new_spark), 0) + else: + self.fireworks[i] = (np.copy(self.fireworks[i][0]), self.fireworks[i][1] + 1) + + self.update_parameters(i, fitness_diff) + self.adapt_mutation_rate(fitness_diff) + + if self.fireworks[i][1] > self.p_dt * self.max_sparks: + self.fireworks[i] = (np.random.uniform(func.bounds.lb, func.bounds.ub, size=self.dim), 0) + + self.f_opt = func(self.best_individual) + self.x_opt = self.best_individual + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19.py b/nevergrad/optimization/lama/UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19.py new file mode 100644 index 000000000..884bbd756 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19.py @@ -0,0 +1,82 @@ +import numpy as np + + +class UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19: + def __init__( + self, + budget=10000, + population_size=145, + F_base=0.58, + F_range=0.42, + CR=0.97, + elite_fraction=0.12, + mutation_strategy="adaptive_dynamic", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive_dynamic' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy: adaptive, but dynamically changes the probability threshold based on progress + adaptation_rate = evaluations / self.budget + selection_threshold = 0.85 - 0.2 * adaptation_rate # Decreases as evaluations increase + + if self.mutation_strategy == "adaptive_dynamic": + if np.random.rand() < selection_threshold: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F considering the phase of optimization + F_adjustment = 0.2 * np.sin(2 * np.pi * adaptation_rate) # Introduces periodic adaptation + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + F_adjustment + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV15.py b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV15.py new file mode 100644 index 000000000..baa8a7d5f --- /dev/null +++ b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV15.py @@ -0,0 +1,86 @@ +import numpy as np + + +class UltimateEvolutionaryGradientOptimizerV15: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.58, + F_range=0.42, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="balanced", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'adaptive', 'random', 'balanced' + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy selection + if self.mutation_strategy == "adaptive": + base = ( + best_individual + if np.random.rand() < 0.75 + else population[np.random.choice(elite_indices)] + ) + elif self.mutation_strategy == "random": + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "balanced": + if np.random.rand() < 0.5: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV26.py b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV26.py new file mode 100644 index 000000000..63cf5de88 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV26.py @@ -0,0 +1,81 @@ +import numpy as np + + +class UltimateEvolutionaryGradientOptimizerV26: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.58, + F_range=0.42, + CR=0.93, + elite_fraction=0.11, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if ( + np.random.rand() < 0.8 + ): # Increased probability to focus on the current best, enhancing global search + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV33.py b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV33.py new file mode 100644 index 000000000..64b8406b8 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateEvolutionaryGradientOptimizerV33.py @@ -0,0 +1,79 @@ +import numpy as np + + +class UltimateEvolutionaryGradientOptimizerV33: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.52, + F_range=0.48, + CR=0.97, + elite_fraction=0.07, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range to adjust mutation factor + self.CR = CR # Crossover probability, slightly higher to improve exploration + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem is fixed to 5 as given + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Adaptively choose base individual, either the best or from elite + if np.random.rand() < 0.85: # Increased focus on exploitation near promising areas + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Randomly select from elite for base + base = population[np.random.choice(elite_indices)] + + # Adjust F dynamically for exploration and exploitation balance + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation strategy DE/rand/1 + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with potential increase in probability + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation of the trial individual + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit condition if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltimateEvolutionaryOptimizer.py b/nevergrad/optimization/lama/UltimateEvolutionaryOptimizer.py new file mode 100644 index 000000000..7f9ae6e97 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateEvolutionaryOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltimateEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature with a slightly higher starting point and refine minimum threshold + T = 1.2 # Starting temperature increased for more aggressive initial exploration + T_min = 0.0003 # Lower temperature threshold for fine-tuned exploitation + alpha = 0.95 # Slightly slower cooling rate to allow more thorough search at each temperature level + + # Mutation and crossover parameters optimized for a balance between diversification and intensification + F = 0.8 # Higher mutation factor for more effective exploration early in the process + CR = 0.85 # Lowered crossover probability to ensure better offspring quality + + population_size = 100 # Increased population size for more diverse initial solutions + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics with a focus on adaptive mutation factors + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor incorporating more complex adaptive behavior + dynamic_F = ( + F * np.exp(-0.1 * T) * (0.7 + 0.3 * np.cos(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Superior acceptance criterion that adapts better to changes in fitness landscape + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.06 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling with an added modulation to account for search stagnation + adaptive_cooling = alpha - 0.015 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltimateRefinedAQAPSO_LS_DIW_AP.py b/nevergrad/optimization/lama/UltimateRefinedAQAPSO_LS_DIW_AP.py new file mode 100644 index 000000000..bf80126d6 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateRefinedAQAPSO_LS_DIW_AP.py @@ -0,0 +1,84 @@ +import numpy as np + + +class UltimateRefinedAQAPSO_LS_DIW_AP: + def __init__(self, budget=1000, num_particles=30): + self.budget = budget + self.num_particles = num_particles + self.dim = 5 + + def random_restart(self): + return np.random.uniform(-5.0, 5.0, size=(self.num_particles, self.dim)) + + def local_search(self, x, func): + best_x = x + best_f = func(x) + + for _ in range(500): # Keep the same local search iterations + x_new = x + 0.1 * np.random.randn(self.dim) # Adjusted the local search step size + x_new = np.clip(x_new, -5.0, 5.0) + f_val = func(x_new) + + if f_val < best_f: + best_f = f_val + best_x = x_new + + return best_x, best_f + + def update_inertia_weight(self, t): + return 0.8 - 0.6 * t / self.budget # Fine-tuned inertia weight update + + def update_parameters(self, t): + return 1.5 - t / (1.5 * self.budget), 2.0 - t / ( + 1.5 * self.budget + ) # Fine-tuned cognitive and social weights update + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + particles_pos = self.random_restart() + particles_vel = np.zeros((self.num_particles, self.dim)) + personal_best_pos = np.copy(particles_pos) + personal_best_val = np.array([func(x) for x in particles_pos]) + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + for t in range(1, self.budget + 1): + inertia_weight = self.update_inertia_weight(t) + cognitive_weight, social_weight = self.update_parameters(t) + + for i in range(self.num_particles): + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + r3 = np.random.rand() + + particles_vel[i] = ( + inertia_weight * particles_vel[i] + + cognitive_weight * r1 * (personal_best_pos[i] - particles_pos[i]) + + social_weight * r2 * (global_best_pos - particles_pos[i]) + ) + + accel = 1.7 * r3 * (global_best_pos - particles_pos[i]) # Fine-tuned acceleration coefficient + particles_vel[i] += accel + + particles_pos[i] += particles_vel[i] + particles_pos[i] = np.clip(particles_pos[i], -5.0, 5.0) + + f_val = func(particles_pos[i]) + + if f_val < personal_best_val[i]: + personal_best_val[i] = f_val + personal_best_pos[i] = np.copy(particles_pos[i]) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = np.copy(particles_pos[i]) + + global_best_idx = np.argmin(personal_best_val) + global_best_pos = np.copy(personal_best_pos[global_best_idx]) + + # Integrate local search + for i in range(self.num_particles): + particles_pos[i], _ = self.local_search(particles_pos[i], func) + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltimateRefinedPrecisionEvolutionaryOptimizerV41.py b/nevergrad/optimization/lama/UltimateRefinedPrecisionEvolutionaryOptimizerV41.py new file mode 100644 index 000000000..4ca18c5bf --- /dev/null +++ b/nevergrad/optimization/lama/UltimateRefinedPrecisionEvolutionaryOptimizerV41.py @@ -0,0 +1,80 @@ +import numpy as np + + +class UltimateRefinedPrecisionEvolutionaryOptimizerV41: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.58, + F_range=0.42, + CR=0.93, + elite_fraction=0.12, + mutation_strategy="hybrid", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly adjusted mutation factor for better global exploration + self.F_range = F_range # Adjusted mutation factor range for more controlled mutations + self.CR = CR # Slightly lower crossover probability to prevent premature convergence + self.elite_fraction = elite_fraction # Adjusted elite fraction to maintain a balance between exploration and exploitation + self.mutation_strategy = ( + mutation_strategy # Hybrid mutation strategy incorporating both random and best elements + ) + self.dim = 5 # Dimensionality of the problem fixed at 5 + self.lb = -5.0 # Search space lower bound + self.ub = 5.0 # Search space upper bound + + def __call__(self, func): + # Initialize the population + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "hybrid": + # Choose base individual: adaptive choice between best and random elite + if np.random.rand() < 0.80: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Calculate F dynamically within refined constraints + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Differential evolution mutation strategy (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation with refined CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate the trial solution + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18.py b/nevergrad/optimization/lama/UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18.py new file mode 100644 index 000000000..03c017734 --- /dev/null +++ b/nevergrad/optimization/lama/UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18.py @@ -0,0 +1,85 @@ +import numpy as np + + +class UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.52, + F_range=0.48, + CR=0.93, + elite_fraction=0.08, + mutation_strategy="balanced", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'adaptive', 'random', 'balanced' + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Mutation strategy selection + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.85: # Increased probability of selecting the best individual + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "random": + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "balanced": + if np.random.rand() < 0.5: + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraDynamicAdaptiveRAMEDS.py b/nevergrad/optimization/lama/UltraDynamicAdaptiveRAMEDS.py new file mode 100644 index 000000000..4b9d0a1b6 --- /dev/null +++ b/nevergrad/optimization/lama/UltraDynamicAdaptiveRAMEDS.py @@ -0,0 +1,92 @@ +import numpy as np + + +class UltraDynamicAdaptiveRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + tournament_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.tournament_size = tournament_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Cosine annealing mutation factor + F = self.F_min + 0.5 * (self.F_max - self.F_min) * (1 + np.cos(np.pi * evaluations / self.budget)) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Tournament selection for mutation base vector + tournament_indices = np.random.choice( + range(self.population_size), self.tournament_size, replace=False + ) + tournament_fitness = fitness[tournament_indices] + base_idx = tournament_indices[np.argmin(tournament_fitness)] + base = population[base_idx] + + # Differential mutation + idxs = np.array([idx for idx in range(self.population_size) if idx != i and idx != base_idx]) + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraDynamicDualPhaseOptimizedStrategyV16.py b/nevergrad/optimization/lama/UltraDynamicDualPhaseOptimizedStrategyV16.py new file mode 100644 index 000000000..18d889626 --- /dev/null +++ b/nevergrad/optimization/lama/UltraDynamicDualPhaseOptimizedStrategyV16.py @@ -0,0 +1,88 @@ +import numpy as np + + +class UltraDynamicDualPhaseOptimizedStrategyV16: + def __init__(self, budget, dimension=5, population_size=100, F_init=0.5, CR_init=0.9, switch_ratio=0.5): + self.budget = budget + self.dimension = dimension + self.pop_size = population_size + self.F = F_init + self.CR = CR_init + self.switch_ratio = switch_ratio + self.lower_bounds = -5.0 * np.ones(self.dimension) + self.upper_bounds = 5.0 * np.ones(self.dimension) + + def initialize_population(self): + return np.random.uniform(self.lower_bounds, self.upper_bounds, (self.pop_size, self.dimension)) + + def mutate(self, population, best_idx, index, phase): + size = len(population) + idxs = [idx for idx in range(size) if idx != index] + candidates = np.random.choice(idxs, 5, replace=False) + if phase == 1: + # Focusing more on the best individual and perturbations by two differences + mutant = ( + population[best_idx] + + self.F * (population[candidates[0]] - population[candidates[1]]) + + 0.5 * self.F * (population[candidates[2]] - population[candidates[3]]) + ) + else: + # Enhanced mutation strategy incorporating global best influence more significantly + mutant = ( + population[best_idx] + + self.F * (population[candidates[0]] - population[candidates[1]]) + + self.F * (population[candidates[2]] - population[best_idx]) + + 0.5 * self.F * (population[candidates[3]] - population[candidates[4]]) + ) + return np.clip(mutant, self.lower_bounds, self.upper_bounds) + + def crossover(self, target, mutant): + crossover_mask = np.random.rand(self.dimension) < self.CR + trial = np.where(crossover_mask, mutant, target) + return trial + + def select(self, target, trial, func): + f_target = func(target) + f_trial = func(trial) + if f_trial < f_target: + return trial, f_trial + else: + return target, f_target + + def adjust_parameters(self, iteration, total_iterations): + # Using a more dynamic, non-linear parameter scaling function + scale = iteration / total_iterations + self.F = 0.5 + 0.5 * np.sin(np.pi * scale) # Sine variation for F + self.CR = 0.9 - 0.4 * scale**2 # Quadratic decay for CR + + def __call__(self, func): + population = self.initialize_population() + fitnesses = np.array([func(ind) for ind in population]) + evaluations = len(population) + iteration = 0 + best_idx = np.argmin(fitnesses) + switch_point = int(self.switch_ratio * self.budget) + + while evaluations < self.budget: + phase = 1 if evaluations < switch_point else 2 + self.adjust_parameters(iteration, switch_point if phase == 1 else self.budget - switch_point) + + for i in range(self.pop_size): + mutant = self.mutate(population, best_idx, i, phase) + trial = self.crossover(population[i], mutant) + trial, trial_fitness = self.select(population[i], trial, func) + evaluations += 1 + + if trial_fitness < fitnesses[i]: + population[i] = trial + fitnesses[i] = trial_fitness + if trial_fitness < fitnesses[best_idx]: + best_idx = i + + if evaluations >= self.budget: + break + iteration += 1 + + best_fitness = fitnesses[best_idx] + best_solution = population[best_idx] + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV10.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV10.py new file mode 100644 index 000000000..efaca0f70 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV10.py @@ -0,0 +1,181 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV10: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + early_stopping_ratio=0.05, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.early_stopping_ratio = early_stopping_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + best_fitness_history = [] + max_stagnant_iterations = int(self.early_stopping_ratio * global_search_budget) + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Early stopping based on stagnant improvements + best_fitness_history.append(g_best_fitness) + if len(best_fitness_history) > max_stagnant_iterations: + recent_improvement = np.diff(best_fitness_history[-max_stagnant_iterations:]) + if np.all(recent_improvement == 0): + break + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV11.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV11.py new file mode 100644 index 000000000..ce2510659 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV11.py @@ -0,0 +1,184 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV11: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + early_stopping_ratio=0.05, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.early_stopping_ratio = early_stopping_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + best_fitness_history = [] + max_stagnant_iterations = int(self.early_stopping_ratio * global_search_budget) + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Early stopping based on stagnant improvements + best_fitness_history.append(g_best_fitness) + if len(best_fitness_history) > max_stagnant_iterations: + recent_improvement = np.diff(best_fitness_history[-max_stagnant_iterations:]) + if np.all(recent_improvement == 0): + break + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals, with a bias towards the global best + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + if np.random.rand() < 0.5: # 50% chance to do local search from global best + new_x, new_f = self.local_search(g_best, func, local_budget) + else: # otherwise, do local search on elite individuals + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV12.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV12.py new file mode 100644 index 000000000..597f45b13 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV12.py @@ -0,0 +1,190 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV12: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + adaptive_blend_chance=0.3, + max_memory_size=50, + early_stop_threshold=0.05, + enhanced_blend_weight=0.7, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.adaptive_blend_chance = adaptive_blend_chance + self.max_memory_size = max_memory_size + self.early_stop_threshold = early_stop_threshold + self.enhanced_blend_weight = enhanced_blend_weight + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + best_fitness_history = [] + max_stagnant_iterations = int(self.early_stop_threshold * global_search_budget) + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate adaptive blending crossover mechanism + if np.random.rand() < self.adaptive_blend_chance: + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = self.enhanced_blend_weight * trial + (1 - self.enhanced_blend_weight) * partner + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.max_memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Early stopping based on stagnant improvements + best_fitness_history.append(g_best_fitness) + if len(best_fitness_history) > max_stagnant_iterations: + recent_improvement = np.diff(best_fitness_history[-max_stagnant_iterations:]) + if np.all(recent_improvement == 0): + break + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals, with a bias towards the global best + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + if np.random.rand() < 0.5: # 50% chance to do local search from global best + new_x, new_f = self.local_search(g_best, func, local_budget) + else: # otherwise, do local search on elite individuals + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV2.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV2.py new file mode 100644 index 000000000..02de141d9 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV2.py @@ -0,0 +1,174 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV2: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + blend_crossover_prob=0.3, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.blend_crossover_prob = blend_crossover_prob + self.max_no_improvement_ratio = max_no_improvement_ratio + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV3.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV3.py new file mode 100644 index 000000000..dbeafbf80 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV3.py @@ -0,0 +1,174 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV3: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + blend_crossover_prob=0.3, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.blend_crossover_prob = blend_crossover_prob + self.max_no_improvement_ratio = max_no_improvement_ratio + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV4.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV4.py new file mode 100644 index 000000000..a8da209b5 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV4.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV4: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.max_no_improvement_ratio = max_no_improvement_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV7.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV7.py new file mode 100644 index 000000000..20ff6d400 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveMemoryHybridOptimizerV7.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedAdaptiveMemoryHybridOptimizerV7: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.max_no_improvement_ratio = max_no_improvement_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedAdaptiveRAMEDS.py b/nevergrad/optimization/lama/UltraEnhancedAdaptiveRAMEDS.py new file mode 100644 index 000000000..d8fc54bce --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedAdaptiveRAMEDS.py @@ -0,0 +1,88 @@ +import numpy as np + + +class UltraEnhancedAdaptiveRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.9, + F_min=0.5, + F_max=1.0, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adaptive mutation factor based on sigmoid function and gradient relative to the elite mean + elite_mean = np.mean(elite, axis=0) + gradient = best_solution - elite_mean + norm_gradient = np.linalg.norm(gradient) + F = self.F_min + (self.F_max - self.F_min) * np.exp(-norm_gradient) + + # Periodic elite update + if evaluations % (self.budget // 5) == 0: + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + + # Crossover considering a dynamic rate influenced by proximity to the elite mean + dynamic_cr = self.crossover_rate * ( + 1 - np.linalg.norm(population[i] - elite_mean) / (norm_gradient + 1e-5) + ) + cross_points = np.random.rand(dimension) < dynamic_cr + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update memory if the trial is better than the worst in memory + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraEnhancedDynamicDE.py b/nevergrad/optimization/lama/UltraEnhancedDynamicDE.py new file mode 100644 index 000000000..a2a8f5152 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedDynamicDE.py @@ -0,0 +1,69 @@ +import numpy as np + + +class UltraEnhancedDynamicDE: + def __init__( + self, budget=10000, population_size=100, F_base=0.5, F_adapt=0.3, CR=0.95, adapt_strategy=True + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Factor for mutation scale + self.F_adapt = F_adapt # Adaptation factor for mutation scale + self.CR = CR # Crossover probability + self.adapt_strategy = adapt_strategy # Adaptation strategy toggle + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Adaptive mutation strategy based on phase of optimization process + phase_ratio = evaluations / self.budget + if self.adapt_strategy and phase_ratio < 0.5: + idxs = np.argsort(fitness)[:2] # Use best individuals early on + base = population[idxs[np.random.randint(2)]] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjust mutation factor F + F = self.F_base + np.sin(phase_ratio * np.pi) * self.F_adapt + + # Mutation using derivative of best + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover using binomial method + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection step + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py b/nevergrad/optimization/lama/UltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py new file mode 100644 index 000000000..3d195acd9 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedEliteAdaptiveMemoryHybridOptimizer.py @@ -0,0 +1,174 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraEnhancedEliteAdaptiveMemoryHybridOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + blend_crossover_prob=0.3, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.blend_crossover_prob = blend_crossover_prob + self.max_no_improvement_ratio = max_no_improvement_ratio + self.memory = [] + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraEnhancedEvolutionaryGradientOptimizerV14.py b/nevergrad/optimization/lama/UltraEnhancedEvolutionaryGradientOptimizerV14.py new file mode 100644 index 000000000..32360f904 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedEvolutionaryGradientOptimizerV14.py @@ -0,0 +1,84 @@ +import numpy as np + + +class UltraEnhancedEvolutionaryGradientOptimizerV14: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.5, + F_range=0.5, + CR=0.9, + elite_fraction=0.05, + mutation_strategy="hybrid", + p_best=0.2, + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'adaptive', 'random', 'hybrid' + ) + self.p_best = p_best # Probability to choose among p-best individuals + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + p_best_size = int(self.p_best * self.population_size) + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + base = population[np.random.choice(elite_indices)] + elif self.mutation_strategy == "random": + base = population[np.random.randint(self.population_size)] + elif self.mutation_strategy == "hybrid": + if np.random.rand() < 0.5: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.min(np.random.choice(np.argsort(fitness)[:p_best_size], 1))] + + # Dynamic adjustment of F + F = self.F_base + (np.random.rand() - 0.5) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraEnhancedPrecisionEvolutionaryOptimizer.py b/nevergrad/optimization/lama/UltraEnhancedPrecisionEvolutionaryOptimizer.py new file mode 100644 index 000000000..b0d1e8e50 --- /dev/null +++ b/nevergrad/optimization/lama/UltraEnhancedPrecisionEvolutionaryOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraEnhancedPrecisionEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lb = -5.0 # Lower boundary of the search space + self.ub = 5.0 # Upper boundary of the search space + + def __call__(self, func): + # Updated thermal dynamics for enhanced exploration and exploitation balance + T = 1.05 # Initial temperature for broader initial exploration + T_min = 0.0005 # Minimum temperature for detailed exploitation + alpha = 0.95 # Cooling rate, optimized for gradual precision enhancement + + # Improved mutation and crossover dynamics + F = 0.8 # Enhanced mutation factor for aggressive diversification early on + CR = 0.88 # Crossover probability for maintaining genetic diversity + + population_size = 70 # Optimized population size for budget efficiency + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced evolutionary dynamics with adaptive mutation control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Temperature and evaluation adaptive mutation factor + dynamic_F = ( + F * np.exp(-0.09 * T) * (0.65 + 0.35 * np.sin(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion that considers delta fitness, temperature, and progress + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.sin(evaluation_count / self.budget))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling strategy + adaptive_cooling = alpha - 0.015 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraEvolutionaryGradientOptimizerV27.py b/nevergrad/optimization/lama/UltraEvolutionaryGradientOptimizerV27.py new file mode 100644 index 000000000..1a1c970cf --- /dev/null +++ b/nevergrad/optimization/lama/UltraEvolutionaryGradientOptimizerV27.py @@ -0,0 +1,81 @@ +import numpy as np + + +class UltraEvolutionaryGradientOptimizerV27: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.4, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if ( + np.random.rand() < 0.85 + ): # Increased probability to focus on the current best, enhancing global search + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Always use random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraFineSpiralDifferentialOptimizerV7.py b/nevergrad/optimization/lama/UltraFineSpiralDifferentialOptimizerV7.py new file mode 100644 index 000000000..11c34113f --- /dev/null +++ b/nevergrad/optimization/lama/UltraFineSpiralDifferentialOptimizerV7.py @@ -0,0 +1,76 @@ +import numpy as np + + +class UltraFineSpiralDifferentialOptimizerV7: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize parameters + population_size = 150 # Further reduced population for more focus on selected vectors + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Spiral and mutation parameters + min_radius = 0.00001 # Reduced minimum radius for finer local movements + max_radius = 1.5 # Reduced max radius for tighter local focus + radius_decay = 0.95 # Slower radius decay for extended local exploration + mutation_factor = 1.0 # Enhanced mutation factor for aggressive diversity + crossover_probability = 0.8 # Slightly reduced to allow more mutant characteristics + + # Advanced gradient local search parameters + step_size = 0.001 # Further refined step size + gradient_steps = 200 # Increased gradient steps for deeper local optimization + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential evolution mutation + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover operation + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral dynamic integration with reduced radius and slower decay + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Enhanced gradient descent-like local search + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation of the new solution + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + # Population update + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizer.py b/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizer.py new file mode 100644 index 000000000..79669c006 --- /dev/null +++ b/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizer.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraFineTunedEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature parameters for sophisticated annealing + T_initial = 1.2 # Starting temperature slightly increased for broader initial exploration + T = T_initial + T_min = 0.0005 # Lower minimum temperature for extended fine-tuning phases + alpha = 0.95 # Slower cooling rate to extend exploration phases at each temperature step + + # Optimized mutation and crossover parameters + F_initial = 0.8 # Higher initial mutation factor to enhance initial global search + CR = 0.85 # Slightly reduced crossover probability to improve offspring quality + + population_size = 90 # Increased population size for enhanced diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Enhanced mutation dynamics with adaptive mutation factor + while evaluation_count < self.budget and T > T_min: + F = F_initial * np.exp( + -0.2 * (T_initial - T) + ) # Adaptive mutation factor decreases with temperature + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criterion with modified temperature impact + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.03 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Non-linear adaptive cooling strategy with more periodic modulation + adaptive_cooling = alpha - 0.015 * np.cos(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizerV24.py b/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizerV24.py new file mode 100644 index 000000000..07abf4953 --- /dev/null +++ b/nevergrad/optimization/lama/UltraFineTunedEvolutionaryOptimizerV24.py @@ -0,0 +1,79 @@ +import numpy as np + + +class UltraFineTunedEvolutionaryOptimizerV24: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.65, + F_range=0.25, + CR=0.88, + elite_fraction=0.15, + mutation_strategy="advanced_adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Range for mutation factor adjustment, tightened for better control + self.CR = CR # Crossover probability, slightly reduced to enhance exploration + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Adaptive mutation strategy with an advanced approach + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "advanced_adaptive": + # Use focused elite choice with a higher precision and control in selection + if np.random.rand() < 0.85: # Increased likelihood to focus on the best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Default strategy using random elite base + base = population[np.random.choice(elite_indices)] + + # Fine-tuned mutation factor for more controlled exploration + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation scheme + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with slightly adjusted probability + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and potential replacement + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Terminate if budget is reached + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV18.py b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV18.py new file mode 100644 index 000000000..e18825ab0 --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV18.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraOptimizedDynamicPrecisionOptimizerV18: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality remains fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize the temperature and cooling strategy with refined parameters based on previous findings + T = 1.2 # Slightly higher initial temperature for a more aggressive global search at the beginning + T_min = 0.0003 # Lower minimum temperature to allow very precise exploration in the later stages + alpha = 0.95 # Slower cooling rate to sustain the search process over a longer period + + # Mutation and crossover parameters further refined for enhanced performance + F = 0.77 # Mutation factor adjusted for an optimal balance of exploration and exploitation + CR = 0.89 # Crossover probability finely tuned to encourage better integration of good traits + + population_size = 90 # Adjusted population size to optimize computational resources and diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing an enhanced dynamic mutation strategy with adaptive sigmoid function + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation rate adapting with a sophisticated sigmoid-based model + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with temperature-sensitive decision making + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced cooling strategy integrating a periodic modulation for more nuanced temperature control + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV19.py b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV19.py new file mode 100644 index 000000000..fd0e0e6ca --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV19.py @@ -0,0 +1,62 @@ +import numpy as np + + +class UltraOptimizedDynamicPrecisionOptimizerV19: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality remains fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters with refined settings + T = 1.2 # Increased starting temperature for more aggressive global exploration initially + T_min = 0.0003 # Lower minimum temperature to allow very detailed exploration at the end + alpha = 0.93 # Slower cooling rate to extend the search process over a longer time frame + + # Refined mutation and crossover parameters for improved performance + F = 0.78 # Mutation factor adjusted to enhance the balance of exploration and exploitation + CR = 0.88 # Crossover probability finely tuned for better trait integration + + population_size = 85 # Adjusted population size to optimize computational resources and diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing an enhanced dynamic mutation strategy with adaptive sigmoid function + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation rate adapting with a more advanced sigmoid model + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.75 + 0.25 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with temperature-sensitive decision making + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced cooling strategy integrating a periodic modulation for nuanced temperature control + adaptive_cooling = alpha - 0.009 * np.sin(2 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV52.py b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV52.py new file mode 100644 index 000000000..4fc32f4ea --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV52.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraOptimizedDynamicPrecisionOptimizerV52: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters more aggressively for deeper searches + T = 1.2 # Higher initial temperature for a broader initial search + T_min = 0.0001 # Lower minimum temperature to allow for very fine-grained late-stage optimization + alpha = 0.88 # Slower cooling rate to maintain the search effectiveness over a longer period + + # Mutation and crossover parameters are refined + F = 0.78 # Slightly higher Mutation factor to promote exploratory behavior + CR = 0.90 # Increased Crossover probability to ensure effective gene exchange + + population_size = 90 # Increased population size to improve search diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with improved adaptive control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor now includes a decay term that adjusts more smoothly + dynamic_F = ( + F * np.exp(-0.04 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.7))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria with an improved temperature scaling factor + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with a new sinusoidal modulation for more nuanced adjustments + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV53.py b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV53.py new file mode 100644 index 000000000..692a87503 --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedDynamicPrecisionOptimizerV53.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraOptimizedDynamicPrecisionOptimizerV53: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 as specified + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Further optimize temperature and cooling parameters for adaptive exploration + T = 1.18 # Starting temperature adjusted for a more aggressive initial exploration + T_min = 0.0003 # Lower minimum temperature to enable finer-grained late-stage optimization + alpha = 0.90 # Cooling rate optimized for an extended and flexible search + + # Mutation and crossover parameters finely tuned for diversity and convergence + F = 0.77 # Mutation factor adjusted to promote a better exploration-exploitation balance + CR = 0.88 # Crossover probability adjusted to optimize genetic mixing + + population_size = 85 # Population size tuned for a balanced search diversity and efficiency + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with adaptive mutation factor and cooling modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamically adapt mutation factor using a more responsive exponential decay + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.75 + 0.25 * np.cos(2 * np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Incorporate an improved acceptance criteria with a dynamically adjusted temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a new modulation curve for temperature adjustments + adaptive_cooling = alpha - 0.006 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraOptimizedEvolutionaryGradientOptimizerV30.py b/nevergrad/optimization/lama/UltraOptimizedEvolutionaryGradientOptimizerV30.py new file mode 100644 index 000000000..c7332f1f3 --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedEvolutionaryGradientOptimizerV30.py @@ -0,0 +1,79 @@ +import numpy as np + + +class UltraOptimizedEvolutionaryGradientOptimizerV30: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.58, + F_range=0.42, + CR=0.98, + elite_fraction=0.06, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.85: # Increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer.py b/nevergrad/optimization/lama/UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer.py new file mode 100644 index 000000000..b4d577a4c --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimension set as per problem description + self.lb = -5.0 # Lower boundary of the search space + self.ub = 5.0 # Upper boundary of the search space + + def __call__(self, func): + # Initialize thermal and evolutionary parameters + T = 1.2 # Starting temperature slightly higher for vigorous initial exploration + T_min = 0.001 # Lower threshold of temperature for fine-grained exploration at later stages + alpha = 0.91 # Cooling rate selected for a balanced exploration-exploitation trade-off + + # Mutation and crossover parameters refined for optimal performance + F_base = 0.75 # Mutation factor for controlling differential variation + CR = 0.92 # High crossover probability to maintain diversity + + population_size = 80 # Optimized population size for the budget + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced mutation dynamics with temperature and progress-dependent adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptive mutation factor influenced by both temperature and iteration progress + dynamic_F = F_base * np.exp(-0.12 * T) * (0.5 + 0.5 * np.tanh(evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criterion that considers both delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.045 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy enhanced with a non-linear modulation (sinusoidal adjustments) + adaptive_cooling = alpha - 0.015 * np.sin(1.8 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraOptimizedRAMEDS.py b/nevergrad/optimization/lama/UltraOptimizedRAMEDS.py new file mode 100644 index 000000000..4b5c701da --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedRAMEDS.py @@ -0,0 +1,73 @@ +import numpy as np + + +class UltraOptimizedRAMEDS: + def __init__( + self, + budget, + population_size=50, + initial_crossover_rate=0.9, + F_min=0.4, + F_max=0.8, + memory_size=30, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Adapt mutation factor with progress + progress = evaluations / self.budget + F = self.F_min + (self.F_max - self.F_min) * np.cos(np.pi * progress / 2) # Cosine annealing + + # Crossover rate adaptation with a logistic function + self.crossover_rate = 0.5 + 0.45 / (1 + np.exp(-10 * (progress - 0.5))) + + # Mutation and Crossover + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), lb, ub) + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraOptimizedSpiralDifferentialEvolution.py b/nevergrad/optimization/lama/UltraOptimizedSpiralDifferentialEvolution.py new file mode 100644 index 000000000..67894ea62 --- /dev/null +++ b/nevergrad/optimization/lama/UltraOptimizedSpiralDifferentialEvolution.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraOptimizedSpiralDifferentialEvolution: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population and parameters + population_size = 200 # Increased size for broader initial coverage + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + min_radius = 0.1 # Minimum radius for spiral + max_radius = 5.0 # Starting radius for spiral + radius_decay = 0.97 # More aggressive decay + mutation_factor = 0.9 # Higher mutation for aggressive exploration + crossover_probability = 0.7 # Probability for crossover + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential Evolution Strategy + a, b, c = np.random.choice(population_size, 3, replace=False) + mutant = population[a] + mutation_factor * (population[b] - population[c]) + mutant = np.clip(mutant, -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral adjustment + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * i / population_size + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Evaluation + f_trial = func(trial) + evaluations_left -= 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraPreciseDynamicOptimizerV26.py b/nevergrad/optimization/lama/UltraPreciseDynamicOptimizerV26.py new file mode 100644 index 000000000..933b3aa65 --- /dev/null +++ b/nevergrad/optimization/lama/UltraPreciseDynamicOptimizerV26.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraPreciseDynamicOptimizerV26: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Given dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.15 # Starting temperature, adjusted for broader initial exploration + T_min = 0.0002 # Lower minimum temperature for more thorough late-stage exploration + alpha = 0.90 # Cooling rate, optimized for extended search phases + + # Mutation and crossover parameters are finely tuned + F = 0.78 # Mutation factor, adjusted for optimal exploration-exploitation balance + CR = 0.88 # Crossover probability, modified for increased genetic diversity + + population_size = 85 # Adjusted population size for efficient search space scanning + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement a dynamic mutation approach with an adaptive exponential modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts using an exponential decay and hyperbolic tangent for refined control + dynamic_F = ( + F + * (1 - np.exp(-0.1 * T)) + * (0.65 + 0.35 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adapted acceptance criteria including a more aggressive temperature-dependent probability + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a sinusoidal modulation + adaptive_cooling = alpha - 0.009 * np.sin(2.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraPrecisionSpiralDifferentialOptimizerV9.py b/nevergrad/optimization/lama/UltraPrecisionSpiralDifferentialOptimizerV9.py new file mode 100644 index 000000000..6ed547d55 --- /dev/null +++ b/nevergrad/optimization/lama/UltraPrecisionSpiralDifferentialOptimizerV9.py @@ -0,0 +1,75 @@ +import numpy as np + + +class UltraPrecisionSpiralDifferentialOptimizerV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality set as constant + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population + population_size = 50 # Smaller population for faster convergence + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Tuned parameters for spirals and mutation + min_radius = 0.00005 # Further reduced minimum radius for ultra-precision + max_radius = 0.5 # Reduced maximum radius to focus nearer to current best regions + radius_decay = 0.99 # Slower decay rate to maintain explorative behavior longer + mutation_factor = 0.8 # Lower mutation factor to refine search rather than diversify too much + crossover_probability = 0.8 # Higher crossover probability to ensure thorough exploration + + # Local search adjustments + step_size = 0.0001 # Smaller step size for ultra-fine tuning + gradient_steps = 50 # Fewer steps to save budget for more global exploration + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential evolution mutation strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover operation + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Spiral motion integration for detailed search + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Local search with finer steps + for _ in range(gradient_steps): + new_trial = trial + np.random.normal(scale=step_size, size=self.dim) + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + if f_new_trial < func(trial): + trial = new_trial + + # Evaluate and update the population + f_trial = func(trial) + evaluations_left -= 1 + if evaluations_left <= 0: + break + + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraQuantumReactiveHybridStrategy.py b/nevergrad/optimization/lama/UltraQuantumReactiveHybridStrategy.py new file mode 100644 index 000000000..dff3ccca8 --- /dev/null +++ b/nevergrad/optimization/lama/UltraQuantumReactiveHybridStrategy.py @@ -0,0 +1,94 @@ +import numpy as np + + +class UltraQuantumReactiveHybridStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + self.population_size = 250 # Further increased population size to boost exploration + self.elite_size = 40 # Increased elite size to ensure retention of best solutions + self.crossover_fraction = 0.85 # Adjusted to enhance genetic diversity + self.mutation_scale = 0.02 # Further refined mutation scale for precise local searches + self.quantum_mutation_scale = 0.1 # Reduced for controlled explorative steps + self.quantum_probability = 0.15 # Increased for more frequent quantum mutations + self.reactivity_factor = 0.03 # Further refined for stable mutation adaptation + self.adaptive_quantum_boost = 0.03 # Increased boost factor for enhanced late-stage exploration + self.hybridization_rate = 0.1 # New: Rate at which we hybridize solutions from elite and random + + def initialize_population(self): + return np.random.uniform(self.lower_bound, self.upper_bound, (self.population_size, self.dim)) + + def evaluate(self, func, candidates): + return np.array([func(ind) for ind in candidates]) + + def select_elite(self, population, fitness): + indices = np.argsort(fitness)[: self.elite_size] + return population[indices], fitness[indices] + + def crossover_and_mutate(self, parents, num_offspring, iteration): + offspring = np.empty((num_offspring, self.dim)) + parent_indices = np.arange(len(parents)) + for i in range(num_offspring): + if np.random.rand() < self.crossover_fraction: + p1, p2 = np.random.choice(parent_indices, 2, replace=False) + cross_point = np.random.randint(1, self.dim) + offspring[i][:cross_point] = parents[p1][:cross_point] + offspring[i][cross_point:] = parents[p2][cross_point:] + else: + offspring[i] = parents[np.random.choice(parent_indices)] + + dynamic_scale = self.mutation_scale / (1 + iteration * self.reactivity_factor) + dynamic_quantum_scale = ( + self.quantum_mutation_scale + iteration * self.adaptive_quantum_boost + ) / (1 + iteration * self.reactivity_factor) + + if np.random.rand() < self.quantum_probability: + mutation_shift = np.random.normal(0, dynamic_quantum_scale, self.dim) + else: + mutation_shift = np.random.normal(0, dynamic_scale, self.dim) + offspring[i] += mutation_shift + offspring[i] = np.clip(offspring[i], self.lower_bound, self.upper_bound) + return offspring + + def hybridize(self, elite, random_selection): + hybrid_count = int(self.hybridization_rate * len(elite)) + hybrids = np.empty((hybrid_count, self.dim)) + for h in range(hybrid_count): + elite_member = elite[np.random.randint(len(elite))] + random_member = random_selection[np.random.randint(len(random_selection))] + mix_ratio = np.random.rand() + hybrids[h] = mix_ratio * elite_member + (1 - mix_ratio) * random_member + return hybrids + + def __call__(self, func): + population = self.initialize_population() + best_score = float("inf") + best_solution = None + evaluations_consumed = 0 + + iteration = 0 + while evaluations_consumed < self.budget: + fitness = self.evaluate(func, population) + evaluations_consumed += len(population) + + current_best_idx = np.argmin(fitness) + if fitness[current_best_idx] < best_score: + best_score = fitness[current_best_idx] + best_solution = population[current_best_idx].copy() + + if evaluations_consumed >= self.budget: + break + + elite_population, elite_fitness = self.select_elite(population, fitness) + num_offspring = self.population_size - self.elite_size - len(elite_population) + offspring = self.crossover_and_mutate(elite_population, num_offspring, iteration) + + random_selection = self.initialize_population()[: len(elite_population)] + hybrids = self.hybridize(elite_population, random_selection) + + population = np.vstack((elite_population, offspring, hybrids)) + iteration += 1 + + return best_score, best_solution diff --git a/nevergrad/optimization/lama/UltraRAMEDS.py b/nevergrad/optimization/lama/UltraRAMEDS.py new file mode 100644 index 000000000..00d0e1a06 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRAMEDS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class UltraRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + self.lb, self.ub, self.dimension = -5.0, 5.0, 5 + + def __call__(self, func): + # Initialize population and fitness + population = self.lb + (self.ub - self.lb) * np.random.rand(self.population_size, self.dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, self.dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, self.dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Adaptive mutation factor + F = self.F_max - (self.F_max - self.F_min) * np.cos(np.pi * evaluations / self.budget) + + # Mutation strategy based on memory and elite + if np.random.rand() < 0.75: # Use memory-based mutation with higher probability + ref_idx = np.random.randint(0, self.memory_size) + ref_individual = memory[ref_idx] if memory_fitness[ref_idx] != np.inf else population[i] + else: + ref_individual = elite[np.random.randint(0, self.elite_size)] + + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(ref_individual + F * (b - c), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + if trial_fitness < memory_fitness[i % self.memory_size]: + memory[i % self.memory_size] = trial.copy() + memory_fitness[i % self.memory_size] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveConvergenceStrategy.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveConvergenceStrategy.py new file mode 100644 index 000000000..7710b57c7 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveConvergenceStrategy.py @@ -0,0 +1,68 @@ +import numpy as np + + +class UltraRefinedAdaptiveConvergenceStrategy: + def __init__(self, budget, dim=5): + self.budget = budget + self.dim = dim + self.lower_bound = -5.0 + self.upper_bound = 5.0 + + def generate_initial_population(self, size=150): + return np.random.uniform(self.lower_bound, self.upper_bound, (size, self.dim)) + + def evaluate_population(self, func, population): + return np.array([func(ind) for ind in population]) + + def select_best(self, population, fitness, num_select): + indices = np.argsort(fitness)[:num_select] + return population[indices], fitness[indices] + + def mutate(self, population, mutation_rate, mutation_strength): + mutation_mask = np.random.rand(*population.shape) < mutation_rate + mutation_values = np.random.normal(0, mutation_strength, population.shape) + new_population = population + mutation_mask * mutation_values + return np.clip(new_population, self.lower_bound, self.upper_bound) + + def crossover(self, parents, num_children): + new_population = [] + for _ in range(num_children): + if np.random.rand() < 0.95: # High crossover probability + p1, p2 = np.random.choice(len(parents), 2, replace=False) + alpha = np.random.rand() + child = alpha * parents[p1] + (1 - alpha) * parents[p2] + else: # Slight chance to pass a direct parent + child = parents[np.random.randint(len(parents))] + new_population.append(child) + return np.array(new_population) + + def __call__(self, func): + population_size = 300 + num_generations = self.budget // population_size + elitism_size = int(population_size * 0.3) # Increased elitism size + mutation_rate = 0.07 # Reduced mutation rate for initial stability + mutation_strength = 0.7 # Reduced mutation strength + + population = self.generate_initial_population(population_size) + best_score = float("inf") + best_individual = None + + for gen in range(num_generations): + fitness = self.evaluate_population(func, population) + best_population, best_fitness = self.select_best(population, fitness, elitism_size) + + if best_fitness[0] < best_score: + best_score = best_fitness[0] + best_individual = best_population[0] + + non_elite_size = population_size - elitism_size + offspring = self.crossover(best_population, non_elite_size) + offspring = self.mutate(offspring, mutation_rate, mutation_strength) + population = np.vstack((best_population, offspring)) + + # Adaptive mutation strategy based on generational feedback + if gen % 5 == 0 and mutation_rate > 0.01: + mutation_rate -= 0.005 # Gradual decrease of mutation rate + mutation_strength *= 0.95 # Gradual decrease of mutation strength + + return best_score, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV5.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV5.py new file mode 100644 index 000000000..464e42444 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV5.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraRefinedAdaptiveMemoryHybridOptimizerV5: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.max_no_improvement_ratio = max_no_improvement_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV6.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV6.py new file mode 100644 index 000000000..2d8e771f3 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV6.py @@ -0,0 +1,172 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraRefinedAdaptiveMemoryHybridOptimizerV6: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + max_no_improvement_ratio=0.5, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.max_no_improvement_ratio = max_no_improvement_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= int(current_pop_size * self.max_no_improvement_ratio): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV8.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV8.py new file mode 100644 index 000000000..ca1af39ea --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV8.py @@ -0,0 +1,181 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraRefinedAdaptiveMemoryHybridOptimizerV8: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + early_stopping_ratio=0.05, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.early_stopping_ratio = early_stopping_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + best_fitness_history = [] + max_stagnant_iterations = int(self.early_stopping_ratio * global_search_budget) + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Early stopping based on stagnant improvements + best_fitness_history.append(g_best_fitness) + if len(best_fitness_history) > max_stagnant_iterations: + recent_improvement = np.diff(best_fitness_history[-max_stagnant_iterations:]) + if np.all(recent_improvement == 0): + break + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV9.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV9.py new file mode 100644 index 000000000..37d12e235 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveMemoryHybridOptimizerV9.py @@ -0,0 +1,181 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraRefinedAdaptiveMemoryHybridOptimizerV9: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + early_stopping_ratio=0.05, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory = [] + self.early_stopping_ratio = early_stopping_ratio + + def local_search(self, x, func, budget): + result = minimize( + func, x, method="L-BFGS-B", bounds=[self.bounds] * self.dim, options={"maxiter": budget} + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + best_fitness_history = [] + max_stagnant_iterations = int(self.early_stopping_ratio * global_search_budget) + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < 0.3: # 30% chance to apply blend crossover + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > 50: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # Early stopping based on stagnant improvements + best_fitness_history.append(g_best_fitness) + if len(best_fitness_history) > max_stagnant_iterations: + recent_improvement = np.diff(best_fitness_history[-max_stagnant_iterations:]) + if np.all(recent_improvement == 0): + break + + # Enhanced elitism: Ensure the best solutions are always retained and perform an elite local search + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptivePrecisionOptimizer.py b/nevergrad/optimization/lama/UltraRefinedAdaptivePrecisionOptimizer.py new file mode 100644 index 000000000..4071ea12c --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptivePrecisionOptimizer.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraRefinedAdaptivePrecisionOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and enhanced adaptive cooling parameters + T = 1.2 # Slightly higher starting temperature for initial global exploration + T_min = 0.0005 # Lower minimum temperature for fine-tuned exploitation + alpha = 0.92 # Stronger cooling rate to extend exploration phase + + # Mutation and crossover parameters dynamically adjusted + F_base = 0.8 # Base mutation factor + CR_base = 0.92 # Base crossover probability to ensure diversity + + population_size = 80 # Adjusted population size for a broader search + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Evolution loop with enhanced mutation dynamics considering temperature and feedback + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor influenced by adaptive feedback mechanisms + dynamic_F = ( + F_base * np.exp(-0.2 * T) * (0.65 + 0.35 * np.cos(np.pi * evaluation_count / self.budget)) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + CR_dynamic = CR_base - 0.1 * np.sin(3 * np.pi * evaluation_count / self.budget) + cross_points = np.random.rand(self.dim) < CR_dynamic + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adaptive acceptance criterion based on delta_fitness and temperature + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced cooling strategy with progressive adjustment based on search status + adaptive_cooling = alpha - 0.015 * np.sin(2.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedAdaptiveRAMEDS.py b/nevergrad/optimization/lama/UltraRefinedAdaptiveRAMEDS.py new file mode 100644 index 000000000..236b6af08 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedAdaptiveRAMEDS.py @@ -0,0 +1,88 @@ +import numpy as np + + +class UltraRefinedAdaptiveRAMEDS: + def __init__( + self, + budget, + population_size=100, + initial_crossover_rate=0.9, + F_min=0.1, + F_max=0.9, + memory_size=100, + elite_size=5, + ): + self.budget = budget + self.population_size = population_size + self.initial_crossover_rate = initial_crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Memory and elite initialization + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + temperature = 1.0 # Simulated annealing inspired temperature parameter + while evaluations < self.budget: + # Temperature-controlled mutation factor adaptation + F = self.F_min + (self.F_max - self.F_min) * np.exp(-10 * (temperature**2)) + temperature *= 0.995 # Cooling down the temperature + + # Variable crossover rate to ensure diverse genetic mixing over time + crossover_rate = self.initial_crossover_rate * ( + 0.5 + 0.5 * np.sin(2 * np.pi * evaluations / self.budget) + ) + + # Update elites + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + idxs = np.array([idx for idx in range(self.population_size) if idx != i]) + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (a - b), lb, ub) + + # Crossover + cross_points = np.random.rand(dimension) < crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluate trial solution + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Memory update strategy focusing on the worst replaced by better + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update the best found solution + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraRefinedConvergenceSpiralSearch.py b/nevergrad/optimization/lama/UltraRefinedConvergenceSpiralSearch.py new file mode 100644 index 000000000..8f02e3ed0 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedConvergenceSpiralSearch.py @@ -0,0 +1,82 @@ +import numpy as np + + +class UltraRefinedConvergenceSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem's constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Initialize the centroid and search parameters + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Initial radius + angle_increment = np.pi / 16 # Finer initial angle increment for exploration + + # Adaptive decay rates are more responsive to feedback + radius_decay = 0.90 # Modestly aggressive radius decay + angle_refinement = 0.95 # Less aggressive angle refinement + evaluations_left = self.budget + min_radius = 0.001 # Very fine minimum radius for detailed exploration + + # Dynamic adjustment scales based on the feedback + optimal_change_factor = 1.85 # Dynamically adjust the decay rates + no_improvement_count = 0 + last_best_f = np.inf + + # Improved escape mechanism with adaptive triggers + escape_momentum = 0 # Track escape momentum + escape_trigger = 10 # Sooner trigger for escape mechanism + + while evaluations_left > 0: + points = [] + function_values = [] + num_points = max(int(2 * np.pi / angle_increment), 8) # Ensure sufficient sampling + + for i in range(num_points): + if evaluations_left <= 0: + break + + angle = i * angle_increment + displacement = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + new_point = centroid + displacement + new_point = np.clip(new_point, -5.0, 5.0) # Enforce bounds + + f_val = func(new_point) + evaluations_left -= 1 + + points.append(new_point) + function_values.append(f_val) + + if f_val < self.f_opt: + self.f_opt = f_val + self.x_opt = new_point + + # Determine if there has been an improvement + if self.f_opt < last_best_f: + last_best_f = self.f_opt + no_improvement_count = 0 + radius_decay = min(radius_decay * optimal_change_factor, 0.93) # Slightly less aggressive + angle_refinement = min(angle_refinement * optimal_change_factor, 0.93) + else: + no_improvement_count += 1 + + # Update centroid based on feedback + if points: + best_index = np.argmin(function_values) + centroid = points[best_index] + + # Dynamically adjust escape and search parameters + if no_improvement_count > escape_trigger: + radius = min(radius / radius_decay, 5.0) # Increase radius to escape + angle_increment = np.pi / 8 # Reset angle increment to improve exploration + no_improvement_count = 0 + escape_momentum += 1 + else: + radius *= radius_decay # Tighten search + radius = max(radius, min_radius) # Ensure not too small + angle_increment *= angle_refinement # Refine search + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV10.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV10.py new file mode 100644 index 000000000..f6ae70809 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV10.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV10: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set to 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and advanced cooling parameters + T = 1.2 # Higher initial temperature for a more global exploration initially + T_min = 0.0003 # Lower minimum temperature for extensive late-stage exploration + alpha = 0.93 # Gradual cooling to maintain exploration capabilities longer + + # Mutation and crossover parameters for optimal exploration and exploitation + F = 0.77 # Mutation factor adjusted for a better balance + CR = 0.90 # Increased crossover probability for robust gene mixing + + population_size = 85 # Optimized population size + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Employing a dynamic mutation strategy with sigmoidal modulation for precision + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.75 + 0.25 * np.tanh(3.5 * (evaluation_count / self.budget - 0.45))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adaptive acceptance criteria with a temperature-sensitive function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Sophisticated adaptive cooling strategy with a sinusoidal amplitude modulation + adaptive_cooling = alpha - 0.009 * np.sin(2.8 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV11.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV11.py new file mode 100644 index 000000000..2e5582d70 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV11.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV11: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature, adjusted for more global exploration + T_min = 0.0003 # Further reduced minimum temperature for extensive late-stage exploration + alpha = 0.90 # Slower cooling rate to extend the effective search phase + + # Mutation and crossover parameters are optimized + F = 0.8 # Slightly increased Mutation factor for aggressive exploration and exploitation + CR = 0.88 # Adjusted Crossover probability for optimal gene mixing + + population_size = 90 # Adjusted population size for balanced exploration and exploitation + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation strategy with sigmoid-based modulation for precision + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts with a sigmoid function for refined control + dynamic_F = ( + F * np.exp(-0.08 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.4))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a more sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with sinusoidal amplitude modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV17.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV17.py new file mode 100644 index 000000000..f863e2d26 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV17.py @@ -0,0 +1,62 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV17: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize the temperature and cooling schedule with improved adaptive parameters + T = 1.2 # Increased starting temperature to enhance global search in initial phase + T_min = 0.0001 # Even lower minimum temperature for very deep late exploration + alpha = 0.91 # Slightly softer cooling rate to extend effective search time + + # Mutation and crossover parameters are further refined + F = 0.78 # Mutation factor adjusted for optimal diverse exploration + CR = 0.88 # Crossover probability finely tuned for better gene mixing + + population_size = 85 # Optimal population size for this problem setting + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing dynamic mutation with a refined sigmoid and adaptive strategy + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Mutation factor dynamically adapts using a sophisticated model + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.75 + 0.25 * np.sin(2 * np.pi * (evaluation_count / self.budget))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with a temperature-sensitive approach + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.055 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Sophisticated adaptive cooling strategy incorporating a periodic modulation + adaptive_cooling = alpha - 0.009 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV22.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV22.py new file mode 100644 index 000000000..dc2750499 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV22.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV22: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per the problem description + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Advanced temperature and cooling dynamics + T = 1.17 # Slightly increased starting temperature for enhanced initial exploration + T_min = 0.00045 # Lower minimum temperature for extended fine-tuning in the late stages + alpha = 0.91 # Slower cooling rate for prolonged exploration and exploitation period + + # Mutation and crossover parameters finely tuned for dynamic environment + F = 0.77 # Mutation factor adjusted for a better balance of explorative and exploitative moves + CR = 0.89 # Crossover probability optimized to sustain diversity and allow better convergence + + population_size = 82 # Adjusted population size for optimal performance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation strategy with sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation rate, adjusting with a sigmoid function for more refined control + sigmoid_adjustment = 0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5)) + dynamic_F = F * np.exp(-0.065 * T) * sigmoid_adjustment + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced sensitivity in acceptance criteria with a refined temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Sinusoidal modulation in the cooling strategy for finer temperature control + adaptive_cooling = alpha - 0.01 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV23.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV23.py new file mode 100644 index 000000000..92faac4b0 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV23.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV23: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Increased starting temperature for more aggressive initial exploration + T_min = 0.0003 # Lower minimum temperature to allow for more detailed late-stage search + alpha = 0.93 # Slower cooling rate to enhance the search duration + + # Mutation and crossover parameters are refined + F = 0.78 # Adjusted Mutation factor for better balance between exploration and exploitation + CR = 0.90 # Increased Crossover probability to improve genetic diversity + + population_size = 85 # Fine-tuned population size for optimal search performance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a refined dynamic mutation approach + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts using an enhanced sigmoid function for precise control + dynamic_F = ( + F * np.exp(-0.08 * T) * (0.8 + 0.2 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with a more dynamic temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV24.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV24.py new file mode 100644 index 000000000..f3ebcecc3 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV24.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV24: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initiate temperature and cooling parameters + T = 1.18 # Slightly increased starting temperature for more robust initial exploration + T_min = 0.0004 # Lower minimum temperature to allow more thorough late-stage search + alpha = 0.91 # Slower cooling rate to prolong the search phase + + # Mutation and crossover parameters are finely tuned + F = 0.77 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.89 # Modified Crossover probability to encourage better genetic diversity + + population_size = 83 # Adjusted population size to optimize exploration-exploitation balance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with enhanced sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts with an advanced sigmoid function for refined control + dynamic_F = F * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a temperature-sensitive function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with dynamic modulation + adaptive_cooling = alpha - 0.008 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV25.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV25.py new file mode 100644 index 000000000..825310470 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV25.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV25: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality of the problem + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling settings + T = 1.20 # Initial temperature, increased for broader initial search + T_min = 0.0003 # Minimum temperature, lowered to facilitate deeper search in late stages + alpha = 0.90 # Cooling rate, optimized to balance search duration and depth + + # Mutation and crossover parameters optimized + F = 0.78 # Mutation factor, adjusted for optimal balance + CR = 0.85 # Crossover probability, tweaked to enhance solution diversity + + population_size = 85 # Population size, adjusted for efficient search space coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation with enhanced sigmoid and exponential modulation for mutation strength + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Advanced dynamic mutation factor incorporating temperature and progress + dynamic_F = ( + F + * (1 - np.exp(-0.1 * T)) + * (0.6 + 0.4 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria incorporating more sensitive exploitation capability + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Introduction of a sinusoidal term to the cooling schedule for dynamic modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV26.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV26.py new file mode 100644 index 000000000..0c59b43fe --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV26.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV26: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and advanced cooling parameters + T = 1.2 # Starting temperature, elevated for broader initial searching + T_min = 0.0003 # Lower minimum temperature for extensive late-stage exploration + alpha = 0.91 # Modified cooling rate for sustained search duration + + # Mutation and crossover parameters are further optimized + F = 0.77 # Mutation factor, finely tuned for balanced exploration and exploitation + CR = 0.85 # Crossover probability, carefully adjusted to maintain genetic diversity + + population_size = 90 # Increased population size to enhance search space coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement dynamic mutation approach with exponential and hyperbolic modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.exp(-0.1 * T) * (0.7 + 0.3 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adapted acceptance criteria with thermal modulation + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy incorporating a sinusoidal modulation + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV27.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV27.py new file mode 100644 index 000000000..c829fa3f4 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV27.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV27: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Further refined temperature and cooling parameters + T = 1.2 # Slightly higher initial temperature for more aggressive early exploration + T_min = 0.0003 # Lower minimum temperature to enable deeper late-stage exploration + alpha = 0.9 # Adjusted cooling rate to better balance exploration and exploitation + + # Mutation and crossover parameters are optimized + F = 0.78 # Refined Mutation factor for optimal exploration-exploitation balance + CR = 0.88 # Adjusted Crossover probability to enhance genetic diversity + + population_size = 85 # Population size fine-tuned to enhance convergence rates + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement a dynamic mutation approach with enhanced sigmoid-based modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adjusted dynamic mutation factor incorporates precision control + dynamic_F = ( + F * np.exp(-0.08 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with modified temperature dependency + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Modified adaptive cooling strategy using sinusoidal modulation + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV28.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV28.py new file mode 100644 index 000000000..364abc8de --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV28.py @@ -0,0 +1,55 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV28: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Adjusted temperature and cooling parameters for enhanced exploration and exploitation + T = 1.2 # Starting temperature adjusted for broader initial exploration + T_min = 0.0003 # Lower minimum temperature to enable thorough late-stage search + alpha = 0.91 # Fine-tuned cooling rate + + # Mutation and crossover parameters optimized for diversity and convergence + F = 0.77 # Mutation factor adjusted for a balanced exploration-exploitation + CR = 0.89 # Crossover probability fine-tuned for maintaining genetic diversity + + population_size = 85 # Population size adjusted for effective search + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation strategy with exponential decay and sigmoid-based modulation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F * np.exp(-0.1 * T) * (0.85 + 0.15 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + if trial_fitness < fitness[i] or np.random.rand() < np.exp( + -(trial_fitness - fitness[i]) / (T * (1 + 0.05 * np.abs(trial_fitness - fitness[i]))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy adjusted with sinusoidal modulation for agile response + adaptive_cooling = alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV29.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV29.py new file mode 100644 index 000000000..40d28ebde --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV29.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV29: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.3 # Starting temperature, slightly increased for extended early exploration + T_min = 0.0003 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.95 # Slower cooling rate to extend the search phase more effectively + + # Mutation and crossover parameters are finely tuned + F = 0.8 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.85 # Modified Crossover probability to maintain genetic diversity + + population_size = 90 # Adjusted population size to optimize individual evaluations + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Advanced dynamic mutation approach with sigmoid-based and exponential adaptations + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts combining sigmoid and exponential decay + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.6))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More aggressive acceptance criteria incorporating a temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy using a sinusoidal modulation + adaptive_cooling = alpha - 0.005 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV30.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV30.py new file mode 100644 index 000000000..d105ea267 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV30.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV30: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.20 # Adjusted starting temperature for extended early exploration + T_min = 0.0004 # Adjusted lower minimum temperature for deeper late-stage exploration + alpha = 0.93 # Adjusted cooling rate to extend the search phase more effectively + + # Mutation and crossover parameters are finely tuned + F = 0.77 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.88 # Modified Crossover probability to maintain genetic diversity + + population_size = 85 # Adjusted population size to optimize individual evaluations + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with sigmoid-based and exponential adaptations + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts combining sigmoid and exponential decay + dynamic_F = ( + F + * np.exp(-0.07 * T) + * (0.75 + 0.25 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More aggressive acceptance criteria incorporating a temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy using a sinusoidal modulation + adaptive_cooling = alpha - 0.006 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV31.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV31.py new file mode 100644 index 000000000..f4e73189e --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV31.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV31: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.1 # Optimally adjusted starting temperature for extended early exploration + T_min = 0.0003 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.91 # Optimized cooling rate to extend the search phase + + # Mutation and crossover parameters are finely tuned + F = 0.78 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.89 # Modified Crossover probability to maintain genetic diversity + + population_size = 85 # Adjusted population size to optimize individual evaluations + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with sigmoid-based adaptation and detailed control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts with sigmoid function for refined control + dynamic_F = ( + F + * np.exp(-0.08 * T) + * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Acceptance criteria incorporate a more sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal modulation for phase consistency + adaptive_cooling = alpha - 0.007 * np.sin(4 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV32.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV32.py new file mode 100644 index 000000000..814d26190 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV32.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV32: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is set as per the problem description + self.lb = -5.0 # Lower bound as per the problem definition + self.ub = 5.0 # Upper bound as per the problem definition + + def __call__(self, func): + # Initialize temperature and cooling parameters for extended exploration and exploitation + T = 1.2 # Starting temperature slightly increased + T_min = 0.0003 # Lower minimum temperature for deeper exploration + alpha = 0.93 # Slower cooling rate to extend the search phase + + # Mutation and crossover parameters fine-tuned for balance and diversity + F = 0.77 # Adjusted Mutation factor + CR = 0.85 # Modified Crossover probability + + population_size = 82 # Population size slightly adjusted + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation with sigmoid-based adaptation for refined control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.65 + 0.35 * np.tanh(3.5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # More refined acceptance criteria incorporating a sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with sinusoidal modulation for temperature control + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV33.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV33.py new file mode 100644 index 000000000..149b8de63 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV33.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV33: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Advanced temperature and cooling settings for optimal exploration and exploitation + T = 1.2 # Increased initial temperature for broader initial exploration + T_min = 0.0003 # Lower minimum temperature for sustained exploration at later stages + alpha = 0.91 # Slower cooling rate to extend effective search period + + # Mutation and crossover parameters optimized for dynamic environments + F = 0.78 # Adjusted Mutation factor for better exploration/exploitation balance + CR = 0.88 # Enhanced Crossover probability to maintain sufficient genetic diversity + + population_size = 85 # Slightly increased population size for better sampling + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation with a sigmoid-based adaptation for refined control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Sigmoid-based dynamic mutation factor for more nuanced adaptation + dynamic_F = ( + F + * np.exp(-0.05 * T) + * (0.65 + 0.35 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Adaptive acceptance criteria incorporating a more intricate temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.065 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with sinusoidal modulation for refined temperature control + adaptive_cooling = alpha - 0.007 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV34.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV34.py new file mode 100644 index 000000000..5e93f5214 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV34.py @@ -0,0 +1,55 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV34: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters with refined settings + T = 1.18 # Modified starting temperature for broader initial exploration + T_min = 0.0003 # Further lowered minimum temperature for deeper late-stage exploration + alpha = 0.90 # Adjusted cooling rate for a slower reduction in temperature + + # Mutation and crossover parameters finely tuned for dynamic response + F = 0.77 # Adjusted Mutation factor for optimal balance between exploration and exploitation + CR = 0.89 # Higher Crossover probability to ensure better genetic diversity and mix + + population_size = 90 # Moderately increased population size for more robust sampling + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Integrate a more responsive dynamic mutation with a nuanced sigmoid adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Improved dynamic mutation factor for more balanced search adaptation + dynamic_F = ( + F * np.exp(-0.06 * T) * (0.6 + 0.4 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + if trial_fitness < fitness[i] or np.random.rand() < np.exp(-(trial_fitness - fitness[i]) / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with sinusoidal modulation for precise control + adaptive_cooling = alpha - 0.006 * np.sin(4 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV35.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV35.py new file mode 100644 index 000000000..07d04c5a2 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV35.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV35: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and advanced cooling parameters + T = 1.2 # Higher starting temperature to encourage extensive initial exploration + T_min = 0.0002 # Even lower minimum temperature for deeper exploration in the late stages + alpha = 0.95 # Slower cooling rate to extend the search duration and improve convergence + + # Adjust mutation and crossover parameters for dynamic adaptability + F = 0.8 # Increased Mutation factor to enhance exploratory capabilities + CR = 0.9 # High Crossover probability to ensure better gene mixing and diversity + + population_size = 100 # Increased population size to improve coverage of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with advanced adaptive control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Enhanced dynamic mutation factor with exponential decay and sigmoid modulation + dynamic_F = ( + F + * np.exp(-0.05 * T) + * (0.65 + 0.35 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + # Refined acceptance criteria incorporating temperature-based probabilistic acceptance + if trial_fitness < fitness[i] or np.random.rand() < np.exp(-(trial_fitness - fitness[i]) / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy using a sinusoidal modulation pattern + adaptive_cooling = alpha - 0.005 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV36.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV36.py new file mode 100644 index 000000000..046d448d9 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV36.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV36: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 per problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and advanced cooling parameters + T = 1.1 # More aggressive starting temperature for broader initial search + T_min = 0.0001 # Lower minimum temperature to allow finer search towards the end + alpha = 0.93 # Slower cooling rate to enhance thorough exploration across phases + + # Mutation and crossover parameters are fine-tuned for better adaptability + F = 0.78 # Mutation factor adjusted for a strong yet controlled explorative push + CR = 0.85 # Crossover probability adjusted to maintain diversity + + population_size = 85 # Optimized population size to ensure effective coverage and performance + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implementing a dynamic mutation approach with exponential and sigmoid adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adaptive mutation factor that changes based on the temperature and search progress + dynamic_F = ( + F * np.exp(-0.06 * T) * (0.6 + 0.4 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria using a temperature-modulated probabilistic approach + if delta_fitness < 0 or np.random.rand() < np.exp(-delta_fitness / T): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cooling strategy with modified sinusoidal modulation for better temperature control + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV37.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV37.py new file mode 100644 index 000000000..28d17c5ac --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV37.py @@ -0,0 +1,61 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV37: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Adjusted starting temperature to enhance initial exploratory capability + T_min = 0.0001 # Reduced minimum temperature for deeper late-stage optimization + alpha = 0.91 # Slower cooling rate to support prolonged search dynamics + + # Mutation and crossover parameters refined for adaptive balance + F = 0.8 # Slightly increased Mutation factor to boost exploratory ventures + CR = 0.88 # Increased Crossover probability to promote genetic diversity + + population_size = 90 # Increased population size for more robust sampling + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement dynamic mutation with combined exponential and logistic decay + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation dynamically adjusted with temperature and logistic function for precise control + dynamic_F = ( + F + * np.exp(-0.05 * T) + * (0.65 + 0.35 * (1 / (1 + np.exp(-10 * (evaluation_count / self.budget - 0.5))))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria using a dynamic temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.sin(np.pi * evaluation_count / self.budget))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Cooling strategy incorporating adaptive rate with sinusoidal influence + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV38.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV38.py new file mode 100644 index 000000000..dda07d1ea --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV38.py @@ -0,0 +1,63 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV38: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.18 # Slightly increased starting temperature for more aggressive initial exploration + T_min = 0.0003 # Further reduced minimum temperature for extended deep exploration + alpha = 0.93 # Adjusted slower cooling rate to extend effective search duration + + # Refined mutation and crossover parameters + F = 0.77 # Adjusted Mutation factor to encourage robust exploration and prevent premature convergence + CR = 0.89 # Increased Crossover probability to promote diversity + + population_size = ( + 85 # Carefully balanced population size for effective exploration and evaluation efficiency + ) + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement dynamic mutation with sigmoid-based adaptation for precise control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor incorporates a sigmoid function with refined tuning + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.72 + 0.28 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + # Enhanced acceptance criteria using dynamically adjusted temperature function + if trial_fitness < fitness[i] or np.random.rand() < np.exp( + -(trial_fitness - fitness[i]) + / (T * (1 + 0.07 * np.sin(np.pi * evaluation_count / self.budget))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy incorporating a dynamic rate with sinusoidal influence + adaptive_cooling = alpha - 0.007 * np.sin(3.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV39.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV39.py new file mode 100644 index 000000000..ccc4517fe --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV39.py @@ -0,0 +1,55 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV39: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Enhanced initial temperature for more explorative early stages + T = 1.2 + T_min = 0.0003 # Reduced minimum temperature for deeper late-stage exploration + alpha = 0.91 # Further slowed cooling rate + + # Fine-tuned mutation and crossover parameters + F = 0.78 # Adjusted Mutation factor + CR = 0.9 # Higher Crossover probability to enhance diversity + + population_size = 82 # Optimized population size + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation strategy with an enhanced sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = F * (0.75 + 0.25 * np.sin(np.pi * evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + # Utilizing a more dynamic temperature-dependent acceptance probability + if trial_fitness < fitness[i] or np.random.rand() < np.exp( + -(trial_fitness - fitness[i]) + / (T * (1 + 0.05 * np.tanh(3 * (evaluation_count / self.budget - 0.5)))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling increased complexity with phase-based modulation + T *= alpha - 0.005 * np.cos(2 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV4.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV4.py new file mode 100644 index 000000000..b90de4a0e --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV4.py @@ -0,0 +1,59 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV4: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.15 # Starting temperature, slightly increased for extended early exploration + T_min = 0.0005 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.92 # Slower cooling rate to extend the search phase + + # Mutation and crossover parameters are finely tuned + F = 0.75 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.87 # Modified Crossover probability to maintain genetic diversity + + population_size = 80 # Adjusted population size to optimize individual evaluations + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with a sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts with a sigmoid function for refined control + dynamic_F = ( + F * np.exp(-0.07 * T) * (0.7 + 0.3 * np.tanh(3 * (evaluation_count / self.budget - 0.5))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a more sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.06 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.008 * np.cos(2.5 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV40.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV40.py new file mode 100644 index 000000000..566938f0f --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV40.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV40: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initial temperature with further refined controls for late-stage optimization + T = 1.1 + T_min = 0.0001 + alpha = 0.93 # More gradual cooling to provide a deeper search in later stages + + # Mutation and crossover parameters optimized for more aggressive exploration and exploitation + F = 0.77 + CR = 0.88 # Slightly increased for enhanced mixing of genetic information + + population_size = 85 # Slightly larger population for more diverse genetic material + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation factor using logistic function for more precise adjustments + dynamic_F = F * ( + 0.75 + 0.25 * (1 / (1 + np.exp(-10 * (evaluation_count / self.budget - 0.5)))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + + # Enhanced fitness-based acceptance criteria + delta_fitness = trial_fitness - fitness[i] + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling with a more complex modulation pattern for temperature + T *= alpha - 0.007 * np.sin(3 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV41.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV41.py new file mode 100644 index 000000000..6bd97453d --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV41.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV41: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem specification + self.lb = -5.0 # Lower bound as per the problem specification + self.ub = 5.0 # Upper bound as per the problem specification + + def __call__(self, func): + # Initialize temperature and progressively adaptive cooling parameters + T = 1.1 # Slightly reduced initial temperature for more controlled exploration + T_min = 0.0004 # Adjusted minimum temperature for enhanced late-stage fine-tuning + alpha = 0.93 # Optimized cooling rate to extend exploration duration + + # Mutation and crossover parameters fine-tuned for enhanced performance + F = 0.77 # Adjusted Mutation factor to explore more diverse solutions + CR = 0.88 # Adjusted Crossover probability to ensure better gene mixing + + population_size = 85 # Adjusted population size to enhance diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Implement a dynamic mutation strategy with refined control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Dynamic mutation factor adapts using a sigmoid function for precise control + dynamic_F = F * (0.73 + 0.27 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with a temperature-dependent function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling schedule with refined sinusoidal modulation + T *= alpha - 0.01 * np.sin(3 * np.pi * evaluation_count / self.budget) + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV44.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV44.py new file mode 100644 index 000000000..ced94cbca --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV44.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV44: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Increased starting temperature for more aggressive exploration early on + T_min = 0.0003 # Even lower minimum temperature for deeper late-stage exploration + alpha = 0.90 # Slower cooling rate to extend the search phase further + + # Mutation and crossover parameters are finely tuned + F = 0.78 # Slightly increased Mutation factor for enhanced explorative capabilities + CR = 0.89 # Slightly increased Crossover probability to enhance genetic diversity + + population_size = 90 # Increased population size for better sample diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with a sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Mutation factor dynamically adapts with an enhanced sigmoid function for refined control + dynamic_F = F * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with a sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with refined sinusoidal modulation + adaptive_cooling = alpha - 0.009 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV45.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV45.py new file mode 100644 index 000000000..8f8cdeeaa --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV45.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV45: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Higher initial temperature for more aggressive early exploration + T_min = 0.0003 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.93 # Slower cooling rate to extend the search phase further + + # Mutation and crossover parameters are finely tuned + F = 0.78 # Slightly increased Mutation factor for enhanced explorative capabilities + CR = 0.89 # Slightly increased Crossover probability to enhance genetic diversity + + population_size = 85 # Slightly increased population size for better sample diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with a sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + + # Mutation factor dynamically adapts with an enhanced sigmoid function for refined control + dynamic_F = F * (0.7 + 0.3 * np.tanh(5 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria with a sensitive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with refined sinusoidal modulation + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV46.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV46.py new file mode 100644 index 000000000..26e4f70be --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV46.py @@ -0,0 +1,56 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV46: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and enhance cooling parameters + T = 1.18 # Slightly greater initial temperature to increase early exploration + T_min = 0.0004 # Lower minimum temperature to enable deep exploration in late stages + alpha = 0.92 # Slower cooling rate to maintain the search phase longer + + # Adjust mutation and crossover parameters for optimal search + F = 0.77 # Mutation factor adjusted for a more aggressive search + CR = 0.88 # Crossover probability adjusted for enhanced genetic diversity + + population_size = 85 # Incremented population size for better diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation adapting with a refined sigmoid function + dynamic_F = F * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with temperature function adaptation + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Advanced adaptive cooling strategy with sinusoidal modulation + adaptive_cooling = alpha - 0.0075 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV47.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV47.py new file mode 100644 index 000000000..0f6bc0f2a --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV47.py @@ -0,0 +1,54 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV47: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Further refine temperature and cooling parameters + T = 1.17 # Starting temperature, moderately high for broad early exploration + T_min = 0.0004 # Lower minimum temperature for more sustained late-stage exploration + alpha = 0.91 # Slower cooling rate to more extensively utilize the budget + + # Mutation and crossover parameters are finely tuned for enhanced performance + F = 0.78 # Slightly increased mutation factor to push the boundaries of exploration + CR = 0.89 # Slightly higher crossover probability to promote diversity + + population_size = 85 # Adjusted population size for better coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + dynamic_F = F * (0.75 + 0.25 * np.tanh(4 * (evaluation_count / self.budget - 0.5))) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhance adaptive cooling strategy with refined sinusoidal modulation + adaptive_cooling = alpha - 0.0075 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV5.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV5.py new file mode 100644 index 000000000..71ef7c2b2 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV5.py @@ -0,0 +1,60 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV5: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and adaptive cooling parameters for enhanced control + T = 1.2 # Slightly increased initial temperature for broader exploration at start + T_min = 0.0003 # Lowered minimum temperature for prolonged fine exploration + alpha = 0.93 # Slower cooling rate to retain effective search capability longer + + # Finely tuned mutation and crossover parameters for optimal diversity and convergence + F = 0.77 # Adjusted mutation factor to balance between global and local search + CR = 0.88 # Increased crossover probability to ensure robust mixing of solutions + + population_size = 85 # Slightly larger population for better sampling of the search space + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation using a refined sigmoid adaptation strategy + dynamic_F = ( + F + * np.exp(-0.06 * T) + * (0.75 + 0.25 * np.tanh(3.5 * (evaluation_count / self.budget - 0.45))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria incorporating a more responsive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Further enhanced cooling strategy with sinusoidal amplitude modulation + adaptive_cooling = alpha - 0.009 * np.cos(2.6 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV54.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV54.py new file mode 100644 index 000000000..f805c8217 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV54.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV54: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.20 # Adjusted higher initial Temperature for aggressive exploration at the start + T_min = 0.001 # Lower final temperature for finer optimization at later stages + alpha = 0.95 # Higher cooling rate to sustain a longer exploration phase with more gradual cooling + + # Mutation and crossover parameters are finely tuned + F = 0.78 # Adjusted Mutation factor to maintain a better balance between exploration and exploitation + CR = 0.85 # Crossover probability adjusted to ensure robust mixing of features + + population_size = 90 # Increased population size to provide more diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Dynamic mutation approach with logistic growth rate adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adjusting dynamic mutation factor using a logistic model for better control over exploration + dynamic_F = F / (1 + np.exp(-5 * (evaluation_count / self.budget - 0.5))) * (b - c) + mutant = np.clip(a + dynamic_F, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Incorporating a temperature-dependent acceptance probability + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adaptive cooling strategy with a polynomial decay model + adaptive_cooling = alpha - 0.007 * (evaluation_count / self.budget) ** 2 + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV55.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV55.py new file mode 100644 index 000000000..279a83b72 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV55.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV55: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Starting temperature, adjusted for aggressive early exploration + T_min = 0.0001 # Lower minimum temperature for deep late-stage exploration + alpha = 0.95 # Slower cooling rate to extend the effective search phase + + # Mutation and crossover parameters are finely tuned + F = 0.82 # Adjusted Mutation factor for a balance between exploration and exploitation + CR = 0.84 # Modified Crossover probability to ensure diverse genetic mixing + + population_size = 85 # Optimized population size to balance diversity and convergence + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with sigmoid-based adaptation + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation factor dynamically adapts using a logistic function refined for control + dynamic_F = F / (1 + np.exp(-10 * (evaluation_count / self.budget - 0.5))) * (b - c) + mutant = np.clip(a + dynamic_F, self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Refined acceptance criteria incorporate a temperature function adjusted for sensitivity + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.05 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with a sinusoidal modulation for better control + adaptive_cooling = alpha - 0.005 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV56.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV56.py new file mode 100644 index 000000000..b0e2819c9 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV56.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV56: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize temperature and cooling parameters + T = 1.2 # Enhanced starting temperature for more aggressive exploration + T_min = 0.0003 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.90 # Slower cooling rate to extend the effective search phase + + # Mutation and crossover parameters are finely tuned + F = 0.8 # Dynamic mutation factor for a better balance between exploration and exploitation + CR = 0.85 # Crossover probability to ensure genetic diversity + + population_size = 85 # A slightly larger population size for better search space coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Introduce a dynamic mutation approach with enhanced adaptive control + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Mutation adapts more responsively near critical phases of the search + dynamic_F = F / (1 + np.exp(-12 * (evaluation_count / self.budget - 0.5))) + 0.05 + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Enhanced acceptance criteria with improved temperature-based adjustments + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.07 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Adjusted cooling strategy with more refined control based on search progress + adaptive_cooling = alpha - 0.01 * np.sin(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV9.py b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV9.py new file mode 100644 index 000000000..cd55ef14f --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedDynamicPrecisionOptimizerV9.py @@ -0,0 +1,58 @@ +import numpy as np + + +class UltraRefinedDynamicPrecisionOptimizerV9: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Initialize temperature and adaptive cooling parameters for refined control + T = 1.2 # Increased initial temperature for broader initial exploration + T_min = 0.0003 # Lower minimum temperature for deeper late-stage exploration + alpha = 0.94 # Slower cooling rate to maintain search capability longer + + # Fine-tuned mutation and crossover parameters for optimal exploration and convergence + F = 0.78 # Adjusted mutation factor to balance between diversification and intensification + CR = 0.89 # High crossover probability to ensure robust solution mixing + + population_size = 90 # Increased population for better sampling and diversity + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Dynamic mutation using a refined exponential decay and tanh function for precision control + dynamic_F = ( + F * np.exp(-0.05 * T) * (0.8 + 0.2 * np.tanh(4 * (evaluation_count / self.budget - 0.4))) + ) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Advanced acceptance criteria incorporate a more responsive temperature function + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.08 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Enhanced adaptive cooling strategy with sinusoidal amplitude modulation + adaptive_cooling = alpha - 0.007 * np.cos(3 * np.pi * evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py b/nevergrad/optimization/lama/UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py new file mode 100644 index 000000000..f9c41435d --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer.py @@ -0,0 +1,203 @@ +import numpy as np +from scipy.optimize import minimize + + +class UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer: + def __init__( + self, + budget=10000, + init_pop_size=50, + min_pop_size=20, + init_F=0.8, + init_CR=0.9, + w=0.5, + c1=1.5, + c2=1.5, + local_search_budget_ratio=0.2, + memory_size=50, + blend_crossover_prob=0.3, + adaptive_memory=True, + ): + self.budget = budget + self.init_pop_size = init_pop_size + self.min_pop_size = min_pop_size + self.init_F = init_F + self.init_CR = init_CR + self.w = w + self.c1 = c1 + self.c2 = c2 + self.local_search_budget_ratio = local_search_budget_ratio + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.eval_count = 0 + self.memory_size = memory_size + self.blend_crossover_prob = blend_crossover_prob + self.memory = [] + self.adaptive_memory = adaptive_memory + + def local_search(self, x, func, budget): + result = minimize( + func, + x, + method="L-BFGS-B", + bounds=[(self.bounds[0], self.bounds[1])] * self.dim, + options={"maxiter": budget}, + ) + self.eval_count += result.nfev + return result.x, result.fun + + def adaptive_parameters(self, successful_steps): + if len(successful_steps) > 0: + avg_F, avg_CR = np.mean(successful_steps, axis=0) + return max(0.1, avg_F), max(0.1, avg_CR) + else: + return self.init_F, self.init_CR + + def crowding_distance(self, population, fitness): + dist = np.zeros(len(population)) + for i in range(len(population)): + dist[i] = np.sum( + [np.linalg.norm(population[i] - population[j]) for j in range(len(population)) if i != j] + ) + return dist + + def __call__(self, func): + # Initialize population and velocities for PSO + population = np.random.uniform(self.bounds[0], self.bounds[1], (self.init_pop_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + self.eval_count = self.init_pop_size + velocities = np.random.uniform(-1, 1, (self.init_pop_size, self.dim)) + + # Differential weights and crossover probabilities + F_values = np.full(self.init_pop_size, self.init_F) + CR_values = np.full(self.init_pop_size, self.init_CR) + + # Initialize the best known positions + p_best = population.copy() + p_best_fitness = fitness.copy() + g_best = population[np.argmin(fitness)] + g_best_fitness = np.min(fitness) + + local_search_budget = int(self.budget * self.local_search_budget_ratio) + global_search_budget = self.budget - local_search_budget + local_search_budget_per_individual = local_search_budget // max( + self.min_pop_size, 1 + ) # ensure non-zero division + + current_pop_size = self.init_pop_size + successful_steps = [] + + no_improvement_count = 0 + + while self.eval_count < global_search_budget: + for i in range(current_pop_size): + # Adapt parameters + F, CR = self.adaptive_parameters(successful_steps) + + # PSO update + r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim) + velocities[i] = ( + self.w * velocities[i] + + self.c1 * r1 * (p_best[i] - population[i]) + + self.c2 * r2 * (g_best - population[i]) + ) + population[i] = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + + # Mutation + idxs = [idx for idx in range(current_pop_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(a + F * (b - c), self.bounds[0], self.bounds[1]) + + # Crossover + cross_points = np.random.rand(self.dim) < CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Incorporate additional crossover mechanism with adaptive blending + if np.random.rand() < self.blend_crossover_prob: # adaptive blend crossover probability + partner_idx = np.random.choice(idxs) + partner = population[partner_idx] + trial = 0.5 * (trial + partner) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + + # Selection + f_trial = func(trial) + self.eval_count += 1 + if f_trial < fitness[i]: + fitness[i] = f_trial + population[i] = trial + successful_steps.append((F, CR)) + # Limit the memory size + if len(successful_steps) > self.memory_size: + successful_steps.pop(0) + # Self-adapting parameters + F_values[i] = min(F * 1.1, 1.0) + CR_values[i] = min(CR * 1.1, 1.0) + else: + F_values[i] = max(F * 0.9, 0.1) + CR_values[i] = max(CR * 0.9, 0.1) + + # Update personal best + if fitness[i] < p_best_fitness[i]: + p_best[i] = population[i] + p_best_fitness[i] = fitness[i] + + # Update global best + if fitness[i] < g_best_fitness: + g_best = population[i] + g_best_fitness = fitness[i] + no_improvement_count = 0 # reset no improvement count + else: + no_improvement_count += 1 + + if self.eval_count >= global_search_budget: + break + + # Dynamic population resizing based on performance + if no_improvement_count >= (current_pop_size / 2): + current_pop_size = max(current_pop_size - 1, self.min_pop_size) + population = population[:current_pop_size] + fitness = fitness[:current_pop_size] + velocities = velocities[:current_pop_size] + F_values = F_values[:current_pop_size] + CR_values = CR_values[:current_pop_size] + p_best = p_best[:current_pop_size] + p_best_fitness = p_best_fitness[:current_pop_size] + no_improvement_count = 0 + + # If the population size goes below a threshold, reinforce diversity + if current_pop_size < self.init_pop_size: + dist = self.crowding_distance(population, fitness) + new_individual = np.random.uniform(self.bounds[0], self.bounds[1], self.dim) + population = np.vstack([population, new_individual]) + new_fitness = np.array([func(new_individual)]) + fitness = np.append(fitness, new_fitness) + self.eval_count += 1 + velocities = np.vstack([velocities, np.random.uniform(-1, 1, self.dim)]) + F_values = np.append(F_values, self.init_F) + CR_values = np.append(CR_values, self.init_CR) + p_best = np.vstack([p_best, new_individual]) + p_best_fitness = np.append(p_best_fitness, new_fitness) + current_pop_size += 1 + + # Elitism: Ensure the best solutions are always retained + elite_individuals = np.argsort(fitness)[: self.min_pop_size] + elite_population = population[elite_individuals] + elite_fitness = fitness[elite_individuals] + + # Perform local search on the best individuals + for i in range(self.min_pop_size): + if self.eval_count >= self.budget: + break + local_budget = min(local_search_budget_per_individual, self.budget - self.eval_count) + new_x, new_f = self.local_search(elite_population[i], func, local_budget) + if new_f < elite_fitness[i]: + elite_fitness[i] = new_f + elite_population[i] = new_x + + best_idx = np.argmin(elite_fitness) + self.f_opt = elite_fitness[best_idx] + self.x_opt = elite_population[best_idx] + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientHybridOptimizerV5.py b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientHybridOptimizerV5.py new file mode 100644 index 000000000..7170e5043 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientHybridOptimizerV5.py @@ -0,0 +1,82 @@ +import numpy as np + + +class UltraRefinedEvolutionaryGradientHybridOptimizerV5: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.62, + F_range=0.38, + CR=0.92, + elite_fraction=0.15, + mutation_strategy="dynamic", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = ( + mutation_strategy # Type of mutation strategy: 'dynamic', 'best', or 'random' + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "dynamic": + # Dynamically choose the base between best and random elite based on iteration + if evaluations < self.budget * 0.5: + base = best_individual # Focus on exploitation initially + else: + base = population[np.random.choice(elite_indices)] # Shift to exploration later + else: + # Static strategy: always use best or random elite for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F influenced by the stage of optimization + stage = evaluations / self.budget + F = self.F_base + (np.cos(np.pi * stage) - 1) * self.F_range / 2 # Cycle F as cosine function + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV10.py b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV10.py new file mode 100644 index 000000000..15bb159f0 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV10.py @@ -0,0 +1,76 @@ +import numpy as np + + +class UltraRefinedEvolutionaryGradientOptimizerV10: + def __init__( + self, + budget=10000, + population_size=130, + F_base=0.6, + F_range=0.4, + CR=0.88, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within the bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Enhanced main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.8: # Slightly increased probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F, focusing on a more balanced approach + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation strategy + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if the budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV32.py b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV32.py new file mode 100644 index 000000000..0aa921fc0 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedEvolutionaryGradientOptimizerV32.py @@ -0,0 +1,81 @@ +import numpy as np + + +class UltraRefinedEvolutionaryGradientOptimizerV32: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.58, + F_range=0.42, + CR=0.96, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if ( + np.random.rand() < 0.8 + ): # Increased probability to use the current best, aiming for better exploitation + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + # Random elite selection for base + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedHybridEvolutionaryAnnealingOptimizer.py b/nevergrad/optimization/lama/UltraRefinedHybridEvolutionaryAnnealingOptimizer.py new file mode 100644 index 000000000..2aa918237 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedHybridEvolutionaryAnnealingOptimizer.py @@ -0,0 +1,57 @@ +import numpy as np + + +class UltraRefinedHybridEvolutionaryAnnealingOptimizer: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Dimensionality is fixed at 5 as per the problem description + self.lb = -5.0 # Lower bound as per the problem description + self.ub = 5.0 # Upper bound as per the problem description + + def __call__(self, func): + # Adjust initial temperature and cooling rate for optimal balance of exploration and exploitation + T = 1.2 + T_min = 0.005 # Modify minimum temperature for a more gradual cooling + alpha = 0.95 # Adjust cooling rate to allow more thorough exploration at higher temperatures + + # Refine mutation and crossover parameters for improved performance + F = 0.65 # Slightly lower mutation factor to stabilize the search + CR = 0.85 # Adjust crossover probability to ensure effective diversity maintenance + + population_size = 70 # Increase population size to improve initial coverage + pop = np.random.uniform(self.lb, self.ub, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in pop]) + f_opt = np.min(fitness) + x_opt = pop[np.argmin(fitness)] + evaluation_count = population_size + + # Utilize dynamic mutation factor and temperature-dependent simulated annealing acceptance + while evaluation_count < self.budget and T > T_min: + for i in range(population_size): + indices = [idx for idx in range(population_size) if idx != i] + a, b, c = pop[np.random.choice(indices, 3, replace=False)] + # Adjust mutation factor dynamically based on both temperature and iteration progress + dynamic_F = F * (1 - 0.05 * np.log(1 + T)) * (0.5 + 0.5 * (evaluation_count / self.budget)) + mutant = np.clip(a + dynamic_F * (b - c), self.lb, self.ub) + cross_points = np.random.rand(self.dim) < CR + trial = np.where(cross_points, mutant, pop[i]) + + trial_fitness = func(trial) + evaluation_count += 1 + delta_fitness = trial_fitness - fitness[i] + + # Implement a more nuanced acceptance criterion influenced by both delta_fitness and T + if delta_fitness < 0 or np.random.rand() < np.exp( + -delta_fitness / (T * (1 + 0.1 * np.abs(delta_fitness))) + ): + pop[i] = trial + fitness[i] = trial_fitness + if trial_fitness < f_opt: + f_opt = trial_fitness + x_opt = trial + + # Implement an adaptive cooling rate that fine-tunes based on the stage of optimization + adaptive_cooling = alpha - 0.02 * (evaluation_count / self.budget) + T *= adaptive_cooling + + return f_opt, x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV50.py b/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV50.py new file mode 100644 index 000000000..c918dd3ed --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV50.py @@ -0,0 +1,78 @@ +import numpy as np + + +class UltraRefinedHyperStrategicOptimizerV50: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.3, + CR=0.92, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Adjusted base mutation factor for more stable convergence + self.F_range = F_range # Narrower range for mutation factor to control exploration dynamically + self.CR = CR # Slightly reduced crossover probability to allow better exploitation + self.elite_fraction = elite_fraction # Reduced elite fraction to intensify competition + self.mutation_strategy = mutation_strategy # Maintained adaptive strategy for flexibility + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the given bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Select base individual based on strategy + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.8: # Increased probability to choose the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjust F dynamically within a narrower range + F = self.F_base + np.random.normal(0, self.F_range / 2) + + # Mutation using DE/rand/1/bin + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover operation + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Break the loop if the budget is reached + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV54.py b/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV54.py new file mode 100644 index 000000000..bbc1af3bf --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedHyperStrategicOptimizerV54.py @@ -0,0 +1,80 @@ +import numpy as np + + +class UltraRefinedHyperStrategicOptimizerV54: + def __init__( + self, + budget=10000, + population_size=120, + F_base=0.60, + F_range=0.40, + CR=0.93, + elite_fraction=0.08, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Slightly increased base mutation factor for stronger mutations + self.F_range = F_range # Narrowed mutation range for controlled exploration + self.CR = CR # Adjusted crossover probability for a better balance + self.elite_fraction = ( + elite_fraction # Optimized elite fraction to focus on a narrower set of top individuals + ) + self.mutation_strategy = mutation_strategy # Mutation strategy remains adaptive for flexibility + self.dim = 5 # Problem dimensionality + self.lb = -5.0 # Lower bound of the search space + self.ub = 5.0 # Upper bound of the search space + + def __call__(self, func): + # Initialize population within the search space bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + # Adaptive mutation strategy: higher chance to pick the best individual + if self.mutation_strategy == "adaptive": + if np.random.rand() < 0.85: # Increased probability to focus more on the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Adjust mutation factor dynamically + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # Mutation using DE/rand/1/bin scheme + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedPrecisionEvolutionaryOptimizerV43.py b/nevergrad/optimization/lama/UltraRefinedPrecisionEvolutionaryOptimizerV43.py new file mode 100644 index 000000000..93016d42e --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedPrecisionEvolutionaryOptimizerV43.py @@ -0,0 +1,80 @@ +import numpy as np + + +class UltraRefinedPrecisionEvolutionaryOptimizerV43: + def __init__( + self, + budget=10000, + population_size=135, + F_base=0.53, + F_range=0.47, + CR=0.93, + elite_fraction=0.11, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor slightly adjusted + self.F_range = F_range # Slightly wider range for mutation factor adjustment + self.CR = CR # Adjusted crossover probability for balanced exploration and exploitation + self.elite_fraction = elite_fraction # Adjusted elite fraction for a more effective elite influence + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy to dynamically adjust behavior + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Higher probability to select the best individual for mutation base + if np.random.rand() < 0.8: # Increased probability to emphasize exploitation + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Dynamically adjusted mutation factor + F = self.F_base + (np.random.rand() * self.F_range) + + # DE/rand/1 mutation strategy + idxs = [idx for idx in range(self.population_size) if idx not in [i, best_idx]] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover with a slightly adjusted CR + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness evaluation + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Exhaustion of budget check + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedRAMEDS.py b/nevergrad/optimization/lama/UltraRefinedRAMEDS.py new file mode 100644 index 000000000..cdb605216 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedRAMEDS.py @@ -0,0 +1,86 @@ +import numpy as np + + +class UltraRefinedRAMEDS: + def __init__( + self, + budget, + population_size=50, + crossover_rate=0.95, + F_min=0.5, + F_max=0.9, + memory_size=50, + elite_size=10, + ): + self.budget = budget + self.population_size = population_size + self.crossover_rate = crossover_rate + self.F_min = F_min + self.F_max = F_max + self.memory_size = memory_size + self.elite_size = elite_size + + def __call__(self, func): + lb, ub, dimension = -5.0, 5.0, 5 + + # Initialize population and fitness + population = lb + (ub - lb) * np.random.rand(self.population_size, dimension) + fitness = np.array([func(individual) for individual in population]) + + # Initialize memory for good solutions and their fitness + memory = np.empty((self.memory_size, dimension)) + memory_fitness = np.full(self.memory_size, np.inf) + + # Initialize elite solutions and their fitness + elite = np.empty((self.elite_size, dimension)) + elite_fitness = np.full(self.elite_size, np.inf) + + # Best solution tracking + best_idx = np.argmin(fitness) + best_solution = population[best_idx] + best_fitness = fitness[best_idx] + + evaluations = self.population_size + while evaluations < self.budget: + # Update elites based on fitness + elite_indices = np.argsort(fitness)[: self.elite_size] + elite = population[elite_indices].copy() + elite_fitness = fitness[elite_indices].copy() + + for i in range(self.population_size): + # Dynamic mutation factor using a sigmoidal modulation for fine-grained adaptation + F = self.F_min + (self.F_max - self.F_min) / ( + 1 + np.exp(-10 * (evaluations / self.budget - 0.5)) + ) + + # Mutation: DE/rand-to-best/1/binomial + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(c + F * (best_solution - c + a - b), lb, ub) + + # Enhanced Crossover + cross_points = np.random.rand(dimension) < self.crossover_rate + trial = np.where(cross_points, mutant, population[i]) + + # Evaluation and selection + trial_fitness = func(trial) + evaluations += 1 + if trial_fitness < fitness[i]: + # Update memory with a replacement strategy focusing on improvement + if trial_fitness < np.max(memory_fitness): + worst_memory_idx = np.argmax(memory_fitness) + memory[worst_memory_idx] = trial + memory_fitness[worst_memory_idx] = trial_fitness + + population[i] = trial + fitness[i] = trial_fitness + + # Update best solution if found + if trial_fitness < best_fitness: + best_solution = trial + best_fitness = trial_fitness + + if evaluations >= self.budget: + break + + return best_fitness, best_solution diff --git a/nevergrad/optimization/lama/UltraRefinedSpiralDifferentialClimberV3.py b/nevergrad/optimization/lama/UltraRefinedSpiralDifferentialClimberV3.py new file mode 100644 index 000000000..bb47a0c58 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedSpiralDifferentialClimberV3.py @@ -0,0 +1,72 @@ +import numpy as np + + +class UltraRefinedSpiralDifferentialClimberV3: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = np.zeros(self.dim) + + # Initialize population and parameters + population_size = 600 # Further increased size for exploratory coverage + population = np.random.uniform(-5.0, 5.0, (population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + + # Parameters for spiral dynamics and enhanced search techniques + min_radius = 0.005 # Further reduced for finer local search + max_radius = 5.0 # Adjusted to fully utilize the boundary + radius_decay = 0.98 # Slower decay to maintain spiral influence longer + mutation_factor = 0.7 # Further reduced mutation for stability + crossover_probability = 0.9 # Further increased crossover probability + + # Enhanced gradient-like search parameters + step_size = 0.02 # Further reduced step size for more precise adjustments + gradient_steps = 15 # Increased number of gradient steps + + evaluations_left = self.budget - population_size + + while evaluations_left > 0: + for i in range(population_size): + # Differential Evolution Strategy + indices = np.random.choice(population_size, 3, replace=False) + a, b, c = population[indices] + mutant = np.clip(a + mutation_factor * (b - c), -5.0, 5.0) + + # Crossover + cross_points = np.random.rand(self.dim) < crossover_probability + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Apply spiral dynamics + radius = max(min_radius, max_radius * radius_decay ** (self.budget - evaluations_left)) + angle = 2 * np.pi * np.random.rand() + spiral_offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + trial += spiral_offset + trial = np.clip(trial, -5.0, 5.0) + + # Enhanced gradient-like search for local refinement + for _ in range(gradient_steps): + new_trial = trial + np.random.randn(self.dim) * step_size + new_trial = np.clip(new_trial, -5.0, 5.0) + f_new_trial = func(new_trial) + evaluations_left -= 1 + if f_new_trial < func(trial): + trial = new_trial + + # Evaluation + f_trial = func(trial) + evaluations_left -= 1 + + # Selection + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < self.f_opt: + self.f_opt = f_trial + self.x_opt = trial + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/UltraRefinedStrategicEvolutionaryOptimizerV60.py b/nevergrad/optimization/lama/UltraRefinedStrategicEvolutionaryOptimizerV60.py new file mode 100644 index 000000000..2eda92f45 --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedStrategicEvolutionaryOptimizerV60.py @@ -0,0 +1,81 @@ +import numpy as np + + +class UltraRefinedStrategicEvolutionaryOptimizerV60: + def __init__( + self, + budget=10000, + population_size=140, + F_base=0.57, + F_range=0.38, + CR=0.93, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor refined to balance exploration and exploitation + self.F_range = F_range # Smaller range for mutation factor to enhance stability in mutation + self.CR = CR # Crossover probability adjusted for higher robustness + self.elite_fraction = elite_fraction # Increased elite fraction to focus more on the best candidates + self.mutation_strategy = ( + mutation_strategy # Adaptive mutation strategy to dynamically react to fitness landscape + ) + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population within the search bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Probabilistically select either the current best or an elite individual as the base + if ( + np.random.rand() < 0.80 + ): # Increased likelihood to exploit the best individual's information + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + else: + base = population[np.random.choice(elite_indices)] + + # Mutation factor F dynamically adjusted within a refined range + F = self.F_base + (np.random.rand() * 2 - 1) * self.F_range + + # Mutation (DE/rand/1) + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Crossover (binomial) + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Fitness evaluation and selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraRefinedStrategyDE.py b/nevergrad/optimization/lama/UltraRefinedStrategyDE.py new file mode 100644 index 000000000..bd211aadd --- /dev/null +++ b/nevergrad/optimization/lama/UltraRefinedStrategyDE.py @@ -0,0 +1,66 @@ +import numpy as np + + +class UltraRefinedStrategyDE: + def __init__(self, budget=10000, population_size=100, F_base=0.5, F_range=0.35, CR=0.92, strategy="best"): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base Differential weight + self.F_range = F_range # Range to vary F for increased diversity + self.CR = CR # Crossover probability + self.strategy = strategy # Strategy for mutation and selection, focusing on 'best' individuals + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population randomly + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main loop + while evaluations < self.budget: + for i in range(self.population_size): + # Select mutation strategy based on 'best' + if self.strategy == "best": + base = population[best_idx] + else: + base = population[ + np.random.choice([idx for idx in range(self.population_size) if idx != i]) + ] + + # Dynamically adjusting F for more exploration + F = self.F_base + np.random.rand() * self.F_range + + # Mutation using a more aggressive differential variation + idxs = [idx for idx in range(self.population_size) if idx != i] + a, b, c = population[np.random.choice(idxs, 3, replace=False)] + mutant = np.clip(base + F * (a - b + c - base), self.lb, self.ub) + + # Crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_idx = i + best_fitness = f_trial + best_individual = trial + + # Exit if budget exhausted + if evaluations >= self.budget: + break + + # Return the best solution found + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UltraSupremeEvolutionaryGradientHybridOptimizerV7.py b/nevergrad/optimization/lama/UltraSupremeEvolutionaryGradientHybridOptimizerV7.py new file mode 100644 index 000000000..e34764576 --- /dev/null +++ b/nevergrad/optimization/lama/UltraSupremeEvolutionaryGradientHybridOptimizerV7.py @@ -0,0 +1,80 @@ +import numpy as np + + +class UltraSupremeEvolutionaryGradientHybridOptimizerV7: + def __init__( + self, + budget=10000, + population_size=150, + F_base=0.6, + F_range=0.4, + CR=0.92, + elite_fraction=0.15, + mutation_strategy="adaptive", + ): + self.budget = budget + self.population_size = population_size + self.F_base = F_base # Base mutation factor + self.F_range = F_range # Dynamic range for mutation factor adjustment + self.CR = CR # Crossover probability + self.elite_fraction = elite_fraction # Fraction of top performers considered elite + self.mutation_strategy = mutation_strategy # Type of mutation strategy: 'adaptive' or 'random' + self.dim = 5 # Dimensionality of the problem + self.lb = -5.0 # Lower bound of search space + self.ub = 5.0 # Upper bound of search space + + def __call__(self, func): + # Initialize population uniformly within bounds + population = np.random.uniform(self.lb, self.ub, (self.population_size, self.dim)) + fitness = np.array([func(ind) for ind in population]) + evaluations = self.population_size + best_idx = np.argmin(fitness) + best_fitness = fitness[best_idx] + best_individual = population[best_idx] + + # Main optimization loop + while evaluations < self.budget: + elite_size = int(self.elite_fraction * self.population_size) + elite_indices = np.argsort(fitness)[:elite_size] + + for i in range(self.population_size): + if self.mutation_strategy == "adaptive": + # Use best or random elite based on mutation strategy + if np.random.rand() < 0.75: # Fine-tuned probability to select the current best + base = best_individual + else: + base = population[np.random.choice(elite_indices)] + + # Dynamic adjustment of F to be more centered around a higher base value + F = self.F_base + (2 * np.random.rand() - 1) * self.F_range + + # DE/rand/1 mutation using non-elite indices to ensure diversity + idxs = [ + idx + for idx in range(self.population_size) + if idx not in [i, best_idx] + list(elite_indices) + ] + a, b = population[np.random.choice(idxs, 2, replace=False)] + mutant = np.clip(base + F * (a - b), self.lb, self.ub) + + # Binomial crossover + cross_points = np.random.rand(self.dim) < self.CR + if not np.any(cross_points): + cross_points[np.random.randint(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + + # Selection based on fitness + f_trial = func(trial) + evaluations += 1 + if f_trial < fitness[i]: + population[i] = trial + fitness[i] = f_trial + if f_trial < best_fitness: + best_fitness = f_trial + best_individual = trial + + # Check if budget is exhausted + if evaluations >= self.budget: + break + + return best_fitness, best_individual diff --git a/nevergrad/optimization/lama/UnifiedAdaptiveMemeticOptimizer.py b/nevergrad/optimization/lama/UnifiedAdaptiveMemeticOptimizer.py new file mode 100644 index 000000000..627c78863 --- /dev/null +++ b/nevergrad/optimization/lama/UnifiedAdaptiveMemeticOptimizer.py @@ -0,0 +1,155 @@ +import numpy as np +from scipy.optimize import minimize + + +class UnifiedAdaptiveMemeticOptimizer: + def __init__(self, budget=10000, population_size=200): + self.budget = budget + self.population_size = population_size + self.dim = 5 + self.bounds = (-5.0, 5.0) + self.elite_fraction = 0.1 + self.crossover_prob = 0.9 + self.mutation_prob = 0.2 + self.swarm_inertia = 0.6 + self.cognitive_coeff = 1.5 + self.social_coeff = 1.5 + self.strategy_switch_threshold = 0.005 + self.rng = np.random.default_rng() + self.num_strategies = 4 + self.tol = 1e-6 + self.max_iter = 50 + self.mutation_factor = 0.8 + self.performance_memory = [] + self.memory_size = 30 + self.archive_size = 50 + self.learning_rate = 0.1 + self.min_local_search_iters = 10 + + def __call__(self, func): + def evaluate(individual): + return func(np.clip(individual, self.bounds[0], self.bounds[1])) + + population = self.rng.uniform(self.bounds[0], self.bounds[1], (self.population_size, self.dim)) + fitness = np.array([evaluate(ind) for ind in population]) + eval_count = self.population_size + + best_individual = population[np.argmin(fitness)] + best_fitness = np.min(fitness) + self.performance_memory = [best_fitness] * self.memory_size + last_switch_eval_count = 0 + current_strategy = 0 + + velocities = np.zeros((self.population_size, self.dim)) + personal_best = np.copy(population) + personal_best_fitness = np.copy(fitness) + archive = np.copy(population[: self.archive_size]) + + while eval_count < self.budget: + new_population = np.copy(population) + for i in range(self.population_size): + if current_strategy == 0: + # Genetic Algorithm + parent1, parent2 = population[self.rng.choice(self.population_size, 2, replace=False)] + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + child = np.where(cross_points, parent1, parent2) + mutation = self.rng.uniform(self.bounds[0], self.bounds[1], self.dim) + mutate = self.rng.random(self.dim) < self.mutation_prob + trial = np.where(mutate, mutation, child) + elif current_strategy == 1: + # Particle Swarm Optimization + r1 = self.rng.random(self.dim) + r2 = self.rng.random(self.dim) + velocities[i] = ( + self.swarm_inertia * velocities[i] + + self.cognitive_coeff * r1 * (personal_best[i] - population[i]) + + self.social_coeff * r2 * (best_individual - population[i]) + ) + trial = np.clip(population[i] + velocities[i], self.bounds[0], self.bounds[1]) + elif current_strategy == 2: + # Differential Evolution + indices = self.rng.choice(self.population_size, 3, replace=False) + x0, x1, x2 = population[indices] + mutant = np.clip(x0 + self.mutation_factor * (x1 - x2), self.bounds[0], self.bounds[1]) + cross_points = self.rng.random(self.dim) < self.crossover_prob + if not np.any(cross_points): + cross_points[self.rng.integers(0, self.dim)] = True + trial = np.where(cross_points, mutant, population[i]) + else: + # Memetic Algorithm with Local Search + elite_index = np.argmin(fitness) + trial = population[elite_index] + self.learning_rate * (self.rng.random(self.dim) - 0.5) + trial = np.clip(trial, self.bounds[0], self.bounds[1]) + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + + if current_strategy != 3: + trial_fitness = evaluate(trial) + eval_count += 1 + if trial_fitness < fitness[i]: + new_population[i] = trial + fitness[i] = trial_fitness + if trial_fitness < personal_best_fitness[i]: + personal_best[i] = trial + personal_best_fitness[i] = trial_fitness + if trial_fitness < best_fitness: + best_individual = trial + best_fitness = trial_fitness + + population = new_population + + # Memory-based archive learning + if best_fitness not in fitness: + if len(archive) < self.archive_size: + archive = np.vstack([archive, best_individual]) + else: + worst_index = np.argmax(np.array([evaluate(ind) for ind in archive])) + if best_fitness < archive[worst_index]: + archive[worst_index] = best_individual + + elite_count = max(1, int(self.population_size * self.elite_fraction)) + elite_indices = np.argsort(fitness)[:elite_count] + for idx in elite_indices: + res = self.local_search(func, population[idx]) + if res is not None: + eval_count += 1 + if res[1] < fitness[idx]: + population[idx] = res[0] + fitness[idx] = res[1] + if res[1] < best_fitness: + best_individual = res[0] + best_fitness = res[1] + + self.performance_memory.append(best_fitness) + if len(self.performance_memory) > self.memory_size: + self.performance_memory.pop(0) + + if eval_count - last_switch_eval_count >= self.memory_size: + improvement = (self.performance_memory[0] - self.performance_memory[-1]) / max( + 1e-10, self.performance_memory[0] + ) + if improvement < self.strategy_switch_threshold: + current_strategy = (current_strategy + 1) % self.num_strategies + last_switch_eval_count = eval_count + + self.f_opt = best_fitness + self.x_opt = best_individual + return self.f_opt, self.x_opt + + def local_search(self, func, x_start): + res = minimize( + func, + x_start, + method="L-BFGS-B", + bounds=[self.bounds] * self.dim, + tol=self.tol, + options={"maxiter": self.min_local_search_iters}, + ) + if res.success: + return res.x, res.fun + return None diff --git a/nevergrad/optimization/lama/VectorizedRefinedSpiralSearch.py b/nevergrad/optimization/lama/VectorizedRefinedSpiralSearch.py new file mode 100644 index 000000000..9df6fe0db --- /dev/null +++ b/nevergrad/optimization/lama/VectorizedRefinedSpiralSearch.py @@ -0,0 +1,52 @@ +import numpy as np + + +class VectorizedRefinedSpiralSearch: + def __init__(self, budget=10000): + self.budget = budget + self.dim = 5 # Fixed dimensionality as per problem constraints + + def __call__(self, func): + self.f_opt = np.inf + self.x_opt = None + + # Central point of the search space + centroid = np.random.uniform(-5.0, 5.0, self.dim) + radius = 5.0 # Maximum initial radius + angle_increment = 2 * np.pi / 100 # Angle increment for spiral movement + evaluations_left = self.budget + + # Adaptive parameters + radius_decay = 0.97 # Decay radius to focus search over time + angle_speed_increase = 1.02 # Increase angle speed to cover more area + + while evaluations_left > 0: + points = [] + # Generate points in a spiral around the centroid + for _ in range(min(evaluations_left, 100)): + angle = np.random.uniform(0, 2 * np.pi) + offset = radius * np.array([np.cos(angle), np.sin(angle)] + [0] * (self.dim - 2)) + point = centroid + offset + point = np.clip(point, -5.0, 5.0) + points.append(point) + + points = np.array(points) + evaluations_left -= len(points) + + # Evaluate the function at all points generated + results = np.array([func(p) for p in points]) + + # Find the best result + best_idx = np.argmin(results) + if results[best_idx] < self.f_opt: + self.f_opt = results[best_idx] + self.x_opt = points[best_idx] + + # Update the centroid towards the best found point in this iteration + centroid = self.x_opt + + # Adapt search parameters + radius *= radius_decay + angle_increment *= angle_speed_increase + + return self.f_opt, self.x_opt diff --git a/nevergrad/optimization/lama/eQGSA_v2.py b/nevergrad/optimization/lama/eQGSA_v2.py new file mode 100644 index 000000000..1763fa270 --- /dev/null +++ b/nevergrad/optimization/lama/eQGSA_v2.py @@ -0,0 +1,58 @@ +import numpy as np + + +class eQGSA_v2: + def __init__(self, budget=1000, num_agents=10, G0=100.0, alpha=0.1, lb=-5.0, ub=5.0, dimension=5): + self.budget = budget + self.num_agents = num_agents + self.G0 = G0 + self.alpha = alpha + self.lb = lb + self.ub = ub + self.dimension = dimension + + def _initialize_agents(self): + return np.random.uniform(self.lb, self.ub, size=(self.num_agents, self.dimension)) + + def _calculate_masses(self, fitness_values): + return 1 / (fitness_values + 1e-10) + + def _calculate_gravitational_force(self, agent, mass, best_agent): + return self.G0 * mass * (best_agent - agent) + + def _update_agent_position(self, agent, force): + new_pos = agent + self.alpha * force + return np.clip(new_pos, self.lb, self.ub) + + def _objective_function(self, func, x): + return func(x) + + def __call__(self, func): + self.f_opt = np.Inf + self.x_opt = None + + agents = self._initialize_agents() + fitness_values = np.array([self._objective_function(func, agent) for agent in agents]) + masses = self._calculate_masses(fitness_values) + + for _ in range(self.budget): + best_agent_idx = np.argmin(fitness_values) + best_agent = agents[best_agent_idx] + + for i in range(self.num_agents): + force = sum( + [ + self._calculate_gravitational_force(agents[i], masses[i], best_agent) + for i in range(self.num_agents) + if i != best_agent_idx + ] + ) + agents[i] = self._update_agent_position(agents[i], force) + agents[i] = np.clip(agents[i], self.lb, self.ub) + fitness_values[i] = self._objective_function(func, agents[i]) + + if fitness_values[i] < self.f_opt: + self.f_opt = fitness_values[i] + self.x_opt = agents[i] + + return self.f_opt, self.x_opt From cb79fe86a789d18b3699a5d9f29a47d15b58bd31 Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Mon, 24 Jun 2024 17:07:04 +0200 Subject: [PATCH 3/6] po --- nevergrad/optimization/recastlib.py | 36402 ++++++++------------------ 1 file changed, 10981 insertions(+), 25421 deletions(-) diff --git a/nevergrad/optimization/recastlib.py b/nevergrad/optimization/recastlib.py index fc807145f..5c00f9f9d 100644 --- a/nevergrad/optimization/recastlib.py +++ b/nevergrad/optimization/recastlib.py @@ -1025,44547 +1025,30107 @@ def _evaluate(self, X, out, *args, **kwargs): ###### LLAMA ####### lama_register = {} - try: from nevergrad.optimization.lama.AADCCS import AADCCS lama_register["AADCCS"] = AADCCS + res = NonObjectOptimizer(method="LLAMAAADCCS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAADCCS = NonObjectOptimizer(method="LLAMAAADCCS").set_name("LLAMAAADCCS", register=True) except Exception as e: print("AADCCS can not be imported: ", e) - try: from nevergrad.optimization.lama.AADEHLS import AADEHLS lama_register["AADEHLS"] = AADEHLS + res = NonObjectOptimizer(method="LLAMAAADEHLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAADEHLS = NonObjectOptimizer(method="LLAMAAADEHLS").set_name("LLAMAAADEHLS", register=True) except Exception as e: print("AADEHLS can not be imported: ", e) - try: from nevergrad.optimization.lama.AADMEM import AADMEM lama_register["AADMEM"] = AADMEM + res = NonObjectOptimizer(method="LLAMAAADMEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAADMEM = NonObjectOptimizer(method="LLAMAAADMEM").set_name("LLAMAAADMEM", register=True) except Exception as e: print("AADMEM can not be imported: ", e) - try: from nevergrad.optimization.lama.AAES import AAES lama_register["AAES"] = AAES + res = NonObjectOptimizer(method="LLAMAAAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAAES = NonObjectOptimizer(method="LLAMAAAES").set_name("LLAMAAAES", register=True) except Exception as e: print("AAES can not be imported: ", e) - try: from nevergrad.optimization.lama.ACDE import ACDE lama_register["ACDE"] = ACDE + res = NonObjectOptimizer(method="LLAMAACDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAACDE = NonObjectOptimizer(method="LLAMAACDE").set_name("LLAMAACDE", register=True) except Exception as e: print("ACDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ACMDEOBD import ACMDEOBD lama_register["ACMDEOBD"] = ACMDEOBD + res = NonObjectOptimizer(method="LLAMAACMDEOBD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAACMDEOBD = NonObjectOptimizer(method="LLAMAACMDEOBD").set_name("LLAMAACMDEOBD", register=True) except Exception as e: print("ACMDEOBD can not be imported: ", e) - try: from nevergrad.optimization.lama.ADAEDA import ADAEDA lama_register["ADAEDA"] = ADAEDA + res = NonObjectOptimizer(method="LLAMAADAEDA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADAEDA = NonObjectOptimizer(method="LLAMAADAEDA").set_name("LLAMAADAEDA", register=True) except Exception as e: print("ADAEDA can not be imported: ", e) - try: from nevergrad.optimization.lama.ADCE import ADCE lama_register["ADCE"] = ADCE + res = NonObjectOptimizer(method="LLAMAADCE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADCE = NonObjectOptimizer(method="LLAMAADCE").set_name("LLAMAADCE", register=True) except Exception as e: print("ADCE can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEA import ADEA lama_register["ADEA"] = ADEA + res = NonObjectOptimizer(method="LLAMAADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEA = NonObjectOptimizer(method="LLAMAADEA").set_name("LLAMAADEA", register=True) except Exception as e: print("ADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEAS import ADEAS lama_register["ADEAS"] = ADEAS + res = NonObjectOptimizer(method="LLAMAADEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEAS = NonObjectOptimizer(method="LLAMAADEAS").set_name("LLAMAADEAS", register=True) except Exception as e: print("ADEAS can not be imported: ", e) - try: from nevergrad.optimization.lama.ADECMS import ADECMS lama_register["ADECMS"] = ADECMS + res = NonObjectOptimizer(method="LLAMAADECMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADECMS = NonObjectOptimizer(method="LLAMAADECMS").set_name("LLAMAADECMS", register=True) except Exception as e: print("ADECMS can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEDCA import ADEDCA lama_register["ADEDCA"] = ADEDCA + res = NonObjectOptimizer(method="LLAMAADEDCA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEDCA = NonObjectOptimizer(method="LLAMAADEDCA").set_name("LLAMAADEDCA", register=True) except Exception as e: print("ADEDCA can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEDE import ADEDE lama_register["ADEDE"] = ADEDE + res = NonObjectOptimizer(method="LLAMAADEDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEDE = NonObjectOptimizer(method="LLAMAADEDE").set_name("LLAMAADEDE", register=True) except Exception as e: print("ADEDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEDLR import ADEDLR lama_register["ADEDLR"] = ADEDLR + res = NonObjectOptimizer(method="LLAMAADEDLR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEDLR = NonObjectOptimizer(method="LLAMAADEDLR").set_name("LLAMAADEDLR", register=True) except Exception as e: print("ADEDLR can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEDM import ADEDM lama_register["ADEDM"] = ADEDM + res = NonObjectOptimizer(method="LLAMAADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEDM = NonObjectOptimizer(method="LLAMAADEDM").set_name("LLAMAADEDM", register=True) except Exception as e: print("ADEDM can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEEM import ADEEM lama_register["ADEEM"] = ADEEM + res = NonObjectOptimizer(method="LLAMAADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEEM = NonObjectOptimizer(method="LLAMAADEEM").set_name("LLAMAADEEM", register=True) except Exception as e: print("ADEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEGE import ADEGE lama_register["ADEGE"] = ADEGE + res = NonObjectOptimizer(method="LLAMAADEGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEGE = NonObjectOptimizer(method="LLAMAADEGE").set_name("LLAMAADEGE", register=True) except Exception as e: print("ADEGE can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEGM import ADEGM lama_register["ADEGM"] = ADEGM + res = NonObjectOptimizer(method="LLAMAADEGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEGM = NonObjectOptimizer(method="LLAMAADEGM").set_name("LLAMAADEGM", register=True) except Exception as e: print("ADEGM can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEGS import ADEGS lama_register["ADEGS"] = ADEGS + res = NonObjectOptimizer(method="LLAMAADEGS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEGS = NonObjectOptimizer(method="LLAMAADEGS").set_name("LLAMAADEGS", register=True) except Exception as e: print("ADEGS can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEM import ADEM lama_register["ADEM"] = ADEM + res = NonObjectOptimizer(method="LLAMAADEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEM = NonObjectOptimizer(method="LLAMAADEM").set_name("LLAMAADEM", register=True) except Exception as e: print("ADEM can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEMSC import ADEMSC lama_register["ADEMSC"] = ADEMSC + res = NonObjectOptimizer(method="LLAMAADEMSC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEMSC = NonObjectOptimizer(method="LLAMAADEMSC").set_name("LLAMAADEMSC", register=True) except Exception as e: print("ADEMSC can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEPF import ADEPF lama_register["ADEPF"] = ADEPF + res = NonObjectOptimizer(method="LLAMAADEPF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEPF = NonObjectOptimizer(method="LLAMAADEPF").set_name("LLAMAADEPF", register=True) except Exception as e: print("ADEPF can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEPM import ADEPM lama_register["ADEPM"] = ADEPM + res = NonObjectOptimizer(method="LLAMAADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEPM = NonObjectOptimizer(method="LLAMAADEPM").set_name("LLAMAADEPM", register=True) except Exception as e: print("ADEPM can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEPMC import ADEPMC lama_register["ADEPMC"] = ADEPMC + res = NonObjectOptimizer(method="LLAMAADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEPMC = NonObjectOptimizer(method="LLAMAADEPMC").set_name("LLAMAADEPMC", register=True) except Exception as e: print("ADEPMC can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEPMI import ADEPMI lama_register["ADEPMI"] = ADEPMI + res = NonObjectOptimizer(method="LLAMAADEPMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEPMI = NonObjectOptimizer(method="LLAMAADEPMI").set_name("LLAMAADEPMI", register=True) except Exception as e: print("ADEPMI can not be imported: ", e) - try: from nevergrad.optimization.lama.ADEPR import ADEPR lama_register["ADEPR"] = ADEPR + res = NonObjectOptimizer(method="LLAMAADEPR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADEPR = NonObjectOptimizer(method="LLAMAADEPR").set_name("LLAMAADEPR", register=True) except Exception as e: print("ADEPR can not be imported: ", e) - try: from nevergrad.optimization.lama.ADES import ADES lama_register["ADES"] = ADES + res = NonObjectOptimizer(method="LLAMAADES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADES = NonObjectOptimizer(method="LLAMAADES").set_name("LLAMAADES", register=True) except Exception as e: print("ADES can not be imported: ", e) - try: from nevergrad.optimization.lama.ADESA import ADESA lama_register["ADESA"] = ADESA + res = NonObjectOptimizer(method="LLAMAADESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADESA = NonObjectOptimizer(method="LLAMAADESA").set_name("LLAMAADESA", register=True) except Exception as e: print("ADESA can not be imported: ", e) - try: from nevergrad.optimization.lama.ADE_FPC import ADE_FPC lama_register["ADE_FPC"] = ADE_FPC + res = NonObjectOptimizer(method="LLAMAADE_FPC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADE_FPC = NonObjectOptimizer(method="LLAMAADE_FPC").set_name("LLAMAADE_FPC", register=True) except Exception as e: print("ADE_FPC can not be imported: ", e) - try: from nevergrad.optimization.lama.ADGD import ADGD lama_register["ADGD"] = ADGD + res = NonObjectOptimizer(method="LLAMAADGD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADGD = NonObjectOptimizer(method="LLAMAADGD").set_name("LLAMAADGD", register=True) except Exception as e: print("ADGD can not be imported: ", e) - try: from nevergrad.optimization.lama.ADGE import ADGE lama_register["ADGE"] = ADGE + res = NonObjectOptimizer(method="LLAMAADGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADGE = NonObjectOptimizer(method="LLAMAADGE").set_name("LLAMAADGE", register=True) except Exception as e: print("ADGE can not be imported: ", e) - try: from nevergrad.optimization.lama.ADMDE import ADMDE lama_register["ADMDE"] = ADMDE + res = NonObjectOptimizer(method="LLAMAADMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADMDE = NonObjectOptimizer(method="LLAMAADMDE").set_name("LLAMAADMDE", register=True) except Exception as e: print("ADMDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ADMEMS import ADMEMS lama_register["ADMEMS"] = ADMEMS + res = NonObjectOptimizer(method="LLAMAADMEMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADMEMS = NonObjectOptimizer(method="LLAMAADMEMS").set_name("LLAMAADMEMS", register=True) except Exception as e: print("ADMEMS can not be imported: ", e) - try: from nevergrad.optimization.lama.ADSDiffEvo import ADSDiffEvo lama_register["ADSDiffEvo"] = ADSDiffEvo + res = NonObjectOptimizer(method="LLAMAADSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADSDiffEvo = NonObjectOptimizer(method="LLAMAADSDiffEvo").set_name("LLAMAADSDiffEvo", register=True) except Exception as e: print("ADSDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.ADSEA import ADSEA lama_register["ADSEA"] = ADSEA + res = NonObjectOptimizer(method="LLAMAADSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADSEA = NonObjectOptimizer(method="LLAMAADSEA").set_name("LLAMAADSEA", register=True) except Exception as e: print("ADSEA can not be imported: ", e) - try: from nevergrad.optimization.lama.ADSEAPlus import ADSEAPlus lama_register["ADSEAPlus"] = ADSEAPlus + res = NonObjectOptimizer(method="LLAMAADSEAPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAADSEAPlus = NonObjectOptimizer(method="LLAMAADSEAPlus").set_name("LLAMAADSEAPlus", register=True) except Exception as e: print("ADSEAPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.AGBES import AGBES lama_register["AGBES"] = AGBES + res = NonObjectOptimizer(method="LLAMAAGBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGBES = NonObjectOptimizer(method="LLAMAAGBES").set_name("LLAMAAGBES", register=True) except Exception as e: print("AGBES can not be imported: ", e) - try: from nevergrad.optimization.lama.AGCES import AGCES lama_register["AGCES"] = AGCES + res = NonObjectOptimizer(method="LLAMAAGCES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGCES = NonObjectOptimizer(method="LLAMAAGCES").set_name("LLAMAAGCES", register=True) except Exception as e: print("AGCES can not be imported: ", e) - try: from nevergrad.optimization.lama.AGDE import AGDE lama_register["AGDE"] = AGDE + res = NonObjectOptimizer(method="LLAMAAGDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGDE = NonObjectOptimizer(method="LLAMAAGDE").set_name("LLAMAAGDE", register=True) except Exception as e: print("AGDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AGDELS import AGDELS lama_register["AGDELS"] = AGDELS + res = NonObjectOptimizer(method="LLAMAAGDELS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGDELS = NonObjectOptimizer(method="LLAMAAGDELS").set_name("LLAMAAGDELS", register=True) except Exception as e: print("AGDELS can not be imported: ", e) - try: from nevergrad.optimization.lama.AGDiffEvo import AGDiffEvo lama_register["AGDiffEvo"] = AGDiffEvo + res = NonObjectOptimizer(method="LLAMAAGDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGDiffEvo = NonObjectOptimizer(method="LLAMAAGDiffEvo").set_name("LLAMAAGDiffEvo", register=True) except Exception as e: print("AGDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.AGEA import AGEA lama_register["AGEA"] = AGEA + res = NonObjectOptimizer(method="LLAMAAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGEA = NonObjectOptimizer(method="LLAMAAGEA").set_name("LLAMAAGEA", register=True) except Exception as e: print("AGEA can not be imported: ", e) - try: from nevergrad.optimization.lama.AGESA import AGESA lama_register["AGESA"] = AGESA + res = NonObjectOptimizer(method="LLAMAAGESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGESA = NonObjectOptimizer(method="LLAMAAGESA").set_name("LLAMAAGESA", register=True) except Exception as e: print("AGESA can not be imported: ", e) - try: from nevergrad.optimization.lama.AGGE import AGGE lama_register["AGGE"] = AGGE + res = NonObjectOptimizer(method="LLAMAAGGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGGE = NonObjectOptimizer(method="LLAMAAGGE").set_name("LLAMAAGGE", register=True) except Exception as e: print("AGGE can not be imported: ", e) - try: from nevergrad.optimization.lama.AGGES import AGGES lama_register["AGGES"] = AGGES + res = NonObjectOptimizer(method="LLAMAAGGES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGGES = NonObjectOptimizer(method="LLAMAAGGES").set_name("LLAMAAGGES", register=True) except Exception as e: print("AGGES can not be imported: ", e) - try: from nevergrad.optimization.lama.AGIDE import AGIDE lama_register["AGIDE"] = AGIDE + res = NonObjectOptimizer(method="LLAMAAGIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAGIDE = NonObjectOptimizer(method="LLAMAAGIDE").set_name("LLAMAAGIDE", register=True) except Exception as e: print("AGIDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AHDEMI import AHDEMI lama_register["AHDEMI"] = AHDEMI + res = NonObjectOptimizer(method="LLAMAAHDEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAHDEMI = NonObjectOptimizer(method="LLAMAAHDEMI").set_name("LLAMAAHDEMI", register=True) except Exception as e: print("AHDEMI can not be imported: ", e) - try: from nevergrad.optimization.lama.ALDEEM import ALDEEM lama_register["ALDEEM"] = ALDEEM + res = NonObjectOptimizer(method="LLAMAALDEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAALDEEM = NonObjectOptimizer(method="LLAMAALDEEM").set_name("LLAMAALDEEM", register=True) except Exception as e: print("ALDEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.ALES import ALES lama_register["ALES"] = ALES + res = NonObjectOptimizer(method="LLAMAALES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAALES = NonObjectOptimizer(method="LLAMAALES").set_name("LLAMAALES", register=True) except Exception as e: print("ALES can not be imported: ", e) - try: from nevergrad.optimization.lama.ALSS import ALSS lama_register["ALSS"] = ALSS + res = NonObjectOptimizer(method="LLAMAALSS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAALSS = NonObjectOptimizer(method="LLAMAALSS").set_name("LLAMAALSS", register=True) except Exception as e: print("ALSS can not be imported: ", e) - try: from nevergrad.optimization.lama.AMDE import AMDE lama_register["AMDE"] = AMDE + res = NonObjectOptimizer(method="LLAMAAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAMDE = NonObjectOptimizer(method="LLAMAAMDE").set_name("LLAMAAMDE", register=True) except Exception as e: print("AMDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AMES import AMES lama_register["AMES"] = AMES + res = NonObjectOptimizer(method="LLAMAAMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAMES = NonObjectOptimizer(method="LLAMAAMES").set_name("LLAMAAMES", register=True) except Exception as e: print("AMES can not be imported: ", e) - try: from nevergrad.optimization.lama.AMSDiffEvo import AMSDiffEvo lama_register["AMSDiffEvo"] = AMSDiffEvo + res = NonObjectOptimizer(method="LLAMAAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAMSDiffEvo = NonObjectOptimizer(method="LLAMAAMSDiffEvo").set_name("LLAMAAMSDiffEvo", register=True) except Exception as e: print("AMSDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.AMSEA import AMSEA lama_register["AMSEA"] = AMSEA + res = NonObjectOptimizer(method="LLAMAAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAMSEA = NonObjectOptimizer(method="LLAMAAMSEA").set_name("LLAMAAMSEA", register=True) except Exception as e: print("AMSEA can not be imported: ", e) - try: from nevergrad.optimization.lama.AN_MDEPSO import AN_MDEPSO lama_register["AN_MDEPSO"] = AN_MDEPSO + res = NonObjectOptimizer(method="LLAMAAN_MDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAN_MDEPSO = NonObjectOptimizer(method="LLAMAAN_MDEPSO").set_name("LLAMAAN_MDEPSO", register=True) except Exception as e: print("AN_MDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.APBES import APBES lama_register["APBES"] = APBES + res = NonObjectOptimizer(method="LLAMAAPBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAPBES = NonObjectOptimizer(method="LLAMAAPBES").set_name("LLAMAAPBES", register=True) except Exception as e: print("APBES can not be imported: ", e) - try: from nevergrad.optimization.lama.APDE import APDE lama_register["APDE"] = APDE + res = NonObjectOptimizer(method="LLAMAAPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAPDE = NonObjectOptimizer(method="LLAMAAPDE").set_name("LLAMAAPDE", register=True) except Exception as e: print("APDE can not be imported: ", e) - try: from nevergrad.optimization.lama.APDETL import APDETL lama_register["APDETL"] = APDETL + res = NonObjectOptimizer(method="LLAMAAPDETL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAPDETL = NonObjectOptimizer(method="LLAMAAPDETL").set_name("LLAMAAPDETL", register=True) except Exception as e: print("APDETL can not be imported: ", e) - try: from nevergrad.optimization.lama.APES import APES lama_register["APES"] = APES + res = NonObjectOptimizer(method="LLAMAAPES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAPES = NonObjectOptimizer(method="LLAMAAPES").set_name("LLAMAAPES", register=True) except Exception as e: print("APES can not be imported: ", e) - try: from nevergrad.optimization.lama.AQAPSO_LS_DIW import AQAPSO_LS_DIW lama_register["AQAPSO_LS_DIW"] = AQAPSO_LS_DIW - LLAMAAQAPSO_LS_DIW = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW").set_name( - "LLAMAAQAPSO_LS_DIW", register=True - ) + res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAQAPSO_LS_DIW = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW").set_name("LLAMAAQAPSO_LS_DIW", register=True) except Exception as e: print("AQAPSO_LS_DIW can not be imported: ", e) - try: from nevergrad.optimization.lama.AQAPSO_LS_DIW_AP import AQAPSO_LS_DIW_AP lama_register["AQAPSO_LS_DIW_AP"] = AQAPSO_LS_DIW_AP - LLAMAAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP").set_name( - "LLAMAAQAPSO_LS_DIW_AP", register=True - ) + res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP").set_name("LLAMAAQAPSO_LS_DIW_AP", register=True) except Exception as e: print("AQAPSO_LS_DIW_AP can not be imported: ", e) - try: from nevergrad.optimization.lama.ARDLS import ARDLS lama_register["ARDLS"] = ARDLS + res = NonObjectOptimizer(method="LLAMAARDLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAARDLS = NonObjectOptimizer(method="LLAMAARDLS").set_name("LLAMAARDLS", register=True) except Exception as e: print("ARDLS can not be imported: ", e) - try: from nevergrad.optimization.lama.ARESM import ARESM lama_register["ARESM"] = ARESM + res = NonObjectOptimizer(method="LLAMAARESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAARESM = NonObjectOptimizer(method="LLAMAARESM").set_name("LLAMAARESM", register=True) except Exception as e: print("ARESM can not be imported: ", e) - try: from nevergrad.optimization.lama.ARISA import ARISA lama_register["ARISA"] = ARISA + res = NonObjectOptimizer(method="LLAMAARISA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAARISA = NonObjectOptimizer(method="LLAMAARISA").set_name("LLAMAARISA", register=True) except Exception as e: print("ARISA can not be imported: ", e) - try: from nevergrad.optimization.lama.ASADEA import ASADEA lama_register["ASADEA"] = ASADEA + res = NonObjectOptimizer(method="LLAMAASADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAASADEA = NonObjectOptimizer(method="LLAMAASADEA").set_name("LLAMAASADEA", register=True) except Exception as e: print("ASADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.ASO import ASO lama_register["ASO"] = ASO + res = NonObjectOptimizer(method="LLAMAASO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAASO = NonObjectOptimizer(method="LLAMAASO").set_name("LLAMAASO", register=True) except Exception as e: print("ASO can not be imported: ", e) - try: from nevergrad.optimization.lama.AVDE import AVDE lama_register["AVDE"] = AVDE + res = NonObjectOptimizer(method="LLAMAAVDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAAVDE = NonObjectOptimizer(method="LLAMAAVDE").set_name("LLAMAAVDE", register=True) except Exception as e: print("AVDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AcceleratedAdaptivePrecisionCrossoverEvolution import ( - AcceleratedAdaptivePrecisionCrossoverEvolution, - ) + from nevergrad.optimization.lama.AcceleratedAdaptivePrecisionCrossoverEvolution import AcceleratedAdaptivePrecisionCrossoverEvolution - lama_register["AcceleratedAdaptivePrecisionCrossoverEvolution"] = ( - AcceleratedAdaptivePrecisionCrossoverEvolution - ) - LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( - method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution" - ).set_name("LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution", register=True) + lama_register["AcceleratedAdaptivePrecisionCrossoverEvolution"] = AcceleratedAdaptivePrecisionCrossoverEvolution + res = NonObjectOptimizer(method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer(method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution").set_name("LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution", register=True) except Exception as e: print("AcceleratedAdaptivePrecisionCrossoverEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveAnnealingDifferentialEvolution import ( - AdaptiveAnnealingDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveAnnealingDifferentialEvolution import AdaptiveAnnealingDifferentialEvolution lama_register["AdaptiveAnnealingDifferentialEvolution"] = AdaptiveAnnealingDifferentialEvolution - LLAMAAdaptiveAnnealingDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveAnnealingDifferentialEvolution" - ).set_name("LLAMAAdaptiveAnnealingDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveAnnealingDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveAnnealingDifferentialEvolution").set_name("LLAMAAdaptiveAnnealingDifferentialEvolution", register=True) except Exception as e: print("AdaptiveAnnealingDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveArchiveDE import AdaptiveArchiveDE lama_register["AdaptiveArchiveDE"] = AdaptiveArchiveDE - LLAMAAdaptiveArchiveDE = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE").set_name( - "LLAMAAdaptiveArchiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveArchiveDE = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE").set_name("LLAMAAdaptiveArchiveDE", register=True) except Exception as e: print("AdaptiveArchiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCMADiffEvoPSO import AdaptiveCMADiffEvoPSO lama_register["AdaptiveCMADiffEvoPSO"] = AdaptiveCMADiffEvoPSO - LLAMAAdaptiveCMADiffEvoPSO = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO").set_name( - "LLAMAAdaptiveCMADiffEvoPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCMADiffEvoPSO = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO").set_name("LLAMAAdaptiveCMADiffEvoPSO", register=True) except Exception as e: print("AdaptiveCMADiffEvoPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveChaoticFireworksOptimization import ( - AdaptiveChaoticFireworksOptimization, - ) + from nevergrad.optimization.lama.AdaptiveChaoticFireworksOptimization import AdaptiveChaoticFireworksOptimization lama_register["AdaptiveChaoticFireworksOptimization"] = AdaptiveChaoticFireworksOptimization - LLAMAAdaptiveChaoticFireworksOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveChaoticFireworksOptimization" - ).set_name("LLAMAAdaptiveChaoticFireworksOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveChaoticFireworksOptimization = NonObjectOptimizer(method="LLAMAAdaptiveChaoticFireworksOptimization").set_name("LLAMAAdaptiveChaoticFireworksOptimization", register=True) except Exception as e: print("AdaptiveChaoticFireworksOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveClusterBasedHybridOptimization import ( - AdaptiveClusterBasedHybridOptimization, - ) + from nevergrad.optimization.lama.AdaptiveClusterBasedHybridOptimization import AdaptiveClusterBasedHybridOptimization lama_register["AdaptiveClusterBasedHybridOptimization"] = AdaptiveClusterBasedHybridOptimization - LLAMAAdaptiveClusterBasedHybridOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveClusterBasedHybridOptimization" - ).set_name("LLAMAAdaptiveClusterBasedHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveClusterBasedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveClusterBasedHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveClusterBasedHybridOptimization").set_name("LLAMAAdaptiveClusterBasedHybridOptimization", register=True) except Exception as e: print("AdaptiveClusterBasedHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveClusterHybridOptimizationV5 import ( - AdaptiveClusterHybridOptimizationV5, - ) + from nevergrad.optimization.lama.AdaptiveClusterHybridOptimizationV5 import AdaptiveClusterHybridOptimizationV5 lama_register["AdaptiveClusterHybridOptimizationV5"] = AdaptiveClusterHybridOptimizationV5 - LLAMAAdaptiveClusterHybridOptimizationV5 = NonObjectOptimizer( - method="LLAMAAdaptiveClusterHybridOptimizationV5" - ).set_name("LLAMAAdaptiveClusterHybridOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveClusterHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveClusterHybridOptimizationV5 = NonObjectOptimizer(method="LLAMAAdaptiveClusterHybridOptimizationV5").set_name("LLAMAAdaptiveClusterHybridOptimizationV5", register=True) except Exception as e: print("AdaptiveClusterHybridOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveClusteredDifferentialEvolutionV2 import ( - AdaptiveClusteredDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.AdaptiveClusteredDifferentialEvolutionV2 import AdaptiveClusteredDifferentialEvolutionV2 lama_register["AdaptiveClusteredDifferentialEvolutionV2"] = AdaptiveClusteredDifferentialEvolutionV2 - LLAMAAdaptiveClusteredDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAAdaptiveClusteredDifferentialEvolutionV2" - ).set_name("LLAMAAdaptiveClusteredDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveClusteredDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveClusteredDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveClusteredDifferentialEvolutionV2").set_name("LLAMAAdaptiveClusteredDifferentialEvolutionV2", register=True) except Exception as e: print("AdaptiveClusteredDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCohortHarmonizationOptimization import ( - AdaptiveCohortHarmonizationOptimization, - ) + from nevergrad.optimization.lama.AdaptiveCohortHarmonizationOptimization import AdaptiveCohortHarmonizationOptimization lama_register["AdaptiveCohortHarmonizationOptimization"] = AdaptiveCohortHarmonizationOptimization - LLAMAAdaptiveCohortHarmonizationOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveCohortHarmonizationOptimization" - ).set_name("LLAMAAdaptiveCohortHarmonizationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCohortHarmonizationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCohortHarmonizationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveCohortHarmonizationOptimization").set_name("LLAMAAdaptiveCohortHarmonizationOptimization", register=True) except Exception as e: print("AdaptiveCohortHarmonizationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCohortMemeticAlgorithm import AdaptiveCohortMemeticAlgorithm lama_register["AdaptiveCohortMemeticAlgorithm"] = AdaptiveCohortMemeticAlgorithm - LLAMAAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveCohortMemeticAlgorithm" - ).set_name("LLAMAAdaptiveCohortMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCohortMemeticAlgorithm").set_name("LLAMAAdaptiveCohortMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveCohortMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveControlledMemoryAnnealing import ( - AdaptiveControlledMemoryAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveControlledMemoryAnnealing import AdaptiveControlledMemoryAnnealing lama_register["AdaptiveControlledMemoryAnnealing"] = AdaptiveControlledMemoryAnnealing - LLAMAAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveControlledMemoryAnnealing" - ).set_name("LLAMAAdaptiveControlledMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveControlledMemoryAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveControlledMemoryAnnealing").set_name("LLAMAAdaptiveControlledMemoryAnnealing", register=True) except Exception as e: print("AdaptiveControlledMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialEvolution import ( - AdaptiveCooperativeDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialEvolution import AdaptiveCooperativeDifferentialEvolution lama_register["AdaptiveCooperativeDifferentialEvolution"] = AdaptiveCooperativeDifferentialEvolution - LLAMAAdaptiveCooperativeDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCooperativeDifferentialEvolution" - ).set_name("LLAMAAdaptiveCooperativeDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCooperativeDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialEvolution").set_name("LLAMAAdaptiveCooperativeDifferentialEvolution", register=True) except Exception as e: print("AdaptiveCooperativeDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialMemeticAlgorithm import ( - AdaptiveCooperativeDifferentialMemeticAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialMemeticAlgorithm import AdaptiveCooperativeDifferentialMemeticAlgorithm - lama_register["AdaptiveCooperativeDifferentialMemeticAlgorithm"] = ( - AdaptiveCooperativeDifferentialMemeticAlgorithm - ) - LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm" - ).set_name("LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm", register=True) + lama_register["AdaptiveCooperativeDifferentialMemeticAlgorithm"] = AdaptiveCooperativeDifferentialMemeticAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm").set_name("LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveCooperativeDifferentialMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCovarianceGradientSearch import AdaptiveCovarianceGradientSearch lama_register["AdaptiveCovarianceGradientSearch"] = AdaptiveCovarianceGradientSearch - LLAMAAdaptiveCovarianceGradientSearch = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceGradientSearch" - ).set_name("LLAMAAdaptiveCovarianceGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceGradientSearch").set_name("LLAMAAdaptiveCovarianceGradientSearch", register=True) except Exception as e: print("AdaptiveCovarianceGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolution import ( - AdaptiveCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolution import AdaptiveCovarianceMatrixDifferentialEvolution - lama_register["AdaptiveCovarianceMatrixDifferentialEvolution"] = ( - AdaptiveCovarianceMatrixDifferentialEvolution - ) - LLAMAAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolution", register=True) + lama_register["AdaptiveCovarianceMatrixDifferentialEvolution"] = AdaptiveCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("AdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching import ( - AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching import AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching - lama_register["AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching"] = ( - AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching - ) - LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching" - ).set_name( - "LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching", register=True - ) + lama_register["AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching"] = AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching").set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching", register=True) except Exception as e: - print( - "AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching can not be imported: ", e - ) - + print("AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching can not be imported: ", e) try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolution import ( - AdaptiveCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolution import AdaptiveCovarianceMatrixEvolution lama_register["AdaptiveCovarianceMatrixEvolution"] = AdaptiveCovarianceMatrixEvolution - LLAMAAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixEvolution" - ).set_name("LLAMAAdaptiveCovarianceMatrixEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolution").set_name("LLAMAAdaptiveCovarianceMatrixEvolution", register=True) except Exception as e: print("AdaptiveCovarianceMatrixEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionStrategy import ( - AdaptiveCovarianceMatrixEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionStrategy import AdaptiveCovarianceMatrixEvolutionStrategy lama_register["AdaptiveCovarianceMatrixEvolutionStrategy"] = AdaptiveCovarianceMatrixEvolutionStrategy - LLAMAAdaptiveCovarianceMatrixEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy" - ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy").set_name("LLAMAAdaptiveCovarianceMatrixEvolutionStrategy", register=True) except Exception as e: print("AdaptiveCovarianceMatrixEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation import ( - AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation import AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation - lama_register["AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation"] = ( - AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation - ) - LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation" - ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation", register=True) + lama_register["AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation"] = AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation").set_name("LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation", register=True) except Exception as e: print("AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptation import ( - AdaptiveCovarianceMatrixSelfAdaptation, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptation import AdaptiveCovarianceMatrixSelfAdaptation lama_register["AdaptiveCovarianceMatrixSelfAdaptation"] = AdaptiveCovarianceMatrixSelfAdaptation - LLAMAAdaptiveCovarianceMatrixSelfAdaptation = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation" - ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixSelfAdaptation = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation").set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptation", register=True) except Exception as e: print("AdaptiveCovarianceMatrixSelfAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptationV2 import ( - AdaptiveCovarianceMatrixSelfAdaptationV2, - ) + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptationV2 import AdaptiveCovarianceMatrixSelfAdaptationV2 lama_register["AdaptiveCovarianceMatrixSelfAdaptationV2"] = AdaptiveCovarianceMatrixSelfAdaptationV2 - LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2" - ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2 = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2").set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2", register=True) except Exception as e: print("AdaptiveCovarianceMatrixSelfAdaptationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCrossoverDEPSO import AdaptiveCrossoverDEPSO lama_register["AdaptiveCrossoverDEPSO"] = AdaptiveCrossoverDEPSO - LLAMAAdaptiveCrossoverDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO").set_name( - "LLAMAAdaptiveCrossoverDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCrossoverDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO").set_name("LLAMAAdaptiveCrossoverDEPSO", register=True) except Exception as e: print("AdaptiveCrossoverDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCrossoverElitistStrategyV6 import ( - AdaptiveCrossoverElitistStrategyV6, - ) + from nevergrad.optimization.lama.AdaptiveCrossoverElitistStrategyV6 import AdaptiveCrossoverElitistStrategyV6 lama_register["AdaptiveCrossoverElitistStrategyV6"] = AdaptiveCrossoverElitistStrategyV6 - LLAMAAdaptiveCrossoverElitistStrategyV6 = NonObjectOptimizer( - method="LLAMAAdaptiveCrossoverElitistStrategyV6" - ).set_name("LLAMAAdaptiveCrossoverElitistStrategyV6", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverElitistStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCrossoverElitistStrategyV6 = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverElitistStrategyV6").set_name("LLAMAAdaptiveCrossoverElitistStrategyV6", register=True) except Exception as e: print("AdaptiveCrossoverElitistStrategyV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCrossoverSearch import AdaptiveCrossoverSearch lama_register["AdaptiveCrossoverSearch"] = AdaptiveCrossoverSearch - LLAMAAdaptiveCrossoverSearch = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch").set_name( - "LLAMAAdaptiveCrossoverSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCrossoverSearch = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch").set_name("LLAMAAdaptiveCrossoverSearch", register=True) except Exception as e: print("AdaptiveCrossoverSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalCooperativeSearch import ( - AdaptiveCulturalCooperativeSearch, - ) + from nevergrad.optimization.lama.AdaptiveCulturalCooperativeSearch import AdaptiveCulturalCooperativeSearch lama_register["AdaptiveCulturalCooperativeSearch"] = AdaptiveCulturalCooperativeSearch - LLAMAAdaptiveCulturalCooperativeSearch = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalCooperativeSearch" - ).set_name("LLAMAAdaptiveCulturalCooperativeSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalCooperativeSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalCooperativeSearch = NonObjectOptimizer(method="LLAMAAdaptiveCulturalCooperativeSearch").set_name("LLAMAAdaptiveCulturalCooperativeSearch", register=True) except Exception as e: print("AdaptiveCulturalCooperativeSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalDifferentialEvolution import ( - AdaptiveCulturalDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialEvolution import AdaptiveCulturalDifferentialEvolution lama_register["AdaptiveCulturalDifferentialEvolution"] = AdaptiveCulturalDifferentialEvolution - LLAMAAdaptiveCulturalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalDifferentialEvolution" - ).set_name("LLAMAAdaptiveCulturalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialEvolution").set_name("LLAMAAdaptiveCulturalDifferentialEvolution", register=True) except Exception as e: print("AdaptiveCulturalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalDifferentialMemeticEvolution import ( - AdaptiveCulturalDifferentialMemeticEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialMemeticEvolution import AdaptiveCulturalDifferentialMemeticEvolution - lama_register["AdaptiveCulturalDifferentialMemeticEvolution"] = ( - AdaptiveCulturalDifferentialMemeticEvolution - ) - LLAMAAdaptiveCulturalDifferentialMemeticEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution" - ).set_name("LLAMAAdaptiveCulturalDifferentialMemeticEvolution", register=True) + lama_register["AdaptiveCulturalDifferentialMemeticEvolution"] = AdaptiveCulturalDifferentialMemeticEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalDifferentialMemeticEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution").set_name("LLAMAAdaptiveCulturalDifferentialMemeticEvolution", register=True) except Exception as e: print("AdaptiveCulturalDifferentialMemeticEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalEvolutionStrategy import ( - AdaptiveCulturalEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionStrategy import AdaptiveCulturalEvolutionStrategy lama_register["AdaptiveCulturalEvolutionStrategy"] = AdaptiveCulturalEvolutionStrategy - LLAMAAdaptiveCulturalEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalEvolutionStrategy" - ).set_name("LLAMAAdaptiveCulturalEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionStrategy").set_name("LLAMAAdaptiveCulturalEvolutionStrategy", register=True) except Exception as e: print("AdaptiveCulturalEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalEvolutionaryAlgorithm import ( - AdaptiveCulturalEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionaryAlgorithm import AdaptiveCulturalEvolutionaryAlgorithm lama_register["AdaptiveCulturalEvolutionaryAlgorithm"] = AdaptiveCulturalEvolutionaryAlgorithm - LLAMAAdaptiveCulturalEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm" - ).set_name("LLAMAAdaptiveCulturalEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm").set_name("LLAMAAdaptiveCulturalEvolutionaryAlgorithm", register=True) except Exception as e: print("AdaptiveCulturalEvolutionaryAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveCulturalMemeticAlgorithm import AdaptiveCulturalMemeticAlgorithm lama_register["AdaptiveCulturalMemeticAlgorithm"] = AdaptiveCulturalMemeticAlgorithm - LLAMAAdaptiveCulturalMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalMemeticAlgorithm" - ).set_name("LLAMAAdaptiveCulturalMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticAlgorithm").set_name("LLAMAAdaptiveCulturalMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveCulturalMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveCulturalMemeticDifferentialEvolution import ( - AdaptiveCulturalMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveCulturalMemeticDifferentialEvolution import AdaptiveCulturalMemeticDifferentialEvolution - lama_register["AdaptiveCulturalMemeticDifferentialEvolution"] = ( - AdaptiveCulturalMemeticDifferentialEvolution - ) - LLAMAAdaptiveCulturalMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution" - ).set_name("LLAMAAdaptiveCulturalMemeticDifferentialEvolution", register=True) + lama_register["AdaptiveCulturalMemeticDifferentialEvolution"] = AdaptiveCulturalMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveCulturalMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution").set_name("LLAMAAdaptiveCulturalMemeticDifferentialEvolution", register=True) except Exception as e: print("AdaptiveCulturalMemeticDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDEPSOOptimizer import AdaptiveDEPSOOptimizer lama_register["AdaptiveDEPSOOptimizer"] = AdaptiveDEPSOOptimizer - LLAMAAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer").set_name( - "LLAMAAdaptiveDEPSOOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer").set_name("LLAMAAdaptiveDEPSOOptimizer", register=True) except Exception as e: print("AdaptiveDEPSOOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDEWithElitismAndLocalSearch import ( - AdaptiveDEWithElitismAndLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveDEWithElitismAndLocalSearch import AdaptiveDEWithElitismAndLocalSearch lama_register["AdaptiveDEWithElitismAndLocalSearch"] = AdaptiveDEWithElitismAndLocalSearch - LLAMAAdaptiveDEWithElitismAndLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDEWithElitismAndLocalSearch" - ).set_name("LLAMAAdaptiveDEWithElitismAndLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDEWithElitismAndLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDEWithElitismAndLocalSearch").set_name("LLAMAAdaptiveDEWithElitismAndLocalSearch", register=True) except Exception as e: print("AdaptiveDEWithElitismAndLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDEWithOrthogonalCrossover import ( - AdaptiveDEWithOrthogonalCrossover, - ) + from nevergrad.optimization.lama.AdaptiveDEWithOrthogonalCrossover import AdaptiveDEWithOrthogonalCrossover lama_register["AdaptiveDEWithOrthogonalCrossover"] = AdaptiveDEWithOrthogonalCrossover - LLAMAAdaptiveDEWithOrthogonalCrossover = NonObjectOptimizer( - method="LLAMAAdaptiveDEWithOrthogonalCrossover" - ).set_name("LLAMAAdaptiveDEWithOrthogonalCrossover", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithOrthogonalCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDEWithOrthogonalCrossover = NonObjectOptimizer(method="LLAMAAdaptiveDEWithOrthogonalCrossover").set_name("LLAMAAdaptiveDEWithOrthogonalCrossover", register=True) except Exception as e: print("AdaptiveDEWithOrthogonalCrossover can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDecayOptimizer import AdaptiveDecayOptimizer lama_register["AdaptiveDecayOptimizer"] = AdaptiveDecayOptimizer - LLAMAAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer").set_name( - "LLAMAAdaptiveDecayOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer").set_name("LLAMAAdaptiveDecayOptimizer", register=True) except Exception as e: print("AdaptiveDecayOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDifferentialCrossover import AdaptiveDifferentialCrossover lama_register["AdaptiveDifferentialCrossover"] = AdaptiveDifferentialCrossover - LLAMAAdaptiveDifferentialCrossover = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialCrossover" - ).set_name("LLAMAAdaptiveDifferentialCrossover", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialCrossover = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialCrossover").set_name("LLAMAAdaptiveDifferentialCrossover", register=True) except Exception as e: print("AdaptiveDifferentialCrossover can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDifferentialEvolution import AdaptiveDifferentialEvolution lama_register["AdaptiveDifferentialEvolution"] = AdaptiveDifferentialEvolution - LLAMAAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolution" - ).set_name("LLAMAAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolution").set_name("LLAMAAdaptiveDifferentialEvolution", register=True) except Exception as e: print("AdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionHarmonySearch import ( - AdaptiveDifferentialEvolutionHarmonySearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionHarmonySearch import AdaptiveDifferentialEvolutionHarmonySearch lama_register["AdaptiveDifferentialEvolutionHarmonySearch"] = AdaptiveDifferentialEvolutionHarmonySearch - LLAMAAdaptiveDifferentialEvolutionHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch" - ).set_name("LLAMAAdaptiveDifferentialEvolutionHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch").set_name("LLAMAAdaptiveDifferentialEvolutionHarmonySearch", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionOptimizer import ( - AdaptiveDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionOptimizer import AdaptiveDifferentialEvolutionOptimizer lama_register["AdaptiveDifferentialEvolutionOptimizer"] = AdaptiveDifferentialEvolutionOptimizer - LLAMAAdaptiveDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionOptimizer" - ).set_name("LLAMAAdaptiveDifferentialEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveDifferentialEvolutionOptimizer", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPSO import AdaptiveDifferentialEvolutionPSO lama_register["AdaptiveDifferentialEvolutionPSO"] = AdaptiveDifferentialEvolutionPSO - LLAMAAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionPSO" - ).set_name("LLAMAAdaptiveDifferentialEvolutionPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPSO").set_name("LLAMAAdaptiveDifferentialEvolutionPSO", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPlus import ( - AdaptiveDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPlus import AdaptiveDifferentialEvolutionPlus lama_register["AdaptiveDifferentialEvolutionPlus"] = AdaptiveDifferentialEvolutionPlus - LLAMAAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionPlus" - ).set_name("LLAMAAdaptiveDifferentialEvolutionPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPlus").set_name("LLAMAAdaptiveDifferentialEvolutionPlus", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( - AdaptiveDifferentialEvolutionWithAdaptivePerturbation, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithAdaptivePerturbation import AdaptiveDifferentialEvolutionWithAdaptivePerturbation - lama_register["AdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( - AdaptiveDifferentialEvolutionWithAdaptivePerturbation - ) - LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) + lama_register["AdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = AdaptiveDifferentialEvolutionWithAdaptivePerturbation + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation").set_name("LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( - AdaptiveDifferentialEvolutionWithBayesianLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithBayesianLocalSearch import AdaptiveDifferentialEvolutionWithBayesianLocalSearch - lama_register["AdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( - AdaptiveDifferentialEvolutionWithBayesianLocalSearch - ) - LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) + lama_register["AdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = AdaptiveDifferentialEvolutionWithBayesianLocalSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation import ( - AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation import AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation - lama_register["AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation"] = ( - AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation - ) - LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation", register=True) + lama_register["AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation"] = AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation").set_name("LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( - AdaptiveDifferentialEvolutionWithDynamicPopulationV2, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithDynamicPopulationV2 import AdaptiveDifferentialEvolutionWithDynamicPopulationV2 - lama_register["AdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( - AdaptiveDifferentialEvolutionWithDynamicPopulationV2 - ) - LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) + lama_register["AdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = AdaptiveDifferentialEvolutionWithDynamicPopulationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2").set_name("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGradientBoost import ( - AdaptiveDifferentialEvolutionWithGradientBoost, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGradientBoost import AdaptiveDifferentialEvolutionWithGradientBoost - lama_register["AdaptiveDifferentialEvolutionWithGradientBoost"] = ( - AdaptiveDifferentialEvolutionWithGradientBoost - ) - LLAMAAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGradientBoost", register=True) + lama_register["AdaptiveDifferentialEvolutionWithGradientBoost"] = AdaptiveDifferentialEvolutionWithGradientBoost + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMAAdaptiveDifferentialEvolutionWithGradientBoost", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGuidedSearch import ( - AdaptiveDifferentialEvolutionWithGuidedSearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGuidedSearch import AdaptiveDifferentialEvolutionWithGuidedSearch - lama_register["AdaptiveDifferentialEvolutionWithGuidedSearch"] = ( - AdaptiveDifferentialEvolutionWithGuidedSearch - ) - LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch", register=True) + lama_register["AdaptiveDifferentialEvolutionWithGuidedSearch"] = AdaptiveDifferentialEvolutionWithGuidedSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithGuidedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithLocalSearch import ( - AdaptiveDifferentialEvolutionWithLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithLocalSearch import AdaptiveDifferentialEvolutionWithLocalSearch - lama_register["AdaptiveDifferentialEvolutionWithLocalSearch"] = ( - AdaptiveDifferentialEvolutionWithLocalSearch - ) - LLAMAAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithLocalSearch", register=True) + lama_register["AdaptiveDifferentialEvolutionWithLocalSearch"] = AdaptiveDifferentialEvolutionWithLocalSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithLocalSearch", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithMemeticSearch import ( - AdaptiveDifferentialEvolutionWithMemeticSearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithMemeticSearch import AdaptiveDifferentialEvolutionWithMemeticSearch - lama_register["AdaptiveDifferentialEvolutionWithMemeticSearch"] = ( - AdaptiveDifferentialEvolutionWithMemeticSearch - ) - LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) + lama_register["AdaptiveDifferentialEvolutionWithMemeticSearch"] = AdaptiveDifferentialEvolutionWithMemeticSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithSurrogateAssistance import ( - AdaptiveDifferentialEvolutionWithSurrogateAssistance, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithSurrogateAssistance import AdaptiveDifferentialEvolutionWithSurrogateAssistance - lama_register["AdaptiveDifferentialEvolutionWithSurrogateAssistance"] = ( - AdaptiveDifferentialEvolutionWithSurrogateAssistance - ) - LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance" - ).set_name("LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance", register=True) + lama_register["AdaptiveDifferentialEvolutionWithSurrogateAssistance"] = AdaptiveDifferentialEvolutionWithSurrogateAssistance + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance").set_name("LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance", register=True) except Exception as e: print("AdaptiveDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialHarmonySearch import ( - AdaptiveDifferentialHarmonySearch, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialHarmonySearch import AdaptiveDifferentialHarmonySearch lama_register["AdaptiveDifferentialHarmonySearch"] = AdaptiveDifferentialHarmonySearch - LLAMAAdaptiveDifferentialHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialHarmonySearch" - ).set_name("LLAMAAdaptiveDifferentialHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialHarmonySearch").set_name("LLAMAAdaptiveDifferentialHarmonySearch", register=True) except Exception as e: print("AdaptiveDifferentialHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialMemeticAlgorithm import ( - AdaptiveDifferentialMemeticAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialMemeticAlgorithm import AdaptiveDifferentialMemeticAlgorithm lama_register["AdaptiveDifferentialMemeticAlgorithm"] = AdaptiveDifferentialMemeticAlgorithm - LLAMAAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialMemeticAlgorithm" - ).set_name("LLAMAAdaptiveDifferentialMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialMemeticAlgorithm").set_name("LLAMAAdaptiveDifferentialMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialQuantumEvolution import ( - AdaptiveDifferentialQuantumEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumEvolution import AdaptiveDifferentialQuantumEvolution lama_register["AdaptiveDifferentialQuantumEvolution"] = AdaptiveDifferentialQuantumEvolution - LLAMAAdaptiveDifferentialQuantumEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialQuantumEvolution" - ).set_name("LLAMAAdaptiveDifferentialQuantumEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialQuantumEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumEvolution").set_name("LLAMAAdaptiveDifferentialQuantumEvolution", register=True) except Exception as e: print("AdaptiveDifferentialQuantumEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDifferentialQuantumMetaheuristic import ( - AdaptiveDifferentialQuantumMetaheuristic, - ) + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumMetaheuristic import AdaptiveDifferentialQuantumMetaheuristic lama_register["AdaptiveDifferentialQuantumMetaheuristic"] = AdaptiveDifferentialQuantumMetaheuristic - LLAMAAdaptiveDifferentialQuantumMetaheuristic = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialQuantumMetaheuristic" - ).set_name("LLAMAAdaptiveDifferentialQuantumMetaheuristic", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialQuantumMetaheuristic = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumMetaheuristic").set_name("LLAMAAdaptiveDifferentialQuantumMetaheuristic", register=True) except Exception as e: print("AdaptiveDifferentialQuantumMetaheuristic can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDifferentialSpiralSearch import AdaptiveDifferentialSpiralSearch lama_register["AdaptiveDifferentialSpiralSearch"] = AdaptiveDifferentialSpiralSearch - LLAMAAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDifferentialSpiralSearch" - ).set_name("LLAMAAdaptiveDifferentialSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialSpiralSearch").set_name("LLAMAAdaptiveDifferentialSpiralSearch", register=True) except Exception as e: print("AdaptiveDifferentialSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDimensionalClimbingEvolutionStrategy import ( - AdaptiveDimensionalClimbingEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveDimensionalClimbingEvolutionStrategy import AdaptiveDimensionalClimbingEvolutionStrategy - lama_register["AdaptiveDimensionalClimbingEvolutionStrategy"] = ( - AdaptiveDimensionalClimbingEvolutionStrategy - ) - LLAMAAdaptiveDimensionalClimbingEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy" - ).set_name("LLAMAAdaptiveDimensionalClimbingEvolutionStrategy", register=True) + lama_register["AdaptiveDimensionalClimbingEvolutionStrategy"] = AdaptiveDimensionalClimbingEvolutionStrategy + res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDimensionalClimbingEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy").set_name("LLAMAAdaptiveDimensionalClimbingEvolutionStrategy", register=True) except Exception as e: print("AdaptiveDimensionalClimbingEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDimensionalCrossoverEvolver import ( - AdaptiveDimensionalCrossoverEvolver, - ) + from nevergrad.optimization.lama.AdaptiveDimensionalCrossoverEvolver import AdaptiveDimensionalCrossoverEvolver lama_register["AdaptiveDimensionalCrossoverEvolver"] = AdaptiveDimensionalCrossoverEvolver - LLAMAAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( - method="LLAMAAdaptiveDimensionalCrossoverEvolver" - ).set_name("LLAMAAdaptiveDimensionalCrossoverEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalCrossoverEvolver").set_name("LLAMAAdaptiveDimensionalCrossoverEvolver", register=True) except Exception as e: print("AdaptiveDimensionalCrossoverEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDirectionalBiasQuorumOptimization import ( - AdaptiveDirectionalBiasQuorumOptimization, - ) + from nevergrad.optimization.lama.AdaptiveDirectionalBiasQuorumOptimization import AdaptiveDirectionalBiasQuorumOptimization lama_register["AdaptiveDirectionalBiasQuorumOptimization"] = AdaptiveDirectionalBiasQuorumOptimization - LLAMAAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveDirectionalBiasQuorumOptimization" - ).set_name("LLAMAAdaptiveDirectionalBiasQuorumOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMAAdaptiveDirectionalBiasQuorumOptimization", register=True) except Exception as e: print("AdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDirectionalSearch import AdaptiveDirectionalSearch lama_register["AdaptiveDirectionalSearch"] = AdaptiveDirectionalSearch - LLAMAAdaptiveDirectionalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch").set_name( - "LLAMAAdaptiveDirectionalSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDirectionalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch").set_name("LLAMAAdaptiveDirectionalSearch", register=True) except Exception as e: print("AdaptiveDirectionalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDivergenceClusteringSearch import ( - AdaptiveDivergenceClusteringSearch, - ) + from nevergrad.optimization.lama.AdaptiveDivergenceClusteringSearch import AdaptiveDivergenceClusteringSearch lama_register["AdaptiveDivergenceClusteringSearch"] = AdaptiveDivergenceClusteringSearch - LLAMAAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( - method="LLAMAAdaptiveDivergenceClusteringSearch" - ).set_name("LLAMAAdaptiveDivergenceClusteringSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDivergenceClusteringSearch = NonObjectOptimizer(method="LLAMAAdaptiveDivergenceClusteringSearch").set_name("LLAMAAdaptiveDivergenceClusteringSearch", register=True) except Exception as e: print("AdaptiveDivergenceClusteringSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDiverseHybridOptimizer import AdaptiveDiverseHybridOptimizer lama_register["AdaptiveDiverseHybridOptimizer"] = AdaptiveDiverseHybridOptimizer - LLAMAAdaptiveDiverseHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveDiverseHybridOptimizer" - ).set_name("LLAMAAdaptiveDiverseHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiverseHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDiverseHybridOptimizer").set_name("LLAMAAdaptiveDiverseHybridOptimizer", register=True) except Exception as e: print("AdaptiveDiverseHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversifiedEvolutionStrategy import ( - AdaptiveDiversifiedEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveDiversifiedEvolutionStrategy import AdaptiveDiversifiedEvolutionStrategy lama_register["AdaptiveDiversifiedEvolutionStrategy"] = AdaptiveDiversifiedEvolutionStrategy - LLAMAAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveDiversifiedEvolutionStrategy" - ).set_name("LLAMAAdaptiveDiversifiedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedEvolutionStrategy").set_name("LLAMAAdaptiveDiversifiedEvolutionStrategy", register=True) except Exception as e: print("AdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearch import AdaptiveDiversifiedHarmonySearch lama_register["AdaptiveDiversifiedHarmonySearch"] = AdaptiveDiversifiedHarmonySearch - LLAMAAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveDiversifiedHarmonySearch" - ).set_name("LLAMAAdaptiveDiversifiedHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearch").set_name("LLAMAAdaptiveDiversifiedHarmonySearch", register=True) except Exception as e: print("AdaptiveDiversifiedHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearchOptimizer import ( - AdaptiveDiversifiedHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearchOptimizer import AdaptiveDiversifiedHarmonySearchOptimizer lama_register["AdaptiveDiversifiedHarmonySearchOptimizer"] = AdaptiveDiversifiedHarmonySearchOptimizer - LLAMAAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer" - ).set_name("LLAMAAdaptiveDiversifiedHarmonySearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer").set_name("LLAMAAdaptiveDiversifiedHarmonySearchOptimizer", register=True) except Exception as e: print("AdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDiversifiedSearch import AdaptiveDiversifiedSearch lama_register["AdaptiveDiversifiedSearch"] = AdaptiveDiversifiedSearch - LLAMAAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch").set_name( - "LLAMAAdaptiveDiversifiedSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch").set_name("LLAMAAdaptiveDiversifiedSearch", register=True) except Exception as e: print("AdaptiveDiversifiedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversityDifferentialHybrid import ( - AdaptiveDiversityDifferentialHybrid, - ) + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialHybrid import AdaptiveDiversityDifferentialHybrid lama_register["AdaptiveDiversityDifferentialHybrid"] = AdaptiveDiversityDifferentialHybrid - LLAMAAdaptiveDiversityDifferentialHybrid = NonObjectOptimizer( - method="LLAMAAdaptiveDiversityDifferentialHybrid" - ).set_name("LLAMAAdaptiveDiversityDifferentialHybrid", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversityDifferentialHybrid = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialHybrid").set_name("LLAMAAdaptiveDiversityDifferentialHybrid", register=True) except Exception as e: print("AdaptiveDiversityDifferentialHybrid can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversityDifferentialMemeticHybrid import ( - AdaptiveDiversityDifferentialMemeticHybrid, - ) + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialMemeticHybrid import AdaptiveDiversityDifferentialMemeticHybrid lama_register["AdaptiveDiversityDifferentialMemeticHybrid"] = AdaptiveDiversityDifferentialMemeticHybrid - LLAMAAdaptiveDiversityDifferentialMemeticHybrid = NonObjectOptimizer( - method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid" - ).set_name("LLAMAAdaptiveDiversityDifferentialMemeticHybrid", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversityDifferentialMemeticHybrid = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid").set_name("LLAMAAdaptiveDiversityDifferentialMemeticHybrid", register=True) except Exception as e: print("AdaptiveDiversityDifferentialMemeticHybrid can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversityMaintainedDifferentialEvolution import ( - AdaptiveDiversityMaintainedDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDiversityMaintainedDifferentialEvolution import AdaptiveDiversityMaintainedDifferentialEvolution - lama_register["AdaptiveDiversityMaintainedDifferentialEvolution"] = ( - AdaptiveDiversityMaintainedDifferentialEvolution - ) - LLAMAAdaptiveDiversityMaintainedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution" - ).set_name("LLAMAAdaptiveDiversityMaintainedDifferentialEvolution", register=True) + lama_register["AdaptiveDiversityMaintainedDifferentialEvolution"] = AdaptiveDiversityMaintainedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversityMaintainedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution").set_name("LLAMAAdaptiveDiversityMaintainedDifferentialEvolution", register=True) except Exception as e: print("AdaptiveDiversityMaintainedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDiversityMaintainingGradientEvolution import ( - AdaptiveDiversityMaintainingGradientEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDiversityMaintainingGradientEvolution import AdaptiveDiversityMaintainingGradientEvolution - lama_register["AdaptiveDiversityMaintainingGradientEvolution"] = ( - AdaptiveDiversityMaintainingGradientEvolution - ) - LLAMAAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDiversityMaintainingGradientEvolution" - ).set_name("LLAMAAdaptiveDiversityMaintainingGradientEvolution", register=True) + lama_register["AdaptiveDiversityMaintainingGradientEvolution"] = AdaptiveDiversityMaintainingGradientEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainingGradientEvolution").set_name("LLAMAAdaptiveDiversityMaintainingGradientEvolution", register=True) except Exception as e: print("AdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDiversityPSO import AdaptiveDiversityPSO lama_register["AdaptiveDiversityPSO"] = AdaptiveDiversityPSO - LLAMAAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO").set_name( - "LLAMAAdaptiveDiversityPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO").set_name("LLAMAAdaptiveDiversityPSO", register=True) except Exception as e: print("AdaptiveDiversityPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDolphinPodOptimization import AdaptiveDolphinPodOptimization lama_register["AdaptiveDolphinPodOptimization"] = AdaptiveDolphinPodOptimization - LLAMAAdaptiveDolphinPodOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveDolphinPodOptimization" - ).set_name("LLAMAAdaptiveDolphinPodOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDolphinPodOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDolphinPodOptimization").set_name("LLAMAAdaptiveDolphinPodOptimization", register=True) except Exception as e: print("AdaptiveDolphinPodOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDualPhaseDifferentialEvolution import ( - AdaptiveDualPhaseDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDualPhaseDifferentialEvolution import AdaptiveDualPhaseDifferentialEvolution lama_register["AdaptiveDualPhaseDifferentialEvolution"] = AdaptiveDualPhaseDifferentialEvolution - LLAMAAdaptiveDualPhaseDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDualPhaseDifferentialEvolution" - ).set_name("LLAMAAdaptiveDualPhaseDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseDifferentialEvolution").set_name("LLAMAAdaptiveDualPhaseDifferentialEvolution", register=True) except Exception as e: print("AdaptiveDualPhaseDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDualPhaseEvolutionarySwarmOptimization import ( - AdaptiveDualPhaseEvolutionarySwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveDualPhaseEvolutionarySwarmOptimization import AdaptiveDualPhaseEvolutionarySwarmOptimization - lama_register["AdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( - AdaptiveDualPhaseEvolutionarySwarmOptimization - ) - LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization" - ).set_name("LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) + lama_register["AdaptiveDualPhaseEvolutionarySwarmOptimization"] = AdaptiveDualPhaseEvolutionarySwarmOptimization + res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization").set_name("LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) except Exception as e: print("AdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( - AdaptiveDualPhaseOptimizationWithDynamicParameterControl, - ) + from nevergrad.optimization.lama.AdaptiveDualPhaseOptimizationWithDynamicParameterControl import AdaptiveDualPhaseOptimizationWithDynamicParameterControl - lama_register["AdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( - AdaptiveDualPhaseOptimizationWithDynamicParameterControl - ) - LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( - method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl" - ).set_name("LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) + lama_register["AdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = AdaptiveDualPhaseOptimizationWithDynamicParameterControl + res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl").set_name("LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) except Exception as e: print("AdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDualPhaseStrategy import AdaptiveDualPhaseStrategy lama_register["AdaptiveDualPhaseStrategy"] = AdaptiveDualPhaseStrategy - LLAMAAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy").set_name( - "LLAMAAdaptiveDualPhaseStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy").set_name("LLAMAAdaptiveDualPhaseStrategy", register=True) except Exception as e: print("AdaptiveDualPhaseStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDualPopulationDE_LS import AdaptiveDualPopulationDE_LS lama_register["AdaptiveDualPopulationDE_LS"] = AdaptiveDualPopulationDE_LS - LLAMAAdaptiveDualPopulationDE_LS = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS").set_name( - "LLAMAAdaptiveDualPopulationDE_LS", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualPopulationDE_LS = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS").set_name("LLAMAAdaptiveDualPopulationDE_LS", register=True) except Exception as e: print("AdaptiveDualPopulationDE_LS can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDualStrategyOptimizer import AdaptiveDualStrategyOptimizer lama_register["AdaptiveDualStrategyOptimizer"] = AdaptiveDualStrategyOptimizer - LLAMAAdaptiveDualStrategyOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveDualStrategyOptimizer" - ).set_name("LLAMAAdaptiveDualStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDualStrategyOptimizer").set_name("LLAMAAdaptiveDualStrategyOptimizer", register=True) except Exception as e: print("AdaptiveDualStrategyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDynamicDE import AdaptiveDynamicDE lama_register["AdaptiveDynamicDE"] = AdaptiveDynamicDE - LLAMAAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE").set_name( - "LLAMAAdaptiveDynamicDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE").set_name("LLAMAAdaptiveDynamicDE", register=True) except Exception as e: print("AdaptiveDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicDifferentialEvolution import ( - AdaptiveDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDynamicDifferentialEvolution import AdaptiveDynamicDifferentialEvolution lama_register["AdaptiveDynamicDifferentialEvolution"] = AdaptiveDynamicDifferentialEvolution - LLAMAAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicDifferentialEvolution" - ).set_name("LLAMAAdaptiveDynamicDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDifferentialEvolution").set_name("LLAMAAdaptiveDynamicDifferentialEvolution", register=True) except Exception as e: print("AdaptiveDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseEnhancedStrategyV20 import ( - AdaptiveDynamicDualPhaseEnhancedStrategyV20, - ) + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseEnhancedStrategyV20 import AdaptiveDynamicDualPhaseEnhancedStrategyV20 lama_register["AdaptiveDynamicDualPhaseEnhancedStrategyV20"] = AdaptiveDynamicDualPhaseEnhancedStrategyV20 - LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20" - ).set_name("LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20").set_name("LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20", register=True) except Exception as e: print("AdaptiveDynamicDualPhaseEnhancedStrategyV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseStrategyV11 import ( - AdaptiveDynamicDualPhaseStrategyV11, - ) + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseStrategyV11 import AdaptiveDynamicDualPhaseStrategyV11 lama_register["AdaptiveDynamicDualPhaseStrategyV11"] = AdaptiveDynamicDualPhaseStrategyV11 - LLAMAAdaptiveDynamicDualPhaseStrategyV11 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicDualPhaseStrategyV11" - ).set_name("LLAMAAdaptiveDynamicDualPhaseStrategyV11", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicDualPhaseStrategyV11 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseStrategyV11").set_name("LLAMAAdaptiveDynamicDualPhaseStrategyV11", register=True) except Exception as e: print("AdaptiveDynamicDualPhaseStrategyV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDynamicEvolutionStrategy import AdaptiveDynamicEvolutionStrategy lama_register["AdaptiveDynamicEvolutionStrategy"] = AdaptiveDynamicEvolutionStrategy - LLAMAAdaptiveDynamicEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicEvolutionStrategy" - ).set_name("LLAMAAdaptiveDynamicEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDynamicEvolutionStrategy").set_name("LLAMAAdaptiveDynamicEvolutionStrategy", register=True) except Exception as e: print("AdaptiveDynamicEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithm import ( - AdaptiveDynamicExplorationExploitationAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithm import AdaptiveDynamicExplorationExploitationAlgorithm - lama_register["AdaptiveDynamicExplorationExploitationAlgorithm"] = ( - AdaptiveDynamicExplorationExploitationAlgorithm - ) - LLAMAAdaptiveDynamicExplorationExploitationAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm" - ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithm", register=True) + lama_register["AdaptiveDynamicExplorationExploitationAlgorithm"] = AdaptiveDynamicExplorationExploitationAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithm", register=True) except Exception as e: print("AdaptiveDynamicExplorationExploitationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV2 import ( - AdaptiveDynamicExplorationExploitationAlgorithmV2, - ) + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV2 import AdaptiveDynamicExplorationExploitationAlgorithmV2 - lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV2"] = ( - AdaptiveDynamicExplorationExploitationAlgorithmV2 - ) - LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2" - ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2", register=True) + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV2"] = AdaptiveDynamicExplorationExploitationAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2", register=True) except Exception as e: print("AdaptiveDynamicExplorationExploitationAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV3 import ( - AdaptiveDynamicExplorationExploitationAlgorithmV3, - ) + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV3 import AdaptiveDynamicExplorationExploitationAlgorithmV3 - lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV3"] = ( - AdaptiveDynamicExplorationExploitationAlgorithmV3 - ) - LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3" - ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3", register=True) + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV3"] = AdaptiveDynamicExplorationExploitationAlgorithmV3 + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3", register=True) except Exception as e: print("AdaptiveDynamicExplorationExploitationAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationOptimization import ( - AdaptiveDynamicExplorationOptimization, - ) + from nevergrad.optimization.lama.AdaptiveDynamicExplorationOptimization import AdaptiveDynamicExplorationOptimization lama_register["AdaptiveDynamicExplorationOptimization"] = AdaptiveDynamicExplorationOptimization - LLAMAAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicExplorationOptimization" - ).set_name("LLAMAAdaptiveDynamicExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationOptimization").set_name("LLAMAAdaptiveDynamicExplorationOptimization", register=True) except Exception as e: print("AdaptiveDynamicExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithm import AdaptiveDynamicFireworkAlgorithm lama_register["AdaptiveDynamicFireworkAlgorithm"] = AdaptiveDynamicFireworkAlgorithm - LLAMAAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicFireworkAlgorithm" - ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithm").set_name("LLAMAAdaptiveDynamicFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithmRedesigned import ( - AdaptiveDynamicFireworkAlgorithmRedesigned, - ) + from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithmRedesigned import AdaptiveDynamicFireworkAlgorithmRedesigned lama_register["AdaptiveDynamicFireworkAlgorithmRedesigned"] = AdaptiveDynamicFireworkAlgorithmRedesigned - LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned" - ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned").set_name("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned", register=True) except Exception as e: print("AdaptiveDynamicFireworkAlgorithmRedesigned can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicFireworkDifferentialEvolutionV4 import ( - AdaptiveDynamicFireworkDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.AdaptiveDynamicFireworkDifferentialEvolutionV4 import AdaptiveDynamicFireworkDifferentialEvolutionV4 - lama_register["AdaptiveDynamicFireworkDifferentialEvolutionV4"] = ( - AdaptiveDynamicFireworkDifferentialEvolutionV4 - ) - LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4" - ).set_name("LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4", register=True) + lama_register["AdaptiveDynamicFireworkDifferentialEvolutionV4"] = AdaptiveDynamicFireworkDifferentialEvolutionV4 + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4").set_name("LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4", register=True) except Exception as e: print("AdaptiveDynamicFireworkDifferentialEvolutionV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDynamicHarmonySearch import AdaptiveDynamicHarmonySearch lama_register["AdaptiveDynamicHarmonySearch"] = AdaptiveDynamicHarmonySearch - LLAMAAdaptiveDynamicHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicHarmonySearch" - ).set_name("LLAMAAdaptiveDynamicHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHarmonySearch").set_name("LLAMAAdaptiveDynamicHarmonySearch", register=True) except Exception as e: print("AdaptiveDynamicHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizationV2 import ( - AdaptiveDynamicHybridOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizationV2 import AdaptiveDynamicHybridOptimizationV2 lama_register["AdaptiveDynamicHybridOptimizationV2"] = AdaptiveDynamicHybridOptimizationV2 - LLAMAAdaptiveDynamicHybridOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicHybridOptimizationV2" - ).set_name("LLAMAAdaptiveDynamicHybridOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizationV2").set_name("LLAMAAdaptiveDynamicHybridOptimizationV2", register=True) except Exception as e: print("AdaptiveDynamicHybridOptimizationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizer import AdaptiveDynamicHybridOptimizer lama_register["AdaptiveDynamicHybridOptimizer"] = AdaptiveDynamicHybridOptimizer - LLAMAAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicHybridOptimizer" - ).set_name("LLAMAAdaptiveDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizer").set_name("LLAMAAdaptiveDynamicHybridOptimizer", register=True) except Exception as e: print("AdaptiveDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicMemeticEvolutionaryAlgorithm import ( - AdaptiveDynamicMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveDynamicMemeticEvolutionaryAlgorithm import AdaptiveDynamicMemeticEvolutionaryAlgorithm lama_register["AdaptiveDynamicMemeticEvolutionaryAlgorithm"] = AdaptiveDynamicMemeticEvolutionaryAlgorithm - LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm" - ).set_name("LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("AdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicMultiStrategyDifferentialEvolution import ( - AdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveDynamicMultiStrategyDifferentialEvolution import AdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["AdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - AdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["AdaptiveDynamicMultiStrategyDifferentialEvolution"] = AdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("AdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveDynamicQuantumSwarmOptimization import ( - AdaptiveDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveDynamicQuantumSwarmOptimization import AdaptiveDynamicQuantumSwarmOptimization lama_register["AdaptiveDynamicQuantumSwarmOptimization"] = AdaptiveDynamicQuantumSwarmOptimization - LLAMAAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveDynamicQuantumSwarmOptimization" - ).set_name("LLAMAAdaptiveDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDynamicQuantumSwarmOptimization").set_name("LLAMAAdaptiveDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("AdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEliteCovarianceMatrixMemeticSearch import ( - AdaptiveEliteCovarianceMatrixMemeticSearch, - ) + from nevergrad.optimization.lama.AdaptiveEliteCovarianceMatrixMemeticSearch import AdaptiveEliteCovarianceMatrixMemeticSearch lama_register["AdaptiveEliteCovarianceMatrixMemeticSearch"] = AdaptiveEliteCovarianceMatrixMemeticSearch - LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch" - ).set_name("LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch").set_name("LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch", register=True) except Exception as e: print("AdaptiveEliteCovarianceMatrixMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEliteDifferentialEvolution import ( - AdaptiveEliteDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveEliteDifferentialEvolution import AdaptiveEliteDifferentialEvolution lama_register["AdaptiveEliteDifferentialEvolution"] = AdaptiveEliteDifferentialEvolution - LLAMAAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveEliteDifferentialEvolution" - ).set_name("LLAMAAdaptiveEliteDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteDifferentialEvolution").set_name("LLAMAAdaptiveEliteDifferentialEvolution", register=True) except Exception as e: print("AdaptiveEliteDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEliteDiverseHybridOptimizer import ( - AdaptiveEliteDiverseHybridOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveEliteDiverseHybridOptimizer import AdaptiveEliteDiverseHybridOptimizer lama_register["AdaptiveEliteDiverseHybridOptimizer"] = AdaptiveEliteDiverseHybridOptimizer - LLAMAAdaptiveEliteDiverseHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveEliteDiverseHybridOptimizer" - ).set_name("LLAMAAdaptiveEliteDiverseHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteDiverseHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteDiverseHybridOptimizer").set_name("LLAMAAdaptiveEliteDiverseHybridOptimizer", register=True) except Exception as e: print("AdaptiveEliteDiverseHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_LS_v2 import AdaptiveEliteGuidedDE_LS_v2 lama_register["AdaptiveEliteGuidedDE_LS_v2"] = AdaptiveEliteGuidedDE_LS_v2 - LLAMAAdaptiveEliteGuidedDE_LS_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2").set_name( - "LLAMAAdaptiveEliteGuidedDE_LS_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedDE_LS_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2").set_name("LLAMAAdaptiveEliteGuidedDE_LS_v2", register=True) except Exception as e: print("AdaptiveEliteGuidedDE_LS_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_v2 import AdaptiveEliteGuidedDE_v2 lama_register["AdaptiveEliteGuidedDE_v2"] = AdaptiveEliteGuidedDE_v2 - LLAMAAdaptiveEliteGuidedDE_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2").set_name( - "LLAMAAdaptiveEliteGuidedDE_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedDE_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2").set_name("LLAMAAdaptiveEliteGuidedDE_v2", register=True) except Exception as e: print("AdaptiveEliteGuidedDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE import AdaptiveEliteGuidedMutationDE lama_register["AdaptiveEliteGuidedMutationDE"] = AdaptiveEliteGuidedMutationDE - LLAMAAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( - method="LLAMAAdaptiveEliteGuidedMutationDE" - ).set_name("LLAMAAdaptiveEliteGuidedMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE").set_name("LLAMAAdaptiveEliteGuidedMutationDE", register=True) except Exception as e: print("AdaptiveEliteGuidedMutationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v3 import AdaptiveEliteGuidedMutationDE_v3 lama_register["AdaptiveEliteGuidedMutationDE_v3"] = AdaptiveEliteGuidedMutationDE_v3 - LLAMAAdaptiveEliteGuidedMutationDE_v3 = NonObjectOptimizer( - method="LLAMAAdaptiveEliteGuidedMutationDE_v3" - ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v3").set_name("LLAMAAdaptiveEliteGuidedMutationDE_v3", register=True) except Exception as e: print("AdaptiveEliteGuidedMutationDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v4 import AdaptiveEliteGuidedMutationDE_v4 lama_register["AdaptiveEliteGuidedMutationDE_v4"] = AdaptiveEliteGuidedMutationDE_v4 - LLAMAAdaptiveEliteGuidedMutationDE_v4 = NonObjectOptimizer( - method="LLAMAAdaptiveEliteGuidedMutationDE_v4" - ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v4", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedMutationDE_v4 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v4").set_name("LLAMAAdaptiveEliteGuidedMutationDE_v4", register=True) except Exception as e: print("AdaptiveEliteGuidedMutationDE_v4 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteGuidedRestartDE import AdaptiveEliteGuidedRestartDE lama_register["AdaptiveEliteGuidedRestartDE"] = AdaptiveEliteGuidedRestartDE - LLAMAAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( - method="LLAMAAdaptiveEliteGuidedRestartDE" - ).set_name("LLAMAAdaptiveEliteGuidedRestartDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteGuidedRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedRestartDE").set_name("LLAMAAdaptiveEliteGuidedRestartDE", register=True) except Exception as e: print("AdaptiveEliteGuidedRestartDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteHybridOptimizer import AdaptiveEliteHybridOptimizer lama_register["AdaptiveEliteHybridOptimizer"] = AdaptiveEliteHybridOptimizer - LLAMAAdaptiveEliteHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveEliteHybridOptimizer" - ).set_name("LLAMAAdaptiveEliteHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteHybridOptimizer").set_name("LLAMAAdaptiveEliteHybridOptimizer", register=True) except Exception as e: print("AdaptiveEliteHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEliteMemeticDifferentialEvolution import ( - AdaptiveEliteMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveEliteMemeticDifferentialEvolution import AdaptiveEliteMemeticDifferentialEvolution lama_register["AdaptiveEliteMemeticDifferentialEvolution"] = AdaptiveEliteMemeticDifferentialEvolution - LLAMAAdaptiveEliteMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveEliteMemeticDifferentialEvolution" - ).set_name("LLAMAAdaptiveEliteMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticDifferentialEvolution").set_name("LLAMAAdaptiveEliteMemeticDifferentialEvolution", register=True) except Exception as e: print("AdaptiveEliteMemeticDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizer import AdaptiveEliteMemeticOptimizer lama_register["AdaptiveEliteMemeticOptimizer"] = AdaptiveEliteMemeticOptimizer - LLAMAAdaptiveEliteMemeticOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveEliteMemeticOptimizer" - ).set_name("LLAMAAdaptiveEliteMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizer").set_name("LLAMAAdaptiveEliteMemeticOptimizer", register=True) except Exception as e: print("AdaptiveEliteMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV5 import AdaptiveEliteMemeticOptimizerV5 lama_register["AdaptiveEliteMemeticOptimizerV5"] = AdaptiveEliteMemeticOptimizerV5 - LLAMAAdaptiveEliteMemeticOptimizerV5 = NonObjectOptimizer( - method="LLAMAAdaptiveEliteMemeticOptimizerV5" - ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteMemeticOptimizerV5 = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV5").set_name("LLAMAAdaptiveEliteMemeticOptimizerV5", register=True) except Exception as e: print("AdaptiveEliteMemeticOptimizerV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV6 import AdaptiveEliteMemeticOptimizerV6 lama_register["AdaptiveEliteMemeticOptimizerV6"] = AdaptiveEliteMemeticOptimizerV6 - LLAMAAdaptiveEliteMemeticOptimizerV6 = NonObjectOptimizer( - method="LLAMAAdaptiveEliteMemeticOptimizerV6" - ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteMemeticOptimizerV6 = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV6").set_name("LLAMAAdaptiveEliteMemeticOptimizerV6", register=True) except Exception as e: print("AdaptiveEliteMemeticOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEliteMultiStrategyDifferentialEvolution import ( - AdaptiveEliteMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveEliteMultiStrategyDifferentialEvolution import AdaptiveEliteMultiStrategyDifferentialEvolution - lama_register["AdaptiveEliteMultiStrategyDifferentialEvolution"] = ( - AdaptiveEliteMultiStrategyDifferentialEvolution - ) - LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution" - ).set_name("LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) + lama_register["AdaptiveEliteMultiStrategyDifferentialEvolution"] = AdaptiveEliteMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("AdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveElitistDE import AdaptiveElitistDE lama_register["AdaptiveElitistDE"] = AdaptiveElitistDE - LLAMAAdaptiveElitistDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE").set_name( - "LLAMAAdaptiveElitistDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveElitistDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE").set_name("LLAMAAdaptiveElitistDE", register=True) except Exception as e: print("AdaptiveElitistDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveElitistDE_v3 import AdaptiveElitistDE_v3 lama_register["AdaptiveElitistDE_v3"] = AdaptiveElitistDE_v3 - LLAMAAdaptiveElitistDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3").set_name( - "LLAMAAdaptiveElitistDE_v3", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveElitistDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3").set_name("LLAMAAdaptiveElitistDE_v3", register=True) except Exception as e: print("AdaptiveElitistDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveElitistMutationDE import AdaptiveElitistMutationDE lama_register["AdaptiveElitistMutationDE"] = AdaptiveElitistMutationDE - LLAMAAdaptiveElitistMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE").set_name( - "LLAMAAdaptiveElitistMutationDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveElitistMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE").set_name("LLAMAAdaptiveElitistMutationDE", register=True) except Exception as e: print("AdaptiveElitistMutationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveElitistPopulationStrategy import ( - AdaptiveElitistPopulationStrategy, - ) + from nevergrad.optimization.lama.AdaptiveElitistPopulationStrategy import AdaptiveElitistPopulationStrategy lama_register["AdaptiveElitistPopulationStrategy"] = AdaptiveElitistPopulationStrategy - LLAMAAdaptiveElitistPopulationStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveElitistPopulationStrategy" - ).set_name("LLAMAAdaptiveElitistPopulationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveElitistPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveElitistPopulationStrategy = NonObjectOptimizer(method="LLAMAAdaptiveElitistPopulationStrategy").set_name("LLAMAAdaptiveElitistPopulationStrategy", register=True) except Exception as e: print("AdaptiveElitistPopulationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveElitistQuasiRandomDEGradientAnnealing import ( - AdaptiveElitistQuasiRandomDEGradientAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveElitistQuasiRandomDEGradientAnnealing import AdaptiveElitistQuasiRandomDEGradientAnnealing - lama_register["AdaptiveElitistQuasiRandomDEGradientAnnealing"] = ( - AdaptiveElitistQuasiRandomDEGradientAnnealing - ) - LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing" - ).set_name("LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing", register=True) + lama_register["AdaptiveElitistQuasiRandomDEGradientAnnealing"] = AdaptiveElitistQuasiRandomDEGradientAnnealing + res = NonObjectOptimizer(method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing").set_name("LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing", register=True) except Exception as e: print("AdaptiveElitistQuasiRandomDEGradientAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm import ( - AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm import AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm - lama_register["AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm"] = ( - AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm - ) - LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm" - ).set_name("LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) + lama_register["AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm"] = AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch import ( - AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch import AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch - lama_register["AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( - AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch - ) - LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch" - ).set_name("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) + lama_register["AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch"] = AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch").set_name("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) except Exception as e: print("AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch import ( - AdaptiveEnhancedEvolutionaryFireworksSearch, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch import AdaptiveEnhancedEvolutionaryFireworksSearch lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch"] = AdaptiveEnhancedEvolutionaryFireworksSearch - LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch" - ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch").set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch", register=True) except Exception as e: print("AdaptiveEnhancedEvolutionaryFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch_v2 import ( - AdaptiveEnhancedEvolutionaryFireworksSearch_v2, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch_v2 import AdaptiveEnhancedEvolutionaryFireworksSearch_v2 - lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch_v2"] = ( - AdaptiveEnhancedEvolutionaryFireworksSearch_v2 - ) - LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2" - ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2", register=True) + lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch_v2"] = AdaptiveEnhancedEvolutionaryFireworksSearch_v2 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2").set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2", register=True) except Exception as e: print("AdaptiveEnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedExplorationGravitationalSwarmOptimization import ( - AdaptiveEnhancedExplorationGravitationalSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedExplorationGravitationalSwarmOptimization import AdaptiveEnhancedExplorationGravitationalSwarmOptimization - lama_register["AdaptiveEnhancedExplorationGravitationalSwarmOptimization"] = ( - AdaptiveEnhancedExplorationGravitationalSwarmOptimization - ) - LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization" - ).set_name("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization", register=True) + lama_register["AdaptiveEnhancedExplorationGravitationalSwarmOptimization"] = AdaptiveEnhancedExplorationGravitationalSwarmOptimization + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization").set_name("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization", register=True) except Exception as e: print("AdaptiveEnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithm import ( - AdaptiveEnhancedFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithm import AdaptiveEnhancedFireworkAlgorithm lama_register["AdaptiveEnhancedFireworkAlgorithm"] = AdaptiveEnhancedFireworkAlgorithm - LLAMAAdaptiveEnhancedFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedFireworkAlgorithm" - ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveEnhancedFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( - AdaptiveEnhancedFireworkAlgorithmWithLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithmWithLocalSearch import AdaptiveEnhancedFireworkAlgorithmWithLocalSearch - lama_register["AdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( - AdaptiveEnhancedFireworkAlgorithmWithLocalSearch - ) - LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" - ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) + lama_register["AdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = AdaptiveEnhancedFireworkAlgorithmWithLocalSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) except Exception as e: print("AdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGradientGuidedHybridPSO import ( - AdaptiveEnhancedGradientGuidedHybridPSO, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGradientGuidedHybridPSO import AdaptiveEnhancedGradientGuidedHybridPSO lama_register["AdaptiveEnhancedGradientGuidedHybridPSO"] = AdaptiveEnhancedGradientGuidedHybridPSO - LLAMAAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO" - ).set_name("LLAMAAdaptiveEnhancedGradientGuidedHybridPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMAAdaptiveEnhancedGradientGuidedHybridPSO", register=True) except Exception as e: print("AdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligence import ( - AdaptiveEnhancedGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligence import AdaptiveEnhancedGravitationalSwarmIntelligence - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligence"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligence - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligence"] = AdaptiveEnhancedGravitationalSwarmIntelligence + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV18 import ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV18, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV18 import AdaptiveEnhancedGravitationalSwarmIntelligenceV18 - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV18"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV18 - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV18"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV18 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV2 import ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV2, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV2 import AdaptiveEnhancedGravitationalSwarmIntelligenceV2 - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV2"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV2 - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV2"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV22 import ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV22, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV22 import AdaptiveEnhancedGravitationalSwarmIntelligenceV22 - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV22"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV22 - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV22"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV22 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV29 import ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV29, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV29 import AdaptiveEnhancedGravitationalSwarmIntelligenceV29 - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV29"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV29 - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV29"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV29 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligenceV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV33 import ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV33, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV33 import AdaptiveEnhancedGravitationalSwarmIntelligenceV33 - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV33"] = ( - AdaptiveEnhancedGravitationalSwarmIntelligenceV33 - ) - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33" - ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33", register=True) + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV33"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV33 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33", register=True) except Exception as e: print("AdaptiveEnhancedGravitationalSwarmIntelligenceV33 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonicFireworkAlgorithm import ( - AdaptiveEnhancedHarmonicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonicFireworkAlgorithm import AdaptiveEnhancedHarmonicFireworkAlgorithm lama_register["AdaptiveEnhancedHarmonicFireworkAlgorithm"] = AdaptiveEnhancedHarmonicFireworkAlgorithm - LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm" - ).set_name("LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveEnhancedHarmonicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch import ( - AdaptiveEnhancedHarmonyFireworksSearch, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch import AdaptiveEnhancedHarmonyFireworksSearch lama_register["AdaptiveEnhancedHarmonyFireworksSearch"] = AdaptiveEnhancedHarmonyFireworksSearch - LLAMAAdaptiveEnhancedHarmonyFireworksSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch" - ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch").set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch", register=True) except Exception as e: print("AdaptiveEnhancedHarmonyFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch_v2 import ( - AdaptiveEnhancedHarmonyFireworksSearch_v2, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch_v2 import AdaptiveEnhancedHarmonyFireworksSearch_v2 lama_register["AdaptiveEnhancedHarmonyFireworksSearch_v2"] = AdaptiveEnhancedHarmonyFireworksSearch_v2 - LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2" - ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2").set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2", register=True) except Exception as e: print("AdaptiveEnhancedHarmonyFireworksSearch_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration import ( - AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration import AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration - lama_register["AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration"] = ( - AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration - ) - LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration" - ).set_name("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration", register=True) + lama_register["AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration"] = AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration").set_name("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration", register=True) except Exception as e: print("AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedMemeticDifferentialEvolution import ( - AdaptiveEnhancedMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticDifferentialEvolution import AdaptiveEnhancedMemeticDifferentialEvolution - lama_register["AdaptiveEnhancedMemeticDifferentialEvolution"] = ( - AdaptiveEnhancedMemeticDifferentialEvolution - ) - LLAMAAdaptiveEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution" - ).set_name("LLAMAAdaptiveEnhancedMemeticDifferentialEvolution", register=True) + lama_register["AdaptiveEnhancedMemeticDifferentialEvolution"] = AdaptiveEnhancedMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution").set_name("LLAMAAdaptiveEnhancedMemeticDifferentialEvolution", register=True) except Exception as e: print("AdaptiveEnhancedMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 import ( - AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 import AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 - lama_register["AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3"] = ( - AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 - ) - LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3" - ).set_name("LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3", register=True) + lama_register["AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3"] = AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3").set_name("LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3", register=True) except Exception as e: print("AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv10 import AdaptiveEnhancedMetaNetAQAPSOv10 lama_register["AdaptiveEnhancedMetaNetAQAPSOv10"] = AdaptiveEnhancedMetaNetAQAPSOv10 - LLAMAAdaptiveEnhancedMetaNetAQAPSOv10 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10" - ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv10", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMetaNetAQAPSOv10 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10").set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv10", register=True) except Exception as e: print("AdaptiveEnhancedMetaNetAQAPSOv10 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv11 import AdaptiveEnhancedMetaNetAQAPSOv11 lama_register["AdaptiveEnhancedMetaNetAQAPSOv11"] = AdaptiveEnhancedMetaNetAQAPSOv11 - LLAMAAdaptiveEnhancedMetaNetAQAPSOv11 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11" - ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv11", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMetaNetAQAPSOv11 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11").set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv11", register=True) except Exception as e: print("AdaptiveEnhancedMetaNetAQAPSOv11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseDifferentialEvolution import ( - AdaptiveEnhancedMultiPhaseDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseDifferentialEvolution import AdaptiveEnhancedMultiPhaseDifferentialEvolution - lama_register["AdaptiveEnhancedMultiPhaseDifferentialEvolution"] = ( - AdaptiveEnhancedMultiPhaseDifferentialEvolution - ) - LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution" - ).set_name("LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution", register=True) + lama_register["AdaptiveEnhancedMultiPhaseDifferentialEvolution"] = AdaptiveEnhancedMultiPhaseDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution").set_name("LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution", register=True) except Exception as e: print("AdaptiveEnhancedMultiPhaseDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseOptimizationAlgorithm import ( - AdaptiveEnhancedMultiPhaseOptimizationAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseOptimizationAlgorithm import AdaptiveEnhancedMultiPhaseOptimizationAlgorithm - lama_register["AdaptiveEnhancedMultiPhaseOptimizationAlgorithm"] = ( - AdaptiveEnhancedMultiPhaseOptimizationAlgorithm - ) - LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm" - ).set_name("LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm", register=True) + lama_register["AdaptiveEnhancedMultiPhaseOptimizationAlgorithm"] = AdaptiveEnhancedMultiPhaseOptimizationAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm").set_name("LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm", register=True) except Exception as e: print("AdaptiveEnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEnhancedQGSA_v7 import AdaptiveEnhancedQGSA_v7 lama_register["AdaptiveEnhancedQGSA_v7"] = AdaptiveEnhancedQGSA_v7 - LLAMAAdaptiveEnhancedQGSA_v7 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7").set_name( - "LLAMAAdaptiveEnhancedQGSA_v7", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedQGSA_v7 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7").set_name("LLAMAAdaptiveEnhancedQGSA_v7", register=True) except Exception as e: print("AdaptiveEnhancedQGSA_v7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedQuantumHarmonySearch import ( - AdaptiveEnhancedQuantumHarmonySearch, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumHarmonySearch import AdaptiveEnhancedQuantumHarmonySearch lama_register["AdaptiveEnhancedQuantumHarmonySearch"] = AdaptiveEnhancedQuantumHarmonySearch - LLAMAAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedQuantumHarmonySearch" - ).set_name("LLAMAAdaptiveEnhancedQuantumHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumHarmonySearch").set_name("LLAMAAdaptiveEnhancedQuantumHarmonySearch", register=True) except Exception as e: print("AdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedQuantumSimulatedAnnealing import ( - AdaptiveEnhancedQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumSimulatedAnnealing import AdaptiveEnhancedQuantumSimulatedAnnealing lama_register["AdaptiveEnhancedQuantumSimulatedAnnealing"] = AdaptiveEnhancedQuantumSimulatedAnnealing - LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing" - ).set_name("LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveEnhancedQuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 import ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11"] = ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 - ) - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11" - ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11", register=True) + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11", register=True) except Exception as e: print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 import ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14"] = ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 - ) - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14" - ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14", register=True) + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14", register=True) except Exception as e: print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 import ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28, - ) + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28"] = ( - AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 - ) - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 = NonObjectOptimizer( - method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28" - ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28", register=True) + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 + res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28", register=True) except Exception as e: print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveEnsembleMemeticAlgorithm import AdaptiveEnsembleMemeticAlgorithm lama_register["AdaptiveEnsembleMemeticAlgorithm"] = AdaptiveEnsembleMemeticAlgorithm - LLAMAAdaptiveEnsembleMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveEnsembleMemeticAlgorithm" - ).set_name("LLAMAAdaptiveEnsembleMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnsembleMemeticAlgorithm").set_name("LLAMAAdaptiveEnsembleMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveEnsembleMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialOptimization import ( - AdaptiveEvolutionaryDifferentialOptimization, - ) + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialOptimization import AdaptiveEvolutionaryDifferentialOptimization - lama_register["AdaptiveEvolutionaryDifferentialOptimization"] = ( - AdaptiveEvolutionaryDifferentialOptimization - ) - LLAMAAdaptiveEvolutionaryDifferentialOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveEvolutionaryDifferentialOptimization" - ).set_name("LLAMAAdaptiveEvolutionaryDifferentialOptimization", register=True) + lama_register["AdaptiveEvolutionaryDifferentialOptimization"] = AdaptiveEvolutionaryDifferentialOptimization + res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEvolutionaryDifferentialOptimization = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialOptimization").set_name("LLAMAAdaptiveEvolutionaryDifferentialOptimization", register=True) except Exception as e: print("AdaptiveEvolutionaryDifferentialOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialPopulationStrategy import ( - AdaptiveEvolutionaryDifferentialPopulationStrategy, - ) + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialPopulationStrategy import AdaptiveEvolutionaryDifferentialPopulationStrategy - lama_register["AdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( - AdaptiveEvolutionaryDifferentialPopulationStrategy - ) - LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy" - ).set_name("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) + lama_register["AdaptiveEvolutionaryDifferentialPopulationStrategy"] = AdaptiveEvolutionaryDifferentialPopulationStrategy + res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy").set_name("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) except Exception as e: print("AdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryFireworksSearch_v1 import ( - AdaptiveEvolutionaryFireworksSearch_v1, - ) + from nevergrad.optimization.lama.AdaptiveEvolutionaryFireworksSearch_v1 import AdaptiveEvolutionaryFireworksSearch_v1 lama_register["AdaptiveEvolutionaryFireworksSearch_v1"] = AdaptiveEvolutionaryFireworksSearch_v1 - LLAMAAdaptiveEvolutionaryFireworksSearch_v1 = NonObjectOptimizer( - method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1" - ).set_name("LLAMAAdaptiveEvolutionaryFireworksSearch_v1", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEvolutionaryFireworksSearch_v1 = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1").set_name("LLAMAAdaptiveEvolutionaryFireworksSearch_v1", register=True) except Exception as e: print("AdaptiveEvolutionaryFireworksSearch_v1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryGradientSearch import ( - AdaptiveEvolutionaryGradientSearch, - ) + from nevergrad.optimization.lama.AdaptiveEvolutionaryGradientSearch import AdaptiveEvolutionaryGradientSearch lama_register["AdaptiveEvolutionaryGradientSearch"] = AdaptiveEvolutionaryGradientSearch - LLAMAAdaptiveEvolutionaryGradientSearch = NonObjectOptimizer( - method="LLAMAAdaptiveEvolutionaryGradientSearch" - ).set_name("LLAMAAdaptiveEvolutionaryGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryGradientSearch").set_name("LLAMAAdaptiveEvolutionaryGradientSearch", register=True) except Exception as e: print("AdaptiveEvolutionaryGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveExplorationEvolutionStrategy import ( - AdaptiveExplorationEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveExplorationEvolutionStrategy import AdaptiveExplorationEvolutionStrategy lama_register["AdaptiveExplorationEvolutionStrategy"] = AdaptiveExplorationEvolutionStrategy - LLAMAAdaptiveExplorationEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveExplorationEvolutionStrategy" - ).set_name("LLAMAAdaptiveExplorationEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveExplorationEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveExplorationEvolutionStrategy").set_name("LLAMAAdaptiveExplorationEvolutionStrategy", register=True) except Exception as e: print("AdaptiveExplorationEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveExplorationExploitationDifferentialEvolution import ( - AdaptiveExplorationExploitationDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveExplorationExploitationDifferentialEvolution import AdaptiveExplorationExploitationDifferentialEvolution - lama_register["AdaptiveExplorationExploitationDifferentialEvolution"] = ( - AdaptiveExplorationExploitationDifferentialEvolution - ) - LLAMAAdaptiveExplorationExploitationDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution" - ).set_name("LLAMAAdaptiveExplorationExploitationDifferentialEvolution", register=True) + lama_register["AdaptiveExplorationExploitationDifferentialEvolution"] = AdaptiveExplorationExploitationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveExplorationExploitationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution").set_name("LLAMAAdaptiveExplorationExploitationDifferentialEvolution", register=True) except Exception as e: print("AdaptiveExplorationExploitationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveExplorationExploitationHybridAlgorithm import ( - AdaptiveExplorationExploitationHybridAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveExplorationExploitationHybridAlgorithm import AdaptiveExplorationExploitationHybridAlgorithm - lama_register["AdaptiveExplorationExploitationHybridAlgorithm"] = ( - AdaptiveExplorationExploitationHybridAlgorithm - ) - LLAMAAdaptiveExplorationExploitationHybridAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm" - ).set_name("LLAMAAdaptiveExplorationExploitationHybridAlgorithm", register=True) + lama_register["AdaptiveExplorationExploitationHybridAlgorithm"] = AdaptiveExplorationExploitationHybridAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveExplorationExploitationHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm").set_name("LLAMAAdaptiveExplorationExploitationHybridAlgorithm", register=True) except Exception as e: print("AdaptiveExplorationExploitationHybridAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveExploratoryOptimizer import AdaptiveExploratoryOptimizer lama_register["AdaptiveExploratoryOptimizer"] = AdaptiveExploratoryOptimizer - LLAMAAdaptiveExploratoryOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveExploratoryOptimizer" - ).set_name("LLAMAAdaptiveExploratoryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveExploratoryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveExploratoryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveExploratoryOptimizer").set_name("LLAMAAdaptiveExploratoryOptimizer", register=True) except Exception as e: print("AdaptiveExploratoryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveFeedbackControlStrategyV61 import ( - AdaptiveFeedbackControlStrategyV61, - ) + from nevergrad.optimization.lama.AdaptiveFeedbackControlStrategyV61 import AdaptiveFeedbackControlStrategyV61 lama_register["AdaptiveFeedbackControlStrategyV61"] = AdaptiveFeedbackControlStrategyV61 - LLAMAAdaptiveFeedbackControlStrategyV61 = NonObjectOptimizer( - method="LLAMAAdaptiveFeedbackControlStrategyV61" - ).set_name("LLAMAAdaptiveFeedbackControlStrategyV61", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackControlStrategyV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFeedbackControlStrategyV61 = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackControlStrategyV61").set_name("LLAMAAdaptiveFeedbackControlStrategyV61", register=True) except Exception as e: print("AdaptiveFeedbackControlStrategyV61 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveFeedbackEnhancedMemoryStrategyV71 import ( - AdaptiveFeedbackEnhancedMemoryStrategyV71, - ) + from nevergrad.optimization.lama.AdaptiveFeedbackEnhancedMemoryStrategyV71 import AdaptiveFeedbackEnhancedMemoryStrategyV71 lama_register["AdaptiveFeedbackEnhancedMemoryStrategyV71"] = AdaptiveFeedbackEnhancedMemoryStrategyV71 - LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71 = NonObjectOptimizer( - method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71" - ).set_name("LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71 = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71").set_name("LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71", register=True) except Exception as e: print("AdaptiveFeedbackEnhancedMemoryStrategyV71 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmEnhanced import ( - AdaptiveFireworkAlgorithmEnhanced, - ) + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmEnhanced import AdaptiveFireworkAlgorithmEnhanced lama_register["AdaptiveFireworkAlgorithmEnhanced"] = AdaptiveFireworkAlgorithmEnhanced - LLAMAAdaptiveFireworkAlgorithmEnhanced = NonObjectOptimizer( - method="LLAMAAdaptiveFireworkAlgorithmEnhanced" - ).set_name("LLAMAAdaptiveFireworkAlgorithmEnhanced", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFireworkAlgorithmEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmEnhanced").set_name("LLAMAAdaptiveFireworkAlgorithmEnhanced", register=True) except Exception as e: print("AdaptiveFireworkAlgorithmEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmOptimization import ( - AdaptiveFireworkAlgorithmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmOptimization import AdaptiveFireworkAlgorithmOptimization lama_register["AdaptiveFireworkAlgorithmOptimization"] = AdaptiveFireworkAlgorithmOptimization - LLAMAAdaptiveFireworkAlgorithmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveFireworkAlgorithmOptimization" - ).set_name("LLAMAAdaptiveFireworkAlgorithmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmOptimization").set_name("LLAMAAdaptiveFireworkAlgorithmOptimization", register=True) except Exception as e: print("AdaptiveFireworkAlgorithmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveFireworksEnhancedHarmonySearch import ( - AdaptiveFireworksEnhancedHarmonySearch, - ) + from nevergrad.optimization.lama.AdaptiveFireworksEnhancedHarmonySearch import AdaptiveFireworksEnhancedHarmonySearch lama_register["AdaptiveFireworksEnhancedHarmonySearch"] = AdaptiveFireworksEnhancedHarmonySearch - LLAMAAdaptiveFireworksEnhancedHarmonySearch = NonObjectOptimizer( - method="LLAMAAdaptiveFireworksEnhancedHarmonySearch" - ).set_name("LLAMAAdaptiveFireworksEnhancedHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFireworksEnhancedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFireworksEnhancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveFireworksEnhancedHarmonySearch").set_name("LLAMAAdaptiveFireworksEnhancedHarmonySearch", register=True) except Exception as e: print("AdaptiveFireworksEnhancedHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveFocusedEvolutionStrategy import AdaptiveFocusedEvolutionStrategy lama_register["AdaptiveFocusedEvolutionStrategy"] = AdaptiveFocusedEvolutionStrategy - LLAMAAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveFocusedEvolutionStrategy" - ).set_name("LLAMAAdaptiveFocusedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveFocusedEvolutionStrategy").set_name("LLAMAAdaptiveFocusedEvolutionStrategy", register=True) except Exception as e: print("AdaptiveFocusedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveFuzzyDynamicDE import AdaptiveFuzzyDynamicDE lama_register["AdaptiveFuzzyDynamicDE"] = AdaptiveFuzzyDynamicDE - LLAMAAdaptiveFuzzyDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE").set_name( - "LLAMAAdaptiveFuzzyDynamicDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveFuzzyDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE").set_name("LLAMAAdaptiveFuzzyDynamicDE", register=True) except Exception as e: print("AdaptiveFuzzyDynamicDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGaussianSearch import AdaptiveGaussianSearch lama_register["AdaptiveGaussianSearch"] = AdaptiveGaussianSearch - LLAMAAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch").set_name( - "LLAMAAdaptiveGaussianSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch").set_name("LLAMAAdaptiveGaussianSearch", register=True) except Exception as e: print("AdaptiveGaussianSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGlobalLocalSearchStrategyV62 import ( - AdaptiveGlobalLocalSearchStrategyV62, - ) + from nevergrad.optimization.lama.AdaptiveGlobalLocalSearchStrategyV62 import AdaptiveGlobalLocalSearchStrategyV62 lama_register["AdaptiveGlobalLocalSearchStrategyV62"] = AdaptiveGlobalLocalSearchStrategyV62 - LLAMAAdaptiveGlobalLocalSearchStrategyV62 = NonObjectOptimizer( - method="LLAMAAdaptiveGlobalLocalSearchStrategyV62" - ).set_name("LLAMAAdaptiveGlobalLocalSearchStrategyV62", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGlobalLocalSearchStrategyV62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGlobalLocalSearchStrategyV62 = NonObjectOptimizer(method="LLAMAAdaptiveGlobalLocalSearchStrategyV62").set_name("LLAMAAdaptiveGlobalLocalSearchStrategyV62", register=True) except Exception as e: print("AdaptiveGlobalLocalSearchStrategyV62 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientAssistedEvolution import ( - AdaptiveGradientAssistedEvolution, - ) + from nevergrad.optimization.lama.AdaptiveGradientAssistedEvolution import AdaptiveGradientAssistedEvolution lama_register["AdaptiveGradientAssistedEvolution"] = AdaptiveGradientAssistedEvolution - LLAMAAdaptiveGradientAssistedEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveGradientAssistedEvolution" - ).set_name("LLAMAAdaptiveGradientAssistedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientAssistedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientAssistedEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientAssistedEvolution").set_name("LLAMAAdaptiveGradientAssistedEvolution", register=True) except Exception as e: print("AdaptiveGradientAssistedEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBalancedCrossoverPSO import ( - AdaptiveGradientBalancedCrossoverPSO, - ) + from nevergrad.optimization.lama.AdaptiveGradientBalancedCrossoverPSO import AdaptiveGradientBalancedCrossoverPSO lama_register["AdaptiveGradientBalancedCrossoverPSO"] = AdaptiveGradientBalancedCrossoverPSO - LLAMAAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBalancedCrossoverPSO" - ).set_name("LLAMAAdaptiveGradientBalancedCrossoverPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMAAdaptiveGradientBalancedCrossoverPSO", register=True) except Exception as e: print("AdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBalancedEvolutionStrategy import ( - AdaptiveGradientBalancedEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptiveGradientBalancedEvolutionStrategy import AdaptiveGradientBalancedEvolutionStrategy lama_register["AdaptiveGradientBalancedEvolutionStrategy"] = AdaptiveGradientBalancedEvolutionStrategy - LLAMAAdaptiveGradientBalancedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBalancedEvolutionStrategy" - ).set_name("LLAMAAdaptiveGradientBalancedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedEvolutionStrategy").set_name("LLAMAAdaptiveGradientBalancedEvolutionStrategy", register=True) except Exception as e: print("AdaptiveGradientBalancedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingPlus import ( - AdaptiveGradientBoostedMemoryAnnealingPlus, - ) + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingPlus import AdaptiveGradientBoostedMemoryAnnealingPlus lama_register["AdaptiveGradientBoostedMemoryAnnealingPlus"] = AdaptiveGradientBoostedMemoryAnnealingPlus - LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus" - ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus").set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus", register=True) except Exception as e: print("AdaptiveGradientBoostedMemoryAnnealingPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl import ( - AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl, - ) + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl import AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl - lama_register["AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl"] = ( - AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl - ) - LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl" - ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl", register=True) + lama_register["AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl"] = AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl").set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl", register=True) except Exception as e: print("AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryExploration import ( - AdaptiveGradientBoostedMemoryExploration, - ) + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryExploration import AdaptiveGradientBoostedMemoryExploration lama_register["AdaptiveGradientBoostedMemoryExploration"] = AdaptiveGradientBoostedMemoryExploration - LLAMAAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBoostedMemoryExploration" - ).set_name("LLAMAAdaptiveGradientBoostedMemoryExploration", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryExploration").set_name("LLAMAAdaptiveGradientBoostedMemoryExploration", register=True) except Exception as e: print("AdaptiveGradientBoostedMemoryExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemorySimulatedAnnealing import ( - AdaptiveGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemorySimulatedAnnealing import AdaptiveGradientBoostedMemorySimulatedAnnealing - lama_register["AdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( - AdaptiveGradientBoostedMemorySimulatedAnnealing - ) - LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["AdaptiveGradientBoostedMemorySimulatedAnnealing"] = AdaptiveGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("AdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientClusteringEvolution import ( - AdaptiveGradientClusteringEvolution, - ) + from nevergrad.optimization.lama.AdaptiveGradientClusteringEvolution import AdaptiveGradientClusteringEvolution lama_register["AdaptiveGradientClusteringEvolution"] = AdaptiveGradientClusteringEvolution - LLAMAAdaptiveGradientClusteringEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveGradientClusteringEvolution" - ).set_name("LLAMAAdaptiveGradientClusteringEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientClusteringEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientClusteringEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientClusteringEvolution").set_name("LLAMAAdaptiveGradientClusteringEvolution", register=True) except Exception as e: print("AdaptiveGradientClusteringEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientCrossoverOptimizer import ( - AdaptiveGradientCrossoverOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveGradientCrossoverOptimizer import AdaptiveGradientCrossoverOptimizer lama_register["AdaptiveGradientCrossoverOptimizer"] = AdaptiveGradientCrossoverOptimizer - LLAMAAdaptiveGradientCrossoverOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveGradientCrossoverOptimizer" - ).set_name("LLAMAAdaptiveGradientCrossoverOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGradientCrossoverOptimizer").set_name("LLAMAAdaptiveGradientCrossoverOptimizer", register=True) except Exception as e: print("AdaptiveGradientCrossoverOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolution import ( - AdaptiveGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolution import AdaptiveGradientDifferentialEvolution lama_register["AdaptiveGradientDifferentialEvolution"] = AdaptiveGradientDifferentialEvolution - LLAMAAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveGradientDifferentialEvolution" - ).set_name("LLAMAAdaptiveGradientDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolution").set_name("LLAMAAdaptiveGradientDifferentialEvolution", register=True) except Exception as e: print("AdaptiveGradientDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionEnhanced import ( - AdaptiveGradientDifferentialEvolutionEnhanced, - ) + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionEnhanced import AdaptiveGradientDifferentialEvolutionEnhanced - lama_register["AdaptiveGradientDifferentialEvolutionEnhanced"] = ( - AdaptiveGradientDifferentialEvolutionEnhanced - ) - LLAMAAdaptiveGradientDifferentialEvolutionEnhanced = NonObjectOptimizer( - method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced" - ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionEnhanced", register=True) + lama_register["AdaptiveGradientDifferentialEvolutionEnhanced"] = AdaptiveGradientDifferentialEvolutionEnhanced + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientDifferentialEvolutionEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced").set_name("LLAMAAdaptiveGradientDifferentialEvolutionEnhanced", register=True) except Exception as e: print("AdaptiveGradientDifferentialEvolutionEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionPlus import ( - AdaptiveGradientDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionPlus import AdaptiveGradientDifferentialEvolutionPlus lama_register["AdaptiveGradientDifferentialEvolutionPlus"] = AdaptiveGradientDifferentialEvolutionPlus - LLAMAAdaptiveGradientDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAAdaptiveGradientDifferentialEvolutionPlus" - ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionPlus").set_name("LLAMAAdaptiveGradientDifferentialEvolutionPlus", register=True) except Exception as e: print("AdaptiveGradientDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialHybrid import ( - AdaptiveGradientDifferentialHybrid, - ) + from nevergrad.optimization.lama.AdaptiveGradientDifferentialHybrid import AdaptiveGradientDifferentialHybrid lama_register["AdaptiveGradientDifferentialHybrid"] = AdaptiveGradientDifferentialHybrid - LLAMAAdaptiveGradientDifferentialHybrid = NonObjectOptimizer( - method="LLAMAAdaptiveGradientDifferentialHybrid" - ).set_name("LLAMAAdaptiveGradientDifferentialHybrid", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientDifferentialHybrid = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialHybrid").set_name("LLAMAAdaptiveGradientDifferentialHybrid", register=True) except Exception as e: print("AdaptiveGradientDifferentialHybrid can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientEnhancedExplorationPSO import ( - AdaptiveGradientEnhancedExplorationPSO, - ) + from nevergrad.optimization.lama.AdaptiveGradientEnhancedExplorationPSO import AdaptiveGradientEnhancedExplorationPSO lama_register["AdaptiveGradientEnhancedExplorationPSO"] = AdaptiveGradientEnhancedExplorationPSO - LLAMAAdaptiveGradientEnhancedExplorationPSO = NonObjectOptimizer( - method="LLAMAAdaptiveGradientEnhancedExplorationPSO" - ).set_name("LLAMAAdaptiveGradientEnhancedExplorationPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedExplorationPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientEnhancedExplorationPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedExplorationPSO").set_name("LLAMAAdaptiveGradientEnhancedExplorationPSO", register=True) except Exception as e: print("AdaptiveGradientEnhancedExplorationPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGradientEnhancedMultiPhaseAnnealing import ( - AdaptiveGradientEnhancedMultiPhaseAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveGradientEnhancedMultiPhaseAnnealing import AdaptiveGradientEnhancedMultiPhaseAnnealing lama_register["AdaptiveGradientEnhancedMultiPhaseAnnealing"] = AdaptiveGradientEnhancedMultiPhaseAnnealing - LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing" - ).set_name("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing").set_name("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing", register=True) except Exception as e: print("AdaptiveGradientEnhancedMultiPhaseAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientEnhancedRAMEDS import AdaptiveGradientEnhancedRAMEDS lama_register["AdaptiveGradientEnhancedRAMEDS"] = AdaptiveGradientEnhancedRAMEDS - LLAMAAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( - method="LLAMAAdaptiveGradientEnhancedRAMEDS" - ).set_name("LLAMAAdaptiveGradientEnhancedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedRAMEDS").set_name("LLAMAAdaptiveGradientEnhancedRAMEDS", register=True) except Exception as e: print("AdaptiveGradientEnhancedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientEvolution import AdaptiveGradientEvolution lama_register["AdaptiveGradientEvolution"] = AdaptiveGradientEvolution - LLAMAAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution").set_name( - "LLAMAAdaptiveGradientEvolution", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution").set_name("LLAMAAdaptiveGradientEvolution", register=True) except Exception as e: print("AdaptiveGradientEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientExploration import AdaptiveGradientExploration lama_register["AdaptiveGradientExploration"] = AdaptiveGradientExploration - LLAMAAdaptiveGradientExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration").set_name( - "LLAMAAdaptiveGradientExploration", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration").set_name("LLAMAAdaptiveGradientExploration", register=True) except Exception as e: print("AdaptiveGradientExploration can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientExplorationV2 import AdaptiveGradientExplorationV2 lama_register["AdaptiveGradientExplorationV2"] = AdaptiveGradientExplorationV2 - LLAMAAdaptiveGradientExplorationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveGradientExplorationV2" - ).set_name("LLAMAAdaptiveGradientExplorationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExplorationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientExplorationV2 = NonObjectOptimizer(method="LLAMAAdaptiveGradientExplorationV2").set_name("LLAMAAdaptiveGradientExplorationV2", register=True) except Exception as e: print("AdaptiveGradientExplorationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientGuidedEvolution import AdaptiveGradientGuidedEvolution lama_register["AdaptiveGradientGuidedEvolution"] = AdaptiveGradientGuidedEvolution - LLAMAAdaptiveGradientGuidedEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveGradientGuidedEvolution" - ).set_name("LLAMAAdaptiveGradientGuidedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientGuidedEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientGuidedEvolution").set_name("LLAMAAdaptiveGradientGuidedEvolution", register=True) except Exception as e: print("AdaptiveGradientGuidedEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientInformedPSO import AdaptiveGradientInformedPSO lama_register["AdaptiveGradientInformedPSO"] = AdaptiveGradientInformedPSO - LLAMAAdaptiveGradientInformedPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO").set_name( - "LLAMAAdaptiveGradientInformedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientInformedPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO").set_name("LLAMAAdaptiveGradientInformedPSO", register=True) except Exception as e: print("AdaptiveGradientInformedPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientSampling import AdaptiveGradientSampling lama_register["AdaptiveGradientSampling"] = AdaptiveGradientSampling - LLAMAAdaptiveGradientSampling = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling").set_name( - "LLAMAAdaptiveGradientSampling", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientSampling = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling").set_name("LLAMAAdaptiveGradientSampling", register=True) except Exception as e: print("AdaptiveGradientSampling can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGradientSearch import AdaptiveGradientSearch lama_register["AdaptiveGradientSearch"] = AdaptiveGradientSearch - LLAMAAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch").set_name( - "LLAMAAdaptiveGradientSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch").set_name("LLAMAAdaptiveGradientSearch", register=True) except Exception as e: print("AdaptiveGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligence import ( - AdaptiveGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligence import AdaptiveGravitationalSwarmIntelligence lama_register["AdaptiveGravitationalSwarmIntelligence"] = AdaptiveGravitationalSwarmIntelligence - LLAMAAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligence" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligence", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAAdaptiveGravitationalSwarmIntelligence", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV15 import ( - AdaptiveGravitationalSwarmIntelligenceV15, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV15 import AdaptiveGravitationalSwarmIntelligenceV15 lama_register["AdaptiveGravitationalSwarmIntelligenceV15"] = AdaptiveGravitationalSwarmIntelligenceV15 - LLAMAAdaptiveGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV15", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV15 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV15", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligenceV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV2 import ( - AdaptiveGravitationalSwarmIntelligenceV2, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV2 import AdaptiveGravitationalSwarmIntelligenceV2 lama_register["AdaptiveGravitationalSwarmIntelligenceV2"] = AdaptiveGravitationalSwarmIntelligenceV2 - LLAMAAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV2", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV26 import ( - AdaptiveGravitationalSwarmIntelligenceV26, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV26 import AdaptiveGravitationalSwarmIntelligenceV26 lama_register["AdaptiveGravitationalSwarmIntelligenceV26"] = AdaptiveGravitationalSwarmIntelligenceV26 - LLAMAAdaptiveGravitationalSwarmIntelligenceV26 = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV26", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV26 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV26", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligenceV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV3 import ( - AdaptiveGravitationalSwarmIntelligenceV3, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV3 import AdaptiveGravitationalSwarmIntelligenceV3 lama_register["AdaptiveGravitationalSwarmIntelligenceV3"] = AdaptiveGravitationalSwarmIntelligenceV3 - LLAMAAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV3", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV4 import ( - AdaptiveGravitationalSwarmIntelligenceV4, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV4 import AdaptiveGravitationalSwarmIntelligenceV4 lama_register["AdaptiveGravitationalSwarmIntelligenceV4"] = AdaptiveGravitationalSwarmIntelligenceV4 - LLAMAAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4" - ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV4", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV4", register=True) except Exception as e: print("AdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( - AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, - ) + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - lama_register["AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( - AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - ) - LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( - method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" - ).set_name("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) + lama_register["AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation").set_name("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) except Exception as e: print("AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGuidedCulturalSearch import AdaptiveGuidedCulturalSearch lama_register["AdaptiveGuidedCulturalSearch"] = AdaptiveGuidedCulturalSearch - LLAMAAdaptiveGuidedCulturalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveGuidedCulturalSearch" - ).set_name("LLAMAAdaptiveGuidedCulturalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGuidedCulturalSearch = NonObjectOptimizer(method="LLAMAAdaptiveGuidedCulturalSearch").set_name("LLAMAAdaptiveGuidedCulturalSearch", register=True) except Exception as e: print("AdaptiveGuidedCulturalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveGuidedDifferentialEvolution import ( - AdaptiveGuidedDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveGuidedDifferentialEvolution import AdaptiveGuidedDifferentialEvolution lama_register["AdaptiveGuidedDifferentialEvolution"] = AdaptiveGuidedDifferentialEvolution - LLAMAAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveGuidedDifferentialEvolution" - ).set_name("LLAMAAdaptiveGuidedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGuidedDifferentialEvolution").set_name("LLAMAAdaptiveGuidedDifferentialEvolution", register=True) except Exception as e: print("AdaptiveGuidedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGuidedEvolutionStrategy import AdaptiveGuidedEvolutionStrategy lama_register["AdaptiveGuidedEvolutionStrategy"] = AdaptiveGuidedEvolutionStrategy - LLAMAAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveGuidedEvolutionStrategy" - ).set_name("LLAMAAdaptiveGuidedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveGuidedEvolutionStrategy").set_name("LLAMAAdaptiveGuidedEvolutionStrategy", register=True) except Exception as e: print("AdaptiveGuidedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGuidedHybridOptimizer import AdaptiveGuidedHybridOptimizer lama_register["AdaptiveGuidedHybridOptimizer"] = AdaptiveGuidedHybridOptimizer - LLAMAAdaptiveGuidedHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveGuidedHybridOptimizer" - ).set_name("LLAMAAdaptiveGuidedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGuidedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGuidedHybridOptimizer").set_name("LLAMAAdaptiveGuidedHybridOptimizer", register=True) except Exception as e: print("AdaptiveGuidedHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveGuidedMutationOptimizer import AdaptiveGuidedMutationOptimizer lama_register["AdaptiveGuidedMutationOptimizer"] = AdaptiveGuidedMutationOptimizer - LLAMAAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveGuidedMutationOptimizer" - ).set_name("LLAMAAdaptiveGuidedMutationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGuidedMutationOptimizer").set_name("LLAMAAdaptiveGuidedMutationOptimizer", register=True) except Exception as e: print("AdaptiveGuidedMutationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonicFireworkAlgorithm import ( - AdaptiveHarmonicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveHarmonicFireworkAlgorithm import AdaptiveHarmonicFireworkAlgorithm lama_register["AdaptiveHarmonicFireworkAlgorithm"] = AdaptiveHarmonicFireworkAlgorithm - LLAMAAdaptiveHarmonicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicFireworkAlgorithm" - ).set_name("LLAMAAdaptiveHarmonicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicFireworkAlgorithm").set_name("LLAMAAdaptiveHarmonicFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveHarmonicFireworkAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonicSearchOptimizer import AdaptiveHarmonicSearchOptimizer lama_register["AdaptiveHarmonicSearchOptimizer"] = AdaptiveHarmonicSearchOptimizer - LLAMAAdaptiveHarmonicSearchOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicSearchOptimizer" - ).set_name("LLAMAAdaptiveHarmonicSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSearchOptimizer").set_name("LLAMAAdaptiveHarmonicSearchOptimizer", register=True) except Exception as e: print("AdaptiveHarmonicSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimization import ( - AdaptiveHarmonicSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimization import AdaptiveHarmonicSwarmOptimization lama_register["AdaptiveHarmonicSwarmOptimization"] = AdaptiveHarmonicSwarmOptimization - LLAMAAdaptiveHarmonicSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicSwarmOptimization" - ).set_name("LLAMAAdaptiveHarmonicSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimization").set_name("LLAMAAdaptiveHarmonicSwarmOptimization", register=True) except Exception as e: print("AdaptiveHarmonicSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV2 import ( - AdaptiveHarmonicSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV2 import AdaptiveHarmonicSwarmOptimizationV2 lama_register["AdaptiveHarmonicSwarmOptimizationV2"] = AdaptiveHarmonicSwarmOptimizationV2 - LLAMAAdaptiveHarmonicSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicSwarmOptimizationV2" - ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV2").set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV2", register=True) except Exception as e: print("AdaptiveHarmonicSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV3 import ( - AdaptiveHarmonicSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV3 import AdaptiveHarmonicSwarmOptimizationV3 lama_register["AdaptiveHarmonicSwarmOptimizationV3"] = AdaptiveHarmonicSwarmOptimizationV3 - LLAMAAdaptiveHarmonicSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicSwarmOptimizationV3" - ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV3").set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV3", register=True) except Exception as e: print("AdaptiveHarmonicSwarmOptimizationV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV12 import AdaptiveHarmonicTabuSearchV12 lama_register["AdaptiveHarmonicTabuSearchV12"] = AdaptiveHarmonicTabuSearchV12 - LLAMAAdaptiveHarmonicTabuSearchV12 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicTabuSearchV12" - ).set_name("LLAMAAdaptiveHarmonicTabuSearchV12", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicTabuSearchV12 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV12").set_name("LLAMAAdaptiveHarmonicTabuSearchV12", register=True) except Exception as e: print("AdaptiveHarmonicTabuSearchV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV17 import AdaptiveHarmonicTabuSearchV17 lama_register["AdaptiveHarmonicTabuSearchV17"] = AdaptiveHarmonicTabuSearchV17 - LLAMAAdaptiveHarmonicTabuSearchV17 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicTabuSearchV17" - ).set_name("LLAMAAdaptiveHarmonicTabuSearchV17", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicTabuSearchV17 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV17").set_name("LLAMAAdaptiveHarmonicTabuSearchV17", register=True) except Exception as e: print("AdaptiveHarmonicTabuSearchV17 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV20 import AdaptiveHarmonicTabuSearchV20 lama_register["AdaptiveHarmonicTabuSearchV20"] = AdaptiveHarmonicTabuSearchV20 - LLAMAAdaptiveHarmonicTabuSearchV20 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicTabuSearchV20" - ).set_name("LLAMAAdaptiveHarmonicTabuSearchV20", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicTabuSearchV20 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV20").set_name("LLAMAAdaptiveHarmonicTabuSearchV20", register=True) except Exception as e: print("AdaptiveHarmonicTabuSearchV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV8 import AdaptiveHarmonicTabuSearchV8 lama_register["AdaptiveHarmonicTabuSearchV8"] = AdaptiveHarmonicTabuSearchV8 - LLAMAAdaptiveHarmonicTabuSearchV8 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonicTabuSearchV8" - ).set_name("LLAMAAdaptiveHarmonicTabuSearchV8", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonicTabuSearchV8 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV8").set_name("LLAMAAdaptiveHarmonicTabuSearchV8", register=True) except Exception as e: print("AdaptiveHarmonicTabuSearchV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonyFireworksAlgorithm import ( - AdaptiveHarmonyFireworksAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveHarmonyFireworksAlgorithm import AdaptiveHarmonyFireworksAlgorithm lama_register["AdaptiveHarmonyFireworksAlgorithm"] = AdaptiveHarmonyFireworksAlgorithm - LLAMAAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyFireworksAlgorithm" - ).set_name("LLAMAAdaptiveHarmonyFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyFireworksAlgorithm").set_name("LLAMAAdaptiveHarmonyFireworksAlgorithm", register=True) except Exception as e: print("AdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithm import AdaptiveHarmonyMemeticAlgorithm lama_register["AdaptiveHarmonyMemeticAlgorithm"] = AdaptiveHarmonyMemeticAlgorithm - LLAMAAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyMemeticAlgorithm" - ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithm").set_name("LLAMAAdaptiveHarmonyMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithmV15 import ( - AdaptiveHarmonyMemeticAlgorithmV15, - ) + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithmV15 import AdaptiveHarmonyMemeticAlgorithmV15 lama_register["AdaptiveHarmonyMemeticAlgorithmV15"] = AdaptiveHarmonyMemeticAlgorithmV15 - LLAMAAdaptiveHarmonyMemeticAlgorithmV15 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15" - ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithmV15", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyMemeticAlgorithmV15 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15").set_name("LLAMAAdaptiveHarmonyMemeticAlgorithmV15", register=True) except Exception as e: print("AdaptiveHarmonyMemeticAlgorithmV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV2 import ( - AdaptiveHarmonyMemeticOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV2 import AdaptiveHarmonyMemeticOptimizationV2 lama_register["AdaptiveHarmonyMemeticOptimizationV2"] = AdaptiveHarmonyMemeticOptimizationV2 - LLAMAAdaptiveHarmonyMemeticOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyMemeticOptimizationV2" - ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyMemeticOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV2").set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV2", register=True) except Exception as e: print("AdaptiveHarmonyMemeticOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV27 import ( - AdaptiveHarmonyMemeticOptimizationV27, - ) + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV27 import AdaptiveHarmonyMemeticOptimizationV27 lama_register["AdaptiveHarmonyMemeticOptimizationV27"] = AdaptiveHarmonyMemeticOptimizationV27 - LLAMAAdaptiveHarmonyMemeticOptimizationV27 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyMemeticOptimizationV27" - ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV27", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyMemeticOptimizationV27 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV27").set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV27", register=True) except Exception as e: print("AdaptiveHarmonyMemeticOptimizationV27 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonyMemeticSearchV2 import AdaptiveHarmonyMemeticSearchV2 lama_register["AdaptiveHarmonyMemeticSearchV2"] = AdaptiveHarmonyMemeticSearchV2 - LLAMAAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyMemeticSearchV2" - ).set_name("LLAMAAdaptiveHarmonyMemeticSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticSearchV2").set_name("LLAMAAdaptiveHarmonyMemeticSearchV2", register=True) except Exception as e: print("AdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonySearchOptimizerV2 import AdaptiveHarmonySearchOptimizerV2 lama_register["AdaptiveHarmonySearchOptimizerV2"] = AdaptiveHarmonySearchOptimizerV2 - LLAMAAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchOptimizerV2" - ).set_name("LLAMAAdaptiveHarmonySearchOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAAdaptiveHarmonySearchOptimizerV2", register=True) except Exception as e: print("AdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithCuckooInspiration import ( - AdaptiveHarmonySearchWithCuckooInspiration, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithCuckooInspiration import AdaptiveHarmonySearchWithCuckooInspiration lama_register["AdaptiveHarmonySearchWithCuckooInspiration"] = AdaptiveHarmonySearchWithCuckooInspiration - LLAMAAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration" - ).set_name("LLAMAAdaptiveHarmonySearchWithCuckooInspiration", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration").set_name("LLAMAAdaptiveHarmonySearchWithCuckooInspiration", register=True) except Exception as e: print("AdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 import ( - AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 import AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 - lama_register["AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2"] = ( - AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 - ) - LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2" - ).set_name("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2", register=True) + lama_register["AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2"] = AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2").set_name("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2", register=True) except Exception as e: print("AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlight import ( - AdaptiveHarmonySearchWithImprovedLevyFlight, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlight import AdaptiveHarmonySearchWithImprovedLevyFlight lama_register["AdaptiveHarmonySearchWithImprovedLevyFlight"] = AdaptiveHarmonySearchWithImprovedLevyFlight - LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight" - ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight").set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) except Exception as e: print("AdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlightInspiration import ( - AdaptiveHarmonySearchWithImprovedLevyFlightInspiration, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlightInspiration import AdaptiveHarmonySearchWithImprovedLevyFlightInspiration - lama_register["AdaptiveHarmonySearchWithImprovedLevyFlightInspiration"] = ( - AdaptiveHarmonySearchWithImprovedLevyFlightInspiration - ) - LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration" - ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration", register=True) + lama_register["AdaptiveHarmonySearchWithImprovedLevyFlightInspiration"] = AdaptiveHarmonySearchWithImprovedLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration").set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration", register=True) except Exception as e: print("AdaptiveHarmonySearchWithImprovedLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLevyFlightImprovement import ( - AdaptiveHarmonySearchWithLevyFlightImprovement, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLevyFlightImprovement import AdaptiveHarmonySearchWithLevyFlightImprovement - lama_register["AdaptiveHarmonySearchWithLevyFlightImprovement"] = ( - AdaptiveHarmonySearchWithLevyFlightImprovement - ) - LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement" - ).set_name("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement", register=True) + lama_register["AdaptiveHarmonySearchWithLevyFlightImprovement"] = AdaptiveHarmonySearchWithLevyFlightImprovement + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement").set_name("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement", register=True) except Exception as e: print("AdaptiveHarmonySearchWithLevyFlightImprovement can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimization import ( - AdaptiveHarmonySearchWithLocalOptimization, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimization import AdaptiveHarmonySearchWithLocalOptimization lama_register["AdaptiveHarmonySearchWithLocalOptimization"] = AdaptiveHarmonySearchWithLocalOptimization - LLAMAAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithLocalOptimization" - ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimization").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimization", register=True) except Exception as e: print("AdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationImproved import ( - AdaptiveHarmonySearchWithLocalOptimizationImproved, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationImproved import AdaptiveHarmonySearchWithLocalOptimizationImproved - lama_register["AdaptiveHarmonySearchWithLocalOptimizationImproved"] = ( - AdaptiveHarmonySearchWithLocalOptimizationImproved - ) - LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved" - ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved", register=True) + lama_register["AdaptiveHarmonySearchWithLocalOptimizationImproved"] = AdaptiveHarmonySearchWithLocalOptimizationImproved + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved", register=True) except Exception as e: print("AdaptiveHarmonySearchWithLocalOptimizationImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationV2 import ( - AdaptiveHarmonySearchWithLocalOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationV2 import AdaptiveHarmonySearchWithLocalOptimizationV2 - lama_register["AdaptiveHarmonySearchWithLocalOptimizationV2"] = ( - AdaptiveHarmonySearchWithLocalOptimizationV2 - ) - LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2" - ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2", register=True) + lama_register["AdaptiveHarmonySearchWithLocalOptimizationV2"] = AdaptiveHarmonySearchWithLocalOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2", register=True) except Exception as e: print("AdaptiveHarmonySearchWithLocalOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithSimulatedAnnealing import ( - AdaptiveHarmonySearchWithSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithSimulatedAnnealing import AdaptiveHarmonySearchWithSimulatedAnnealing lama_register["AdaptiveHarmonySearchWithSimulatedAnnealing"] = AdaptiveHarmonySearchWithSimulatedAnnealing - LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing" - ).set_name("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing").set_name("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHarmonyTabuOptimization import AdaptiveHarmonyTabuOptimization lama_register["AdaptiveHarmonyTabuOptimization"] = AdaptiveHarmonyTabuOptimization - LLAMAAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveHarmonyTabuOptimization" - ).set_name("LLAMAAdaptiveHarmonyTabuOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyTabuOptimization").set_name("LLAMAAdaptiveHarmonyTabuOptimization", register=True) except Exception as e: print("AdaptiveHarmonyTabuOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridAlgorithm import AdaptiveHybridAlgorithm lama_register["AdaptiveHybridAlgorithm"] = AdaptiveHybridAlgorithm - LLAMAAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm").set_name( - "LLAMAAdaptiveHybridAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm").set_name("LLAMAAdaptiveHybridAlgorithm", register=True) except Exception as e: print("AdaptiveHybridAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithGradientBoost import ( - AdaptiveHybridAnnealingWithGradientBoost, - ) + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithGradientBoost import AdaptiveHybridAnnealingWithGradientBoost lama_register["AdaptiveHybridAnnealingWithGradientBoost"] = AdaptiveHybridAnnealingWithGradientBoost - LLAMAAdaptiveHybridAnnealingWithGradientBoost = NonObjectOptimizer( - method="LLAMAAdaptiveHybridAnnealingWithGradientBoost" - ).set_name("LLAMAAdaptiveHybridAnnealingWithGradientBoost", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridAnnealingWithGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithGradientBoost").set_name("LLAMAAdaptiveHybridAnnealingWithGradientBoost", register=True) except Exception as e: print("AdaptiveHybridAnnealingWithGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithMemoryRefinement import ( - AdaptiveHybridAnnealingWithMemoryRefinement, - ) + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithMemoryRefinement import AdaptiveHybridAnnealingWithMemoryRefinement lama_register["AdaptiveHybridAnnealingWithMemoryRefinement"] = AdaptiveHybridAnnealingWithMemoryRefinement - LLAMAAdaptiveHybridAnnealingWithMemoryRefinement = NonObjectOptimizer( - method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement" - ).set_name("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridAnnealingWithMemoryRefinement = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement").set_name("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement", register=True) except Exception as e: print("AdaptiveHybridAnnealingWithMemoryRefinement can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridCMAESDE import AdaptiveHybridCMAESDE lama_register["AdaptiveHybridCMAESDE"] = AdaptiveHybridCMAESDE - LLAMAAdaptiveHybridCMAESDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE").set_name( - "LLAMAAdaptiveHybridCMAESDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridCMAESDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE").set_name("LLAMAAdaptiveHybridCMAESDE", register=True) except Exception as e: print("AdaptiveHybridCMAESDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 import ( - AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 import AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 - lama_register["AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3"] = ( - AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 - ) - LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3" - ).set_name("LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3", register=True) + lama_register["AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3"] = AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3", register=True) except Exception as e: print("AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridCulturalOptimizer import AdaptiveHybridCulturalOptimizer lama_register["AdaptiveHybridCulturalOptimizer"] = AdaptiveHybridCulturalOptimizer - LLAMAAdaptiveHybridCulturalOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveHybridCulturalOptimizer" - ).set_name("LLAMAAdaptiveHybridCulturalOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCulturalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridCulturalOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridCulturalOptimizer").set_name("LLAMAAdaptiveHybridCulturalOptimizer", register=True) except Exception as e: print("AdaptiveHybridCulturalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridDEPSOWithDynamicRestart import ( - AdaptiveHybridDEPSOWithDynamicRestart, - ) + from nevergrad.optimization.lama.AdaptiveHybridDEPSOWithDynamicRestart import AdaptiveHybridDEPSOWithDynamicRestart lama_register["AdaptiveHybridDEPSOWithDynamicRestart"] = AdaptiveHybridDEPSOWithDynamicRestart - LLAMAAdaptiveHybridDEPSOWithDynamicRestart = NonObjectOptimizer( - method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart" - ).set_name("LLAMAAdaptiveHybridDEPSOWithDynamicRestart", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridDEPSOWithDynamicRestart = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart").set_name("LLAMAAdaptiveHybridDEPSOWithDynamicRestart", register=True) except Exception as e: print("AdaptiveHybridDEPSOWithDynamicRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridDEWithIntensifiedLocalSearch import ( - AdaptiveHybridDEWithIntensifiedLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveHybridDEWithIntensifiedLocalSearch import AdaptiveHybridDEWithIntensifiedLocalSearch lama_register["AdaptiveHybridDEWithIntensifiedLocalSearch"] = AdaptiveHybridDEWithIntensifiedLocalSearch - LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch" - ).set_name("LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch").set_name("LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch", register=True) except Exception as e: print("AdaptiveHybridDEWithIntensifiedLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridDifferentialEvolution import ( - AdaptiveHybridDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveHybridDifferentialEvolution import AdaptiveHybridDifferentialEvolution lama_register["AdaptiveHybridDifferentialEvolution"] = AdaptiveHybridDifferentialEvolution - LLAMAAdaptiveHybridDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveHybridDifferentialEvolution" - ).set_name("LLAMAAdaptiveHybridDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveHybridDifferentialEvolution").set_name("LLAMAAdaptiveHybridDifferentialEvolution", register=True) except Exception as e: print("AdaptiveHybridDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridEvolutionStrategyV5 import ( - AdaptiveHybridEvolutionStrategyV5, - ) + from nevergrad.optimization.lama.AdaptiveHybridEvolutionStrategyV5 import AdaptiveHybridEvolutionStrategyV5 lama_register["AdaptiveHybridEvolutionStrategyV5"] = AdaptiveHybridEvolutionStrategyV5 - LLAMAAdaptiveHybridEvolutionStrategyV5 = NonObjectOptimizer( - method="LLAMAAdaptiveHybridEvolutionStrategyV5" - ).set_name("LLAMAAdaptiveHybridEvolutionStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridEvolutionStrategyV5 = NonObjectOptimizer(method="LLAMAAdaptiveHybridEvolutionStrategyV5").set_name("LLAMAAdaptiveHybridEvolutionStrategyV5", register=True) except Exception as e: print("AdaptiveHybridEvolutionStrategyV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridFireworkAlgorithm import AdaptiveHybridFireworkAlgorithm lama_register["AdaptiveHybridFireworkAlgorithm"] = AdaptiveHybridFireworkAlgorithm - LLAMAAdaptiveHybridFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveHybridFireworkAlgorithm" - ).set_name("LLAMAAdaptiveHybridFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridFireworkAlgorithm").set_name("LLAMAAdaptiveHybridFireworkAlgorithm", register=True) except Exception as e: print("AdaptiveHybridFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridGradientAnnealingWithVariableMemory import ( - AdaptiveHybridGradientAnnealingWithVariableMemory, - ) + from nevergrad.optimization.lama.AdaptiveHybridGradientAnnealingWithVariableMemory import AdaptiveHybridGradientAnnealingWithVariableMemory - lama_register["AdaptiveHybridGradientAnnealingWithVariableMemory"] = ( - AdaptiveHybridGradientAnnealingWithVariableMemory - ) - LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory = NonObjectOptimizer( - method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory" - ).set_name("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory", register=True) + lama_register["AdaptiveHybridGradientAnnealingWithVariableMemory"] = AdaptiveHybridGradientAnnealingWithVariableMemory + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory = NonObjectOptimizer(method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory").set_name("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory", register=True) except Exception as e: print("AdaptiveHybridGradientAnnealingWithVariableMemory can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridHarmonySearch import AdaptiveHybridHarmonySearch lama_register["AdaptiveHybridHarmonySearch"] = AdaptiveHybridHarmonySearch - LLAMAAdaptiveHybridHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch").set_name( - "LLAMAAdaptiveHybridHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch").set_name("LLAMAAdaptiveHybridHarmonySearch", register=True) except Exception as e: print("AdaptiveHybridHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridMetaOptimizer import AdaptiveHybridMetaOptimizer lama_register["AdaptiveHybridMetaOptimizer"] = AdaptiveHybridMetaOptimizer - LLAMAAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer").set_name( - "LLAMAAdaptiveHybridMetaOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer").set_name("LLAMAAdaptiveHybridMetaOptimizer", register=True) except Exception as e: print("AdaptiveHybridMetaOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridOptimization import AdaptiveHybridOptimization lama_register["AdaptiveHybridOptimization"] = AdaptiveHybridOptimization - LLAMAAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization").set_name( - "LLAMAAdaptiveHybridOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization").set_name("LLAMAAdaptiveHybridOptimization", register=True) except Exception as e: print("AdaptiveHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridOptimizationV2 import AdaptiveHybridOptimizationV2 lama_register["AdaptiveHybridOptimizationV2"] = AdaptiveHybridOptimizationV2 - LLAMAAdaptiveHybridOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHybridOptimizationV2" - ).set_name("LLAMAAdaptiveHybridOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV2").set_name("LLAMAAdaptiveHybridOptimizationV2", register=True) except Exception as e: print("AdaptiveHybridOptimizationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridOptimizationV3 import AdaptiveHybridOptimizationV3 lama_register["AdaptiveHybridOptimizationV3"] = AdaptiveHybridOptimizationV3 - LLAMAAdaptiveHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMAAdaptiveHybridOptimizationV3" - ).set_name("LLAMAAdaptiveHybridOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV3").set_name("LLAMAAdaptiveHybridOptimizationV3", register=True) except Exception as e: print("AdaptiveHybridOptimizationV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridOptimizer import AdaptiveHybridOptimizer lama_register["AdaptiveHybridOptimizer"] = AdaptiveHybridOptimizer - LLAMAAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer").set_name( - "LLAMAAdaptiveHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer").set_name("LLAMAAdaptiveHybridOptimizer", register=True) except Exception as e: print("AdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolution import ( - AdaptiveHybridParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolution import AdaptiveHybridParticleSwarmDifferentialEvolution - lama_register["AdaptiveHybridParticleSwarmDifferentialEvolution"] = ( - AdaptiveHybridParticleSwarmDifferentialEvolution - ) - LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution" - ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolution"] = AdaptiveHybridParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("AdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( - AdaptiveHybridParticleSwarmDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolutionPlus import AdaptiveHybridParticleSwarmDifferentialEvolutionPlus - lama_register["AdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( - AdaptiveHybridParticleSwarmDifferentialEvolutionPlus - ) - LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" - ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = AdaptiveHybridParticleSwarmDifferentialEvolutionPlus + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus").set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) except Exception as e: print("AdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridQuasiRandomGradientDE import ( - AdaptiveHybridQuasiRandomGradientDE, - ) + from nevergrad.optimization.lama.AdaptiveHybridQuasiRandomGradientDE import AdaptiveHybridQuasiRandomGradientDE lama_register["AdaptiveHybridQuasiRandomGradientDE"] = AdaptiveHybridQuasiRandomGradientDE - LLAMAAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( - method="LLAMAAdaptiveHybridQuasiRandomGradientDE" - ).set_name("LLAMAAdaptiveHybridQuasiRandomGradientDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridQuasiRandomGradientDE").set_name("LLAMAAdaptiveHybridQuasiRandomGradientDE", register=True) except Exception as e: print("AdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridRecombinativeStrategy import ( - AdaptiveHybridRecombinativeStrategy, - ) + from nevergrad.optimization.lama.AdaptiveHybridRecombinativeStrategy import AdaptiveHybridRecombinativeStrategy lama_register["AdaptiveHybridRecombinativeStrategy"] = AdaptiveHybridRecombinativeStrategy - LLAMAAdaptiveHybridRecombinativeStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveHybridRecombinativeStrategy" - ).set_name("LLAMAAdaptiveHybridRecombinativeStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridRecombinativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridRecombinativeStrategy = NonObjectOptimizer(method="LLAMAAdaptiveHybridRecombinativeStrategy").set_name("LLAMAAdaptiveHybridRecombinativeStrategy", register=True) except Exception as e: print("AdaptiveHybridRecombinativeStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveHybridSearchOptimizer import AdaptiveHybridSearchOptimizer lama_register["AdaptiveHybridSearchOptimizer"] = AdaptiveHybridSearchOptimizer - LLAMAAdaptiveHybridSearchOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveHybridSearchOptimizer" - ).set_name("LLAMAAdaptiveHybridSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridSearchOptimizer").set_name("LLAMAAdaptiveHybridSearchOptimizer", register=True) except Exception as e: print("AdaptiveHybridSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHybridSwarmEvolutionOptimization import ( - AdaptiveHybridSwarmEvolutionOptimization, - ) + from nevergrad.optimization.lama.AdaptiveHybridSwarmEvolutionOptimization import AdaptiveHybridSwarmEvolutionOptimization lama_register["AdaptiveHybridSwarmEvolutionOptimization"] = AdaptiveHybridSwarmEvolutionOptimization - LLAMAAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveHybridSwarmEvolutionOptimization" - ).set_name("LLAMAAdaptiveHybridSwarmEvolutionOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridSwarmEvolutionOptimization").set_name("LLAMAAdaptiveHybridSwarmEvolutionOptimization", register=True) except Exception as e: print("AdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveHyperQuantumStateCrossoverOptimizationV2 import ( - AdaptiveHyperQuantumStateCrossoverOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveHyperQuantumStateCrossoverOptimizationV2 import AdaptiveHyperQuantumStateCrossoverOptimizationV2 - lama_register["AdaptiveHyperQuantumStateCrossoverOptimizationV2"] = ( - AdaptiveHyperQuantumStateCrossoverOptimizationV2 - ) - LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2" - ).set_name("LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2", register=True) + lama_register["AdaptiveHyperQuantumStateCrossoverOptimizationV2"] = AdaptiveHyperQuantumStateCrossoverOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2").set_name("LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2", register=True) except Exception as e: print("AdaptiveHyperQuantumStateCrossoverOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveIncrementalCrossoverEnhancement import ( - AdaptiveIncrementalCrossoverEnhancement, - ) + from nevergrad.optimization.lama.AdaptiveIncrementalCrossoverEnhancement import AdaptiveIncrementalCrossoverEnhancement lama_register["AdaptiveIncrementalCrossoverEnhancement"] = AdaptiveIncrementalCrossoverEnhancement - LLAMAAdaptiveIncrementalCrossoverEnhancement = NonObjectOptimizer( - method="LLAMAAdaptiveIncrementalCrossoverEnhancement" - ).set_name("LLAMAAdaptiveIncrementalCrossoverEnhancement", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveIncrementalCrossoverEnhancement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveIncrementalCrossoverEnhancement = NonObjectOptimizer(method="LLAMAAdaptiveIncrementalCrossoverEnhancement").set_name("LLAMAAdaptiveIncrementalCrossoverEnhancement", register=True) except Exception as e: print("AdaptiveIncrementalCrossoverEnhancement can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveInertiaHybridOptimizer import AdaptiveInertiaHybridOptimizer lama_register["AdaptiveInertiaHybridOptimizer"] = AdaptiveInertiaHybridOptimizer - LLAMAAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveInertiaHybridOptimizer" - ).set_name("LLAMAAdaptiveInertiaHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveInertiaHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveInertiaHybridOptimizer").set_name("LLAMAAdaptiveInertiaHybridOptimizer", register=True) except Exception as e: print("AdaptiveInertiaHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveInertiaParticleOptimizer import AdaptiveInertiaParticleOptimizer lama_register["AdaptiveInertiaParticleOptimizer"] = AdaptiveInertiaParticleOptimizer - LLAMAAdaptiveInertiaParticleOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveInertiaParticleOptimizer" - ).set_name("LLAMAAdaptiveInertiaParticleOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveInertiaParticleOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleOptimizer").set_name("LLAMAAdaptiveInertiaParticleOptimizer", register=True) except Exception as e: print("AdaptiveInertiaParticleOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveInertiaParticleSwarmOptimization import ( - AdaptiveInertiaParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveInertiaParticleSwarmOptimization import AdaptiveInertiaParticleSwarmOptimization lama_register["AdaptiveInertiaParticleSwarmOptimization"] = AdaptiveInertiaParticleSwarmOptimization - LLAMAAdaptiveInertiaParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveInertiaParticleSwarmOptimization" - ).set_name("LLAMAAdaptiveInertiaParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveInertiaParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleSwarmOptimization").set_name("LLAMAAdaptiveInertiaParticleSwarmOptimization", register=True) except Exception as e: print("AdaptiveInertiaParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveLearningDifferentialEvolutionOptimizer import ( - AdaptiveLearningDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveLearningDifferentialEvolutionOptimizer import AdaptiveLearningDifferentialEvolutionOptimizer - lama_register["AdaptiveLearningDifferentialEvolutionOptimizer"] = ( - AdaptiveLearningDifferentialEvolutionOptimizer - ) - LLAMAAdaptiveLearningDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer" - ).set_name("LLAMAAdaptiveLearningDifferentialEvolutionOptimizer", register=True) + lama_register["AdaptiveLearningDifferentialEvolutionOptimizer"] = AdaptiveLearningDifferentialEvolutionOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLearningDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveLearningDifferentialEvolutionOptimizer", register=True) except Exception as e: print("AdaptiveLearningDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( - AdaptiveLevyDiversifiedMetaHeuristicAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveLevyDiversifiedMetaHeuristicAlgorithm import AdaptiveLevyDiversifiedMetaHeuristicAlgorithm - lama_register["AdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( - AdaptiveLevyDiversifiedMetaHeuristicAlgorithm - ) - LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" - ).set_name("LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) + lama_register["AdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = AdaptiveLevyDiversifiedMetaHeuristicAlgorithm + res = NonObjectOptimizer(method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) except Exception as e: print("AdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveLevyHarmonySearch import AdaptiveLevyHarmonySearch lama_register["AdaptiveLevyHarmonySearch"] = AdaptiveLevyHarmonySearch - LLAMAAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch").set_name( - "LLAMAAdaptiveLevyHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch").set_name("LLAMAAdaptiveLevyHarmonySearch", register=True) except Exception as e: print("AdaptiveLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing import ( - AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing import AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing - lama_register["AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing"] = ( - AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing - ) - LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing" - ).set_name("LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing", register=True) + lama_register["AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing"] = AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveLocalSearchOptimizer import AdaptiveLocalSearchOptimizer lama_register["AdaptiveLocalSearchOptimizer"] = AdaptiveLocalSearchOptimizer - LLAMAAdaptiveLocalSearchOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveLocalSearchOptimizer" - ).set_name("LLAMAAdaptiveLocalSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLocalSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchOptimizer").set_name("LLAMAAdaptiveLocalSearchOptimizer", register=True) except Exception as e: print("AdaptiveLocalSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveLocalSearchQuantumSimulatedAnnealing import ( - AdaptiveLocalSearchQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveLocalSearchQuantumSimulatedAnnealing import AdaptiveLocalSearchQuantumSimulatedAnnealing - lama_register["AdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( - AdaptiveLocalSearchQuantumSimulatedAnnealing - ) - LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing" - ).set_name("LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) + lama_register["AdaptiveLocalSearchQuantumSimulatedAnnealing"] = AdaptiveLocalSearchQuantumSimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticAlgorithm import AdaptiveMemeticAlgorithm lama_register["AdaptiveMemeticAlgorithm"] = AdaptiveMemeticAlgorithm - LLAMAAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm").set_name( - "LLAMAAdaptiveMemeticAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm").set_name("LLAMAAdaptiveMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer import ( - AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer import AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer - lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer"] = ( - AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer - ) - LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer" - ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer", register=True) + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer"] = AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer", register=True) except Exception as e: print("AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer import ( - AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer import AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer - lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer"] = ( - AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer - ) - LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer" - ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer", register=True) + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer"] = AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer").set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer", register=True) except Exception as e: print("AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolution import ( - AdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolution import AdaptiveMemeticDifferentialEvolution lama_register["AdaptiveMemeticDifferentialEvolution"] = AdaptiveMemeticDifferentialEvolution - LLAMAAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolution").set_name("LLAMAAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionOptimizer import ( - AdaptiveMemeticDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionOptimizer import AdaptiveMemeticDifferentialEvolutionOptimizer - lama_register["AdaptiveMemeticDifferentialEvolutionOptimizer"] = ( - AdaptiveMemeticDifferentialEvolutionOptimizer - ) - LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer", register=True) + lama_register["AdaptiveMemeticDifferentialEvolutionOptimizer"] = AdaptiveMemeticDifferentialEvolutionOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV2 import ( - AdaptiveMemeticDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV2 import AdaptiveMemeticDifferentialEvolutionV2 lama_register["AdaptiveMemeticDifferentialEvolutionV2"] = AdaptiveMemeticDifferentialEvolutionV2 - LLAMAAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV2" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV2").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV2", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV3 import ( - AdaptiveMemeticDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV3 import AdaptiveMemeticDifferentialEvolutionV3 lama_register["AdaptiveMemeticDifferentialEvolutionV3"] = AdaptiveMemeticDifferentialEvolutionV3 - LLAMAAdaptiveMemeticDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV3" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV3").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV3", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV4 import ( - AdaptiveMemeticDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV4 import AdaptiveMemeticDifferentialEvolutionV4 lama_register["AdaptiveMemeticDifferentialEvolutionV4"] = AdaptiveMemeticDifferentialEvolutionV4 - LLAMAAdaptiveMemeticDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV4" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV4", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV4").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV4", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV5 import ( - AdaptiveMemeticDifferentialEvolutionV5, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV5 import AdaptiveMemeticDifferentialEvolutionV5 lama_register["AdaptiveMemeticDifferentialEvolutionV5"] = AdaptiveMemeticDifferentialEvolutionV5 - LLAMAAdaptiveMemeticDifferentialEvolutionV5 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV5" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV5").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV5", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV6 import ( - AdaptiveMemeticDifferentialEvolutionV6, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV6 import AdaptiveMemeticDifferentialEvolutionV6 lama_register["AdaptiveMemeticDifferentialEvolutionV6"] = AdaptiveMemeticDifferentialEvolutionV6 - LLAMAAdaptiveMemeticDifferentialEvolutionV6 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV6" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV6", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV6").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV6", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV7 import ( - AdaptiveMemeticDifferentialEvolutionV7, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV7 import AdaptiveMemeticDifferentialEvolutionV7 lama_register["AdaptiveMemeticDifferentialEvolutionV7"] = AdaptiveMemeticDifferentialEvolutionV7 - LLAMAAdaptiveMemeticDifferentialEvolutionV7 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionV7" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV7", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV7").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV7", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR import ( - AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR import AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR - lama_register["AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR"] = ( - AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR - ) - LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR", register=True) + lama_register["AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR"] = AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance import ( - AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance import AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance - lama_register["AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance"] = ( - AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance - ) - LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance" - ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance", register=True) + lama_register["AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance"] = AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance", register=True) except Exception as e: print("AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialQuantumSearch import ( - AdaptiveMemeticDifferentialQuantumSearch, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialQuantumSearch import AdaptiveMemeticDifferentialQuantumSearch lama_register["AdaptiveMemeticDifferentialQuantumSearch"] = AdaptiveMemeticDifferentialQuantumSearch - LLAMAAdaptiveMemeticDifferentialQuantumSearch = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialQuantumSearch" - ).set_name("LLAMAAdaptiveMemeticDifferentialQuantumSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialQuantumSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialQuantumSearch").set_name("LLAMAAdaptiveMemeticDifferentialQuantumSearch", register=True) except Exception as e: print("AdaptiveMemeticDifferentialQuantumSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialSearch import ( - AdaptiveMemeticDifferentialSearch, - ) + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialSearch import AdaptiveMemeticDifferentialSearch lama_register["AdaptiveMemeticDifferentialSearch"] = AdaptiveMemeticDifferentialSearch - LLAMAAdaptiveMemeticDifferentialSearch = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDifferentialSearch" - ).set_name("LLAMAAdaptiveMemeticDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialSearch").set_name("LLAMAAdaptiveMemeticDifferentialSearch", register=True) except Exception as e: print("AdaptiveMemeticDifferentialSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticDiverseOptimizer import AdaptiveMemeticDiverseOptimizer lama_register["AdaptiveMemeticDiverseOptimizer"] = AdaptiveMemeticDiverseOptimizer - LLAMAAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticDiverseOptimizer" - ).set_name("LLAMAAdaptiveMemeticDiverseOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDiverseOptimizer").set_name("LLAMAAdaptiveMemeticDiverseOptimizer", register=True) except Exception as e: print("AdaptiveMemeticDiverseOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticEvolutionStrategy import AdaptiveMemeticEvolutionStrategy lama_register["AdaptiveMemeticEvolutionStrategy"] = AdaptiveMemeticEvolutionStrategy - LLAMAAdaptiveMemeticEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticEvolutionStrategy" - ).set_name("LLAMAAdaptiveMemeticEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionStrategy").set_name("LLAMAAdaptiveMemeticEvolutionStrategy", register=True) except Exception as e: print("AdaptiveMemeticEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryAlgorithm import ( - AdaptiveMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryAlgorithm import AdaptiveMemeticEvolutionaryAlgorithm lama_register["AdaptiveMemeticEvolutionaryAlgorithm"] = AdaptiveMemeticEvolutionaryAlgorithm - LLAMAAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm" - ).set_name("LLAMAAdaptiveMemeticEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMAAdaptiveMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("AdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryOptimizer import ( - AdaptiveMemeticEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryOptimizer import AdaptiveMemeticEvolutionaryOptimizer lama_register["AdaptiveMemeticEvolutionaryOptimizer"] = AdaptiveMemeticEvolutionaryOptimizer - LLAMAAdaptiveMemeticEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticEvolutionaryOptimizer" - ).set_name("LLAMAAdaptiveMemeticEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryOptimizer").set_name("LLAMAAdaptiveMemeticEvolutionaryOptimizer", register=True) except Exception as e: print("AdaptiveMemeticEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionarySearch import ( - AdaptiveMemeticEvolutionarySearch, - ) + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionarySearch import AdaptiveMemeticEvolutionarySearch lama_register["AdaptiveMemeticEvolutionarySearch"] = AdaptiveMemeticEvolutionarySearch - LLAMAAdaptiveMemeticEvolutionarySearch = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticEvolutionarySearch" - ).set_name("LLAMAAdaptiveMemeticEvolutionarySearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticEvolutionarySearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionarySearch").set_name("LLAMAAdaptiveMemeticEvolutionarySearch", register=True) except Exception as e: print("AdaptiveMemeticEvolutionarySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimization import ( - AdaptiveMemeticHarmonyOptimization, - ) + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimization import AdaptiveMemeticHarmonyOptimization lama_register["AdaptiveMemeticHarmonyOptimization"] = AdaptiveMemeticHarmonyOptimization - LLAMAAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticHarmonyOptimization" - ).set_name("LLAMAAdaptiveMemeticHarmonyOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimization").set_name("LLAMAAdaptiveMemeticHarmonyOptimization", register=True) except Exception as e: print("AdaptiveMemeticHarmonyOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimizationV5 import ( - AdaptiveMemeticHarmonyOptimizationV5, - ) + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimizationV5 import AdaptiveMemeticHarmonyOptimizationV5 lama_register["AdaptiveMemeticHarmonyOptimizationV5"] = AdaptiveMemeticHarmonyOptimizationV5 - LLAMAAdaptiveMemeticHarmonyOptimizationV5 = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticHarmonyOptimizationV5" - ).set_name("LLAMAAdaptiveMemeticHarmonyOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticHarmonyOptimizationV5 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimizationV5").set_name("LLAMAAdaptiveMemeticHarmonyOptimizationV5", register=True) except Exception as e: print("AdaptiveMemeticHarmonyOptimizationV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticHybridOptimizer import AdaptiveMemeticHybridOptimizer lama_register["AdaptiveMemeticHybridOptimizer"] = AdaptiveMemeticHybridOptimizer - LLAMAAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticHybridOptimizer" - ).set_name("LLAMAAdaptiveMemeticHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHybridOptimizer").set_name("LLAMAAdaptiveMemeticHybridOptimizer", register=True) except Exception as e: print("AdaptiveMemeticHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticOptimizer import AdaptiveMemeticOptimizer lama_register["AdaptiveMemeticOptimizer"] = AdaptiveMemeticOptimizer - LLAMAAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer").set_name( - "LLAMAAdaptiveMemeticOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer").set_name("LLAMAAdaptiveMemeticOptimizer", register=True) except Exception as e: print("AdaptiveMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemeticOptimizerV2 import AdaptiveMemeticOptimizerV2 lama_register["AdaptiveMemeticOptimizerV2"] = AdaptiveMemeticOptimizerV2 - LLAMAAdaptiveMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2").set_name( - "LLAMAAdaptiveMemeticOptimizerV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2").set_name("LLAMAAdaptiveMemeticOptimizerV2", register=True) except Exception as e: print("AdaptiveMemeticOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemeticParticleSwarmOptimization import ( - AdaptiveMemeticParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveMemeticParticleSwarmOptimization import AdaptiveMemeticParticleSwarmOptimization lama_register["AdaptiveMemeticParticleSwarmOptimization"] = AdaptiveMemeticParticleSwarmOptimization - LLAMAAdaptiveMemeticParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMemeticParticleSwarmOptimization" - ).set_name("LLAMAAdaptiveMemeticParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemeticParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMemeticParticleSwarmOptimization").set_name("LLAMAAdaptiveMemeticParticleSwarmOptimization", register=True) except Exception as e: print("AdaptiveMemeticParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryAssistedStrategyV41 import ( - AdaptiveMemoryAssistedStrategyV41, - ) + from nevergrad.optimization.lama.AdaptiveMemoryAssistedStrategyV41 import AdaptiveMemoryAssistedStrategyV41 lama_register["AdaptiveMemoryAssistedStrategyV41"] = AdaptiveMemoryAssistedStrategyV41 - LLAMAAdaptiveMemoryAssistedStrategyV41 = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryAssistedStrategyV41" - ).set_name("LLAMAAdaptiveMemoryAssistedStrategyV41", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryAssistedStrategyV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryAssistedStrategyV41 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryAssistedStrategyV41").set_name("LLAMAAdaptiveMemoryAssistedStrategyV41", register=True) except Exception as e: print("AdaptiveMemoryAssistedStrategyV41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryEnhancedDualStrategyV45 import ( - AdaptiveMemoryEnhancedDualStrategyV45, - ) + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedDualStrategyV45 import AdaptiveMemoryEnhancedDualStrategyV45 lama_register["AdaptiveMemoryEnhancedDualStrategyV45"] = AdaptiveMemoryEnhancedDualStrategyV45 - LLAMAAdaptiveMemoryEnhancedDualStrategyV45 = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45" - ).set_name("LLAMAAdaptiveMemoryEnhancedDualStrategyV45", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryEnhancedDualStrategyV45 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45").set_name("LLAMAAdaptiveMemoryEnhancedDualStrategyV45", register=True) except Exception as e: print("AdaptiveMemoryEnhancedDualStrategyV45 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemoryEnhancedSearch import AdaptiveMemoryEnhancedSearch lama_register["AdaptiveMemoryEnhancedSearch"] = AdaptiveMemoryEnhancedSearch - LLAMAAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryEnhancedSearch" - ).set_name("LLAMAAdaptiveMemoryEnhancedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedSearch").set_name("LLAMAAdaptiveMemoryEnhancedSearch", register=True) except Exception as e: print("AdaptiveMemoryEnhancedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryEnhancedStrategyV42 import ( - AdaptiveMemoryEnhancedStrategyV42, - ) + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedStrategyV42 import AdaptiveMemoryEnhancedStrategyV42 lama_register["AdaptiveMemoryEnhancedStrategyV42"] = AdaptiveMemoryEnhancedStrategyV42 - LLAMAAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryEnhancedStrategyV42" - ).set_name("LLAMAAdaptiveMemoryEnhancedStrategyV42", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedStrategyV42").set_name("LLAMAAdaptiveMemoryEnhancedStrategyV42", register=True) except Exception as e: print("AdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryEvolutionaryOptimizer import ( - AdaptiveMemoryEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveMemoryEvolutionaryOptimizer import AdaptiveMemoryEvolutionaryOptimizer lama_register["AdaptiveMemoryEvolutionaryOptimizer"] = AdaptiveMemoryEvolutionaryOptimizer - LLAMAAdaptiveMemoryEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryEvolutionaryOptimizer" - ).set_name("LLAMAAdaptiveMemoryEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEvolutionaryOptimizer").set_name("LLAMAAdaptiveMemoryEvolutionaryOptimizer", register=True) except Exception as e: print("AdaptiveMemoryEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealing import AdaptiveMemoryGradientAnnealing lama_register["AdaptiveMemoryGradientAnnealing"] = AdaptiveMemoryGradientAnnealing - LLAMAAdaptiveMemoryGradientAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryGradientAnnealing" - ).set_name("LLAMAAdaptiveMemoryGradientAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryGradientAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealing").set_name("LLAMAAdaptiveMemoryGradientAnnealing", register=True) except Exception as e: print("AdaptiveMemoryGradientAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingPlus import ( - AdaptiveMemoryGradientAnnealingPlus, - ) + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingPlus import AdaptiveMemoryGradientAnnealingPlus lama_register["AdaptiveMemoryGradientAnnealingPlus"] = AdaptiveMemoryGradientAnnealingPlus - LLAMAAdaptiveMemoryGradientAnnealingPlus = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryGradientAnnealingPlus" - ).set_name("LLAMAAdaptiveMemoryGradientAnnealingPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryGradientAnnealingPlus = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingPlus").set_name("LLAMAAdaptiveMemoryGradientAnnealingPlus", register=True) except Exception as e: print("AdaptiveMemoryGradientAnnealingPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingWithExplorationBoost import ( - AdaptiveMemoryGradientAnnealingWithExplorationBoost, - ) + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingWithExplorationBoost import AdaptiveMemoryGradientAnnealingWithExplorationBoost - lama_register["AdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( - AdaptiveMemoryGradientAnnealingWithExplorationBoost - ) - LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost" - ).set_name("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) + lama_register["AdaptiveMemoryGradientAnnealingWithExplorationBoost"] = AdaptiveMemoryGradientAnnealingWithExplorationBoost + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost").set_name("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) except Exception as e: print("AdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientSimulatedAnnealing import ( - AdaptiveMemoryGradientSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveMemoryGradientSimulatedAnnealing import AdaptiveMemoryGradientSimulatedAnnealing lama_register["AdaptiveMemoryGradientSimulatedAnnealing"] = AdaptiveMemoryGradientSimulatedAnnealing - LLAMAAdaptiveMemoryGradientSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing" - ).set_name("LLAMAAdaptiveMemoryGradientSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryGradientSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing").set_name("LLAMAAdaptiveMemoryGradientSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveMemoryGradientSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryGuidedEvolutionStrategyV57 import ( - AdaptiveMemoryGuidedEvolutionStrategyV57, - ) + from nevergrad.optimization.lama.AdaptiveMemoryGuidedEvolutionStrategyV57 import AdaptiveMemoryGuidedEvolutionStrategyV57 lama_register["AdaptiveMemoryGuidedEvolutionStrategyV57"] = AdaptiveMemoryGuidedEvolutionStrategyV57 - LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57 = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57" - ).set_name("LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57").set_name("LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57", register=True) except Exception as e: print("AdaptiveMemoryGuidedEvolutionStrategyV57 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemoryHybridAnnealing import AdaptiveMemoryHybridAnnealing lama_register["AdaptiveMemoryHybridAnnealing"] = AdaptiveMemoryHybridAnnealing - LLAMAAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryHybridAnnealing" - ).set_name("LLAMAAdaptiveMemoryHybridAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryHybridAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridAnnealing").set_name("LLAMAAdaptiveMemoryHybridAnnealing", register=True) except Exception as e: print("AdaptiveMemoryHybridAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO import AdaptiveMemoryHybridDEPSO lama_register["AdaptiveMemoryHybridDEPSO"] = AdaptiveMemoryHybridDEPSO - LLAMAAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO").set_name( - "LLAMAAdaptiveMemoryHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO").set_name("LLAMAAdaptiveMemoryHybridDEPSO", register=True) except Exception as e: print("AdaptiveMemoryHybridDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO_V2 import AdaptiveMemoryHybridDEPSO_V2 lama_register["AdaptiveMemoryHybridDEPSO_V2"] = AdaptiveMemoryHybridDEPSO_V2 - LLAMAAdaptiveMemoryHybridDEPSO_V2 = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryHybridDEPSO_V2" - ).set_name("LLAMAAdaptiveMemoryHybridDEPSO_V2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryHybridDEPSO_V2 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO_V2").set_name("LLAMAAdaptiveMemoryHybridDEPSO_V2", register=True) except Exception as e: print("AdaptiveMemoryHybridDEPSO_V2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemoryParticleDifferentialSearch import ( - AdaptiveMemoryParticleDifferentialSearch, - ) + from nevergrad.optimization.lama.AdaptiveMemoryParticleDifferentialSearch import AdaptiveMemoryParticleDifferentialSearch lama_register["AdaptiveMemoryParticleDifferentialSearch"] = AdaptiveMemoryParticleDifferentialSearch - LLAMAAdaptiveMemoryParticleDifferentialSearch = NonObjectOptimizer( - method="LLAMAAdaptiveMemoryParticleDifferentialSearch" - ).set_name("LLAMAAdaptiveMemoryParticleDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemoryParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemoryParticleDifferentialSearch").set_name("LLAMAAdaptiveMemoryParticleDifferentialSearch", register=True) except Exception as e: print("AdaptiveMemoryParticleDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMemorySelfTuningStrategyV60 import ( - AdaptiveMemorySelfTuningStrategyV60, - ) + from nevergrad.optimization.lama.AdaptiveMemorySelfTuningStrategyV60 import AdaptiveMemorySelfTuningStrategyV60 lama_register["AdaptiveMemorySelfTuningStrategyV60"] = AdaptiveMemorySelfTuningStrategyV60 - LLAMAAdaptiveMemorySelfTuningStrategyV60 = NonObjectOptimizer( - method="LLAMAAdaptiveMemorySelfTuningStrategyV60" - ).set_name("LLAMAAdaptiveMemorySelfTuningStrategyV60", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySelfTuningStrategyV60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemorySelfTuningStrategyV60 = NonObjectOptimizer(method="LLAMAAdaptiveMemorySelfTuningStrategyV60").set_name("LLAMAAdaptiveMemorySelfTuningStrategyV60", register=True) except Exception as e: print("AdaptiveMemorySelfTuningStrategyV60 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMemorySimulatedAnnealing import AdaptiveMemorySimulatedAnnealing lama_register["AdaptiveMemorySimulatedAnnealing"] = AdaptiveMemorySimulatedAnnealing - LLAMAAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveMemorySimulatedAnnealing" - ).set_name("LLAMAAdaptiveMemorySimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemorySimulatedAnnealing").set_name("LLAMAAdaptiveMemorySimulatedAnnealing", register=True) except Exception as e: print("AdaptiveMemorySimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSO import AdaptiveMetaNetAQAPSO lama_register["AdaptiveMetaNetAQAPSO"] = AdaptiveMetaNetAQAPSO - LLAMAAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO").set_name( - "LLAMAAdaptiveMetaNetAQAPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO").set_name("LLAMAAdaptiveMetaNetAQAPSO", register=True) except Exception as e: print("AdaptiveMetaNetAQAPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSOv13 import AdaptiveMetaNetAQAPSOv13 lama_register["AdaptiveMetaNetAQAPSOv13"] = AdaptiveMetaNetAQAPSOv13 - LLAMAAdaptiveMetaNetAQAPSOv13 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13").set_name( - "LLAMAAdaptiveMetaNetAQAPSOv13", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMetaNetAQAPSOv13 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13").set_name("LLAMAAdaptiveMetaNetAQAPSOv13", register=True) except Exception as e: print("AdaptiveMetaNetAQAPSOv13 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMetaNetPSO_v3 import AdaptiveMetaNetPSO_v3 lama_register["AdaptiveMetaNetPSO_v3"] = AdaptiveMetaNetPSO_v3 - LLAMAAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3").set_name( - "LLAMAAdaptiveMetaNetPSO_v3", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3").set_name("LLAMAAdaptiveMetaNetPSO_v3", register=True) except Exception as e: print("AdaptiveMetaNetPSO_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMetaNetPSOv3 import AdaptiveMetaNetPSOv3 lama_register["AdaptiveMetaNetPSOv3"] = AdaptiveMetaNetPSOv3 - LLAMAAdaptiveMetaNetPSOv3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3").set_name( - "LLAMAAdaptiveMetaNetPSOv3", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMetaNetPSOv3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3").set_name("LLAMAAdaptiveMetaNetPSOv3", register=True) except Exception as e: print("AdaptiveMetaNetPSOv3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMetaheuristicOptimization import ( - AdaptiveMetaheuristicOptimization, - ) + from nevergrad.optimization.lama.AdaptiveMetaheuristicOptimization import AdaptiveMetaheuristicOptimization lama_register["AdaptiveMetaheuristicOptimization"] = AdaptiveMetaheuristicOptimization - LLAMAAdaptiveMetaheuristicOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMetaheuristicOptimization" - ).set_name("LLAMAAdaptiveMetaheuristicOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMetaheuristicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMetaheuristicOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMetaheuristicOptimization").set_name("LLAMAAdaptiveMetaheuristicOptimization", register=True) except Exception as e: print("AdaptiveMetaheuristicOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMomentumOptimization import AdaptiveMomentumOptimization lama_register["AdaptiveMomentumOptimization"] = AdaptiveMomentumOptimization - LLAMAAdaptiveMomentumOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMomentumOptimization" - ).set_name("LLAMAAdaptiveMomentumOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMomentumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMomentumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMomentumOptimization").set_name("LLAMAAdaptiveMomentumOptimization", register=True) except Exception as e: print("AdaptiveMomentumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiExplorationAlgorithm import ( - AdaptiveMultiExplorationAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveMultiExplorationAlgorithm import AdaptiveMultiExplorationAlgorithm lama_register["AdaptiveMultiExplorationAlgorithm"] = AdaptiveMultiExplorationAlgorithm - LLAMAAdaptiveMultiExplorationAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveMultiExplorationAlgorithm" - ).set_name("LLAMAAdaptiveMultiExplorationAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiExplorationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiExplorationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMultiExplorationAlgorithm").set_name("LLAMAAdaptiveMultiExplorationAlgorithm", register=True) except Exception as e: print("AdaptiveMultiExplorationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiMemorySimulatedAnnealing import ( - AdaptiveMultiMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveMultiMemorySimulatedAnnealing import AdaptiveMultiMemorySimulatedAnnealing lama_register["AdaptiveMultiMemorySimulatedAnnealing"] = AdaptiveMultiMemorySimulatedAnnealing - LLAMAAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveMultiMemorySimulatedAnnealing" - ).set_name("LLAMAAdaptiveMultiMemorySimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiMemorySimulatedAnnealing").set_name("LLAMAAdaptiveMultiMemorySimulatedAnnealing", register=True) except Exception as e: print("AdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiOperatorDifferentialEvolution import ( - AdaptiveMultiOperatorDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveMultiOperatorDifferentialEvolution import AdaptiveMultiOperatorDifferentialEvolution lama_register["AdaptiveMultiOperatorDifferentialEvolution"] = AdaptiveMultiOperatorDifferentialEvolution - LLAMAAdaptiveMultiOperatorDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveMultiOperatorDifferentialEvolution" - ).set_name("LLAMAAdaptiveMultiOperatorDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiOperatorDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorDifferentialEvolution").set_name("LLAMAAdaptiveMultiOperatorDifferentialEvolution", register=True) except Exception as e: print("AdaptiveMultiOperatorDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiOperatorSearch import AdaptiveMultiOperatorSearch lama_register["AdaptiveMultiOperatorSearch"] = AdaptiveMultiOperatorSearch - LLAMAAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch").set_name( - "LLAMAAdaptiveMultiOperatorSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch").set_name("LLAMAAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("AdaptiveMultiOperatorSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV2 import AdaptiveMultiOperatorSearchV2 lama_register["AdaptiveMultiOperatorSearchV2"] = AdaptiveMultiOperatorSearchV2 - LLAMAAdaptiveMultiOperatorSearchV2 = NonObjectOptimizer( - method="LLAMAAdaptiveMultiOperatorSearchV2" - ).set_name("LLAMAAdaptiveMultiOperatorSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiOperatorSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV2").set_name("LLAMAAdaptiveMultiOperatorSearchV2", register=True) except Exception as e: print("AdaptiveMultiOperatorSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV3 import AdaptiveMultiOperatorSearchV3 lama_register["AdaptiveMultiOperatorSearchV3"] = AdaptiveMultiOperatorSearchV3 - LLAMAAdaptiveMultiOperatorSearchV3 = NonObjectOptimizer( - method="LLAMAAdaptiveMultiOperatorSearchV3" - ).set_name("LLAMAAdaptiveMultiOperatorSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiOperatorSearchV3 = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV3").set_name("LLAMAAdaptiveMultiOperatorSearchV3", register=True) except Exception as e: print("AdaptiveMultiOperatorSearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealing import AdaptiveMultiPhaseAnnealing lama_register["AdaptiveMultiPhaseAnnealing"] = AdaptiveMultiPhaseAnnealing - LLAMAAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing").set_name( - "LLAMAAdaptiveMultiPhaseAnnealing", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing").set_name("LLAMAAdaptiveMultiPhaseAnnealing", register=True) except Exception as e: print("AdaptiveMultiPhaseAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealingV2 import AdaptiveMultiPhaseAnnealingV2 lama_register["AdaptiveMultiPhaseAnnealingV2"] = AdaptiveMultiPhaseAnnealingV2 - LLAMAAdaptiveMultiPhaseAnnealingV2 = NonObjectOptimizer( - method="LLAMAAdaptiveMultiPhaseAnnealingV2" - ).set_name("LLAMAAdaptiveMultiPhaseAnnealingV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiPhaseAnnealingV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealingV2").set_name("LLAMAAdaptiveMultiPhaseAnnealingV2", register=True) except Exception as e: print("AdaptiveMultiPhaseAnnealingV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiPhaseOptimization import AdaptiveMultiPhaseOptimization lama_register["AdaptiveMultiPhaseOptimization"] = AdaptiveMultiPhaseOptimization - LLAMAAdaptiveMultiPhaseOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMultiPhaseOptimization" - ).set_name("LLAMAAdaptiveMultiPhaseOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiPhaseOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseOptimization").set_name("LLAMAAdaptiveMultiPhaseOptimization", register=True) except Exception as e: print("AdaptiveMultiPhaseOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiPopulationDifferentialEvolution import ( - AdaptiveMultiPopulationDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveMultiPopulationDifferentialEvolution import AdaptiveMultiPopulationDifferentialEvolution - lama_register["AdaptiveMultiPopulationDifferentialEvolution"] = ( - AdaptiveMultiPopulationDifferentialEvolution - ) - LLAMAAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveMultiPopulationDifferentialEvolution" - ).set_name("LLAMAAdaptiveMultiPopulationDifferentialEvolution", register=True) + lama_register["AdaptiveMultiPopulationDifferentialEvolution"] = AdaptiveMultiPopulationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiPopulationDifferentialEvolution").set_name("LLAMAAdaptiveMultiPopulationDifferentialEvolution", register=True) except Exception as e: print("AdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiStageOptimization import AdaptiveMultiStageOptimization lama_register["AdaptiveMultiStageOptimization"] = AdaptiveMultiStageOptimization - LLAMAAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStageOptimization" - ).set_name("LLAMAAdaptiveMultiStageOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMultiStageOptimization").set_name("LLAMAAdaptiveMultiStageOptimization", register=True) except Exception as e: print("AdaptiveMultiStageOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiStrategicOptimizer import AdaptiveMultiStrategicOptimizer lama_register["AdaptiveMultiStrategicOptimizer"] = AdaptiveMultiStrategicOptimizer - LLAMAAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategicOptimizer" - ).set_name("LLAMAAdaptiveMultiStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategicOptimizer").set_name("LLAMAAdaptiveMultiStrategicOptimizer", register=True) except Exception as e: print("AdaptiveMultiStrategicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiStrategyDE import AdaptiveMultiStrategyDE lama_register["AdaptiveMultiStrategyDE"] = AdaptiveMultiStrategyDE - LLAMAAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE").set_name( - "LLAMAAdaptiveMultiStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE").set_name("LLAMAAdaptiveMultiStrategyDE", register=True) except Exception as e: print("AdaptiveMultiStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDEWithMemory import ( - AdaptiveMultiStrategyDEWithMemory, - ) + from nevergrad.optimization.lama.AdaptiveMultiStrategyDEWithMemory import AdaptiveMultiStrategyDEWithMemory lama_register["AdaptiveMultiStrategyDEWithMemory"] = AdaptiveMultiStrategyDEWithMemory - LLAMAAdaptiveMultiStrategyDEWithMemory = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategyDEWithMemory" - ).set_name("LLAMAAdaptiveMultiStrategyDEWithMemory", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDEWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyDEWithMemory = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDEWithMemory").set_name("LLAMAAdaptiveMultiStrategyDEWithMemory", register=True) except Exception as e: print("AdaptiveMultiStrategyDEWithMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolution import ( - AdaptiveMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolution import AdaptiveMultiStrategyDifferentialEvolution lama_register["AdaptiveMultiStrategyDifferentialEvolution"] = AdaptiveMultiStrategyDifferentialEvolution - LLAMAAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategyDifferentialEvolution" - ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("AdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolutionPlus import ( - AdaptiveMultiStrategyDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolutionPlus import AdaptiveMultiStrategyDifferentialEvolutionPlus - lama_register["AdaptiveMultiStrategyDifferentialEvolutionPlus"] = ( - AdaptiveMultiStrategyDifferentialEvolutionPlus - ) - LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus" - ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus", register=True) + lama_register["AdaptiveMultiStrategyDifferentialEvolutionPlus"] = AdaptiveMultiStrategyDifferentialEvolutionPlus + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus").set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus", register=True) except Exception as e: print("AdaptiveMultiStrategyDifferentialEvolutionPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizer import AdaptiveMultiStrategyOptimizer lama_register["AdaptiveMultiStrategyOptimizer"] = AdaptiveMultiStrategyOptimizer - LLAMAAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategyOptimizer" - ).set_name("LLAMAAdaptiveMultiStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizer").set_name("LLAMAAdaptiveMultiStrategyOptimizer", register=True) except Exception as e: print("AdaptiveMultiStrategyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizerV2 import AdaptiveMultiStrategyOptimizerV2 lama_register["AdaptiveMultiStrategyOptimizerV2"] = AdaptiveMultiStrategyOptimizerV2 - LLAMAAdaptiveMultiStrategyOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveMultiStrategyOptimizerV2" - ).set_name("LLAMAAdaptiveMultiStrategyOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveMultiStrategyOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizerV2").set_name("LLAMAAdaptiveMultiStrategyOptimizerV2", register=True) except Exception as e: print("AdaptiveMultiStrategyOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveNicheDifferentialParticleSwarmOptimizer import ( - AdaptiveNicheDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveNicheDifferentialParticleSwarmOptimizer import AdaptiveNicheDifferentialParticleSwarmOptimizer - lama_register["AdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( - AdaptiveNicheDifferentialParticleSwarmOptimizer - ) - LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) + lama_register["AdaptiveNicheDifferentialParticleSwarmOptimizer"] = AdaptiveNicheDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("AdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveNichingDE_PSO import AdaptiveNichingDE_PSO lama_register["AdaptiveNichingDE_PSO"] = AdaptiveNichingDE_PSO - LLAMAAdaptiveNichingDE_PSO = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO").set_name( - "LLAMAAdaptiveNichingDE_PSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveNichingDE_PSO = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO").set_name("LLAMAAdaptiveNichingDE_PSO", register=True) except Exception as e: print("AdaptiveNichingDE_PSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolution import ( - AdaptiveOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolution import AdaptiveOppositionBasedDifferentialEvolution - lama_register["AdaptiveOppositionBasedDifferentialEvolution"] = ( - AdaptiveOppositionBasedDifferentialEvolution - ) - LLAMAAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveOppositionBasedDifferentialEvolution" - ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolution", register=True) + lama_register["AdaptiveOppositionBasedDifferentialEvolution"] = AdaptiveOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("AdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolutionImproved import ( - AdaptiveOppositionBasedDifferentialEvolutionImproved, - ) + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolutionImproved import AdaptiveOppositionBasedDifferentialEvolutionImproved - lama_register["AdaptiveOppositionBasedDifferentialEvolutionImproved"] = ( - AdaptiveOppositionBasedDifferentialEvolutionImproved - ) - LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved = NonObjectOptimizer( - method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved" - ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved", register=True) + lama_register["AdaptiveOppositionBasedDifferentialEvolutionImproved"] = AdaptiveOppositionBasedDifferentialEvolutionImproved + res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved").set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved", register=True) except Exception as e: print("AdaptiveOppositionBasedDifferentialEvolutionImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE import ( - AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE, - ) + from nevergrad.optimization.lama.AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE import AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE - lama_register["AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE"] = ( - AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE - ) - LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE = NonObjectOptimizer( - method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE" - ).set_name("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE", register=True) + lama_register["AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE"] = AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE + res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE").set_name("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE", register=True) except Exception as e: print("AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveOrthogonalDifferentialEvolution import ( - AdaptiveOrthogonalDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveOrthogonalDifferentialEvolution import AdaptiveOrthogonalDifferentialEvolution lama_register["AdaptiveOrthogonalDifferentialEvolution"] = AdaptiveOrthogonalDifferentialEvolution - LLAMAAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveOrthogonalDifferentialEvolution" - ).set_name("LLAMAAdaptiveOrthogonalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAAdaptiveOrthogonalDifferentialEvolution", register=True) except Exception as e: print("AdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveOscillatoryCrossoverDifferentialEvolution import ( - AdaptiveOscillatoryCrossoverDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveOscillatoryCrossoverDifferentialEvolution import AdaptiveOscillatoryCrossoverDifferentialEvolution - lama_register["AdaptiveOscillatoryCrossoverDifferentialEvolution"] = ( - AdaptiveOscillatoryCrossoverDifferentialEvolution - ) - LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution" - ).set_name("LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution", register=True) + lama_register["AdaptiveOscillatoryCrossoverDifferentialEvolution"] = AdaptiveOscillatoryCrossoverDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution").set_name("LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution", register=True) except Exception as e: print("AdaptiveOscillatoryCrossoverDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveParticleDifferentialSearch import ( - AdaptiveParticleDifferentialSearch, - ) + from nevergrad.optimization.lama.AdaptiveParticleDifferentialSearch import AdaptiveParticleDifferentialSearch lama_register["AdaptiveParticleDifferentialSearch"] = AdaptiveParticleDifferentialSearch - LLAMAAdaptiveParticleDifferentialSearch = NonObjectOptimizer( - method="LLAMAAdaptiveParticleDifferentialSearch" - ).set_name("LLAMAAdaptiveParticleDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveParticleDifferentialSearch").set_name("LLAMAAdaptiveParticleDifferentialSearch", register=True) except Exception as e: print("AdaptiveParticleDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveParticleSwarmOptimization import ( - AdaptiveParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveParticleSwarmOptimization import AdaptiveParticleSwarmOptimization lama_register["AdaptiveParticleSwarmOptimization"] = AdaptiveParticleSwarmOptimization - LLAMAAdaptiveParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveParticleSwarmOptimization" - ).set_name("LLAMAAdaptiveParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveParticleSwarmOptimization").set_name("LLAMAAdaptiveParticleSwarmOptimization", register=True) except Exception as e: print("AdaptiveParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePerturbationDifferentialEvolution import ( - AdaptivePerturbationDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptivePerturbationDifferentialEvolution import AdaptivePerturbationDifferentialEvolution lama_register["AdaptivePerturbationDifferentialEvolution"] = AdaptivePerturbationDifferentialEvolution - LLAMAAdaptivePerturbationDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptivePerturbationDifferentialEvolution" - ).set_name("LLAMAAdaptivePerturbationDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePerturbationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePerturbationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePerturbationDifferentialEvolution").set_name("LLAMAAdaptivePerturbationDifferentialEvolution", register=True) except Exception as e: print("AdaptivePerturbationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePopulationDifferentialEvolutionOptimizer import ( - AdaptivePopulationDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.AdaptivePopulationDifferentialEvolutionOptimizer import AdaptivePopulationDifferentialEvolutionOptimizer - lama_register["AdaptivePopulationDifferentialEvolutionOptimizer"] = ( - AdaptivePopulationDifferentialEvolutionOptimizer - ) - LLAMAAdaptivePopulationDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer" - ).set_name("LLAMAAdaptivePopulationDifferentialEvolutionOptimizer", register=True) + lama_register["AdaptivePopulationDifferentialEvolutionOptimizer"] = AdaptivePopulationDifferentialEvolutionOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePopulationDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer").set_name("LLAMAAdaptivePopulationDifferentialEvolutionOptimizer", register=True) except Exception as e: print("AdaptivePopulationDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( - AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, - ) + from nevergrad.optimization.lama.AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - lama_register["AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( - AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - ) - LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( - method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" - ).set_name("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) + lama_register["AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + res = NonObjectOptimizer(method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) except Exception as e: print("AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePopulationMemeticOptimizer import ( - AdaptivePopulationMemeticOptimizer, - ) + from nevergrad.optimization.lama.AdaptivePopulationMemeticOptimizer import AdaptivePopulationMemeticOptimizer lama_register["AdaptivePopulationMemeticOptimizer"] = AdaptivePopulationMemeticOptimizer - LLAMAAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( - method="LLAMAAdaptivePopulationMemeticOptimizer" - ).set_name("LLAMAAdaptivePopulationMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePopulationMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationMemeticOptimizer").set_name("LLAMAAdaptivePopulationMemeticOptimizer", register=True) except Exception as e: print("AdaptivePopulationMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePopulationResizingOptimizer import ( - AdaptivePopulationResizingOptimizer, - ) + from nevergrad.optimization.lama.AdaptivePopulationResizingOptimizer import AdaptivePopulationResizingOptimizer lama_register["AdaptivePopulationResizingOptimizer"] = AdaptivePopulationResizingOptimizer - LLAMAAdaptivePopulationResizingOptimizer = NonObjectOptimizer( - method="LLAMAAdaptivePopulationResizingOptimizer" - ).set_name("LLAMAAdaptivePopulationResizingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePopulationResizingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePopulationResizingOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationResizingOptimizer").set_name("LLAMAAdaptivePopulationResizingOptimizer", register=True) except Exception as e: print("AdaptivePopulationResizingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionCohortOptimizationV3 import ( - AdaptivePrecisionCohortOptimizationV3, - ) + from nevergrad.optimization.lama.AdaptivePrecisionCohortOptimizationV3 import AdaptivePrecisionCohortOptimizationV3 lama_register["AdaptivePrecisionCohortOptimizationV3"] = AdaptivePrecisionCohortOptimizationV3 - LLAMAAdaptivePrecisionCohortOptimizationV3 = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionCohortOptimizationV3" - ).set_name("LLAMAAdaptivePrecisionCohortOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCohortOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionCohortOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCohortOptimizationV3").set_name("LLAMAAdaptivePrecisionCohortOptimizationV3", register=True) except Exception as e: print("AdaptivePrecisionCohortOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionControlDifferentialEvolution import ( - AdaptivePrecisionControlDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptivePrecisionControlDifferentialEvolution import AdaptivePrecisionControlDifferentialEvolution - lama_register["AdaptivePrecisionControlDifferentialEvolution"] = ( - AdaptivePrecisionControlDifferentialEvolution - ) - LLAMAAdaptivePrecisionControlDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionControlDifferentialEvolution" - ).set_name("LLAMAAdaptivePrecisionControlDifferentialEvolution", register=True) + lama_register["AdaptivePrecisionControlDifferentialEvolution"] = AdaptivePrecisionControlDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionControlDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionControlDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionControlDifferentialEvolution").set_name("LLAMAAdaptivePrecisionControlDifferentialEvolution", register=True) except Exception as e: print("AdaptivePrecisionControlDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionCrossoverEvolution import ( - AdaptivePrecisionCrossoverEvolution, - ) + from nevergrad.optimization.lama.AdaptivePrecisionCrossoverEvolution import AdaptivePrecisionCrossoverEvolution lama_register["AdaptivePrecisionCrossoverEvolution"] = AdaptivePrecisionCrossoverEvolution - LLAMAAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionCrossoverEvolution" - ).set_name("LLAMAAdaptivePrecisionCrossoverEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCrossoverEvolution").set_name("LLAMAAdaptivePrecisionCrossoverEvolution", register=True) except Exception as e: print("AdaptivePrecisionCrossoverEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionDifferentialEvolution import ( - AdaptivePrecisionDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptivePrecisionDifferentialEvolution import AdaptivePrecisionDifferentialEvolution lama_register["AdaptivePrecisionDifferentialEvolution"] = AdaptivePrecisionDifferentialEvolution - LLAMAAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionDifferentialEvolution" - ).set_name("LLAMAAdaptivePrecisionDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDifferentialEvolution").set_name("LLAMAAdaptivePrecisionDifferentialEvolution", register=True) except Exception as e: print("AdaptivePrecisionDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptivePrecisionDivideSearch import AdaptivePrecisionDivideSearch lama_register["AdaptivePrecisionDivideSearch"] = AdaptivePrecisionDivideSearch - LLAMAAdaptivePrecisionDivideSearch = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionDivideSearch" - ).set_name("LLAMAAdaptivePrecisionDivideSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionDivideSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDivideSearch").set_name("LLAMAAdaptivePrecisionDivideSearch", register=True) except Exception as e: print("AdaptivePrecisionDivideSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionDynamicMemoryStrategyV48 import ( - AdaptivePrecisionDynamicMemoryStrategyV48, - ) + from nevergrad.optimization.lama.AdaptivePrecisionDynamicMemoryStrategyV48 import AdaptivePrecisionDynamicMemoryStrategyV48 lama_register["AdaptivePrecisionDynamicMemoryStrategyV48"] = AdaptivePrecisionDynamicMemoryStrategyV48 - LLAMAAdaptivePrecisionDynamicMemoryStrategyV48 = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48" - ).set_name("LLAMAAdaptivePrecisionDynamicMemoryStrategyV48", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionDynamicMemoryStrategyV48 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48").set_name("LLAMAAdaptivePrecisionDynamicMemoryStrategyV48", register=True) except Exception as e: print("AdaptivePrecisionDynamicMemoryStrategyV48 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionEvolutionStrategy import ( - AdaptivePrecisionEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdaptivePrecisionEvolutionStrategy import AdaptivePrecisionEvolutionStrategy lama_register["AdaptivePrecisionEvolutionStrategy"] = AdaptivePrecisionEvolutionStrategy - LLAMAAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionEvolutionStrategy" - ).set_name("LLAMAAdaptivePrecisionEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptivePrecisionEvolutionStrategy").set_name("LLAMAAdaptivePrecisionEvolutionStrategy", register=True) except Exception as e: print("AdaptivePrecisionEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptivePrecisionFocalStrategy import AdaptivePrecisionFocalStrategy lama_register["AdaptivePrecisionFocalStrategy"] = AdaptivePrecisionFocalStrategy - LLAMAAdaptivePrecisionFocalStrategy = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionFocalStrategy" - ).set_name("LLAMAAdaptivePrecisionFocalStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionFocalStrategy = NonObjectOptimizer(method="LLAMAAdaptivePrecisionFocalStrategy").set_name("LLAMAAdaptivePrecisionFocalStrategy", register=True) except Exception as e: print("AdaptivePrecisionFocalStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptivePrecisionHybridSearch import AdaptivePrecisionHybridSearch lama_register["AdaptivePrecisionHybridSearch"] = AdaptivePrecisionHybridSearch - LLAMAAdaptivePrecisionHybridSearch = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionHybridSearch" - ).set_name("LLAMAAdaptivePrecisionHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionHybridSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionHybridSearch").set_name("LLAMAAdaptivePrecisionHybridSearch", register=True) except Exception as e: print("AdaptivePrecisionHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionMemoryStrategyV47 import ( - AdaptivePrecisionMemoryStrategyV47, - ) + from nevergrad.optimization.lama.AdaptivePrecisionMemoryStrategyV47 import AdaptivePrecisionMemoryStrategyV47 lama_register["AdaptivePrecisionMemoryStrategyV47"] = AdaptivePrecisionMemoryStrategyV47 - LLAMAAdaptivePrecisionMemoryStrategyV47 = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionMemoryStrategyV47" - ).set_name("LLAMAAdaptivePrecisionMemoryStrategyV47", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionMemoryStrategyV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionMemoryStrategyV47 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionMemoryStrategyV47").set_name("LLAMAAdaptivePrecisionMemoryStrategyV47", register=True) except Exception as e: print("AdaptivePrecisionMemoryStrategyV47 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionRotationalClimbOptimizer import ( - AdaptivePrecisionRotationalClimbOptimizer, - ) + from nevergrad.optimization.lama.AdaptivePrecisionRotationalClimbOptimizer import AdaptivePrecisionRotationalClimbOptimizer lama_register["AdaptivePrecisionRotationalClimbOptimizer"] = AdaptivePrecisionRotationalClimbOptimizer - LLAMAAdaptivePrecisionRotationalClimbOptimizer = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionRotationalClimbOptimizer" - ).set_name("LLAMAAdaptivePrecisionRotationalClimbOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePrecisionRotationalClimbOptimizer").set_name("LLAMAAdaptivePrecisionRotationalClimbOptimizer", register=True) except Exception as e: print("AdaptivePrecisionRotationalClimbOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptivePrecisionSearch import AdaptivePrecisionSearch lama_register["AdaptivePrecisionSearch"] = AdaptivePrecisionSearch - LLAMAAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch").set_name( - "LLAMAAdaptivePrecisionSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch").set_name("LLAMAAdaptivePrecisionSearch", register=True) except Exception as e: print("AdaptivePrecisionSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptivePrecisionStrategicOptimizer import ( - AdaptivePrecisionStrategicOptimizer, - ) + from nevergrad.optimization.lama.AdaptivePrecisionStrategicOptimizer import AdaptivePrecisionStrategicOptimizer lama_register["AdaptivePrecisionStrategicOptimizer"] = AdaptivePrecisionStrategicOptimizer - LLAMAAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( - method="LLAMAAdaptivePrecisionStrategicOptimizer" - ).set_name("LLAMAAdaptivePrecisionStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePrecisionStrategicOptimizer").set_name("LLAMAAdaptivePrecisionStrategicOptimizer", register=True) except Exception as e: print("AdaptivePrecisionStrategicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQGSA import AdaptiveQGSA lama_register["AdaptiveQGSA"] = AdaptiveQGSA - LLAMAAdaptiveQGSA = NonObjectOptimizer(method="LLAMAAdaptiveQGSA").set_name( - "LLAMAAdaptiveQGSA", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQGSA = NonObjectOptimizer(method="LLAMAAdaptiveQGSA").set_name("LLAMAAdaptiveQGSA", register=True) except Exception as e: print("AdaptiveQGSA can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQGSA_EC import AdaptiveQGSA_EC lama_register["AdaptiveQGSA_EC"] = AdaptiveQGSA_EC - LLAMAAdaptiveQGSA_EC = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC").set_name( - "LLAMAAdaptiveQGSA_EC", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQGSA_EC = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC").set_name("LLAMAAdaptiveQGSA_EC", register=True) except Exception as e: print("AdaptiveQGSA_EC can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDE import AdaptiveQuantumAnnealingDE lama_register["AdaptiveQuantumAnnealingDE"] = AdaptiveQuantumAnnealingDE - LLAMAAdaptiveQuantumAnnealingDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE").set_name( - "LLAMAAdaptiveQuantumAnnealingDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumAnnealingDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE").set_name("LLAMAAdaptiveQuantumAnnealingDE", register=True) except Exception as e: print("AdaptiveQuantumAnnealingDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDEv2 import AdaptiveQuantumAnnealingDEv2 lama_register["AdaptiveQuantumAnnealingDEv2"] = AdaptiveQuantumAnnealingDEv2 - LLAMAAdaptiveQuantumAnnealingDEv2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumAnnealingDEv2" - ).set_name("LLAMAAdaptiveQuantumAnnealingDEv2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDEv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumAnnealingDEv2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDEv2").set_name("LLAMAAdaptiveQuantumAnnealingDEv2", register=True) except Exception as e: print("AdaptiveQuantumAnnealingDEv2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumCognitionOptimizerV3 import ( - AdaptiveQuantumCognitionOptimizerV3, - ) + from nevergrad.optimization.lama.AdaptiveQuantumCognitionOptimizerV3 import AdaptiveQuantumCognitionOptimizerV3 lama_register["AdaptiveQuantumCognitionOptimizerV3"] = AdaptiveQuantumCognitionOptimizerV3 - LLAMAAdaptiveQuantumCognitionOptimizerV3 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumCognitionOptimizerV3" - ).set_name("LLAMAAdaptiveQuantumCognitionOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCognitionOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumCognitionOptimizerV3 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCognitionOptimizerV3").set_name("LLAMAAdaptiveQuantumCognitionOptimizerV3", register=True) except Exception as e: print("AdaptiveQuantumCognitionOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumCrossoverOptimizer import ( - AdaptiveQuantumCrossoverOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumCrossoverOptimizer import AdaptiveQuantumCrossoverOptimizer lama_register["AdaptiveQuantumCrossoverOptimizer"] = AdaptiveQuantumCrossoverOptimizer - LLAMAAdaptiveQuantumCrossoverOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumCrossoverOptimizer" - ).set_name("LLAMAAdaptiveQuantumCrossoverOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCrossoverOptimizer").set_name("LLAMAAdaptiveQuantumCrossoverOptimizer", register=True) except Exception as e: print("AdaptiveQuantumCrossoverOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolution import ( - AdaptiveQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolution import AdaptiveQuantumDifferentialEvolution lama_register["AdaptiveQuantumDifferentialEvolution"] = AdaptiveQuantumDifferentialEvolution - LLAMAAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolution" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolution").set_name("LLAMAAdaptiveQuantumDifferentialEvolution", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionPlus import ( - AdaptiveQuantumDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionPlus import AdaptiveQuantumDifferentialEvolutionPlus lama_register["AdaptiveQuantumDifferentialEvolutionPlus"] = AdaptiveQuantumDifferentialEvolutionPlus - LLAMAAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionPlus", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionV2 import ( - AdaptiveQuantumDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionV2 import AdaptiveQuantumDifferentialEvolutionV2 lama_register["AdaptiveQuantumDifferentialEvolutionV2"] = AdaptiveQuantumDifferentialEvolutionV2 - LLAMAAdaptiveQuantumDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionV2" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionV2").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionV2", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( - AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - lama_register["AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( - AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" - ).set_name( - "LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True - ) + lama_register["AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True) except Exception as e: - print( - "AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e - ) - + print("AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e) try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( - AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - lama_register["AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( - AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) + lama_register["AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch import ( - AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch import AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch - lama_register["AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch"] = ( - AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch", register=True) + lama_register["AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch"] = AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory import ( - AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory import AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory - lama_register["AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory"] = ( - AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory", register=True) + lama_register["AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory"] = AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement import ( - AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement import AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement - lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement"] = ( - AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement" - ).set_name( - "LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement", register=True - ) + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement"] = AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement", register=True) except Exception as e: - print( - "AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement can not be imported: ", e - ) - + print("AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement can not be imported: ", e) try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch import ( - AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch import AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch - lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch"] = ( - AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch - ) - LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch" - ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch", register=True) + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch"] = AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch", register=True) except Exception as e: print("AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDiversityEnhancerV7 import ( - AdaptiveQuantumDiversityEnhancerV7, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDiversityEnhancerV7 import AdaptiveQuantumDiversityEnhancerV7 lama_register["AdaptiveQuantumDiversityEnhancerV7"] = AdaptiveQuantumDiversityEnhancerV7 - LLAMAAdaptiveQuantumDiversityEnhancerV7 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDiversityEnhancerV7" - ).set_name("LLAMAAdaptiveQuantumDiversityEnhancerV7", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDiversityEnhancerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDiversityEnhancerV7 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDiversityEnhancerV7").set_name("LLAMAAdaptiveQuantumDiversityEnhancerV7", register=True) except Exception as e: print("AdaptiveQuantumDiversityEnhancerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumDynamicTuningOptimizer import ( - AdaptiveQuantumDynamicTuningOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumDynamicTuningOptimizer import AdaptiveQuantumDynamicTuningOptimizer lama_register["AdaptiveQuantumDynamicTuningOptimizer"] = AdaptiveQuantumDynamicTuningOptimizer - LLAMAAdaptiveQuantumDynamicTuningOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumDynamicTuningOptimizer" - ).set_name("LLAMAAdaptiveQuantumDynamicTuningOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDynamicTuningOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumDynamicTuningOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDynamicTuningOptimizer").set_name("LLAMAAdaptiveQuantumDynamicTuningOptimizer", register=True) except Exception as e: print("AdaptiveQuantumDynamicTuningOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumEliteDifferentialEvolution import ( - AdaptiveQuantumEliteDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveQuantumEliteDifferentialEvolution import AdaptiveQuantumEliteDifferentialEvolution lama_register["AdaptiveQuantumEliteDifferentialEvolution"] = AdaptiveQuantumEliteDifferentialEvolution - LLAMAAdaptiveQuantumEliteDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumEliteDifferentialEvolution" - ).set_name("LLAMAAdaptiveQuantumEliteDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteDifferentialEvolution").set_name("LLAMAAdaptiveQuantumEliteDifferentialEvolution", register=True) except Exception as e: print("AdaptiveQuantumEliteDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumEliteMemeticOptimizer import ( - AdaptiveQuantumEliteMemeticOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumEliteMemeticOptimizer import AdaptiveQuantumEliteMemeticOptimizer lama_register["AdaptiveQuantumEliteMemeticOptimizer"] = AdaptiveQuantumEliteMemeticOptimizer - LLAMAAdaptiveQuantumEliteMemeticOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumEliteMemeticOptimizer" - ).set_name("LLAMAAdaptiveQuantumEliteMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteMemeticOptimizer").set_name("LLAMAAdaptiveQuantumEliteMemeticOptimizer", register=True) except Exception as e: print("AdaptiveQuantumEliteMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumEntropyDE import AdaptiveQuantumEntropyDE lama_register["AdaptiveQuantumEntropyDE"] = AdaptiveQuantumEntropyDE - LLAMAAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE").set_name( - "LLAMAAdaptiveQuantumEntropyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE").set_name("LLAMAAdaptiveQuantumEntropyDE", register=True) except Exception as e: print("AdaptiveQuantumEntropyDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumEvolutionStrategy import AdaptiveQuantumEvolutionStrategy lama_register["AdaptiveQuantumEvolutionStrategy"] = AdaptiveQuantumEvolutionStrategy - LLAMAAdaptiveQuantumEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumEvolutionStrategy" - ).set_name("LLAMAAdaptiveQuantumEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolutionStrategy").set_name("LLAMAAdaptiveQuantumEvolutionStrategy", register=True) except Exception as e: print("AdaptiveQuantumEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumEvolvedDiversityExplorerV15 import ( - AdaptiveQuantumEvolvedDiversityExplorerV15, - ) + from nevergrad.optimization.lama.AdaptiveQuantumEvolvedDiversityExplorerV15 import AdaptiveQuantumEvolvedDiversityExplorerV15 lama_register["AdaptiveQuantumEvolvedDiversityExplorerV15"] = AdaptiveQuantumEvolvedDiversityExplorerV15 - LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15" - ).set_name("LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15").set_name("LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15", register=True) except Exception as e: print("AdaptiveQuantumEvolvedDiversityExplorerV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch import ( - AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch import AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch - lama_register["AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch"] = ( - AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch - ) - LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch" - ).set_name("LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch", register=True) + lama_register["AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch"] = AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch").set_name("LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch", register=True) except Exception as e: print("AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedMemeticSearch import ( - AdaptiveQuantumGradientBoostedMemeticSearch, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedMemeticSearch import AdaptiveQuantumGradientBoostedMemeticSearch lama_register["AdaptiveQuantumGradientBoostedMemeticSearch"] = AdaptiveQuantumGradientBoostedMemeticSearch - LLAMAAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch" - ).set_name("LLAMAAdaptiveQuantumGradientBoostedMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch").set_name("LLAMAAdaptiveQuantumGradientBoostedMemeticSearch", register=True) except Exception as e: print("AdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientEnhancedOptimizer import ( - AdaptiveQuantumGradientEnhancedOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientEnhancedOptimizer import AdaptiveQuantumGradientEnhancedOptimizer lama_register["AdaptiveQuantumGradientEnhancedOptimizer"] = AdaptiveQuantumGradientEnhancedOptimizer - LLAMAAdaptiveQuantumGradientEnhancedOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer" - ).set_name("LLAMAAdaptiveQuantumGradientEnhancedOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientEnhancedOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer").set_name("LLAMAAdaptiveQuantumGradientEnhancedOptimizer", register=True) except Exception as e: print("AdaptiveQuantumGradientEnhancedOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimization import ( - AdaptiveQuantumGradientExplorationOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimization import AdaptiveQuantumGradientExplorationOptimization - lama_register["AdaptiveQuantumGradientExplorationOptimization"] = ( - AdaptiveQuantumGradientExplorationOptimization - ) - LLAMAAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientExplorationOptimization" - ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimization", register=True) + lama_register["AdaptiveQuantumGradientExplorationOptimization"] = AdaptiveQuantumGradientExplorationOptimization + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimization").set_name("LLAMAAdaptiveQuantumGradientExplorationOptimization", register=True) except Exception as e: print("AdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimizationV2 import ( - AdaptiveQuantumGradientExplorationOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimizationV2 import AdaptiveQuantumGradientExplorationOptimizationV2 - lama_register["AdaptiveQuantumGradientExplorationOptimizationV2"] = ( - AdaptiveQuantumGradientExplorationOptimizationV2 - ) - LLAMAAdaptiveQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2" - ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimizationV2", register=True) + lama_register["AdaptiveQuantumGradientExplorationOptimizationV2"] = AdaptiveQuantumGradientExplorationOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2").set_name("LLAMAAdaptiveQuantumGradientExplorationOptimizationV2", register=True) except Exception as e: print("AdaptiveQuantumGradientExplorationOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientHybridOptimizer import ( - AdaptiveQuantumGradientHybridOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumGradientHybridOptimizer import AdaptiveQuantumGradientHybridOptimizer lama_register["AdaptiveQuantumGradientHybridOptimizer"] = AdaptiveQuantumGradientHybridOptimizer - LLAMAAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientHybridOptimizer" - ).set_name("LLAMAAdaptiveQuantumGradientHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientHybridOptimizer").set_name("LLAMAAdaptiveQuantumGradientHybridOptimizer", register=True) except Exception as e: print("AdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumGradientOptimizer import AdaptiveQuantumGradientOptimizer lama_register["AdaptiveQuantumGradientOptimizer"] = AdaptiveQuantumGradientOptimizer - LLAMAAdaptiveQuantumGradientOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumGradientOptimizer" - ).set_name("LLAMAAdaptiveQuantumGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumGradientOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientOptimizer").set_name("LLAMAAdaptiveQuantumGradientOptimizer", register=True) except Exception as e: print("AdaptiveQuantumGradientOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumHarmonizedPSO import AdaptiveQuantumHarmonizedPSO lama_register["AdaptiveQuantumHarmonizedPSO"] = AdaptiveQuantumHarmonizedPSO - LLAMAAdaptiveQuantumHarmonizedPSO = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumHarmonizedPSO" - ).set_name("LLAMAAdaptiveQuantumHarmonizedPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHarmonizedPSO").set_name("LLAMAAdaptiveQuantumHarmonizedPSO", register=True) except Exception as e: print("AdaptiveQuantumHarmonizedPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumHybridOptimizer import AdaptiveQuantumHybridOptimizer lama_register["AdaptiveQuantumHybridOptimizer"] = AdaptiveQuantumHybridOptimizer - LLAMAAdaptiveQuantumHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumHybridOptimizer" - ).set_name("LLAMAAdaptiveQuantumHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridOptimizer").set_name("LLAMAAdaptiveQuantumHybridOptimizer", register=True) except Exception as e: print("AdaptiveQuantumHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumHybridSearchV2 import AdaptiveQuantumHybridSearchV2 lama_register["AdaptiveQuantumHybridSearchV2"] = AdaptiveQuantumHybridSearchV2 - LLAMAAdaptiveQuantumHybridSearchV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumHybridSearchV2" - ).set_name("LLAMAAdaptiveQuantumHybridSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumHybridSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridSearchV2").set_name("LLAMAAdaptiveQuantumHybridSearchV2", register=True) except Exception as e: print("AdaptiveQuantumHybridSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumInfluencedMemeticAlgorithm import ( - AdaptiveQuantumInfluencedMemeticAlgorithm, - ) + from nevergrad.optimization.lama.AdaptiveQuantumInfluencedMemeticAlgorithm import AdaptiveQuantumInfluencedMemeticAlgorithm lama_register["AdaptiveQuantumInfluencedMemeticAlgorithm"] = AdaptiveQuantumInfluencedMemeticAlgorithm - LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm" - ).set_name("LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm").set_name("LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm", register=True) except Exception as e: print("AdaptiveQuantumInfluencedMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumInformedDifferentialStrategy import ( - AdaptiveQuantumInformedDifferentialStrategy, - ) + from nevergrad.optimization.lama.AdaptiveQuantumInformedDifferentialStrategy import AdaptiveQuantumInformedDifferentialStrategy lama_register["AdaptiveQuantumInformedDifferentialStrategy"] = AdaptiveQuantumInformedDifferentialStrategy - LLAMAAdaptiveQuantumInformedDifferentialStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumInformedDifferentialStrategy" - ).set_name("LLAMAAdaptiveQuantumInformedDifferentialStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumInformedDifferentialStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedDifferentialStrategy").set_name("LLAMAAdaptiveQuantumInformedDifferentialStrategy", register=True) except Exception as e: print("AdaptiveQuantumInformedDifferentialStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumInformedGradientEnhancer import ( - AdaptiveQuantumInformedGradientEnhancer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumInformedGradientEnhancer import AdaptiveQuantumInformedGradientEnhancer lama_register["AdaptiveQuantumInformedGradientEnhancer"] = AdaptiveQuantumInformedGradientEnhancer - LLAMAAdaptiveQuantumInformedGradientEnhancer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumInformedGradientEnhancer" - ).set_name("LLAMAAdaptiveQuantumInformedGradientEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedGradientEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumInformedGradientEnhancer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedGradientEnhancer").set_name("LLAMAAdaptiveQuantumInformedGradientEnhancer", register=True) except Exception as e: print("AdaptiveQuantumInformedGradientEnhancer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumLeapOptimizer import AdaptiveQuantumLeapOptimizer lama_register["AdaptiveQuantumLeapOptimizer"] = AdaptiveQuantumLeapOptimizer - LLAMAAdaptiveQuantumLeapOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLeapOptimizer" - ).set_name("LLAMAAdaptiveQuantumLeapOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLeapOptimizer").set_name("LLAMAAdaptiveQuantumLeapOptimizer", register=True) except Exception as e: print("AdaptiveQuantumLeapOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialEnhancedOptimizer import ( - AdaptiveQuantumLevyDifferentialEnhancedOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialEnhancedOptimizer import AdaptiveQuantumLevyDifferentialEnhancedOptimizer - lama_register["AdaptiveQuantumLevyDifferentialEnhancedOptimizer"] = ( - AdaptiveQuantumLevyDifferentialEnhancedOptimizer - ) - LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer" - ).set_name("LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer", register=True) + lama_register["AdaptiveQuantumLevyDifferentialEnhancedOptimizer"] = AdaptiveQuantumLevyDifferentialEnhancedOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer").set_name("LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer", register=True) except Exception as e: print("AdaptiveQuantumLevyDifferentialEnhancedOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizer import ( - AdaptiveQuantumLevyDifferentialOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizer import AdaptiveQuantumLevyDifferentialOptimizer lama_register["AdaptiveQuantumLevyDifferentialOptimizer"] = AdaptiveQuantumLevyDifferentialOptimizer - LLAMAAdaptiveQuantumLevyDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer" - ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer").set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizer", register=True) except Exception as e: print("AdaptiveQuantumLevyDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizerV2 import ( - AdaptiveQuantumLevyDifferentialOptimizerV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizerV2 import AdaptiveQuantumLevyDifferentialOptimizerV2 lama_register["AdaptiveQuantumLevyDifferentialOptimizerV2"] = AdaptiveQuantumLevyDifferentialOptimizerV2 - LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2" - ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2").set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2", register=True) except Exception as e: print("AdaptiveQuantumLevyDifferentialOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 import ( - AdaptiveQuantumLevyDifferentialSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 import AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 - lama_register["AdaptiveQuantumLevyDifferentialSwarmOptimizationV2"] = ( - AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 - ) - LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2" - ).set_name("LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2", register=True) + lama_register["AdaptiveQuantumLevyDifferentialSwarmOptimizationV2"] = AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2", register=True) except Exception as e: print("AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicOptimization import ( - AdaptiveQuantumLevyDynamicOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicOptimization import AdaptiveQuantumLevyDynamicOptimization lama_register["AdaptiveQuantumLevyDynamicOptimization"] = AdaptiveQuantumLevyDynamicOptimization - LLAMAAdaptiveQuantumLevyDynamicOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDynamicOptimization" - ).set_name("LLAMAAdaptiveQuantumLevyDynamicOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDynamicOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicOptimization").set_name("LLAMAAdaptiveQuantumLevyDynamicOptimization", register=True) except Exception as e: print("AdaptiveQuantumLevyDynamicOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimization import ( - AdaptiveQuantumLevyDynamicSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimization import AdaptiveQuantumLevyDynamicSwarmOptimization lama_register["AdaptiveQuantumLevyDynamicSwarmOptimization"] = AdaptiveQuantumLevyDynamicSwarmOptimization - LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization" - ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization").set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization", register=True) except Exception as e: print("AdaptiveQuantumLevyDynamicSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimizationV2 import ( - AdaptiveQuantumLevyDynamicSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimizationV2 import AdaptiveQuantumLevyDynamicSwarmOptimizationV2 - lama_register["AdaptiveQuantumLevyDynamicSwarmOptimizationV2"] = ( - AdaptiveQuantumLevyDynamicSwarmOptimizationV2 - ) - LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2" - ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2", register=True) + lama_register["AdaptiveQuantumLevyDynamicSwarmOptimizationV2"] = AdaptiveQuantumLevyDynamicSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2", register=True) except Exception as e: print("AdaptiveQuantumLevyDynamicSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyEnhancedDifferentialOptimizer import ( - AdaptiveQuantumLevyEnhancedDifferentialOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyEnhancedDifferentialOptimizer import AdaptiveQuantumLevyEnhancedDifferentialOptimizer - lama_register["AdaptiveQuantumLevyEnhancedDifferentialOptimizer"] = ( - AdaptiveQuantumLevyEnhancedDifferentialOptimizer - ) - LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer" - ).set_name("LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer", register=True) + lama_register["AdaptiveQuantumLevyEnhancedDifferentialOptimizer"] = AdaptiveQuantumLevyEnhancedDifferentialOptimizer + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer").set_name("LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer", register=True) except Exception as e: print("AdaptiveQuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizer import ( - AdaptiveQuantumLevyMemeticOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizer import AdaptiveQuantumLevyMemeticOptimizer lama_register["AdaptiveQuantumLevyMemeticOptimizer"] = AdaptiveQuantumLevyMemeticOptimizer - LLAMAAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyMemeticOptimizer" - ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizer").set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizer", register=True) except Exception as e: print("AdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizerV2 import ( - AdaptiveQuantumLevyMemeticOptimizerV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizerV2 import AdaptiveQuantumLevyMemeticOptimizerV2 lama_register["AdaptiveQuantumLevyMemeticOptimizerV2"] = AdaptiveQuantumLevyMemeticOptimizerV2 - LLAMAAdaptiveQuantumLevyMemeticOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2" - ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2").set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizerV2", register=True) except Exception as e: print("AdaptiveQuantumLevyMemeticOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevySwarmOptimization import ( - AdaptiveQuantumLevySwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevySwarmOptimization import AdaptiveQuantumLevySwarmOptimization lama_register["AdaptiveQuantumLevySwarmOptimization"] = AdaptiveQuantumLevySwarmOptimization - LLAMAAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevySwarmOptimization" - ).set_name("LLAMAAdaptiveQuantumLevySwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevySwarmOptimization").set_name("LLAMAAdaptiveQuantumLevySwarmOptimization", register=True) except Exception as e: print("AdaptiveQuantumLevySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyTreeOptimization import ( - AdaptiveQuantumLevyTreeOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumLevyTreeOptimization import AdaptiveQuantumLevyTreeOptimization lama_register["AdaptiveQuantumLevyTreeOptimization"] = AdaptiveQuantumLevyTreeOptimization - LLAMAAdaptiveQuantumLevyTreeOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumLevyTreeOptimization" - ).set_name("LLAMAAdaptiveQuantumLevyTreeOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyTreeOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLevyTreeOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyTreeOptimization").set_name("LLAMAAdaptiveQuantumLevyTreeOptimization", register=True) except Exception as e: print("AdaptiveQuantumLevyTreeOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumLocalSearch import AdaptiveQuantumLocalSearch lama_register["AdaptiveQuantumLocalSearch"] = AdaptiveQuantumLocalSearch - LLAMAAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch").set_name( - "LLAMAAdaptiveQuantumLocalSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch").set_name("LLAMAAdaptiveQuantumLocalSearch", register=True) except Exception as e: print("AdaptiveQuantumLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticEvolutionaryOptimizer import ( - AdaptiveQuantumMemeticEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumMemeticEvolutionaryOptimizer import AdaptiveQuantumMemeticEvolutionaryOptimizer lama_register["AdaptiveQuantumMemeticEvolutionaryOptimizer"] = AdaptiveQuantumMemeticEvolutionaryOptimizer - LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer" - ).set_name("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer").set_name("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer", register=True) except Exception as e: print("AdaptiveQuantumMemeticEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticGradientBoost import ( - AdaptiveQuantumMemeticGradientBoost, - ) + from nevergrad.optimization.lama.AdaptiveQuantumMemeticGradientBoost import AdaptiveQuantumMemeticGradientBoost lama_register["AdaptiveQuantumMemeticGradientBoost"] = AdaptiveQuantumMemeticGradientBoost - LLAMAAdaptiveQuantumMemeticGradientBoost = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticGradientBoost" - ).set_name("LLAMAAdaptiveQuantumMemeticGradientBoost", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticGradientBoost").set_name("LLAMAAdaptiveQuantumMemeticGradientBoost", register=True) except Exception as e: print("AdaptiveQuantumMemeticGradientBoost can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizer import AdaptiveQuantumMemeticOptimizer lama_register["AdaptiveQuantumMemeticOptimizer"] = AdaptiveQuantumMemeticOptimizer - LLAMAAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticOptimizer" - ).set_name("LLAMAAdaptiveQuantumMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizer").set_name("LLAMAAdaptiveQuantumMemeticOptimizer", register=True) except Exception as e: print("AdaptiveQuantumMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerPlus import ( - AdaptiveQuantumMemeticOptimizerPlus, - ) + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerPlus import AdaptiveQuantumMemeticOptimizerPlus lama_register["AdaptiveQuantumMemeticOptimizerPlus"] = AdaptiveQuantumMemeticOptimizerPlus - LLAMAAdaptiveQuantumMemeticOptimizerPlus = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticOptimizerPlus" - ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerPlus", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticOptimizerPlus = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerPlus").set_name("LLAMAAdaptiveQuantumMemeticOptimizerPlus", register=True) except Exception as e: print("AdaptiveQuantumMemeticOptimizerPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV2 import ( - AdaptiveQuantumMemeticOptimizerV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV2 import AdaptiveQuantumMemeticOptimizerV2 lama_register["AdaptiveQuantumMemeticOptimizerV2"] = AdaptiveQuantumMemeticOptimizerV2 - LLAMAAdaptiveQuantumMemeticOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticOptimizerV2" - ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV2").set_name("LLAMAAdaptiveQuantumMemeticOptimizerV2", register=True) except Exception as e: print("AdaptiveQuantumMemeticOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV3 import ( - AdaptiveQuantumMemeticOptimizerV3, - ) + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV3 import AdaptiveQuantumMemeticOptimizerV3 lama_register["AdaptiveQuantumMemeticOptimizerV3"] = AdaptiveQuantumMemeticOptimizerV3 - LLAMAAdaptiveQuantumMemeticOptimizerV3 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMemeticOptimizerV3" - ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMemeticOptimizerV3 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV3").set_name("LLAMAAdaptiveQuantumMemeticOptimizerV3", register=True) except Exception as e: print("AdaptiveQuantumMemeticOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumMetaheuristic import AdaptiveQuantumMetaheuristic lama_register["AdaptiveQuantumMetaheuristic"] = AdaptiveQuantumMetaheuristic - LLAMAAdaptiveQuantumMetaheuristic = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumMetaheuristic" - ).set_name("LLAMAAdaptiveQuantumMetaheuristic", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumMetaheuristic = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMetaheuristic").set_name("LLAMAAdaptiveQuantumMetaheuristic", register=True) except Exception as e: print("AdaptiveQuantumMetaheuristic can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumPSO import AdaptiveQuantumPSO lama_register["AdaptiveQuantumPSO"] = AdaptiveQuantumPSO - LLAMAAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO").set_name( - "LLAMAAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO").set_name("LLAMAAdaptiveQuantumPSO", register=True) except Exception as e: print("AdaptiveQuantumPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumPSOEnhanced import AdaptiveQuantumPSOEnhanced lama_register["AdaptiveQuantumPSOEnhanced"] = AdaptiveQuantumPSOEnhanced - LLAMAAdaptiveQuantumPSOEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced").set_name( - "LLAMAAdaptiveQuantumPSOEnhanced", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumPSOEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced").set_name("LLAMAAdaptiveQuantumPSOEnhanced", register=True) except Exception as e: print("AdaptiveQuantumPSOEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumParticleDifferentialSwarm import ( - AdaptiveQuantumParticleDifferentialSwarm, - ) + from nevergrad.optimization.lama.AdaptiveQuantumParticleDifferentialSwarm import AdaptiveQuantumParticleDifferentialSwarm lama_register["AdaptiveQuantumParticleDifferentialSwarm"] = AdaptiveQuantumParticleDifferentialSwarm - LLAMAAdaptiveQuantumParticleDifferentialSwarm = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumParticleDifferentialSwarm" - ).set_name("LLAMAAdaptiveQuantumParticleDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumParticleDifferentialSwarm = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleDifferentialSwarm").set_name("LLAMAAdaptiveQuantumParticleDifferentialSwarm", register=True) except Exception as e: print("AdaptiveQuantumParticleDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumParticleSwarmOptimization import ( - AdaptiveQuantumParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.AdaptiveQuantumParticleSwarmOptimization import AdaptiveQuantumParticleSwarmOptimization lama_register["AdaptiveQuantumParticleSwarmOptimization"] = AdaptiveQuantumParticleSwarmOptimization - LLAMAAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumParticleSwarmOptimization" - ).set_name("LLAMAAdaptiveQuantumParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleSwarmOptimization").set_name("LLAMAAdaptiveQuantumParticleSwarmOptimization", register=True) except Exception as e: print("AdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumResonanceOptimizer import ( - AdaptiveQuantumResonanceOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumResonanceOptimizer import AdaptiveQuantumResonanceOptimizer lama_register["AdaptiveQuantumResonanceOptimizer"] = AdaptiveQuantumResonanceOptimizer - LLAMAAdaptiveQuantumResonanceOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumResonanceOptimizer" - ).set_name("LLAMAAdaptiveQuantumResonanceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumResonanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumResonanceOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumResonanceOptimizer").set_name("LLAMAAdaptiveQuantumResonanceOptimizer", register=True) except Exception as e: print("AdaptiveQuantumResonanceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumStrategicOptimizer import ( - AdaptiveQuantumStrategicOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveQuantumStrategicOptimizer import AdaptiveQuantumStrategicOptimizer lama_register["AdaptiveQuantumStrategicOptimizer"] = AdaptiveQuantumStrategicOptimizer - LLAMAAdaptiveQuantumStrategicOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumStrategicOptimizer" - ).set_name("LLAMAAdaptiveQuantumStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumStrategicOptimizer").set_name("LLAMAAdaptiveQuantumStrategicOptimizer", register=True) except Exception as e: print("AdaptiveQuantumStrategicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizationV2 import ( - AdaptiveQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizationV2 import AdaptiveQuantumSwarmOptimizationV2 lama_register["AdaptiveQuantumSwarmOptimizationV2"] = AdaptiveQuantumSwarmOptimizationV2 - LLAMAAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumSwarmOptimizationV2" - ).set_name("LLAMAAdaptiveQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumSwarmOptimizationV2", register=True) except Exception as e: print("AdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizerV2 import AdaptiveQuantumSwarmOptimizerV2 lama_register["AdaptiveQuantumSwarmOptimizerV2"] = AdaptiveQuantumSwarmOptimizerV2 - LLAMAAdaptiveQuantumSwarmOptimizerV2 = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumSwarmOptimizerV2" - ).set_name("LLAMAAdaptiveQuantumSwarmOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizerV2").set_name("LLAMAAdaptiveQuantumSwarmOptimizerV2", register=True) except Exception as e: print("AdaptiveQuantumSwarmOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuantumSymbioticStrategy import AdaptiveQuantumSymbioticStrategy lama_register["AdaptiveQuantumSymbioticStrategy"] = AdaptiveQuantumSymbioticStrategy - LLAMAAdaptiveQuantumSymbioticStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveQuantumSymbioticStrategy" - ).set_name("LLAMAAdaptiveQuantumSymbioticStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSymbioticStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuantumSymbioticStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSymbioticStrategy").set_name("LLAMAAdaptiveQuantumSymbioticStrategy", register=True) except Exception as e: print("AdaptiveQuantumSymbioticStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuasiGradientEvolution import AdaptiveQuasiGradientEvolution lama_register["AdaptiveQuasiGradientEvolution"] = AdaptiveQuasiGradientEvolution - LLAMAAdaptiveQuasiGradientEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveQuasiGradientEvolution" - ).set_name("LLAMAAdaptiveQuasiGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuasiGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuasiGradientEvolution").set_name("LLAMAAdaptiveQuasiGradientEvolution", register=True) except Exception as e: print("AdaptiveQuasiGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuasiRandomEnhancedDifferentialEvolution import ( - AdaptiveQuasiRandomEnhancedDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveQuasiRandomEnhancedDifferentialEvolution import AdaptiveQuasiRandomEnhancedDifferentialEvolution - lama_register["AdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( - AdaptiveQuasiRandomEnhancedDifferentialEvolution - ) - LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution" - ).set_name("LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) + lama_register["AdaptiveQuasiRandomEnhancedDifferentialEvolution"] = AdaptiveQuasiRandomEnhancedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution").set_name("LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) except Exception as e: print("AdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveQuasiRandomGradientDE import AdaptiveQuasiRandomGradientDE lama_register["AdaptiveQuasiRandomGradientDE"] = AdaptiveQuasiRandomGradientDE - LLAMAAdaptiveQuasiRandomGradientDE = NonObjectOptimizer( - method="LLAMAAdaptiveQuasiRandomGradientDE" - ).set_name("LLAMAAdaptiveQuasiRandomGradientDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomGradientDE").set_name("LLAMAAdaptiveQuasiRandomGradientDE", register=True) except Exception as e: print("AdaptiveQuasiRandomGradientDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveQuorumWithStrategicMutation import ( - AdaptiveQuorumWithStrategicMutation, - ) + from nevergrad.optimization.lama.AdaptiveQuorumWithStrategicMutation import AdaptiveQuorumWithStrategicMutation lama_register["AdaptiveQuorumWithStrategicMutation"] = AdaptiveQuorumWithStrategicMutation - LLAMAAdaptiveQuorumWithStrategicMutation = NonObjectOptimizer( - method="LLAMAAdaptiveQuorumWithStrategicMutation" - ).set_name("LLAMAAdaptiveQuorumWithStrategicMutation", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveQuorumWithStrategicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveQuorumWithStrategicMutation = NonObjectOptimizer(method="LLAMAAdaptiveQuorumWithStrategicMutation").set_name("LLAMAAdaptiveQuorumWithStrategicMutation", register=True) except Exception as e: print("AdaptiveQuorumWithStrategicMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveRefinedGradientBoostedAnnealing import ( - AdaptiveRefinedGradientBoostedAnnealing, - ) + from nevergrad.optimization.lama.AdaptiveRefinedGradientBoostedAnnealing import AdaptiveRefinedGradientBoostedAnnealing lama_register["AdaptiveRefinedGradientBoostedAnnealing"] = AdaptiveRefinedGradientBoostedAnnealing - LLAMAAdaptiveRefinedGradientBoostedAnnealing = NonObjectOptimizer( - method="LLAMAAdaptiveRefinedGradientBoostedAnnealing" - ).set_name("LLAMAAdaptiveRefinedGradientBoostedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRefinedGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveRefinedGradientBoostedAnnealing").set_name("LLAMAAdaptiveRefinedGradientBoostedAnnealing", register=True) except Exception as e: print("AdaptiveRefinedGradientBoostedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveRefinedHybridPSO_DE import AdaptiveRefinedHybridPSO_DE lama_register["AdaptiveRefinedHybridPSO_DE"] = AdaptiveRefinedHybridPSO_DE - LLAMAAdaptiveRefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE").set_name( - "LLAMAAdaptiveRefinedHybridPSO_DE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE").set_name("LLAMAAdaptiveRefinedHybridPSO_DE", register=True) except Exception as e: print("AdaptiveRefinedHybridPSO_DE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveRefinementEvolutiveStrategy import ( - AdaptiveRefinementEvolutiveStrategy, - ) + from nevergrad.optimization.lama.AdaptiveRefinementEvolutiveStrategy import AdaptiveRefinementEvolutiveStrategy lama_register["AdaptiveRefinementEvolutiveStrategy"] = AdaptiveRefinementEvolutiveStrategy - LLAMAAdaptiveRefinementEvolutiveStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveRefinementEvolutiveStrategy" - ).set_name("LLAMAAdaptiveRefinementEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRefinementEvolutiveStrategy = NonObjectOptimizer(method="LLAMAAdaptiveRefinementEvolutiveStrategy").set_name("LLAMAAdaptiveRefinementEvolutiveStrategy", register=True) except Exception as e: print("AdaptiveRefinementEvolutiveStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveRefinementPSO import AdaptiveRefinementPSO lama_register["AdaptiveRefinementPSO"] = AdaptiveRefinementPSO - LLAMAAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO").set_name( - "LLAMAAdaptiveRefinementPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO").set_name("LLAMAAdaptiveRefinementPSO", register=True) except Exception as e: print("AdaptiveRefinementPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveRefinementSearchStrategyV30 import ( - AdaptiveRefinementSearchStrategyV30, - ) + from nevergrad.optimization.lama.AdaptiveRefinementSearchStrategyV30 import AdaptiveRefinementSearchStrategyV30 lama_register["AdaptiveRefinementSearchStrategyV30"] = AdaptiveRefinementSearchStrategyV30 - LLAMAAdaptiveRefinementSearchStrategyV30 = NonObjectOptimizer( - method="LLAMAAdaptiveRefinementSearchStrategyV30" - ).set_name("LLAMAAdaptiveRefinementSearchStrategyV30", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementSearchStrategyV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRefinementSearchStrategyV30 = NonObjectOptimizer(method="LLAMAAdaptiveRefinementSearchStrategyV30").set_name("LLAMAAdaptiveRefinementSearchStrategyV30", register=True) except Exception as e: print("AdaptiveRefinementSearchStrategyV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveResilientQuantumCrossoverStrategy import ( - AdaptiveResilientQuantumCrossoverStrategy, - ) + from nevergrad.optimization.lama.AdaptiveResilientQuantumCrossoverStrategy import AdaptiveResilientQuantumCrossoverStrategy lama_register["AdaptiveResilientQuantumCrossoverStrategy"] = AdaptiveResilientQuantumCrossoverStrategy - LLAMAAdaptiveResilientQuantumCrossoverStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveResilientQuantumCrossoverStrategy" - ).set_name("LLAMAAdaptiveResilientQuantumCrossoverStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveResilientQuantumCrossoverStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveResilientQuantumCrossoverStrategy = NonObjectOptimizer(method="LLAMAAdaptiveResilientQuantumCrossoverStrategy").set_name("LLAMAAdaptiveResilientQuantumCrossoverStrategy", register=True) except Exception as e: print("AdaptiveResilientQuantumCrossoverStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveRestartDE import AdaptiveRestartDE lama_register["AdaptiveRestartDE"] = AdaptiveRestartDE - LLAMAAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE").set_name( - "LLAMAAdaptiveRestartDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE").set_name("LLAMAAdaptiveRestartDE", register=True) except Exception as e: print("AdaptiveRestartDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveRestartHybridOptimizer import AdaptiveRestartHybridOptimizer lama_register["AdaptiveRestartHybridOptimizer"] = AdaptiveRestartHybridOptimizer - LLAMAAdaptiveRestartHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveRestartHybridOptimizer" - ).set_name("LLAMAAdaptiveRestartHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveRestartHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRestartHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveRestartHybridOptimizer").set_name("LLAMAAdaptiveRestartHybridOptimizer", register=True) except Exception as e: print("AdaptiveRestartHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveRotationalClimbOptimizer import AdaptiveRotationalClimbOptimizer lama_register["AdaptiveRotationalClimbOptimizer"] = AdaptiveRotationalClimbOptimizer - LLAMAAdaptiveRotationalClimbOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveRotationalClimbOptimizer" - ).set_name("LLAMAAdaptiveRotationalClimbOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveRotationalClimbOptimizer").set_name("LLAMAAdaptiveRotationalClimbOptimizer", register=True) except Exception as e: print("AdaptiveRotationalClimbOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveSigmaCrossoverEvolution import AdaptiveSigmaCrossoverEvolution lama_register["AdaptiveSigmaCrossoverEvolution"] = AdaptiveSigmaCrossoverEvolution - LLAMAAdaptiveSigmaCrossoverEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveSigmaCrossoverEvolution" - ).set_name("LLAMAAdaptiveSigmaCrossoverEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSigmaCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSigmaCrossoverEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSigmaCrossoverEvolution").set_name("LLAMAAdaptiveSigmaCrossoverEvolution", register=True) except Exception as e: print("AdaptiveSigmaCrossoverEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveSimulatedAnnealing import AdaptiveSimulatedAnnealing lama_register["AdaptiveSimulatedAnnealing"] = AdaptiveSimulatedAnnealing - LLAMAAdaptiveSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing").set_name( - "LLAMAAdaptiveSimulatedAnnealing", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing").set_name("LLAMAAdaptiveSimulatedAnnealing", register=True) except Exception as e: print("AdaptiveSimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingSearch import AdaptiveSimulatedAnnealingSearch lama_register["AdaptiveSimulatedAnnealingSearch"] = AdaptiveSimulatedAnnealingSearch - LLAMAAdaptiveSimulatedAnnealingSearch = NonObjectOptimizer( - method="LLAMAAdaptiveSimulatedAnnealingSearch" - ).set_name("LLAMAAdaptiveSimulatedAnnealingSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSimulatedAnnealingSearch = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingSearch").set_name("LLAMAAdaptiveSimulatedAnnealingSearch", register=True) except Exception as e: print("AdaptiveSimulatedAnnealingSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingWithSmartMemory import ( - AdaptiveSimulatedAnnealingWithSmartMemory, - ) + from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingWithSmartMemory import AdaptiveSimulatedAnnealingWithSmartMemory lama_register["AdaptiveSimulatedAnnealingWithSmartMemory"] = AdaptiveSimulatedAnnealingWithSmartMemory - LLAMAAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( - method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory" - ).set_name("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory", register=True) except Exception as e: print("AdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSineCosineDifferentialEvolution import ( - AdaptiveSineCosineDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveSineCosineDifferentialEvolution import AdaptiveSineCosineDifferentialEvolution lama_register["AdaptiveSineCosineDifferentialEvolution"] = AdaptiveSineCosineDifferentialEvolution - LLAMAAdaptiveSineCosineDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveSineCosineDifferentialEvolution" - ).set_name("LLAMAAdaptiveSineCosineDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSineCosineDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSineCosineDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSineCosineDifferentialEvolution").set_name("LLAMAAdaptiveSineCosineDifferentialEvolution", register=True) except Exception as e: print("AdaptiveSineCosineDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSinusoidalDifferentialSwarm import ( - AdaptiveSinusoidalDifferentialSwarm, - ) + from nevergrad.optimization.lama.AdaptiveSinusoidalDifferentialSwarm import AdaptiveSinusoidalDifferentialSwarm lama_register["AdaptiveSinusoidalDifferentialSwarm"] = AdaptiveSinusoidalDifferentialSwarm - LLAMAAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( - method="LLAMAAdaptiveSinusoidalDifferentialSwarm" - ).set_name("LLAMAAdaptiveSinusoidalDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAAdaptiveSinusoidalDifferentialSwarm", register=True) except Exception as e: print("AdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSpatialExplorationOptimizer import ( - AdaptiveSpatialExplorationOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveSpatialExplorationOptimizer import AdaptiveSpatialExplorationOptimizer lama_register["AdaptiveSpatialExplorationOptimizer"] = AdaptiveSpatialExplorationOptimizer - LLAMAAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveSpatialExplorationOptimizer" - ).set_name("LLAMAAdaptiveSpatialExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveSpatialExplorationOptimizer").set_name("LLAMAAdaptiveSpatialExplorationOptimizer", register=True) except Exception as e: print("AdaptiveSpatialExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveSpiralGradientSearch import AdaptiveSpiralGradientSearch lama_register["AdaptiveSpiralGradientSearch"] = AdaptiveSpiralGradientSearch - LLAMAAdaptiveSpiralGradientSearch = NonObjectOptimizer( - method="LLAMAAdaptiveSpiralGradientSearch" - ).set_name("LLAMAAdaptiveSpiralGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveSpiralGradientSearch").set_name("LLAMAAdaptiveSpiralGradientSearch", register=True) except Exception as e: print("AdaptiveSpiralGradientSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveStepSearch import AdaptiveStepSearch lama_register["AdaptiveStepSearch"] = AdaptiveStepSearch - LLAMAAdaptiveStepSearch = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch").set_name( - "LLAMAAdaptiveStepSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveStepSearch = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch").set_name("LLAMAAdaptiveStepSearch", register=True) except Exception as e: print("AdaptiveStepSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveStochasticGradientQuorumOptimization import ( - AdaptiveStochasticGradientQuorumOptimization, - ) + from nevergrad.optimization.lama.AdaptiveStochasticGradientQuorumOptimization import AdaptiveStochasticGradientQuorumOptimization - lama_register["AdaptiveStochasticGradientQuorumOptimization"] = ( - AdaptiveStochasticGradientQuorumOptimization - ) - LLAMAAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveStochasticGradientQuorumOptimization" - ).set_name("LLAMAAdaptiveStochasticGradientQuorumOptimization", register=True) + lama_register["AdaptiveStochasticGradientQuorumOptimization"] = AdaptiveStochasticGradientQuorumOptimization + res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveStochasticGradientQuorumOptimization").set_name("LLAMAAdaptiveStochasticGradientQuorumOptimization", register=True) except Exception as e: print("AdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveStochasticHybridEvolution import ( - AdaptiveStochasticHybridEvolution, - ) + from nevergrad.optimization.lama.AdaptiveStochasticHybridEvolution import AdaptiveStochasticHybridEvolution lama_register["AdaptiveStochasticHybridEvolution"] = AdaptiveStochasticHybridEvolution - LLAMAAdaptiveStochasticHybridEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveStochasticHybridEvolution" - ).set_name("LLAMAAdaptiveStochasticHybridEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveStochasticHybridEvolution = NonObjectOptimizer(method="LLAMAAdaptiveStochasticHybridEvolution").set_name("LLAMAAdaptiveStochasticHybridEvolution", register=True) except Exception as e: print("AdaptiveStochasticHybridEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveStochasticTunneling import AdaptiveStochasticTunneling lama_register["AdaptiveStochasticTunneling"] = AdaptiveStochasticTunneling - LLAMAAdaptiveStochasticTunneling = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling").set_name( - "LLAMAAdaptiveStochasticTunneling", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveStochasticTunneling = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling").set_name("LLAMAAdaptiveStochasticTunneling", register=True) except Exception as e: print("AdaptiveStochasticTunneling can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveStrategicExplorationOptimizer import ( - AdaptiveStrategicExplorationOptimizer, - ) + from nevergrad.optimization.lama.AdaptiveStrategicExplorationOptimizer import AdaptiveStrategicExplorationOptimizer lama_register["AdaptiveStrategicExplorationOptimizer"] = AdaptiveStrategicExplorationOptimizer - LLAMAAdaptiveStrategicExplorationOptimizer = NonObjectOptimizer( - method="LLAMAAdaptiveStrategicExplorationOptimizer" - ).set_name("LLAMAAdaptiveStrategicExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveStrategicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveStrategicExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveStrategicExplorationOptimizer").set_name("LLAMAAdaptiveStrategicExplorationOptimizer", register=True) except Exception as e: print("AdaptiveStrategicExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSwarmDifferentialEvolution import ( - AdaptiveSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdaptiveSwarmDifferentialEvolution import AdaptiveSwarmDifferentialEvolution lama_register["AdaptiveSwarmDifferentialEvolution"] = AdaptiveSwarmDifferentialEvolution - LLAMAAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdaptiveSwarmDifferentialEvolution" - ).set_name("LLAMAAdaptiveSwarmDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSwarmDifferentialEvolution").set_name("LLAMAAdaptiveSwarmDifferentialEvolution", register=True) except Exception as e: print("AdaptiveSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSwarmGradientOptimization import ( - AdaptiveSwarmGradientOptimization, - ) + from nevergrad.optimization.lama.AdaptiveSwarmGradientOptimization import AdaptiveSwarmGradientOptimization lama_register["AdaptiveSwarmGradientOptimization"] = AdaptiveSwarmGradientOptimization - LLAMAAdaptiveSwarmGradientOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveSwarmGradientOptimization" - ).set_name("LLAMAAdaptiveSwarmGradientOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmGradientOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSwarmGradientOptimization = NonObjectOptimizer(method="LLAMAAdaptiveSwarmGradientOptimization").set_name("LLAMAAdaptiveSwarmGradientOptimization", register=True) except Exception as e: print("AdaptiveSwarmGradientOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveSwarmHarmonicOptimizationV4 import ( - AdaptiveSwarmHarmonicOptimizationV4, - ) + from nevergrad.optimization.lama.AdaptiveSwarmHarmonicOptimizationV4 import AdaptiveSwarmHarmonicOptimizationV4 lama_register["AdaptiveSwarmHarmonicOptimizationV4"] = AdaptiveSwarmHarmonicOptimizationV4 - LLAMAAdaptiveSwarmHarmonicOptimizationV4 = NonObjectOptimizer( - method="LLAMAAdaptiveSwarmHarmonicOptimizationV4" - ).set_name("LLAMAAdaptiveSwarmHarmonicOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHarmonicOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSwarmHarmonicOptimizationV4 = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHarmonicOptimizationV4").set_name("LLAMAAdaptiveSwarmHarmonicOptimizationV4", register=True) except Exception as e: print("AdaptiveSwarmHarmonicOptimizationV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdaptiveSwarmHybridOptimization import AdaptiveSwarmHybridOptimization lama_register["AdaptiveSwarmHybridOptimization"] = AdaptiveSwarmHybridOptimization - LLAMAAdaptiveSwarmHybridOptimization = NonObjectOptimizer( - method="LLAMAAdaptiveSwarmHybridOptimization" - ).set_name("LLAMAAdaptiveSwarmHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveSwarmHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHybridOptimization").set_name("LLAMAAdaptiveSwarmHybridOptimization", register=True) except Exception as e: print("AdaptiveSwarmHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdaptiveThresholdDifferentialStrategy import ( - AdaptiveThresholdDifferentialStrategy, - ) + from nevergrad.optimization.lama.AdaptiveThresholdDifferentialStrategy import AdaptiveThresholdDifferentialStrategy lama_register["AdaptiveThresholdDifferentialStrategy"] = AdaptiveThresholdDifferentialStrategy - LLAMAAdaptiveThresholdDifferentialStrategy = NonObjectOptimizer( - method="LLAMAAdaptiveThresholdDifferentialStrategy" - ).set_name("LLAMAAdaptiveThresholdDifferentialStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdaptiveThresholdDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdaptiveThresholdDifferentialStrategy = NonObjectOptimizer(method="LLAMAAdaptiveThresholdDifferentialStrategy").set_name("LLAMAAdaptiveThresholdDifferentialStrategy", register=True) except Exception as e: print("AdaptiveThresholdDifferentialStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveDifferentialEvolution import ( - AdvancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveDifferentialEvolution import AdvancedAdaptiveDifferentialEvolution lama_register["AdvancedAdaptiveDifferentialEvolution"] = AdvancedAdaptiveDifferentialEvolution - LLAMAAdvancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveDifferentialEvolution" - ).set_name("LLAMAAdvancedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDifferentialEvolution").set_name("LLAMAAdvancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("AdvancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveDualPhaseStrategy import ( - AdvancedAdaptiveDualPhaseStrategy, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveDualPhaseStrategy import AdvancedAdaptiveDualPhaseStrategy lama_register["AdvancedAdaptiveDualPhaseStrategy"] = AdvancedAdaptiveDualPhaseStrategy - LLAMAAdvancedAdaptiveDualPhaseStrategy = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveDualPhaseStrategy" - ).set_name("LLAMAAdvancedAdaptiveDualPhaseStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDualPhaseStrategy").set_name("LLAMAAdvancedAdaptiveDualPhaseStrategy", register=True) except Exception as e: print("AdvancedAdaptiveDualPhaseStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMemoryStrategyV64 import ( - AdvancedAdaptiveDynamicMemoryStrategyV64, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMemoryStrategyV64 import AdvancedAdaptiveDynamicMemoryStrategyV64 lama_register["AdvancedAdaptiveDynamicMemoryStrategyV64"] = AdvancedAdaptiveDynamicMemoryStrategyV64 - LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64" - ).set_name("LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64").set_name("LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64", register=True) except Exception as e: print("AdvancedAdaptiveDynamicMemoryStrategyV64 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( - AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveExplorationExploitationAlgorithm import ( - AdvancedAdaptiveExplorationExploitationAlgorithm, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationExploitationAlgorithm import AdvancedAdaptiveExplorationExploitationAlgorithm - lama_register["AdvancedAdaptiveExplorationExploitationAlgorithm"] = ( - AdvancedAdaptiveExplorationExploitationAlgorithm - ) - LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm" - ).set_name("LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm", register=True) + lama_register["AdvancedAdaptiveExplorationExploitationAlgorithm"] = AdvancedAdaptiveExplorationExploitationAlgorithm + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm", register=True) except Exception as e: print("AdvancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveExplorationOptimizationAlgorithm import ( - AdvancedAdaptiveExplorationOptimizationAlgorithm, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationOptimizationAlgorithm import AdvancedAdaptiveExplorationOptimizationAlgorithm - lama_register["AdvancedAdaptiveExplorationOptimizationAlgorithm"] = ( - AdvancedAdaptiveExplorationOptimizationAlgorithm - ) - LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm" - ).set_name("LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm", register=True) + lama_register["AdvancedAdaptiveExplorationOptimizationAlgorithm"] = AdvancedAdaptiveExplorationOptimizationAlgorithm + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm").set_name("LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm", register=True) except Exception as e: print("AdvancedAdaptiveExplorationOptimizationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveFireworkAlgorithm import ( - AdvancedAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveFireworkAlgorithm import AdvancedAdaptiveFireworkAlgorithm lama_register["AdvancedAdaptiveFireworkAlgorithm"] = AdvancedAdaptiveFireworkAlgorithm - LLAMAAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveFireworkAlgorithm" - ).set_name("LLAMAAdvancedAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveFireworkAlgorithm").set_name("LLAMAAdvancedAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("AdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveGlobalClimbingOptimizerV6 import ( - AdvancedAdaptiveGlobalClimbingOptimizerV6, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveGlobalClimbingOptimizerV6 import AdvancedAdaptiveGlobalClimbingOptimizerV6 lama_register["AdvancedAdaptiveGlobalClimbingOptimizerV6"] = AdvancedAdaptiveGlobalClimbingOptimizerV6 - LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6" - ).set_name("LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6").set_name("LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6", register=True) except Exception as e: print("AdvancedAdaptiveGlobalClimbingOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveGradientBoostedMemoryExploration import ( - AdvancedAdaptiveGradientBoostedMemoryExploration, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveGradientBoostedMemoryExploration import AdvancedAdaptiveGradientBoostedMemoryExploration - lama_register["AdvancedAdaptiveGradientBoostedMemoryExploration"] = ( - AdvancedAdaptiveGradientBoostedMemoryExploration - ) - LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration" - ).set_name("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration", register=True) + lama_register["AdvancedAdaptiveGradientBoostedMemoryExploration"] = AdvancedAdaptiveGradientBoostedMemoryExploration + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration").set_name("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration", register=True) except Exception as e: print("AdvancedAdaptiveGradientBoostedMemoryExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveGradientHybridOptimizer import ( - AdvancedAdaptiveGradientHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveGradientHybridOptimizer import AdvancedAdaptiveGradientHybridOptimizer lama_register["AdvancedAdaptiveGradientHybridOptimizer"] = AdvancedAdaptiveGradientHybridOptimizer - LLAMAAdvancedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveGradientHybridOptimizer" - ).set_name("LLAMAAdvancedAdaptiveGradientHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientHybridOptimizer").set_name("LLAMAAdvancedAdaptiveGradientHybridOptimizer", register=True) except Exception as e: print("AdvancedAdaptiveGradientHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV56 import ( - AdvancedAdaptiveMemoryEnhancedStrategyV56, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV56 import AdvancedAdaptiveMemoryEnhancedStrategyV56 lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV56"] = AdvancedAdaptiveMemoryEnhancedStrategyV56 - LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56" - ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56").set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56", register=True) except Exception as e: print("AdvancedAdaptiveMemoryEnhancedStrategyV56 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV73 import ( - AdvancedAdaptiveMemoryEnhancedStrategyV73, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV73 import AdvancedAdaptiveMemoryEnhancedStrategyV73 lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV73"] = AdvancedAdaptiveMemoryEnhancedStrategyV73 - LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73" - ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73").set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73", register=True) except Exception as e: print("AdvancedAdaptiveMemoryEnhancedStrategyV73 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryGuidedStrategyV77 import ( - AdvancedAdaptiveMemoryGuidedStrategyV77, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryGuidedStrategyV77 import AdvancedAdaptiveMemoryGuidedStrategyV77 lama_register["AdvancedAdaptiveMemoryGuidedStrategyV77"] = AdvancedAdaptiveMemoryGuidedStrategyV77 - LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77" - ).set_name("LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77").set_name("LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77", register=True) except Exception as e: print("AdvancedAdaptiveMemoryGuidedStrategyV77 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemorySimulatedAnnealing import ( - AdvancedAdaptiveMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveMemorySimulatedAnnealing import AdvancedAdaptiveMemorySimulatedAnnealing lama_register["AdvancedAdaptiveMemorySimulatedAnnealing"] = AdvancedAdaptiveMemorySimulatedAnnealing - LLAMAAdvancedAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing" - ).set_name("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing").set_name("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing", register=True) except Exception as e: print("AdvancedAdaptiveMemorySimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedAdaptivePSO import AdvancedAdaptivePSO lama_register["AdvancedAdaptivePSO"] = AdvancedAdaptivePSO - LLAMAAdvancedAdaptivePSO = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO").set_name( - "LLAMAAdvancedAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptivePSO = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO").set_name("LLAMAAdvancedAdaptivePSO", register=True) except Exception as e: print("AdvancedAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedAdaptiveQuantumEntropyDE import AdvancedAdaptiveQuantumEntropyDE lama_register["AdvancedAdaptiveQuantumEntropyDE"] = AdvancedAdaptiveQuantumEntropyDE - LLAMAAdvancedAdaptiveQuantumEntropyDE = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveQuantumEntropyDE" - ).set_name("LLAMAAdvancedAdaptiveQuantumEntropyDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumEntropyDE").set_name("LLAMAAdvancedAdaptiveQuantumEntropyDE", register=True) except Exception as e: print("AdvancedAdaptiveQuantumEntropyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumLevyOptimizer import ( - AdvancedAdaptiveQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumLevyOptimizer import AdvancedAdaptiveQuantumLevyOptimizer lama_register["AdvancedAdaptiveQuantumLevyOptimizer"] = AdvancedAdaptiveQuantumLevyOptimizer - LLAMAAdvancedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer" - ).set_name("LLAMAAdvancedAdaptiveQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer").set_name("LLAMAAdvancedAdaptiveQuantumLevyOptimizer", register=True) except Exception as e: print("AdvancedAdaptiveQuantumLevyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV1 import ( - AdvancedAdaptiveQuantumSwarmOptimizationV1, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV1 import AdvancedAdaptiveQuantumSwarmOptimizationV1 lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV1"] = AdvancedAdaptiveQuantumSwarmOptimizationV1 - LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1" - ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1").set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1", register=True) except Exception as e: print("AdvancedAdaptiveQuantumSwarmOptimizationV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV2 import ( - AdvancedAdaptiveQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV2 import AdvancedAdaptiveQuantumSwarmOptimizationV2 lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV2"] = AdvancedAdaptiveQuantumSwarmOptimizationV2 - LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2" - ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2", register=True) except Exception as e: print("AdvancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAdaptiveStrategyOptimizer import ( - AdvancedAdaptiveStrategyOptimizer, - ) + from nevergrad.optimization.lama.AdvancedAdaptiveStrategyOptimizer import AdvancedAdaptiveStrategyOptimizer lama_register["AdvancedAdaptiveStrategyOptimizer"] = AdvancedAdaptiveStrategyOptimizer - LLAMAAdvancedAdaptiveStrategyOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedAdaptiveStrategyOptimizer" - ).set_name("LLAMAAdvancedAdaptiveStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAdaptiveStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveStrategyOptimizer").set_name("LLAMAAdvancedAdaptiveStrategyOptimizer", register=True) except Exception as e: print("AdvancedAdaptiveStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedAttenuatedAdaptiveEvolver import ( - AdvancedAttenuatedAdaptiveEvolver, - ) + from nevergrad.optimization.lama.AdvancedAttenuatedAdaptiveEvolver import AdvancedAttenuatedAdaptiveEvolver lama_register["AdvancedAttenuatedAdaptiveEvolver"] = AdvancedAttenuatedAdaptiveEvolver - LLAMAAdvancedAttenuatedAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAAdvancedAttenuatedAdaptiveEvolver" - ).set_name("LLAMAAdvancedAttenuatedAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAdvancedAttenuatedAdaptiveEvolver").set_name("LLAMAAdvancedAttenuatedAdaptiveEvolver", register=True) except Exception as e: print("AdvancedAttenuatedAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedBalancedAdaptiveElitistStrategyV2 import ( - AdvancedBalancedAdaptiveElitistStrategyV2, - ) + from nevergrad.optimization.lama.AdvancedBalancedAdaptiveElitistStrategyV2 import AdvancedBalancedAdaptiveElitistStrategyV2 lama_register["AdvancedBalancedAdaptiveElitistStrategyV2"] = AdvancedBalancedAdaptiveElitistStrategyV2 - LLAMAAdvancedBalancedAdaptiveElitistStrategyV2 = NonObjectOptimizer( - method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2" - ).set_name("LLAMAAdvancedBalancedAdaptiveElitistStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedBalancedAdaptiveElitistStrategyV2 = NonObjectOptimizer(method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2").set_name("LLAMAAdvancedBalancedAdaptiveElitistStrategyV2", register=True) except Exception as e: print("AdvancedBalancedAdaptiveElitistStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedBalancedExplorationOptimizer import ( - AdvancedBalancedExplorationOptimizer, - ) + from nevergrad.optimization.lama.AdvancedBalancedExplorationOptimizer import AdvancedBalancedExplorationOptimizer lama_register["AdvancedBalancedExplorationOptimizer"] = AdvancedBalancedExplorationOptimizer - LLAMAAdvancedBalancedExplorationOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedBalancedExplorationOptimizer" - ).set_name("LLAMAAdvancedBalancedExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedBalancedExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedBalancedExplorationOptimizer").set_name("LLAMAAdvancedBalancedExplorationOptimizer", register=True) except Exception as e: print("AdvancedBalancedExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRate import ( - AdvancedDifferentialEvolutionWithAdaptiveLearningRate, - ) + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRate import AdvancedDifferentialEvolutionWithAdaptiveLearningRate - lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRate"] = ( - AdvancedDifferentialEvolutionWithAdaptiveLearningRate - ) - LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( - method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate" - ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate", register=True) + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRate"] = AdvancedDifferentialEvolutionWithAdaptiveLearningRate + res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate").set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate", register=True) except Exception as e: print("AdvancedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 import ( - AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2, - ) + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 import AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 - lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2"] = ( - AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 - ) - LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 = NonObjectOptimizer( - method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2" - ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2", register=True) + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2"] = AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 + res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2").set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2", register=True) except Exception as e: print("AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDifferentialParticleSwarmOptimization import ( - AdvancedDifferentialParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.AdvancedDifferentialParticleSwarmOptimization import AdvancedDifferentialParticleSwarmOptimization - lama_register["AdvancedDifferentialParticleSwarmOptimization"] = ( - AdvancedDifferentialParticleSwarmOptimization - ) - LLAMAAdvancedDifferentialParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdvancedDifferentialParticleSwarmOptimization" - ).set_name("LLAMAAdvancedDifferentialParticleSwarmOptimization", register=True) + lama_register["AdvancedDifferentialParticleSwarmOptimization"] = AdvancedDifferentialParticleSwarmOptimization + res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdvancedDifferentialParticleSwarmOptimization").set_name("LLAMAAdvancedDifferentialParticleSwarmOptimization", register=True) except Exception as e: print("AdvancedDifferentialParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDimensionalCyclicCrossoverEvolver import ( - AdvancedDimensionalCyclicCrossoverEvolver, - ) + from nevergrad.optimization.lama.AdvancedDimensionalCyclicCrossoverEvolver import AdvancedDimensionalCyclicCrossoverEvolver lama_register["AdvancedDimensionalCyclicCrossoverEvolver"] = AdvancedDimensionalCyclicCrossoverEvolver - LLAMAAdvancedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( - method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver" - ).set_name("LLAMAAdvancedDimensionalCyclicCrossoverEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer(method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver").set_name("LLAMAAdvancedDimensionalCyclicCrossoverEvolver", register=True) except Exception as e: print("AdvancedDimensionalCyclicCrossoverEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDimensionalFeedbackEvolver import ( - AdvancedDimensionalFeedbackEvolver, - ) + from nevergrad.optimization.lama.AdvancedDimensionalFeedbackEvolver import AdvancedDimensionalFeedbackEvolver lama_register["AdvancedDimensionalFeedbackEvolver"] = AdvancedDimensionalFeedbackEvolver - LLAMAAdvancedDimensionalFeedbackEvolver = NonObjectOptimizer( - method="LLAMAAdvancedDimensionalFeedbackEvolver" - ).set_name("LLAMAAdvancedDimensionalFeedbackEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalFeedbackEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDimensionalFeedbackEvolver = NonObjectOptimizer(method="LLAMAAdvancedDimensionalFeedbackEvolver").set_name("LLAMAAdvancedDimensionalFeedbackEvolver", register=True) except Exception as e: print("AdvancedDimensionalFeedbackEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDiversityAdaptiveDE import AdvancedDiversityAdaptiveDE lama_register["AdvancedDiversityAdaptiveDE"] = AdvancedDiversityAdaptiveDE - LLAMAAdvancedDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE").set_name( - "LLAMAAdvancedDiversityAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE").set_name("LLAMAAdvancedDiversityAdaptiveDE", register=True) except Exception as e: print("AdvancedDiversityAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDiversityDE import AdvancedDiversityDE lama_register["AdvancedDiversityDE"] = AdvancedDiversityDE - LLAMAAdvancedDiversityDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE").set_name( - "LLAMAAdvancedDiversityDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDiversityDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE").set_name("LLAMAAdvancedDiversityDE", register=True) except Exception as e: print("AdvancedDiversityDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDualStrategyAdaptiveDE import AdvancedDualStrategyAdaptiveDE lama_register["AdvancedDualStrategyAdaptiveDE"] = AdvancedDualStrategyAdaptiveDE - LLAMAAdvancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAAdvancedDualStrategyAdaptiveDE" - ).set_name("LLAMAAdvancedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyAdaptiveDE").set_name("LLAMAAdvancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("AdvancedDualStrategyAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDualStrategyHybridDE import AdvancedDualStrategyHybridDE lama_register["AdvancedDualStrategyHybridDE"] = AdvancedDualStrategyHybridDE - LLAMAAdvancedDualStrategyHybridDE = NonObjectOptimizer( - method="LLAMAAdvancedDualStrategyHybridDE" - ).set_name("LLAMAAdvancedDualStrategyHybridDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDualStrategyHybridDE = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyHybridDE").set_name("LLAMAAdvancedDualStrategyHybridDE", register=True) except Exception as e: print("AdvancedDualStrategyHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( - AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - lama_register["AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( - AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - ) - LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" - ).set_name("LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) + lama_register["AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridOptimizer import ( - AdvancedDynamicAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridOptimizer import AdvancedDynamicAdaptiveHybridOptimizer lama_register["AdvancedDynamicAdaptiveHybridOptimizer"] = AdvancedDynamicAdaptiveHybridOptimizer - LLAMAAdvancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer" - ).set_name("LLAMAAdvancedDynamicAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer").set_name("LLAMAAdvancedDynamicAdaptiveHybridOptimizer", register=True) except Exception as e: print("AdvancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDynamicCrowdedDE import AdvancedDynamicCrowdedDE lama_register["AdvancedDynamicCrowdedDE"] = AdvancedDynamicCrowdedDE - LLAMAAdvancedDynamicCrowdedDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE").set_name( - "LLAMAAdvancedDynamicCrowdedDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicCrowdedDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE").set_name("LLAMAAdvancedDynamicCrowdedDE", register=True) except Exception as e: print("AdvancedDynamicCrowdedDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicDualPhaseStrategyV37 import ( - AdvancedDynamicDualPhaseStrategyV37, - ) + from nevergrad.optimization.lama.AdvancedDynamicDualPhaseStrategyV37 import AdvancedDynamicDualPhaseStrategyV37 lama_register["AdvancedDynamicDualPhaseStrategyV37"] = AdvancedDynamicDualPhaseStrategyV37 - LLAMAAdvancedDynamicDualPhaseStrategyV37 = NonObjectOptimizer( - method="LLAMAAdvancedDynamicDualPhaseStrategyV37" - ).set_name("LLAMAAdvancedDynamicDualPhaseStrategyV37", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicDualPhaseStrategyV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicDualPhaseStrategyV37 = NonObjectOptimizer(method="LLAMAAdvancedDynamicDualPhaseStrategyV37").set_name("LLAMAAdvancedDynamicDualPhaseStrategyV37", register=True) except Exception as e: print("AdvancedDynamicDualPhaseStrategyV37 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicExplorationOptimizer import ( - AdvancedDynamicExplorationOptimizer, - ) + from nevergrad.optimization.lama.AdvancedDynamicExplorationOptimizer import AdvancedDynamicExplorationOptimizer lama_register["AdvancedDynamicExplorationOptimizer"] = AdvancedDynamicExplorationOptimizer - LLAMAAdvancedDynamicExplorationOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedDynamicExplorationOptimizer" - ).set_name("LLAMAAdvancedDynamicExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicExplorationOptimizer").set_name("LLAMAAdvancedDynamicExplorationOptimizer", register=True) except Exception as e: print("AdvancedDynamicExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDynamicFireworkAlgorithm import AdvancedDynamicFireworkAlgorithm lama_register["AdvancedDynamicFireworkAlgorithm"] = AdvancedDynamicFireworkAlgorithm - LLAMAAdvancedDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdvancedDynamicFireworkAlgorithm" - ).set_name("LLAMAAdvancedDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedDynamicFireworkAlgorithm").set_name("LLAMAAdvancedDynamicFireworkAlgorithm", register=True) except Exception as e: print("AdvancedDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicGradientBoostedMemorySimulatedAnnealing import ( - AdvancedDynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdvancedDynamicGradientBoostedMemorySimulatedAnnealing import AdvancedDynamicGradientBoostedMemorySimulatedAnnealing - lama_register["AdvancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( - AdvancedDynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["AdvancedDynamicGradientBoostedMemorySimulatedAnnealing"] = AdvancedDynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("AdvancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicHybridOptimization import ( - AdvancedDynamicHybridOptimization, - ) + from nevergrad.optimization.lama.AdvancedDynamicHybridOptimization import AdvancedDynamicHybridOptimization lama_register["AdvancedDynamicHybridOptimization"] = AdvancedDynamicHybridOptimization - LLAMAAdvancedDynamicHybridOptimization = NonObjectOptimizer( - method="LLAMAAdvancedDynamicHybridOptimization" - ).set_name("LLAMAAdvancedDynamicHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimization").set_name("LLAMAAdvancedDynamicHybridOptimization", register=True) except Exception as e: print("AdvancedDynamicHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedDynamicHybridOptimizer import AdvancedDynamicHybridOptimizer lama_register["AdvancedDynamicHybridOptimizer"] = AdvancedDynamicHybridOptimizer - LLAMAAdvancedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedDynamicHybridOptimizer" - ).set_name("LLAMAAdvancedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimizer").set_name("LLAMAAdvancedDynamicHybridOptimizer", register=True) except Exception as e: print("AdvancedDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicMultimodalSimulatedAnnealing import ( - AdvancedDynamicMultimodalSimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdvancedDynamicMultimodalSimulatedAnnealing import AdvancedDynamicMultimodalSimulatedAnnealing lama_register["AdvancedDynamicMultimodalSimulatedAnnealing"] = AdvancedDynamicMultimodalSimulatedAnnealing - LLAMAAdvancedDynamicMultimodalSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing" - ).set_name("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicMultimodalSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing").set_name("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing", register=True) except Exception as e: print("AdvancedDynamicMultimodalSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedDynamicStrategyAdaptiveDE import ( - AdvancedDynamicStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.AdvancedDynamicStrategyAdaptiveDE import AdvancedDynamicStrategyAdaptiveDE lama_register["AdvancedDynamicStrategyAdaptiveDE"] = AdvancedDynamicStrategyAdaptiveDE - LLAMAAdvancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAAdvancedDynamicStrategyAdaptiveDE" - ).set_name("LLAMAAdvancedDynamicStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedDynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicStrategyAdaptiveDE").set_name("LLAMAAdvancedDynamicStrategyAdaptiveDE", register=True) except Exception as e: print("AdvancedDynamicStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEliteAdaptiveCrowdingHybridOptimizer import ( - AdvancedEliteAdaptiveCrowdingHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedEliteAdaptiveCrowdingHybridOptimizer import AdvancedEliteAdaptiveCrowdingHybridOptimizer - lama_register["AdvancedEliteAdaptiveCrowdingHybridOptimizer"] = ( - AdvancedEliteAdaptiveCrowdingHybridOptimizer - ) - LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer" - ).set_name("LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer", register=True) + lama_register["AdvancedEliteAdaptiveCrowdingHybridOptimizer"] = AdvancedEliteAdaptiveCrowdingHybridOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer").set_name("LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer", register=True) except Exception as e: print("AdvancedEliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEliteDynamicHybridOptimizer import ( - AdvancedEliteDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedEliteDynamicHybridOptimizer import AdvancedEliteDynamicHybridOptimizer lama_register["AdvancedEliteDynamicHybridOptimizer"] = AdvancedEliteDynamicHybridOptimizer - LLAMAAdvancedEliteDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedEliteDynamicHybridOptimizer" - ).set_name("LLAMAAdvancedEliteDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEliteDynamicHybridOptimizer").set_name("LLAMAAdvancedEliteDynamicHybridOptimizer", register=True) except Exception as e: print("AdvancedEliteDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveFireworkAlgorithm import ( - AdvancedEnhancedAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveFireworkAlgorithm import AdvancedEnhancedAdaptiveFireworkAlgorithm lama_register["AdvancedEnhancedAdaptiveFireworkAlgorithm"] = AdvancedEnhancedAdaptiveFireworkAlgorithm - LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm" - ).set_name("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm").set_name("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("AdvancedEnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveMetaNetAQAPSO import ( - AdvancedEnhancedAdaptiveMetaNetAQAPSO, - ) + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveMetaNetAQAPSO import AdvancedEnhancedAdaptiveMetaNetAQAPSO lama_register["AdvancedEnhancedAdaptiveMetaNetAQAPSO"] = AdvancedEnhancedAdaptiveMetaNetAQAPSO - LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO" - ).set_name("LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO", register=True) except Exception as e: print("AdvancedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 import ( - AdvancedEnhancedDifferentialEvolutionLocalSearch_v55, - ) + from nevergrad.optimization.lama.AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 import AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 - lama_register["AdvancedEnhancedDifferentialEvolutionLocalSearch_v55"] = ( - AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 - ) - LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55 = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55" - ).set_name("LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55", register=True) + lama_register["AdvancedEnhancedDifferentialEvolutionLocalSearch_v55"] = AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55").set_name("LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55", register=True) except Exception as e: print("AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedEnhancedGuidedMassQGSA_v69 import ( - AdvancedEnhancedEnhancedGuidedMassQGSA_v69, - ) + from nevergrad.optimization.lama.AdvancedEnhancedEnhancedGuidedMassQGSA_v69 import AdvancedEnhancedEnhancedGuidedMassQGSA_v69 lama_register["AdvancedEnhancedEnhancedGuidedMassQGSA_v69"] = AdvancedEnhancedEnhancedGuidedMassQGSA_v69 - LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69 = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69" - ).set_name("LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69").set_name("LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69", register=True) except Exception as e: print("AdvancedEnhancedEnhancedGuidedMassQGSA_v69 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedGuidedMassQGSA_v65 import ( - AdvancedEnhancedGuidedMassQGSA_v65, - ) + from nevergrad.optimization.lama.AdvancedEnhancedGuidedMassQGSA_v65 import AdvancedEnhancedGuidedMassQGSA_v65 lama_register["AdvancedEnhancedGuidedMassQGSA_v65"] = AdvancedEnhancedGuidedMassQGSA_v65 - LLAMAAdvancedEnhancedGuidedMassQGSA_v65 = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65" - ).set_name("LLAMAAdvancedEnhancedGuidedMassQGSA_v65", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedGuidedMassQGSA_v65 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65").set_name("LLAMAAdvancedEnhancedGuidedMassQGSA_v65", register=True) except Exception as e: print("AdvancedEnhancedGuidedMassQGSA_v65 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizer import ( - AdvancedEnhancedHybridMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizer import AdvancedEnhancedHybridMetaHeuristicOptimizer - lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizer"] = ( - AdvancedEnhancedHybridMetaHeuristicOptimizer - ) - LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer" - ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer", register=True) + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizer"] = AdvancedEnhancedHybridMetaHeuristicOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer", register=True) except Exception as e: print("AdvancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizerV16 import ( - AdvancedEnhancedHybridMetaHeuristicOptimizerV16, - ) + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizerV16 import AdvancedEnhancedHybridMetaHeuristicOptimizerV16 - lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizerV16"] = ( - AdvancedEnhancedHybridMetaHeuristicOptimizerV16 - ) - LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16 = NonObjectOptimizer( - method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16" - ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16", register=True) + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizerV16"] = AdvancedEnhancedHybridMetaHeuristicOptimizerV16 + res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16").set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16", register=True) except Exception as e: print("AdvancedEnhancedHybridMetaHeuristicOptimizerV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedExplorativeConvergenceEnhancer import ( - AdvancedExplorativeConvergenceEnhancer, - ) + from nevergrad.optimization.lama.AdvancedExplorativeConvergenceEnhancer import AdvancedExplorativeConvergenceEnhancer lama_register["AdvancedExplorativeConvergenceEnhancer"] = AdvancedExplorativeConvergenceEnhancer - LLAMAAdvancedExplorativeConvergenceEnhancer = NonObjectOptimizer( - method="LLAMAAdvancedExplorativeConvergenceEnhancer" - ).set_name("LLAMAAdvancedExplorativeConvergenceEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedExplorativeConvergenceEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedExplorativeConvergenceEnhancer = NonObjectOptimizer(method="LLAMAAdvancedExplorativeConvergenceEnhancer").set_name("LLAMAAdvancedExplorativeConvergenceEnhancer", register=True) except Exception as e: print("AdvancedExplorativeConvergenceEnhancer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedFireworkAlgorithmWithAdaptiveMutation import ( - AdvancedFireworkAlgorithmWithAdaptiveMutation, - ) + from nevergrad.optimization.lama.AdvancedFireworkAlgorithmWithAdaptiveMutation import AdvancedFireworkAlgorithmWithAdaptiveMutation - lama_register["AdvancedFireworkAlgorithmWithAdaptiveMutation"] = ( - AdvancedFireworkAlgorithmWithAdaptiveMutation - ) - LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( - method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation" - ).set_name("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation", register=True) + lama_register["AdvancedFireworkAlgorithmWithAdaptiveMutation"] = AdvancedFireworkAlgorithmWithAdaptiveMutation + res = NonObjectOptimizer(method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer(method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation").set_name("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation", register=True) except Exception as e: print("AdvancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedFocusedAdaptiveOptimizer import AdvancedFocusedAdaptiveOptimizer lama_register["AdvancedFocusedAdaptiveOptimizer"] = AdvancedFocusedAdaptiveOptimizer - LLAMAAdvancedFocusedAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedFocusedAdaptiveOptimizer" - ).set_name("LLAMAAdvancedFocusedAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedFocusedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAAdvancedFocusedAdaptiveOptimizer").set_name("LLAMAAdvancedFocusedAdaptiveOptimizer", register=True) except Exception as e: print("AdvancedFocusedAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedGlobalClimbingOptimizerV4 import ( - AdvancedGlobalClimbingOptimizerV4, - ) + from nevergrad.optimization.lama.AdvancedGlobalClimbingOptimizerV4 import AdvancedGlobalClimbingOptimizerV4 lama_register["AdvancedGlobalClimbingOptimizerV4"] = AdvancedGlobalClimbingOptimizerV4 - LLAMAAdvancedGlobalClimbingOptimizerV4 = NonObjectOptimizer( - method="LLAMAAdvancedGlobalClimbingOptimizerV4" - ).set_name("LLAMAAdvancedGlobalClimbingOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedGlobalClimbingOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedGlobalClimbingOptimizerV4 = NonObjectOptimizer(method="LLAMAAdvancedGlobalClimbingOptimizerV4").set_name("LLAMAAdvancedGlobalClimbingOptimizerV4", register=True) except Exception as e: print("AdvancedGlobalClimbingOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedGlobalStructureAwareOptimizerV3 import ( - AdvancedGlobalStructureAwareOptimizerV3, - ) + from nevergrad.optimization.lama.AdvancedGlobalStructureAwareOptimizerV3 import AdvancedGlobalStructureAwareOptimizerV3 lama_register["AdvancedGlobalStructureAwareOptimizerV3"] = AdvancedGlobalStructureAwareOptimizerV3 - LLAMAAdvancedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( - method="LLAMAAdvancedGlobalStructureAwareOptimizerV3" - ).set_name("LLAMAAdvancedGlobalStructureAwareOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer(method="LLAMAAdvancedGlobalStructureAwareOptimizerV3").set_name("LLAMAAdvancedGlobalStructureAwareOptimizerV3", register=True) except Exception as e: print("AdvancedGlobalStructureAwareOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration import ( - AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration, - ) + from nevergrad.optimization.lama.AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration import AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration - lama_register["AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration"] = ( - AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration - ) - LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration = NonObjectOptimizer( - method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration" - ).set_name("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration", register=True) + lama_register["AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration"] = AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration + res = NonObjectOptimizer(method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration = NonObjectOptimizer(method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration").set_name("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration", register=True) except Exception as e: print("AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategy import ( - AdvancedGradientEvolutionStrategy, - ) + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategy import AdvancedGradientEvolutionStrategy lama_register["AdvancedGradientEvolutionStrategy"] = AdvancedGradientEvolutionStrategy - LLAMAAdvancedGradientEvolutionStrategy = NonObjectOptimizer( - method="LLAMAAdvancedGradientEvolutionStrategy" - ).set_name("LLAMAAdvancedGradientEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedGradientEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategy").set_name("LLAMAAdvancedGradientEvolutionStrategy", register=True) except Exception as e: print("AdvancedGradientEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategyV2 import ( - AdvancedGradientEvolutionStrategyV2, - ) + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategyV2 import AdvancedGradientEvolutionStrategyV2 lama_register["AdvancedGradientEvolutionStrategyV2"] = AdvancedGradientEvolutionStrategyV2 - LLAMAAdvancedGradientEvolutionStrategyV2 = NonObjectOptimizer( - method="LLAMAAdvancedGradientEvolutionStrategyV2" - ).set_name("LLAMAAdvancedGradientEvolutionStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedGradientEvolutionStrategyV2 = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategyV2").set_name("LLAMAAdvancedGradientEvolutionStrategyV2", register=True) except Exception as e: print("AdvancedGradientEvolutionStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHarmonyMemeticOptimization import ( - AdvancedHarmonyMemeticOptimization, - ) + from nevergrad.optimization.lama.AdvancedHarmonyMemeticOptimization import AdvancedHarmonyMemeticOptimization lama_register["AdvancedHarmonyMemeticOptimization"] = AdvancedHarmonyMemeticOptimization - LLAMAAdvancedHarmonyMemeticOptimization = NonObjectOptimizer( - method="LLAMAAdvancedHarmonyMemeticOptimization" - ).set_name("LLAMAAdvancedHarmonyMemeticOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHarmonyMemeticOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHarmonyMemeticOptimization = NonObjectOptimizer(method="LLAMAAdvancedHarmonyMemeticOptimization").set_name("LLAMAAdvancedHarmonyMemeticOptimization", register=True) except Exception as e: print("AdvancedHarmonyMemeticOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHarmonySearch import AdvancedHarmonySearch lama_register["AdvancedHarmonySearch"] = AdvancedHarmonySearch - LLAMAAdvancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch").set_name( - "LLAMAAdvancedHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch").set_name("LLAMAAdvancedHarmonySearch", register=True) except Exception as e: print("AdvancedHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHybridAdaptiveDE import AdvancedHybridAdaptiveDE lama_register["AdvancedHybridAdaptiveDE"] = AdvancedHybridAdaptiveDE - LLAMAAdvancedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE").set_name( - "LLAMAAdvancedHybridAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE").set_name("LLAMAAdvancedHybridAdaptiveDE", register=True) except Exception as e: print("AdvancedHybridAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridAdaptiveOptimization import ( - AdvancedHybridAdaptiveOptimization, - ) + from nevergrad.optimization.lama.AdvancedHybridAdaptiveOptimization import AdvancedHybridAdaptiveOptimization lama_register["AdvancedHybridAdaptiveOptimization"] = AdvancedHybridAdaptiveOptimization - LLAMAAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( - method="LLAMAAdvancedHybridAdaptiveOptimization" - ).set_name("LLAMAAdvancedHybridAdaptiveOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveOptimization").set_name("LLAMAAdvancedHybridAdaptiveOptimization", register=True) except Exception as e: print("AdvancedHybridAdaptiveOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( - AdvancedHybridCovarianceMatrixDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 import AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 - lama_register["AdvancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( - AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 - ) - LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3" - ).set_name("LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) + lama_register["AdvancedHybridCovarianceMatrixDifferentialEvolutionV3"] = AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 + res = NonObjectOptimizer(method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) except Exception as e: print("AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridDEPSOWithAdaptiveRestarts import ( - AdvancedHybridDEPSOWithAdaptiveRestarts, - ) + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithAdaptiveRestarts import AdvancedHybridDEPSOWithAdaptiveRestarts lama_register["AdvancedHybridDEPSOWithAdaptiveRestarts"] = AdvancedHybridDEPSOWithAdaptiveRestarts - LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts" - ).set_name("LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts").set_name("LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts", register=True) except Exception as e: print("AdvancedHybridDEPSOWithAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridDEPSOWithDynamicAdaptationAndRestart import ( - AdvancedHybridDEPSOWithDynamicAdaptationAndRestart, - ) + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithDynamicAdaptationAndRestart import AdvancedHybridDEPSOWithDynamicAdaptationAndRestart - lama_register["AdvancedHybridDEPSOWithDynamicAdaptationAndRestart"] = ( - AdvancedHybridDEPSOWithDynamicAdaptationAndRestart - ) - LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart = NonObjectOptimizer( - method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart" - ).set_name("LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart", register=True) + lama_register["AdvancedHybridDEPSOWithDynamicAdaptationAndRestart"] = AdvancedHybridDEPSOWithDynamicAdaptationAndRestart + res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart").set_name("LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart", register=True) except Exception as e: print("AdvancedHybridDEPSOWithDynamicAdaptationAndRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridExplorationExploitationOptimizer import ( - AdvancedHybridExplorationExploitationOptimizer, - ) + from nevergrad.optimization.lama.AdvancedHybridExplorationExploitationOptimizer import AdvancedHybridExplorationExploitationOptimizer - lama_register["AdvancedHybridExplorationExploitationOptimizer"] = ( - AdvancedHybridExplorationExploitationOptimizer - ) - LLAMAAdvancedHybridExplorationExploitationOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedHybridExplorationExploitationOptimizer" - ).set_name("LLAMAAdvancedHybridExplorationExploitationOptimizer", register=True) + lama_register["AdvancedHybridExplorationExploitationOptimizer"] = AdvancedHybridExplorationExploitationOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedHybridExplorationExploitationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridExplorationExploitationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridExplorationExploitationOptimizer").set_name("LLAMAAdvancedHybridExplorationExploitationOptimizer", register=True) except Exception as e: print("AdvancedHybridExplorationExploitationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridLocalOptimizationDE import ( - AdvancedHybridLocalOptimizationDE, - ) + from nevergrad.optimization.lama.AdvancedHybridLocalOptimizationDE import AdvancedHybridLocalOptimizationDE lama_register["AdvancedHybridLocalOptimizationDE"] = AdvancedHybridLocalOptimizationDE - LLAMAAdvancedHybridLocalOptimizationDE = NonObjectOptimizer( - method="LLAMAAdvancedHybridLocalOptimizationDE" - ).set_name("LLAMAAdvancedHybridLocalOptimizationDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridLocalOptimizationDE = NonObjectOptimizer(method="LLAMAAdvancedHybridLocalOptimizationDE").set_name("LLAMAAdvancedHybridLocalOptimizationDE", register=True) except Exception as e: print("AdvancedHybridLocalOptimizationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridMetaHeuristicOptimizer import ( - AdvancedHybridMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.AdvancedHybridMetaHeuristicOptimizer import AdvancedHybridMetaHeuristicOptimizer lama_register["AdvancedHybridMetaHeuristicOptimizer"] = AdvancedHybridMetaHeuristicOptimizer - LLAMAAdvancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedHybridMetaHeuristicOptimizer" - ).set_name("LLAMAAdvancedHybridMetaHeuristicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaHeuristicOptimizer").set_name("LLAMAAdvancedHybridMetaHeuristicOptimizer", register=True) except Exception as e: print("AdvancedHybridMetaHeuristicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHybridMetaheuristic import AdvancedHybridMetaheuristic lama_register["AdvancedHybridMetaheuristic"] = AdvancedHybridMetaheuristic - LLAMAAdvancedHybridMetaheuristic = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic").set_name( - "LLAMAAdvancedHybridMetaheuristic", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridMetaheuristic = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic").set_name("LLAMAAdvancedHybridMetaheuristic", register=True) except Exception as e: print("AdvancedHybridMetaheuristic can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHybridOptimization import AdvancedHybridOptimization lama_register["AdvancedHybridOptimization"] = AdvancedHybridOptimization - LLAMAAdvancedHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization").set_name( - "LLAMAAdvancedHybridOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization").set_name("LLAMAAdvancedHybridOptimization", register=True) except Exception as e: print("AdvancedHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHybridOptimizer import AdvancedHybridOptimizer lama_register["AdvancedHybridOptimizer"] = AdvancedHybridOptimizer - LLAMAAdvancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer").set_name( - "LLAMAAdvancedHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer").set_name("LLAMAAdvancedHybridOptimizer", register=True) except Exception as e: print("AdvancedHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedHybridQuantumAdaptiveDE import AdvancedHybridQuantumAdaptiveDE lama_register["AdvancedHybridQuantumAdaptiveDE"] = AdvancedHybridQuantumAdaptiveDE - LLAMAAdvancedHybridQuantumAdaptiveDE = NonObjectOptimizer( - method="LLAMAAdvancedHybridQuantumAdaptiveDE" - ).set_name("LLAMAAdvancedHybridQuantumAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridQuantumAdaptiveDE").set_name("LLAMAAdvancedHybridQuantumAdaptiveDE", register=True) except Exception as e: print("AdvancedHybridQuantumAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithAdaptiveMemory import ( - AdvancedHybridSimulatedAnnealingWithAdaptiveMemory, - ) + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithAdaptiveMemory import AdvancedHybridSimulatedAnnealingWithAdaptiveMemory - lama_register["AdvancedHybridSimulatedAnnealingWithAdaptiveMemory"] = ( - AdvancedHybridSimulatedAnnealingWithAdaptiveMemory - ) - LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory = NonObjectOptimizer( - method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory" - ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory", register=True) + lama_register["AdvancedHybridSimulatedAnnealingWithAdaptiveMemory"] = AdvancedHybridSimulatedAnnealingWithAdaptiveMemory + res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory").set_name("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory", register=True) except Exception as e: print("AdvancedHybridSimulatedAnnealingWithAdaptiveMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithGuidedExploration import ( - AdvancedHybridSimulatedAnnealingWithGuidedExploration, - ) + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithGuidedExploration import AdvancedHybridSimulatedAnnealingWithGuidedExploration - lama_register["AdvancedHybridSimulatedAnnealingWithGuidedExploration"] = ( - AdvancedHybridSimulatedAnnealingWithGuidedExploration - ) - LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration = NonObjectOptimizer( - method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration" - ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration", register=True) + lama_register["AdvancedHybridSimulatedAnnealingWithGuidedExploration"] = AdvancedHybridSimulatedAnnealingWithGuidedExploration + res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration").set_name("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration", register=True) except Exception as e: print("AdvancedHybridSimulatedAnnealingWithGuidedExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedImprovedMetaHeuristicOptimizer import ( - AdvancedImprovedMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.AdvancedImprovedMetaHeuristicOptimizer import AdvancedImprovedMetaHeuristicOptimizer lama_register["AdvancedImprovedMetaHeuristicOptimizer"] = AdvancedImprovedMetaHeuristicOptimizer - LLAMAAdvancedImprovedMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedImprovedMetaHeuristicOptimizer" - ).set_name("LLAMAAdvancedImprovedMetaHeuristicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedImprovedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedImprovedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedImprovedMetaHeuristicOptimizer").set_name("LLAMAAdvancedImprovedMetaHeuristicOptimizer", register=True) except Exception as e: print("AdvancedImprovedMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV5 import ( - AdvancedIslandEvolutionStrategyV5, - ) + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV5 import AdvancedIslandEvolutionStrategyV5 lama_register["AdvancedIslandEvolutionStrategyV5"] = AdvancedIslandEvolutionStrategyV5 - LLAMAAdvancedIslandEvolutionStrategyV5 = NonObjectOptimizer( - method="LLAMAAdvancedIslandEvolutionStrategyV5" - ).set_name("LLAMAAdvancedIslandEvolutionStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedIslandEvolutionStrategyV5 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV5").set_name("LLAMAAdvancedIslandEvolutionStrategyV5", register=True) except Exception as e: print("AdvancedIslandEvolutionStrategyV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV8 import ( - AdvancedIslandEvolutionStrategyV8, - ) + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV8 import AdvancedIslandEvolutionStrategyV8 lama_register["AdvancedIslandEvolutionStrategyV8"] = AdvancedIslandEvolutionStrategyV8 - LLAMAAdvancedIslandEvolutionStrategyV8 = NonObjectOptimizer( - method="LLAMAAdvancedIslandEvolutionStrategyV8" - ).set_name("LLAMAAdvancedIslandEvolutionStrategyV8", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedIslandEvolutionStrategyV8 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV8").set_name("LLAMAAdvancedIslandEvolutionStrategyV8", register=True) except Exception as e: print("AdvancedIslandEvolutionStrategyV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV9 import ( - AdvancedIslandEvolutionStrategyV9, - ) + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV9 import AdvancedIslandEvolutionStrategyV9 lama_register["AdvancedIslandEvolutionStrategyV9"] = AdvancedIslandEvolutionStrategyV9 - LLAMAAdvancedIslandEvolutionStrategyV9 = NonObjectOptimizer( - method="LLAMAAdvancedIslandEvolutionStrategyV9" - ).set_name("LLAMAAdvancedIslandEvolutionStrategyV9", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedIslandEvolutionStrategyV9 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV9").set_name("LLAMAAdvancedIslandEvolutionStrategyV9", register=True) except Exception as e: print("AdvancedIslandEvolutionStrategyV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMemeticQuantumDifferentialOptimizer import ( - AdvancedMemeticQuantumDifferentialOptimizer, - ) + from nevergrad.optimization.lama.AdvancedMemeticQuantumDifferentialOptimizer import AdvancedMemeticQuantumDifferentialOptimizer lama_register["AdvancedMemeticQuantumDifferentialOptimizer"] = AdvancedMemeticQuantumDifferentialOptimizer - LLAMAAdvancedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer" - ).set_name("LLAMAAdvancedMemeticQuantumDifferentialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer").set_name("LLAMAAdvancedMemeticQuantumDifferentialOptimizer", register=True) except Exception as e: print("AdvancedMemeticQuantumDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMemoryAdaptiveStrategyV50 import ( - AdvancedMemoryAdaptiveStrategyV50, - ) + from nevergrad.optimization.lama.AdvancedMemoryAdaptiveStrategyV50 import AdvancedMemoryAdaptiveStrategyV50 lama_register["AdvancedMemoryAdaptiveStrategyV50"] = AdvancedMemoryAdaptiveStrategyV50 - LLAMAAdvancedMemoryAdaptiveStrategyV50 = NonObjectOptimizer( - method="LLAMAAdvancedMemoryAdaptiveStrategyV50" - ).set_name("LLAMAAdvancedMemoryAdaptiveStrategyV50", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMemoryAdaptiveStrategyV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMemoryAdaptiveStrategyV50 = NonObjectOptimizer(method="LLAMAAdvancedMemoryAdaptiveStrategyV50").set_name("LLAMAAdvancedMemoryAdaptiveStrategyV50", register=True) except Exception as e: print("AdvancedMemoryAdaptiveStrategyV50 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMemoryEnhancedHybridOptimizer import ( - AdvancedMemoryEnhancedHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedMemoryEnhancedHybridOptimizer import AdvancedMemoryEnhancedHybridOptimizer lama_register["AdvancedMemoryEnhancedHybridOptimizer"] = AdvancedMemoryEnhancedHybridOptimizer - LLAMAAdvancedMemoryEnhancedHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedMemoryEnhancedHybridOptimizer" - ).set_name("LLAMAAdvancedMemoryEnhancedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMemoryEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMemoryEnhancedHybridOptimizer").set_name("LLAMAAdvancedMemoryEnhancedHybridOptimizer", register=True) except Exception as e: print("AdvancedMemoryEnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMemoryGuidedAdaptiveStrategyV68 import ( - AdvancedMemoryGuidedAdaptiveStrategyV68, - ) + from nevergrad.optimization.lama.AdvancedMemoryGuidedAdaptiveStrategyV68 import AdvancedMemoryGuidedAdaptiveStrategyV68 lama_register["AdvancedMemoryGuidedAdaptiveStrategyV68"] = AdvancedMemoryGuidedAdaptiveStrategyV68 - LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68 = NonObjectOptimizer( - method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68" - ).set_name("LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68 = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68").set_name("LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68", register=True) except Exception as e: print("AdvancedMemoryGuidedAdaptiveStrategyV68 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMemoryGuidedDualStrategyV80 import ( - AdvancedMemoryGuidedDualStrategyV80, - ) + from nevergrad.optimization.lama.AdvancedMemoryGuidedDualStrategyV80 import AdvancedMemoryGuidedDualStrategyV80 lama_register["AdvancedMemoryGuidedDualStrategyV80"] = AdvancedMemoryGuidedDualStrategyV80 - LLAMAAdvancedMemoryGuidedDualStrategyV80 = NonObjectOptimizer( - method="LLAMAAdvancedMemoryGuidedDualStrategyV80" - ).set_name("LLAMAAdvancedMemoryGuidedDualStrategyV80", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedDualStrategyV80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMemoryGuidedDualStrategyV80 = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedDualStrategyV80").set_name("LLAMAAdvancedMemoryGuidedDualStrategyV80", register=True) except Exception as e: print("AdvancedMemoryGuidedDualStrategyV80 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMultiModalAdaptiveOptimizer import ( - AdvancedMultiModalAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.AdvancedMultiModalAdaptiveOptimizer import AdvancedMultiModalAdaptiveOptimizer lama_register["AdvancedMultiModalAdaptiveOptimizer"] = AdvancedMultiModalAdaptiveOptimizer - LLAMAAdvancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedMultiModalAdaptiveOptimizer" - ).set_name("LLAMAAdvancedMultiModalAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMultiModalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMultiModalAdaptiveOptimizer").set_name("LLAMAAdvancedMultiModalAdaptiveOptimizer", register=True) except Exception as e: print("AdvancedMultiModalAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedMultiStrategySelfAdaptiveDE import ( - AdvancedMultiStrategySelfAdaptiveDE, - ) + from nevergrad.optimization.lama.AdvancedMultiStrategySelfAdaptiveDE import AdvancedMultiStrategySelfAdaptiveDE lama_register["AdvancedMultiStrategySelfAdaptiveDE"] = AdvancedMultiStrategySelfAdaptiveDE - LLAMAAdvancedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( - method="LLAMAAdvancedMultiStrategySelfAdaptiveDE" - ).set_name("LLAMAAdvancedMultiStrategySelfAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedMultiStrategySelfAdaptiveDE").set_name("LLAMAAdvancedMultiStrategySelfAdaptiveDE", register=True) except Exception as e: print("AdvancedMultiStrategySelfAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedNicheDifferentialParticleSwarmOptimizer import ( - AdvancedNicheDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.AdvancedNicheDifferentialParticleSwarmOptimizer import AdvancedNicheDifferentialParticleSwarmOptimizer - lama_register["AdvancedNicheDifferentialParticleSwarmOptimizer"] = ( - AdvancedNicheDifferentialParticleSwarmOptimizer - ) - LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer", register=True) + lama_register["AdvancedNicheDifferentialParticleSwarmOptimizer"] = AdvancedNicheDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("AdvancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( - AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE, - ) + from nevergrad.optimization.lama.AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE import AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE - lama_register["AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( - AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE - ) - LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( - method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE" - ).set_name("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) + lama_register["AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE + res = NonObjectOptimizer(method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) except Exception as e: print("AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedOptimalHybridDifferentialAnnealingOptimizer import ( - AdvancedOptimalHybridDifferentialAnnealingOptimizer, - ) + from nevergrad.optimization.lama.AdvancedOptimalHybridDifferentialAnnealingOptimizer import AdvancedOptimalHybridDifferentialAnnealingOptimizer - lama_register["AdvancedOptimalHybridDifferentialAnnealingOptimizer"] = ( - AdvancedOptimalHybridDifferentialAnnealingOptimizer - ) - LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer" - ).set_name("LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer", register=True) + lama_register["AdvancedOptimalHybridDifferentialAnnealingOptimizer"] = AdvancedOptimalHybridDifferentialAnnealingOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer").set_name("LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer", register=True) except Exception as e: print("AdvancedOptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedParallelDifferentialEvolution import ( - AdvancedParallelDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdvancedParallelDifferentialEvolution import AdvancedParallelDifferentialEvolution lama_register["AdvancedParallelDifferentialEvolution"] = AdvancedParallelDifferentialEvolution - LLAMAAdvancedParallelDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdvancedParallelDifferentialEvolution" - ).set_name("LLAMAAdvancedParallelDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedParallelDifferentialEvolution").set_name("LLAMAAdvancedParallelDifferentialEvolution", register=True) except Exception as e: print("AdvancedParallelDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedPrecisionEvolver import AdvancedPrecisionEvolver lama_register["AdvancedPrecisionEvolver"] = AdvancedPrecisionEvolver - LLAMAAdvancedPrecisionEvolver = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver").set_name( - "LLAMAAdvancedPrecisionEvolver", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedPrecisionEvolver = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver").set_name("LLAMAAdvancedPrecisionEvolver", register=True) except Exception as e: print("AdvancedPrecisionEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedPrecisionGuidedStrategy import AdvancedPrecisionGuidedStrategy lama_register["AdvancedPrecisionGuidedStrategy"] = AdvancedPrecisionGuidedStrategy - LLAMAAdvancedPrecisionGuidedStrategy = NonObjectOptimizer( - method="LLAMAAdvancedPrecisionGuidedStrategy" - ).set_name("LLAMAAdvancedPrecisionGuidedStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionGuidedStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedPrecisionGuidedStrategy = NonObjectOptimizer(method="LLAMAAdvancedPrecisionGuidedStrategy").set_name("LLAMAAdvancedPrecisionGuidedStrategy", register=True) except Exception as e: print("AdvancedPrecisionGuidedStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumCognitionTrajectoryOptimizerV29 import ( - AdvancedQuantumCognitionTrajectoryOptimizerV29, - ) + from nevergrad.optimization.lama.AdvancedQuantumCognitionTrajectoryOptimizerV29 import AdvancedQuantumCognitionTrajectoryOptimizerV29 - lama_register["AdvancedQuantumCognitionTrajectoryOptimizerV29"] = ( - AdvancedQuantumCognitionTrajectoryOptimizerV29 - ) - LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29 = NonObjectOptimizer( - method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29" - ).set_name("LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29", register=True) + lama_register["AdvancedQuantumCognitionTrajectoryOptimizerV29"] = AdvancedQuantumCognitionTrajectoryOptimizerV29 + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29 = NonObjectOptimizer(method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29").set_name("LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29", register=True) except Exception as e: print("AdvancedQuantumCognitionTrajectoryOptimizerV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumControlledDiversityStrategy import ( - AdvancedQuantumControlledDiversityStrategy, - ) + from nevergrad.optimization.lama.AdvancedQuantumControlledDiversityStrategy import AdvancedQuantumControlledDiversityStrategy lama_register["AdvancedQuantumControlledDiversityStrategy"] = AdvancedQuantumControlledDiversityStrategy - LLAMAAdvancedQuantumControlledDiversityStrategy = NonObjectOptimizer( - method="LLAMAAdvancedQuantumControlledDiversityStrategy" - ).set_name("LLAMAAdvancedQuantumControlledDiversityStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumControlledDiversityStrategy = NonObjectOptimizer(method="LLAMAAdvancedQuantumControlledDiversityStrategy").set_name("LLAMAAdvancedQuantumControlledDiversityStrategy", register=True) except Exception as e: print("AdvancedQuantumControlledDiversityStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumCrossoverOptimizer import ( - AdvancedQuantumCrossoverOptimizer, - ) + from nevergrad.optimization.lama.AdvancedQuantumCrossoverOptimizer import AdvancedQuantumCrossoverOptimizer lama_register["AdvancedQuantumCrossoverOptimizer"] = AdvancedQuantumCrossoverOptimizer - LLAMAAdvancedQuantumCrossoverOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedQuantumCrossoverOptimizer" - ).set_name("LLAMAAdvancedQuantumCrossoverOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumCrossoverOptimizer").set_name("LLAMAAdvancedQuantumCrossoverOptimizer", register=True) except Exception as e: print("AdvancedQuantumCrossoverOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart import ( - AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart, - ) + from nevergrad.optimization.lama.AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart import AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart - lama_register["AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart"] = ( - AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart - ) - LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart = NonObjectOptimizer( - method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart" - ).set_name( - "LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart", register=True - ) + lama_register["AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart"] = AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart = NonObjectOptimizer(method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart").set_name("LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart", register=True) except Exception as e: - print( - "AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart can not be imported: ", e - ) - + print("AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart can not be imported: ", e) try: from nevergrad.optimization.lama.AdvancedQuantumGradientDescent import AdvancedQuantumGradientDescent lama_register["AdvancedQuantumGradientDescent"] = AdvancedQuantumGradientDescent - LLAMAAdvancedQuantumGradientDescent = NonObjectOptimizer( - method="LLAMAAdvancedQuantumGradientDescent" - ).set_name("LLAMAAdvancedQuantumGradientDescent", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientDescent")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumGradientDescent = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientDescent").set_name("LLAMAAdvancedQuantumGradientDescent", register=True) except Exception as e: print("AdvancedQuantumGradientDescent can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumGradientExplorationOptimization import ( - AdvancedQuantumGradientExplorationOptimization, - ) + from nevergrad.optimization.lama.AdvancedQuantumGradientExplorationOptimization import AdvancedQuantumGradientExplorationOptimization - lama_register["AdvancedQuantumGradientExplorationOptimization"] = ( - AdvancedQuantumGradientExplorationOptimization - ) - LLAMAAdvancedQuantumGradientExplorationOptimization = NonObjectOptimizer( - method="LLAMAAdvancedQuantumGradientExplorationOptimization" - ).set_name("LLAMAAdvancedQuantumGradientExplorationOptimization", register=True) + lama_register["AdvancedQuantumGradientExplorationOptimization"] = AdvancedQuantumGradientExplorationOptimization + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientExplorationOptimization").set_name("LLAMAAdvancedQuantumGradientExplorationOptimization", register=True) except Exception as e: print("AdvancedQuantumGradientExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumHarmonicFeedbackOptimizer import ( - AdvancedQuantumHarmonicFeedbackOptimizer, - ) + from nevergrad.optimization.lama.AdvancedQuantumHarmonicFeedbackOptimizer import AdvancedQuantumHarmonicFeedbackOptimizer lama_register["AdvancedQuantumHarmonicFeedbackOptimizer"] = AdvancedQuantumHarmonicFeedbackOptimizer - LLAMAAdvancedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer" - ).set_name("LLAMAAdvancedQuantumHarmonicFeedbackOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer").set_name("LLAMAAdvancedQuantumHarmonicFeedbackOptimizer", register=True) except Exception as e: print("AdvancedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumInfusedAdaptiveStrategyV3 import ( - AdvancedQuantumInfusedAdaptiveStrategyV3, - ) + from nevergrad.optimization.lama.AdvancedQuantumInfusedAdaptiveStrategyV3 import AdvancedQuantumInfusedAdaptiveStrategyV3 lama_register["AdvancedQuantumInfusedAdaptiveStrategyV3"] = AdvancedQuantumInfusedAdaptiveStrategyV3 - LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3 = NonObjectOptimizer( - method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3" - ).set_name("LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3 = NonObjectOptimizer(method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3").set_name("LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3", register=True) except Exception as e: print("AdvancedQuantumInfusedAdaptiveStrategyV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumMemeticDifferentialEvolution import ( - AdvancedQuantumMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.AdvancedQuantumMemeticDifferentialEvolution import AdvancedQuantumMemeticDifferentialEvolution lama_register["AdvancedQuantumMemeticDifferentialEvolution"] = AdvancedQuantumMemeticDifferentialEvolution - LLAMAAdvancedQuantumMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAAdvancedQuantumMemeticDifferentialEvolution" - ).set_name("LLAMAAdvancedQuantumMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedQuantumMemeticDifferentialEvolution").set_name("LLAMAAdvancedQuantumMemeticDifferentialEvolution", register=True) except Exception as e: print("AdvancedQuantumMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedQuantumStateCrossoverOptimization import ( - AdvancedQuantumStateCrossoverOptimization, - ) + from nevergrad.optimization.lama.AdvancedQuantumStateCrossoverOptimization import AdvancedQuantumStateCrossoverOptimization lama_register["AdvancedQuantumStateCrossoverOptimization"] = AdvancedQuantumStateCrossoverOptimization - LLAMAAdvancedQuantumStateCrossoverOptimization = NonObjectOptimizer( - method="LLAMAAdvancedQuantumStateCrossoverOptimization" - ).set_name("LLAMAAdvancedQuantumStateCrossoverOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumStateCrossoverOptimization").set_name("LLAMAAdvancedQuantumStateCrossoverOptimization", register=True) except Exception as e: print("AdvancedQuantumStateCrossoverOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedQuantumSwarmOptimization import AdvancedQuantumSwarmOptimization lama_register["AdvancedQuantumSwarmOptimization"] = AdvancedQuantumSwarmOptimization - LLAMAAdvancedQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAAdvancedQuantumSwarmOptimization" - ).set_name("LLAMAAdvancedQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumSwarmOptimization").set_name("LLAMAAdvancedQuantumSwarmOptimization", register=True) except Exception as e: print("AdvancedQuantumSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedQuantumVelocityOptimizer import AdvancedQuantumVelocityOptimizer lama_register["AdvancedQuantumVelocityOptimizer"] = AdvancedQuantumVelocityOptimizer - LLAMAAdvancedQuantumVelocityOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedQuantumVelocityOptimizer" - ).set_name("LLAMAAdvancedQuantumVelocityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedQuantumVelocityOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumVelocityOptimizer").set_name("LLAMAAdvancedQuantumVelocityOptimizer", register=True) except Exception as e: print("AdvancedQuantumVelocityOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedRAMEDSv6 import AdvancedRAMEDSv6 lama_register["AdvancedRAMEDSv6"] = AdvancedRAMEDSv6 - LLAMAAdvancedRAMEDSv6 = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6").set_name( - "LLAMAAdvancedRAMEDSv6", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRAMEDSv6 = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6").set_name("LLAMAAdvancedRAMEDSv6", register=True) except Exception as e: print("AdvancedRAMEDSv6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedAdaptiveMemoryEnhancedSearch import ( - AdvancedRefinedAdaptiveMemoryEnhancedSearch, - ) + from nevergrad.optimization.lama.AdvancedRefinedAdaptiveMemoryEnhancedSearch import AdvancedRefinedAdaptiveMemoryEnhancedSearch lama_register["AdvancedRefinedAdaptiveMemoryEnhancedSearch"] = AdvancedRefinedAdaptiveMemoryEnhancedSearch - LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( - method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch" - ).set_name("LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch").set_name("LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch", register=True) except Exception as e: print("AdvancedRefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( - AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus, - ) + from nevergrad.optimization.lama.AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus import AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus - lama_register["AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( - AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus - ) - LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( - method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus" - ).set_name("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) + lama_register["AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) except Exception as e: print("AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( - AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, - ) + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - lama_register["AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( - AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - ) - LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" - ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) + lama_register["AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) except Exception as e: print("AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( - AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer import AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer - lama_register["AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( - AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer - ) - LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) + lama_register["AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedAnnealing import ( - AdvancedRefinedGradientBoostedAnnealing, - ) + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedAnnealing import AdvancedRefinedGradientBoostedAnnealing lama_register["AdvancedRefinedGradientBoostedAnnealing"] = AdvancedRefinedGradientBoostedAnnealing - LLAMAAdvancedRefinedGradientBoostedAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedRefinedGradientBoostedAnnealing" - ).set_name("LLAMAAdvancedRefinedGradientBoostedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedAnnealing", register=True) except Exception as e: print("AdvancedRefinedGradientBoostedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemoryAnnealing import ( - AdvancedRefinedGradientBoostedMemoryAnnealing, - ) + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemoryAnnealing import AdvancedRefinedGradientBoostedMemoryAnnealing - lama_register["AdvancedRefinedGradientBoostedMemoryAnnealing"] = ( - AdvancedRefinedGradientBoostedMemoryAnnealing - ) - LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing" - ).set_name("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing", register=True) + lama_register["AdvancedRefinedGradientBoostedMemoryAnnealing"] = AdvancedRefinedGradientBoostedMemoryAnnealing + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("AdvancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemorySimulatedAnnealing import ( - AdvancedRefinedGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemorySimulatedAnnealing import AdvancedRefinedGradientBoostedMemorySimulatedAnnealing - lama_register["AdvancedRefinedGradientBoostedMemorySimulatedAnnealing"] = ( - AdvancedRefinedGradientBoostedMemorySimulatedAnnealing - ) - LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["AdvancedRefinedGradientBoostedMemorySimulatedAnnealing"] = AdvancedRefinedGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("AdvancedRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedHybridEvolutionaryAnnealingOptimizer import ( - AdvancedRefinedHybridEvolutionaryAnnealingOptimizer, - ) + from nevergrad.optimization.lama.AdvancedRefinedHybridEvolutionaryAnnealingOptimizer import AdvancedRefinedHybridEvolutionaryAnnealingOptimizer - lama_register["AdvancedRefinedHybridEvolutionaryAnnealingOptimizer"] = ( - AdvancedRefinedHybridEvolutionaryAnnealingOptimizer - ) - LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer" - ).set_name("LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer", register=True) + lama_register["AdvancedRefinedHybridEvolutionaryAnnealingOptimizer"] = AdvancedRefinedHybridEvolutionaryAnnealingOptimizer + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer", register=True) except Exception as e: print("AdvancedRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 import ( - AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51, - ) + from nevergrad.optimization.lama.AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 import AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 - lama_register["AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51"] = ( - AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 - ) - LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 = NonObjectOptimizer( - method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51" - ).set_name("LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51", register=True) + lama_register["AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51"] = AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 = NonObjectOptimizer(method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51").set_name("LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51", register=True) except Exception as e: print("AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedRefinedRAMEDSPro import AdvancedRefinedRAMEDSPro lama_register["AdvancedRefinedRAMEDSPro"] = AdvancedRefinedRAMEDSPro - LLAMAAdvancedRefinedRAMEDSPro = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro").set_name( - "LLAMAAdvancedRefinedRAMEDSPro", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedRAMEDSPro = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro").set_name("LLAMAAdvancedRefinedRAMEDSPro", register=True) except Exception as e: print("AdvancedRefinedRAMEDSPro can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedSpiralSearchOptimizer import ( - AdvancedRefinedSpiralSearchOptimizer, - ) + from nevergrad.optimization.lama.AdvancedRefinedSpiralSearchOptimizer import AdvancedRefinedSpiralSearchOptimizer lama_register["AdvancedRefinedSpiralSearchOptimizer"] = AdvancedRefinedSpiralSearchOptimizer - LLAMAAdvancedRefinedSpiralSearchOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedRefinedSpiralSearchOptimizer" - ).set_name("LLAMAAdvancedRefinedSpiralSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedSpiralSearchOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedSpiralSearchOptimizer").set_name("LLAMAAdvancedRefinedSpiralSearchOptimizer", register=True) except Exception as e: print("AdvancedRefinedSpiralSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 import ( - AdvancedRefinedUltraEvolutionaryGradientOptimizerV29, - ) + from nevergrad.optimization.lama.AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 import AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 - lama_register["AdvancedRefinedUltraEvolutionaryGradientOptimizerV29"] = ( - AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 - ) - LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29 = NonObjectOptimizer( - method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29" - ).set_name("LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29", register=True) + lama_register["AdvancedRefinedUltraEvolutionaryGradientOptimizerV29"] = AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 + res = NonObjectOptimizer(method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29 = NonObjectOptimizer(method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29").set_name("LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29", register=True) except Exception as e: print("AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v2 import AdvancedSelfAdaptiveDE_v2 lama_register["AdvancedSelfAdaptiveDE_v2"] = AdvancedSelfAdaptiveDE_v2 - LLAMAAdvancedSelfAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2").set_name( - "LLAMAAdvancedSelfAdaptiveDE_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedSelfAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2").set_name("LLAMAAdvancedSelfAdaptiveDE_v2", register=True) except Exception as e: print("AdvancedSelfAdaptiveDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v3 import AdvancedSelfAdaptiveDE_v3 lama_register["AdvancedSelfAdaptiveDE_v3"] = AdvancedSelfAdaptiveDE_v3 - LLAMAAdvancedSelfAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3").set_name( - "LLAMAAdvancedSelfAdaptiveDE_v3", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedSelfAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3").set_name("LLAMAAdvancedSelfAdaptiveDE_v3", register=True) except Exception as e: print("AdvancedSelfAdaptiveDE_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.AdvancedSpatialAdaptiveConvergenceOptimizer import ( - AdvancedSpatialAdaptiveConvergenceOptimizer, - ) + from nevergrad.optimization.lama.AdvancedSpatialAdaptiveConvergenceOptimizer import AdvancedSpatialAdaptiveConvergenceOptimizer lama_register["AdvancedSpatialAdaptiveConvergenceOptimizer"] = AdvancedSpatialAdaptiveConvergenceOptimizer - LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer" - ).set_name("LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer = NonObjectOptimizer(method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer").set_name("LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer", register=True) except Exception as e: print("AdvancedSpatialAdaptiveConvergenceOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedSpatialGradientOptimizer import AdvancedSpatialGradientOptimizer lama_register["AdvancedSpatialGradientOptimizer"] = AdvancedSpatialGradientOptimizer - LLAMAAdvancedSpatialGradientOptimizer = NonObjectOptimizer( - method="LLAMAAdvancedSpatialGradientOptimizer" - ).set_name("LLAMAAdvancedSpatialGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAAdvancedSpatialGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedSpatialGradientOptimizer = NonObjectOptimizer(method="LLAMAAdvancedSpatialGradientOptimizer").set_name("LLAMAAdvancedSpatialGradientOptimizer", register=True) except Exception as e: print("AdvancedSpatialGradientOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.AdvancedStrategicHybridDE import AdvancedStrategicHybridDE lama_register["AdvancedStrategicHybridDE"] = AdvancedStrategicHybridDE - LLAMAAdvancedStrategicHybridDE = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE").set_name( - "LLAMAAdvancedStrategicHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAdvancedStrategicHybridDE = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE").set_name("LLAMAAdvancedStrategicHybridDE", register=True) except Exception as e: print("AdvancedStrategicHybridDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ArchiveEnhancedAdaptiveDE import ArchiveEnhancedAdaptiveDE lama_register["ArchiveEnhancedAdaptiveDE"] = ArchiveEnhancedAdaptiveDE - LLAMAArchiveEnhancedAdaptiveDE = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE").set_name( - "LLAMAArchiveEnhancedAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAArchiveEnhancedAdaptiveDE = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE").set_name("LLAMAArchiveEnhancedAdaptiveDE", register=True) except Exception as e: print("ArchiveEnhancedAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.AttenuatedAdaptiveEvolver import AttenuatedAdaptiveEvolver lama_register["AttenuatedAdaptiveEvolver"] = AttenuatedAdaptiveEvolver - LLAMAAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver").set_name( - "LLAMAAttenuatedAdaptiveEvolver", register=True - ) + res = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver").set_name("LLAMAAttenuatedAdaptiveEvolver", register=True) except Exception as e: print("AttenuatedAdaptiveEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.BalancedAdaptiveMemeticDE import BalancedAdaptiveMemeticDE lama_register["BalancedAdaptiveMemeticDE"] = BalancedAdaptiveMemeticDE - LLAMABalancedAdaptiveMemeticDE = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE").set_name( - "LLAMABalancedAdaptiveMemeticDE", register=True - ) + res = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedAdaptiveMemeticDE = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE").set_name("LLAMABalancedAdaptiveMemeticDE", register=True) except Exception as e: print("BalancedAdaptiveMemeticDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.BalancedCulturalDifferentialEvolution import ( - BalancedCulturalDifferentialEvolution, - ) + from nevergrad.optimization.lama.BalancedCulturalDifferentialEvolution import BalancedCulturalDifferentialEvolution lama_register["BalancedCulturalDifferentialEvolution"] = BalancedCulturalDifferentialEvolution - LLAMABalancedCulturalDifferentialEvolution = NonObjectOptimizer( - method="LLAMABalancedCulturalDifferentialEvolution" - ).set_name("LLAMABalancedCulturalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMABalancedCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMABalancedCulturalDifferentialEvolution").set_name("LLAMABalancedCulturalDifferentialEvolution", register=True) except Exception as e: print("BalancedCulturalDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.BalancedDualStrategyAdaptiveDE import BalancedDualStrategyAdaptiveDE lama_register["BalancedDualStrategyAdaptiveDE"] = BalancedDualStrategyAdaptiveDE - LLAMABalancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMABalancedDualStrategyAdaptiveDE" - ).set_name("LLAMABalancedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMABalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMABalancedDualStrategyAdaptiveDE").set_name("LLAMABalancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("BalancedDualStrategyAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.BalancedDynamicQuantumLevySwarm import BalancedDynamicQuantumLevySwarm lama_register["BalancedDynamicQuantumLevySwarm"] = BalancedDynamicQuantumLevySwarm - LLAMABalancedDynamicQuantumLevySwarm = NonObjectOptimizer( - method="LLAMABalancedDynamicQuantumLevySwarm" - ).set_name("LLAMABalancedDynamicQuantumLevySwarm", register=True) + res = NonObjectOptimizer(method="LLAMABalancedDynamicQuantumLevySwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedDynamicQuantumLevySwarm = NonObjectOptimizer(method="LLAMABalancedDynamicQuantumLevySwarm").set_name("LLAMABalancedDynamicQuantumLevySwarm", register=True) except Exception as e: print("BalancedDynamicQuantumLevySwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.BalancedQuantumLevyDifferentialSearch import ( - BalancedQuantumLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.BalancedQuantumLevyDifferentialSearch import BalancedQuantumLevyDifferentialSearch lama_register["BalancedQuantumLevyDifferentialSearch"] = BalancedQuantumLevyDifferentialSearch - LLAMABalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMABalancedQuantumLevyDifferentialSearch" - ).set_name("LLAMABalancedQuantumLevyDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMABalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMABalancedQuantumLevyDifferentialSearch").set_name("LLAMABalancedQuantumLevyDifferentialSearch", register=True) except Exception as e: print("BalancedQuantumLevyDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.BalancedQuantumLevySwarmOptimization import ( - BalancedQuantumLevySwarmOptimization, - ) + from nevergrad.optimization.lama.BalancedQuantumLevySwarmOptimization import BalancedQuantumLevySwarmOptimization lama_register["BalancedQuantumLevySwarmOptimization"] = BalancedQuantumLevySwarmOptimization - LLAMABalancedQuantumLevySwarmOptimization = NonObjectOptimizer( - method="LLAMABalancedQuantumLevySwarmOptimization" - ).set_name("LLAMABalancedQuantumLevySwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMABalancedQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABalancedQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMABalancedQuantumLevySwarmOptimization").set_name("LLAMABalancedQuantumLevySwarmOptimization", register=True) except Exception as e: print("BalancedQuantumLevySwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.BayesianAdaptiveMemeticSearch import BayesianAdaptiveMemeticSearch lama_register["BayesianAdaptiveMemeticSearch"] = BayesianAdaptiveMemeticSearch - LLAMABayesianAdaptiveMemeticSearch = NonObjectOptimizer( - method="LLAMABayesianAdaptiveMemeticSearch" - ).set_name("LLAMABayesianAdaptiveMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMABayesianAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMABayesianAdaptiveMemeticSearch = NonObjectOptimizer(method="LLAMABayesianAdaptiveMemeticSearch").set_name("LLAMABayesianAdaptiveMemeticSearch", register=True) except Exception as e: print("BayesianAdaptiveMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.CAMSQSOB import CAMSQSOB lama_register["CAMSQSOB"] = CAMSQSOB + res = NonObjectOptimizer(method="LLAMACAMSQSOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMACAMSQSOB = NonObjectOptimizer(method="LLAMACAMSQSOB").set_name("LLAMACAMSQSOB", register=True) except Exception as e: print("CAMSQSOB can not be imported: ", e) - try: from nevergrad.optimization.lama.CGES import CGES lama_register["CGES"] = CGES + res = NonObjectOptimizer(method="LLAMACGES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMACGES = NonObjectOptimizer(method="LLAMACGES").set_name("LLAMACGES", register=True) except Exception as e: print("CGES can not be imported: ", e) - try: from nevergrad.optimization.lama.CMADifferentialEvolutionPSO import CMADifferentialEvolutionPSO lama_register["CMADifferentialEvolutionPSO"] = CMADifferentialEvolutionPSO - LLAMACMADifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO").set_name( - "LLAMACMADifferentialEvolutionPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACMADifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO").set_name("LLAMACMADifferentialEvolutionPSO", register=True) except Exception as e: print("CMADifferentialEvolutionPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.CMDEALX import CMDEALX lama_register["CMDEALX"] = CMDEALX + res = NonObjectOptimizer(method="LLAMACMDEALX")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMACMDEALX = NonObjectOptimizer(method="LLAMACMDEALX").set_name("LLAMACMDEALX", register=True) except Exception as e: print("CMDEALX can not be imported: ", e) - try: - from nevergrad.optimization.lama.ClusterAdaptiveQuantumLevyOptimizer import ( - ClusterAdaptiveQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.ClusterAdaptiveQuantumLevyOptimizer import ClusterAdaptiveQuantumLevyOptimizer lama_register["ClusterAdaptiveQuantumLevyOptimizer"] = ClusterAdaptiveQuantumLevyOptimizer - LLAMAClusterAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMAClusterAdaptiveQuantumLevyOptimizer" - ).set_name("LLAMAClusterAdaptiveQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAClusterAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAClusterAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAClusterAdaptiveQuantumLevyOptimizer").set_name("LLAMAClusterAdaptiveQuantumLevyOptimizer", register=True) except Exception as e: print("ClusterAdaptiveQuantumLevyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ClusterBasedAdaptiveDifferentialEvolution import ( - ClusterBasedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ClusterBasedAdaptiveDifferentialEvolution import ClusterBasedAdaptiveDifferentialEvolution lama_register["ClusterBasedAdaptiveDifferentialEvolution"] = ClusterBasedAdaptiveDifferentialEvolution - LLAMAClusterBasedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAClusterBasedAdaptiveDifferentialEvolution" - ).set_name("LLAMAClusterBasedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAClusterBasedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAClusterBasedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAClusterBasedAdaptiveDifferentialEvolution").set_name("LLAMAClusterBasedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ClusterBasedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ClusteredAdaptiveHybridPSODESimulatedAnnealing import ( - ClusteredAdaptiveHybridPSODESimulatedAnnealing, - ) + from nevergrad.optimization.lama.ClusteredAdaptiveHybridPSODESimulatedAnnealing import ClusteredAdaptiveHybridPSODESimulatedAnnealing - lama_register["ClusteredAdaptiveHybridPSODESimulatedAnnealing"] = ( - ClusteredAdaptiveHybridPSODESimulatedAnnealing - ) - LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing = NonObjectOptimizer( - method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing" - ).set_name("LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing", register=True) + lama_register["ClusteredAdaptiveHybridPSODESimulatedAnnealing"] = ClusteredAdaptiveHybridPSODESimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing = NonObjectOptimizer(method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing").set_name("LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing", register=True) except Exception as e: print("ClusteredAdaptiveHybridPSODESimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.ClusteredDifferentialEvolutionWithLocalSearch import ( - ClusteredDifferentialEvolutionWithLocalSearch, - ) + from nevergrad.optimization.lama.ClusteredDifferentialEvolutionWithLocalSearch import ClusteredDifferentialEvolutionWithLocalSearch - lama_register["ClusteredDifferentialEvolutionWithLocalSearch"] = ( - ClusteredDifferentialEvolutionWithLocalSearch - ) - LLAMAClusteredDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( - method="LLAMAClusteredDifferentialEvolutionWithLocalSearch" - ).set_name("LLAMAClusteredDifferentialEvolutionWithLocalSearch", register=True) + lama_register["ClusteredDifferentialEvolutionWithLocalSearch"] = ClusteredDifferentialEvolutionWithLocalSearch + res = NonObjectOptimizer(method="LLAMAClusteredDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAClusteredDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAClusteredDifferentialEvolutionWithLocalSearch").set_name("LLAMAClusteredDifferentialEvolutionWithLocalSearch", register=True) except Exception as e: print("ClusteredDifferentialEvolutionWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CoevolutionaryDualPopulationSearch import ( - CoevolutionaryDualPopulationSearch, - ) + from nevergrad.optimization.lama.CoevolutionaryDualPopulationSearch import CoevolutionaryDualPopulationSearch lama_register["CoevolutionaryDualPopulationSearch"] = CoevolutionaryDualPopulationSearch - LLAMACoevolutionaryDualPopulationSearch = NonObjectOptimizer( - method="LLAMACoevolutionaryDualPopulationSearch" - ).set_name("LLAMACoevolutionaryDualPopulationSearch", register=True) + res = NonObjectOptimizer(method="LLAMACoevolutionaryDualPopulationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACoevolutionaryDualPopulationSearch = NonObjectOptimizer(method="LLAMACoevolutionaryDualPopulationSearch").set_name("LLAMACoevolutionaryDualPopulationSearch", register=True) except Exception as e: print("CoevolutionaryDualPopulationSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CohortDiversityDrivenOptimization import ( - CohortDiversityDrivenOptimization, - ) + from nevergrad.optimization.lama.CohortDiversityDrivenOptimization import CohortDiversityDrivenOptimization lama_register["CohortDiversityDrivenOptimization"] = CohortDiversityDrivenOptimization - LLAMACohortDiversityDrivenOptimization = NonObjectOptimizer( - method="LLAMACohortDiversityDrivenOptimization" - ).set_name("LLAMACohortDiversityDrivenOptimization", register=True) + res = NonObjectOptimizer(method="LLAMACohortDiversityDrivenOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACohortDiversityDrivenOptimization = NonObjectOptimizer(method="LLAMACohortDiversityDrivenOptimization").set_name("LLAMACohortDiversityDrivenOptimization", register=True) except Exception as e: print("CohortDiversityDrivenOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.CohortEvolutionWithDynamicSelection import ( - CohortEvolutionWithDynamicSelection, - ) + from nevergrad.optimization.lama.CohortEvolutionWithDynamicSelection import CohortEvolutionWithDynamicSelection lama_register["CohortEvolutionWithDynamicSelection"] = CohortEvolutionWithDynamicSelection - LLAMACohortEvolutionWithDynamicSelection = NonObjectOptimizer( - method="LLAMACohortEvolutionWithDynamicSelection" - ).set_name("LLAMACohortEvolutionWithDynamicSelection", register=True) + res = NonObjectOptimizer(method="LLAMACohortEvolutionWithDynamicSelection")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACohortEvolutionWithDynamicSelection = NonObjectOptimizer(method="LLAMACohortEvolutionWithDynamicSelection").set_name("LLAMACohortEvolutionWithDynamicSelection", register=True) except Exception as e: print("CohortEvolutionWithDynamicSelection can not be imported: ", e) - try: from nevergrad.optimization.lama.ConcentricConvergenceOptimizer import ConcentricConvergenceOptimizer lama_register["ConcentricConvergenceOptimizer"] = ConcentricConvergenceOptimizer - LLAMAConcentricConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAConcentricConvergenceOptimizer" - ).set_name("LLAMAConcentricConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAConcentricConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConcentricConvergenceOptimizer = NonObjectOptimizer(method="LLAMAConcentricConvergenceOptimizer").set_name("LLAMAConcentricConvergenceOptimizer", register=True) except Exception as e: print("ConcentricConvergenceOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ConcentricDiversityStrategy import ConcentricDiversityStrategy lama_register["ConcentricDiversityStrategy"] = ConcentricDiversityStrategy - LLAMAConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy").set_name( - "LLAMAConcentricDiversityStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy").set_name("LLAMAConcentricDiversityStrategy", register=True) except Exception as e: print("ConcentricDiversityStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.ConcentricGradientDescentEvolver import ConcentricGradientDescentEvolver lama_register["ConcentricGradientDescentEvolver"] = ConcentricGradientDescentEvolver - LLAMAConcentricGradientDescentEvolver = NonObjectOptimizer( - method="LLAMAConcentricGradientDescentEvolver" - ).set_name("LLAMAConcentricGradientDescentEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAConcentricGradientDescentEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConcentricGradientDescentEvolver = NonObjectOptimizer(method="LLAMAConcentricGradientDescentEvolver").set_name("LLAMAConcentricGradientDescentEvolver", register=True) except Exception as e: print("ConcentricGradientDescentEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.ConcentricGradientEnhancedEvolver import ( - ConcentricGradientEnhancedEvolver, - ) + from nevergrad.optimization.lama.ConcentricGradientEnhancedEvolver import ConcentricGradientEnhancedEvolver lama_register["ConcentricGradientEnhancedEvolver"] = ConcentricGradientEnhancedEvolver - LLAMAConcentricGradientEnhancedEvolver = NonObjectOptimizer( - method="LLAMAConcentricGradientEnhancedEvolver" - ).set_name("LLAMAConcentricGradientEnhancedEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAConcentricGradientEnhancedEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConcentricGradientEnhancedEvolver = NonObjectOptimizer(method="LLAMAConcentricGradientEnhancedEvolver").set_name("LLAMAConcentricGradientEnhancedEvolver", register=True) except Exception as e: print("ConcentricGradientEnhancedEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.ConcentricQuantumCrossoverStrategyV4 import ( - ConcentricQuantumCrossoverStrategyV4, - ) + from nevergrad.optimization.lama.ConcentricQuantumCrossoverStrategyV4 import ConcentricQuantumCrossoverStrategyV4 lama_register["ConcentricQuantumCrossoverStrategyV4"] = ConcentricQuantumCrossoverStrategyV4 - LLAMAConcentricQuantumCrossoverStrategyV4 = NonObjectOptimizer( - method="LLAMAConcentricQuantumCrossoverStrategyV4" - ).set_name("LLAMAConcentricQuantumCrossoverStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMAConcentricQuantumCrossoverStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConcentricQuantumCrossoverStrategyV4 = NonObjectOptimizer(method="LLAMAConcentricQuantumCrossoverStrategyV4").set_name("LLAMAConcentricQuantumCrossoverStrategyV4", register=True) except Exception as e: print("ConcentricQuantumCrossoverStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ConvergenceAcceleratedSpiralSearch import ( - ConvergenceAcceleratedSpiralSearch, - ) + from nevergrad.optimization.lama.ConvergenceAcceleratedSpiralSearch import ConvergenceAcceleratedSpiralSearch lama_register["ConvergenceAcceleratedSpiralSearch"] = ConvergenceAcceleratedSpiralSearch - LLAMAConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( - method="LLAMAConvergenceAcceleratedSpiralSearch" - ).set_name("LLAMAConvergenceAcceleratedSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConvergenceAcceleratedSpiralSearch = NonObjectOptimizer(method="LLAMAConvergenceAcceleratedSpiralSearch").set_name("LLAMAConvergenceAcceleratedSpiralSearch", register=True) except Exception as e: print("ConvergenceAcceleratedSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ConvergentAdaptiveEvolutionStrategy import ( - ConvergentAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutionStrategy import ConvergentAdaptiveEvolutionStrategy lama_register["ConvergentAdaptiveEvolutionStrategy"] = ConvergentAdaptiveEvolutionStrategy - LLAMAConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMAConvergentAdaptiveEvolutionStrategy" - ).set_name("LLAMAConvergentAdaptiveEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutionStrategy").set_name("LLAMAConvergentAdaptiveEvolutionStrategy", register=True) except Exception as e: print("ConvergentAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.ConvergentAdaptiveEvolutiveStrategy import ( - ConvergentAdaptiveEvolutiveStrategy, - ) + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutiveStrategy import ConvergentAdaptiveEvolutiveStrategy lama_register["ConvergentAdaptiveEvolutiveStrategy"] = ConvergentAdaptiveEvolutiveStrategy - LLAMAConvergentAdaptiveEvolutiveStrategy = NonObjectOptimizer( - method="LLAMAConvergentAdaptiveEvolutiveStrategy" - ).set_name("LLAMAConvergentAdaptiveEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAConvergentAdaptiveEvolutiveStrategy = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutiveStrategy").set_name("LLAMAConvergentAdaptiveEvolutiveStrategy", register=True) except Exception as e: print("ConvergentAdaptiveEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeAdaptiveCulturalSearch import ( - CooperativeAdaptiveCulturalSearch, - ) + from nevergrad.optimization.lama.CooperativeAdaptiveCulturalSearch import CooperativeAdaptiveCulturalSearch lama_register["CooperativeAdaptiveCulturalSearch"] = CooperativeAdaptiveCulturalSearch - LLAMACooperativeAdaptiveCulturalSearch = NonObjectOptimizer( - method="LLAMACooperativeAdaptiveCulturalSearch" - ).set_name("LLAMACooperativeAdaptiveCulturalSearch", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeAdaptiveCulturalSearch = NonObjectOptimizer(method="LLAMACooperativeAdaptiveCulturalSearch").set_name("LLAMACooperativeAdaptiveCulturalSearch", register=True) except Exception as e: print("CooperativeAdaptiveCulturalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeAdaptiveEvolutionaryOptimizer import ( - CooperativeAdaptiveEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.CooperativeAdaptiveEvolutionaryOptimizer import CooperativeAdaptiveEvolutionaryOptimizer lama_register["CooperativeAdaptiveEvolutionaryOptimizer"] = CooperativeAdaptiveEvolutionaryOptimizer - LLAMACooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMACooperativeAdaptiveEvolutionaryOptimizer" - ).set_name("LLAMACooperativeAdaptiveEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMACooperativeAdaptiveEvolutionaryOptimizer").set_name("LLAMACooperativeAdaptiveEvolutionaryOptimizer", register=True) except Exception as e: print("CooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeCulturalAdaptiveSearch import ( - CooperativeCulturalAdaptiveSearch, - ) + from nevergrad.optimization.lama.CooperativeCulturalAdaptiveSearch import CooperativeCulturalAdaptiveSearch lama_register["CooperativeCulturalAdaptiveSearch"] = CooperativeCulturalAdaptiveSearch - LLAMACooperativeCulturalAdaptiveSearch = NonObjectOptimizer( - method="LLAMACooperativeCulturalAdaptiveSearch" - ).set_name("LLAMACooperativeCulturalAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeCulturalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeCulturalAdaptiveSearch = NonObjectOptimizer(method="LLAMACooperativeCulturalAdaptiveSearch").set_name("LLAMACooperativeCulturalAdaptiveSearch", register=True) except Exception as e: print("CooperativeCulturalAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeCulturalDifferentialSearch import ( - CooperativeCulturalDifferentialSearch, - ) + from nevergrad.optimization.lama.CooperativeCulturalDifferentialSearch import CooperativeCulturalDifferentialSearch lama_register["CooperativeCulturalDifferentialSearch"] = CooperativeCulturalDifferentialSearch - LLAMACooperativeCulturalDifferentialSearch = NonObjectOptimizer( - method="LLAMACooperativeCulturalDifferentialSearch" - ).set_name("LLAMACooperativeCulturalDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeCulturalDifferentialSearch = NonObjectOptimizer(method="LLAMACooperativeCulturalDifferentialSearch").set_name("LLAMACooperativeCulturalDifferentialSearch", register=True) except Exception as e: print("CooperativeCulturalDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeCulturalEvolutionStrategy import ( - CooperativeCulturalEvolutionStrategy, - ) + from nevergrad.optimization.lama.CooperativeCulturalEvolutionStrategy import CooperativeCulturalEvolutionStrategy lama_register["CooperativeCulturalEvolutionStrategy"] = CooperativeCulturalEvolutionStrategy - LLAMACooperativeCulturalEvolutionStrategy = NonObjectOptimizer( - method="LLAMACooperativeCulturalEvolutionStrategy" - ).set_name("LLAMACooperativeCulturalEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeCulturalEvolutionStrategy = NonObjectOptimizer(method="LLAMACooperativeCulturalEvolutionStrategy").set_name("LLAMACooperativeCulturalEvolutionStrategy", register=True) except Exception as e: print("CooperativeCulturalEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeEvolutionaryGradientSearch import ( - CooperativeEvolutionaryGradientSearch, - ) + from nevergrad.optimization.lama.CooperativeEvolutionaryGradientSearch import CooperativeEvolutionaryGradientSearch lama_register["CooperativeEvolutionaryGradientSearch"] = CooperativeEvolutionaryGradientSearch - LLAMACooperativeEvolutionaryGradientSearch = NonObjectOptimizer( - method="LLAMACooperativeEvolutionaryGradientSearch" - ).set_name("LLAMACooperativeEvolutionaryGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMACooperativeEvolutionaryGradientSearch").set_name("LLAMACooperativeEvolutionaryGradientSearch", register=True) except Exception as e: print("CooperativeEvolutionaryGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.CooperativeParticleSwarmOptimization import ( - CooperativeParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.CooperativeParticleSwarmOptimization import CooperativeParticleSwarmOptimization lama_register["CooperativeParticleSwarmOptimization"] = CooperativeParticleSwarmOptimization - LLAMACooperativeParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMACooperativeParticleSwarmOptimization" - ).set_name("LLAMACooperativeParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMACooperativeParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACooperativeParticleSwarmOptimization = NonObjectOptimizer(method="LLAMACooperativeParticleSwarmOptimization").set_name("LLAMACooperativeParticleSwarmOptimization", register=True) except Exception as e: print("CooperativeParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.CoordinatedAdaptiveHybridOptimizer import ( - CoordinatedAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.CoordinatedAdaptiveHybridOptimizer import CoordinatedAdaptiveHybridOptimizer lama_register["CoordinatedAdaptiveHybridOptimizer"] = CoordinatedAdaptiveHybridOptimizer - LLAMACoordinatedAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMACoordinatedAdaptiveHybridOptimizer" - ).set_name("LLAMACoordinatedAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMACoordinatedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACoordinatedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMACoordinatedAdaptiveHybridOptimizer").set_name("LLAMACoordinatedAdaptiveHybridOptimizer", register=True) except Exception as e: print("CoordinatedAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.CovarianceMatrixAdaptationDifferentialEvolution import ( - CovarianceMatrixAdaptationDifferentialEvolution, - ) + from nevergrad.optimization.lama.CovarianceMatrixAdaptationDifferentialEvolution import CovarianceMatrixAdaptationDifferentialEvolution - lama_register["CovarianceMatrixAdaptationDifferentialEvolution"] = ( - CovarianceMatrixAdaptationDifferentialEvolution - ) - LLAMACovarianceMatrixAdaptationDifferentialEvolution = NonObjectOptimizer( - method="LLAMACovarianceMatrixAdaptationDifferentialEvolution" - ).set_name("LLAMACovarianceMatrixAdaptationDifferentialEvolution", register=True) + lama_register["CovarianceMatrixAdaptationDifferentialEvolution"] = CovarianceMatrixAdaptationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMACovarianceMatrixAdaptationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACovarianceMatrixAdaptationDifferentialEvolution = NonObjectOptimizer(method="LLAMACovarianceMatrixAdaptationDifferentialEvolution").set_name("LLAMACovarianceMatrixAdaptationDifferentialEvolution", register=True) except Exception as e: print("CovarianceMatrixAdaptationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.CulturalAdaptiveDifferentialEvolution import ( - CulturalAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.CulturalAdaptiveDifferentialEvolution import CulturalAdaptiveDifferentialEvolution lama_register["CulturalAdaptiveDifferentialEvolution"] = CulturalAdaptiveDifferentialEvolution - LLAMACulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMACulturalAdaptiveDifferentialEvolution" - ).set_name("LLAMACulturalAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMACulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACulturalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMACulturalAdaptiveDifferentialEvolution").set_name("LLAMACulturalAdaptiveDifferentialEvolution", register=True) except Exception as e: print("CulturalAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.CulturalGuidedDifferentialEvolution import ( - CulturalGuidedDifferentialEvolution, - ) + from nevergrad.optimization.lama.CulturalGuidedDifferentialEvolution import CulturalGuidedDifferentialEvolution lama_register["CulturalGuidedDifferentialEvolution"] = CulturalGuidedDifferentialEvolution - LLAMACulturalGuidedDifferentialEvolution = NonObjectOptimizer( - method="LLAMACulturalGuidedDifferentialEvolution" - ).set_name("LLAMACulturalGuidedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMACulturalGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMACulturalGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMACulturalGuidedDifferentialEvolution").set_name("LLAMACulturalGuidedDifferentialEvolution", register=True) except Exception as e: print("CulturalGuidedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DADERC import DADERC lama_register["DADERC"] = DADERC + res = NonObjectOptimizer(method="LLAMADADERC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADADERC = NonObjectOptimizer(method="LLAMADADERC").set_name("LLAMADADERC", register=True) except Exception as e: print("DADERC can not be imported: ", e) - try: from nevergrad.optimization.lama.DADESM import DADESM lama_register["DADESM"] = DADESM + res = NonObjectOptimizer(method="LLAMADADESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADADESM = NonObjectOptimizer(method="LLAMADADESM").set_name("LLAMADADESM", register=True) except Exception as e: print("DADESM can not be imported: ", e) - try: from nevergrad.optimization.lama.DADe import DADe lama_register["DADe"] = DADe + res = NonObjectOptimizer(method="LLAMADADe")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADADe = NonObjectOptimizer(method="LLAMADADe").set_name("LLAMADADe", register=True) except Exception as e: print("DADe can not be imported: ", e) - try: from nevergrad.optimization.lama.DAEA import DAEA lama_register["DAEA"] = DAEA + res = NonObjectOptimizer(method="LLAMADAEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADAEA = NonObjectOptimizer(method="LLAMADAEA").set_name("LLAMADAEA", register=True) except Exception as e: print("DAEA can not be imported: ", e) - try: from nevergrad.optimization.lama.DAES import DAES lama_register["DAES"] = DAES + res = NonObjectOptimizer(method="LLAMADAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADAES = NonObjectOptimizer(method="LLAMADAES").set_name("LLAMADAES", register=True) except Exception as e: print("DAES can not be imported: ", e) - try: from nevergrad.optimization.lama.DAESF import DAESF lama_register["DAESF"] = DAESF + res = NonObjectOptimizer(method="LLAMADAESF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADAESF = NonObjectOptimizer(method="LLAMADAESF").set_name("LLAMADAESF", register=True) except Exception as e: print("DAESF can not be imported: ", e) - try: from nevergrad.optimization.lama.DASES import DASES lama_register["DASES"] = DASES + res = NonObjectOptimizer(method="LLAMADASES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADASES = NonObjectOptimizer(method="LLAMADASES").set_name("LLAMADASES", register=True) except Exception as e: print("DASES can not be imported: ", e) - try: from nevergrad.optimization.lama.DASOGG import DASOGG lama_register["DASOGG"] = DASOGG + res = NonObjectOptimizer(method="LLAMADASOGG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADASOGG = NonObjectOptimizer(method="LLAMADASOGG").set_name("LLAMADASOGG", register=True) except Exception as e: print("DASOGG can not be imported: ", e) - try: from nevergrad.optimization.lama.DDCEA import DDCEA lama_register["DDCEA"] = DDCEA + res = NonObjectOptimizer(method="LLAMADDCEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADDCEA = NonObjectOptimizer(method="LLAMADDCEA").set_name("LLAMADDCEA", register=True) except Exception as e: print("DDCEA can not be imported: ", e) - try: from nevergrad.optimization.lama.DDPO import DDPO lama_register["DDPO"] = DDPO + res = NonObjectOptimizer(method="LLAMADDPO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADDPO = NonObjectOptimizer(method="LLAMADDPO").set_name("LLAMADDPO", register=True) except Exception as e: print("DDPO can not be imported: ", e) - try: from nevergrad.optimization.lama.DEAMC import DEAMC lama_register["DEAMC"] = DEAMC + res = NonObjectOptimizer(method="LLAMADEAMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADEAMC = NonObjectOptimizer(method="LLAMADEAMC").set_name("LLAMADEAMC", register=True) except Exception as e: print("DEAMC can not be imported: ", e) - try: from nevergrad.optimization.lama.DEAMC_DSR import DEAMC_DSR lama_register["DEAMC_DSR"] = DEAMC_DSR + res = NonObjectOptimizer(method="LLAMADEAMC_DSR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADEAMC_DSR = NonObjectOptimizer(method="LLAMADEAMC_DSR").set_name("LLAMADEAMC_DSR", register=True) except Exception as e: print("DEAMC_DSR can not be imported: ", e) - try: from nevergrad.optimization.lama.DEAMC_LSI import DEAMC_LSI lama_register["DEAMC_LSI"] = DEAMC_LSI + res = NonObjectOptimizer(method="LLAMADEAMC_LSI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADEAMC_LSI = NonObjectOptimizer(method="LLAMADEAMC_LSI").set_name("LLAMADEAMC_LSI", register=True) except Exception as e: print("DEAMC_LSI can not be imported: ", e) - try: from nevergrad.optimization.lama.DEWithNelderMead import DEWithNelderMead lama_register["DEWithNelderMead"] = DEWithNelderMead - LLAMADEWithNelderMead = NonObjectOptimizer(method="LLAMADEWithNelderMead").set_name( - "LLAMADEWithNelderMead", register=True - ) + res = NonObjectOptimizer(method="LLAMADEWithNelderMead")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADEWithNelderMead = NonObjectOptimizer(method="LLAMADEWithNelderMead").set_name("LLAMADEWithNelderMead", register=True) except Exception as e: print("DEWithNelderMead can not be imported: ", e) - try: from nevergrad.optimization.lama.DHDGE import DHDGE lama_register["DHDGE"] = DHDGE + res = NonObjectOptimizer(method="LLAMADHDGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADHDGE = NonObjectOptimizer(method="LLAMADHDGE").set_name("LLAMADHDGE", register=True) except Exception as e: print("DHDGE can not be imported: ", e) - try: from nevergrad.optimization.lama.DLASS import DLASS lama_register["DLASS"] = DLASS + res = NonObjectOptimizer(method="LLAMADLASS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADLASS = NonObjectOptimizer(method="LLAMADLASS").set_name("LLAMADLASS", register=True) except Exception as e: print("DLASS can not be imported: ", e) - try: from nevergrad.optimization.lama.DMDE import DMDE lama_register["DMDE"] = DMDE + res = NonObjectOptimizer(method="LLAMADMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADMDE = NonObjectOptimizer(method="LLAMADMDE").set_name("LLAMADMDE", register=True) except Exception as e: print("DMDE can not be imported: ", e) - try: from nevergrad.optimization.lama.DMDESM import DMDESM lama_register["DMDESM"] = DMDESM + res = NonObjectOptimizer(method="LLAMADMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADMDESM = NonObjectOptimizer(method="LLAMADMDESM").set_name("LLAMADMDESM", register=True) except Exception as e: print("DMDESM can not be imported: ", e) - try: from nevergrad.optimization.lama.DMES import DMES lama_register["DMES"] = DMES + res = NonObjectOptimizer(method="LLAMADMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADMES = NonObjectOptimizer(method="LLAMADMES").set_name("LLAMADMES", register=True) except Exception as e: print("DMES can not be imported: ", e) - try: from nevergrad.optimization.lama.DNAS import DNAS lama_register["DNAS"] = DNAS + res = NonObjectOptimizer(method="LLAMADNAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADNAS = NonObjectOptimizer(method="LLAMADNAS").set_name("LLAMADNAS", register=True) except Exception as e: print("DNAS can not be imported: ", e) - try: from nevergrad.optimization.lama.DPADE import DPADE lama_register["DPADE"] = DPADE + res = NonObjectOptimizer(method="LLAMADPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADPADE = NonObjectOptimizer(method="LLAMADPADE").set_name("LLAMADPADE", register=True) except Exception as e: print("DPADE can not be imported: ", e) - try: from nevergrad.optimization.lama.DPES import DPES lama_register["DPES"] = DPES + res = NonObjectOptimizer(method="LLAMADPES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADPES = NonObjectOptimizer(method="LLAMADPES").set_name("LLAMADPES", register=True) except Exception as e: print("DPES can not be imported: ", e) - try: from nevergrad.optimization.lama.DSDE import DSDE lama_register["DSDE"] = DSDE + res = NonObjectOptimizer(method="LLAMADSDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADSDE = NonObjectOptimizer(method="LLAMADSDE").set_name("LLAMADSDE", register=True) except Exception as e: print("DSDE can not be imported: ", e) - try: from nevergrad.optimization.lama.DSEDES import DSEDES lama_register["DSEDES"] = DSEDES + res = NonObjectOptimizer(method="LLAMADSEDES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMADSEDES = NonObjectOptimizer(method="LLAMADSEDES").set_name("LLAMADSEDES", register=True) except Exception as e: print("DSEDES can not be imported: ", e) - try: - from nevergrad.optimization.lama.DifferentialEvolutionAdaptiveCrossover import ( - DifferentialEvolutionAdaptiveCrossover, - ) + from nevergrad.optimization.lama.DifferentialEvolutionAdaptiveCrossover import DifferentialEvolutionAdaptiveCrossover lama_register["DifferentialEvolutionAdaptiveCrossover"] = DifferentialEvolutionAdaptiveCrossover - LLAMADifferentialEvolutionAdaptiveCrossover = NonObjectOptimizer( - method="LLAMADifferentialEvolutionAdaptiveCrossover" - ).set_name("LLAMADifferentialEvolutionAdaptiveCrossover", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionAdaptiveCrossover = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptiveCrossover").set_name("LLAMADifferentialEvolutionAdaptiveCrossover", register=True) except Exception as e: print("DifferentialEvolutionAdaptiveCrossover can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialEvolutionAdaptivePSO import DifferentialEvolutionAdaptivePSO lama_register["DifferentialEvolutionAdaptivePSO"] = DifferentialEvolutionAdaptivePSO - LLAMADifferentialEvolutionAdaptivePSO = NonObjectOptimizer( - method="LLAMADifferentialEvolutionAdaptivePSO" - ).set_name("LLAMADifferentialEvolutionAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionAdaptivePSO = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptivePSO").set_name("LLAMADifferentialEvolutionAdaptivePSO", register=True) except Exception as e: print("DifferentialEvolutionAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialEvolutionHybrid import DifferentialEvolutionHybrid lama_register["DifferentialEvolutionHybrid"] = DifferentialEvolutionHybrid - LLAMADifferentialEvolutionHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid").set_name( - "LLAMADifferentialEvolutionHybrid", register=True - ) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid").set_name("LLAMADifferentialEvolutionHybrid", register=True) except Exception as e: print("DifferentialEvolutionHybrid can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialEvolutionOptimizer import DifferentialEvolutionOptimizer lama_register["DifferentialEvolutionOptimizer"] = DifferentialEvolutionOptimizer - LLAMADifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMADifferentialEvolutionOptimizer" - ).set_name("LLAMADifferentialEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMADifferentialEvolutionOptimizer").set_name("LLAMADifferentialEvolutionOptimizer", register=True) except Exception as e: print("DifferentialEvolutionOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialEvolutionPSOHybrid import DifferentialEvolutionPSOHybrid lama_register["DifferentialEvolutionPSOHybrid"] = DifferentialEvolutionPSOHybrid - LLAMADifferentialEvolutionPSOHybrid = NonObjectOptimizer( - method="LLAMADifferentialEvolutionPSOHybrid" - ).set_name("LLAMADifferentialEvolutionPSOHybrid", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionPSOHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionPSOHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionPSOHybrid").set_name("LLAMADifferentialEvolutionPSOHybrid", register=True) except Exception as e: print("DifferentialEvolutionPSOHybrid can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialEvolutionSearch import DifferentialEvolutionSearch lama_register["DifferentialEvolutionSearch"] = DifferentialEvolutionSearch - LLAMADifferentialEvolutionSearch = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch").set_name( - "LLAMADifferentialEvolutionSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialEvolutionSearch = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch").set_name("LLAMADifferentialEvolutionSearch", register=True) except Exception as e: print("DifferentialEvolutionSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialFireworkAlgorithm import DifferentialFireworkAlgorithm lama_register["DifferentialFireworkAlgorithm"] = DifferentialFireworkAlgorithm - LLAMADifferentialFireworkAlgorithm = NonObjectOptimizer( - method="LLAMADifferentialFireworkAlgorithm" - ).set_name("LLAMADifferentialFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMADifferentialFireworkAlgorithm").set_name("LLAMADifferentialFireworkAlgorithm", register=True) except Exception as e: print("DifferentialFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.DifferentialGradientEvolutionStrategy import ( - DifferentialGradientEvolutionStrategy, - ) + from nevergrad.optimization.lama.DifferentialGradientEvolutionStrategy import DifferentialGradientEvolutionStrategy lama_register["DifferentialGradientEvolutionStrategy"] = DifferentialGradientEvolutionStrategy - LLAMADifferentialGradientEvolutionStrategy = NonObjectOptimizer( - method="LLAMADifferentialGradientEvolutionStrategy" - ).set_name("LLAMADifferentialGradientEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialGradientEvolutionStrategy = NonObjectOptimizer(method="LLAMADifferentialGradientEvolutionStrategy").set_name("LLAMADifferentialGradientEvolutionStrategy", register=True) except Exception as e: print("DifferentialGradientEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialHarmonySearch import DifferentialHarmonySearch lama_register["DifferentialHarmonySearch"] = DifferentialHarmonySearch - LLAMADifferentialHarmonySearch = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch").set_name( - "LLAMADifferentialHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialHarmonySearch = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch").set_name("LLAMADifferentialHarmonySearch", register=True) except Exception as e: print("DifferentialHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialMemeticAlgorithm import DifferentialMemeticAlgorithm lama_register["DifferentialMemeticAlgorithm"] = DifferentialMemeticAlgorithm - LLAMADifferentialMemeticAlgorithm = NonObjectOptimizer( - method="LLAMADifferentialMemeticAlgorithm" - ).set_name("LLAMADifferentialMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMADifferentialMemeticAlgorithm").set_name("LLAMADifferentialMemeticAlgorithm", register=True) except Exception as e: print("DifferentialMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DifferentialQuantumMetaheuristic import DifferentialQuantumMetaheuristic lama_register["DifferentialQuantumMetaheuristic"] = DifferentialQuantumMetaheuristic - LLAMADifferentialQuantumMetaheuristic = NonObjectOptimizer( - method="LLAMADifferentialQuantumMetaheuristic" - ).set_name("LLAMADifferentialQuantumMetaheuristic", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialQuantumMetaheuristic = NonObjectOptimizer(method="LLAMADifferentialQuantumMetaheuristic").set_name("LLAMADifferentialQuantumMetaheuristic", register=True) except Exception as e: print("DifferentialQuantumMetaheuristic can not be imported: ", e) - try: - from nevergrad.optimization.lama.DifferentialSimulatedAnnealingOptimizer import ( - DifferentialSimulatedAnnealingOptimizer, - ) + from nevergrad.optimization.lama.DifferentialSimulatedAnnealingOptimizer import DifferentialSimulatedAnnealingOptimizer lama_register["DifferentialSimulatedAnnealingOptimizer"] = DifferentialSimulatedAnnealingOptimizer - LLAMADifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( - method="LLAMADifferentialSimulatedAnnealingOptimizer" - ).set_name("LLAMADifferentialSimulatedAnnealingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMADifferentialSimulatedAnnealingOptimizer").set_name("LLAMADifferentialSimulatedAnnealingOptimizer", register=True) except Exception as e: print("DifferentialSimulatedAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolution import ( - DiversityEnhancedAdaptiveGradientEvolution, - ) + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolution import DiversityEnhancedAdaptiveGradientEvolution lama_register["DiversityEnhancedAdaptiveGradientEvolution"] = DiversityEnhancedAdaptiveGradientEvolution - LLAMADiversityEnhancedAdaptiveGradientEvolution = NonObjectOptimizer( - method="LLAMADiversityEnhancedAdaptiveGradientEvolution" - ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADiversityEnhancedAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolution").set_name("LLAMADiversityEnhancedAdaptiveGradientEvolution", register=True) except Exception as e: print("DiversityEnhancedAdaptiveGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolutionV2 import ( - DiversityEnhancedAdaptiveGradientEvolutionV2, - ) + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolutionV2 import DiversityEnhancedAdaptiveGradientEvolutionV2 - lama_register["DiversityEnhancedAdaptiveGradientEvolutionV2"] = ( - DiversityEnhancedAdaptiveGradientEvolutionV2 - ) - LLAMADiversityEnhancedAdaptiveGradientEvolutionV2 = NonObjectOptimizer( - method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2" - ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolutionV2", register=True) + lama_register["DiversityEnhancedAdaptiveGradientEvolutionV2"] = DiversityEnhancedAdaptiveGradientEvolutionV2 + res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADiversityEnhancedAdaptiveGradientEvolutionV2 = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2").set_name("LLAMADiversityEnhancedAdaptiveGradientEvolutionV2", register=True) except Exception as e: print("DiversityEnhancedAdaptiveGradientEvolutionV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.DolphinPodOptimization import DolphinPodOptimization lama_register["DolphinPodOptimization"] = DolphinPodOptimization - LLAMADolphinPodOptimization = NonObjectOptimizer(method="LLAMADolphinPodOptimization").set_name( - "LLAMADolphinPodOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMADolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADolphinPodOptimization = NonObjectOptimizer(method="LLAMADolphinPodOptimization").set_name("LLAMADolphinPodOptimization", register=True) except Exception as e: print("DolphinPodOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DualAdaptiveRestartDE import DualAdaptiveRestartDE lama_register["DualAdaptiveRestartDE"] = DualAdaptiveRestartDE - LLAMADualAdaptiveRestartDE = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE").set_name( - "LLAMADualAdaptiveRestartDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualAdaptiveRestartDE = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE").set_name("LLAMADualAdaptiveRestartDE", register=True) except Exception as e: print("DualAdaptiveRestartDE can not be imported: ", e) - try: from nevergrad.optimization.lama.DualAdaptiveSearch import DualAdaptiveSearch lama_register["DualAdaptiveSearch"] = DualAdaptiveSearch - LLAMADualAdaptiveSearch = NonObjectOptimizer(method="LLAMADualAdaptiveSearch").set_name( - "LLAMADualAdaptiveSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADualAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualAdaptiveSearch = NonObjectOptimizer(method="LLAMADualAdaptiveSearch").set_name("LLAMADualAdaptiveSearch", register=True) except Exception as e: print("DualAdaptiveSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DualConvergenceEvolutiveStrategy import DualConvergenceEvolutiveStrategy lama_register["DualConvergenceEvolutiveStrategy"] = DualConvergenceEvolutiveStrategy - LLAMADualConvergenceEvolutiveStrategy = NonObjectOptimizer( - method="LLAMADualConvergenceEvolutiveStrategy" - ).set_name("LLAMADualConvergenceEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMADualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualConvergenceEvolutiveStrategy = NonObjectOptimizer(method="LLAMADualConvergenceEvolutiveStrategy").set_name("LLAMADualConvergenceEvolutiveStrategy", register=True) except Exception as e: print("DualConvergenceEvolutiveStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.DualModeOptimization import DualModeOptimization lama_register["DualModeOptimization"] = DualModeOptimization - LLAMADualModeOptimization = NonObjectOptimizer(method="LLAMADualModeOptimization").set_name( - "LLAMADualModeOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMADualModeOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualModeOptimization = NonObjectOptimizer(method="LLAMADualModeOptimization").set_name("LLAMADualModeOptimization", register=True) except Exception as e: print("DualModeOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseAdaptiveGradientEvolution import ( - DualPhaseAdaptiveGradientEvolution, - ) + from nevergrad.optimization.lama.DualPhaseAdaptiveGradientEvolution import DualPhaseAdaptiveGradientEvolution lama_register["DualPhaseAdaptiveGradientEvolution"] = DualPhaseAdaptiveGradientEvolution - LLAMADualPhaseAdaptiveGradientEvolution = NonObjectOptimizer( - method="LLAMADualPhaseAdaptiveGradientEvolution" - ).set_name("LLAMADualPhaseAdaptiveGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveGradientEvolution").set_name("LLAMADualPhaseAdaptiveGradientEvolution", register=True) except Exception as e: print("DualPhaseAdaptiveGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseAdaptiveHybridOptimizerV3 import ( - DualPhaseAdaptiveHybridOptimizerV3, - ) + from nevergrad.optimization.lama.DualPhaseAdaptiveHybridOptimizerV3 import DualPhaseAdaptiveHybridOptimizerV3 lama_register["DualPhaseAdaptiveHybridOptimizerV3"] = DualPhaseAdaptiveHybridOptimizerV3 - LLAMADualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMADualPhaseAdaptiveHybridOptimizerV3" - ).set_name("LLAMADualPhaseAdaptiveHybridOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveHybridOptimizerV3").set_name("LLAMADualPhaseAdaptiveHybridOptimizerV3", register=True) except Exception as e: print("DualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolution import ( - DualPhaseAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolution import DualPhaseAdaptiveMemeticDifferentialEvolution - lama_register["DualPhaseAdaptiveMemeticDifferentialEvolution"] = ( - DualPhaseAdaptiveMemeticDifferentialEvolution - ) - LLAMADualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution", register=True) + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolution"] = DualPhaseAdaptiveMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution").set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("DualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolutionV2 import ( - DualPhaseAdaptiveMemeticDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolutionV2 import DualPhaseAdaptiveMemeticDifferentialEvolutionV2 - lama_register["DualPhaseAdaptiveMemeticDifferentialEvolutionV2"] = ( - DualPhaseAdaptiveMemeticDifferentialEvolutionV2 - ) - LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2" - ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2", register=True) + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolutionV2"] = DualPhaseAdaptiveMemeticDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2").set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2", register=True) except Exception as e: print("DualPhaseAdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced import ( - DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced, - ) + from nevergrad.optimization.lama.DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced import DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced - lama_register["DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced"] = ( - DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced - ) - LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced = NonObjectOptimizer( - method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced" - ).set_name("LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced", register=True) + lama_register["DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced"] = DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced + res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced").set_name("LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced", register=True) except Exception as e: print("DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPhaseDifferentialEvolution import DualPhaseDifferentialEvolution lama_register["DualPhaseDifferentialEvolution"] = DualPhaseDifferentialEvolution - LLAMADualPhaseDifferentialEvolution = NonObjectOptimizer( - method="LLAMADualPhaseDifferentialEvolution" - ).set_name("LLAMADualPhaseDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMADualPhaseDifferentialEvolution").set_name("LLAMADualPhaseDifferentialEvolution", register=True) except Exception as e: print("DualPhaseDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPhaseOptimizationStrategy import DualPhaseOptimizationStrategy lama_register["DualPhaseOptimizationStrategy"] = DualPhaseOptimizationStrategy - LLAMADualPhaseOptimizationStrategy = NonObjectOptimizer( - method="LLAMADualPhaseOptimizationStrategy" - ).set_name("LLAMADualPhaseOptimizationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseOptimizationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseOptimizationStrategy = NonObjectOptimizer(method="LLAMADualPhaseOptimizationStrategy").set_name("LLAMADualPhaseOptimizationStrategy", register=True) except Exception as e: print("DualPhaseOptimizationStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPhaseQuantumMemeticSearch import DualPhaseQuantumMemeticSearch lama_register["DualPhaseQuantumMemeticSearch"] = DualPhaseQuantumMemeticSearch - LLAMADualPhaseQuantumMemeticSearch = NonObjectOptimizer( - method="LLAMADualPhaseQuantumMemeticSearch" - ).set_name("LLAMADualPhaseQuantumMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseQuantumMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseQuantumMemeticSearch = NonObjectOptimizer(method="LLAMADualPhaseQuantumMemeticSearch").set_name("LLAMADualPhaseQuantumMemeticSearch", register=True) except Exception as e: print("DualPhaseQuantumMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPhaseRefinedQuantumLocalSearchOptimizer import ( - DualPhaseRefinedQuantumLocalSearchOptimizer, - ) + from nevergrad.optimization.lama.DualPhaseRefinedQuantumLocalSearchOptimizer import DualPhaseRefinedQuantumLocalSearchOptimizer lama_register["DualPhaseRefinedQuantumLocalSearchOptimizer"] = DualPhaseRefinedQuantumLocalSearchOptimizer - LLAMADualPhaseRefinedQuantumLocalSearchOptimizer = NonObjectOptimizer( - method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer" - ).set_name("LLAMADualPhaseRefinedQuantumLocalSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPhaseRefinedQuantumLocalSearchOptimizer = NonObjectOptimizer(method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer").set_name("LLAMADualPhaseRefinedQuantumLocalSearchOptimizer", register=True) except Exception as e: print("DualPhaseRefinedQuantumLocalSearchOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPopulationADE import DualPopulationADE lama_register["DualPopulationADE"] = DualPopulationADE - LLAMADualPopulationADE = NonObjectOptimizer(method="LLAMADualPopulationADE").set_name( - "LLAMADualPopulationADE", register=True - ) + res = NonObjectOptimizer(method="LLAMADualPopulationADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPopulationADE = NonObjectOptimizer(method="LLAMADualPopulationADE").set_name("LLAMADualPopulationADE", register=True) except Exception as e: print("DualPopulationADE can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPopulationAdaptiveSearch import DualPopulationAdaptiveSearch lama_register["DualPopulationAdaptiveSearch"] = DualPopulationAdaptiveSearch - LLAMADualPopulationAdaptiveSearch = NonObjectOptimizer( - method="LLAMADualPopulationAdaptiveSearch" - ).set_name("LLAMADualPopulationAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMADualPopulationAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPopulationAdaptiveSearch = NonObjectOptimizer(method="LLAMADualPopulationAdaptiveSearch").set_name("LLAMADualPopulationAdaptiveSearch", register=True) except Exception as e: print("DualPopulationAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualPopulationCovarianceMatrixGradientSearch import ( - DualPopulationCovarianceMatrixGradientSearch, - ) + from nevergrad.optimization.lama.DualPopulationCovarianceMatrixGradientSearch import DualPopulationCovarianceMatrixGradientSearch - lama_register["DualPopulationCovarianceMatrixGradientSearch"] = ( - DualPopulationCovarianceMatrixGradientSearch - ) - LLAMADualPopulationCovarianceMatrixGradientSearch = NonObjectOptimizer( - method="LLAMADualPopulationCovarianceMatrixGradientSearch" - ).set_name("LLAMADualPopulationCovarianceMatrixGradientSearch", register=True) + lama_register["DualPopulationCovarianceMatrixGradientSearch"] = DualPopulationCovarianceMatrixGradientSearch + res = NonObjectOptimizer(method="LLAMADualPopulationCovarianceMatrixGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPopulationCovarianceMatrixGradientSearch = NonObjectOptimizer(method="LLAMADualPopulationCovarianceMatrixGradientSearch").set_name("LLAMADualPopulationCovarianceMatrixGradientSearch", register=True) except Exception as e: print("DualPopulationCovarianceMatrixGradientSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DualPopulationEnhancedSearch import DualPopulationEnhancedSearch lama_register["DualPopulationEnhancedSearch"] = DualPopulationEnhancedSearch - LLAMADualPopulationEnhancedSearch = NonObjectOptimizer( - method="LLAMADualPopulationEnhancedSearch" - ).set_name("LLAMADualPopulationEnhancedSearch", register=True) + res = NonObjectOptimizer(method="LLAMADualPopulationEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualPopulationEnhancedSearch = NonObjectOptimizer(method="LLAMADualPopulationEnhancedSearch").set_name("LLAMADualPopulationEnhancedSearch", register=True) except Exception as e: print("DualPopulationEnhancedSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DualStrategyAdaptiveDE import DualStrategyAdaptiveDE lama_register["DualStrategyAdaptiveDE"] = DualStrategyAdaptiveDE - LLAMADualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE").set_name( - "LLAMADualStrategyAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE").set_name("LLAMADualStrategyAdaptiveDE", register=True) except Exception as e: print("DualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualStrategyDifferentialEvolution import ( - DualStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.DualStrategyDifferentialEvolution import DualStrategyDifferentialEvolution lama_register["DualStrategyDifferentialEvolution"] = DualStrategyDifferentialEvolution - LLAMADualStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMADualStrategyDifferentialEvolution" - ).set_name("LLAMADualStrategyDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADualStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMADualStrategyDifferentialEvolution").set_name("LLAMADualStrategyDifferentialEvolution", register=True) except Exception as e: print("DualStrategyDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DualStrategyOptimizer import DualStrategyOptimizer lama_register["DualStrategyOptimizer"] = DualStrategyOptimizer - LLAMADualStrategyOptimizer = NonObjectOptimizer(method="LLAMADualStrategyOptimizer").set_name( - "LLAMADualStrategyOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMADualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualStrategyOptimizer = NonObjectOptimizer(method="LLAMADualStrategyOptimizer").set_name("LLAMADualStrategyOptimizer", register=True) except Exception as e: print("DualStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DualStrategyQuantumEvolutionOptimizer import ( - DualStrategyQuantumEvolutionOptimizer, - ) + from nevergrad.optimization.lama.DualStrategyQuantumEvolutionOptimizer import DualStrategyQuantumEvolutionOptimizer lama_register["DualStrategyQuantumEvolutionOptimizer"] = DualStrategyQuantumEvolutionOptimizer - LLAMADualStrategyQuantumEvolutionOptimizer = NonObjectOptimizer( - method="LLAMADualStrategyQuantumEvolutionOptimizer" - ).set_name("LLAMADualStrategyQuantumEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADualStrategyQuantumEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADualStrategyQuantumEvolutionOptimizer = NonObjectOptimizer(method="LLAMADualStrategyQuantumEvolutionOptimizer").set_name("LLAMADualStrategyQuantumEvolutionOptimizer", register=True) except Exception as e: print("DualStrategyQuantumEvolutionOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveClimbingStrategy import DynamicAdaptiveClimbingStrategy lama_register["DynamicAdaptiveClimbingStrategy"] = DynamicAdaptiveClimbingStrategy - LLAMADynamicAdaptiveClimbingStrategy = NonObjectOptimizer( - method="LLAMADynamicAdaptiveClimbingStrategy" - ).set_name("LLAMADynamicAdaptiveClimbingStrategy", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveClimbingStrategy = NonObjectOptimizer(method="LLAMADynamicAdaptiveClimbingStrategy").set_name("LLAMADynamicAdaptiveClimbingStrategy", register=True) except Exception as e: print("DynamicAdaptiveClimbingStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveCohortOptimization import ( - DynamicAdaptiveCohortOptimization, - ) + from nevergrad.optimization.lama.DynamicAdaptiveCohortOptimization import DynamicAdaptiveCohortOptimization lama_register["DynamicAdaptiveCohortOptimization"] = DynamicAdaptiveCohortOptimization - LLAMADynamicAdaptiveCohortOptimization = NonObjectOptimizer( - method="LLAMADynamicAdaptiveCohortOptimization" - ).set_name("LLAMADynamicAdaptiveCohortOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveCohortOptimization").set_name("LLAMADynamicAdaptiveCohortOptimization", register=True) except Exception as e: print("DynamicAdaptiveCohortOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveEliteHybridOptimizer import ( - DynamicAdaptiveEliteHybridOptimizer, - ) + from nevergrad.optimization.lama.DynamicAdaptiveEliteHybridOptimizer import DynamicAdaptiveEliteHybridOptimizer lama_register["DynamicAdaptiveEliteHybridOptimizer"] = DynamicAdaptiveEliteHybridOptimizer - LLAMADynamicAdaptiveEliteHybridOptimizer = NonObjectOptimizer( - method="LLAMADynamicAdaptiveEliteHybridOptimizer" - ).set_name("LLAMADynamicAdaptiveEliteHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveEliteHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveEliteHybridOptimizer").set_name("LLAMADynamicAdaptiveEliteHybridOptimizer", register=True) except Exception as e: print("DynamicAdaptiveEliteHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveEnhancedDifferentialEvolution import ( - DynamicAdaptiveEnhancedDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicAdaptiveEnhancedDifferentialEvolution import DynamicAdaptiveEnhancedDifferentialEvolution - lama_register["DynamicAdaptiveEnhancedDifferentialEvolution"] = ( - DynamicAdaptiveEnhancedDifferentialEvolution - ) - LLAMADynamicAdaptiveEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution" - ).set_name("LLAMADynamicAdaptiveEnhancedDifferentialEvolution", register=True) + lama_register["DynamicAdaptiveEnhancedDifferentialEvolution"] = DynamicAdaptiveEnhancedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution").set_name("LLAMADynamicAdaptiveEnhancedDifferentialEvolution", register=True) except Exception as e: print("DynamicAdaptiveEnhancedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimization import ( - DynamicAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimization import DynamicAdaptiveExplorationOptimization lama_register["DynamicAdaptiveExplorationOptimization"] = DynamicAdaptiveExplorationOptimization - LLAMADynamicAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMADynamicAdaptiveExplorationOptimization" - ).set_name("LLAMADynamicAdaptiveExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimization").set_name("LLAMADynamicAdaptiveExplorationOptimization", register=True) except Exception as e: print("DynamicAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimizer import ( - DynamicAdaptiveExplorationOptimizer, - ) + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimizer import DynamicAdaptiveExplorationOptimizer lama_register["DynamicAdaptiveExplorationOptimizer"] = DynamicAdaptiveExplorationOptimizer - LLAMADynamicAdaptiveExplorationOptimizer = NonObjectOptimizer( - method="LLAMADynamicAdaptiveExplorationOptimizer" - ).set_name("LLAMADynamicAdaptiveExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimizer").set_name("LLAMADynamicAdaptiveExplorationOptimizer", register=True) except Exception as e: print("DynamicAdaptiveExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveFireworkAlgorithm import DynamicAdaptiveFireworkAlgorithm lama_register["DynamicAdaptiveFireworkAlgorithm"] = DynamicAdaptiveFireworkAlgorithm - LLAMADynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMADynamicAdaptiveFireworkAlgorithm" - ).set_name("LLAMADynamicAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicAdaptiveFireworkAlgorithm").set_name("LLAMADynamicAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("DynamicAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveGradientDifferentialEvolution import ( - DynamicAdaptiveGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicAdaptiveGradientDifferentialEvolution import DynamicAdaptiveGradientDifferentialEvolution - lama_register["DynamicAdaptiveGradientDifferentialEvolution"] = ( - DynamicAdaptiveGradientDifferentialEvolution - ) - LLAMADynamicAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicAdaptiveGradientDifferentialEvolution" - ).set_name("LLAMADynamicAdaptiveGradientDifferentialEvolution", register=True) + lama_register["DynamicAdaptiveGradientDifferentialEvolution"] = DynamicAdaptiveGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveGradientDifferentialEvolution").set_name("LLAMADynamicAdaptiveGradientDifferentialEvolution", register=True) except Exception as e: print("DynamicAdaptiveGradientDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligence import ( - DynamicAdaptiveGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligence import DynamicAdaptiveGravitationalSwarmIntelligence - lama_register["DynamicAdaptiveGravitationalSwarmIntelligence"] = ( - DynamicAdaptiveGravitationalSwarmIntelligence - ) - LLAMADynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence" - ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligence", register=True) + lama_register["DynamicAdaptiveGravitationalSwarmIntelligence"] = DynamicAdaptiveGravitationalSwarmIntelligence + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligence", register=True) except Exception as e: print("DynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( - DynamicAdaptiveGravitationalSwarmIntelligenceV2, - ) + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligenceV2 import DynamicAdaptiveGravitationalSwarmIntelligenceV2 - lama_register["DynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( - DynamicAdaptiveGravitationalSwarmIntelligenceV2 - ) - LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( - method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2" - ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) + lama_register["DynamicAdaptiveGravitationalSwarmIntelligenceV2"] = DynamicAdaptiveGravitationalSwarmIntelligenceV2 + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) except Exception as e: print("DynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveHybridAlgorithm import DynamicAdaptiveHybridAlgorithm lama_register["DynamicAdaptiveHybridAlgorithm"] = DynamicAdaptiveHybridAlgorithm - LLAMADynamicAdaptiveHybridAlgorithm = NonObjectOptimizer( - method="LLAMADynamicAdaptiveHybridAlgorithm" - ).set_name("LLAMADynamicAdaptiveHybridAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridAlgorithm").set_name("LLAMADynamicAdaptiveHybridAlgorithm", register=True) except Exception as e: print("DynamicAdaptiveHybridAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveHybridDE import DynamicAdaptiveHybridDE lama_register["DynamicAdaptiveHybridDE"] = DynamicAdaptiveHybridDE - LLAMADynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE").set_name( - "LLAMADynamicAdaptiveHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE").set_name("LLAMADynamicAdaptiveHybridDE", register=True) except Exception as e: print("DynamicAdaptiveHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveHybridDEPSOWithEliteMemory import ( - DynamicAdaptiveHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.DynamicAdaptiveHybridDEPSOWithEliteMemory import DynamicAdaptiveHybridDEPSOWithEliteMemory lama_register["DynamicAdaptiveHybridDEPSOWithEliteMemory"] = DynamicAdaptiveHybridDEPSOWithEliteMemory - LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory" - ).set_name("LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("DynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimization import ( - DynamicAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimization import DynamicAdaptiveHybridOptimization lama_register["DynamicAdaptiveHybridOptimization"] = DynamicAdaptiveHybridOptimization - LLAMADynamicAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMADynamicAdaptiveHybridOptimization" - ).set_name("LLAMADynamicAdaptiveHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimization").set_name("LLAMADynamicAdaptiveHybridOptimization", register=True) except Exception as e: print("DynamicAdaptiveHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimizer import DynamicAdaptiveHybridOptimizer lama_register["DynamicAdaptiveHybridOptimizer"] = DynamicAdaptiveHybridOptimizer - LLAMADynamicAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMADynamicAdaptiveHybridOptimizer" - ).set_name("LLAMADynamicAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimizer").set_name("LLAMADynamicAdaptiveHybridOptimizer", register=True) except Exception as e: print("DynamicAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch import ( - DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch, - ) + from nevergrad.optimization.lama.DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch import DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch - lama_register["DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch"] = ( - DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch - ) - LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch = NonObjectOptimizer( - method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch" - ).set_name("LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch", register=True) + lama_register["DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch"] = DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch").set_name("LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch", register=True) except Exception as e: print("DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveMemeticOptimizer import DynamicAdaptiveMemeticOptimizer lama_register["DynamicAdaptiveMemeticOptimizer"] = DynamicAdaptiveMemeticOptimizer - LLAMADynamicAdaptiveMemeticOptimizer = NonObjectOptimizer( - method="LLAMADynamicAdaptiveMemeticOptimizer" - ).set_name("LLAMADynamicAdaptiveMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticOptimizer").set_name("LLAMADynamicAdaptiveMemeticOptimizer", register=True) except Exception as e: print("DynamicAdaptiveMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptivePopulationDifferentialEvolution import ( - DynamicAdaptivePopulationDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicAdaptivePopulationDifferentialEvolution import DynamicAdaptivePopulationDifferentialEvolution - lama_register["DynamicAdaptivePopulationDifferentialEvolution"] = ( - DynamicAdaptivePopulationDifferentialEvolution - ) - LLAMADynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicAdaptivePopulationDifferentialEvolution" - ).set_name("LLAMADynamicAdaptivePopulationDifferentialEvolution", register=True) + lama_register["DynamicAdaptivePopulationDifferentialEvolution"] = DynamicAdaptivePopulationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptivePopulationDifferentialEvolution").set_name("LLAMADynamicAdaptivePopulationDifferentialEvolution", register=True) except Exception as e: print("DynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveQuantumDifferentialEvolution import ( - DynamicAdaptiveQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicAdaptiveQuantumDifferentialEvolution import DynamicAdaptiveQuantumDifferentialEvolution lama_register["DynamicAdaptiveQuantumDifferentialEvolution"] = DynamicAdaptiveQuantumDifferentialEvolution - LLAMADynamicAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicAdaptiveQuantumDifferentialEvolution" - ).set_name("LLAMADynamicAdaptiveQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumDifferentialEvolution").set_name("LLAMADynamicAdaptiveQuantumDifferentialEvolution", register=True) except Exception as e: print("DynamicAdaptiveQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveQuantumLevyOptimizer import ( - DynamicAdaptiveQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.DynamicAdaptiveQuantumLevyOptimizer import DynamicAdaptiveQuantumLevyOptimizer lama_register["DynamicAdaptiveQuantumLevyOptimizer"] = DynamicAdaptiveQuantumLevyOptimizer - LLAMADynamicAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMADynamicAdaptiveQuantumLevyOptimizer" - ).set_name("LLAMADynamicAdaptiveQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumLevyOptimizer").set_name("LLAMADynamicAdaptiveQuantumLevyOptimizer", register=True) except Exception as e: print("DynamicAdaptiveQuantumLevyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveQuantumPSO import DynamicAdaptiveQuantumPSO lama_register["DynamicAdaptiveQuantumPSO"] = DynamicAdaptiveQuantumPSO - LLAMADynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO").set_name( - "LLAMADynamicAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO").set_name("LLAMADynamicAdaptiveQuantumPSO", register=True) except Exception as e: print("DynamicAdaptiveQuantumPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicAdaptiveQuasiRandomDEGradientAnnealing import ( - DynamicAdaptiveQuasiRandomDEGradientAnnealing, - ) + from nevergrad.optimization.lama.DynamicAdaptiveQuasiRandomDEGradientAnnealing import DynamicAdaptiveQuasiRandomDEGradientAnnealing - lama_register["DynamicAdaptiveQuasiRandomDEGradientAnnealing"] = ( - DynamicAdaptiveQuasiRandomDEGradientAnnealing - ) - LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( - method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing" - ).set_name("LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing", register=True) + lama_register["DynamicAdaptiveQuasiRandomDEGradientAnnealing"] = DynamicAdaptiveQuasiRandomDEGradientAnnealing + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing").set_name("LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing", register=True) except Exception as e: print("DynamicAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicAdaptiveSwarmOptimization import DynamicAdaptiveSwarmOptimization lama_register["DynamicAdaptiveSwarmOptimization"] = DynamicAdaptiveSwarmOptimization - LLAMADynamicAdaptiveSwarmOptimization = NonObjectOptimizer( - method="LLAMADynamicAdaptiveSwarmOptimization" - ).set_name("LLAMADynamicAdaptiveSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicAdaptiveSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveSwarmOptimization").set_name("LLAMADynamicAdaptiveSwarmOptimization", register=True) except Exception as e: print("DynamicAdaptiveSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicBalancingPSO import DynamicBalancingPSO lama_register["DynamicBalancingPSO"] = DynamicBalancingPSO - LLAMADynamicBalancingPSO = NonObjectOptimizer(method="LLAMADynamicBalancingPSO").set_name( - "LLAMADynamicBalancingPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicBalancingPSO = NonObjectOptimizer(method="LLAMADynamicBalancingPSO").set_name("LLAMADynamicBalancingPSO", register=True) except Exception as e: print("DynamicBalancingPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicClusterHybridOptimization import DynamicClusterHybridOptimization lama_register["DynamicClusterHybridOptimization"] = DynamicClusterHybridOptimization - LLAMADynamicClusterHybridOptimization = NonObjectOptimizer( - method="LLAMADynamicClusterHybridOptimization" - ).set_name("LLAMADynamicClusterHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicClusterHybridOptimization = NonObjectOptimizer(method="LLAMADynamicClusterHybridOptimization").set_name("LLAMADynamicClusterHybridOptimization", register=True) except Exception as e: print("DynamicClusterHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicCohortAdaptiveEvolution import DynamicCohortAdaptiveEvolution lama_register["DynamicCohortAdaptiveEvolution"] = DynamicCohortAdaptiveEvolution - LLAMADynamicCohortAdaptiveEvolution = NonObjectOptimizer( - method="LLAMADynamicCohortAdaptiveEvolution" - ).set_name("LLAMADynamicCohortAdaptiveEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicCohortAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicCohortAdaptiveEvolution = NonObjectOptimizer(method="LLAMADynamicCohortAdaptiveEvolution").set_name("LLAMADynamicCohortAdaptiveEvolution", register=True) except Exception as e: print("DynamicCohortAdaptiveEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicCohortMemeticAlgorithm import DynamicCohortMemeticAlgorithm lama_register["DynamicCohortMemeticAlgorithm"] = DynamicCohortMemeticAlgorithm - LLAMADynamicCohortMemeticAlgorithm = NonObjectOptimizer( - method="LLAMADynamicCohortMemeticAlgorithm" - ).set_name("LLAMADynamicCohortMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMADynamicCohortMemeticAlgorithm").set_name("LLAMADynamicCohortMemeticAlgorithm", register=True) except Exception as e: print("DynamicCohortMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicCohortOptimization import DynamicCohortOptimization lama_register["DynamicCohortOptimization"] = DynamicCohortOptimization - LLAMADynamicCohortOptimization = NonObjectOptimizer(method="LLAMADynamicCohortOptimization").set_name( - "LLAMADynamicCohortOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicCohortOptimization = NonObjectOptimizer(method="LLAMADynamicCohortOptimization").set_name("LLAMADynamicCohortOptimization", register=True) except Exception as e: print("DynamicCohortOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicCrowdedDE import DynamicCrowdedDE lama_register["DynamicCrowdedDE"] = DynamicCrowdedDE - LLAMADynamicCrowdedDE = NonObjectOptimizer(method="LLAMADynamicCrowdedDE").set_name( - "LLAMADynamicCrowdedDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicCrowdedDE = NonObjectOptimizer(method="LLAMADynamicCrowdedDE").set_name("LLAMADynamicCrowdedDE", register=True) except Exception as e: print("DynamicCrowdedDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicCulturalDifferentialEvolution import ( - DynamicCulturalDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicCulturalDifferentialEvolution import DynamicCulturalDifferentialEvolution lama_register["DynamicCulturalDifferentialEvolution"] = DynamicCulturalDifferentialEvolution - LLAMADynamicCulturalDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicCulturalDifferentialEvolution" - ).set_name("LLAMADynamicCulturalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicCulturalDifferentialEvolution").set_name("LLAMADynamicCulturalDifferentialEvolution", register=True) except Exception as e: print("DynamicCulturalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicEliteAdaptiveHybridOptimizerV2 import ( - DynamicEliteAdaptiveHybridOptimizerV2, - ) + from nevergrad.optimization.lama.DynamicEliteAdaptiveHybridOptimizerV2 import DynamicEliteAdaptiveHybridOptimizerV2 lama_register["DynamicEliteAdaptiveHybridOptimizerV2"] = DynamicEliteAdaptiveHybridOptimizerV2 - LLAMADynamicEliteAdaptiveHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMADynamicEliteAdaptiveHybridOptimizerV2" - ).set_name("LLAMADynamicEliteAdaptiveHybridOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMADynamicEliteAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEliteAdaptiveHybridOptimizerV2 = NonObjectOptimizer(method="LLAMADynamicEliteAdaptiveHybridOptimizerV2").set_name("LLAMADynamicEliteAdaptiveHybridOptimizerV2", register=True) except Exception as e: print("DynamicEliteAdaptiveHybridOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicEliteAnnealingDE import DynamicEliteAnnealingDE lama_register["DynamicEliteAnnealingDE"] = DynamicEliteAnnealingDE - LLAMADynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE").set_name( - "LLAMADynamicEliteAnnealingDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE").set_name("LLAMADynamicEliteAnnealingDE", register=True) except Exception as e: print("DynamicEliteAnnealingDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicEliteCovarianceMemeticSearch import ( - DynamicEliteCovarianceMemeticSearch, - ) + from nevergrad.optimization.lama.DynamicEliteCovarianceMemeticSearch import DynamicEliteCovarianceMemeticSearch lama_register["DynamicEliteCovarianceMemeticSearch"] = DynamicEliteCovarianceMemeticSearch - LLAMADynamicEliteCovarianceMemeticSearch = NonObjectOptimizer( - method="LLAMADynamicEliteCovarianceMemeticSearch" - ).set_name("LLAMADynamicEliteCovarianceMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMADynamicEliteCovarianceMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEliteCovarianceMemeticSearch = NonObjectOptimizer(method="LLAMADynamicEliteCovarianceMemeticSearch").set_name("LLAMADynamicEliteCovarianceMemeticSearch", register=True) except Exception as e: print("DynamicEliteCovarianceMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicEliteEnhancedDifferentialEvolution import ( - DynamicEliteEnhancedDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicEliteEnhancedDifferentialEvolution import DynamicEliteEnhancedDifferentialEvolution lama_register["DynamicEliteEnhancedDifferentialEvolution"] = DynamicEliteEnhancedDifferentialEvolution - LLAMADynamicEliteEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicEliteEnhancedDifferentialEvolution" - ).set_name("LLAMADynamicEliteEnhancedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicEliteEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEliteEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicEliteEnhancedDifferentialEvolution").set_name("LLAMADynamicEliteEnhancedDifferentialEvolution", register=True) except Exception as e: print("DynamicEliteEnhancedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicElitistHybridOptimizer import DynamicElitistHybridOptimizer lama_register["DynamicElitistHybridOptimizer"] = DynamicElitistHybridOptimizer - LLAMADynamicElitistHybridOptimizer = NonObjectOptimizer( - method="LLAMADynamicElitistHybridOptimizer" - ).set_name("LLAMADynamicElitistHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicElitistHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicElitistHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicElitistHybridOptimizer").set_name("LLAMADynamicElitistHybridOptimizer", register=True) except Exception as e: print("DynamicElitistHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicEnhancedDifferentialFireworkAlgorithm import ( - DynamicEnhancedDifferentialFireworkAlgorithm, - ) + from nevergrad.optimization.lama.DynamicEnhancedDifferentialFireworkAlgorithm import DynamicEnhancedDifferentialFireworkAlgorithm - lama_register["DynamicEnhancedDifferentialFireworkAlgorithm"] = ( - DynamicEnhancedDifferentialFireworkAlgorithm - ) - LLAMADynamicEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( - method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm" - ).set_name("LLAMADynamicEnhancedDifferentialFireworkAlgorithm", register=True) + lama_register["DynamicEnhancedDifferentialFireworkAlgorithm"] = DynamicEnhancedDifferentialFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm").set_name("LLAMADynamicEnhancedDifferentialFireworkAlgorithm", register=True) except Exception as e: print("DynamicEnhancedDifferentialFireworkAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicEnhancedHybridOptimizer import DynamicEnhancedHybridOptimizer lama_register["DynamicEnhancedHybridOptimizer"] = DynamicEnhancedHybridOptimizer - LLAMADynamicEnhancedHybridOptimizer = NonObjectOptimizer( - method="LLAMADynamicEnhancedHybridOptimizer" - ).set_name("LLAMADynamicEnhancedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicEnhancedHybridOptimizer").set_name("LLAMADynamicEnhancedHybridOptimizer", register=True) except Exception as e: print("DynamicEnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicExplorationExploitationAlgorithm import ( - DynamicExplorationExploitationAlgorithm, - ) + from nevergrad.optimization.lama.DynamicExplorationExploitationAlgorithm import DynamicExplorationExploitationAlgorithm lama_register["DynamicExplorationExploitationAlgorithm"] = DynamicExplorationExploitationAlgorithm - LLAMADynamicExplorationExploitationAlgorithm = NonObjectOptimizer( - method="LLAMADynamicExplorationExploitationAlgorithm" - ).set_name("LLAMADynamicExplorationExploitationAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationAlgorithm").set_name("LLAMADynamicExplorationExploitationAlgorithm", register=True) except Exception as e: print("DynamicExplorationExploitationAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicExplorationExploitationDE import DynamicExplorationExploitationDE lama_register["DynamicExplorationExploitationDE"] = DynamicExplorationExploitationDE - LLAMADynamicExplorationExploitationDE = NonObjectOptimizer( - method="LLAMADynamicExplorationExploitationDE" - ).set_name("LLAMADynamicExplorationExploitationDE", register=True) + res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicExplorationExploitationDE = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationDE").set_name("LLAMADynamicExplorationExploitationDE", register=True) except Exception as e: print("DynamicExplorationExploitationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicExplorationExploitationMemeticAlgorithm import ( - DynamicExplorationExploitationMemeticAlgorithm, - ) + from nevergrad.optimization.lama.DynamicExplorationExploitationMemeticAlgorithm import DynamicExplorationExploitationMemeticAlgorithm - lama_register["DynamicExplorationExploitationMemeticAlgorithm"] = ( - DynamicExplorationExploitationMemeticAlgorithm - ) - LLAMADynamicExplorationExploitationMemeticAlgorithm = NonObjectOptimizer( - method="LLAMADynamicExplorationExploitationMemeticAlgorithm" - ).set_name("LLAMADynamicExplorationExploitationMemeticAlgorithm", register=True) + lama_register["DynamicExplorationExploitationMemeticAlgorithm"] = DynamicExplorationExploitationMemeticAlgorithm + res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicExplorationExploitationMemeticAlgorithm = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationMemeticAlgorithm").set_name("LLAMADynamicExplorationExploitationMemeticAlgorithm", register=True) except Exception as e: print("DynamicExplorationExploitationMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicExplorationOptimization import DynamicExplorationOptimization lama_register["DynamicExplorationOptimization"] = DynamicExplorationOptimization - LLAMADynamicExplorationOptimization = NonObjectOptimizer( - method="LLAMADynamicExplorationOptimization" - ).set_name("LLAMADynamicExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicExplorationOptimization = NonObjectOptimizer(method="LLAMADynamicExplorationOptimization").set_name("LLAMADynamicExplorationOptimization", register=True) except Exception as e: print("DynamicExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicFireworkAlgorithm import DynamicFireworkAlgorithm lama_register["DynamicFireworkAlgorithm"] = DynamicFireworkAlgorithm - LLAMADynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm").set_name( - "LLAMADynamicFireworkAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm").set_name("LLAMADynamicFireworkAlgorithm", register=True) except Exception as e: print("DynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicFireworksSwarmOptimization import ( - DynamicFireworksSwarmOptimization, - ) + from nevergrad.optimization.lama.DynamicFireworksSwarmOptimization import DynamicFireworksSwarmOptimization lama_register["DynamicFireworksSwarmOptimization"] = DynamicFireworksSwarmOptimization - LLAMADynamicFireworksSwarmOptimization = NonObjectOptimizer( - method="LLAMADynamicFireworksSwarmOptimization" - ).set_name("LLAMADynamicFireworksSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicFireworksSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicFireworksSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicFireworksSwarmOptimization").set_name("LLAMADynamicFireworksSwarmOptimization", register=True) except Exception as e: print("DynamicFireworksSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicFractionalClusterOptimization import ( - DynamicFractionalClusterOptimization, - ) + from nevergrad.optimization.lama.DynamicFractionalClusterOptimization import DynamicFractionalClusterOptimization lama_register["DynamicFractionalClusterOptimization"] = DynamicFractionalClusterOptimization - LLAMADynamicFractionalClusterOptimization = NonObjectOptimizer( - method="LLAMADynamicFractionalClusterOptimization" - ).set_name("LLAMADynamicFractionalClusterOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicFractionalClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicFractionalClusterOptimization = NonObjectOptimizer(method="LLAMADynamicFractionalClusterOptimization").set_name("LLAMADynamicFractionalClusterOptimization", register=True) except Exception as e: print("DynamicFractionalClusterOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealing import ( - DynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealing import DynamicGradientBoostedMemorySimulatedAnnealing - lama_register["DynamicGradientBoostedMemorySimulatedAnnealing"] = ( - DynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMADynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["DynamicGradientBoostedMemorySimulatedAnnealing"] = DynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("DynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealingV2 import ( - DynamicGradientBoostedMemorySimulatedAnnealingV2, - ) + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealingV2 import DynamicGradientBoostedMemorySimulatedAnnealingV2 - lama_register["DynamicGradientBoostedMemorySimulatedAnnealingV2"] = ( - DynamicGradientBoostedMemorySimulatedAnnealingV2 - ) - LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2 = NonObjectOptimizer( - method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2" - ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2", register=True) + lama_register["DynamicGradientBoostedMemorySimulatedAnnealingV2"] = DynamicGradientBoostedMemorySimulatedAnnealingV2 + res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2").set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2", register=True) except Exception as e: print("DynamicGradientBoostedMemorySimulatedAnnealingV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicGradientBoostedRefinementAnnealing import ( - DynamicGradientBoostedRefinementAnnealing, - ) + from nevergrad.optimization.lama.DynamicGradientBoostedRefinementAnnealing import DynamicGradientBoostedRefinementAnnealing lama_register["DynamicGradientBoostedRefinementAnnealing"] = DynamicGradientBoostedRefinementAnnealing - LLAMADynamicGradientBoostedRefinementAnnealing = NonObjectOptimizer( - method="LLAMADynamicGradientBoostedRefinementAnnealing" - ).set_name("LLAMADynamicGradientBoostedRefinementAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedRefinementAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicGradientBoostedRefinementAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientBoostedRefinementAnnealing").set_name("LLAMADynamicGradientBoostedRefinementAnnealing", register=True) except Exception as e: print("DynamicGradientBoostedRefinementAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicGradientEnhancedAnnealing import DynamicGradientEnhancedAnnealing lama_register["DynamicGradientEnhancedAnnealing"] = DynamicGradientEnhancedAnnealing - LLAMADynamicGradientEnhancedAnnealing = NonObjectOptimizer( - method="LLAMADynamicGradientEnhancedAnnealing" - ).set_name("LLAMADynamicGradientEnhancedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMADynamicGradientEnhancedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicGradientEnhancedAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientEnhancedAnnealing").set_name("LLAMADynamicGradientEnhancedAnnealing", register=True) except Exception as e: print("DynamicGradientEnhancedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicHybridAnnealing import DynamicHybridAnnealing lama_register["DynamicHybridAnnealing"] = DynamicHybridAnnealing - LLAMADynamicHybridAnnealing = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing").set_name( - "LLAMADynamicHybridAnnealing", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicHybridAnnealing = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing").set_name("LLAMADynamicHybridAnnealing", register=True) except Exception as e: print("DynamicHybridAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicHybridOptimizer import DynamicHybridOptimizer lama_register["DynamicHybridOptimizer"] = DynamicHybridOptimizer - LLAMADynamicHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer").set_name( - "LLAMADynamicHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer").set_name("LLAMADynamicHybridOptimizer", register=True) except Exception as e: print("DynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicHybridQuantumDifferentialEvolution import ( - DynamicHybridQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicHybridQuantumDifferentialEvolution import DynamicHybridQuantumDifferentialEvolution lama_register["DynamicHybridQuantumDifferentialEvolution"] = DynamicHybridQuantumDifferentialEvolution - LLAMADynamicHybridQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicHybridQuantumDifferentialEvolution" - ).set_name("LLAMADynamicHybridQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicHybridQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicHybridQuantumDifferentialEvolution").set_name("LLAMADynamicHybridQuantumDifferentialEvolution", register=True) except Exception as e: print("DynamicHybridQuantumDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicHybridSelfAdaptiveDE import DynamicHybridSelfAdaptiveDE lama_register["DynamicHybridSelfAdaptiveDE"] = DynamicHybridSelfAdaptiveDE - LLAMADynamicHybridSelfAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE").set_name( - "LLAMADynamicHybridSelfAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicHybridSelfAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE").set_name("LLAMADynamicHybridSelfAdaptiveDE", register=True) except Exception as e: print("DynamicHybridSelfAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicLevyHarmonySearch import DynamicLevyHarmonySearch lama_register["DynamicLevyHarmonySearch"] = DynamicLevyHarmonySearch - LLAMADynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch").set_name( - "LLAMADynamicLevyHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch").set_name("LLAMADynamicLevyHarmonySearch", register=True) except Exception as e: print("DynamicLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicLocalSearchFireworkAlgorithm import ( - DynamicLocalSearchFireworkAlgorithm, - ) + from nevergrad.optimization.lama.DynamicLocalSearchFireworkAlgorithm import DynamicLocalSearchFireworkAlgorithm lama_register["DynamicLocalSearchFireworkAlgorithm"] = DynamicLocalSearchFireworkAlgorithm - LLAMADynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( - method="LLAMADynamicLocalSearchFireworkAlgorithm" - ).set_name("LLAMADynamicLocalSearchFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicLocalSearchFireworkAlgorithm").set_name("LLAMADynamicLocalSearchFireworkAlgorithm", register=True) except Exception as e: print("DynamicLocalSearchFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicMemeticDifferentialEvolutionWithAdaptiveElitism import ( - DynamicMemeticDifferentialEvolutionWithAdaptiveElitism, - ) + from nevergrad.optimization.lama.DynamicMemeticDifferentialEvolutionWithAdaptiveElitism import DynamicMemeticDifferentialEvolutionWithAdaptiveElitism - lama_register["DynamicMemeticDifferentialEvolutionWithAdaptiveElitism"] = ( - DynamicMemeticDifferentialEvolutionWithAdaptiveElitism - ) - LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism = NonObjectOptimizer( - method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism" - ).set_name("LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism", register=True) + lama_register["DynamicMemeticDifferentialEvolutionWithAdaptiveElitism"] = DynamicMemeticDifferentialEvolutionWithAdaptiveElitism + res = NonObjectOptimizer(method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism = NonObjectOptimizer(method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism").set_name("LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism", register=True) except Exception as e: print("DynamicMemeticDifferentialEvolutionWithAdaptiveElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicMemoryAdaptiveConvergenceStrategyV76 import ( - DynamicMemoryAdaptiveConvergenceStrategyV76, - ) + from nevergrad.optimization.lama.DynamicMemoryAdaptiveConvergenceStrategyV76 import DynamicMemoryAdaptiveConvergenceStrategyV76 lama_register["DynamicMemoryAdaptiveConvergenceStrategyV76"] = DynamicMemoryAdaptiveConvergenceStrategyV76 - LLAMADynamicMemoryAdaptiveConvergenceStrategyV76 = NonObjectOptimizer( - method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76" - ).set_name("LLAMADynamicMemoryAdaptiveConvergenceStrategyV76", register=True) + res = NonObjectOptimizer(method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMemoryAdaptiveConvergenceStrategyV76 = NonObjectOptimizer(method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76").set_name("LLAMADynamicMemoryAdaptiveConvergenceStrategyV76", register=True) except Exception as e: print("DynamicMemoryAdaptiveConvergenceStrategyV76 can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicMemoryEnhancedDualPhaseStrategyV66 import ( - DynamicMemoryEnhancedDualPhaseStrategyV66, - ) + from nevergrad.optimization.lama.DynamicMemoryEnhancedDualPhaseStrategyV66 import DynamicMemoryEnhancedDualPhaseStrategyV66 lama_register["DynamicMemoryEnhancedDualPhaseStrategyV66"] = DynamicMemoryEnhancedDualPhaseStrategyV66 - LLAMADynamicMemoryEnhancedDualPhaseStrategyV66 = NonObjectOptimizer( - method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66" - ).set_name("LLAMADynamicMemoryEnhancedDualPhaseStrategyV66", register=True) + res = NonObjectOptimizer(method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMemoryEnhancedDualPhaseStrategyV66 = NonObjectOptimizer(method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66").set_name("LLAMADynamicMemoryEnhancedDualPhaseStrategyV66", register=True) except Exception as e: print("DynamicMemoryEnhancedDualPhaseStrategyV66 can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicMemoryHybridSearch import DynamicMemoryHybridSearch lama_register["DynamicMemoryHybridSearch"] = DynamicMemoryHybridSearch - LLAMADynamicMemoryHybridSearch = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch").set_name( - "LLAMADynamicMemoryHybridSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMemoryHybridSearch = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch").set_name("LLAMADynamicMemoryHybridSearch", register=True) except Exception as e: print("DynamicMemoryHybridSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicMultiPhaseAnnealingPlus import DynamicMultiPhaseAnnealingPlus lama_register["DynamicMultiPhaseAnnealingPlus"] = DynamicMultiPhaseAnnealingPlus - LLAMADynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( - method="LLAMADynamicMultiPhaseAnnealingPlus" - ).set_name("LLAMADynamicMultiPhaseAnnealingPlus", register=True) + res = NonObjectOptimizer(method="LLAMADynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMultiPhaseAnnealingPlus = NonObjectOptimizer(method="LLAMADynamicMultiPhaseAnnealingPlus").set_name("LLAMADynamicMultiPhaseAnnealingPlus", register=True) except Exception as e: print("DynamicMultiPhaseAnnealingPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicMultiStrategyOptimizer import DynamicMultiStrategyOptimizer lama_register["DynamicMultiStrategyOptimizer"] = DynamicMultiStrategyOptimizer - LLAMADynamicMultiStrategyOptimizer = NonObjectOptimizer( - method="LLAMADynamicMultiStrategyOptimizer" - ).set_name("LLAMADynamicMultiStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMADynamicMultiStrategyOptimizer").set_name("LLAMADynamicMultiStrategyOptimizer", register=True) except Exception as e: print("DynamicMultiStrategyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicNichePSO_DE_LS import DynamicNichePSO_DE_LS lama_register["DynamicNichePSO_DE_LS"] = DynamicNichePSO_DE_LS - LLAMADynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS").set_name( - "LLAMADynamicNichePSO_DE_LS", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS").set_name("LLAMADynamicNichePSO_DE_LS", register=True) except Exception as e: print("DynamicNichePSO_DE_LS can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicNichingDEPSOWithRestart import DynamicNichingDEPSOWithRestart lama_register["DynamicNichingDEPSOWithRestart"] = DynamicNichingDEPSOWithRestart - LLAMADynamicNichingDEPSOWithRestart = NonObjectOptimizer( - method="LLAMADynamicNichingDEPSOWithRestart" - ).set_name("LLAMADynamicNichingDEPSOWithRestart", register=True) + res = NonObjectOptimizer(method="LLAMADynamicNichingDEPSOWithRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicNichingDEPSOWithRestart = NonObjectOptimizer(method="LLAMADynamicNichingDEPSOWithRestart").set_name("LLAMADynamicNichingDEPSOWithRestart", register=True) except Exception as e: print("DynamicNichingDEPSOWithRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicPopulationAdaptiveGradientEvolution import ( - DynamicPopulationAdaptiveGradientEvolution, - ) + from nevergrad.optimization.lama.DynamicPopulationAdaptiveGradientEvolution import DynamicPopulationAdaptiveGradientEvolution lama_register["DynamicPopulationAdaptiveGradientEvolution"] = DynamicPopulationAdaptiveGradientEvolution - LLAMADynamicPopulationAdaptiveGradientEvolution = NonObjectOptimizer( - method="LLAMADynamicPopulationAdaptiveGradientEvolution" - ).set_name("LLAMADynamicPopulationAdaptiveGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicPopulationAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPopulationAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADynamicPopulationAdaptiveGradientEvolution").set_name("LLAMADynamicPopulationAdaptiveGradientEvolution", register=True) except Exception as e: print("DynamicPopulationAdaptiveGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicPopulationMemeticDifferentialEvolution import ( - DynamicPopulationMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicPopulationMemeticDifferentialEvolution import DynamicPopulationMemeticDifferentialEvolution - lama_register["DynamicPopulationMemeticDifferentialEvolution"] = ( - DynamicPopulationMemeticDifferentialEvolution - ) - LLAMADynamicPopulationMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicPopulationMemeticDifferentialEvolution" - ).set_name("LLAMADynamicPopulationMemeticDifferentialEvolution", register=True) + lama_register["DynamicPopulationMemeticDifferentialEvolution"] = DynamicPopulationMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADynamicPopulationMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPopulationMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicPopulationMemeticDifferentialEvolution").set_name("LLAMADynamicPopulationMemeticDifferentialEvolution", register=True) except Exception as e: print("DynamicPopulationMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicPrecisionBalancedEvolution import ( - DynamicPrecisionBalancedEvolution, - ) + from nevergrad.optimization.lama.DynamicPrecisionBalancedEvolution import DynamicPrecisionBalancedEvolution lama_register["DynamicPrecisionBalancedEvolution"] = DynamicPrecisionBalancedEvolution - LLAMADynamicPrecisionBalancedEvolution = NonObjectOptimizer( - method="LLAMADynamicPrecisionBalancedEvolution" - ).set_name("LLAMADynamicPrecisionBalancedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPrecisionBalancedEvolution = NonObjectOptimizer(method="LLAMADynamicPrecisionBalancedEvolution").set_name("LLAMADynamicPrecisionBalancedEvolution", register=True) except Exception as e: print("DynamicPrecisionBalancedEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicPrecisionCosineDifferentialSwarm import ( - DynamicPrecisionCosineDifferentialSwarm, - ) + from nevergrad.optimization.lama.DynamicPrecisionCosineDifferentialSwarm import DynamicPrecisionCosineDifferentialSwarm lama_register["DynamicPrecisionCosineDifferentialSwarm"] = DynamicPrecisionCosineDifferentialSwarm - LLAMADynamicPrecisionCosineDifferentialSwarm = NonObjectOptimizer( - method="LLAMADynamicPrecisionCosineDifferentialSwarm" - ).set_name("LLAMADynamicPrecisionCosineDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicPrecisionCosineDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPrecisionCosineDifferentialSwarm = NonObjectOptimizer(method="LLAMADynamicPrecisionCosineDifferentialSwarm").set_name("LLAMADynamicPrecisionCosineDifferentialSwarm", register=True) except Exception as e: print("DynamicPrecisionCosineDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicPrecisionExplorationOptimizer import ( - DynamicPrecisionExplorationOptimizer, - ) + from nevergrad.optimization.lama.DynamicPrecisionExplorationOptimizer import DynamicPrecisionExplorationOptimizer lama_register["DynamicPrecisionExplorationOptimizer"] = DynamicPrecisionExplorationOptimizer - LLAMADynamicPrecisionExplorationOptimizer = NonObjectOptimizer( - method="LLAMADynamicPrecisionExplorationOptimizer" - ).set_name("LLAMADynamicPrecisionExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicPrecisionExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPrecisionExplorationOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionExplorationOptimizer").set_name("LLAMADynamicPrecisionExplorationOptimizer", register=True) except Exception as e: print("DynamicPrecisionExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicPrecisionOptimizer import DynamicPrecisionOptimizer lama_register["DynamicPrecisionOptimizer"] = DynamicPrecisionOptimizer - LLAMADynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer").set_name( - "LLAMADynamicPrecisionOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer").set_name("LLAMADynamicPrecisionOptimizer", register=True) except Exception as e: print("DynamicPrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumAdaptiveEvolutionStrategy import ( - DynamicQuantumAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.DynamicQuantumAdaptiveEvolutionStrategy import DynamicQuantumAdaptiveEvolutionStrategy lama_register["DynamicQuantumAdaptiveEvolutionStrategy"] = DynamicQuantumAdaptiveEvolutionStrategy - LLAMADynamicQuantumAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMADynamicQuantumAdaptiveEvolutionStrategy" - ).set_name("LLAMADynamicQuantumAdaptiveEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMADynamicQuantumAdaptiveEvolutionStrategy").set_name("LLAMADynamicQuantumAdaptiveEvolutionStrategy", register=True) except Exception as e: print("DynamicQuantumAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolution import ( - DynamicQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolution import DynamicQuantumDifferentialEvolution lama_register["DynamicQuantumDifferentialEvolution"] = DynamicQuantumDifferentialEvolution - LLAMADynamicQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicQuantumDifferentialEvolution" - ).set_name("LLAMADynamicQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolution").set_name("LLAMADynamicQuantumDifferentialEvolution", register=True) except Exception as e: print("DynamicQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch import ( - DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch, - ) + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch import DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch - lama_register["DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch"] = ( - DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch - ) - LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch = NonObjectOptimizer( - method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch" - ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch", register=True) + lama_register["DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch"] = DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch + res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch").set_name("LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch", register=True) except Exception as e: print("DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart import ( - DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart, - ) + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart import DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart - lama_register["DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart"] = ( - DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart - ) - LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart = NonObjectOptimizer( - method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart" - ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart", register=True) + lama_register["DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart"] = DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart + res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart").set_name("LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart", register=True) except Exception as e: print("DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicQuantumEvolution import DynamicQuantumEvolution lama_register["DynamicQuantumEvolution"] = DynamicQuantumEvolution - LLAMADynamicQuantumEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution").set_name( - "LLAMADynamicQuantumEvolution", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution").set_name("LLAMADynamicQuantumEvolution", register=True) except Exception as e: print("DynamicQuantumEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumGuidedHybridSearchV7 import ( - DynamicQuantumGuidedHybridSearchV7, - ) + from nevergrad.optimization.lama.DynamicQuantumGuidedHybridSearchV7 import DynamicQuantumGuidedHybridSearchV7 lama_register["DynamicQuantumGuidedHybridSearchV7"] = DynamicQuantumGuidedHybridSearchV7 - LLAMADynamicQuantumGuidedHybridSearchV7 = NonObjectOptimizer( - method="LLAMADynamicQuantumGuidedHybridSearchV7" - ).set_name("LLAMADynamicQuantumGuidedHybridSearchV7", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumGuidedHybridSearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumGuidedHybridSearchV7 = NonObjectOptimizer(method="LLAMADynamicQuantumGuidedHybridSearchV7").set_name("LLAMADynamicQuantumGuidedHybridSearchV7", register=True) except Exception as e: print("DynamicQuantumGuidedHybridSearchV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialHybridSearch import ( - DynamicQuantumLevyDifferentialHybridSearch, - ) + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialHybridSearch import DynamicQuantumLevyDifferentialHybridSearch lama_register["DynamicQuantumLevyDifferentialHybridSearch"] = DynamicQuantumLevyDifferentialHybridSearch - LLAMADynamicQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( - method="LLAMADynamicQuantumLevyDifferentialHybridSearch" - ).set_name("LLAMADynamicQuantumLevyDifferentialHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumLevyDifferentialHybridSearch = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialHybridSearch").set_name("LLAMADynamicQuantumLevyDifferentialHybridSearch", register=True) except Exception as e: print("DynamicQuantumLevyDifferentialHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialSwarmOptimization import ( - DynamicQuantumLevyDifferentialSwarmOptimization, - ) + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialSwarmOptimization import DynamicQuantumLevyDifferentialSwarmOptimization - lama_register["DynamicQuantumLevyDifferentialSwarmOptimization"] = ( - DynamicQuantumLevyDifferentialSwarmOptimization - ) - LLAMADynamicQuantumLevyDifferentialSwarmOptimization = NonObjectOptimizer( - method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization" - ).set_name("LLAMADynamicQuantumLevyDifferentialSwarmOptimization", register=True) + lama_register["DynamicQuantumLevyDifferentialSwarmOptimization"] = DynamicQuantumLevyDifferentialSwarmOptimization + res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumLevyDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization").set_name("LLAMADynamicQuantumLevyDifferentialSwarmOptimization", register=True) except Exception as e: print("DynamicQuantumLevyDifferentialSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumLevySwarmOptimization import ( - DynamicQuantumLevySwarmOptimization, - ) + from nevergrad.optimization.lama.DynamicQuantumLevySwarmOptimization import DynamicQuantumLevySwarmOptimization lama_register["DynamicQuantumLevySwarmOptimization"] = DynamicQuantumLevySwarmOptimization - LLAMADynamicQuantumLevySwarmOptimization = NonObjectOptimizer( - method="LLAMADynamicQuantumLevySwarmOptimization" - ).set_name("LLAMADynamicQuantumLevySwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumLevySwarmOptimization").set_name("LLAMADynamicQuantumLevySwarmOptimization", register=True) except Exception as e: print("DynamicQuantumLevySwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicQuantumMemeticOptimizer import DynamicQuantumMemeticOptimizer lama_register["DynamicQuantumMemeticOptimizer"] = DynamicQuantumMemeticOptimizer - LLAMADynamicQuantumMemeticOptimizer = NonObjectOptimizer( - method="LLAMADynamicQuantumMemeticOptimizer" - ).set_name("LLAMADynamicQuantumMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMADynamicQuantumMemeticOptimizer").set_name("LLAMADynamicQuantumMemeticOptimizer", register=True) except Exception as e: print("DynamicQuantumMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicQuantumSwarmOptimization import DynamicQuantumSwarmOptimization lama_register["DynamicQuantumSwarmOptimization"] = DynamicQuantumSwarmOptimization - LLAMADynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMADynamicQuantumSwarmOptimization" - ).set_name("LLAMADynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimization").set_name("LLAMADynamicQuantumSwarmOptimization", register=True) except Exception as e: print("DynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuantumSwarmOptimizationRefined import ( - DynamicQuantumSwarmOptimizationRefined, - ) + from nevergrad.optimization.lama.DynamicQuantumSwarmOptimizationRefined import DynamicQuantumSwarmOptimizationRefined lama_register["DynamicQuantumSwarmOptimizationRefined"] = DynamicQuantumSwarmOptimizationRefined - LLAMADynamicQuantumSwarmOptimizationRefined = NonObjectOptimizer( - method="LLAMADynamicQuantumSwarmOptimizationRefined" - ).set_name("LLAMADynamicQuantumSwarmOptimizationRefined", register=True) + res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuantumSwarmOptimizationRefined = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimizationRefined").set_name("LLAMADynamicQuantumSwarmOptimizationRefined", register=True) except Exception as e: print("DynamicQuantumSwarmOptimizationRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicQuasiRandomAdaptiveDifferentialEvolution import ( - DynamicQuasiRandomAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.DynamicQuasiRandomAdaptiveDifferentialEvolution import DynamicQuasiRandomAdaptiveDifferentialEvolution - lama_register["DynamicQuasiRandomAdaptiveDifferentialEvolution"] = ( - DynamicQuasiRandomAdaptiveDifferentialEvolution - ) - LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution" - ).set_name("LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution", register=True) + lama_register["DynamicQuasiRandomAdaptiveDifferentialEvolution"] = DynamicQuasiRandomAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution").set_name("LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution", register=True) except Exception as e: print("DynamicQuasiRandomAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( - DynamicRefinedGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.DynamicRefinedGradientBoostedMemorySimulatedAnnealing import DynamicRefinedGradientBoostedMemorySimulatedAnnealing - lama_register["DynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( - DynamicRefinedGradientBoostedMemorySimulatedAnnealing - ) - LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["DynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = DynamicRefinedGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("DynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicRefinementGradientBoostedMemoryAnnealing import ( - DynamicRefinementGradientBoostedMemoryAnnealing, - ) + from nevergrad.optimization.lama.DynamicRefinementGradientBoostedMemoryAnnealing import DynamicRefinementGradientBoostedMemoryAnnealing - lama_register["DynamicRefinementGradientBoostedMemoryAnnealing"] = ( - DynamicRefinementGradientBoostedMemoryAnnealing - ) - LLAMADynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing" - ).set_name("LLAMADynamicRefinementGradientBoostedMemoryAnnealing", register=True) + lama_register["DynamicRefinementGradientBoostedMemoryAnnealing"] = DynamicRefinementGradientBoostedMemoryAnnealing + res = NonObjectOptimizer(method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing").set_name("LLAMADynamicRefinementGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("DynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicScaleSearch import DynamicScaleSearch lama_register["DynamicScaleSearch"] = DynamicScaleSearch - LLAMADynamicScaleSearch = NonObjectOptimizer(method="LLAMADynamicScaleSearch").set_name( - "LLAMADynamicScaleSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicScaleSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicScaleSearch = NonObjectOptimizer(method="LLAMADynamicScaleSearch").set_name("LLAMADynamicScaleSearch", register=True) except Exception as e: print("DynamicScaleSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicSelfAdaptiveOptimizer import DynamicSelfAdaptiveOptimizer lama_register["DynamicSelfAdaptiveOptimizer"] = DynamicSelfAdaptiveOptimizer - LLAMADynamicSelfAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMADynamicSelfAdaptiveOptimizer" - ).set_name("LLAMADynamicSelfAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMADynamicSelfAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicSelfAdaptiveOptimizer = NonObjectOptimizer(method="LLAMADynamicSelfAdaptiveOptimizer").set_name("LLAMADynamicSelfAdaptiveOptimizer", register=True) except Exception as e: print("DynamicSelfAdaptiveOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.DynamicStrategyAdaptiveDE import DynamicStrategyAdaptiveDE lama_register["DynamicStrategyAdaptiveDE"] = DynamicStrategyAdaptiveDE - LLAMADynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE").set_name( - "LLAMADynamicStrategyAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE").set_name("LLAMADynamicStrategyAdaptiveDE", register=True) except Exception as e: print("DynamicStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.DynamicallyAdaptiveFireworkAlgorithm import ( - DynamicallyAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.DynamicallyAdaptiveFireworkAlgorithm import DynamicallyAdaptiveFireworkAlgorithm lama_register["DynamicallyAdaptiveFireworkAlgorithm"] = DynamicallyAdaptiveFireworkAlgorithm - LLAMADynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMADynamicallyAdaptiveFireworkAlgorithm" - ).set_name("LLAMADynamicallyAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMADynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMADynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicallyAdaptiveFireworkAlgorithm").set_name("LLAMADynamicallyAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("DynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EACDE import EACDE lama_register["EACDE"] = EACDE + res = NonObjectOptimizer(method="LLAMAEACDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEACDE = NonObjectOptimizer(method="LLAMAEACDE").set_name("LLAMAEACDE", register=True) except Exception as e: print("EACDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EADE import EADE lama_register["EADE"] = EADE + res = NonObjectOptimizer(method="LLAMAEADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADE = NonObjectOptimizer(method="LLAMAEADE").set_name("LLAMAEADE", register=True) except Exception as e: print("EADE can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEA import EADEA lama_register["EADEA"] = EADEA + res = NonObjectOptimizer(method="LLAMAEADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEA = NonObjectOptimizer(method="LLAMAEADEA").set_name("LLAMAEADEA", register=True) except Exception as e: print("EADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEDM import EADEDM lama_register["EADEDM"] = EADEDM + res = NonObjectOptimizer(method="LLAMAEADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEDM = NonObjectOptimizer(method="LLAMAEADEDM").set_name("LLAMAEADEDM", register=True) except Exception as e: print("EADEDM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEDMGM import EADEDMGM lama_register["EADEDMGM"] = EADEDMGM + res = NonObjectOptimizer(method="LLAMAEADEDMGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEDMGM = NonObjectOptimizer(method="LLAMAEADEDMGM").set_name("LLAMAEADEDMGM", register=True) except Exception as e: print("EADEDMGM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEPC import EADEPC lama_register["EADEPC"] = EADEPC + res = NonObjectOptimizer(method="LLAMAEADEPC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEPC = NonObjectOptimizer(method="LLAMAEADEPC").set_name("LLAMAEADEPC", register=True) except Exception as e: print("EADEPC can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEPM import EADEPM lama_register["EADEPM"] = EADEPM + res = NonObjectOptimizer(method="LLAMAEADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEPM = NonObjectOptimizer(method="LLAMAEADEPM").set_name("LLAMAEADEPM", register=True) except Exception as e: print("EADEPM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEPMC import EADEPMC lama_register["EADEPMC"] = EADEPMC + res = NonObjectOptimizer(method="LLAMAEADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEPMC = NonObjectOptimizer(method="LLAMAEADEPMC").set_name("LLAMAEADEPMC", register=True) except Exception as e: print("EADEPMC can not be imported: ", e) - try: from nevergrad.optimization.lama.EADES import EADES lama_register["EADES"] = EADES + res = NonObjectOptimizer(method="LLAMAEADES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADES = NonObjectOptimizer(method="LLAMAEADES").set_name("LLAMAEADES", register=True) except Exception as e: print("EADES can not be imported: ", e) - try: from nevergrad.optimization.lama.EADESC import EADESC lama_register["EADESC"] = EADESC + res = NonObjectOptimizer(method="LLAMAEADESC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADESC = NonObjectOptimizer(method="LLAMAEADESC").set_name("LLAMAEADESC", register=True) except Exception as e: print("EADESC can not be imported: ", e) - try: from nevergrad.optimization.lama.EADEWM import EADEWM lama_register["EADEWM"] = EADEWM + res = NonObjectOptimizer(method="LLAMAEADEWM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADEWM = NonObjectOptimizer(method="LLAMAEADEWM").set_name("LLAMAEADEWM", register=True) except Exception as e: print("EADEWM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADE_FIDM import EADE_FIDM lama_register["EADE_FIDM"] = EADE_FIDM + res = NonObjectOptimizer(method="LLAMAEADE_FIDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADE_FIDM = NonObjectOptimizer(method="LLAMAEADE_FIDM").set_name("LLAMAEADE_FIDM", register=True) except Exception as e: print("EADE_FIDM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADGM import EADGM lama_register["EADGM"] = EADGM + res = NonObjectOptimizer(method="LLAMAEADGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADGM = NonObjectOptimizer(method="LLAMAEADGM").set_name("LLAMAEADGM", register=True) except Exception as e: print("EADGM can not be imported: ", e) - try: from nevergrad.optimization.lama.EADMMMS import EADMMMS lama_register["EADMMMS"] = EADMMMS + res = NonObjectOptimizer(method="LLAMAEADMMMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADMMMS = NonObjectOptimizer(method="LLAMAEADMMMS").set_name("LLAMAEADMMMS", register=True) except Exception as e: print("EADMMMS can not be imported: ", e) - try: from nevergrad.optimization.lama.EADSEA import EADSEA lama_register["EADSEA"] = EADSEA + res = NonObjectOptimizer(method="LLAMAEADSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADSEA = NonObjectOptimizer(method="LLAMAEADSEA").set_name("LLAMAEADSEA", register=True) except Exception as e: print("EADSEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EADSM import EADSM lama_register["EADSM"] = EADSM + res = NonObjectOptimizer(method="LLAMAEADSM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEADSM = NonObjectOptimizer(method="LLAMAEADSM").set_name("LLAMAEADSM", register=True) except Exception as e: print("EADSM can not be imported: ", e) - try: from nevergrad.optimization.lama.EAMDE import EAMDE lama_register["EAMDE"] = EAMDE + res = NonObjectOptimizer(method="LLAMAEAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEAMDE = NonObjectOptimizer(method="LLAMAEAMDE").set_name("LLAMAEAMDE", register=True) except Exception as e: print("EAMDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EAMES import EAMES lama_register["EAMES"] = EAMES + res = NonObjectOptimizer(method="LLAMAEAMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEAMES = NonObjectOptimizer(method="LLAMAEAMES").set_name("LLAMAEAMES", register=True) except Exception as e: print("EAMES can not be imported: ", e) - try: from nevergrad.optimization.lama.EAMSDiffEvo import EAMSDiffEvo lama_register["EAMSDiffEvo"] = EAMSDiffEvo - LLAMAEAMSDiffEvo = NonObjectOptimizer(method="LLAMAEAMSDiffEvo").set_name( - "LLAMAEAMSDiffEvo", register=True - ) + res = NonObjectOptimizer(method="LLAMAEAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEAMSDiffEvo = NonObjectOptimizer(method="LLAMAEAMSDiffEvo").set_name("LLAMAEAMSDiffEvo", register=True) except Exception as e: print("EAMSDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.EAMSEA import EAMSEA lama_register["EAMSEA"] = EAMSEA + res = NonObjectOptimizer(method="LLAMAEAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEAMSEA = NonObjectOptimizer(method="LLAMAEAMSEA").set_name("LLAMAEAMSEA", register=True) except Exception as e: print("EAMSEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EAPBES import EAPBES lama_register["EAPBES"] = EAPBES + res = NonObjectOptimizer(method="LLAMAEAPBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEAPBES = NonObjectOptimizer(method="LLAMAEAPBES").set_name("LLAMAEAPBES", register=True) except Exception as e: print("EAPBES can not be imported: ", e) - try: from nevergrad.optimization.lama.EAPDELS import EAPDELS lama_register["EAPDELS"] = EAPDELS + res = NonObjectOptimizer(method="LLAMAEAPDELS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEAPDELS = NonObjectOptimizer(method="LLAMAEAPDELS").set_name("LLAMAEAPDELS", register=True) except Exception as e: print("EAPDELS can not be imported: ", e) - try: from nevergrad.optimization.lama.EARESDM import EARESDM lama_register["EARESDM"] = EARESDM + res = NonObjectOptimizer(method="LLAMAEARESDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEARESDM = NonObjectOptimizer(method="LLAMAEARESDM").set_name("LLAMAEARESDM", register=True) except Exception as e: print("EARESDM can not be imported: ", e) - try: from nevergrad.optimization.lama.EASO import EASO lama_register["EASO"] = EASO + res = NonObjectOptimizer(method="LLAMAEASO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEASO = NonObjectOptimizer(method="LLAMAEASO").set_name("LLAMAEASO", register=True) except Exception as e: print("EASO can not be imported: ", e) - try: from nevergrad.optimization.lama.EDAEA import EDAEA lama_register["EDAEA"] = EDAEA + res = NonObjectOptimizer(method="LLAMAEDAEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDAEA = NonObjectOptimizer(method="LLAMAEDAEA").set_name("LLAMAEDAEA", register=True) except Exception as e: print("EDAEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EDAG import EDAG lama_register["EDAG"] = EDAG + res = NonObjectOptimizer(method="LLAMAEDAG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDAG = NonObjectOptimizer(method="LLAMAEDAG").set_name("LLAMAEDAG", register=True) except Exception as e: print("EDAG can not be imported: ", e) - try: from nevergrad.optimization.lama.EDASOGG import EDASOGG lama_register["EDASOGG"] = EDASOGG + res = NonObjectOptimizer(method="LLAMAEDASOGG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDASOGG = NonObjectOptimizer(method="LLAMAEDASOGG").set_name("LLAMAEDASOGG", register=True) except Exception as e: print("EDASOGG can not be imported: ", e) - try: from nevergrad.optimization.lama.EDDCEA import EDDCEA lama_register["EDDCEA"] = EDDCEA + res = NonObjectOptimizer(method="LLAMAEDDCEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDDCEA = NonObjectOptimizer(method="LLAMAEDDCEA").set_name("LLAMAEDDCEA", register=True) except Exception as e: print("EDDCEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EDEAS import EDEAS lama_register["EDEAS"] = EDEAS + res = NonObjectOptimizer(method="LLAMAEDEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDEAS = NonObjectOptimizer(method="LLAMAEDEAS").set_name("LLAMAEDEAS", register=True) except Exception as e: print("EDEAS can not be imported: ", e) - try: from nevergrad.optimization.lama.EDEPM import EDEPM lama_register["EDEPM"] = EDEPM + res = NonObjectOptimizer(method="LLAMAEDEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDEPM = NonObjectOptimizer(method="LLAMAEDEPM").set_name("LLAMAEDEPM", register=True) except Exception as e: print("EDEPM can not be imported: ", e) - try: from nevergrad.optimization.lama.EDGB import EDGB lama_register["EDGB"] = EDGB + res = NonObjectOptimizer(method="LLAMAEDGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDGB = NonObjectOptimizer(method="LLAMAEDGB").set_name("LLAMAEDGB", register=True) except Exception as e: print("EDGB can not be imported: ", e) - try: from nevergrad.optimization.lama.EDMDESM import EDMDESM lama_register["EDMDESM"] = EDMDESM + res = NonObjectOptimizer(method="LLAMAEDMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDMDESM = NonObjectOptimizer(method="LLAMAEDMDESM").set_name("LLAMAEDMDESM", register=True) except Exception as e: print("EDMDESM can not be imported: ", e) - try: from nevergrad.optimization.lama.EDMRL import EDMRL lama_register["EDMRL"] = EDMRL + res = NonObjectOptimizer(method="LLAMAEDMRL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDMRL = NonObjectOptimizer(method="LLAMAEDMRL").set_name("LLAMAEDMRL", register=True) except Exception as e: print("EDMRL can not be imported: ", e) - try: from nevergrad.optimization.lama.EDMS import EDMS lama_register["EDMS"] = EDMS + res = NonObjectOptimizer(method="LLAMAEDMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDMS = NonObjectOptimizer(method="LLAMAEDMS").set_name("LLAMAEDMS", register=True) except Exception as e: print("EDMS can not be imported: ", e) - try: from nevergrad.optimization.lama.EDNAS import EDNAS lama_register["EDNAS"] = EDNAS + res = NonObjectOptimizer(method="LLAMAEDNAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEDNAS = NonObjectOptimizer(method="LLAMAEDNAS").set_name("LLAMAEDNAS", register=True) except Exception as e: print("EDNAS can not be imported: ", e) - try: from nevergrad.optimization.lama.EDNAS_SAMRA import EDNAS_SAMRA lama_register["EDNAS_SAMRA"] = EDNAS_SAMRA - LLAMAEDNAS_SAMRA = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA").set_name( - "LLAMAEDNAS_SAMRA", register=True - ) + res = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEDNAS_SAMRA = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA").set_name("LLAMAEDNAS_SAMRA", register=True) except Exception as e: print("EDNAS_SAMRA can not be imported: ", e) - try: from nevergrad.optimization.lama.EDSDiffEvoM import EDSDiffEvoM lama_register["EDSDiffEvoM"] = EDSDiffEvoM - LLAMAEDSDiffEvoM = NonObjectOptimizer(method="LLAMAEDSDiffEvoM").set_name( - "LLAMAEDSDiffEvoM", register=True - ) + res = NonObjectOptimizer(method="LLAMAEDSDiffEvoM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEDSDiffEvoM = NonObjectOptimizer(method="LLAMAEDSDiffEvoM").set_name("LLAMAEDSDiffEvoM", register=True) except Exception as e: print("EDSDiffEvoM can not be imported: ", e) - try: from nevergrad.optimization.lama.EGBDE import EGBDE lama_register["EGBDE"] = EGBDE + res = NonObjectOptimizer(method="LLAMAEGBDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEGBDE = NonObjectOptimizer(method="LLAMAEGBDE").set_name("LLAMAEGBDE", register=True) except Exception as e: print("EGBDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EGGEO import EGGEO lama_register["EGGEO"] = EGGEO + res = NonObjectOptimizer(method="LLAMAEGGEO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEGGEO = NonObjectOptimizer(method="LLAMAEGGEO").set_name("LLAMAEGGEO", register=True) except Exception as e: print("EGGEO can not be imported: ", e) - try: from nevergrad.optimization.lama.EHADEEM import EHADEEM lama_register["EHADEEM"] = EHADEEM + res = NonObjectOptimizer(method="LLAMAEHADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEHADEEM = NonObjectOptimizer(method="LLAMAEHADEEM").set_name("LLAMAEHADEEM", register=True) except Exception as e: print("EHADEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.EHADEMI import EHADEMI lama_register["EHADEMI"] = EHADEMI + res = NonObjectOptimizer(method="LLAMAEHADEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEHADEMI = NonObjectOptimizer(method="LLAMAEHADEMI").set_name("LLAMAEHADEMI", register=True) except Exception as e: print("EHADEMI can not be imported: ", e) - try: from nevergrad.optimization.lama.EHDAM import EHDAM lama_register["EHDAM"] = EHDAM + res = NonObjectOptimizer(method="LLAMAEHDAM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEHDAM = NonObjectOptimizer(method="LLAMAEHDAM").set_name("LLAMAEHDAM", register=True) except Exception as e: print("EHDAM can not be imported: ", e) - try: from nevergrad.optimization.lama.EHDE import EHDE lama_register["EHDE"] = EHDE + res = NonObjectOptimizer(method="LLAMAEHDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEHDE = NonObjectOptimizer(method="LLAMAEHDE").set_name("LLAMAEHDE", register=True) except Exception as e: print("EHDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EIADEA import EIADEA lama_register["EIADEA"] = EIADEA + res = NonObjectOptimizer(method="LLAMAEIADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEIADEA = NonObjectOptimizer(method="LLAMAEIADEA").set_name("LLAMAEIADEA", register=True) except Exception as e: print("EIADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.EMIDE import EMIDE lama_register["EMIDE"] = EMIDE + res = NonObjectOptimizer(method="LLAMAEMIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEMIDE = NonObjectOptimizer(method="LLAMAEMIDE").set_name("LLAMAEMIDE", register=True) except Exception as e: print("EMIDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EMSADE import EMSADE lama_register["EMSADE"] = EMSADE + res = NonObjectOptimizer(method="LLAMAEMSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEMSADE = NonObjectOptimizer(method="LLAMAEMSADE").set_name("LLAMAEMSADE", register=True) except Exception as e: print("EMSADE can not be imported: ", e) - try: from nevergrad.optimization.lama.EMSEAS import EMSEAS lama_register["EMSEAS"] = EMSEAS + res = NonObjectOptimizer(method="LLAMAEMSEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEMSEAS = NonObjectOptimizer(method="LLAMAEMSEAS").set_name("LLAMAEMSEAS", register=True) except Exception as e: print("EMSEAS can not be imported: ", e) - try: from nevergrad.optimization.lama.EORAMED import EORAMED lama_register["EORAMED"] = EORAMED + res = NonObjectOptimizer(method="LLAMAEORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEORAMED = NonObjectOptimizer(method="LLAMAEORAMED").set_name("LLAMAEORAMED", register=True) except Exception as e: print("EORAMED can not be imported: ", e) - try: from nevergrad.optimization.lama.EPADE import EPADE lama_register["EPADE"] = EPADE + res = NonObjectOptimizer(method="LLAMAEPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEPADE = NonObjectOptimizer(method="LLAMAEPADE").set_name("LLAMAEPADE", register=True) except Exception as e: print("EPADE can not be imported: ", e) - try: from nevergrad.optimization.lama.EPDE import EPDE lama_register["EPDE"] = EPDE + res = NonObjectOptimizer(method="LLAMAEPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEPDE = NonObjectOptimizer(method="LLAMAEPDE").set_name("LLAMAEPDE", register=True) except Exception as e: print("EPDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EPWDEM import EPWDEM lama_register["EPWDEM"] = EPWDEM + res = NonObjectOptimizer(method="LLAMAEPWDEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEPWDEM = NonObjectOptimizer(method="LLAMAEPWDEM").set_name("LLAMAEPWDEM", register=True) except Exception as e: print("EPWDEM can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADE import ERADE lama_register["ERADE"] = ERADE + res = NonObjectOptimizer(method="LLAMAERADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAERADE = NonObjectOptimizer(method="LLAMAERADE").set_name("LLAMAERADE", register=True) except Exception as e: print("ERADE can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS import ERADS lama_register["ERADS"] = ERADS + res = NonObjectOptimizer(method="LLAMAERADS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAERADS = NonObjectOptimizer(method="LLAMAERADS").set_name("LLAMAERADS", register=True) except Exception as e: print("ERADS can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptiveDynamic import ERADS_AdaptiveDynamic lama_register["ERADS_AdaptiveDynamic"] = ERADS_AdaptiveDynamic - LLAMAERADS_AdaptiveDynamic = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic").set_name( - "LLAMAERADS_AdaptiveDynamic", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptiveDynamic = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic").set_name("LLAMAERADS_AdaptiveDynamic", register=True) except Exception as e: print("ERADS_AdaptiveDynamic can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptiveDynamicPlus import ERADS_AdaptiveDynamicPlus lama_register["ERADS_AdaptiveDynamicPlus"] = ERADS_AdaptiveDynamicPlus - LLAMAERADS_AdaptiveDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus").set_name( - "LLAMAERADS_AdaptiveDynamicPlus", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptiveDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus").set_name("LLAMAERADS_AdaptiveDynamicPlus", register=True) except Exception as e: print("ERADS_AdaptiveDynamicPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptiveHybrid import ERADS_AdaptiveHybrid lama_register["ERADS_AdaptiveHybrid"] = ERADS_AdaptiveHybrid - LLAMAERADS_AdaptiveHybrid = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid").set_name( - "LLAMAERADS_AdaptiveHybrid", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptiveHybrid = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid").set_name("LLAMAERADS_AdaptiveHybrid", register=True) except Exception as e: print("ERADS_AdaptiveHybrid can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptivePlus import ERADS_AdaptivePlus lama_register["ERADS_AdaptivePlus"] = ERADS_AdaptivePlus - LLAMAERADS_AdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus").set_name( - "LLAMAERADS_AdaptivePlus", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus").set_name("LLAMAERADS_AdaptivePlus", register=True) except Exception as e: print("ERADS_AdaptivePlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptiveProgressive import ERADS_AdaptiveProgressive lama_register["ERADS_AdaptiveProgressive"] = ERADS_AdaptiveProgressive - LLAMAERADS_AdaptiveProgressive = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive").set_name( - "LLAMAERADS_AdaptiveProgressive", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptiveProgressive = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive").set_name("LLAMAERADS_AdaptiveProgressive", register=True) except Exception as e: print("ERADS_AdaptiveProgressive can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdaptiveRefinement import ERADS_AdaptiveRefinement lama_register["ERADS_AdaptiveRefinement"] = ERADS_AdaptiveRefinement - LLAMAERADS_AdaptiveRefinement = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement").set_name( - "LLAMAERADS_AdaptiveRefinement", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdaptiveRefinement = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement").set_name("LLAMAERADS_AdaptiveRefinement", register=True) except Exception as e: print("ERADS_AdaptiveRefinement can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Advanced import ERADS_Advanced lama_register["ERADS_Advanced"] = ERADS_Advanced - LLAMAERADS_Advanced = NonObjectOptimizer(method="LLAMAERADS_Advanced").set_name( - "LLAMAERADS_Advanced", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Advanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Advanced = NonObjectOptimizer(method="LLAMAERADS_Advanced").set_name("LLAMAERADS_Advanced", register=True) except Exception as e: print("ERADS_Advanced can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdvancedDynamic import ERADS_AdvancedDynamic lama_register["ERADS_AdvancedDynamic"] = ERADS_AdvancedDynamic - LLAMAERADS_AdvancedDynamic = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic").set_name( - "LLAMAERADS_AdvancedDynamic", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdvancedDynamic = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic").set_name("LLAMAERADS_AdvancedDynamic", register=True) except Exception as e: print("ERADS_AdvancedDynamic can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_AdvancedRefined import ERADS_AdvancedRefined lama_register["ERADS_AdvancedRefined"] = ERADS_AdvancedRefined - LLAMAERADS_AdvancedRefined = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined").set_name( - "LLAMAERADS_AdvancedRefined", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_AdvancedRefined = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined").set_name("LLAMAERADS_AdvancedRefined", register=True) except Exception as e: print("ERADS_AdvancedRefined can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_DynamicPrecision import ERADS_DynamicPrecision lama_register["ERADS_DynamicPrecision"] = ERADS_DynamicPrecision - LLAMAERADS_DynamicPrecision = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision").set_name( - "LLAMAERADS_DynamicPrecision", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_DynamicPrecision = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision").set_name("LLAMAERADS_DynamicPrecision", register=True) except Exception as e: print("ERADS_DynamicPrecision can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Enhanced import ERADS_Enhanced lama_register["ERADS_Enhanced"] = ERADS_Enhanced - LLAMAERADS_Enhanced = NonObjectOptimizer(method="LLAMAERADS_Enhanced").set_name( - "LLAMAERADS_Enhanced", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Enhanced = NonObjectOptimizer(method="LLAMAERADS_Enhanced").set_name("LLAMAERADS_Enhanced", register=True) except Exception as e: print("ERADS_Enhanced can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_EnhancedPrecision import ERADS_EnhancedPrecision lama_register["ERADS_EnhancedPrecision"] = ERADS_EnhancedPrecision - LLAMAERADS_EnhancedPrecision = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision").set_name( - "LLAMAERADS_EnhancedPrecision", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_EnhancedPrecision = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision").set_name("LLAMAERADS_EnhancedPrecision", register=True) except Exception as e: print("ERADS_EnhancedPrecision can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_HyperOptimized import ERADS_HyperOptimized lama_register["ERADS_HyperOptimized"] = ERADS_HyperOptimized - LLAMAERADS_HyperOptimized = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized").set_name( - "LLAMAERADS_HyperOptimized", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_HyperOptimized = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized").set_name("LLAMAERADS_HyperOptimized", register=True) except Exception as e: print("ERADS_HyperOptimized can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_NextGen import ERADS_NextGen lama_register["ERADS_NextGen"] = ERADS_NextGen - LLAMAERADS_NextGen = NonObjectOptimizer(method="LLAMAERADS_NextGen").set_name( - "LLAMAERADS_NextGen", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_NextGen")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_NextGen = NonObjectOptimizer(method="LLAMAERADS_NextGen").set_name("LLAMAERADS_NextGen", register=True) except Exception as e: print("ERADS_NextGen can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Optimized import ERADS_Optimized lama_register["ERADS_Optimized"] = ERADS_Optimized - LLAMAERADS_Optimized = NonObjectOptimizer(method="LLAMAERADS_Optimized").set_name( - "LLAMAERADS_Optimized", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Optimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Optimized = NonObjectOptimizer(method="LLAMAERADS_Optimized").set_name("LLAMAERADS_Optimized", register=True) except Exception as e: print("ERADS_Optimized can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Precision import ERADS_Precision lama_register["ERADS_Precision"] = ERADS_Precision - LLAMAERADS_Precision = NonObjectOptimizer(method="LLAMAERADS_Precision").set_name( - "LLAMAERADS_Precision", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Precision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Precision = NonObjectOptimizer(method="LLAMAERADS_Precision").set_name("LLAMAERADS_Precision", register=True) except Exception as e: print("ERADS_Precision can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressiveAdaptive import ERADS_ProgressiveAdaptive lama_register["ERADS_ProgressiveAdaptive"] = ERADS_ProgressiveAdaptive - LLAMAERADS_ProgressiveAdaptive = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive").set_name( - "LLAMAERADS_ProgressiveAdaptive", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressiveAdaptive = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive").set_name("LLAMAERADS_ProgressiveAdaptive", register=True) except Exception as e: print("ERADS_ProgressiveAdaptive can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressiveAdaptivePlus import ERADS_ProgressiveAdaptivePlus lama_register["ERADS_ProgressiveAdaptivePlus"] = ERADS_ProgressiveAdaptivePlus - LLAMAERADS_ProgressiveAdaptivePlus = NonObjectOptimizer( - method="LLAMAERADS_ProgressiveAdaptivePlus" - ).set_name("LLAMAERADS_ProgressiveAdaptivePlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptivePlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressiveAdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptivePlus").set_name("LLAMAERADS_ProgressiveAdaptivePlus", register=True) except Exception as e: print("ERADS_ProgressiveAdaptivePlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressiveDynamic import ERADS_ProgressiveDynamic lama_register["ERADS_ProgressiveDynamic"] = ERADS_ProgressiveDynamic - LLAMAERADS_ProgressiveDynamic = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic").set_name( - "LLAMAERADS_ProgressiveDynamic", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressiveDynamic = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic").set_name("LLAMAERADS_ProgressiveDynamic", register=True) except Exception as e: print("ERADS_ProgressiveDynamic can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressiveOptimized import ERADS_ProgressiveOptimized lama_register["ERADS_ProgressiveOptimized"] = ERADS_ProgressiveOptimized - LLAMAERADS_ProgressiveOptimized = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized").set_name( - "LLAMAERADS_ProgressiveOptimized", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressiveOptimized = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized").set_name("LLAMAERADS_ProgressiveOptimized", register=True) except Exception as e: print("ERADS_ProgressiveOptimized can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressivePrecision import ERADS_ProgressivePrecision lama_register["ERADS_ProgressivePrecision"] = ERADS_ProgressivePrecision - LLAMAERADS_ProgressivePrecision = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision").set_name( - "LLAMAERADS_ProgressivePrecision", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressivePrecision = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision").set_name("LLAMAERADS_ProgressivePrecision", register=True) except Exception as e: print("ERADS_ProgressivePrecision can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_ProgressiveRefinement import ERADS_ProgressiveRefinement lama_register["ERADS_ProgressiveRefinement"] = ERADS_ProgressiveRefinement - LLAMAERADS_ProgressiveRefinement = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement").set_name( - "LLAMAERADS_ProgressiveRefinement", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_ProgressiveRefinement = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement").set_name("LLAMAERADS_ProgressiveRefinement", register=True) except Exception as e: print("ERADS_ProgressiveRefinement can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_QuantumFlux import ERADS_QuantumFlux lama_register["ERADS_QuantumFlux"] = ERADS_QuantumFlux - LLAMAERADS_QuantumFlux = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux").set_name( - "LLAMAERADS_QuantumFlux", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumFlux = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux").set_name("LLAMAERADS_QuantumFlux", register=True) except Exception as e: print("ERADS_QuantumFlux can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_QuantumFluxPro import ERADS_QuantumFluxPro lama_register["ERADS_QuantumFluxPro"] = ERADS_QuantumFluxPro - LLAMAERADS_QuantumFluxPro = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro").set_name( - "LLAMAERADS_QuantumFluxPro", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumFluxPro = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro").set_name("LLAMAERADS_QuantumFluxPro", register=True) except Exception as e: print("ERADS_QuantumFluxPro can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_QuantumFluxUltra import ERADS_QuantumFluxUltra lama_register["ERADS_QuantumFluxUltra"] = ERADS_QuantumFluxUltra - LLAMAERADS_QuantumFluxUltra = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra").set_name( - "LLAMAERADS_QuantumFluxUltra", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumFluxUltra = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra").set_name("LLAMAERADS_QuantumFluxUltra", register=True) except Exception as e: print("ERADS_QuantumFluxUltra can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefined import ERADS_QuantumFluxUltraRefined lama_register["ERADS_QuantumFluxUltraRefined"] = ERADS_QuantumFluxUltraRefined - LLAMAERADS_QuantumFluxUltraRefined = NonObjectOptimizer( - method="LLAMAERADS_QuantumFluxUltraRefined" - ).set_name("LLAMAERADS_QuantumFluxUltraRefined", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumFluxUltraRefined = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefined").set_name("LLAMAERADS_QuantumFluxUltraRefined", register=True) except Exception as e: print("ERADS_QuantumFluxUltraRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefinedPlus import ( - ERADS_QuantumFluxUltraRefinedPlus, - ) + from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefinedPlus import ERADS_QuantumFluxUltraRefinedPlus lama_register["ERADS_QuantumFluxUltraRefinedPlus"] = ERADS_QuantumFluxUltraRefinedPlus - LLAMAERADS_QuantumFluxUltraRefinedPlus = NonObjectOptimizer( - method="LLAMAERADS_QuantumFluxUltraRefinedPlus" - ).set_name("LLAMAERADS_QuantumFluxUltraRefinedPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumFluxUltraRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefinedPlus").set_name("LLAMAERADS_QuantumFluxUltraRefinedPlus", register=True) except Exception as e: print("ERADS_QuantumFluxUltraRefinedPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_QuantumLeap import ERADS_QuantumLeap lama_register["ERADS_QuantumLeap"] = ERADS_QuantumLeap - LLAMAERADS_QuantumLeap = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap").set_name( - "LLAMAERADS_QuantumLeap", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_QuantumLeap = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap").set_name("LLAMAERADS_QuantumLeap", register=True) except Exception as e: print("ERADS_QuantumLeap can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Refined import ERADS_Refined lama_register["ERADS_Refined"] = ERADS_Refined - LLAMAERADS_Refined = NonObjectOptimizer(method="LLAMAERADS_Refined").set_name( - "LLAMAERADS_Refined", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Refined = NonObjectOptimizer(method="LLAMAERADS_Refined").set_name("LLAMAERADS_Refined", register=True) except Exception as e: print("ERADS_Refined can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Superior import ERADS_Superior lama_register["ERADS_Superior"] = ERADS_Superior - LLAMAERADS_Superior = NonObjectOptimizer(method="LLAMAERADS_Superior").set_name( - "LLAMAERADS_Superior", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Superior")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Superior = NonObjectOptimizer(method="LLAMAERADS_Superior").set_name("LLAMAERADS_Superior", register=True) except Exception as e: print("ERADS_Superior can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_Ultra import ERADS_Ultra lama_register["ERADS_Ultra"] = ERADS_Ultra - LLAMAERADS_Ultra = NonObjectOptimizer(method="LLAMAERADS_Ultra").set_name( - "LLAMAERADS_Ultra", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_Ultra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_Ultra = NonObjectOptimizer(method="LLAMAERADS_Ultra").set_name("LLAMAERADS_Ultra", register=True) except Exception as e: print("ERADS_Ultra can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamic import ERADS_UltraDynamic lama_register["ERADS_UltraDynamic"] = ERADS_UltraDynamic - LLAMAERADS_UltraDynamic = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic").set_name( - "LLAMAERADS_UltraDynamic", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamic = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic").set_name("LLAMAERADS_UltraDynamic", register=True) except Exception as e: print("ERADS_UltraDynamic can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMax import ERADS_UltraDynamicMax lama_register["ERADS_UltraDynamicMax"] = ERADS_UltraDynamicMax - LLAMAERADS_UltraDynamicMax = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax").set_name( - "LLAMAERADS_UltraDynamicMax", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMax = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax").set_name("LLAMAERADS_UltraDynamicMax", register=True) except Exception as e: print("ERADS_UltraDynamicMax can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxEnhanced import ERADS_UltraDynamicMaxEnhanced lama_register["ERADS_UltraDynamicMaxEnhanced"] = ERADS_UltraDynamicMaxEnhanced - LLAMAERADS_UltraDynamicMaxEnhanced = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxEnhanced" - ).set_name("LLAMAERADS_UltraDynamicMaxEnhanced", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxEnhanced").set_name("LLAMAERADS_UltraDynamicMaxEnhanced", register=True) except Exception as e: print("ERADS_UltraDynamicMaxEnhanced can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHybrid import ERADS_UltraDynamicMaxHybrid lama_register["ERADS_UltraDynamicMaxHybrid"] = ERADS_UltraDynamicMaxHybrid - LLAMAERADS_UltraDynamicMaxHybrid = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid").set_name( - "LLAMAERADS_UltraDynamicMaxHybrid", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHybrid = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid").set_name("LLAMAERADS_UltraDynamicMaxHybrid", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHybrid can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyper import ERADS_UltraDynamicMaxHyper lama_register["ERADS_UltraDynamicMaxHyper"] = ERADS_UltraDynamicMaxHyper - LLAMAERADS_UltraDynamicMaxHyper = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper").set_name( - "LLAMAERADS_UltraDynamicMaxHyper", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyper = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper").set_name("LLAMAERADS_UltraDynamicMaxHyper", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyper can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimized import ( - ERADS_UltraDynamicMaxHyperOptimized, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimized import ERADS_UltraDynamicMaxHyperOptimized lama_register["ERADS_UltraDynamicMaxHyperOptimized"] = ERADS_UltraDynamicMaxHyperOptimized - LLAMAERADS_UltraDynamicMaxHyperOptimized = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperOptimized" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimized", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimized").set_name("LLAMAERADS_UltraDynamicMaxHyperOptimized", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimizedV4 import ( - ERADS_UltraDynamicMaxHyperOptimizedV4, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimizedV4 import ERADS_UltraDynamicMaxHyperOptimizedV4 lama_register["ERADS_UltraDynamicMaxHyperOptimizedV4"] = ERADS_UltraDynamicMaxHyperOptimizedV4 - LLAMAERADS_UltraDynamicMaxHyperOptimizedV4 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimizedV4", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperOptimizedV4 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4").set_name("LLAMAERADS_UltraDynamicMaxHyperOptimizedV4", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperOptimizedV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperPlus import ERADS_UltraDynamicMaxHyperPlus lama_register["ERADS_UltraDynamicMaxHyperPlus"] = ERADS_UltraDynamicMaxHyperPlus - LLAMAERADS_UltraDynamicMaxHyperPlus = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperPlus" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperPlus").set_name("LLAMAERADS_UltraDynamicMaxHyperPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefined import ( - ERADS_UltraDynamicMaxHyperRefined, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefined import ERADS_UltraDynamicMaxHyperRefined lama_register["ERADS_UltraDynamicMaxHyperRefined"] = ERADS_UltraDynamicMaxHyperRefined - LLAMAERADS_UltraDynamicMaxHyperRefined = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperRefined" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefined", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefined").set_name("LLAMAERADS_UltraDynamicMaxHyperRefined", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimized import ( - ERADS_UltraDynamicMaxHyperRefinedOptimized, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimized import ERADS_UltraDynamicMaxHyperRefinedOptimized lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimized"] = ERADS_UltraDynamicMaxHyperRefinedOptimized - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperRefinedOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 import ( - ERADS_UltraDynamicMaxHyperRefinedOptimizedV2, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 import ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 - lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV2"] = ( - ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 - ) - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2", register=True) + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV2"] = ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 import ( - ERADS_UltraDynamicMaxHyperRefinedOptimizedV3, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 import ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 - lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV3"] = ( - ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 - ) - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3", register=True) + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV3"] = ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedPlus import ( - ERADS_UltraDynamicMaxHyperRefinedPlus, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedPlus import ERADS_UltraDynamicMaxHyperRefinedPlus lama_register["ERADS_UltraDynamicMaxHyperRefinedPlus"] = ERADS_UltraDynamicMaxHyperRefinedPlus - LLAMAERADS_UltraDynamicMaxHyperRefinedPlus = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus" - ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxHyperRefinedPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimal import ERADS_UltraDynamicMaxOptimal lama_register["ERADS_UltraDynamicMaxOptimal"] = ERADS_UltraDynamicMaxOptimal - LLAMAERADS_UltraDynamicMaxOptimal = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxOptimal" - ).set_name("LLAMAERADS_UltraDynamicMaxOptimal", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxOptimal = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimal").set_name("LLAMAERADS_UltraDynamicMaxOptimal", register=True) except Exception as e: print("ERADS_UltraDynamicMaxOptimal can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimized import ERADS_UltraDynamicMaxOptimized lama_register["ERADS_UltraDynamicMaxOptimized"] = ERADS_UltraDynamicMaxOptimized - LLAMAERADS_UltraDynamicMaxOptimized = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxOptimized" - ).set_name("LLAMAERADS_UltraDynamicMaxOptimized", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimized").set_name("LLAMAERADS_UltraDynamicMaxOptimized", register=True) except Exception as e: print("ERADS_UltraDynamicMaxOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimizedPlus import ( - ERADS_UltraDynamicMaxOptimizedPlus, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimizedPlus import ERADS_UltraDynamicMaxOptimizedPlus lama_register["ERADS_UltraDynamicMaxOptimizedPlus"] = ERADS_UltraDynamicMaxOptimizedPlus - LLAMAERADS_UltraDynamicMaxOptimizedPlus = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxOptimizedPlus" - ).set_name("LLAMAERADS_UltraDynamicMaxOptimizedPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimizedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxOptimizedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimizedPlus").set_name("LLAMAERADS_UltraDynamicMaxOptimizedPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxOptimizedPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPlus import ERADS_UltraDynamicMaxPlus lama_register["ERADS_UltraDynamicMaxPlus"] = ERADS_UltraDynamicMaxPlus - LLAMAERADS_UltraDynamicMaxPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus").set_name( - "LLAMAERADS_UltraDynamicMaxPlus", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus").set_name("LLAMAERADS_UltraDynamicMaxPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPrecision import ERADS_UltraDynamicMaxPrecision lama_register["ERADS_UltraDynamicMaxPrecision"] = ERADS_UltraDynamicMaxPrecision - LLAMAERADS_UltraDynamicMaxPrecision = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxPrecision" - ).set_name("LLAMAERADS_UltraDynamicMaxPrecision", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxPrecision = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPrecision").set_name("LLAMAERADS_UltraDynamicMaxPrecision", register=True) except Exception as e: print("ERADS_UltraDynamicMaxPrecision can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefined import ERADS_UltraDynamicMaxRefined lama_register["ERADS_UltraDynamicMaxRefined"] = ERADS_UltraDynamicMaxRefined - LLAMAERADS_UltraDynamicMaxRefined = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxRefined" - ).set_name("LLAMAERADS_UltraDynamicMaxRefined", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefined").set_name("LLAMAERADS_UltraDynamicMaxRefined", register=True) except Exception as e: print("ERADS_UltraDynamicMaxRefined can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefinedPlus import ERADS_UltraDynamicMaxRefinedPlus lama_register["ERADS_UltraDynamicMaxRefinedPlus"] = ERADS_UltraDynamicMaxRefinedPlus - LLAMAERADS_UltraDynamicMaxRefinedPlus = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxRefinedPlus" - ).set_name("LLAMAERADS_UltraDynamicMaxRefinedPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefinedPlus").set_name("LLAMAERADS_UltraDynamicMaxRefinedPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxRefinedPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxSupreme import ERADS_UltraDynamicMaxSupreme lama_register["ERADS_UltraDynamicMaxSupreme"] = ERADS_UltraDynamicMaxSupreme - LLAMAERADS_UltraDynamicMaxSupreme = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxSupreme" - ).set_name("LLAMAERADS_UltraDynamicMaxSupreme", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxSupreme")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxSupreme = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxSupreme").set_name("LLAMAERADS_UltraDynamicMaxSupreme", register=True) except Exception as e: print("ERADS_UltraDynamicMaxSupreme can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltra import ERADS_UltraDynamicMaxUltra lama_register["ERADS_UltraDynamicMaxUltra"] = ERADS_UltraDynamicMaxUltra - LLAMAERADS_UltraDynamicMaxUltra = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra").set_name( - "LLAMAERADS_UltraDynamicMaxUltra", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltra = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra").set_name("LLAMAERADS_UltraDynamicMaxUltra", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltra can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraPlus import ERADS_UltraDynamicMaxUltraPlus lama_register["ERADS_UltraDynamicMaxUltraPlus"] = ERADS_UltraDynamicMaxUltraPlus - LLAMAERADS_UltraDynamicMaxUltraPlus = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraPlus" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraPlus", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraPlus").set_name("LLAMAERADS_UltraDynamicMaxUltraPlus", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefined import ( - ERADS_UltraDynamicMaxUltraRefined, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefined import ERADS_UltraDynamicMaxUltraRefined lama_register["ERADS_UltraDynamicMaxUltraRefined"] = ERADS_UltraDynamicMaxUltraRefined - LLAMAERADS_UltraDynamicMaxUltraRefined = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefined" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefined", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefined").set_name("LLAMAERADS_UltraDynamicMaxUltraRefined", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV2 import ( - ERADS_UltraDynamicMaxUltraRefinedV2, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV2 import ERADS_UltraDynamicMaxUltraRefinedV2 lama_register["ERADS_UltraDynamicMaxUltraRefinedV2"] = ERADS_UltraDynamicMaxUltraRefinedV2 - LLAMAERADS_UltraDynamicMaxUltraRefinedV2 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV2", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV2 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV2", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV3 import ( - ERADS_UltraDynamicMaxUltraRefinedV3, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV3 import ERADS_UltraDynamicMaxUltraRefinedV3 lama_register["ERADS_UltraDynamicMaxUltraRefinedV3"] = ERADS_UltraDynamicMaxUltraRefinedV3 - LLAMAERADS_UltraDynamicMaxUltraRefinedV3 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV3", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV3 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV3", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV4 import ( - ERADS_UltraDynamicMaxUltraRefinedV4, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV4 import ERADS_UltraDynamicMaxUltraRefinedV4 lama_register["ERADS_UltraDynamicMaxUltraRefinedV4"] = ERADS_UltraDynamicMaxUltraRefinedV4 - LLAMAERADS_UltraDynamicMaxUltraRefinedV4 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV4", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV4 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV4", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV5 import ( - ERADS_UltraDynamicMaxUltraRefinedV5, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV5 import ERADS_UltraDynamicMaxUltraRefinedV5 lama_register["ERADS_UltraDynamicMaxUltraRefinedV5"] = ERADS_UltraDynamicMaxUltraRefinedV5 - LLAMAERADS_UltraDynamicMaxUltraRefinedV5 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV5", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV5 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV5", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV6 import ( - ERADS_UltraDynamicMaxUltraRefinedV6, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV6 import ERADS_UltraDynamicMaxUltraRefinedV6 lama_register["ERADS_UltraDynamicMaxUltraRefinedV6"] = ERADS_UltraDynamicMaxUltraRefinedV6 - LLAMAERADS_UltraDynamicMaxUltraRefinedV6 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV6", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV6 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV6", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV7 import ( - ERADS_UltraDynamicMaxUltraRefinedV7, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV7 import ERADS_UltraDynamicMaxUltraRefinedV7 lama_register["ERADS_UltraDynamicMaxUltraRefinedV7"] = ERADS_UltraDynamicMaxUltraRefinedV7 - LLAMAERADS_UltraDynamicMaxUltraRefinedV7 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV7", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV7 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV7", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV8 import ( - ERADS_UltraDynamicMaxUltraRefinedV8, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV8 import ERADS_UltraDynamicMaxUltraRefinedV8 lama_register["ERADS_UltraDynamicMaxUltraRefinedV8"] = ERADS_UltraDynamicMaxUltraRefinedV8 - LLAMAERADS_UltraDynamicMaxUltraRefinedV8 = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8" - ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV8", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV8 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV8", register=True) except Exception as e: print("ERADS_UltraDynamicMaxUltraRefinedV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraDynamicPlus import ERADS_UltraDynamicPlus lama_register["ERADS_UltraDynamicPlus"] = ERADS_UltraDynamicPlus - LLAMAERADS_UltraDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus").set_name( - "LLAMAERADS_UltraDynamicPlus", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus").set_name("LLAMAERADS_UltraDynamicPlus", register=True) except Exception as e: print("ERADS_UltraDynamicPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionEnhanced import ( - ERADS_UltraDynamicPrecisionEnhanced, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionEnhanced import ERADS_UltraDynamicPrecisionEnhanced lama_register["ERADS_UltraDynamicPrecisionEnhanced"] = ERADS_UltraDynamicPrecisionEnhanced - LLAMAERADS_UltraDynamicPrecisionEnhanced = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicPrecisionEnhanced" - ).set_name("LLAMAERADS_UltraDynamicPrecisionEnhanced", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicPrecisionEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionEnhanced").set_name("LLAMAERADS_UltraDynamicPrecisionEnhanced", register=True) except Exception as e: print("ERADS_UltraDynamicPrecisionEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionOptimized import ( - ERADS_UltraDynamicPrecisionOptimized, - ) + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionOptimized import ERADS_UltraDynamicPrecisionOptimized lama_register["ERADS_UltraDynamicPrecisionOptimized"] = ERADS_UltraDynamicPrecisionOptimized - LLAMAERADS_UltraDynamicPrecisionOptimized = NonObjectOptimizer( - method="LLAMAERADS_UltraDynamicPrecisionOptimized" - ).set_name("LLAMAERADS_UltraDynamicPrecisionOptimized", register=True) + res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraDynamicPrecisionOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionOptimized").set_name("LLAMAERADS_UltraDynamicPrecisionOptimized", register=True) except Exception as e: print("ERADS_UltraDynamicPrecisionOptimized can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraEnhanced import ERADS_UltraEnhanced lama_register["ERADS_UltraEnhanced"] = ERADS_UltraEnhanced - LLAMAERADS_UltraEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced").set_name( - "LLAMAERADS_UltraEnhanced", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced").set_name("LLAMAERADS_UltraEnhanced", register=True) except Exception as e: print("ERADS_UltraEnhanced can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraMax import ERADS_UltraMax lama_register["ERADS_UltraMax"] = ERADS_UltraMax - LLAMAERADS_UltraMax = NonObjectOptimizer(method="LLAMAERADS_UltraMax").set_name( - "LLAMAERADS_UltraMax", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraMax")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraMax = NonObjectOptimizer(method="LLAMAERADS_UltraMax").set_name("LLAMAERADS_UltraMax", register=True) except Exception as e: print("ERADS_UltraMax can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraOptimized import ERADS_UltraOptimized lama_register["ERADS_UltraOptimized"] = ERADS_UltraOptimized - LLAMAERADS_UltraOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized").set_name( - "LLAMAERADS_UltraOptimized", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized").set_name("LLAMAERADS_UltraOptimized", register=True) except Exception as e: print("ERADS_UltraOptimized can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraPrecise import ERADS_UltraPrecise lama_register["ERADS_UltraPrecise"] = ERADS_UltraPrecise - LLAMAERADS_UltraPrecise = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise").set_name( - "LLAMAERADS_UltraPrecise", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraPrecise = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise").set_name("LLAMAERADS_UltraPrecise", register=True) except Exception as e: print("ERADS_UltraPrecise can not be imported: ", e) - try: from nevergrad.optimization.lama.ERADS_UltraRefined import ERADS_UltraRefined lama_register["ERADS_UltraRefined"] = ERADS_UltraRefined - LLAMAERADS_UltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraRefined").set_name( - "LLAMAERADS_UltraRefined", register=True - ) + res = NonObjectOptimizer(method="LLAMAERADS_UltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAERADS_UltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraRefined").set_name("LLAMAERADS_UltraRefined", register=True) except Exception as e: print("ERADS_UltraRefined can not be imported: ", e) - try: from nevergrad.optimization.lama.ERAMEDS import ERAMEDS lama_register["ERAMEDS"] = ERAMEDS + res = NonObjectOptimizer(method="LLAMAERAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAERAMEDS = NonObjectOptimizer(method="LLAMAERAMEDS").set_name("LLAMAERAMEDS", register=True) except Exception as e: print("ERAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.ESADE import ESADE lama_register["ESADE"] = ESADE + res = NonObjectOptimizer(method="LLAMAESADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAESADE = NonObjectOptimizer(method="LLAMAESADE").set_name("LLAMAESADE", register=True) except Exception as e: print("ESADE can not be imported: ", e) - try: from nevergrad.optimization.lama.ESADEPFLLP import ESADEPFLLP lama_register["ESADEPFLLP"] = ESADEPFLLP + res = NonObjectOptimizer(method="LLAMAESADEPFLLP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAESADEPFLLP = NonObjectOptimizer(method="LLAMAESADEPFLLP").set_name("LLAMAESADEPFLLP", register=True) except Exception as e: print("ESADEPFLLP can not be imported: ", e) - try: from nevergrad.optimization.lama.ESBASM import ESBASM lama_register["ESBASM"] = ESBASM + res = NonObjectOptimizer(method="LLAMAESBASM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAESBASM = NonObjectOptimizer(method="LLAMAESBASM").set_name("LLAMAESBASM", register=True) except Exception as e: print("ESBASM can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteAdaptiveCrowdingHybridOptimizer import ( - EliteAdaptiveCrowdingHybridOptimizer, - ) + from nevergrad.optimization.lama.EliteAdaptiveCrowdingHybridOptimizer import EliteAdaptiveCrowdingHybridOptimizer lama_register["EliteAdaptiveCrowdingHybridOptimizer"] = EliteAdaptiveCrowdingHybridOptimizer - LLAMAEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( - method="LLAMAEliteAdaptiveCrowdingHybridOptimizer" - ).set_name("LLAMAEliteAdaptiveCrowdingHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteAdaptiveCrowdingHybridOptimizer").set_name("LLAMAEliteAdaptiveCrowdingHybridOptimizer", register=True) except Exception as e: print("EliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteAdaptiveHybridDEPSO import EliteAdaptiveHybridDEPSO lama_register["EliteAdaptiveHybridDEPSO"] = EliteAdaptiveHybridDEPSO - LLAMAEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO").set_name( - "LLAMAEliteAdaptiveHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO").set_name("LLAMAEliteAdaptiveHybridDEPSO", register=True) except Exception as e: print("EliteAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteAdaptiveMemeticDifferentialEvolution import ( - EliteAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.EliteAdaptiveMemeticDifferentialEvolution import EliteAdaptiveMemeticDifferentialEvolution lama_register["EliteAdaptiveMemeticDifferentialEvolution"] = EliteAdaptiveMemeticDifferentialEvolution - LLAMAEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEliteAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMAEliteAdaptiveMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEliteAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("EliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 import ( - EliteAdaptiveMemoryDynamicCrowdingOptimizerV2, - ) + from nevergrad.optimization.lama.EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 import EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 - lama_register["EliteAdaptiveMemoryDynamicCrowdingOptimizerV2"] = ( - EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 - ) - LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2 = NonObjectOptimizer( - method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2" - ).set_name("LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2", register=True) + lama_register["EliteAdaptiveMemoryDynamicCrowdingOptimizerV2"] = EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2 = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2").set_name("LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2", register=True) except Exception as e: print("EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteAdaptiveMemoryHybridOptimizer import ( - EliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EliteAdaptiveMemoryHybridOptimizer import EliteAdaptiveMemoryHybridOptimizer lama_register["EliteAdaptiveMemoryHybridOptimizer"] = EliteAdaptiveMemoryHybridOptimizer - LLAMAEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAEliteAdaptiveMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("EliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch import ( - EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch, - ) + from nevergrad.optimization.lama.EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch import EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch - lama_register["EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch"] = ( - EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch - ) - LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( - method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch" - ).set_name("LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch", register=True) + lama_register["EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch"] = EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch + res = NonObjectOptimizer(method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch").set_name("LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch", register=True) except Exception as e: print("EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteCovarianceMatrixAdaptationMemeticSearch import ( - EliteCovarianceMatrixAdaptationMemeticSearch, - ) + from nevergrad.optimization.lama.EliteCovarianceMatrixAdaptationMemeticSearch import EliteCovarianceMatrixAdaptationMemeticSearch - lama_register["EliteCovarianceMatrixAdaptationMemeticSearch"] = ( - EliteCovarianceMatrixAdaptationMemeticSearch - ) - LLAMAEliteCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( - method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch" - ).set_name("LLAMAEliteCovarianceMatrixAdaptationMemeticSearch", register=True) + lama_register["EliteCovarianceMatrixAdaptationMemeticSearch"] = EliteCovarianceMatrixAdaptationMemeticSearch + res = NonObjectOptimizer(method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer(method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch").set_name("LLAMAEliteCovarianceMatrixAdaptationMemeticSearch", register=True) except Exception as e: print("EliteCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteDynamicHybridOptimizer import EliteDynamicHybridOptimizer lama_register["EliteDynamicHybridOptimizer"] = EliteDynamicHybridOptimizer - LLAMAEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer").set_name( - "LLAMAEliteDynamicHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer").set_name("LLAMAEliteDynamicHybridOptimizer", register=True) except Exception as e: print("EliteDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteDynamicMemoryHybridOptimizer import ( - EliteDynamicMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EliteDynamicMemoryHybridOptimizer import EliteDynamicMemoryHybridOptimizer lama_register["EliteDynamicMemoryHybridOptimizer"] = EliteDynamicMemoryHybridOptimizer - LLAMAEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEliteDynamicMemoryHybridOptimizer" - ).set_name("LLAMAEliteDynamicMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicMemoryHybridOptimizer").set_name("LLAMAEliteDynamicMemoryHybridOptimizer", register=True) except Exception as e: print("EliteDynamicMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteDynamicMultiStrategyHybridDEPSO import ( - EliteDynamicMultiStrategyHybridDEPSO, - ) + from nevergrad.optimization.lama.EliteDynamicMultiStrategyHybridDEPSO import EliteDynamicMultiStrategyHybridDEPSO lama_register["EliteDynamicMultiStrategyHybridDEPSO"] = EliteDynamicMultiStrategyHybridDEPSO - LLAMAEliteDynamicMultiStrategyHybridDEPSO = NonObjectOptimizer( - method="LLAMAEliteDynamicMultiStrategyHybridDEPSO" - ).set_name("LLAMAEliteDynamicMultiStrategyHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEliteDynamicMultiStrategyHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteDynamicMultiStrategyHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteDynamicMultiStrategyHybridDEPSO").set_name("LLAMAEliteDynamicMultiStrategyHybridDEPSO", register=True) except Exception as e: print("EliteDynamicMultiStrategyHybridDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedAdaptiveRestartDE import EliteGuidedAdaptiveRestartDE lama_register["EliteGuidedAdaptiveRestartDE"] = EliteGuidedAdaptiveRestartDE - LLAMAEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( - method="LLAMAEliteGuidedAdaptiveRestartDE" - ).set_name("LLAMAEliteGuidedAdaptiveRestartDE", register=True) + res = NonObjectOptimizer(method="LLAMAEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAEliteGuidedAdaptiveRestartDE").set_name("LLAMAEliteGuidedAdaptiveRestartDE", register=True) except Exception as e: print("EliteGuidedAdaptiveRestartDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedDualStrategyDE import EliteGuidedDualStrategyDE lama_register["EliteGuidedDualStrategyDE"] = EliteGuidedDualStrategyDE - LLAMAEliteGuidedDualStrategyDE = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE").set_name( - "LLAMAEliteGuidedDualStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedDualStrategyDE = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE").set_name("LLAMAEliteGuidedDualStrategyDE", register=True) except Exception as e: print("EliteGuidedDualStrategyDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedHybridAdaptiveDE import EliteGuidedHybridAdaptiveDE lama_register["EliteGuidedHybridAdaptiveDE"] = EliteGuidedHybridAdaptiveDE - LLAMAEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE").set_name( - "LLAMAEliteGuidedHybridAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE").set_name("LLAMAEliteGuidedHybridAdaptiveDE", register=True) except Exception as e: print("EliteGuidedHybridAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedHybridDE import EliteGuidedHybridDE lama_register["EliteGuidedHybridDE"] = EliteGuidedHybridDE - LLAMAEliteGuidedHybridDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE").set_name( - "LLAMAEliteGuidedHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedHybridDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE").set_name("LLAMAEliteGuidedHybridDE", register=True) except Exception as e: print("EliteGuidedHybridDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedMutationDE import EliteGuidedMutationDE lama_register["EliteGuidedMutationDE"] = EliteGuidedMutationDE - LLAMAEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE").set_name( - "LLAMAEliteGuidedMutationDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE").set_name("LLAMAEliteGuidedMutationDE", register=True) except Exception as e: print("EliteGuidedMutationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedMutationDE_v2 import EliteGuidedMutationDE_v2 lama_register["EliteGuidedMutationDE_v2"] = EliteGuidedMutationDE_v2 - LLAMAEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2").set_name( - "LLAMAEliteGuidedMutationDE_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2").set_name("LLAMAEliteGuidedMutationDE_v2", register=True) except Exception as e: print("EliteGuidedMutationDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteGuidedQuantumAdaptiveDE import EliteGuidedQuantumAdaptiveDE lama_register["EliteGuidedQuantumAdaptiveDE"] = EliteGuidedQuantumAdaptiveDE - LLAMAEliteGuidedQuantumAdaptiveDE = NonObjectOptimizer( - method="LLAMAEliteGuidedQuantumAdaptiveDE" - ).set_name("LLAMAEliteGuidedQuantumAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEliteGuidedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteGuidedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedQuantumAdaptiveDE").set_name("LLAMAEliteGuidedQuantumAdaptiveDE", register=True) except Exception as e: print("EliteGuidedQuantumAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteHybridAdaptiveOptimizer import EliteHybridAdaptiveOptimizer lama_register["EliteHybridAdaptiveOptimizer"] = EliteHybridAdaptiveOptimizer - LLAMAEliteHybridAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEliteHybridAdaptiveOptimizer" - ).set_name("LLAMAEliteHybridAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteHybridAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteHybridAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEliteHybridAdaptiveOptimizer").set_name("LLAMAEliteHybridAdaptiveOptimizer", register=True) except Exception as e: print("EliteHybridAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteMemoryEnhancedDynamicHybridOptimizer import ( - EliteMemoryEnhancedDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.EliteMemoryEnhancedDynamicHybridOptimizer import EliteMemoryEnhancedDynamicHybridOptimizer lama_register["EliteMemoryEnhancedDynamicHybridOptimizer"] = EliteMemoryEnhancedDynamicHybridOptimizer - LLAMAEliteMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer" - ).set_name("LLAMAEliteMemoryEnhancedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMAEliteMemoryEnhancedDynamicHybridOptimizer", register=True) except Exception as e: print("EliteMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EliteMultiStrategySelfAdaptiveDE import EliteMultiStrategySelfAdaptiveDE lama_register["EliteMultiStrategySelfAdaptiveDE"] = EliteMultiStrategySelfAdaptiveDE - LLAMAEliteMultiStrategySelfAdaptiveDE = NonObjectOptimizer( - method="LLAMAEliteMultiStrategySelfAdaptiveDE" - ).set_name("LLAMAEliteMultiStrategySelfAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEliteMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteMultiStrategySelfAdaptiveDE").set_name("LLAMAEliteMultiStrategySelfAdaptiveDE", register=True) except Exception as e: print("EliteMultiStrategySelfAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.ElitePreservingDifferentialEvolution import ( - ElitePreservingDifferentialEvolution, - ) + from nevergrad.optimization.lama.ElitePreservingDifferentialEvolution import ElitePreservingDifferentialEvolution lama_register["ElitePreservingDifferentialEvolution"] = ElitePreservingDifferentialEvolution - LLAMAElitePreservingDifferentialEvolution = NonObjectOptimizer( - method="LLAMAElitePreservingDifferentialEvolution" - ).set_name("LLAMAElitePreservingDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAElitePreservingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAElitePreservingDifferentialEvolution = NonObjectOptimizer(method="LLAMAElitePreservingDifferentialEvolution").set_name("LLAMAElitePreservingDifferentialEvolution", register=True) except Exception as e: print("ElitePreservingDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteQuantumAdaptiveExplorationOptimization import ( - EliteQuantumAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.EliteQuantumAdaptiveExplorationOptimization import EliteQuantumAdaptiveExplorationOptimization lama_register["EliteQuantumAdaptiveExplorationOptimization"] = EliteQuantumAdaptiveExplorationOptimization - LLAMAEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAEliteQuantumAdaptiveExplorationOptimization" - ).set_name("LLAMAEliteQuantumAdaptiveExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEliteQuantumAdaptiveExplorationOptimization").set_name("LLAMAEliteQuantumAdaptiveExplorationOptimization", register=True) except Exception as e: print("EliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteQuantumDifferentialMemeticOptimizer import ( - EliteQuantumDifferentialMemeticOptimizer, - ) + from nevergrad.optimization.lama.EliteQuantumDifferentialMemeticOptimizer import EliteQuantumDifferentialMemeticOptimizer lama_register["EliteQuantumDifferentialMemeticOptimizer"] = EliteQuantumDifferentialMemeticOptimizer - LLAMAEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEliteQuantumDifferentialMemeticOptimizer" - ).set_name("LLAMAEliteQuantumDifferentialMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMAEliteQuantumDifferentialMemeticOptimizer").set_name("LLAMAEliteQuantumDifferentialMemeticOptimizer", register=True) except Exception as e: print("EliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteRefinedAdaptivePrecisionOptimizer import ( - EliteRefinedAdaptivePrecisionOptimizer, - ) + from nevergrad.optimization.lama.EliteRefinedAdaptivePrecisionOptimizer import EliteRefinedAdaptivePrecisionOptimizer lama_register["EliteRefinedAdaptivePrecisionOptimizer"] = EliteRefinedAdaptivePrecisionOptimizer - LLAMAEliteRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( - method="LLAMAEliteRefinedAdaptivePrecisionOptimizer" - ).set_name("LLAMAEliteRefinedAdaptivePrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAEliteRefinedAdaptivePrecisionOptimizer").set_name("LLAMAEliteRefinedAdaptivePrecisionOptimizer", register=True) except Exception as e: print("EliteRefinedAdaptivePrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EliteTranscendentalEvolutionaryOptimizer import ( - EliteTranscendentalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.EliteTranscendentalEvolutionaryOptimizer import EliteTranscendentalEvolutionaryOptimizer lama_register["EliteTranscendentalEvolutionaryOptimizer"] = EliteTranscendentalEvolutionaryOptimizer - LLAMAEliteTranscendentalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAEliteTranscendentalEvolutionaryOptimizer" - ).set_name("LLAMAEliteTranscendentalEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEliteTranscendentalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEliteTranscendentalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEliteTranscendentalEvolutionaryOptimizer").set_name("LLAMAEliteTranscendentalEvolutionaryOptimizer", register=True) except Exception as e: print("EliteTranscendentalEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ElitistAdaptiveDE import ElitistAdaptiveDE lama_register["ElitistAdaptiveDE"] = ElitistAdaptiveDE - LLAMAElitistAdaptiveDE = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE").set_name( - "LLAMAElitistAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAElitistAdaptiveDE = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE").set_name("LLAMAElitistAdaptiveDE", register=True) except Exception as e: print("ElitistAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW import EnhancedAQAPSOHR_LSDIW lama_register["EnhancedAQAPSOHR_LSDIW"] = EnhancedAQAPSOHR_LSDIW - LLAMAEnhancedAQAPSOHR_LSDIW = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW").set_name( - "LLAMAEnhancedAQAPSOHR_LSDIW", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSOHR_LSDIW = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW").set_name("LLAMAEnhancedAQAPSOHR_LSDIW", register=True) except Exception as e: print("EnhancedAQAPSOHR_LSDIW can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW_AP import EnhancedAQAPSOHR_LSDIW_AP lama_register["EnhancedAQAPSOHR_LSDIW_AP"] = EnhancedAQAPSOHR_LSDIW_AP - LLAMAEnhancedAQAPSOHR_LSDIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP").set_name( - "LLAMAEnhancedAQAPSOHR_LSDIW_AP", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSOHR_LSDIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP").set_name("LLAMAEnhancedAQAPSOHR_LSDIW_AP", register=True) except Exception as e: print("EnhancedAQAPSOHR_LSDIW_AP can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP import EnhancedAQAPSO_LS_DIW_AP lama_register["EnhancedAQAPSO_LS_DIW_AP"] = EnhancedAQAPSO_LS_DIW_AP - LLAMAEnhancedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP").set_name( - "LLAMAEnhancedAQAPSO_LS_DIW_AP", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Final import EnhancedAQAPSO_LS_DIW_AP_Final lama_register["EnhancedAQAPSO_LS_DIW_AP_Final"] = EnhancedAQAPSO_LS_DIW_AP_Final - LLAMAEnhancedAQAPSO_LS_DIW_AP_Final = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Final", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Final = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Final", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Final can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined import EnhancedAQAPSO_LS_DIW_AP_Refined lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Refined - LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined_Final import ( - EnhancedAQAPSO_LS_DIW_AP_Refined_Final, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined_Final import EnhancedAQAPSO_LS_DIW_AP_Refined_Final lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined_Final"] = EnhancedAQAPSO_LS_DIW_AP_Refined_Final - LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Refined_Final can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate import ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate import EnhancedAQAPSO_LS_DIW_AP_Ultimate lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Ultimate can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined import ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined - lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined"] = ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined - ) - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined", register=True) + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined import ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined import ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined, - ) + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined - lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined"] = ( - EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined - ) - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined = NonObjectOptimizer( - method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined" - ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined", register=True) + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined + res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined", register=True) except Exception as e: print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v2 import ( - EnhancedAdaptiveChaoticFireworksOptimization_v2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v2 import EnhancedAdaptiveChaoticFireworksOptimization_v2 - lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v2"] = ( - EnhancedAdaptiveChaoticFireworksOptimization_v2 - ) - LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2" - ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2", register=True) + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v2"] = EnhancedAdaptiveChaoticFireworksOptimization_v2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2").set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2", register=True) except Exception as e: print("EnhancedAdaptiveChaoticFireworksOptimization_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v3 import ( - EnhancedAdaptiveChaoticFireworksOptimization_v3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v3 import EnhancedAdaptiveChaoticFireworksOptimization_v3 - lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v3"] = ( - EnhancedAdaptiveChaoticFireworksOptimization_v3 - ) - LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3" - ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3", register=True) + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v3"] = EnhancedAdaptiveChaoticFireworksOptimization_v3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3").set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3", register=True) except Exception as e: print("EnhancedAdaptiveChaoticFireworksOptimization_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveCohortMemeticAlgorithm import ( - EnhancedAdaptiveCohortMemeticAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveCohortMemeticAlgorithm import EnhancedAdaptiveCohortMemeticAlgorithm lama_register["EnhancedAdaptiveCohortMemeticAlgorithm"] = EnhancedAdaptiveCohortMemeticAlgorithm - LLAMAEnhancedAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveCohortMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveCohortMemeticAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveCohortMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveControlledMemoryAnnealing import ( - EnhancedAdaptiveControlledMemoryAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveControlledMemoryAnnealing import EnhancedAdaptiveControlledMemoryAnnealing lama_register["EnhancedAdaptiveControlledMemoryAnnealing"] = EnhancedAdaptiveControlledMemoryAnnealing - LLAMAEnhancedAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing" - ).set_name("LLAMAEnhancedAdaptiveControlledMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveControlledMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing").set_name("LLAMAEnhancedAdaptiveControlledMemoryAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveControlledMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 import ( - EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 import EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 - lama_register["EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4"] = ( - EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 - ) - LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4" - ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4", register=True) + lama_register["EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4"] = EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4", register=True) except Exception as e: print("EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixEvolution import ( - EnhancedAdaptiveCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixEvolution import EnhancedAdaptiveCovarianceMatrixEvolution lama_register["EnhancedAdaptiveCovarianceMatrixEvolution"] = EnhancedAdaptiveCovarianceMatrixEvolution - LLAMAEnhancedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution" - ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution").set_name("LLAMAEnhancedAdaptiveCovarianceMatrixEvolution", register=True) except Exception as e: print("EnhancedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveDEPSOOptimizer import EnhancedAdaptiveDEPSOOptimizer lama_register["EnhancedAdaptiveDEPSOOptimizer"] = EnhancedAdaptiveDEPSOOptimizer - LLAMAEnhancedAdaptiveDEPSOOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDEPSOOptimizer" - ).set_name("LLAMAEnhancedAdaptiveDEPSOOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDEPSOOptimizer").set_name("LLAMAEnhancedAdaptiveDEPSOOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveDEPSOOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiffEvolutionGradientDescent import ( - EnhancedAdaptiveDiffEvolutionGradientDescent, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiffEvolutionGradientDescent import EnhancedAdaptiveDiffEvolutionGradientDescent - lama_register["EnhancedAdaptiveDiffEvolutionGradientDescent"] = ( - EnhancedAdaptiveDiffEvolutionGradientDescent - ) - LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent" - ).set_name("LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent", register=True) + lama_register["EnhancedAdaptiveDiffEvolutionGradientDescent"] = EnhancedAdaptiveDiffEvolutionGradientDescent + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent").set_name("LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent", register=True) except Exception as e: print("EnhancedAdaptiveDiffEvolutionGradientDescent can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolution import ( - EnhancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolution import EnhancedAdaptiveDifferentialEvolution lama_register["EnhancedAdaptiveDifferentialEvolution"] = EnhancedAdaptiveDifferentialEvolution - LLAMAEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamic import ( - EnhancedAdaptiveDifferentialEvolutionDynamic, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamic import EnhancedAdaptiveDifferentialEvolutionDynamic - lama_register["EnhancedAdaptiveDifferentialEvolutionDynamic"] = ( - EnhancedAdaptiveDifferentialEvolutionDynamic - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamic"] = EnhancedAdaptiveDifferentialEvolutionDynamic + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionDynamic can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamicImproved import ( - EnhancedAdaptiveDifferentialEvolutionDynamicImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamicImproved import EnhancedAdaptiveDifferentialEvolutionDynamicImproved - lama_register["EnhancedAdaptiveDifferentialEvolutionDynamicImproved"] = ( - EnhancedAdaptiveDifferentialEvolutionDynamicImproved - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamicImproved"] = EnhancedAdaptiveDifferentialEvolutionDynamicImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionDynamicImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionEnhanced import ( - EnhancedAdaptiveDifferentialEvolutionEnhanced, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionEnhanced import EnhancedAdaptiveDifferentialEvolutionEnhanced - lama_register["EnhancedAdaptiveDifferentialEvolutionEnhanced"] = ( - EnhancedAdaptiveDifferentialEvolutionEnhanced - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionEnhanced"] = EnhancedAdaptiveDifferentialEvolutionEnhanced + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefined import ( - EnhancedAdaptiveDifferentialEvolutionRefined, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefined import EnhancedAdaptiveDifferentialEvolutionRefined - lama_register["EnhancedAdaptiveDifferentialEvolutionRefined"] = ( - EnhancedAdaptiveDifferentialEvolutionRefined - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionRefined"] = EnhancedAdaptiveDifferentialEvolutionRefined + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedImproved import ( - EnhancedAdaptiveDifferentialEvolutionRefinedImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedImproved import EnhancedAdaptiveDifferentialEvolutionRefinedImproved - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedImproved"] = ( - EnhancedAdaptiveDifferentialEvolutionRefinedImproved - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedImproved"] = EnhancedAdaptiveDifferentialEvolutionRefinedImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionRefinedImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV2 import ( - EnhancedAdaptiveDifferentialEvolutionRefinedV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV2 import EnhancedAdaptiveDifferentialEvolutionRefinedV2 - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV2"] = ( - EnhancedAdaptiveDifferentialEvolutionRefinedV2 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV2"] = EnhancedAdaptiveDifferentialEvolutionRefinedV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionRefinedV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV3 import ( - EnhancedAdaptiveDifferentialEvolutionRefinedV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV3 import EnhancedAdaptiveDifferentialEvolutionRefinedV3 - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV3"] = ( - EnhancedAdaptiveDifferentialEvolutionRefinedV3 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV3"] = EnhancedAdaptiveDifferentialEvolutionRefinedV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionRefinedV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV4 import ( - EnhancedAdaptiveDifferentialEvolutionRefinedV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV4 import EnhancedAdaptiveDifferentialEvolutionRefinedV4 - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV4"] = ( - EnhancedAdaptiveDifferentialEvolutionRefinedV4 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV4"] = EnhancedAdaptiveDifferentialEvolutionRefinedV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionRefinedV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV10 import ( - EnhancedAdaptiveDifferentialEvolutionV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV10 import EnhancedAdaptiveDifferentialEvolutionV10 lama_register["EnhancedAdaptiveDifferentialEvolutionV10"] = EnhancedAdaptiveDifferentialEvolutionV10 - LLAMAEnhancedAdaptiveDifferentialEvolutionV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV10", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV11 import ( - EnhancedAdaptiveDifferentialEvolutionV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV11 import EnhancedAdaptiveDifferentialEvolutionV11 lama_register["EnhancedAdaptiveDifferentialEvolutionV11"] = EnhancedAdaptiveDifferentialEvolutionV11 - LLAMAEnhancedAdaptiveDifferentialEvolutionV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV11", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV12 import ( - EnhancedAdaptiveDifferentialEvolutionV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV12 import EnhancedAdaptiveDifferentialEvolutionV12 lama_register["EnhancedAdaptiveDifferentialEvolutionV12"] = EnhancedAdaptiveDifferentialEvolutionV12 - LLAMAEnhancedAdaptiveDifferentialEvolutionV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV12", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV13 import ( - EnhancedAdaptiveDifferentialEvolutionV13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV13 import EnhancedAdaptiveDifferentialEvolutionV13 lama_register["EnhancedAdaptiveDifferentialEvolutionV13"] = EnhancedAdaptiveDifferentialEvolutionV13 - LLAMAEnhancedAdaptiveDifferentialEvolutionV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV13", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV14 import ( - EnhancedAdaptiveDifferentialEvolutionV14, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV14 import EnhancedAdaptiveDifferentialEvolutionV14 lama_register["EnhancedAdaptiveDifferentialEvolutionV14"] = EnhancedAdaptiveDifferentialEvolutionV14 - LLAMAEnhancedAdaptiveDifferentialEvolutionV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV14", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV15 import ( - EnhancedAdaptiveDifferentialEvolutionV15, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV15 import EnhancedAdaptiveDifferentialEvolutionV15 lama_register["EnhancedAdaptiveDifferentialEvolutionV15"] = EnhancedAdaptiveDifferentialEvolutionV15 - LLAMAEnhancedAdaptiveDifferentialEvolutionV15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV15", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV16 import ( - EnhancedAdaptiveDifferentialEvolutionV16, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV16 import EnhancedAdaptiveDifferentialEvolutionV16 lama_register["EnhancedAdaptiveDifferentialEvolutionV16"] = EnhancedAdaptiveDifferentialEvolutionV16 - LLAMAEnhancedAdaptiveDifferentialEvolutionV16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV16", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV17 import ( - EnhancedAdaptiveDifferentialEvolutionV17, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV17 import EnhancedAdaptiveDifferentialEvolutionV17 lama_register["EnhancedAdaptiveDifferentialEvolutionV17"] = EnhancedAdaptiveDifferentialEvolutionV17 - LLAMAEnhancedAdaptiveDifferentialEvolutionV17 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV17", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV18 import ( - EnhancedAdaptiveDifferentialEvolutionV18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV18 import EnhancedAdaptiveDifferentialEvolutionV18 lama_register["EnhancedAdaptiveDifferentialEvolutionV18"] = EnhancedAdaptiveDifferentialEvolutionV18 - LLAMAEnhancedAdaptiveDifferentialEvolutionV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV18", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV19 import ( - EnhancedAdaptiveDifferentialEvolutionV19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV19 import EnhancedAdaptiveDifferentialEvolutionV19 lama_register["EnhancedAdaptiveDifferentialEvolutionV19"] = EnhancedAdaptiveDifferentialEvolutionV19 - LLAMAEnhancedAdaptiveDifferentialEvolutionV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV19", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV20 import ( - EnhancedAdaptiveDifferentialEvolutionV20, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV20 import EnhancedAdaptiveDifferentialEvolutionV20 lama_register["EnhancedAdaptiveDifferentialEvolutionV20"] = EnhancedAdaptiveDifferentialEvolutionV20 - LLAMAEnhancedAdaptiveDifferentialEvolutionV20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV20", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV21 import ( - EnhancedAdaptiveDifferentialEvolutionV21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV21 import EnhancedAdaptiveDifferentialEvolutionV21 lama_register["EnhancedAdaptiveDifferentialEvolutionV21"] = EnhancedAdaptiveDifferentialEvolutionV21 - LLAMAEnhancedAdaptiveDifferentialEvolutionV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV21", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV22 import ( - EnhancedAdaptiveDifferentialEvolutionV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV22 import EnhancedAdaptiveDifferentialEvolutionV22 lama_register["EnhancedAdaptiveDifferentialEvolutionV22"] = EnhancedAdaptiveDifferentialEvolutionV22 - LLAMAEnhancedAdaptiveDifferentialEvolutionV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV22", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV23 import ( - EnhancedAdaptiveDifferentialEvolutionV23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV23 import EnhancedAdaptiveDifferentialEvolutionV23 lama_register["EnhancedAdaptiveDifferentialEvolutionV23"] = EnhancedAdaptiveDifferentialEvolutionV23 - LLAMAEnhancedAdaptiveDifferentialEvolutionV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV23", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV24 import ( - EnhancedAdaptiveDifferentialEvolutionV24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV24 import EnhancedAdaptiveDifferentialEvolutionV24 lama_register["EnhancedAdaptiveDifferentialEvolutionV24"] = EnhancedAdaptiveDifferentialEvolutionV24 - LLAMAEnhancedAdaptiveDifferentialEvolutionV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV24", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV25 import ( - EnhancedAdaptiveDifferentialEvolutionV25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV25 import EnhancedAdaptiveDifferentialEvolutionV25 lama_register["EnhancedAdaptiveDifferentialEvolutionV25"] = EnhancedAdaptiveDifferentialEvolutionV25 - LLAMAEnhancedAdaptiveDifferentialEvolutionV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV25", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV26 import ( - EnhancedAdaptiveDifferentialEvolutionV26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV26 import EnhancedAdaptiveDifferentialEvolutionV26 lama_register["EnhancedAdaptiveDifferentialEvolutionV26"] = EnhancedAdaptiveDifferentialEvolutionV26 - LLAMAEnhancedAdaptiveDifferentialEvolutionV26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV26", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV27 import ( - EnhancedAdaptiveDifferentialEvolutionV27, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV27 import EnhancedAdaptiveDifferentialEvolutionV27 lama_register["EnhancedAdaptiveDifferentialEvolutionV27"] = EnhancedAdaptiveDifferentialEvolutionV27 - LLAMAEnhancedAdaptiveDifferentialEvolutionV27 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV27", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV27", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV28 import ( - EnhancedAdaptiveDifferentialEvolutionV28, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV28 import EnhancedAdaptiveDifferentialEvolutionV28 lama_register["EnhancedAdaptiveDifferentialEvolutionV28"] = EnhancedAdaptiveDifferentialEvolutionV28 - LLAMAEnhancedAdaptiveDifferentialEvolutionV28 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV28", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV28", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV4 import ( - EnhancedAdaptiveDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV4 import EnhancedAdaptiveDifferentialEvolutionV4 lama_register["EnhancedAdaptiveDifferentialEvolutionV4"] = EnhancedAdaptiveDifferentialEvolutionV4 - LLAMAEnhancedAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV4", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV5 import ( - EnhancedAdaptiveDifferentialEvolutionV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV5 import EnhancedAdaptiveDifferentialEvolutionV5 lama_register["EnhancedAdaptiveDifferentialEvolutionV5"] = EnhancedAdaptiveDifferentialEvolutionV5 - LLAMAEnhancedAdaptiveDifferentialEvolutionV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV5", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV6 import ( - EnhancedAdaptiveDifferentialEvolutionV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV6 import EnhancedAdaptiveDifferentialEvolutionV6 lama_register["EnhancedAdaptiveDifferentialEvolutionV6"] = EnhancedAdaptiveDifferentialEvolutionV6 - LLAMAEnhancedAdaptiveDifferentialEvolutionV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV6", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV7 import ( - EnhancedAdaptiveDifferentialEvolutionV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV7 import EnhancedAdaptiveDifferentialEvolutionV7 lama_register["EnhancedAdaptiveDifferentialEvolutionV7"] = EnhancedAdaptiveDifferentialEvolutionV7 - LLAMAEnhancedAdaptiveDifferentialEvolutionV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV7", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV8 import ( - EnhancedAdaptiveDifferentialEvolutionV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV8 import EnhancedAdaptiveDifferentialEvolutionV8 lama_register["EnhancedAdaptiveDifferentialEvolutionV8"] = EnhancedAdaptiveDifferentialEvolutionV8 - LLAMAEnhancedAdaptiveDifferentialEvolutionV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV8", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV9 import ( - EnhancedAdaptiveDifferentialEvolutionV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV9 import EnhancedAdaptiveDifferentialEvolutionV9 lama_register["EnhancedAdaptiveDifferentialEvolutionV9"] = EnhancedAdaptiveDifferentialEvolutionV9 - LLAMAEnhancedAdaptiveDifferentialEvolutionV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV9", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( - EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch import EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch - lama_register["EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( - EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation import EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved import EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved" - ).set_name( - "LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved", register=True - ) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved", register=True) except Exception as e: - print( - "EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved can not be imported: ", - e, - ) - + print("EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters import EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize import EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined import ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined import EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined"] = ( - EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters import ( - EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters import EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters - lama_register["EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters"] = ( - EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters - ) - LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters" - ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters", register=True) + lama_register["EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters"] = EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialMemeticAlgorithm import ( - EnhancedAdaptiveDifferentialMemeticAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialMemeticAlgorithm import EnhancedAdaptiveDifferentialMemeticAlgorithm - lama_register["EnhancedAdaptiveDifferentialMemeticAlgorithm"] = ( - EnhancedAdaptiveDifferentialMemeticAlgorithm - ) - LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm", register=True) + lama_register["EnhancedAdaptiveDifferentialMemeticAlgorithm"] = EnhancedAdaptiveDifferentialMemeticAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDirectionalBiasQuorumOptimization import ( - EnhancedAdaptiveDirectionalBiasQuorumOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDirectionalBiasQuorumOptimization import EnhancedAdaptiveDirectionalBiasQuorumOptimization - lama_register["EnhancedAdaptiveDirectionalBiasQuorumOptimization"] = ( - EnhancedAdaptiveDirectionalBiasQuorumOptimization - ) - LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization" - ).set_name("LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization", register=True) + lama_register["EnhancedAdaptiveDirectionalBiasQuorumOptimization"] = EnhancedAdaptiveDirectionalBiasQuorumOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization", register=True) except Exception as e: print("EnhancedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedEvolutionStrategy import ( - EnhancedAdaptiveDiversifiedEvolutionStrategy, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedEvolutionStrategy import EnhancedAdaptiveDiversifiedEvolutionStrategy - lama_register["EnhancedAdaptiveDiversifiedEvolutionStrategy"] = ( - EnhancedAdaptiveDiversifiedEvolutionStrategy - ) - LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy", register=True) + lama_register["EnhancedAdaptiveDiversifiedEvolutionStrategy"] = EnhancedAdaptiveDiversifiedEvolutionStrategy + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy").set_name("LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization import ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization"] = ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization - ) - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization", register=True) + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 import ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2"] = ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 - ) - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2", register=True) + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 import ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3"] = ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 - ) - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3", register=True) + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 import ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4"] = ( - EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 - ) - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4", register=True) + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearch import ( - EnhancedAdaptiveDiversifiedHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearch import EnhancedAdaptiveDiversifiedHarmonySearch lama_register["EnhancedAdaptiveDiversifiedHarmonySearch"] = EnhancedAdaptiveDiversifiedHarmonySearch - LLAMAEnhancedAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizer import ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizer import EnhancedAdaptiveDiversifiedHarmonySearchOptimizer - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizer"] = ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizer - ) - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer", register=True) + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizer"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 import ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2"] = ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 - ) - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2", register=True) + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 import ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3"] = ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 - ) - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3", register=True) + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 import ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4"] = ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 - ) - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4", register=True) + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 import ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5"] = ( - EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 - ) - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5", register=True) + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV2 import ( - EnhancedAdaptiveDiversifiedHarmonySearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV2 import EnhancedAdaptiveDiversifiedHarmonySearchV2 lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV2"] = EnhancedAdaptiveDiversifiedHarmonySearchV2 - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV3 import ( - EnhancedAdaptiveDiversifiedHarmonySearchV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV3 import EnhancedAdaptiveDiversifiedHarmonySearchV3 lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV3"] = EnhancedAdaptiveDiversifiedHarmonySearchV3 - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV4 import ( - EnhancedAdaptiveDiversifiedHarmonySearchV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV4 import EnhancedAdaptiveDiversifiedHarmonySearchV4 lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV4"] = EnhancedAdaptiveDiversifiedHarmonySearchV4 - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedHarmonySearchV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm import ( - EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm import EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm - lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm"] = ( - EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm - ) - LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm", register=True) + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm"] = EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 import ( - EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 import EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 - lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2"] = ( - EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 - ) - LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2", register=True) + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2"] = EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedSearch import ( - EnhancedAdaptiveDiversifiedSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedSearch import EnhancedAdaptiveDiversifiedSearch lama_register["EnhancedAdaptiveDiversifiedSearch"] = EnhancedAdaptiveDiversifiedSearch - LLAMAEnhancedAdaptiveDiversifiedSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDiversifiedSearch" - ).set_name("LLAMAEnhancedAdaptiveDiversifiedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedSearch").set_name("LLAMAEnhancedAdaptiveDiversifiedSearch", register=True) except Exception as e: print("EnhancedAdaptiveDiversifiedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDolphinPodOptimization import ( - EnhancedAdaptiveDolphinPodOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDolphinPodOptimization import EnhancedAdaptiveDolphinPodOptimization lama_register["EnhancedAdaptiveDolphinPodOptimization"] = EnhancedAdaptiveDolphinPodOptimization - LLAMAEnhancedAdaptiveDolphinPodOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDolphinPodOptimization" - ).set_name("LLAMAEnhancedAdaptiveDolphinPodOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDolphinPodOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDolphinPodOptimization").set_name("LLAMAEnhancedAdaptiveDolphinPodOptimization", register=True) except Exception as e: print("EnhancedAdaptiveDolphinPodOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization import ( - EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization import EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization - lama_register["EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( - EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization - ) - LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) + lama_register["EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization"] = EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization").set_name("LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( - EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl import EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl - lama_register["EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( - EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl - ) - LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl" - ).set_name("LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) + lama_register["EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl").set_name("LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) except Exception as e: print("EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV2 import ( - EnhancedAdaptiveDualPhaseStrategyV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV2 import EnhancedAdaptiveDualPhaseStrategyV2 lama_register["EnhancedAdaptiveDualPhaseStrategyV2"] = EnhancedAdaptiveDualPhaseStrategyV2 - LLAMAEnhancedAdaptiveDualPhaseStrategyV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2" - ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDualPhaseStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2").set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV2", register=True) except Exception as e: print("EnhancedAdaptiveDualPhaseStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV5 import ( - EnhancedAdaptiveDualPhaseStrategyV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV5 import EnhancedAdaptiveDualPhaseStrategyV5 lama_register["EnhancedAdaptiveDualPhaseStrategyV5"] = EnhancedAdaptiveDualPhaseStrategyV5 - LLAMAEnhancedAdaptiveDualPhaseStrategyV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5" - ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDualPhaseStrategyV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5").set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV5", register=True) except Exception as e: print("EnhancedAdaptiveDualPhaseStrategyV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualStrategyOptimizer import ( - EnhancedAdaptiveDualStrategyOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDualStrategyOptimizer import EnhancedAdaptiveDualStrategyOptimizer lama_register["EnhancedAdaptiveDualStrategyOptimizer"] = EnhancedAdaptiveDualStrategyOptimizer - LLAMAEnhancedAdaptiveDualStrategyOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDualStrategyOptimizer" - ).set_name("LLAMAEnhancedAdaptiveDualStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualStrategyOptimizer").set_name("LLAMAEnhancedAdaptiveDualStrategyOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveDualStrategyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDE import EnhancedAdaptiveDynamicDE lama_register["EnhancedAdaptiveDynamicDE"] = EnhancedAdaptiveDynamicDE - LLAMAEnhancedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE").set_name( - "LLAMAEnhancedAdaptiveDynamicDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE").set_name("LLAMAEnhancedAdaptiveDynamicDE", register=True) except Exception as e: print("EnhancedAdaptiveDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDifferentialEvolution import ( - EnhancedAdaptiveDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDifferentialEvolution import EnhancedAdaptiveDynamicDifferentialEvolution - lama_register["EnhancedAdaptiveDynamicDifferentialEvolution"] = ( - EnhancedAdaptiveDynamicDifferentialEvolution - ) - LLAMAEnhancedAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveDynamicDifferentialEvolution"] = EnhancedAdaptiveDynamicDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV19 import ( - EnhancedAdaptiveDynamicDualPhaseStrategyV19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV19 import EnhancedAdaptiveDynamicDualPhaseStrategyV19 lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV19"] = EnhancedAdaptiveDynamicDualPhaseStrategyV19 - LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19" - ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19").set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19", register=True) except Exception as e: print("EnhancedAdaptiveDynamicDualPhaseStrategyV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV22 import ( - EnhancedAdaptiveDynamicDualPhaseStrategyV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV22 import EnhancedAdaptiveDynamicDualPhaseStrategyV22 lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV22"] = EnhancedAdaptiveDynamicDualPhaseStrategyV22 - LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22" - ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22").set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22", register=True) except Exception as e: print("EnhancedAdaptiveDynamicDualPhaseStrategyV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithm import ( - EnhancedAdaptiveDynamicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithm import EnhancedAdaptiveDynamicFireworkAlgorithm lama_register["EnhancedAdaptiveDynamicFireworkAlgorithm"] = EnhancedAdaptiveDynamicFireworkAlgorithm - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced import ( - EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced import EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced"] = ( - EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced - ) - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced"] = EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmImproved import ( - EnhancedAdaptiveDynamicFireworkAlgorithmImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmImproved import EnhancedAdaptiveDynamicFireworkAlgorithmImproved - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmImproved"] = ( - EnhancedAdaptiveDynamicFireworkAlgorithmImproved - ) - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmImproved"] = EnhancedAdaptiveDynamicFireworkAlgorithmImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkAlgorithmImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmRefined import ( - EnhancedAdaptiveDynamicFireworkAlgorithmRefined, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmRefined import EnhancedAdaptiveDynamicFireworkAlgorithmRefined - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmRefined"] = ( - EnhancedAdaptiveDynamicFireworkAlgorithmRefined - ) - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmRefined"] = EnhancedAdaptiveDynamicFireworkAlgorithmRefined + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkAlgorithmRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 import ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5"] = ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 - ) - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 import ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6"] = ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 - ) - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 import ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7"] = ( - EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 - ) - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7" - ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7", register=True) + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7", register=True) except Exception as e: print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearch import ( - EnhancedAdaptiveDynamicHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearch import EnhancedAdaptiveDynamicHarmonySearch lama_register["EnhancedAdaptiveDynamicHarmonySearch"] = EnhancedAdaptiveDynamicHarmonySearch - LLAMAEnhancedAdaptiveDynamicHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicHarmonySearch" - ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearch").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearch", register=True) except Exception as e: print("EnhancedAdaptiveDynamicHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV2 import ( - EnhancedAdaptiveDynamicHarmonySearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV2 import EnhancedAdaptiveDynamicHarmonySearchV2 lama_register["EnhancedAdaptiveDynamicHarmonySearchV2"] = EnhancedAdaptiveDynamicHarmonySearchV2 - LLAMAEnhancedAdaptiveDynamicHarmonySearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2" - ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2", register=True) except Exception as e: print("EnhancedAdaptiveDynamicHarmonySearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV3 import ( - EnhancedAdaptiveDynamicHarmonySearchV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV3 import EnhancedAdaptiveDynamicHarmonySearchV3 lama_register["EnhancedAdaptiveDynamicHarmonySearchV3"] = EnhancedAdaptiveDynamicHarmonySearchV3 - LLAMAEnhancedAdaptiveDynamicHarmonySearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3" - ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3", register=True) except Exception as e: print("EnhancedAdaptiveDynamicHarmonySearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( - EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm import EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm - lama_register["EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( - EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm - ) - LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) + lama_register["EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( - EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution import EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicQuantumSwarmOptimization import ( - EnhancedAdaptiveDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicQuantumSwarmOptimization import EnhancedAdaptiveDynamicQuantumSwarmOptimization - lama_register["EnhancedAdaptiveDynamicQuantumSwarmOptimization"] = ( - EnhancedAdaptiveDynamicQuantumSwarmOptimization - ) - LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization", register=True) + lama_register["EnhancedAdaptiveDynamicQuantumSwarmOptimization"] = EnhancedAdaptiveDynamicQuantumSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteDifferentialEvolution import ( - EnhancedAdaptiveEliteDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveEliteDifferentialEvolution import EnhancedAdaptiveEliteDifferentialEvolution lama_register["EnhancedAdaptiveEliteDifferentialEvolution"] = EnhancedAdaptiveEliteDifferentialEvolution - LLAMAEnhancedAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveEliteDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveEliteDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveEliteDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteGuidedMutationDE_v2 import ( - EnhancedAdaptiveEliteGuidedMutationDE_v2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveEliteGuidedMutationDE_v2 import EnhancedAdaptiveEliteGuidedMutationDE_v2 lama_register["EnhancedAdaptiveEliteGuidedMutationDE_v2"] = EnhancedAdaptiveEliteGuidedMutationDE_v2 - LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2" - ).set_name("LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2").set_name("LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2", register=True) except Exception as e: print("EnhancedAdaptiveEliteGuidedMutationDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution import ( - EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution import EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution - lama_register["EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution"] = ( - EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution - ) - LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveEnvironmentalStrategyV24 import ( - EnhancedAdaptiveEnvironmentalStrategyV24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveEnvironmentalStrategyV24 import EnhancedAdaptiveEnvironmentalStrategyV24 lama_register["EnhancedAdaptiveEnvironmentalStrategyV24"] = EnhancedAdaptiveEnvironmentalStrategyV24 - LLAMAEnhancedAdaptiveEnvironmentalStrategyV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24" - ).set_name("LLAMAEnhancedAdaptiveEnvironmentalStrategyV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveEnvironmentalStrategyV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24").set_name("LLAMAEnhancedAdaptiveEnvironmentalStrategyV24", register=True) except Exception as e: print("EnhancedAdaptiveEnvironmentalStrategyV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy import ( - EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy import EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy - lama_register["EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( - EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy - ) - LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy" - ).set_name("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) + lama_register["EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy"] = EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy").set_name("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) except Exception as e: print("EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveExplorationExploitationAlgorithm import ( - EnhancedAdaptiveExplorationExploitationAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationExploitationAlgorithm import EnhancedAdaptiveExplorationExploitationAlgorithm - lama_register["EnhancedAdaptiveExplorationExploitationAlgorithm"] = ( - EnhancedAdaptiveExplorationExploitationAlgorithm - ) - LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm", register=True) + lama_register["EnhancedAdaptiveExplorationExploitationAlgorithm"] = EnhancedAdaptiveExplorationExploitationAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveExplorationOptimizer import ( - EnhancedAdaptiveExplorationOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationOptimizer import EnhancedAdaptiveExplorationOptimizer lama_register["EnhancedAdaptiveExplorationOptimizer"] = EnhancedAdaptiveExplorationOptimizer - LLAMAEnhancedAdaptiveExplorationOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveExplorationOptimizer" - ).set_name("LLAMAEnhancedAdaptiveExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationOptimizer").set_name("LLAMAEnhancedAdaptiveExplorationOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveFireworkAlgorithm import ( - EnhancedAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveFireworkAlgorithm import EnhancedAdaptiveFireworkAlgorithm lama_register["EnhancedAdaptiveFireworkAlgorithm"] = EnhancedAdaptiveFireworkAlgorithm - LLAMAEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveFireworkAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveFireworksAlgorithm import ( - EnhancedAdaptiveFireworksAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveFireworksAlgorithm import EnhancedAdaptiveFireworksAlgorithm lama_register["EnhancedAdaptiveFireworksAlgorithm"] = EnhancedAdaptiveFireworksAlgorithm - LLAMAEnhancedAdaptiveFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveFireworksAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworksAlgorithm").set_name("LLAMAEnhancedAdaptiveFireworksAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveFireworksAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveGaussianSearch import EnhancedAdaptiveGaussianSearch lama_register["EnhancedAdaptiveGaussianSearch"] = EnhancedAdaptiveGaussianSearch - LLAMAEnhancedAdaptiveGaussianSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGaussianSearch" - ).set_name("LLAMAEnhancedAdaptiveGaussianSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGaussianSearch").set_name("LLAMAEnhancedAdaptiveGaussianSearch", register=True) except Exception as e: print("EnhancedAdaptiveGaussianSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGradientBalancedCrossoverPSO import ( - EnhancedAdaptiveGradientBalancedCrossoverPSO, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBalancedCrossoverPSO import EnhancedAdaptiveGradientBalancedCrossoverPSO - lama_register["EnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( - EnhancedAdaptiveGradientBalancedCrossoverPSO - ) - LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO" - ).set_name("LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) + lama_register["EnhancedAdaptiveGradientBalancedCrossoverPSO"] = EnhancedAdaptiveGradientBalancedCrossoverPSO + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) except Exception as e: print("EnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing import ( - EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing import EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing - lama_register["EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( - EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing - ) - LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing"] = EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGranularStrategyV26 import ( - EnhancedAdaptiveGranularStrategyV26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGranularStrategyV26 import EnhancedAdaptiveGranularStrategyV26 lama_register["EnhancedAdaptiveGranularStrategyV26"] = EnhancedAdaptiveGranularStrategyV26 - LLAMAEnhancedAdaptiveGranularStrategyV26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGranularStrategyV26" - ).set_name("LLAMAEnhancedAdaptiveGranularStrategyV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGranularStrategyV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGranularStrategyV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGranularStrategyV26").set_name("LLAMAEnhancedAdaptiveGranularStrategyV26", register=True) except Exception as e: print("EnhancedAdaptiveGranularStrategyV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV10 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV10 import EnhancedAdaptiveGravitationalSwarmIntelligenceV10 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV10"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV10 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV10"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV10 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV11 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV11 import EnhancedAdaptiveGravitationalSwarmIntelligenceV11 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV11"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV11 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV11"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV11 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV12 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV12 import EnhancedAdaptiveGravitationalSwarmIntelligenceV12 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV12"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV12 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV12"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV12 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV19 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV19 import EnhancedAdaptiveGravitationalSwarmIntelligenceV19 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV19"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV19 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV19"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV19 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV20 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV20, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV20 import EnhancedAdaptiveGravitationalSwarmIntelligenceV20 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV20"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV20 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV20"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV20 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV21 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV21 import EnhancedAdaptiveGravitationalSwarmIntelligenceV21 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV21"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV21 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV21"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV21 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV27 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV27, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV27 import EnhancedAdaptiveGravitationalSwarmIntelligenceV27 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV27"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV27 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV27"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV27 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV28 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV28, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV28 import EnhancedAdaptiveGravitationalSwarmIntelligenceV28 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV28"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV28 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV28"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV28 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV3 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV3 import EnhancedAdaptiveGravitationalSwarmIntelligenceV3 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV3"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV3 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV3"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV4 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV4 import EnhancedAdaptiveGravitationalSwarmIntelligenceV4 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV4"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV4 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV4"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV5 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV5 import EnhancedAdaptiveGravitationalSwarmIntelligenceV5 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV5"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV5 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV5"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV6 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV6 import EnhancedAdaptiveGravitationalSwarmIntelligenceV6 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV6"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV6 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV6"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV7 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV7 import EnhancedAdaptiveGravitationalSwarmIntelligenceV7 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV7"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV7 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV7"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV8 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV8 import EnhancedAdaptiveGravitationalSwarmIntelligenceV8 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV8"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV8 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV8"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV9 import ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV9 import EnhancedAdaptiveGravitationalSwarmIntelligenceV9 - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV9"] = ( - EnhancedAdaptiveGravitationalSwarmIntelligenceV9 - ) - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9" - ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9", register=True) + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV9"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV9 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9", register=True) except Exception as e: print("EnhancedAdaptiveGravitationalSwarmIntelligenceV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( - EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - lama_register["EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( - EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - ) - LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" - ).set_name( - "LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True - ) + lama_register["EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) except Exception as e: - print( - "EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", - e, - ) - + print("EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedAdaptiveGuidedDifferentialEvolution import ( - EnhancedAdaptiveGuidedDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedDifferentialEvolution import EnhancedAdaptiveGuidedDifferentialEvolution lama_register["EnhancedAdaptiveGuidedDifferentialEvolution"] = EnhancedAdaptiveGuidedDifferentialEvolution - LLAMAEnhancedAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveGuidedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveGuidedDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveGuidedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveGuidedMutationOptimizer import ( - EnhancedAdaptiveGuidedMutationOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedMutationOptimizer import EnhancedAdaptiveGuidedMutationOptimizer lama_register["EnhancedAdaptiveGuidedMutationOptimizer"] = EnhancedAdaptiveGuidedMutationOptimizer - LLAMAEnhancedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer" - ).set_name("LLAMAEnhancedAdaptiveGuidedMutationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer").set_name("LLAMAEnhancedAdaptiveGuidedMutationOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveGuidedMutationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearch import ( - EnhancedAdaptiveHarmonicFireworksTabuSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearch import EnhancedAdaptiveHarmonicFireworksTabuSearch lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearch"] = EnhancedAdaptiveHarmonicFireworksTabuSearch - LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch" - ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearchV2 import ( - EnhancedAdaptiveHarmonicFireworksTabuSearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearchV2 import EnhancedAdaptiveHarmonicFireworksTabuSearchV2 - lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearchV2"] = ( - EnhancedAdaptiveHarmonicFireworksTabuSearchV2 - ) - LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2", register=True) + lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearchV2"] = EnhancedAdaptiveHarmonicFireworksTabuSearchV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicFireworksTabuSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicOptimizationV2 import ( - EnhancedAdaptiveHarmonicOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicOptimizationV2 import EnhancedAdaptiveHarmonicOptimizationV2 lama_register["EnhancedAdaptiveHarmonicOptimizationV2"] = EnhancedAdaptiveHarmonicOptimizationV2 - LLAMAEnhancedAdaptiveHarmonicOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonicOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2").set_name("LLAMAEnhancedAdaptiveHarmonicOptimizationV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV10 import ( - EnhancedAdaptiveHarmonicTabuSearchV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV10 import EnhancedAdaptiveHarmonicTabuSearchV10 lama_register["EnhancedAdaptiveHarmonicTabuSearchV10"] = EnhancedAdaptiveHarmonicTabuSearchV10 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV18 import ( - EnhancedAdaptiveHarmonicTabuSearchV18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV18 import EnhancedAdaptiveHarmonicTabuSearchV18 lama_register["EnhancedAdaptiveHarmonicTabuSearchV18"] = EnhancedAdaptiveHarmonicTabuSearchV18 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV21 import ( - EnhancedAdaptiveHarmonicTabuSearchV21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV21 import EnhancedAdaptiveHarmonicTabuSearchV21 lama_register["EnhancedAdaptiveHarmonicTabuSearchV21"] = EnhancedAdaptiveHarmonicTabuSearchV21 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV22 import ( - EnhancedAdaptiveHarmonicTabuSearchV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV22 import EnhancedAdaptiveHarmonicTabuSearchV22 lama_register["EnhancedAdaptiveHarmonicTabuSearchV22"] = EnhancedAdaptiveHarmonicTabuSearchV22 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV23 import ( - EnhancedAdaptiveHarmonicTabuSearchV23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV23 import EnhancedAdaptiveHarmonicTabuSearchV23 lama_register["EnhancedAdaptiveHarmonicTabuSearchV23"] = EnhancedAdaptiveHarmonicTabuSearchV23 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV25 import ( - EnhancedAdaptiveHarmonicTabuSearchV25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV25 import EnhancedAdaptiveHarmonicTabuSearchV25 lama_register["EnhancedAdaptiveHarmonicTabuSearchV25"] = EnhancedAdaptiveHarmonicTabuSearchV25 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV26 import ( - EnhancedAdaptiveHarmonicTabuSearchV26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV26 import EnhancedAdaptiveHarmonicTabuSearchV26 lama_register["EnhancedAdaptiveHarmonicTabuSearchV26"] = EnhancedAdaptiveHarmonicTabuSearchV26 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV27 import ( - EnhancedAdaptiveHarmonicTabuSearchV27, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV27 import EnhancedAdaptiveHarmonicTabuSearchV27 lama_register["EnhancedAdaptiveHarmonicTabuSearchV27"] = EnhancedAdaptiveHarmonicTabuSearchV27 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV27 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV29 import ( - EnhancedAdaptiveHarmonicTabuSearchV29, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV29 import EnhancedAdaptiveHarmonicTabuSearchV29 lama_register["EnhancedAdaptiveHarmonicTabuSearchV29"] = EnhancedAdaptiveHarmonicTabuSearchV29 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV29 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV30 import ( - EnhancedAdaptiveHarmonicTabuSearchV30, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV30 import EnhancedAdaptiveHarmonicTabuSearchV30 lama_register["EnhancedAdaptiveHarmonicTabuSearchV30"] = EnhancedAdaptiveHarmonicTabuSearchV30 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV30 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV31 import ( - EnhancedAdaptiveHarmonicTabuSearchV31, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV31 import EnhancedAdaptiveHarmonicTabuSearchV31 lama_register["EnhancedAdaptiveHarmonicTabuSearchV31"] = EnhancedAdaptiveHarmonicTabuSearchV31 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV31 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV9 import ( - EnhancedAdaptiveHarmonicTabuSearchV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV9 import EnhancedAdaptiveHarmonicTabuSearchV9 lama_register["EnhancedAdaptiveHarmonicTabuSearchV9"] = EnhancedAdaptiveHarmonicTabuSearchV9 - LLAMAEnhancedAdaptiveHarmonicTabuSearchV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonicTabuSearchV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyFireworksAlgorithm import ( - EnhancedAdaptiveHarmonyFireworksAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyFireworksAlgorithm import EnhancedAdaptiveHarmonyFireworksAlgorithm lama_register["EnhancedAdaptiveHarmonyFireworksAlgorithm"] = EnhancedAdaptiveHarmonyFireworksAlgorithm - LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm").set_name("LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithm import ( - EnhancedAdaptiveHarmonyMemeticAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithm import EnhancedAdaptiveHarmonyMemeticAlgorithm lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithm"] = EnhancedAdaptiveHarmonyMemeticAlgorithm - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV10 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV10 import EnhancedAdaptiveHarmonyMemeticAlgorithmV10 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV10"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV10 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV11 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV11 import EnhancedAdaptiveHarmonyMemeticAlgorithmV11 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV11"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV11 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV12 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV12 import EnhancedAdaptiveHarmonyMemeticAlgorithmV12 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV12"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV12 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV13 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV13 import EnhancedAdaptiveHarmonyMemeticAlgorithmV13 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV13"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV13 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV14 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV14, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV14 import EnhancedAdaptiveHarmonyMemeticAlgorithmV14 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV14"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV14 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV16 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV16, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV16 import EnhancedAdaptiveHarmonyMemeticAlgorithmV16 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV16"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV16 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV18 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV18 import EnhancedAdaptiveHarmonyMemeticAlgorithmV18 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV18"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV18 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV19 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV19 import EnhancedAdaptiveHarmonyMemeticAlgorithmV19 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV19"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV19 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV2 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV2 import EnhancedAdaptiveHarmonyMemeticAlgorithmV2 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV2"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV2 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV20 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV20, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV20 import EnhancedAdaptiveHarmonyMemeticAlgorithmV20 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV20"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV20 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV21 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV21 import EnhancedAdaptiveHarmonyMemeticAlgorithmV21 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV21"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV21 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV22 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV22 import EnhancedAdaptiveHarmonyMemeticAlgorithmV22 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV22"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV22 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV23 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV23 import EnhancedAdaptiveHarmonyMemeticAlgorithmV23 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV23"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV23 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV24 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV24 import EnhancedAdaptiveHarmonyMemeticAlgorithmV24 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV24"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV24 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV25 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV25 import EnhancedAdaptiveHarmonyMemeticAlgorithmV25 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV25"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV25 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV3 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV3 import EnhancedAdaptiveHarmonyMemeticAlgorithmV3 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV3"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV3 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV4 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV4 import EnhancedAdaptiveHarmonyMemeticAlgorithmV4 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV4"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV4 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV5 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV5 import EnhancedAdaptiveHarmonyMemeticAlgorithmV5 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV5"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV5 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV6 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV6 import EnhancedAdaptiveHarmonyMemeticAlgorithmV6 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV6"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV6 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV7 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV7 import EnhancedAdaptiveHarmonyMemeticAlgorithmV7 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV7"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV7 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV8 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV8 import EnhancedAdaptiveHarmonyMemeticAlgorithmV8 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV8"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV8 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV9 import ( - EnhancedAdaptiveHarmonyMemeticAlgorithmV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV9 import EnhancedAdaptiveHarmonyMemeticAlgorithmV9 lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV9"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV9 - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticAlgorithmV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV28 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV28, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV28 import EnhancedAdaptiveHarmonyMemeticOptimizationV28 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV28"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV28 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV28"] = EnhancedAdaptiveHarmonyMemeticOptimizationV28 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV29 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV29, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV29 import EnhancedAdaptiveHarmonyMemeticOptimizationV29 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV29"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV29 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV29"] = EnhancedAdaptiveHarmonyMemeticOptimizationV29 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV3 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV3 import EnhancedAdaptiveHarmonyMemeticOptimizationV3 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV3"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV3 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV3"] = EnhancedAdaptiveHarmonyMemeticOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV30 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV30, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV30 import EnhancedAdaptiveHarmonyMemeticOptimizationV30 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV30"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV30 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV30"] = EnhancedAdaptiveHarmonyMemeticOptimizationV30 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV31 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV31, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV31 import EnhancedAdaptiveHarmonyMemeticOptimizationV31 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV31"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV31 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV31"] = EnhancedAdaptiveHarmonyMemeticOptimizationV31 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV32 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV32, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV32 import EnhancedAdaptiveHarmonyMemeticOptimizationV32 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV32"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV32 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV32"] = EnhancedAdaptiveHarmonyMemeticOptimizationV32 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV32 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV33 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV33, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV33 import EnhancedAdaptiveHarmonyMemeticOptimizationV33 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV33"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV33 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV33"] = EnhancedAdaptiveHarmonyMemeticOptimizationV33 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV33 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV4 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV4 import EnhancedAdaptiveHarmonyMemeticOptimizationV4 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV4"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV4 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV4"] = EnhancedAdaptiveHarmonyMemeticOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV5 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV5 import EnhancedAdaptiveHarmonyMemeticOptimizationV5 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV5"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV5 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV5"] = EnhancedAdaptiveHarmonyMemeticOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV6 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV6 import EnhancedAdaptiveHarmonyMemeticOptimizationV6 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV6"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV6 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV6"] = EnhancedAdaptiveHarmonyMemeticOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV7 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV7 import EnhancedAdaptiveHarmonyMemeticOptimizationV7 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV7"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV7 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV7"] = EnhancedAdaptiveHarmonyMemeticOptimizationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV8 import ( - EnhancedAdaptiveHarmonyMemeticOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV8 import EnhancedAdaptiveHarmonyMemeticOptimizationV8 - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV8"] = ( - EnhancedAdaptiveHarmonyMemeticOptimizationV8 - ) - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8", register=True) + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV8"] = EnhancedAdaptiveHarmonyMemeticOptimizationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearch import ( - EnhancedAdaptiveHarmonyMemeticSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearch import EnhancedAdaptiveHarmonyMemeticSearch lama_register["EnhancedAdaptiveHarmonyMemeticSearch"] = EnhancedAdaptiveHarmonyMemeticSearch - LLAMAEnhancedAdaptiveHarmonyMemeticSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearch", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearchV2 import ( - EnhancedAdaptiveHarmonyMemeticSearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearchV2 import EnhancedAdaptiveHarmonyMemeticSearchV2 lama_register["EnhancedAdaptiveHarmonyMemeticSearchV2"] = EnhancedAdaptiveHarmonyMemeticSearchV2 - LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization import ( - EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization import EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization - lama_register["EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization"] = ( - EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization - ) - LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization", register=True) + lama_register["EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization import ( - EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization import EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization - lama_register["EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization"] = ( - EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization - ) - LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization", register=True) + lama_register["EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizer import ( - EnhancedAdaptiveHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizer import EnhancedAdaptiveHarmonySearchOptimizer lama_register["EnhancedAdaptiveHarmonySearchOptimizer"] = EnhancedAdaptiveHarmonySearchOptimizer - LLAMAEnhancedAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizerV2 import ( - EnhancedAdaptiveHarmonySearchOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizerV2 import EnhancedAdaptiveHarmonySearchOptimizerV2 lama_register["EnhancedAdaptiveHarmonySearchOptimizerV2"] = EnhancedAdaptiveHarmonySearchOptimizerV2 - LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV10 import EnhancedAdaptiveHarmonySearchV10 lama_register["EnhancedAdaptiveHarmonySearchV10"] = EnhancedAdaptiveHarmonySearchV10 - LLAMAEnhancedAdaptiveHarmonySearchV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV10 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV11 import EnhancedAdaptiveHarmonySearchV11 lama_register["EnhancedAdaptiveHarmonySearchV11"] = EnhancedAdaptiveHarmonySearchV11 - LLAMAEnhancedAdaptiveHarmonySearchV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV11" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchV11", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV12 import EnhancedAdaptiveHarmonySearchV12 lama_register["EnhancedAdaptiveHarmonySearchV12"] = EnhancedAdaptiveHarmonySearchV12 - LLAMAEnhancedAdaptiveHarmonySearchV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV12" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchV12", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV13 import EnhancedAdaptiveHarmonySearchV13 lama_register["EnhancedAdaptiveHarmonySearchV13"] = EnhancedAdaptiveHarmonySearchV13 - LLAMAEnhancedAdaptiveHarmonySearchV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV13" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchV13", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV14 import EnhancedAdaptiveHarmonySearchV14 lama_register["EnhancedAdaptiveHarmonySearchV14"] = EnhancedAdaptiveHarmonySearchV14 - LLAMAEnhancedAdaptiveHarmonySearchV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV14" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV14").set_name("LLAMAEnhancedAdaptiveHarmonySearchV14", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV14 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV15 import EnhancedAdaptiveHarmonySearchV15 lama_register["EnhancedAdaptiveHarmonySearchV15"] = EnhancedAdaptiveHarmonySearchV15 - LLAMAEnhancedAdaptiveHarmonySearchV15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV15" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV15").set_name("LLAMAEnhancedAdaptiveHarmonySearchV15", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV16 import EnhancedAdaptiveHarmonySearchV16 lama_register["EnhancedAdaptiveHarmonySearchV16"] = EnhancedAdaptiveHarmonySearchV16 - LLAMAEnhancedAdaptiveHarmonySearchV16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV16" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV16").set_name("LLAMAEnhancedAdaptiveHarmonySearchV16", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV17 import EnhancedAdaptiveHarmonySearchV17 lama_register["EnhancedAdaptiveHarmonySearchV17"] = EnhancedAdaptiveHarmonySearchV17 - LLAMAEnhancedAdaptiveHarmonySearchV17 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV17" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV17").set_name("LLAMAEnhancedAdaptiveHarmonySearchV17", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV17 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV18 import EnhancedAdaptiveHarmonySearchV18 lama_register["EnhancedAdaptiveHarmonySearchV18"] = EnhancedAdaptiveHarmonySearchV18 - LLAMAEnhancedAdaptiveHarmonySearchV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV18" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV18").set_name("LLAMAEnhancedAdaptiveHarmonySearchV18", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV19 import EnhancedAdaptiveHarmonySearchV19 lama_register["EnhancedAdaptiveHarmonySearchV19"] = EnhancedAdaptiveHarmonySearchV19 - LLAMAEnhancedAdaptiveHarmonySearchV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV19" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV19").set_name("LLAMAEnhancedAdaptiveHarmonySearchV19", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV19 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV20 import EnhancedAdaptiveHarmonySearchV20 lama_register["EnhancedAdaptiveHarmonySearchV20"] = EnhancedAdaptiveHarmonySearchV20 - LLAMAEnhancedAdaptiveHarmonySearchV20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV20" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV20").set_name("LLAMAEnhancedAdaptiveHarmonySearchV20", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV21 import EnhancedAdaptiveHarmonySearchV21 lama_register["EnhancedAdaptiveHarmonySearchV21"] = EnhancedAdaptiveHarmonySearchV21 - LLAMAEnhancedAdaptiveHarmonySearchV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV21" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV21").set_name("LLAMAEnhancedAdaptiveHarmonySearchV21", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV21 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV22 import EnhancedAdaptiveHarmonySearchV22 lama_register["EnhancedAdaptiveHarmonySearchV22"] = EnhancedAdaptiveHarmonySearchV22 - LLAMAEnhancedAdaptiveHarmonySearchV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV22" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV22").set_name("LLAMAEnhancedAdaptiveHarmonySearchV22", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV22 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV23 import EnhancedAdaptiveHarmonySearchV23 lama_register["EnhancedAdaptiveHarmonySearchV23"] = EnhancedAdaptiveHarmonySearchV23 - LLAMAEnhancedAdaptiveHarmonySearchV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV23" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV23").set_name("LLAMAEnhancedAdaptiveHarmonySearchV23", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV23 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV24 import EnhancedAdaptiveHarmonySearchV24 lama_register["EnhancedAdaptiveHarmonySearchV24"] = EnhancedAdaptiveHarmonySearchV24 - LLAMAEnhancedAdaptiveHarmonySearchV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV24" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV24").set_name("LLAMAEnhancedAdaptiveHarmonySearchV24", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV24 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV25 import EnhancedAdaptiveHarmonySearchV25 lama_register["EnhancedAdaptiveHarmonySearchV25"] = EnhancedAdaptiveHarmonySearchV25 - LLAMAEnhancedAdaptiveHarmonySearchV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV25" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV25").set_name("LLAMAEnhancedAdaptiveHarmonySearchV25", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV25 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV3 import EnhancedAdaptiveHarmonySearchV3 lama_register["EnhancedAdaptiveHarmonySearchV3"] = EnhancedAdaptiveHarmonySearchV3 - LLAMAEnhancedAdaptiveHarmonySearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV4 import EnhancedAdaptiveHarmonySearchV4 lama_register["EnhancedAdaptiveHarmonySearchV4"] = EnhancedAdaptiveHarmonySearchV4 - LLAMAEnhancedAdaptiveHarmonySearchV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV5 import EnhancedAdaptiveHarmonySearchV5 lama_register["EnhancedAdaptiveHarmonySearchV5"] = EnhancedAdaptiveHarmonySearchV5 - LLAMAEnhancedAdaptiveHarmonySearchV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV6 import EnhancedAdaptiveHarmonySearchV6 lama_register["EnhancedAdaptiveHarmonySearchV6"] = EnhancedAdaptiveHarmonySearchV6 - LLAMAEnhancedAdaptiveHarmonySearchV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV7 import EnhancedAdaptiveHarmonySearchV7 lama_register["EnhancedAdaptiveHarmonySearchV7"] = EnhancedAdaptiveHarmonySearchV7 - LLAMAEnhancedAdaptiveHarmonySearchV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV8 import EnhancedAdaptiveHarmonySearchV8 lama_register["EnhancedAdaptiveHarmonySearchV8"] = EnhancedAdaptiveHarmonySearchV8 - LLAMAEnhancedAdaptiveHarmonySearchV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV9 import EnhancedAdaptiveHarmonySearchV9 lama_register["EnhancedAdaptiveHarmonySearchV9"] = EnhancedAdaptiveHarmonySearchV9 - LLAMAEnhancedAdaptiveHarmonySearchV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration import ( - EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration import EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration - lama_register["EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( - EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration - ) - LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration"] = EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 import ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9"] = ( - EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 import ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9"] = ( - EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 import EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17"] = EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 import EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18"] = EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 import EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2" - ).set_name( - "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2", register=True - ) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2"] = EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2", register=True) except Exception as e: - print( - "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 can not be imported: ", - e, - ) - + print("EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 import ( - EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 import EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3"] = ( - EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3" - ).set_name( - "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3", register=True - ) + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3"] = EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3", register=True) except Exception as e: - print( - "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 can not be imported: ", - e, - ) - + print("EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 import ( - EnhancedAdaptiveHarmonySearchWithHybridInspirationV16, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 import EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 - lama_register["EnhancedAdaptiveHarmonySearchWithHybridInspirationV16"] = ( - EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithHybridInspirationV16"] = EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 import ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9"] = ( - EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlight import ( - EnhancedAdaptiveHarmonySearchWithLevyFlight, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlight import EnhancedAdaptiveHarmonySearchWithLevyFlight lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlight"] = EnhancedAdaptiveHarmonySearchWithLevyFlight - LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLevyFlight can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 import ( - EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 import EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 - lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2"] = ( - EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2"] = EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimization import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimization import EnhancedAdaptiveHarmonySearchWithLocalOptimization - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimization"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimization - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchWithLocalOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 import ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8"] = ( - EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight import ( - EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight import EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight - lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight"] = ( - EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight - ) - LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight"] = EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration import ( - EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration import EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration - lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration"] = ( - EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration - ) - LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration"] = EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 import ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6"] = ( - EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 - ) - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6" - ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6", register=True) + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6", register=True) except Exception as e: print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuOptimization import ( - EnhancedAdaptiveHarmonyTabuOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuOptimization import EnhancedAdaptiveHarmonyTabuOptimization lama_register["EnhancedAdaptiveHarmonyTabuOptimization"] = EnhancedAdaptiveHarmonyTabuOptimization - LLAMAEnhancedAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization" - ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization").set_name("LLAMAEnhancedAdaptiveHarmonyTabuOptimization", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyTabuOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV2 import ( - EnhancedAdaptiveHarmonyTabuSearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV2 import EnhancedAdaptiveHarmonyTabuSearchV2 lama_register["EnhancedAdaptiveHarmonyTabuSearchV2"] = EnhancedAdaptiveHarmonyTabuSearchV2 - LLAMAEnhancedAdaptiveHarmonyTabuSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2" - ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyTabuSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV3 import ( - EnhancedAdaptiveHarmonyTabuSearchV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV3 import EnhancedAdaptiveHarmonyTabuSearchV3 lama_register["EnhancedAdaptiveHarmonyTabuSearchV3"] = EnhancedAdaptiveHarmonyTabuSearchV3 - LLAMAEnhancedAdaptiveHarmonyTabuSearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3" - ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyTabuSearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV4 import ( - EnhancedAdaptiveHarmonyTabuSearchV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV4 import EnhancedAdaptiveHarmonyTabuSearchV4 lama_register["EnhancedAdaptiveHarmonyTabuSearchV4"] = EnhancedAdaptiveHarmonyTabuSearchV4 - LLAMAEnhancedAdaptiveHarmonyTabuSearchV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4" - ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyTabuSearchV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV5 import ( - EnhancedAdaptiveHarmonyTabuSearchV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV5 import EnhancedAdaptiveHarmonyTabuSearchV5 lama_register["EnhancedAdaptiveHarmonyTabuSearchV5"] = EnhancedAdaptiveHarmonyTabuSearchV5 - LLAMAEnhancedAdaptiveHarmonyTabuSearchV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5" - ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5", register=True) except Exception as e: print("EnhancedAdaptiveHarmonyTabuSearchV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory import ( - EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory import EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory - lama_register["EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory"] = ( - EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory - ) - LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory" - ).set_name("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory", register=True) + lama_register["EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory"] = EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory").set_name("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory", register=True) except Exception as e: print("EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV22 import ( - EnhancedAdaptiveHybridHarmonySearchV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV22 import EnhancedAdaptiveHybridHarmonySearchV22 lama_register["EnhancedAdaptiveHybridHarmonySearchV22"] = EnhancedAdaptiveHybridHarmonySearchV22 - LLAMAEnhancedAdaptiveHybridHarmonySearchV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV22", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV23 import ( - EnhancedAdaptiveHybridHarmonySearchV23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV23 import EnhancedAdaptiveHybridHarmonySearchV23 lama_register["EnhancedAdaptiveHybridHarmonySearchV23"] = EnhancedAdaptiveHybridHarmonySearchV23 - LLAMAEnhancedAdaptiveHybridHarmonySearchV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV23", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV24 import ( - EnhancedAdaptiveHybridHarmonySearchV24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV24 import EnhancedAdaptiveHybridHarmonySearchV24 lama_register["EnhancedAdaptiveHybridHarmonySearchV24"] = EnhancedAdaptiveHybridHarmonySearchV24 - LLAMAEnhancedAdaptiveHybridHarmonySearchV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV24", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV25 import ( - EnhancedAdaptiveHybridHarmonySearchV25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV25 import EnhancedAdaptiveHybridHarmonySearchV25 lama_register["EnhancedAdaptiveHybridHarmonySearchV25"] = EnhancedAdaptiveHybridHarmonySearchV25 - LLAMAEnhancedAdaptiveHybridHarmonySearchV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV25", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV26 import ( - EnhancedAdaptiveHybridHarmonySearchV26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV26 import EnhancedAdaptiveHybridHarmonySearchV26 lama_register["EnhancedAdaptiveHybridHarmonySearchV26"] = EnhancedAdaptiveHybridHarmonySearchV26 - LLAMAEnhancedAdaptiveHybridHarmonySearchV26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV26", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV27 import ( - EnhancedAdaptiveHybridHarmonySearchV27, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV27 import EnhancedAdaptiveHybridHarmonySearchV27 lama_register["EnhancedAdaptiveHybridHarmonySearchV27"] = EnhancedAdaptiveHybridHarmonySearchV27 - LLAMAEnhancedAdaptiveHybridHarmonySearchV27 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27" - ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV27", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV27", register=True) except Exception as e: print("EnhancedAdaptiveHybridHarmonySearchV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridMetaOptimizer import ( - EnhancedAdaptiveHybridMetaOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridMetaOptimizer import EnhancedAdaptiveHybridMetaOptimizer lama_register["EnhancedAdaptiveHybridMetaOptimizer"] = EnhancedAdaptiveHybridMetaOptimizer - LLAMAEnhancedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridMetaOptimizer" - ).set_name("LLAMAEnhancedAdaptiveHybridMetaOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridMetaOptimizer").set_name("LLAMAEnhancedAdaptiveHybridMetaOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveHybridMetaOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveHybridOptimizer import EnhancedAdaptiveHybridOptimizer lama_register["EnhancedAdaptiveHybridOptimizer"] = EnhancedAdaptiveHybridOptimizer - LLAMAEnhancedAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridOptimizer" - ).set_name("LLAMAEnhancedAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridOptimizer").set_name("LLAMAEnhancedAdaptiveHybridOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution import ( - EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution import EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution - lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( - EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution - ) - LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution"] = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( - EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus import EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus - lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( - EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus - ) - LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" - ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus").set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) except Exception as e: print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveInertiaHybridOptimizer import ( - EnhancedAdaptiveInertiaHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveInertiaHybridOptimizer import EnhancedAdaptiveInertiaHybridOptimizer lama_register["EnhancedAdaptiveInertiaHybridOptimizer"] = EnhancedAdaptiveInertiaHybridOptimizer - LLAMAEnhancedAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer" - ).set_name("LLAMAEnhancedAdaptiveInertiaHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveInertiaHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer").set_name("LLAMAEnhancedAdaptiveInertiaHybridOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveInertiaHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm - ) - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" - ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) except Exception as e: print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 import ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2"] = ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 - ) - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2" - ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2", register=True) + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2", register=True) except Exception as e: print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 import ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3"] = ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 - ) - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3" - ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3", register=True) + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3", register=True) except Exception as e: print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 import ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4"] = ( - EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 - ) - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4" - ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4", register=True) + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4", register=True) except Exception as e: print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearch import ( - EnhancedAdaptiveLevyHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearch import EnhancedAdaptiveLevyHarmonySearch lama_register["EnhancedAdaptiveLevyHarmonySearch"] = EnhancedAdaptiveLevyHarmonySearch - LLAMAEnhancedAdaptiveLevyHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyHarmonySearch" - ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearch").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearch", register=True) except Exception as e: print("EnhancedAdaptiveLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV2 import ( - EnhancedAdaptiveLevyHarmonySearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV2 import EnhancedAdaptiveLevyHarmonySearchV2 lama_register["EnhancedAdaptiveLevyHarmonySearchV2"] = EnhancedAdaptiveLevyHarmonySearchV2 - LLAMAEnhancedAdaptiveLevyHarmonySearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2" - ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV2", register=True) except Exception as e: print("EnhancedAdaptiveLevyHarmonySearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV3 import ( - EnhancedAdaptiveLevyHarmonySearchV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV3 import EnhancedAdaptiveLevyHarmonySearchV3 lama_register["EnhancedAdaptiveLevyHarmonySearchV3"] = EnhancedAdaptiveLevyHarmonySearchV3 - LLAMAEnhancedAdaptiveLevyHarmonySearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3" - ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLevyHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV3", register=True) except Exception as e: print("EnhancedAdaptiveLevyHarmonySearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing import ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing - ) - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing" - ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 import ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2"] = ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 - ) - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2" - ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2", register=True) + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2", register=True) except Exception as e: print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 import ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3"] = ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 - ) - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3" - ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3", register=True) + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3", register=True) except Exception as e: print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 import ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4"] = ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 - ) - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4" - ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4", register=True) + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4", register=True) except Exception as e: print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 import ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5"] = ( - EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 - ) - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5" - ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5", register=True) + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5", register=True) except Exception as e: print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDifferentialEvolution import ( - EnhancedAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDifferentialEvolution import EnhancedAdaptiveMemeticDifferentialEvolution - lama_register["EnhancedAdaptiveMemeticDifferentialEvolution"] = ( - EnhancedAdaptiveMemeticDifferentialEvolution - ) - LLAMAEnhancedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveMemeticDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveMemeticDifferentialEvolution"] = EnhancedAdaptiveMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizer import ( - EnhancedAdaptiveMemeticDiverseOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizer import EnhancedAdaptiveMemeticDiverseOptimizer lama_register["EnhancedAdaptiveMemeticDiverseOptimizer"] = EnhancedAdaptiveMemeticDiverseOptimizer - LLAMAEnhancedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer" - ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV2 import ( - EnhancedAdaptiveMemeticDiverseOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV2 import EnhancedAdaptiveMemeticDiverseOptimizerV2 lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV2"] = EnhancedAdaptiveMemeticDiverseOptimizerV2 - LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2" - ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2", register=True) except Exception as e: print("EnhancedAdaptiveMemeticDiverseOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV3 import ( - EnhancedAdaptiveMemeticDiverseOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV3 import EnhancedAdaptiveMemeticDiverseOptimizerV3 lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV3"] = EnhancedAdaptiveMemeticDiverseOptimizerV3 - LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3" - ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3", register=True) except Exception as e: print("EnhancedAdaptiveMemeticDiverseOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 import ( - EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 import EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 - lama_register["EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2"] = ( - EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 - ) - LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2" - ).set_name("LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2", register=True) + lama_register["EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2"] = EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2").set_name("LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2", register=True) except Exception as e: print("EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimization import ( - EnhancedAdaptiveMemeticHarmonyOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimization import EnhancedAdaptiveMemeticHarmonyOptimization lama_register["EnhancedAdaptiveMemeticHarmonyOptimization"] = EnhancedAdaptiveMemeticHarmonyOptimization - LLAMAEnhancedAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization" - ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimization", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHarmonyOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV2 import ( - EnhancedAdaptiveMemeticHarmonyOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV2 import EnhancedAdaptiveMemeticHarmonyOptimizationV2 - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV2"] = ( - EnhancedAdaptiveMemeticHarmonyOptimizationV2 - ) - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2" - ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2", register=True) + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV2"] = EnhancedAdaptiveMemeticHarmonyOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHarmonyOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV3 import ( - EnhancedAdaptiveMemeticHarmonyOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV3 import EnhancedAdaptiveMemeticHarmonyOptimizationV3 - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV3"] = ( - EnhancedAdaptiveMemeticHarmonyOptimizationV3 - ) - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3", register=True) + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV3"] = EnhancedAdaptiveMemeticHarmonyOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHarmonyOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV4 import ( - EnhancedAdaptiveMemeticHarmonyOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV4 import EnhancedAdaptiveMemeticHarmonyOptimizationV4 - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV4"] = ( - EnhancedAdaptiveMemeticHarmonyOptimizationV4 - ) - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4", register=True) + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV4"] = EnhancedAdaptiveMemeticHarmonyOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHarmonyOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV6 import ( - EnhancedAdaptiveMemeticHarmonyOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV6 import EnhancedAdaptiveMemeticHarmonyOptimizationV6 - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV6"] = ( - EnhancedAdaptiveMemeticHarmonyOptimizationV6 - ) - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6" - ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6", register=True) + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV6"] = EnhancedAdaptiveMemeticHarmonyOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHarmonyOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHybridOptimizer import ( - EnhancedAdaptiveMemeticHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHybridOptimizer import EnhancedAdaptiveMemeticHybridOptimizer lama_register["EnhancedAdaptiveMemeticHybridOptimizer"] = EnhancedAdaptiveMemeticHybridOptimizer - LLAMAEnhancedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer" - ).set_name("LLAMAEnhancedAdaptiveMemeticHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer").set_name("LLAMAEnhancedAdaptiveMemeticHybridOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveMemeticHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticOptimizerV7 import ( - EnhancedAdaptiveMemeticOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticOptimizerV7 import EnhancedAdaptiveMemeticOptimizerV7 lama_register["EnhancedAdaptiveMemeticOptimizerV7"] = EnhancedAdaptiveMemeticOptimizerV7 - LLAMAEnhancedAdaptiveMemeticOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemeticOptimizerV7" - ).set_name("LLAMAEnhancedAdaptiveMemeticOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemeticOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticOptimizerV7").set_name("LLAMAEnhancedAdaptiveMemeticOptimizerV7", register=True) except Exception as e: print("EnhancedAdaptiveMemeticOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryControlStrategyV49 import ( - EnhancedAdaptiveMemoryControlStrategyV49, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryControlStrategyV49 import EnhancedAdaptiveMemoryControlStrategyV49 lama_register["EnhancedAdaptiveMemoryControlStrategyV49"] = EnhancedAdaptiveMemoryControlStrategyV49 - LLAMAEnhancedAdaptiveMemoryControlStrategyV49 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49" - ).set_name("LLAMAEnhancedAdaptiveMemoryControlStrategyV49", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryControlStrategyV49 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49").set_name("LLAMAEnhancedAdaptiveMemoryControlStrategyV49", register=True) except Exception as e: print("EnhancedAdaptiveMemoryControlStrategyV49 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryDualPhaseStrategyV46 import ( - EnhancedAdaptiveMemoryDualPhaseStrategyV46, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryDualPhaseStrategyV46 import EnhancedAdaptiveMemoryDualPhaseStrategyV46 lama_register["EnhancedAdaptiveMemoryDualPhaseStrategyV46"] = EnhancedAdaptiveMemoryDualPhaseStrategyV46 - LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46" - ).set_name("LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46").set_name("LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46", register=True) except Exception as e: print("EnhancedAdaptiveMemoryDualPhaseStrategyV46 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost import ( - EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost import EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost - lama_register["EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( - EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost - ) - LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost" - ).set_name("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) + lama_register["EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost"] = EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost").set_name("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) except Exception as e: print("EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridAnnealing import ( - EnhancedAdaptiveMemoryHybridAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridAnnealing import EnhancedAdaptiveMemoryHybridAnnealing lama_register["EnhancedAdaptiveMemoryHybridAnnealing"] = EnhancedAdaptiveMemoryHybridAnnealing - LLAMAEnhancedAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing" - ).set_name("LLAMAEnhancedAdaptiveMemoryHybridAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryHybridAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing").set_name("LLAMAEnhancedAdaptiveMemoryHybridAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveMemoryHybridAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridDEPSO import ( - EnhancedAdaptiveMemoryHybridDEPSO, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridDEPSO import EnhancedAdaptiveMemoryHybridDEPSO lama_register["EnhancedAdaptiveMemoryHybridDEPSO"] = EnhancedAdaptiveMemoryHybridDEPSO - LLAMAEnhancedAdaptiveMemoryHybridDEPSO = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO" - ).set_name("LLAMAEnhancedAdaptiveMemoryHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO").set_name("LLAMAEnhancedAdaptiveMemoryHybridDEPSO", register=True) except Exception as e: print("EnhancedAdaptiveMemoryHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV54 import ( - EnhancedAdaptiveMemoryStrategyV54, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV54 import EnhancedAdaptiveMemoryStrategyV54 lama_register["EnhancedAdaptiveMemoryStrategyV54"] = EnhancedAdaptiveMemoryStrategyV54 - LLAMAEnhancedAdaptiveMemoryStrategyV54 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryStrategyV54" - ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV54", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryStrategyV54 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV54").set_name("LLAMAEnhancedAdaptiveMemoryStrategyV54", register=True) except Exception as e: print("EnhancedAdaptiveMemoryStrategyV54 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV79 import ( - EnhancedAdaptiveMemoryStrategyV79, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV79 import EnhancedAdaptiveMemoryStrategyV79 lama_register["EnhancedAdaptiveMemoryStrategyV79"] = EnhancedAdaptiveMemoryStrategyV79 - LLAMAEnhancedAdaptiveMemoryStrategyV79 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMemoryStrategyV79" - ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV79", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMemoryStrategyV79 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV79").set_name("LLAMAEnhancedAdaptiveMemoryStrategyV79", register=True) except Exception as e: print("EnhancedAdaptiveMemoryStrategyV79 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSO import EnhancedAdaptiveMetaNetAQAPSO lama_register["EnhancedAdaptiveMetaNetAQAPSO"] = EnhancedAdaptiveMetaNetAQAPSO - LLAMAEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSO" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSO", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv12 import EnhancedAdaptiveMetaNetAQAPSOv12 lama_register["EnhancedAdaptiveMetaNetAQAPSOv12"] = EnhancedAdaptiveMetaNetAQAPSOv12 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv12", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv12 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv14 import EnhancedAdaptiveMetaNetAQAPSOv14 lama_register["EnhancedAdaptiveMetaNetAQAPSOv14"] = EnhancedAdaptiveMetaNetAQAPSOv14 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv14", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv14 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv15 import EnhancedAdaptiveMetaNetAQAPSOv15 lama_register["EnhancedAdaptiveMetaNetAQAPSOv15"] = EnhancedAdaptiveMetaNetAQAPSOv15 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv15", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv15 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv16 import EnhancedAdaptiveMetaNetAQAPSOv16 lama_register["EnhancedAdaptiveMetaNetAQAPSOv16"] = EnhancedAdaptiveMetaNetAQAPSOv16 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv16", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv16 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv2 import EnhancedAdaptiveMetaNetAQAPSOv2 lama_register["EnhancedAdaptiveMetaNetAQAPSOv2"] = EnhancedAdaptiveMetaNetAQAPSOv2 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv2", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv3 import EnhancedAdaptiveMetaNetAQAPSOv3 lama_register["EnhancedAdaptiveMetaNetAQAPSOv3"] = EnhancedAdaptiveMetaNetAQAPSOv3 - LLAMAEnhancedAdaptiveMetaNetAQAPSOv3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3" - ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv3", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetAQAPSOv3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO import EnhancedAdaptiveMetaNetPSO lama_register["EnhancedAdaptiveMetaNetPSO"] = EnhancedAdaptiveMetaNetPSO - LLAMAEnhancedAdaptiveMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO").set_name( - "LLAMAEnhancedAdaptiveMetaNetPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO").set_name("LLAMAEnhancedAdaptiveMetaNetPSO", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v2 import EnhancedAdaptiveMetaNetPSO_v2 lama_register["EnhancedAdaptiveMetaNetPSO_v2"] = EnhancedAdaptiveMetaNetPSO_v2 - LLAMAEnhancedAdaptiveMetaNetPSO_v2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetPSO_v2" - ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetPSO_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v2").set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v2", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetPSO_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v3 import EnhancedAdaptiveMetaNetPSO_v3 lama_register["EnhancedAdaptiveMetaNetPSO_v3"] = EnhancedAdaptiveMetaNetPSO_v3 - LLAMAEnhancedAdaptiveMetaNetPSO_v3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMetaNetPSO_v3" - ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v3").set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v3", register=True) except Exception as e: print("EnhancedAdaptiveMetaNetPSO_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiMemorySimulatedAnnealing import ( - EnhancedAdaptiveMultiMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiMemorySimulatedAnnealing import EnhancedAdaptiveMultiMemorySimulatedAnnealing - lama_register["EnhancedAdaptiveMultiMemorySimulatedAnnealing"] = ( - EnhancedAdaptiveMultiMemorySimulatedAnnealing - ) - LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing" - ).set_name("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing", register=True) + lama_register["EnhancedAdaptiveMultiMemorySimulatedAnnealing"] = EnhancedAdaptiveMultiMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiOperatorSearch import ( - EnhancedAdaptiveMultiOperatorSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiOperatorSearch import EnhancedAdaptiveMultiOperatorSearch lama_register["EnhancedAdaptiveMultiOperatorSearch"] = EnhancedAdaptiveMultiOperatorSearch - LLAMAEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiOperatorSearch" - ).set_name("LLAMAEnhancedAdaptiveMultiOperatorSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiOperatorSearch").set_name("LLAMAEnhancedAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("EnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealing import ( - EnhancedAdaptiveMultiPhaseAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealing import EnhancedAdaptiveMultiPhaseAnnealing lama_register["EnhancedAdaptiveMultiPhaseAnnealing"] = EnhancedAdaptiveMultiPhaseAnnealing - LLAMAEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing" - ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing").set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( - EnhancedAdaptiveMultiPhaseAnnealingWithGradient, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealingWithGradient import EnhancedAdaptiveMultiPhaseAnnealingWithGradient - lama_register["EnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( - EnhancedAdaptiveMultiPhaseAnnealingWithGradient - ) - LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient" - ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) + lama_register["EnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = EnhancedAdaptiveMultiPhaseAnnealingWithGradient + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient").set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) except Exception as e: print("EnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPopulationDifferentialEvolution import ( - EnhancedAdaptiveMultiPopulationDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPopulationDifferentialEvolution import EnhancedAdaptiveMultiPopulationDifferentialEvolution - lama_register["EnhancedAdaptiveMultiPopulationDifferentialEvolution"] = ( - EnhancedAdaptiveMultiPopulationDifferentialEvolution - ) - LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveMultiPopulationDifferentialEvolution"] = EnhancedAdaptiveMultiPopulationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategicOptimizer import ( - EnhancedAdaptiveMultiStrategicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategicOptimizer import EnhancedAdaptiveMultiStrategicOptimizer lama_register["EnhancedAdaptiveMultiStrategicOptimizer"] = EnhancedAdaptiveMultiStrategicOptimizer - LLAMAEnhancedAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer" - ).set_name("LLAMAEnhancedAdaptiveMultiStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiStrategicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer").set_name("LLAMAEnhancedAdaptiveMultiStrategicOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveMultiStrategicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDE import EnhancedAdaptiveMultiStrategyDE lama_register["EnhancedAdaptiveMultiStrategyDE"] = EnhancedAdaptiveMultiStrategyDE - LLAMAEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiStrategyDE" - ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDE").set_name("LLAMAEnhancedAdaptiveMultiStrategyDE", register=True) except Exception as e: print("EnhancedAdaptiveMultiStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDifferentialEvolution import ( - EnhancedAdaptiveMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDifferentialEvolution import EnhancedAdaptiveMultiStrategyDifferentialEvolution - lama_register["EnhancedAdaptiveMultiStrategyDifferentialEvolution"] = ( - EnhancedAdaptiveMultiStrategyDifferentialEvolution - ) - LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyOptimizer import ( - EnhancedAdaptiveMultiStrategyOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyOptimizer import EnhancedAdaptiveMultiStrategyOptimizer lama_register["EnhancedAdaptiveMultiStrategyOptimizer"] = EnhancedAdaptiveMultiStrategyOptimizer - LLAMAEnhancedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer" - ).set_name("LLAMAEnhancedAdaptiveMultiStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer").set_name("LLAMAEnhancedAdaptiveMultiStrategyOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveMultiStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer import ( - EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer import EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer - lama_register["EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( - EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer - ) - LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) + lama_register["EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer"] = EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution import ( - EnhancedAdaptiveOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution import EnhancedAdaptiveOppositionBasedDifferentialEvolution - lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( - EnhancedAdaptiveOppositionBasedDifferentialEvolution - ) - LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution"] = EnhancedAdaptiveOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 import ( - EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 import EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 - lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2"] = ( - EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 - ) - LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2" - ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2", register=True) + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2"] = EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2").set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2", register=True) except Exception as e: print("EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE import ( - EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE import EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE - lama_register["EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( - EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE - ) - LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE" - ).set_name("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) + lama_register["EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE"] = EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) except Exception as e: print("EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveOrthogonalDifferentialEvolution import ( - EnhancedAdaptiveOrthogonalDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveOrthogonalDifferentialEvolution import EnhancedAdaptiveOrthogonalDifferentialEvolution - lama_register["EnhancedAdaptiveOrthogonalDifferentialEvolution"] = ( - EnhancedAdaptiveOrthogonalDifferentialEvolution - ) - LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveOrthogonalDifferentialEvolution"] = EnhancedAdaptiveOrthogonalDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( - EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - lama_register["EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( - EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - ) - LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" - ).set_name("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) + lama_register["EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) except Exception as e: print("EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptivePrecisionCohortOptimizationV5 import ( - EnhancedAdaptivePrecisionCohortOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionCohortOptimizationV5 import EnhancedAdaptivePrecisionCohortOptimizationV5 - lama_register["EnhancedAdaptivePrecisionCohortOptimizationV5"] = ( - EnhancedAdaptivePrecisionCohortOptimizationV5 - ) - LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5" - ).set_name("LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5", register=True) + lama_register["EnhancedAdaptivePrecisionCohortOptimizationV5"] = EnhancedAdaptivePrecisionCohortOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5").set_name("LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5", register=True) except Exception as e: print("EnhancedAdaptivePrecisionCohortOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptivePrecisionFocalStrategy import ( - EnhancedAdaptivePrecisionFocalStrategy, - ) + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionFocalStrategy import EnhancedAdaptivePrecisionFocalStrategy lama_register["EnhancedAdaptivePrecisionFocalStrategy"] = EnhancedAdaptivePrecisionFocalStrategy - LLAMAEnhancedAdaptivePrecisionFocalStrategy = NonObjectOptimizer( - method="LLAMAEnhancedAdaptivePrecisionFocalStrategy" - ).set_name("LLAMAEnhancedAdaptivePrecisionFocalStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptivePrecisionFocalStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionFocalStrategy").set_name("LLAMAEnhancedAdaptivePrecisionFocalStrategy", register=True) except Exception as e: print("EnhancedAdaptivePrecisionFocalStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA import EnhancedAdaptiveQGSA lama_register["EnhancedAdaptiveQGSA"] = EnhancedAdaptiveQGSA - LLAMAEnhancedAdaptiveQGSA = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA").set_name( - "LLAMAEnhancedAdaptiveQGSA", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA").set_name("LLAMAEnhancedAdaptiveQGSA", register=True) except Exception as e: print("EnhancedAdaptiveQGSA can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v10 import EnhancedAdaptiveQGSA_v10 lama_register["EnhancedAdaptiveQGSA_v10"] = EnhancedAdaptiveQGSA_v10 - LLAMAEnhancedAdaptiveQGSA_v10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10").set_name( - "LLAMAEnhancedAdaptiveQGSA_v10", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10").set_name("LLAMAEnhancedAdaptiveQGSA_v10", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v10 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v11 import EnhancedAdaptiveQGSA_v11 lama_register["EnhancedAdaptiveQGSA_v11"] = EnhancedAdaptiveQGSA_v11 - LLAMAEnhancedAdaptiveQGSA_v11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11").set_name( - "LLAMAEnhancedAdaptiveQGSA_v11", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11").set_name("LLAMAEnhancedAdaptiveQGSA_v11", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v11 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v12 import EnhancedAdaptiveQGSA_v12 lama_register["EnhancedAdaptiveQGSA_v12"] = EnhancedAdaptiveQGSA_v12 - LLAMAEnhancedAdaptiveQGSA_v12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12").set_name( - "LLAMAEnhancedAdaptiveQGSA_v12", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12").set_name("LLAMAEnhancedAdaptiveQGSA_v12", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v12 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v13 import EnhancedAdaptiveQGSA_v13 lama_register["EnhancedAdaptiveQGSA_v13"] = EnhancedAdaptiveQGSA_v13 - LLAMAEnhancedAdaptiveQGSA_v13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13").set_name( - "LLAMAEnhancedAdaptiveQGSA_v13", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13").set_name("LLAMAEnhancedAdaptiveQGSA_v13", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v13 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v14 import EnhancedAdaptiveQGSA_v14 lama_register["EnhancedAdaptiveQGSA_v14"] = EnhancedAdaptiveQGSA_v14 - LLAMAEnhancedAdaptiveQGSA_v14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14").set_name( - "LLAMAEnhancedAdaptiveQGSA_v14", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14").set_name("LLAMAEnhancedAdaptiveQGSA_v14", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v14 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v15 import EnhancedAdaptiveQGSA_v15 lama_register["EnhancedAdaptiveQGSA_v15"] = EnhancedAdaptiveQGSA_v15 - LLAMAEnhancedAdaptiveQGSA_v15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15").set_name( - "LLAMAEnhancedAdaptiveQGSA_v15", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15").set_name("LLAMAEnhancedAdaptiveQGSA_v15", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v15 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v16 import EnhancedAdaptiveQGSA_v16 lama_register["EnhancedAdaptiveQGSA_v16"] = EnhancedAdaptiveQGSA_v16 - LLAMAEnhancedAdaptiveQGSA_v16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16").set_name( - "LLAMAEnhancedAdaptiveQGSA_v16", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16").set_name("LLAMAEnhancedAdaptiveQGSA_v16", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v16 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v17 import EnhancedAdaptiveQGSA_v17 lama_register["EnhancedAdaptiveQGSA_v17"] = EnhancedAdaptiveQGSA_v17 - LLAMAEnhancedAdaptiveQGSA_v17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17").set_name( - "LLAMAEnhancedAdaptiveQGSA_v17", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17").set_name("LLAMAEnhancedAdaptiveQGSA_v17", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v17 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v18 import EnhancedAdaptiveQGSA_v18 lama_register["EnhancedAdaptiveQGSA_v18"] = EnhancedAdaptiveQGSA_v18 - LLAMAEnhancedAdaptiveQGSA_v18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18").set_name( - "LLAMAEnhancedAdaptiveQGSA_v18", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18").set_name("LLAMAEnhancedAdaptiveQGSA_v18", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v18 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v19 import EnhancedAdaptiveQGSA_v19 lama_register["EnhancedAdaptiveQGSA_v19"] = EnhancedAdaptiveQGSA_v19 - LLAMAEnhancedAdaptiveQGSA_v19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19").set_name( - "LLAMAEnhancedAdaptiveQGSA_v19", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19").set_name("LLAMAEnhancedAdaptiveQGSA_v19", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v19 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v2 import EnhancedAdaptiveQGSA_v2 lama_register["EnhancedAdaptiveQGSA_v2"] = EnhancedAdaptiveQGSA_v2 - LLAMAEnhancedAdaptiveQGSA_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2").set_name( - "LLAMAEnhancedAdaptiveQGSA_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2").set_name("LLAMAEnhancedAdaptiveQGSA_v2", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v20 import EnhancedAdaptiveQGSA_v20 lama_register["EnhancedAdaptiveQGSA_v20"] = EnhancedAdaptiveQGSA_v20 - LLAMAEnhancedAdaptiveQGSA_v20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20").set_name( - "LLAMAEnhancedAdaptiveQGSA_v20", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20").set_name("LLAMAEnhancedAdaptiveQGSA_v20", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v20 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v21 import EnhancedAdaptiveQGSA_v21 lama_register["EnhancedAdaptiveQGSA_v21"] = EnhancedAdaptiveQGSA_v21 - LLAMAEnhancedAdaptiveQGSA_v21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21").set_name( - "LLAMAEnhancedAdaptiveQGSA_v21", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21").set_name("LLAMAEnhancedAdaptiveQGSA_v21", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v21 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v22 import EnhancedAdaptiveQGSA_v22 lama_register["EnhancedAdaptiveQGSA_v22"] = EnhancedAdaptiveQGSA_v22 - LLAMAEnhancedAdaptiveQGSA_v22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22").set_name( - "LLAMAEnhancedAdaptiveQGSA_v22", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22").set_name("LLAMAEnhancedAdaptiveQGSA_v22", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v22 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v23 import EnhancedAdaptiveQGSA_v23 lama_register["EnhancedAdaptiveQGSA_v23"] = EnhancedAdaptiveQGSA_v23 - LLAMAEnhancedAdaptiveQGSA_v23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23").set_name( - "LLAMAEnhancedAdaptiveQGSA_v23", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23").set_name("LLAMAEnhancedAdaptiveQGSA_v23", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v23 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v24 import EnhancedAdaptiveQGSA_v24 lama_register["EnhancedAdaptiveQGSA_v24"] = EnhancedAdaptiveQGSA_v24 - LLAMAEnhancedAdaptiveQGSA_v24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24").set_name( - "LLAMAEnhancedAdaptiveQGSA_v24", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24").set_name("LLAMAEnhancedAdaptiveQGSA_v24", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v24 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v25 import EnhancedAdaptiveQGSA_v25 lama_register["EnhancedAdaptiveQGSA_v25"] = EnhancedAdaptiveQGSA_v25 - LLAMAEnhancedAdaptiveQGSA_v25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25").set_name( - "LLAMAEnhancedAdaptiveQGSA_v25", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25").set_name("LLAMAEnhancedAdaptiveQGSA_v25", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v25 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v26 import EnhancedAdaptiveQGSA_v26 lama_register["EnhancedAdaptiveQGSA_v26"] = EnhancedAdaptiveQGSA_v26 - LLAMAEnhancedAdaptiveQGSA_v26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26").set_name( - "LLAMAEnhancedAdaptiveQGSA_v26", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26").set_name("LLAMAEnhancedAdaptiveQGSA_v26", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v26 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v27 import EnhancedAdaptiveQGSA_v27 lama_register["EnhancedAdaptiveQGSA_v27"] = EnhancedAdaptiveQGSA_v27 - LLAMAEnhancedAdaptiveQGSA_v27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27").set_name( - "LLAMAEnhancedAdaptiveQGSA_v27", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27").set_name("LLAMAEnhancedAdaptiveQGSA_v27", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v27 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v28 import EnhancedAdaptiveQGSA_v28 lama_register["EnhancedAdaptiveQGSA_v28"] = EnhancedAdaptiveQGSA_v28 - LLAMAEnhancedAdaptiveQGSA_v28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28").set_name( - "LLAMAEnhancedAdaptiveQGSA_v28", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28").set_name("LLAMAEnhancedAdaptiveQGSA_v28", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v28 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v29 import EnhancedAdaptiveQGSA_v29 lama_register["EnhancedAdaptiveQGSA_v29"] = EnhancedAdaptiveQGSA_v29 - LLAMAEnhancedAdaptiveQGSA_v29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29").set_name( - "LLAMAEnhancedAdaptiveQGSA_v29", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29").set_name("LLAMAEnhancedAdaptiveQGSA_v29", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v29 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v3 import EnhancedAdaptiveQGSA_v3 lama_register["EnhancedAdaptiveQGSA_v3"] = EnhancedAdaptiveQGSA_v3 - LLAMAEnhancedAdaptiveQGSA_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3").set_name( - "LLAMAEnhancedAdaptiveQGSA_v3", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3").set_name("LLAMAEnhancedAdaptiveQGSA_v3", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v30 import EnhancedAdaptiveQGSA_v30 lama_register["EnhancedAdaptiveQGSA_v30"] = EnhancedAdaptiveQGSA_v30 - LLAMAEnhancedAdaptiveQGSA_v30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30").set_name( - "LLAMAEnhancedAdaptiveQGSA_v30", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30").set_name("LLAMAEnhancedAdaptiveQGSA_v30", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v30 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v31 import EnhancedAdaptiveQGSA_v31 lama_register["EnhancedAdaptiveQGSA_v31"] = EnhancedAdaptiveQGSA_v31 - LLAMAEnhancedAdaptiveQGSA_v31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31").set_name( - "LLAMAEnhancedAdaptiveQGSA_v31", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31").set_name("LLAMAEnhancedAdaptiveQGSA_v31", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v31 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v32 import EnhancedAdaptiveQGSA_v32 lama_register["EnhancedAdaptiveQGSA_v32"] = EnhancedAdaptiveQGSA_v32 - LLAMAEnhancedAdaptiveQGSA_v32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32").set_name( - "LLAMAEnhancedAdaptiveQGSA_v32", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32").set_name("LLAMAEnhancedAdaptiveQGSA_v32", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v32 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v33 import EnhancedAdaptiveQGSA_v33 lama_register["EnhancedAdaptiveQGSA_v33"] = EnhancedAdaptiveQGSA_v33 - LLAMAEnhancedAdaptiveQGSA_v33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33").set_name( - "LLAMAEnhancedAdaptiveQGSA_v33", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33").set_name("LLAMAEnhancedAdaptiveQGSA_v33", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v33 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v34 import EnhancedAdaptiveQGSA_v34 lama_register["EnhancedAdaptiveQGSA_v34"] = EnhancedAdaptiveQGSA_v34 - LLAMAEnhancedAdaptiveQGSA_v34 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34").set_name( - "LLAMAEnhancedAdaptiveQGSA_v34", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v34 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34").set_name("LLAMAEnhancedAdaptiveQGSA_v34", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v34 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v35 import EnhancedAdaptiveQGSA_v35 lama_register["EnhancedAdaptiveQGSA_v35"] = EnhancedAdaptiveQGSA_v35 - LLAMAEnhancedAdaptiveQGSA_v35 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35").set_name( - "LLAMAEnhancedAdaptiveQGSA_v35", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v35 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35").set_name("LLAMAEnhancedAdaptiveQGSA_v35", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v35 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v36 import EnhancedAdaptiveQGSA_v36 lama_register["EnhancedAdaptiveQGSA_v36"] = EnhancedAdaptiveQGSA_v36 - LLAMAEnhancedAdaptiveQGSA_v36 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36").set_name( - "LLAMAEnhancedAdaptiveQGSA_v36", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v36 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36").set_name("LLAMAEnhancedAdaptiveQGSA_v36", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v36 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v38 import EnhancedAdaptiveQGSA_v38 lama_register["EnhancedAdaptiveQGSA_v38"] = EnhancedAdaptiveQGSA_v38 - LLAMAEnhancedAdaptiveQGSA_v38 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38").set_name( - "LLAMAEnhancedAdaptiveQGSA_v38", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v38 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38").set_name("LLAMAEnhancedAdaptiveQGSA_v38", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v38 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v39 import EnhancedAdaptiveQGSA_v39 lama_register["EnhancedAdaptiveQGSA_v39"] = EnhancedAdaptiveQGSA_v39 - LLAMAEnhancedAdaptiveQGSA_v39 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39").set_name( - "LLAMAEnhancedAdaptiveQGSA_v39", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v39 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39").set_name("LLAMAEnhancedAdaptiveQGSA_v39", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v39 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v4 import EnhancedAdaptiveQGSA_v4 lama_register["EnhancedAdaptiveQGSA_v4"] = EnhancedAdaptiveQGSA_v4 - LLAMAEnhancedAdaptiveQGSA_v4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4").set_name( - "LLAMAEnhancedAdaptiveQGSA_v4", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4").set_name("LLAMAEnhancedAdaptiveQGSA_v4", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v40 import EnhancedAdaptiveQGSA_v40 lama_register["EnhancedAdaptiveQGSA_v40"] = EnhancedAdaptiveQGSA_v40 - LLAMAEnhancedAdaptiveQGSA_v40 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40").set_name( - "LLAMAEnhancedAdaptiveQGSA_v40", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v40 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40").set_name("LLAMAEnhancedAdaptiveQGSA_v40", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v40 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v41 import EnhancedAdaptiveQGSA_v41 lama_register["EnhancedAdaptiveQGSA_v41"] = EnhancedAdaptiveQGSA_v41 - LLAMAEnhancedAdaptiveQGSA_v41 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41").set_name( - "LLAMAEnhancedAdaptiveQGSA_v41", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v41 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41").set_name("LLAMAEnhancedAdaptiveQGSA_v41", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v41 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v42 import EnhancedAdaptiveQGSA_v42 lama_register["EnhancedAdaptiveQGSA_v42"] = EnhancedAdaptiveQGSA_v42 - LLAMAEnhancedAdaptiveQGSA_v42 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42").set_name( - "LLAMAEnhancedAdaptiveQGSA_v42", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v42 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42").set_name("LLAMAEnhancedAdaptiveQGSA_v42", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v42 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v43 import EnhancedAdaptiveQGSA_v43 lama_register["EnhancedAdaptiveQGSA_v43"] = EnhancedAdaptiveQGSA_v43 - LLAMAEnhancedAdaptiveQGSA_v43 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43").set_name( - "LLAMAEnhancedAdaptiveQGSA_v43", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v43 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43").set_name("LLAMAEnhancedAdaptiveQGSA_v43", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v43 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v44 import EnhancedAdaptiveQGSA_v44 lama_register["EnhancedAdaptiveQGSA_v44"] = EnhancedAdaptiveQGSA_v44 - LLAMAEnhancedAdaptiveQGSA_v44 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44").set_name( - "LLAMAEnhancedAdaptiveQGSA_v44", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v44 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44").set_name("LLAMAEnhancedAdaptiveQGSA_v44", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v44 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v47 import EnhancedAdaptiveQGSA_v47 lama_register["EnhancedAdaptiveQGSA_v47"] = EnhancedAdaptiveQGSA_v47 - LLAMAEnhancedAdaptiveQGSA_v47 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47").set_name( - "LLAMAEnhancedAdaptiveQGSA_v47", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v47 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47").set_name("LLAMAEnhancedAdaptiveQGSA_v47", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v47 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v5 import EnhancedAdaptiveQGSA_v5 lama_register["EnhancedAdaptiveQGSA_v5"] = EnhancedAdaptiveQGSA_v5 - LLAMAEnhancedAdaptiveQGSA_v5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5").set_name( - "LLAMAEnhancedAdaptiveQGSA_v5", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5").set_name("LLAMAEnhancedAdaptiveQGSA_v5", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v6 import EnhancedAdaptiveQGSA_v6 lama_register["EnhancedAdaptiveQGSA_v6"] = EnhancedAdaptiveQGSA_v6 - LLAMAEnhancedAdaptiveQGSA_v6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6").set_name( - "LLAMAEnhancedAdaptiveQGSA_v6", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6").set_name("LLAMAEnhancedAdaptiveQGSA_v6", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v8 import EnhancedAdaptiveQGSA_v8 lama_register["EnhancedAdaptiveQGSA_v8"] = EnhancedAdaptiveQGSA_v8 - LLAMAEnhancedAdaptiveQGSA_v8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8").set_name( - "LLAMAEnhancedAdaptiveQGSA_v8", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8").set_name("LLAMAEnhancedAdaptiveQGSA_v8", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v8 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v9 import EnhancedAdaptiveQGSA_v9 lama_register["EnhancedAdaptiveQGSA_v9"] = EnhancedAdaptiveQGSA_v9 - LLAMAEnhancedAdaptiveQGSA_v9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9").set_name( - "LLAMAEnhancedAdaptiveQGSA_v9", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQGSA_v9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9").set_name("LLAMAEnhancedAdaptiveQGSA_v9", register=True) except Exception as e: print("EnhancedAdaptiveQGSA_v9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDEWithDynamicElitistLearning import ( - EnhancedAdaptiveQuantumDEWithDynamicElitistLearning, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDEWithDynamicElitistLearning import EnhancedAdaptiveQuantumDEWithDynamicElitistLearning - lama_register["EnhancedAdaptiveQuantumDEWithDynamicElitistLearning"] = ( - EnhancedAdaptiveQuantumDEWithDynamicElitistLearning - ) - LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning" - ).set_name("LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning", register=True) + lama_register["EnhancedAdaptiveQuantumDEWithDynamicElitistLearning"] = EnhancedAdaptiveQuantumDEWithDynamicElitistLearning + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning").set_name("LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning", register=True) except Exception as e: print("EnhancedAdaptiveQuantumDEWithDynamicElitistLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolution import ( - EnhancedAdaptiveQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolution import EnhancedAdaptiveQuantumDifferentialEvolution - lama_register["EnhancedAdaptiveQuantumDifferentialEvolution"] = ( - EnhancedAdaptiveQuantumDifferentialEvolution - ) - LLAMAEnhancedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution" - ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolution", register=True) + lama_register["EnhancedAdaptiveQuantumDifferentialEvolution"] = EnhancedAdaptiveQuantumDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolution", register=True) except Exception as e: print("EnhancedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch import ( - EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch import EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch - lama_register["EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch"] = ( - EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch - ) - LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch" - ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch", register=True) + lama_register["EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch"] = EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch").set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch", register=True) except Exception as e: print("EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDynamicLevyOptimization import ( - EnhancedAdaptiveQuantumDynamicLevyOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDynamicLevyOptimization import EnhancedAdaptiveQuantumDynamicLevyOptimization - lama_register["EnhancedAdaptiveQuantumDynamicLevyOptimization"] = ( - EnhancedAdaptiveQuantumDynamicLevyOptimization - ) - LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization" - ).set_name("LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization", register=True) + lama_register["EnhancedAdaptiveQuantumDynamicLevyOptimization"] = EnhancedAdaptiveQuantumDynamicLevyOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization").set_name("LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization", register=True) except Exception as e: print("EnhancedAdaptiveQuantumDynamicLevyOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumGradientMemeticOptimizer import ( - EnhancedAdaptiveQuantumGradientMemeticOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumGradientMemeticOptimizer import EnhancedAdaptiveQuantumGradientMemeticOptimizer - lama_register["EnhancedAdaptiveQuantumGradientMemeticOptimizer"] = ( - EnhancedAdaptiveQuantumGradientMemeticOptimizer - ) - LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer" - ).set_name("LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer", register=True) + lama_register["EnhancedAdaptiveQuantumGradientMemeticOptimizer"] = EnhancedAdaptiveQuantumGradientMemeticOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer").set_name("LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveQuantumGradientMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGB import ( - EnhancedAdaptiveQuantumHarmonySearchDBGB, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGB import EnhancedAdaptiveQuantumHarmonySearchDBGB lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGB"] = EnhancedAdaptiveQuantumHarmonySearchDBGB - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchDBGB can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinal import ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinal, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinal import EnhancedAdaptiveQuantumHarmonySearchDBGBFinal - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinal"] = ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinal - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinal"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinal + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinal can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII import ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII import EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII"] = ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII import ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII import EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII"] = ( - EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBImproved import ( - EnhancedAdaptiveQuantumHarmonySearchDBGBImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBImproved import EnhancedAdaptiveQuantumHarmonySearchDBGBImproved - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBImproved"] = ( - EnhancedAdaptiveQuantumHarmonySearchDBGBImproved - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBImproved"] = EnhancedAdaptiveQuantumHarmonySearchDBGBImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchDBGBImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchFinal import ( - EnhancedAdaptiveQuantumHarmonySearchFinal, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchFinal import EnhancedAdaptiveQuantumHarmonySearchFinal lama_register["EnhancedAdaptiveQuantumHarmonySearchFinal"] = EnhancedAdaptiveQuantumHarmonySearchFinal - LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchFinal can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImproved import ( - EnhancedAdaptiveQuantumHarmonySearchImproved, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImproved import EnhancedAdaptiveQuantumHarmonySearchImproved - lama_register["EnhancedAdaptiveQuantumHarmonySearchImproved"] = ( - EnhancedAdaptiveQuantumHarmonySearchImproved - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchImproved"] = EnhancedAdaptiveQuantumHarmonySearchImproved + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImprovedRefined import ( - EnhancedAdaptiveQuantumHarmonySearchImprovedRefined, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImprovedRefined import EnhancedAdaptiveQuantumHarmonySearchImprovedRefined - lama_register["EnhancedAdaptiveQuantumHarmonySearchImprovedRefined"] = ( - EnhancedAdaptiveQuantumHarmonySearchImprovedRefined - ) - LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined" - ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined", register=True) + lama_register["EnhancedAdaptiveQuantumHarmonySearchImprovedRefined"] = EnhancedAdaptiveQuantumHarmonySearchImprovedRefined + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined", register=True) except Exception as e: print("EnhancedAdaptiveQuantumHarmonySearchImprovedRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevyMemeticOptimizer import ( - EnhancedAdaptiveQuantumLevyMemeticOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevyMemeticOptimizer import EnhancedAdaptiveQuantumLevyMemeticOptimizer lama_register["EnhancedAdaptiveQuantumLevyMemeticOptimizer"] = EnhancedAdaptiveQuantumLevyMemeticOptimizer - LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer" - ).set_name("LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer").set_name("LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer", register=True) except Exception as e: print("EnhancedAdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevySwarmOptimization import ( - EnhancedAdaptiveQuantumLevySwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevySwarmOptimization import EnhancedAdaptiveQuantumLevySwarmOptimization - lama_register["EnhancedAdaptiveQuantumLevySwarmOptimization"] = ( - EnhancedAdaptiveQuantumLevySwarmOptimization - ) - LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization", register=True) + lama_register["EnhancedAdaptiveQuantumLevySwarmOptimization"] = EnhancedAdaptiveQuantumLevySwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveQuantumLevySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLocalSearch import ( - EnhancedAdaptiveQuantumLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLocalSearch import EnhancedAdaptiveQuantumLocalSearch lama_register["EnhancedAdaptiveQuantumLocalSearch"] = EnhancedAdaptiveQuantumLocalSearch - LLAMAEnhancedAdaptiveQuantumLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumLocalSearch" - ).set_name("LLAMAEnhancedAdaptiveQuantumLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLocalSearch").set_name("LLAMAEnhancedAdaptiveQuantumLocalSearch", register=True) except Exception as e: print("EnhancedAdaptiveQuantumLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumMemeticOptimizerV4 import ( - EnhancedAdaptiveQuantumMemeticOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumMemeticOptimizerV4 import EnhancedAdaptiveQuantumMemeticOptimizerV4 lama_register["EnhancedAdaptiveQuantumMemeticOptimizerV4"] = EnhancedAdaptiveQuantumMemeticOptimizerV4 - LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4" - ).set_name("LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4").set_name("LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4", register=True) except Exception as e: print("EnhancedAdaptiveQuantumMemeticOptimizerV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSO import EnhancedAdaptiveQuantumPSO lama_register["EnhancedAdaptiveQuantumPSO"] = EnhancedAdaptiveQuantumPSO - LLAMAEnhancedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO").set_name( - "LLAMAEnhancedAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO").set_name("LLAMAEnhancedAdaptiveQuantumPSO", register=True) except Exception as e: print("EnhancedAdaptiveQuantumPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSOv2 import EnhancedAdaptiveQuantumPSOv2 lama_register["EnhancedAdaptiveQuantumPSOv2"] = EnhancedAdaptiveQuantumPSOv2 - LLAMAEnhancedAdaptiveQuantumPSOv2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumPSOv2" - ).set_name("LLAMAEnhancedAdaptiveQuantumPSOv2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSOv2").set_name("LLAMAEnhancedAdaptiveQuantumPSOv2", register=True) except Exception as e: print("EnhancedAdaptiveQuantumPSOv2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumParticleSwarmOptimization import ( - EnhancedAdaptiveQuantumParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumParticleSwarmOptimization import EnhancedAdaptiveQuantumParticleSwarmOptimization - lama_register["EnhancedAdaptiveQuantumParticleSwarmOptimization"] = ( - EnhancedAdaptiveQuantumParticleSwarmOptimization - ) - LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization", register=True) + lama_register["EnhancedAdaptiveQuantumParticleSwarmOptimization"] = EnhancedAdaptiveQuantumParticleSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealing import ( - EnhancedAdaptiveQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealing import EnhancedAdaptiveQuantumSimulatedAnnealing lama_register["EnhancedAdaptiveQuantumSimulatedAnnealing"] = EnhancedAdaptiveQuantumSimulatedAnnealing - LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing" - ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealingOptimized import ( - EnhancedAdaptiveQuantumSimulatedAnnealingOptimized, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealingOptimized import EnhancedAdaptiveQuantumSimulatedAnnealingOptimized - lama_register["EnhancedAdaptiveQuantumSimulatedAnnealingOptimized"] = ( - EnhancedAdaptiveQuantumSimulatedAnnealingOptimized - ) - LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized" - ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized", register=True) + lama_register["EnhancedAdaptiveQuantumSimulatedAnnealingOptimized"] = EnhancedAdaptiveQuantumSimulatedAnnealingOptimized + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized").set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSimulatedAnnealingOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimization import ( - EnhancedAdaptiveQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimization import EnhancedAdaptiveQuantumSwarmOptimization lama_register["EnhancedAdaptiveQuantumSwarmOptimization"] = EnhancedAdaptiveQuantumSwarmOptimization - LLAMAEnhancedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV10 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV10 import EnhancedAdaptiveQuantumSwarmOptimizationV10 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV10"] = EnhancedAdaptiveQuantumSwarmOptimizationV10 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV11 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV11, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV11 import EnhancedAdaptiveQuantumSwarmOptimizationV11 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV11"] = EnhancedAdaptiveQuantumSwarmOptimizationV11 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV12 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV12 import EnhancedAdaptiveQuantumSwarmOptimizationV12 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV12"] = EnhancedAdaptiveQuantumSwarmOptimizationV12 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV13 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV13 import EnhancedAdaptiveQuantumSwarmOptimizationV13 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV13"] = EnhancedAdaptiveQuantumSwarmOptimizationV13 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV14 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV14, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV14 import EnhancedAdaptiveQuantumSwarmOptimizationV14 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV14"] = EnhancedAdaptiveQuantumSwarmOptimizationV14 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV15 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV15, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV15 import EnhancedAdaptiveQuantumSwarmOptimizationV15 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV15"] = EnhancedAdaptiveQuantumSwarmOptimizationV15 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV16 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV16, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV16 import EnhancedAdaptiveQuantumSwarmOptimizationV16 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV16"] = EnhancedAdaptiveQuantumSwarmOptimizationV16 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV17 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV17, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV17 import EnhancedAdaptiveQuantumSwarmOptimizationV17 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV17"] = EnhancedAdaptiveQuantumSwarmOptimizationV17 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV18 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV18 import EnhancedAdaptiveQuantumSwarmOptimizationV18 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV18"] = EnhancedAdaptiveQuantumSwarmOptimizationV18 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV19 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV19 import EnhancedAdaptiveQuantumSwarmOptimizationV19 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV19"] = EnhancedAdaptiveQuantumSwarmOptimizationV19 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV2 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV2 import EnhancedAdaptiveQuantumSwarmOptimizationV2 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV2"] = EnhancedAdaptiveQuantumSwarmOptimizationV2 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV20 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV20, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV20 import EnhancedAdaptiveQuantumSwarmOptimizationV20 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV20"] = EnhancedAdaptiveQuantumSwarmOptimizationV20 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV21 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV21 import EnhancedAdaptiveQuantumSwarmOptimizationV21 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV21"] = EnhancedAdaptiveQuantumSwarmOptimizationV21 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV22 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV22 import EnhancedAdaptiveQuantumSwarmOptimizationV22 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV22"] = EnhancedAdaptiveQuantumSwarmOptimizationV22 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV23 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV23 import EnhancedAdaptiveQuantumSwarmOptimizationV23 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV23"] = EnhancedAdaptiveQuantumSwarmOptimizationV23 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV24 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV24 import EnhancedAdaptiveQuantumSwarmOptimizationV24 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV24"] = EnhancedAdaptiveQuantumSwarmOptimizationV24 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV25 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV25 import EnhancedAdaptiveQuantumSwarmOptimizationV25 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV25"] = EnhancedAdaptiveQuantumSwarmOptimizationV25 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV26 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV26 import EnhancedAdaptiveQuantumSwarmOptimizationV26 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV26"] = EnhancedAdaptiveQuantumSwarmOptimizationV26 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV27 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV27, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV27 import EnhancedAdaptiveQuantumSwarmOptimizationV27 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV27"] = EnhancedAdaptiveQuantumSwarmOptimizationV27 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV28 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV28, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV28 import EnhancedAdaptiveQuantumSwarmOptimizationV28 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV28"] = EnhancedAdaptiveQuantumSwarmOptimizationV28 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV29 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV29, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV29 import EnhancedAdaptiveQuantumSwarmOptimizationV29 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV29"] = EnhancedAdaptiveQuantumSwarmOptimizationV29 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV3 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV3 import EnhancedAdaptiveQuantumSwarmOptimizationV3 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV3"] = EnhancedAdaptiveQuantumSwarmOptimizationV3 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV30 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV30, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV30 import EnhancedAdaptiveQuantumSwarmOptimizationV30 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV30"] = EnhancedAdaptiveQuantumSwarmOptimizationV30 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV31 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV31, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV31 import EnhancedAdaptiveQuantumSwarmOptimizationV31 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV31"] = EnhancedAdaptiveQuantumSwarmOptimizationV31 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV4 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV4 import EnhancedAdaptiveQuantumSwarmOptimizationV4 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV4"] = EnhancedAdaptiveQuantumSwarmOptimizationV4 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV5 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV5 import EnhancedAdaptiveQuantumSwarmOptimizationV5 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV5"] = EnhancedAdaptiveQuantumSwarmOptimizationV5 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV6 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV6 import EnhancedAdaptiveQuantumSwarmOptimizationV6 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV6"] = EnhancedAdaptiveQuantumSwarmOptimizationV6 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV7 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV7 import EnhancedAdaptiveQuantumSwarmOptimizationV7 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV7"] = EnhancedAdaptiveQuantumSwarmOptimizationV7 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV8 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV8 import EnhancedAdaptiveQuantumSwarmOptimizationV8 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV8"] = EnhancedAdaptiveQuantumSwarmOptimizationV8 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV9 import ( - EnhancedAdaptiveQuantumSwarmOptimizationV9, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV9 import EnhancedAdaptiveQuantumSwarmOptimizationV9 lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV9"] = EnhancedAdaptiveQuantumSwarmOptimizationV9 - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9" - ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9", register=True) except Exception as e: print("EnhancedAdaptiveQuantumSwarmOptimizationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSinusoidalDifferentialSwarm import ( - EnhancedAdaptiveSinusoidalDifferentialSwarm, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSinusoidalDifferentialSwarm import EnhancedAdaptiveSinusoidalDifferentialSwarm lama_register["EnhancedAdaptiveSinusoidalDifferentialSwarm"] = EnhancedAdaptiveSinusoidalDifferentialSwarm - LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm" - ).set_name("LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm", register=True) except Exception as e: print("EnhancedAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 import ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26"] = ( - EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 - ) - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26" - ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26", register=True) + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26", register=True) except Exception as e: print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveSwarmHarmonicOptimization import ( - EnhancedAdaptiveSwarmHarmonicOptimization, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveSwarmHarmonicOptimization import EnhancedAdaptiveSwarmHarmonicOptimization lama_register["EnhancedAdaptiveSwarmHarmonicOptimization"] = EnhancedAdaptiveSwarmHarmonicOptimization - LLAMAEnhancedAdaptiveSwarmHarmonicOptimization = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization" - ).set_name("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveSwarmHarmonicOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization").set_name("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization", register=True) except Exception as e: print("EnhancedAdaptiveSwarmHarmonicOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearch import ( - EnhancedAdaptiveTabuHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearch import EnhancedAdaptiveTabuHarmonySearch lama_register["EnhancedAdaptiveTabuHarmonySearch"] = EnhancedAdaptiveTabuHarmonySearch - LLAMAEnhancedAdaptiveTabuHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveTabuHarmonySearch" - ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveTabuHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearch").set_name("LLAMAEnhancedAdaptiveTabuHarmonySearch", register=True) except Exception as e: print("EnhancedAdaptiveTabuHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearchV2 import ( - EnhancedAdaptiveTabuHarmonySearchV2, - ) + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearchV2 import EnhancedAdaptiveTabuHarmonySearchV2 lama_register["EnhancedAdaptiveTabuHarmonySearchV2"] = EnhancedAdaptiveTabuHarmonySearchV2 - LLAMAEnhancedAdaptiveTabuHarmonySearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2" - ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdaptiveTabuHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveTabuHarmonySearchV2", register=True) except Exception as e: print("EnhancedAdaptiveTabuHarmonySearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedAdaptiveFireworkAlgorithm import ( - EnhancedAdvancedAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedAdvancedAdaptiveFireworkAlgorithm import EnhancedAdvancedAdaptiveFireworkAlgorithm lama_register["EnhancedAdvancedAdaptiveFireworkAlgorithm"] = EnhancedAdvancedAdaptiveFireworkAlgorithm - LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm" - ).set_name("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("EnhancedAdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 import ( - EnhancedAdvancedDifferentialEvolutionLocalSearch_v56, - ) + from nevergrad.optimization.lama.EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 import EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 - lama_register["EnhancedAdvancedDifferentialEvolutionLocalSearch_v56"] = ( - EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 - ) - LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56" - ).set_name("LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56", register=True) + lama_register["EnhancedAdvancedDifferentialEvolutionLocalSearch_v56"] = EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56").set_name("LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56", register=True) except Exception as e: print("EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridDifferentialEvolutionV4 import ( - EnhancedAdvancedHybridDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.EnhancedAdvancedHybridDifferentialEvolutionV4 import EnhancedAdvancedHybridDifferentialEvolutionV4 - lama_register["EnhancedAdvancedHybridDifferentialEvolutionV4"] = ( - EnhancedAdvancedHybridDifferentialEvolutionV4 - ) - LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4" - ).set_name("LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4", register=True) + lama_register["EnhancedAdvancedHybridDifferentialEvolutionV4"] = EnhancedAdvancedHybridDifferentialEvolutionV4 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4").set_name("LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4", register=True) except Exception as e: print("EnhancedAdvancedHybridDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV17 import ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV17, - ) + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV17 import EnhancedAdvancedHybridMetaHeuristicOptimizerV17 - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV17"] = ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV17 - ) - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17" - ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17", register=True) + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV17"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV17 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17", register=True) except Exception as e: print("EnhancedAdvancedHybridMetaHeuristicOptimizerV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV18 import ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV18, - ) + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV18 import EnhancedAdvancedHybridMetaHeuristicOptimizerV18 - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV18"] = ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV18 - ) - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18" - ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18", register=True) + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV18"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV18 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18", register=True) except Exception as e: print("EnhancedAdvancedHybridMetaHeuristicOptimizerV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV19 import ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV19, - ) + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV19 import EnhancedAdvancedHybridMetaHeuristicOptimizerV19 - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV19"] = ( - EnhancedAdvancedHybridMetaHeuristicOptimizerV19 - ) - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19" - ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19", register=True) + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV19"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV19 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19", register=True) except Exception as e: print("EnhancedAdvancedHybridMetaHeuristicOptimizerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer import ( - EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer import EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer - lama_register["EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer"] = ( - EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer - ) - LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer" - ).set_name("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer", register=True) + lama_register["EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer"] = EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer").set_name("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer", register=True) except Exception as e: print("EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV1 import ( - EnhancedAdvancedQuantumSwarmOptimizationV1, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV1 import EnhancedAdvancedQuantumSwarmOptimizationV1 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV1"] = EnhancedAdvancedQuantumSwarmOptimizationV1 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV10 import ( - EnhancedAdvancedQuantumSwarmOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV10 import EnhancedAdvancedQuantumSwarmOptimizationV10 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV10"] = EnhancedAdvancedQuantumSwarmOptimizationV10 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV11 import ( - EnhancedAdvancedQuantumSwarmOptimizationV11, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV11 import EnhancedAdvancedQuantumSwarmOptimizationV11 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV11"] = EnhancedAdvancedQuantumSwarmOptimizationV11 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV12 import ( - EnhancedAdvancedQuantumSwarmOptimizationV12, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV12 import EnhancedAdvancedQuantumSwarmOptimizationV12 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV12"] = EnhancedAdvancedQuantumSwarmOptimizationV12 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV13 import ( - EnhancedAdvancedQuantumSwarmOptimizationV13, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV13 import EnhancedAdvancedQuantumSwarmOptimizationV13 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV13"] = EnhancedAdvancedQuantumSwarmOptimizationV13 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV14 import ( - EnhancedAdvancedQuantumSwarmOptimizationV14, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV14 import EnhancedAdvancedQuantumSwarmOptimizationV14 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV14"] = EnhancedAdvancedQuantumSwarmOptimizationV14 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV2 import ( - EnhancedAdvancedQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV2 import EnhancedAdvancedQuantumSwarmOptimizationV2 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV2"] = EnhancedAdvancedQuantumSwarmOptimizationV2 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV3 import ( - EnhancedAdvancedQuantumSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV3 import EnhancedAdvancedQuantumSwarmOptimizationV3 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV3"] = EnhancedAdvancedQuantumSwarmOptimizationV3 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV4 import ( - EnhancedAdvancedQuantumSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV4 import EnhancedAdvancedQuantumSwarmOptimizationV4 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV4"] = EnhancedAdvancedQuantumSwarmOptimizationV4 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV5 import ( - EnhancedAdvancedQuantumSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV5 import EnhancedAdvancedQuantumSwarmOptimizationV5 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV5"] = EnhancedAdvancedQuantumSwarmOptimizationV5 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV6 import ( - EnhancedAdvancedQuantumSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV6 import EnhancedAdvancedQuantumSwarmOptimizationV6 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV6"] = EnhancedAdvancedQuantumSwarmOptimizationV6 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV7 import ( - EnhancedAdvancedQuantumSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV7 import EnhancedAdvancedQuantumSwarmOptimizationV7 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV7"] = EnhancedAdvancedQuantumSwarmOptimizationV7 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV8 import ( - EnhancedAdvancedQuantumSwarmOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV8 import EnhancedAdvancedQuantumSwarmOptimizationV8 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV8"] = EnhancedAdvancedQuantumSwarmOptimizationV8 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV9 import ( - EnhancedAdvancedQuantumSwarmOptimizationV9, - ) + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV9 import EnhancedAdvancedQuantumSwarmOptimizationV9 lama_register["EnhancedAdvancedQuantumSwarmOptimizationV9"] = EnhancedAdvancedQuantumSwarmOptimizationV9 - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9" - ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9", register=True) except Exception as e: print("EnhancedAdvancedQuantumSwarmOptimizationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 import ( - EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78, - ) + from nevergrad.optimization.lama.EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 import EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 - lama_register["EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78"] = ( - EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 - ) - LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78" - ).set_name("LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78", register=True) + lama_register["EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78"] = EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78").set_name("LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78", register=True) except Exception as e: print("EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedAdvancedUltimateGuidedMassQGSA_v79 import ( - EnhancedAdvancedUltimateGuidedMassQGSA_v79, - ) + from nevergrad.optimization.lama.EnhancedAdvancedUltimateGuidedMassQGSA_v79 import EnhancedAdvancedUltimateGuidedMassQGSA_v79 lama_register["EnhancedAdvancedUltimateGuidedMassQGSA_v79"] = EnhancedAdvancedUltimateGuidedMassQGSA_v79 - LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79 = NonObjectOptimizer( - method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79" - ).set_name("LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79").set_name("LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79", register=True) except Exception as e: print("EnhancedAdvancedUltimateGuidedMassQGSA_v79 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedArchiveDE import EnhancedArchiveDE lama_register["EnhancedArchiveDE"] = EnhancedArchiveDE - LLAMAEnhancedArchiveDE = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE").set_name( - "LLAMAEnhancedArchiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedArchiveDE = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE").set_name("LLAMAEnhancedArchiveDE", register=True) except Exception as e: print("EnhancedArchiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedBalancedDualStrategyAdaptiveDE import ( - EnhancedBalancedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.EnhancedBalancedDualStrategyAdaptiveDE import EnhancedBalancedDualStrategyAdaptiveDE lama_register["EnhancedBalancedDualStrategyAdaptiveDE"] = EnhancedBalancedDualStrategyAdaptiveDE - LLAMAEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE" - ).set_name("LLAMAEnhancedBalancedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE").set_name("LLAMAEnhancedBalancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("EnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedCMAES import EnhancedCMAES lama_register["EnhancedCMAES"] = EnhancedCMAES - LLAMAEnhancedCMAES = NonObjectOptimizer(method="LLAMAEnhancedCMAES").set_name( - "LLAMAEnhancedCMAES", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedCMAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCMAES = NonObjectOptimizer(method="LLAMAEnhancedCMAES").set_name("LLAMAEnhancedCMAES", register=True) except Exception as e: print("EnhancedCMAES can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedCMAESv2 import EnhancedCMAESv2 lama_register["EnhancedCMAESv2"] = EnhancedCMAESv2 - LLAMAEnhancedCMAESv2 = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2").set_name( - "LLAMAEnhancedCMAESv2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCMAESv2 = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2").set_name("LLAMAEnhancedCMAESv2", register=True) except Exception as e: print("EnhancedCMAESv2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedChaoticFireworksOptimization import ( - EnhancedChaoticFireworksOptimization, - ) + from nevergrad.optimization.lama.EnhancedChaoticFireworksOptimization import EnhancedChaoticFireworksOptimization lama_register["EnhancedChaoticFireworksOptimization"] = EnhancedChaoticFireworksOptimization - LLAMAEnhancedChaoticFireworksOptimization = NonObjectOptimizer( - method="LLAMAEnhancedChaoticFireworksOptimization" - ).set_name("LLAMAEnhancedChaoticFireworksOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedChaoticFireworksOptimization = NonObjectOptimizer(method="LLAMAEnhancedChaoticFireworksOptimization").set_name("LLAMAEnhancedChaoticFireworksOptimization", register=True) except Exception as e: print("EnhancedChaoticFireworksOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedClusterDifferentialCrossover import ( - EnhancedClusterDifferentialCrossover, - ) + from nevergrad.optimization.lama.EnhancedClusterDifferentialCrossover import EnhancedClusterDifferentialCrossover lama_register["EnhancedClusterDifferentialCrossover"] = EnhancedClusterDifferentialCrossover - LLAMAEnhancedClusterDifferentialCrossover = NonObjectOptimizer( - method="LLAMAEnhancedClusterDifferentialCrossover" - ).set_name("LLAMAEnhancedClusterDifferentialCrossover", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedClusterDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedClusterDifferentialCrossover = NonObjectOptimizer(method="LLAMAEnhancedClusterDifferentialCrossover").set_name("LLAMAEnhancedClusterDifferentialCrossover", register=True) except Exception as e: print("EnhancedClusterDifferentialCrossover can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedClusteredDifferentialEvolution import ( - EnhancedClusteredDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedClusteredDifferentialEvolution import EnhancedClusteredDifferentialEvolution lama_register["EnhancedClusteredDifferentialEvolution"] = EnhancedClusteredDifferentialEvolution - LLAMAEnhancedClusteredDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedClusteredDifferentialEvolution" - ).set_name("LLAMAEnhancedClusteredDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedClusteredDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedClusteredDifferentialEvolution").set_name("LLAMAEnhancedClusteredDifferentialEvolution", register=True) except Exception as e: print("EnhancedClusteredDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedConvergenceAcceleratedSpiralSearch import ( - EnhancedConvergenceAcceleratedSpiralSearch, - ) + from nevergrad.optimization.lama.EnhancedConvergenceAcceleratedSpiralSearch import EnhancedConvergenceAcceleratedSpiralSearch lama_register["EnhancedConvergenceAcceleratedSpiralSearch"] = EnhancedConvergenceAcceleratedSpiralSearch - LLAMAEnhancedConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( - method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch" - ).set_name("LLAMAEnhancedConvergenceAcceleratedSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedConvergenceAcceleratedSpiralSearch = NonObjectOptimizer(method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch").set_name("LLAMAEnhancedConvergenceAcceleratedSpiralSearch", register=True) except Exception as e: print("EnhancedConvergenceAcceleratedSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolution import ( - EnhancedConvergentDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolution import EnhancedConvergentDifferentialEvolution lama_register["EnhancedConvergentDifferentialEvolution"] = EnhancedConvergentDifferentialEvolution - LLAMAEnhancedConvergentDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedConvergentDifferentialEvolution" - ).set_name("LLAMAEnhancedConvergentDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedConvergentDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolution").set_name("LLAMAEnhancedConvergentDifferentialEvolution", register=True) except Exception as e: print("EnhancedConvergentDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV2 import ( - EnhancedConvergentDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV2 import EnhancedConvergentDifferentialEvolutionV2 lama_register["EnhancedConvergentDifferentialEvolutionV2"] = EnhancedConvergentDifferentialEvolutionV2 - LLAMAEnhancedConvergentDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedConvergentDifferentialEvolutionV2" - ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedConvergentDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV2").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV2", register=True) except Exception as e: print("EnhancedConvergentDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV3 import ( - EnhancedConvergentDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV3 import EnhancedConvergentDifferentialEvolutionV3 lama_register["EnhancedConvergentDifferentialEvolutionV3"] = EnhancedConvergentDifferentialEvolutionV3 - LLAMAEnhancedConvergentDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAEnhancedConvergentDifferentialEvolutionV3" - ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedConvergentDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV3").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV3", register=True) except Exception as e: print("EnhancedConvergentDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV4 import ( - EnhancedConvergentDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV4 import EnhancedConvergentDifferentialEvolutionV4 lama_register["EnhancedConvergentDifferentialEvolutionV4"] = EnhancedConvergentDifferentialEvolutionV4 - LLAMAEnhancedConvergentDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAEnhancedConvergentDifferentialEvolutionV4" - ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedConvergentDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV4").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV4", register=True) except Exception as e: print("EnhancedConvergentDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCooperativeCulturalDifferentialSearch import ( - EnhancedCooperativeCulturalDifferentialSearch, - ) + from nevergrad.optimization.lama.EnhancedCooperativeCulturalDifferentialSearch import EnhancedCooperativeCulturalDifferentialSearch - lama_register["EnhancedCooperativeCulturalDifferentialSearch"] = ( - EnhancedCooperativeCulturalDifferentialSearch - ) - LLAMAEnhancedCooperativeCulturalDifferentialSearch = NonObjectOptimizer( - method="LLAMAEnhancedCooperativeCulturalDifferentialSearch" - ).set_name("LLAMAEnhancedCooperativeCulturalDifferentialSearch", register=True) + lama_register["EnhancedCooperativeCulturalDifferentialSearch"] = EnhancedCooperativeCulturalDifferentialSearch + res = NonObjectOptimizer(method="LLAMAEnhancedCooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCooperativeCulturalDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedCooperativeCulturalDifferentialSearch").set_name("LLAMAEnhancedCooperativeCulturalDifferentialSearch", register=True) except Exception as e: print("EnhancedCooperativeCulturalDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarm import ( - EnhancedCosineAdaptiveDifferentialSwarm, - ) + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarm import EnhancedCosineAdaptiveDifferentialSwarm lama_register["EnhancedCosineAdaptiveDifferentialSwarm"] = EnhancedCosineAdaptiveDifferentialSwarm - LLAMAEnhancedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( - method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm" - ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm").set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarm", register=True) except Exception as e: print("EnhancedCosineAdaptiveDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarmV2 import ( - EnhancedCosineAdaptiveDifferentialSwarmV2, - ) + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarmV2 import EnhancedCosineAdaptiveDifferentialSwarmV2 lama_register["EnhancedCosineAdaptiveDifferentialSwarmV2"] = EnhancedCosineAdaptiveDifferentialSwarmV2 - LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2 = NonObjectOptimizer( - method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2" - ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2 = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2").set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2", register=True) except Exception as e: print("EnhancedCosineAdaptiveDifferentialSwarmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCovarianceGradientSearchV2 import ( - EnhancedCovarianceGradientSearchV2, - ) + from nevergrad.optimization.lama.EnhancedCovarianceGradientSearchV2 import EnhancedCovarianceGradientSearchV2 lama_register["EnhancedCovarianceGradientSearchV2"] = EnhancedCovarianceGradientSearchV2 - LLAMAEnhancedCovarianceGradientSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedCovarianceGradientSearchV2" - ).set_name("LLAMAEnhancedCovarianceGradientSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceGradientSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCovarianceGradientSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedCovarianceGradientSearchV2").set_name("LLAMAEnhancedCovarianceGradientSearchV2", register=True) except Exception as e: print("EnhancedCovarianceGradientSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixAdaptation import ( - EnhancedCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.EnhancedCovarianceMatrixAdaptation import EnhancedCovarianceMatrixAdaptation lama_register["EnhancedCovarianceMatrixAdaptation"] = EnhancedCovarianceMatrixAdaptation - LLAMAEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAEnhancedCovarianceMatrixAdaptation" - ).set_name("LLAMAEnhancedCovarianceMatrixAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixAdaptation").set_name("LLAMAEnhancedCovarianceMatrixAdaptation", register=True) except Exception as e: print("EnhancedCovarianceMatrixAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolution import ( - EnhancedCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolution import EnhancedCovarianceMatrixEvolution lama_register["EnhancedCovarianceMatrixEvolution"] = EnhancedCovarianceMatrixEvolution - LLAMAEnhancedCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMAEnhancedCovarianceMatrixEvolution" - ).set_name("LLAMAEnhancedCovarianceMatrixEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolution").set_name("LLAMAEnhancedCovarianceMatrixEvolution", register=True) except Exception as e: print("EnhancedCovarianceMatrixEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolutionV2 import ( - EnhancedCovarianceMatrixEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolutionV2 import EnhancedCovarianceMatrixEvolutionV2 lama_register["EnhancedCovarianceMatrixEvolutionV2"] = EnhancedCovarianceMatrixEvolutionV2 - LLAMAEnhancedCovarianceMatrixEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedCovarianceMatrixEvolutionV2" - ).set_name("LLAMAEnhancedCovarianceMatrixEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCovarianceMatrixEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolutionV2").set_name("LLAMAEnhancedCovarianceMatrixEvolutionV2", register=True) except Exception as e: print("EnhancedCovarianceMatrixEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCrossoverElitistStrategyV9 import ( - EnhancedCrossoverElitistStrategyV9, - ) + from nevergrad.optimization.lama.EnhancedCrossoverElitistStrategyV9 import EnhancedCrossoverElitistStrategyV9 lama_register["EnhancedCrossoverElitistStrategyV9"] = EnhancedCrossoverElitistStrategyV9 - LLAMAEnhancedCrossoverElitistStrategyV9 = NonObjectOptimizer( - method="LLAMAEnhancedCrossoverElitistStrategyV9" - ).set_name("LLAMAEnhancedCrossoverElitistStrategyV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCrossoverElitistStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCrossoverElitistStrategyV9 = NonObjectOptimizer(method="LLAMAEnhancedCrossoverElitistStrategyV9").set_name("LLAMAEnhancedCrossoverElitistStrategyV9", register=True) except Exception as e: print("EnhancedCrossoverElitistStrategyV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCrowdingMemoryHybridOptimizer import ( - EnhancedCrowdingMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedCrowdingMemoryHybridOptimizer import EnhancedCrowdingMemoryHybridOptimizer lama_register["EnhancedCrowdingMemoryHybridOptimizer"] = EnhancedCrowdingMemoryHybridOptimizer - LLAMAEnhancedCrowdingMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedCrowdingMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedCrowdingMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCrowdingMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCrowdingMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedCrowdingMemoryHybridOptimizer").set_name("LLAMAEnhancedCrowdingMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedCrowdingMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCulturalAdaptiveDifferentialEvolution import ( - EnhancedCulturalAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedCulturalAdaptiveDifferentialEvolution import EnhancedCulturalAdaptiveDifferentialEvolution - lama_register["EnhancedCulturalAdaptiveDifferentialEvolution"] = ( - EnhancedCulturalAdaptiveDifferentialEvolution - ) - LLAMAEnhancedCulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedCulturalAdaptiveDifferentialEvolution", register=True) + lama_register["EnhancedCulturalAdaptiveDifferentialEvolution"] = EnhancedCulturalAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCulturalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedCulturalAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedCulturalAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCulturalEvolutionaryOptimizer import ( - EnhancedCulturalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.EnhancedCulturalEvolutionaryOptimizer import EnhancedCulturalEvolutionaryOptimizer lama_register["EnhancedCulturalEvolutionaryOptimizer"] = EnhancedCulturalEvolutionaryOptimizer - LLAMAEnhancedCulturalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedCulturalEvolutionaryOptimizer" - ).set_name("LLAMAEnhancedCulturalEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCulturalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEnhancedCulturalEvolutionaryOptimizer").set_name("LLAMAEnhancedCulturalEvolutionaryOptimizer", register=True) except Exception as e: print("EnhancedCulturalEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedCulturalMemeticDifferentialEvolution import ( - EnhancedCulturalMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedCulturalMemeticDifferentialEvolution import EnhancedCulturalMemeticDifferentialEvolution - lama_register["EnhancedCulturalMemeticDifferentialEvolution"] = ( - EnhancedCulturalMemeticDifferentialEvolution - ) - LLAMAEnhancedCulturalMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedCulturalMemeticDifferentialEvolution" - ).set_name("LLAMAEnhancedCulturalMemeticDifferentialEvolution", register=True) + lama_register["EnhancedCulturalMemeticDifferentialEvolution"] = EnhancedCulturalMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedCulturalMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedCulturalMemeticDifferentialEvolution").set_name("LLAMAEnhancedCulturalMemeticDifferentialEvolution", register=True) except Exception as e: print("EnhancedCulturalMemeticDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDifferentialEvolution import EnhancedDifferentialEvolution lama_register["EnhancedDifferentialEvolution"] = EnhancedDifferentialEvolution - LLAMAEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolution" - ).set_name("LLAMAEnhancedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolution").set_name("LLAMAEnhancedDifferentialEvolution", register=True) except Exception as e: print("EnhancedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptivePSO import ( - EnhancedDifferentialEvolutionAdaptivePSO, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptivePSO import EnhancedDifferentialEvolutionAdaptivePSO lama_register["EnhancedDifferentialEvolutionAdaptivePSO"] = EnhancedDifferentialEvolutionAdaptivePSO - LLAMAEnhancedDifferentialEvolutionAdaptivePSO = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO" - ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO").set_name("LLAMAEnhancedDifferentialEvolutionAdaptivePSO", register=True) except Exception as e: print("EnhancedDifferentialEvolutionAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptiveStrategy import ( - EnhancedDifferentialEvolutionAdaptiveStrategy, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptiveStrategy import EnhancedDifferentialEvolutionAdaptiveStrategy - lama_register["EnhancedDifferentialEvolutionAdaptiveStrategy"] = ( - EnhancedDifferentialEvolutionAdaptiveStrategy - ) - LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy" - ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy", register=True) + lama_register["EnhancedDifferentialEvolutionAdaptiveStrategy"] = EnhancedDifferentialEvolutionAdaptiveStrategy + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy").set_name("LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy", register=True) except Exception as e: print("EnhancedDifferentialEvolutionAdaptiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionFireworkAlgorithm import ( - EnhancedDifferentialEvolutionFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionFireworkAlgorithm import EnhancedDifferentialEvolutionFireworkAlgorithm - lama_register["EnhancedDifferentialEvolutionFireworkAlgorithm"] = ( - EnhancedDifferentialEvolutionFireworkAlgorithm - ) - LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm" - ).set_name("LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) + lama_register["EnhancedDifferentialEvolutionFireworkAlgorithm"] = EnhancedDifferentialEvolutionFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm").set_name("LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v15 import ( - EnhancedDifferentialEvolutionLSRefinement_v15, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v15 import EnhancedDifferentialEvolutionLSRefinement_v15 - lama_register["EnhancedDifferentialEvolutionLSRefinement_v15"] = ( - EnhancedDifferentialEvolutionLSRefinement_v15 - ) - LLAMAEnhancedDifferentialEvolutionLSRefinement_v15 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15" - ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v15", register=True) + lama_register["EnhancedDifferentialEvolutionLSRefinement_v15"] = EnhancedDifferentialEvolutionLSRefinement_v15 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v15 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v15", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLSRefinement_v15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v16 import ( - EnhancedDifferentialEvolutionLSRefinement_v16, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v16 import EnhancedDifferentialEvolutionLSRefinement_v16 - lama_register["EnhancedDifferentialEvolutionLSRefinement_v16"] = ( - EnhancedDifferentialEvolutionLSRefinement_v16 - ) - LLAMAEnhancedDifferentialEvolutionLSRefinement_v16 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16" - ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v16", register=True) + lama_register["EnhancedDifferentialEvolutionLSRefinement_v16"] = EnhancedDifferentialEvolutionLSRefinement_v16 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v16 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v16", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLSRefinement_v16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v17 import ( - EnhancedDifferentialEvolutionLSRefinement_v17, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v17 import EnhancedDifferentialEvolutionLSRefinement_v17 - lama_register["EnhancedDifferentialEvolutionLSRefinement_v17"] = ( - EnhancedDifferentialEvolutionLSRefinement_v17 - ) - LLAMAEnhancedDifferentialEvolutionLSRefinement_v17 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17" - ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v17", register=True) + lama_register["EnhancedDifferentialEvolutionLSRefinement_v17"] = EnhancedDifferentialEvolutionLSRefinement_v17 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v17 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v17", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLSRefinement_v17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v18 import ( - EnhancedDifferentialEvolutionLSRefinement_v18, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v18 import EnhancedDifferentialEvolutionLSRefinement_v18 - lama_register["EnhancedDifferentialEvolutionLSRefinement_v18"] = ( - EnhancedDifferentialEvolutionLSRefinement_v18 - ) - LLAMAEnhancedDifferentialEvolutionLSRefinement_v18 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18" - ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v18", register=True) + lama_register["EnhancedDifferentialEvolutionLSRefinement_v18"] = EnhancedDifferentialEvolutionLSRefinement_v18 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v18 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v18", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLSRefinement_v18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v19 import ( - EnhancedDifferentialEvolutionLSRefinement_v19, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v19 import EnhancedDifferentialEvolutionLSRefinement_v19 - lama_register["EnhancedDifferentialEvolutionLSRefinement_v19"] = ( - EnhancedDifferentialEvolutionLSRefinement_v19 - ) - LLAMAEnhancedDifferentialEvolutionLSRefinement_v19 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19" - ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v19", register=True) + lama_register["EnhancedDifferentialEvolutionLSRefinement_v19"] = EnhancedDifferentialEvolutionLSRefinement_v19 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v19 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v19", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLSRefinement_v19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v21 import ( - EnhancedDifferentialEvolutionLocalSearch_v21, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v21 import EnhancedDifferentialEvolutionLocalSearch_v21 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v21"] = ( - EnhancedDifferentialEvolutionLocalSearch_v21 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v21 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v21", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v21"] = EnhancedDifferentialEvolutionLocalSearch_v21 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v21 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v21", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v22 import ( - EnhancedDifferentialEvolutionLocalSearch_v22, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v22 import EnhancedDifferentialEvolutionLocalSearch_v22 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v22"] = ( - EnhancedDifferentialEvolutionLocalSearch_v22 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v22 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v22", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v22"] = EnhancedDifferentialEvolutionLocalSearch_v22 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v22 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v22", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v23 import ( - EnhancedDifferentialEvolutionLocalSearch_v23, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v23 import EnhancedDifferentialEvolutionLocalSearch_v23 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v23"] = ( - EnhancedDifferentialEvolutionLocalSearch_v23 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v23 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v23", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v23"] = EnhancedDifferentialEvolutionLocalSearch_v23 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v23 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v23", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v24 import ( - EnhancedDifferentialEvolutionLocalSearch_v24, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v24 import EnhancedDifferentialEvolutionLocalSearch_v24 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v24"] = ( - EnhancedDifferentialEvolutionLocalSearch_v24 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v24 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v24", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v24"] = EnhancedDifferentialEvolutionLocalSearch_v24 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v24 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v24", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v25 import ( - EnhancedDifferentialEvolutionLocalSearch_v25, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v25 import EnhancedDifferentialEvolutionLocalSearch_v25 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v25"] = ( - EnhancedDifferentialEvolutionLocalSearch_v25 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v25 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v25", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v25"] = EnhancedDifferentialEvolutionLocalSearch_v25 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v25 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v25", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v26 import ( - EnhancedDifferentialEvolutionLocalSearch_v26, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v26 import EnhancedDifferentialEvolutionLocalSearch_v26 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v26"] = ( - EnhancedDifferentialEvolutionLocalSearch_v26 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v26 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v26", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v26"] = EnhancedDifferentialEvolutionLocalSearch_v26 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v26 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v26", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v27 import ( - EnhancedDifferentialEvolutionLocalSearch_v27, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v27 import EnhancedDifferentialEvolutionLocalSearch_v27 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v27"] = ( - EnhancedDifferentialEvolutionLocalSearch_v27 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v27 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v27", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v27"] = EnhancedDifferentialEvolutionLocalSearch_v27 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v27 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v27", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v28 import ( - EnhancedDifferentialEvolutionLocalSearch_v28, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v28 import EnhancedDifferentialEvolutionLocalSearch_v28 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v28"] = ( - EnhancedDifferentialEvolutionLocalSearch_v28 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v28 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v28", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v28"] = EnhancedDifferentialEvolutionLocalSearch_v28 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v28 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v28", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v29 import ( - EnhancedDifferentialEvolutionLocalSearch_v29, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v29 import EnhancedDifferentialEvolutionLocalSearch_v29 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v29"] = ( - EnhancedDifferentialEvolutionLocalSearch_v29 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v29 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v29", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v29"] = EnhancedDifferentialEvolutionLocalSearch_v29 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v29 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v29", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v30 import ( - EnhancedDifferentialEvolutionLocalSearch_v30, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v30 import EnhancedDifferentialEvolutionLocalSearch_v30 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v30"] = ( - EnhancedDifferentialEvolutionLocalSearch_v30 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v30 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v30", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v30"] = EnhancedDifferentialEvolutionLocalSearch_v30 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v30 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v30", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v31 import ( - EnhancedDifferentialEvolutionLocalSearch_v31, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v31 import EnhancedDifferentialEvolutionLocalSearch_v31 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v31"] = ( - EnhancedDifferentialEvolutionLocalSearch_v31 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v31 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v31", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v31"] = EnhancedDifferentialEvolutionLocalSearch_v31 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v31 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v31", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v32 import ( - EnhancedDifferentialEvolutionLocalSearch_v32, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v32 import EnhancedDifferentialEvolutionLocalSearch_v32 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v32"] = ( - EnhancedDifferentialEvolutionLocalSearch_v32 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v32 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v32", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v32"] = EnhancedDifferentialEvolutionLocalSearch_v32 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v32 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v32", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v32 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v33 import ( - EnhancedDifferentialEvolutionLocalSearch_v33, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v33 import EnhancedDifferentialEvolutionLocalSearch_v33 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v33"] = ( - EnhancedDifferentialEvolutionLocalSearch_v33 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v33 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v33", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v33"] = EnhancedDifferentialEvolutionLocalSearch_v33 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v33 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v33", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v33 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v34 import ( - EnhancedDifferentialEvolutionLocalSearch_v34, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v34 import EnhancedDifferentialEvolutionLocalSearch_v34 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v34"] = ( - EnhancedDifferentialEvolutionLocalSearch_v34 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v34 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v34", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v34"] = EnhancedDifferentialEvolutionLocalSearch_v34 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v34 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v34", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v35 import ( - EnhancedDifferentialEvolutionLocalSearch_v35, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v35 import EnhancedDifferentialEvolutionLocalSearch_v35 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v35"] = ( - EnhancedDifferentialEvolutionLocalSearch_v35 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v35 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v35", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v35"] = EnhancedDifferentialEvolutionLocalSearch_v35 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v35 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v35", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v35 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v36 import ( - EnhancedDifferentialEvolutionLocalSearch_v36, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v36 import EnhancedDifferentialEvolutionLocalSearch_v36 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v36"] = ( - EnhancedDifferentialEvolutionLocalSearch_v36 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v36 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v36", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v36"] = EnhancedDifferentialEvolutionLocalSearch_v36 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v36 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v36", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v36 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v37 import ( - EnhancedDifferentialEvolutionLocalSearch_v37, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v37 import EnhancedDifferentialEvolutionLocalSearch_v37 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v37"] = ( - EnhancedDifferentialEvolutionLocalSearch_v37 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v37 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v37", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v37"] = EnhancedDifferentialEvolutionLocalSearch_v37 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v37 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v37", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v37 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v38 import ( - EnhancedDifferentialEvolutionLocalSearch_v38, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v38 import EnhancedDifferentialEvolutionLocalSearch_v38 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v38"] = ( - EnhancedDifferentialEvolutionLocalSearch_v38 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v38 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v38", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v38"] = EnhancedDifferentialEvolutionLocalSearch_v38 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v38 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v38", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v38 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v39 import ( - EnhancedDifferentialEvolutionLocalSearch_v39, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v39 import EnhancedDifferentialEvolutionLocalSearch_v39 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v39"] = ( - EnhancedDifferentialEvolutionLocalSearch_v39 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v39 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v39", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v39"] = EnhancedDifferentialEvolutionLocalSearch_v39 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v39 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v39", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v39 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v40 import ( - EnhancedDifferentialEvolutionLocalSearch_v40, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v40 import EnhancedDifferentialEvolutionLocalSearch_v40 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v40"] = ( - EnhancedDifferentialEvolutionLocalSearch_v40 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v40 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v40", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v40"] = EnhancedDifferentialEvolutionLocalSearch_v40 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v40 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v40", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v40 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v41 import ( - EnhancedDifferentialEvolutionLocalSearch_v41, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v41 import EnhancedDifferentialEvolutionLocalSearch_v41 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v41"] = ( - EnhancedDifferentialEvolutionLocalSearch_v41 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v41 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v41", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v41"] = EnhancedDifferentialEvolutionLocalSearch_v41 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v41 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v41", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v43 import ( - EnhancedDifferentialEvolutionLocalSearch_v43, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v43 import EnhancedDifferentialEvolutionLocalSearch_v43 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v43"] = ( - EnhancedDifferentialEvolutionLocalSearch_v43 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v43 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v43", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v43"] = EnhancedDifferentialEvolutionLocalSearch_v43 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v43 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v43", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v43 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v44 import ( - EnhancedDifferentialEvolutionLocalSearch_v44, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v44 import EnhancedDifferentialEvolutionLocalSearch_v44 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v44"] = ( - EnhancedDifferentialEvolutionLocalSearch_v44 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v44 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v44", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v44"] = EnhancedDifferentialEvolutionLocalSearch_v44 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v44 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v44", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v44 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v45 import ( - EnhancedDifferentialEvolutionLocalSearch_v45, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v45 import EnhancedDifferentialEvolutionLocalSearch_v45 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v45"] = ( - EnhancedDifferentialEvolutionLocalSearch_v45 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v45 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v45", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v45"] = EnhancedDifferentialEvolutionLocalSearch_v45 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v45 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v45", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v45 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v46 import ( - EnhancedDifferentialEvolutionLocalSearch_v46, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v46 import EnhancedDifferentialEvolutionLocalSearch_v46 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v46"] = ( - EnhancedDifferentialEvolutionLocalSearch_v46 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v46 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v46", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v46"] = EnhancedDifferentialEvolutionLocalSearch_v46 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v46 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v46", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v46 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v47 import ( - EnhancedDifferentialEvolutionLocalSearch_v47, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v47 import EnhancedDifferentialEvolutionLocalSearch_v47 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v47"] = ( - EnhancedDifferentialEvolutionLocalSearch_v47 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v47 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v47", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v47"] = EnhancedDifferentialEvolutionLocalSearch_v47 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v47 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v47", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v47 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v48 import ( - EnhancedDifferentialEvolutionLocalSearch_v48, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v48 import EnhancedDifferentialEvolutionLocalSearch_v48 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v48"] = ( - EnhancedDifferentialEvolutionLocalSearch_v48 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v48 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v48", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v48"] = EnhancedDifferentialEvolutionLocalSearch_v48 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v48 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v48", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v48 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v49 import ( - EnhancedDifferentialEvolutionLocalSearch_v49, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v49 import EnhancedDifferentialEvolutionLocalSearch_v49 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v49"] = ( - EnhancedDifferentialEvolutionLocalSearch_v49 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v49 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v49", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v49"] = EnhancedDifferentialEvolutionLocalSearch_v49 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v49 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v49", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v49 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v50 import ( - EnhancedDifferentialEvolutionLocalSearch_v50, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v50 import EnhancedDifferentialEvolutionLocalSearch_v50 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v50"] = ( - EnhancedDifferentialEvolutionLocalSearch_v50 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v50 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v50", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v50"] = EnhancedDifferentialEvolutionLocalSearch_v50 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v50 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v50", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v50 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v51 import ( - EnhancedDifferentialEvolutionLocalSearch_v51, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v51 import EnhancedDifferentialEvolutionLocalSearch_v51 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v51"] = ( - EnhancedDifferentialEvolutionLocalSearch_v51 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v51 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v51", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v51"] = EnhancedDifferentialEvolutionLocalSearch_v51 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v51 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v51", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v51 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v52 import ( - EnhancedDifferentialEvolutionLocalSearch_v52, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v52 import EnhancedDifferentialEvolutionLocalSearch_v52 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v52"] = ( - EnhancedDifferentialEvolutionLocalSearch_v52 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v52 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v52", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v52"] = EnhancedDifferentialEvolutionLocalSearch_v52 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v52 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v52", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v52 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v53 import ( - EnhancedDifferentialEvolutionLocalSearch_v53, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v53 import EnhancedDifferentialEvolutionLocalSearch_v53 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v53"] = ( - EnhancedDifferentialEvolutionLocalSearch_v53 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v53 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v53", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v53"] = EnhancedDifferentialEvolutionLocalSearch_v53 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v53 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v53", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v53 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v59 import ( - EnhancedDifferentialEvolutionLocalSearch_v59, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v59 import EnhancedDifferentialEvolutionLocalSearch_v59 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v59"] = ( - EnhancedDifferentialEvolutionLocalSearch_v59 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v59 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v59", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v59"] = EnhancedDifferentialEvolutionLocalSearch_v59 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v59 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v59", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v59 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v60 import ( - EnhancedDifferentialEvolutionLocalSearch_v60, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v60 import EnhancedDifferentialEvolutionLocalSearch_v60 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v60"] = ( - EnhancedDifferentialEvolutionLocalSearch_v60 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v60 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v60", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v60"] = EnhancedDifferentialEvolutionLocalSearch_v60 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v60 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v60", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v60 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v62 import ( - EnhancedDifferentialEvolutionLocalSearch_v62, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v62 import EnhancedDifferentialEvolutionLocalSearch_v62 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v62"] = ( - EnhancedDifferentialEvolutionLocalSearch_v62 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v62 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v62", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v62"] = EnhancedDifferentialEvolutionLocalSearch_v62 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v62 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v62", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v62 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v63 import ( - EnhancedDifferentialEvolutionLocalSearch_v63, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v63 import EnhancedDifferentialEvolutionLocalSearch_v63 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v63"] = ( - EnhancedDifferentialEvolutionLocalSearch_v63 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v63 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v63", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v63"] = EnhancedDifferentialEvolutionLocalSearch_v63 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v63 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v63", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v63 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v64 import ( - EnhancedDifferentialEvolutionLocalSearch_v64, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v64 import EnhancedDifferentialEvolutionLocalSearch_v64 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v64"] = ( - EnhancedDifferentialEvolutionLocalSearch_v64 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v64 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v64", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v64"] = EnhancedDifferentialEvolutionLocalSearch_v64 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v64 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v64", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v64 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v66 import ( - EnhancedDifferentialEvolutionLocalSearch_v66, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v66 import EnhancedDifferentialEvolutionLocalSearch_v66 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v66"] = ( - EnhancedDifferentialEvolutionLocalSearch_v66 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v66 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v66", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v66"] = EnhancedDifferentialEvolutionLocalSearch_v66 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v66 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v66", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v66 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v67 import ( - EnhancedDifferentialEvolutionLocalSearch_v67, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v67 import EnhancedDifferentialEvolutionLocalSearch_v67 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v67"] = ( - EnhancedDifferentialEvolutionLocalSearch_v67 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v67 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v67", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v67"] = EnhancedDifferentialEvolutionLocalSearch_v67 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v67 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v67", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v67 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v68 import ( - EnhancedDifferentialEvolutionLocalSearch_v68, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v68 import EnhancedDifferentialEvolutionLocalSearch_v68 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v68"] = ( - EnhancedDifferentialEvolutionLocalSearch_v68 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v68 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v68", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v68"] = EnhancedDifferentialEvolutionLocalSearch_v68 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v68 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v68", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v68 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v69 import ( - EnhancedDifferentialEvolutionLocalSearch_v69, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v69 import EnhancedDifferentialEvolutionLocalSearch_v69 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v69"] = ( - EnhancedDifferentialEvolutionLocalSearch_v69 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v69 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v69", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v69"] = EnhancedDifferentialEvolutionLocalSearch_v69 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v69 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v69", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v69 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v70 import ( - EnhancedDifferentialEvolutionLocalSearch_v70, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v70 import EnhancedDifferentialEvolutionLocalSearch_v70 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v70"] = ( - EnhancedDifferentialEvolutionLocalSearch_v70 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v70 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v70", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v70"] = EnhancedDifferentialEvolutionLocalSearch_v70 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v70 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v70", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v70 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v71 import ( - EnhancedDifferentialEvolutionLocalSearch_v71, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v71 import EnhancedDifferentialEvolutionLocalSearch_v71 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v71"] = ( - EnhancedDifferentialEvolutionLocalSearch_v71 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v71 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v71", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v71"] = EnhancedDifferentialEvolutionLocalSearch_v71 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v71 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v71", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v71 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v72 import ( - EnhancedDifferentialEvolutionLocalSearch_v72, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v72 import EnhancedDifferentialEvolutionLocalSearch_v72 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v72"] = ( - EnhancedDifferentialEvolutionLocalSearch_v72 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v72 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v72", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v72"] = EnhancedDifferentialEvolutionLocalSearch_v72 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v72 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v72", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v72 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v73 import ( - EnhancedDifferentialEvolutionLocalSearch_v73, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v73 import EnhancedDifferentialEvolutionLocalSearch_v73 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v73"] = ( - EnhancedDifferentialEvolutionLocalSearch_v73 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v73 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v73", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v73"] = EnhancedDifferentialEvolutionLocalSearch_v73 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v73 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v73", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v73 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v74 import ( - EnhancedDifferentialEvolutionLocalSearch_v74, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v74 import EnhancedDifferentialEvolutionLocalSearch_v74 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v74"] = ( - EnhancedDifferentialEvolutionLocalSearch_v74 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v74 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v74", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v74"] = EnhancedDifferentialEvolutionLocalSearch_v74 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v74 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v74", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v74 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v75 import ( - EnhancedDifferentialEvolutionLocalSearch_v75, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v75 import EnhancedDifferentialEvolutionLocalSearch_v75 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v75"] = ( - EnhancedDifferentialEvolutionLocalSearch_v75 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v75 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v75", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v75"] = EnhancedDifferentialEvolutionLocalSearch_v75 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v75 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v75", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v75 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v76 import ( - EnhancedDifferentialEvolutionLocalSearch_v76, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v76 import EnhancedDifferentialEvolutionLocalSearch_v76 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v76"] = ( - EnhancedDifferentialEvolutionLocalSearch_v76 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v76 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v76", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v76"] = EnhancedDifferentialEvolutionLocalSearch_v76 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v76 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v76", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v76 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v77 import ( - EnhancedDifferentialEvolutionLocalSearch_v77, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v77 import EnhancedDifferentialEvolutionLocalSearch_v77 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v77"] = ( - EnhancedDifferentialEvolutionLocalSearch_v77 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v77 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v77", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v77"] = EnhancedDifferentialEvolutionLocalSearch_v77 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v77 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v77", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v77 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v78 import ( - EnhancedDifferentialEvolutionLocalSearch_v78, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v78 import EnhancedDifferentialEvolutionLocalSearch_v78 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v78"] = ( - EnhancedDifferentialEvolutionLocalSearch_v78 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v78 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v78", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v78"] = EnhancedDifferentialEvolutionLocalSearch_v78 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v78 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v78", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v78 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v79 import ( - EnhancedDifferentialEvolutionLocalSearch_v79, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v79 import EnhancedDifferentialEvolutionLocalSearch_v79 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v79"] = ( - EnhancedDifferentialEvolutionLocalSearch_v79 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v79 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v79", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v79"] = EnhancedDifferentialEvolutionLocalSearch_v79 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v79 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v79", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v79 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v80 import ( - EnhancedDifferentialEvolutionLocalSearch_v80, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v80 import EnhancedDifferentialEvolutionLocalSearch_v80 - lama_register["EnhancedDifferentialEvolutionLocalSearch_v80"] = ( - EnhancedDifferentialEvolutionLocalSearch_v80 - ) - LLAMAEnhancedDifferentialEvolutionLocalSearch_v80 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80" - ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v80", register=True) + lama_register["EnhancedDifferentialEvolutionLocalSearch_v80"] = EnhancedDifferentialEvolutionLocalSearch_v80 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v80 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v80", register=True) except Exception as e: print("EnhancedDifferentialEvolutionLocalSearch_v80 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionOptimizer import ( - EnhancedDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionOptimizer import EnhancedDifferentialEvolutionOptimizer lama_register["EnhancedDifferentialEvolutionOptimizer"] = EnhancedDifferentialEvolutionOptimizer - LLAMAEnhancedDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionOptimizer" - ).set_name("LLAMAEnhancedDifferentialEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionOptimizer").set_name("LLAMAEnhancedDifferentialEvolutionOptimizer", register=True) except Exception as e: print("EnhancedDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizer import ( - EnhancedDifferentialEvolutionParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizer import EnhancedDifferentialEvolutionParticleSwarmOptimizer - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizer"] = ( - EnhancedDifferentialEvolutionParticleSwarmOptimizer - ) - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer", register=True) + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizer"] = EnhancedDifferentialEvolutionParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 import ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV2"] = ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 - ) - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2" - ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2", register=True) + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV2"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2", register=True) except Exception as e: print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 import ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV3"] = ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 - ) - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3" - ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3", register=True) + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV3"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3", register=True) except Exception as e: print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 import ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV4"] = ( - EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 - ) - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4" - ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4", register=True) + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV4"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4", register=True) except Exception as e: print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionWithAdaptiveMutationControl import ( - EnhancedDifferentialEvolutionWithAdaptiveMutationControl, - ) + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionWithAdaptiveMutationControl import EnhancedDifferentialEvolutionWithAdaptiveMutationControl - lama_register["EnhancedDifferentialEvolutionWithAdaptiveMutationControl"] = ( - EnhancedDifferentialEvolutionWithAdaptiveMutationControl - ) - LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl" - ).set_name("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl", register=True) + lama_register["EnhancedDifferentialEvolutionWithAdaptiveMutationControl"] = EnhancedDifferentialEvolutionWithAdaptiveMutationControl + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl").set_name("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl", register=True) except Exception as e: print("EnhancedDifferentialEvolutionWithAdaptiveMutationControl can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm import ( - EnhancedDifferentialFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm import EnhancedDifferentialFireworkAlgorithm lama_register["EnhancedDifferentialFireworkAlgorithm"] = EnhancedDifferentialFireworkAlgorithm - LLAMAEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialFireworkAlgorithm" - ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm").set_name("LLAMAEnhancedDifferentialFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDifferentialFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm_v2 import ( - EnhancedDifferentialFireworkAlgorithm_v2, - ) + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm_v2 import EnhancedDifferentialFireworkAlgorithm_v2 lama_register["EnhancedDifferentialFireworkAlgorithm_v2"] = EnhancedDifferentialFireworkAlgorithm_v2 - LLAMAEnhancedDifferentialFireworkAlgorithm_v2 = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2" - ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialFireworkAlgorithm_v2 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2").set_name("LLAMAEnhancedDifferentialFireworkAlgorithm_v2", register=True) except Exception as e: print("EnhancedDifferentialFireworkAlgorithm_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentialSimulatedAnnealingOptimizer import ( - EnhancedDifferentialSimulatedAnnealingOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDifferentialSimulatedAnnealingOptimizer import EnhancedDifferentialSimulatedAnnealingOptimizer - lama_register["EnhancedDifferentialSimulatedAnnealingOptimizer"] = ( - EnhancedDifferentialSimulatedAnnealingOptimizer - ) - LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer" - ).set_name("LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer", register=True) + lama_register["EnhancedDifferentialSimulatedAnnealingOptimizer"] = EnhancedDifferentialSimulatedAnnealingOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer").set_name("LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer", register=True) except Exception as e: print("EnhancedDifferentialSimulatedAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDifferentiatedAdaptiveEvolution import ( - EnhancedDifferentiatedAdaptiveEvolution, - ) + from nevergrad.optimization.lama.EnhancedDifferentiatedAdaptiveEvolution import EnhancedDifferentiatedAdaptiveEvolution lama_register["EnhancedDifferentiatedAdaptiveEvolution"] = EnhancedDifferentiatedAdaptiveEvolution - LLAMAEnhancedDifferentiatedAdaptiveEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDifferentiatedAdaptiveEvolution" - ).set_name("LLAMAEnhancedDifferentiatedAdaptiveEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDifferentiatedAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDifferentiatedAdaptiveEvolution = NonObjectOptimizer(method="LLAMAEnhancedDifferentiatedAdaptiveEvolution").set_name("LLAMAEnhancedDifferentiatedAdaptiveEvolution", register=True) except Exception as e: print("EnhancedDifferentiatedAdaptiveEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDimensionalFeedbackEvolverV3 import ( - EnhancedDimensionalFeedbackEvolverV3, - ) + from nevergrad.optimization.lama.EnhancedDimensionalFeedbackEvolverV3 import EnhancedDimensionalFeedbackEvolverV3 lama_register["EnhancedDimensionalFeedbackEvolverV3"] = EnhancedDimensionalFeedbackEvolverV3 - LLAMAEnhancedDimensionalFeedbackEvolverV3 = NonObjectOptimizer( - method="LLAMAEnhancedDimensionalFeedbackEvolverV3" - ).set_name("LLAMAEnhancedDimensionalFeedbackEvolverV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDimensionalFeedbackEvolverV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDimensionalFeedbackEvolverV3 = NonObjectOptimizer(method="LLAMAEnhancedDimensionalFeedbackEvolverV3").set_name("LLAMAEnhancedDimensionalFeedbackEvolverV3", register=True) except Exception as e: print("EnhancedDimensionalFeedbackEvolverV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiverseMemoryHybridOptimizer import ( - EnhancedDiverseMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDiverseMemoryHybridOptimizer import EnhancedDiverseMemoryHybridOptimizer lama_register["EnhancedDiverseMemoryHybridOptimizer"] = EnhancedDiverseMemoryHybridOptimizer - LLAMAEnhancedDiverseMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDiverseMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedDiverseMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiverseMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiverseMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiverseMemoryHybridOptimizer").set_name("LLAMAEnhancedDiverseMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedDiverseMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedAdaptiveHarmonySearch import ( - EnhancedDiversifiedAdaptiveHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedAdaptiveHarmonySearch import EnhancedDiversifiedAdaptiveHarmonySearch lama_register["EnhancedDiversifiedAdaptiveHarmonySearch"] = EnhancedDiversifiedAdaptiveHarmonySearch - LLAMAEnhancedDiversifiedAdaptiveHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch" - ).set_name("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch").set_name("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch", register=True) except Exception as e: print("EnhancedDiversifiedAdaptiveHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithm import ( - EnhancedDiversifiedCuckooFireworksAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithm import EnhancedDiversifiedCuckooFireworksAlgorithm lama_register["EnhancedDiversifiedCuckooFireworksAlgorithm"] = EnhancedDiversifiedCuckooFireworksAlgorithm - LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm" - ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm").set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm", register=True) except Exception as e: print("EnhancedDiversifiedCuckooFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithmV2 import ( - EnhancedDiversifiedCuckooFireworksAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithmV2 import EnhancedDiversifiedCuckooFireworksAlgorithmV2 - lama_register["EnhancedDiversifiedCuckooFireworksAlgorithmV2"] = ( - EnhancedDiversifiedCuckooFireworksAlgorithmV2 - ) - LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2" - ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2", register=True) + lama_register["EnhancedDiversifiedCuckooFireworksAlgorithmV2"] = EnhancedDiversifiedCuckooFireworksAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2").set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2", register=True) except Exception as e: print("EnhancedDiversifiedCuckooFireworksAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimization import ( - EnhancedDiversifiedGravitationalSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimization import EnhancedDiversifiedGravitationalSwarmOptimization - lama_register["EnhancedDiversifiedGravitationalSwarmOptimization"] = ( - EnhancedDiversifiedGravitationalSwarmOptimization - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimization"] = EnhancedDiversifiedGravitationalSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV2 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV2 import EnhancedDiversifiedGravitationalSwarmOptimizationV2 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV2"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV2 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV2"] = EnhancedDiversifiedGravitationalSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV3 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV3 import EnhancedDiversifiedGravitationalSwarmOptimizationV3 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV3"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV3 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV3"] = EnhancedDiversifiedGravitationalSwarmOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV4 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV4 import EnhancedDiversifiedGravitationalSwarmOptimizationV4 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV4"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV4 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV4"] = EnhancedDiversifiedGravitationalSwarmOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV5 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV5 import EnhancedDiversifiedGravitationalSwarmOptimizationV5 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV5"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV5 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV5"] = EnhancedDiversifiedGravitationalSwarmOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV6 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV6 import EnhancedDiversifiedGravitationalSwarmOptimizationV6 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV6"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV6 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV6"] = EnhancedDiversifiedGravitationalSwarmOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV7 import ( - EnhancedDiversifiedGravitationalSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV7 import EnhancedDiversifiedGravitationalSwarmOptimizationV7 - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV7"] = ( - EnhancedDiversifiedGravitationalSwarmOptimizationV7 - ) - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7" - ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7", register=True) + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV7"] = EnhancedDiversifiedGravitationalSwarmOptimizationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedDiversifiedGravitationalSwarmOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer import ( - EnhancedDiversifiedHarmonicHarmonyOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer import EnhancedDiversifiedHarmonicHarmonyOptimizer lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer"] = EnhancedDiversifiedHarmonicHarmonyOptimizer - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer" - ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer", register=True) except Exception as e: print("EnhancedDiversifiedHarmonicHarmonyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 import ( - EnhancedDiversifiedHarmonicHarmonyOptimizer_V2, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 import EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 - lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V2"] = ( - EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 - ) - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2" - ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2", register=True) + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V2"] = EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2", register=True) except Exception as e: print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 import ( - EnhancedDiversifiedHarmonicHarmonyOptimizer_V3, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 import EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 - lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V3"] = ( - EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 - ) - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3" - ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3", register=True) + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V3"] = EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3", register=True) except Exception as e: print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyAlgorithm import ( - EnhancedDiversifiedHarmonyAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyAlgorithm import EnhancedDiversifiedHarmonyAlgorithm lama_register["EnhancedDiversifiedHarmonyAlgorithm"] = EnhancedDiversifiedHarmonyAlgorithm - LLAMAEnhancedDiversifiedHarmonyAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonyAlgorithm" - ).set_name("LLAMAEnhancedDiversifiedHarmonyAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyAlgorithm").set_name("LLAMAEnhancedDiversifiedHarmonyAlgorithm", register=True) except Exception as e: print("EnhancedDiversifiedHarmonyAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithm import ( - EnhancedDiversifiedHarmonyFireworksAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithm import EnhancedDiversifiedHarmonyFireworksAlgorithm - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithm"] = ( - EnhancedDiversifiedHarmonyFireworksAlgorithm - ) - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm" - ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm", register=True) + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithm"] = EnhancedDiversifiedHarmonyFireworksAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm", register=True) except Exception as e: print("EnhancedDiversifiedHarmonyFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV2 import ( - EnhancedDiversifiedHarmonyFireworksAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV2 import EnhancedDiversifiedHarmonyFireworksAlgorithmV2 - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV2"] = ( - EnhancedDiversifiedHarmonyFireworksAlgorithmV2 - ) - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2" - ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2", register=True) + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV2"] = EnhancedDiversifiedHarmonyFireworksAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2", register=True) except Exception as e: print("EnhancedDiversifiedHarmonyFireworksAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV3 import ( - EnhancedDiversifiedHarmonyFireworksAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV3 import EnhancedDiversifiedHarmonyFireworksAlgorithmV3 - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV3"] = ( - EnhancedDiversifiedHarmonyFireworksAlgorithmV3 - ) - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3" - ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3", register=True) + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV3"] = EnhancedDiversifiedHarmonyFireworksAlgorithmV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3", register=True) except Exception as e: print("EnhancedDiversifiedHarmonyFireworksAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonySearchOptimizer import ( - EnhancedDiversifiedHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonySearchOptimizer import EnhancedDiversifiedHarmonySearchOptimizer lama_register["EnhancedDiversifiedHarmonySearchOptimizer"] = EnhancedDiversifiedHarmonySearchOptimizer - LLAMAEnhancedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedDiversifiedHarmonySearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedDiversifiedHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedDiversifiedHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV3 import ( - EnhancedDiversifiedMetaHeuristicAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV3 import EnhancedDiversifiedMetaHeuristicAlgorithmV3 lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV3"] = EnhancedDiversifiedMetaHeuristicAlgorithmV3 - LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3" - ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3").set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3", register=True) except Exception as e: print("EnhancedDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV4 import ( - EnhancedDiversifiedMetaHeuristicAlgorithmV4, - ) + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV4 import EnhancedDiversifiedMetaHeuristicAlgorithmV4 lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV4"] = EnhancedDiversifiedMetaHeuristicAlgorithmV4 - LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( - method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4" - ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4").set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4", register=True) except Exception as e: print("EnhancedDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization import ( - EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization import EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization - lama_register["EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization"] = ( - EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization - ) - LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization" - ).set_name("LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization", register=True) + lama_register["EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization"] = EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization").set_name("LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization", register=True) except Exception as e: print("EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( - EnhancedDualPhaseAdaptiveHybridOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizationV3 import EnhancedDualPhaseAdaptiveHybridOptimizationV3 - lama_register["EnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( - EnhancedDualPhaseAdaptiveHybridOptimizationV3 - ) - LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3" - ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) + lama_register["EnhancedDualPhaseAdaptiveHybridOptimizationV3"] = EnhancedDualPhaseAdaptiveHybridOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3").set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) except Exception as e: print("EnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizerV3 import ( - EnhancedDualPhaseAdaptiveHybridOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizerV3 import EnhancedDualPhaseAdaptiveHybridOptimizerV3 lama_register["EnhancedDualPhaseAdaptiveHybridOptimizerV3"] = EnhancedDualPhaseAdaptiveHybridOptimizerV3 - LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3" - ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3").set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3", register=True) except Exception as e: print("EnhancedDualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution import ( - EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution import EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution - lama_register["EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution"] = ( - EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution - ) - LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution", register=True) + lama_register["EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution"] = EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseDifferentialEvolution import ( - EnhancedDualPhaseDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseDifferentialEvolution import EnhancedDualPhaseDifferentialEvolution lama_register["EnhancedDualPhaseDifferentialEvolution"] = EnhancedDualPhaseDifferentialEvolution - LLAMAEnhancedDualPhaseDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseDifferentialEvolution" - ).set_name("LLAMAEnhancedDualPhaseDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseDifferentialEvolution").set_name("LLAMAEnhancedDualPhaseDifferentialEvolution", register=True) except Exception as e: print("EnhancedDualPhaseDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimization import ( - EnhancedDualPhaseHybridOptimization, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimization import EnhancedDualPhaseHybridOptimization lama_register["EnhancedDualPhaseHybridOptimization"] = EnhancedDualPhaseHybridOptimization - LLAMAEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseHybridOptimization" - ).set_name("LLAMAEnhancedDualPhaseHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimization").set_name("LLAMAEnhancedDualPhaseHybridOptimization", register=True) except Exception as e: print("EnhancedDualPhaseHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimizationV2 import ( - EnhancedDualPhaseHybridOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimizationV2 import EnhancedDualPhaseHybridOptimizationV2 lama_register["EnhancedDualPhaseHybridOptimizationV2"] = EnhancedDualPhaseHybridOptimizationV2 - LLAMAEnhancedDualPhaseHybridOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedDualPhaseHybridOptimizationV2" - ).set_name("LLAMAEnhancedDualPhaseHybridOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualPhaseHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimizationV2").set_name("LLAMAEnhancedDualPhaseHybridOptimizationV2", register=True) except Exception as e: print("EnhancedDualPhaseHybridOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualStrategyAdaptiveDE_v2 import ( - EnhancedDualStrategyAdaptiveDE_v2, - ) + from nevergrad.optimization.lama.EnhancedDualStrategyAdaptiveDE_v2 import EnhancedDualStrategyAdaptiveDE_v2 lama_register["EnhancedDualStrategyAdaptiveDE_v2"] = EnhancedDualStrategyAdaptiveDE_v2 - LLAMAEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( - method="LLAMAEnhancedDualStrategyAdaptiveDE_v2" - ).set_name("LLAMAEnhancedDualStrategyAdaptiveDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyAdaptiveDE_v2").set_name("LLAMAEnhancedDualStrategyAdaptiveDE_v2", register=True) except Exception as e: print("EnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDualStrategyHybridOptimizer import ( - EnhancedDualStrategyHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDualStrategyHybridOptimizer import EnhancedDualStrategyHybridOptimizer lama_register["EnhancedDualStrategyHybridOptimizer"] = EnhancedDualStrategyHybridOptimizer - LLAMAEnhancedDualStrategyHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDualStrategyHybridOptimizer" - ).set_name("LLAMAEnhancedDualStrategyHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDualStrategyHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyHybridOptimizer").set_name("LLAMAEnhancedDualStrategyHybridOptimizer", register=True) except Exception as e: print("EnhancedDualStrategyHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveClimbingStrategy import ( - EnhancedDynamicAdaptiveClimbingStrategy, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveClimbingStrategy import EnhancedDynamicAdaptiveClimbingStrategy lama_register["EnhancedDynamicAdaptiveClimbingStrategy"] = EnhancedDynamicAdaptiveClimbingStrategy - LLAMAEnhancedDynamicAdaptiveClimbingStrategy = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy" - ).set_name("LLAMAEnhancedDynamicAdaptiveClimbingStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveClimbingStrategy = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy").set_name("LLAMAEnhancedDynamicAdaptiveClimbingStrategy", register=True) except Exception as e: print("EnhancedDynamicAdaptiveClimbingStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDE import EnhancedDynamicAdaptiveDE lama_register["EnhancedDynamicAdaptiveDE"] = EnhancedDynamicAdaptiveDE - LLAMAEnhancedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE").set_name( - "LLAMAEnhancedDynamicAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE").set_name("LLAMAEnhancedDynamicAdaptiveDE", register=True) except Exception as e: print("EnhancedDynamicAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolution import ( - EnhancedDynamicAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolution import EnhancedDynamicAdaptiveDifferentialEvolution - lama_register["EnhancedDynamicAdaptiveDifferentialEvolution"] = ( - EnhancedDynamicAdaptiveDifferentialEvolution - ) - LLAMAEnhancedDynamicAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution", register=True) + lama_register["EnhancedDynamicAdaptiveDifferentialEvolution"] = EnhancedDynamicAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedDynamicAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation import ( - EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation import EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation"] = ( - EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation - ) - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation" - ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation", register=True) + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation"] = EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation", register=True) except Exception as e: print("EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionRefined import ( - EnhancedDynamicAdaptiveDifferentialEvolutionRefined, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionRefined import EnhancedDynamicAdaptiveDifferentialEvolutionRefined - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionRefined"] = ( - EnhancedDynamicAdaptiveDifferentialEvolutionRefined - ) - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined" - ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined", register=True) + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionRefined"] = EnhancedDynamicAdaptiveDifferentialEvolutionRefined + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined", register=True) except Exception as e: print("EnhancedDynamicAdaptiveDifferentialEvolutionRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionV2 import ( - EnhancedDynamicAdaptiveDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionV2 import EnhancedDynamicAdaptiveDifferentialEvolutionV2 - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionV2"] = ( - EnhancedDynamicAdaptiveDifferentialEvolutionV2 - ) - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2" - ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2", register=True) + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionV2"] = EnhancedDynamicAdaptiveDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2", register=True) except Exception as e: print("EnhancedDynamicAdaptiveDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveExplorationOptimization import ( - EnhancedDynamicAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveExplorationOptimization import EnhancedDynamicAdaptiveExplorationOptimization - lama_register["EnhancedDynamicAdaptiveExplorationOptimization"] = ( - EnhancedDynamicAdaptiveExplorationOptimization - ) - LLAMAEnhancedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization" - ).set_name("LLAMAEnhancedDynamicAdaptiveExplorationOptimization", register=True) + lama_register["EnhancedDynamicAdaptiveExplorationOptimization"] = EnhancedDynamicAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization").set_name("LLAMAEnhancedDynamicAdaptiveExplorationOptimization", register=True) except Exception as e: print("EnhancedDynamicAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveFireworkAlgorithm import ( - EnhancedDynamicAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveFireworkAlgorithm import EnhancedDynamicAdaptiveFireworkAlgorithm lama_register["EnhancedDynamicAdaptiveFireworkAlgorithm"] = EnhancedDynamicAdaptiveFireworkAlgorithm - LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm" - ).set_name("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDynamicAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( - EnhancedDynamicAdaptiveGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligence import EnhancedDynamicAdaptiveGravitationalSwarmIntelligence - lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( - EnhancedDynamicAdaptiveGravitationalSwarmIntelligence - ) - LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" - ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = EnhancedDynamicAdaptiveGravitationalSwarmIntelligence + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) except Exception as e: print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( - EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 import EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 - lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( - EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 - ) - LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2" - ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2"] = EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) except Exception as e: print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizer import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizer import EnhancedDynamicAdaptiveHarmonySearchOptimizer - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizer"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizer - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizer"] = EnhancedDynamicAdaptiveHarmonySearchOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV2"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV2"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV3"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV3"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV4"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV4"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV5"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV5"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 import ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV6, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV6"] = ( - EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 - ) - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6" - ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6", register=True) + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV6"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridDEPSO import ( - EnhancedDynamicAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridDEPSO import EnhancedDynamicAdaptiveHybridDEPSO lama_register["EnhancedDynamicAdaptiveHybridDEPSO"] = EnhancedDynamicAdaptiveHybridDEPSO - LLAMAEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO" - ).set_name("LLAMAEnhancedDynamicAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO").set_name("LLAMAEnhancedDynamicAdaptiveHybridDEPSO", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimization import ( - EnhancedDynamicAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimization import EnhancedDynamicAdaptiveHybridOptimization lama_register["EnhancedDynamicAdaptiveHybridOptimization"] = EnhancedDynamicAdaptiveHybridOptimization - LLAMAEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHybridOptimization" - ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimization", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimizer import ( - EnhancedDynamicAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimizer import EnhancedDynamicAdaptiveHybridOptimizer lama_register["EnhancedDynamicAdaptiveHybridOptimizer"] = EnhancedDynamicAdaptiveHybridOptimizer - LLAMAEnhancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer" - ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer").set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimizer", register=True) except Exception as e: print("EnhancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryAnnealing import ( - EnhancedDynamicAdaptiveMemoryAnnealing, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryAnnealing import EnhancedDynamicAdaptiveMemoryAnnealing lama_register["EnhancedDynamicAdaptiveMemoryAnnealing"] = EnhancedDynamicAdaptiveMemoryAnnealing - LLAMAEnhancedDynamicAdaptiveMemoryAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing" - ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing").set_name("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing", register=True) except Exception as e: print("EnhancedDynamicAdaptiveMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryStrategyV59 import ( - EnhancedDynamicAdaptiveMemoryStrategyV59, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryStrategyV59 import EnhancedDynamicAdaptiveMemoryStrategyV59 lama_register["EnhancedDynamicAdaptiveMemoryStrategyV59"] = EnhancedDynamicAdaptiveMemoryStrategyV59 - LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59" - ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59").set_name("LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59", register=True) except Exception as e: print("EnhancedDynamicAdaptiveMemoryStrategyV59 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveOptimizerV8 import ( - EnhancedDynamicAdaptiveOptimizerV8, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveOptimizerV8 import EnhancedDynamicAdaptiveOptimizerV8 lama_register["EnhancedDynamicAdaptiveOptimizerV8"] = EnhancedDynamicAdaptiveOptimizerV8 - LLAMAEnhancedDynamicAdaptiveOptimizerV8 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveOptimizerV8" - ).set_name("LLAMAEnhancedDynamicAdaptiveOptimizerV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveOptimizerV8").set_name("LLAMAEnhancedDynamicAdaptiveOptimizerV8", register=True) except Exception as e: print("EnhancedDynamicAdaptiveOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptivePopulationDifferentialEvolution import ( - EnhancedDynamicAdaptivePopulationDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptivePopulationDifferentialEvolution import EnhancedDynamicAdaptivePopulationDifferentialEvolution - lama_register["EnhancedDynamicAdaptivePopulationDifferentialEvolution"] = ( - EnhancedDynamicAdaptivePopulationDifferentialEvolution - ) - LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution" - ).set_name("LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution", register=True) + lama_register["EnhancedDynamicAdaptivePopulationDifferentialEvolution"] = EnhancedDynamicAdaptivePopulationDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution").set_name("LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution", register=True) except Exception as e: print("EnhancedDynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveQuantumPSO import ( - EnhancedDynamicAdaptiveQuantumPSO, - ) + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveQuantumPSO import EnhancedDynamicAdaptiveQuantumPSO lama_register["EnhancedDynamicAdaptiveQuantumPSO"] = EnhancedDynamicAdaptiveQuantumPSO - LLAMAEnhancedDynamicAdaptiveQuantumPSO = NonObjectOptimizer( - method="LLAMAEnhancedDynamicAdaptiveQuantumPSO" - ).set_name("LLAMAEnhancedDynamicAdaptiveQuantumPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveQuantumPSO").set_name("LLAMAEnhancedDynamicAdaptiveQuantumPSO", register=True) except Exception as e: print("EnhancedDynamicAdaptiveQuantumPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicBalancingPSO import EnhancedDynamicBalancingPSO lama_register["EnhancedDynamicBalancingPSO"] = EnhancedDynamicBalancingPSO - LLAMAEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO").set_name( - "LLAMAEnhancedDynamicBalancingPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO").set_name("LLAMAEnhancedDynamicBalancingPSO", register=True) except Exception as e: print("EnhancedDynamicBalancingPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicClusterOptimization import ( - EnhancedDynamicClusterOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicClusterOptimization import EnhancedDynamicClusterOptimization lama_register["EnhancedDynamicClusterOptimization"] = EnhancedDynamicClusterOptimization - LLAMAEnhancedDynamicClusterOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicClusterOptimization" - ).set_name("LLAMAEnhancedDynamicClusterOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicClusterOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterOptimization").set_name("LLAMAEnhancedDynamicClusterOptimization", register=True) except Exception as e: print("EnhancedDynamicClusterOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicClusterSearch import EnhancedDynamicClusterSearch lama_register["EnhancedDynamicClusterSearch"] = EnhancedDynamicClusterSearch - LLAMAEnhancedDynamicClusterSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicClusterSearch" - ).set_name("LLAMAEnhancedDynamicClusterSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicClusterSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterSearch").set_name("LLAMAEnhancedDynamicClusterSearch", register=True) except Exception as e: print("EnhancedDynamicClusterSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicCohortOptimization import ( - EnhancedDynamicCohortOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicCohortOptimization import EnhancedDynamicCohortOptimization lama_register["EnhancedDynamicCohortOptimization"] = EnhancedDynamicCohortOptimization - LLAMAEnhancedDynamicCohortOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicCohortOptimization" - ).set_name("LLAMAEnhancedDynamicCohortOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicCohortOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicCohortOptimization").set_name("LLAMAEnhancedDynamicCohortOptimization", register=True) except Exception as e: print("EnhancedDynamicCohortOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicCrossoverRAMEDS import EnhancedDynamicCrossoverRAMEDS lama_register["EnhancedDynamicCrossoverRAMEDS"] = EnhancedDynamicCrossoverRAMEDS - LLAMAEnhancedDynamicCrossoverRAMEDS = NonObjectOptimizer( - method="LLAMAEnhancedDynamicCrossoverRAMEDS" - ).set_name("LLAMAEnhancedDynamicCrossoverRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCrossoverRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicCrossoverRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedDynamicCrossoverRAMEDS").set_name("LLAMAEnhancedDynamicCrossoverRAMEDS", register=True) except Exception as e: print("EnhancedDynamicCrossoverRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicCuckooHarmonyAlgorithm import ( - EnhancedDynamicCuckooHarmonyAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDynamicCuckooHarmonyAlgorithm import EnhancedDynamicCuckooHarmonyAlgorithm lama_register["EnhancedDynamicCuckooHarmonyAlgorithm"] = EnhancedDynamicCuckooHarmonyAlgorithm - LLAMAEnhancedDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm" - ).set_name("LLAMAEnhancedDynamicCuckooHarmonyAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm").set_name("LLAMAEnhancedDynamicCuckooHarmonyAlgorithm", register=True) except Exception as e: print("EnhancedDynamicCuckooHarmonyAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolution import ( - EnhancedDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolution import EnhancedDynamicDifferentialEvolution lama_register["EnhancedDynamicDifferentialEvolution"] = EnhancedDynamicDifferentialEvolution - LLAMAEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolution" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolution").set_name("LLAMAEnhancedDynamicDifferentialEvolution", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionImproved import ( - EnhancedDynamicDifferentialEvolutionImproved, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionImproved import EnhancedDynamicDifferentialEvolutionImproved - lama_register["EnhancedDynamicDifferentialEvolutionImproved"] = ( - EnhancedDynamicDifferentialEvolutionImproved - ) - LLAMAEnhancedDynamicDifferentialEvolutionImproved = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionImproved" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionImproved", register=True) + lama_register["EnhancedDynamicDifferentialEvolutionImproved"] = EnhancedDynamicDifferentialEvolutionImproved + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionImproved").set_name("LLAMAEnhancedDynamicDifferentialEvolutionImproved", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionRefined import ( - EnhancedDynamicDifferentialEvolutionRefined, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionRefined import EnhancedDynamicDifferentialEvolutionRefined lama_register["EnhancedDynamicDifferentialEvolutionRefined"] = EnhancedDynamicDifferentialEvolutionRefined - LLAMAEnhancedDynamicDifferentialEvolutionRefined = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionRefined" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionRefined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionRefined").set_name("LLAMAEnhancedDynamicDifferentialEvolutionRefined", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV2 import ( - EnhancedDynamicDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV2 import EnhancedDynamicDifferentialEvolutionV2 lama_register["EnhancedDynamicDifferentialEvolutionV2"] = EnhancedDynamicDifferentialEvolutionV2 - LLAMAEnhancedDynamicDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionV2" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicDifferentialEvolutionV2", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV3 import ( - EnhancedDynamicDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV3 import EnhancedDynamicDifferentialEvolutionV3 lama_register["EnhancedDynamicDifferentialEvolutionV3"] = EnhancedDynamicDifferentialEvolutionV3 - LLAMAEnhancedDynamicDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionV3" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV3").set_name("LLAMAEnhancedDynamicDifferentialEvolutionV3", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover import ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover"] = ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover - ) - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover", register=True) + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation import ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation"] = ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation - ) - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation", register=True) + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined import ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined"] = ( - EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined - ) - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined" - ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined", register=True) + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined", register=True) except Exception as e: print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover import ( - EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover, - ) + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover import EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover - lama_register["EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover"] = ( - EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover - ) - LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover" - ).set_name( - "LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover", register=True - ) + lama_register["EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover"] = EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover", register=True) except Exception as e: - print( - "EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover can not be imported: ", e - ) - + print("EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicDiversifiedHarmonySearchOptimizer import ( - EnhancedDynamicDiversifiedHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicDiversifiedHarmonySearchOptimizer import EnhancedDynamicDiversifiedHarmonySearchOptimizer - lama_register["EnhancedDynamicDiversifiedHarmonySearchOptimizer"] = ( - EnhancedDynamicDiversifiedHarmonySearchOptimizer - ) - LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer", register=True) + lama_register["EnhancedDynamicDiversifiedHarmonySearchOptimizer"] = EnhancedDynamicDiversifiedHarmonySearchOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedDynamicDiversifiedHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicDualPhaseStrategyV12 import ( - EnhancedDynamicDualPhaseStrategyV12, - ) + from nevergrad.optimization.lama.EnhancedDynamicDualPhaseStrategyV12 import EnhancedDynamicDualPhaseStrategyV12 lama_register["EnhancedDynamicDualPhaseStrategyV12"] = EnhancedDynamicDualPhaseStrategyV12 - LLAMAEnhancedDynamicDualPhaseStrategyV12 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicDualPhaseStrategyV12" - ).set_name("LLAMAEnhancedDynamicDualPhaseStrategyV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDualPhaseStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicDualPhaseStrategyV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDualPhaseStrategyV12").set_name("LLAMAEnhancedDynamicDualPhaseStrategyV12", register=True) except Exception as e: print("EnhancedDynamicDualPhaseStrategyV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicEliteAnnealingDE import EnhancedDynamicEliteAnnealingDE lama_register["EnhancedDynamicEliteAnnealingDE"] = EnhancedDynamicEliteAnnealingDE - LLAMAEnhancedDynamicEliteAnnealingDE = NonObjectOptimizer( - method="LLAMAEnhancedDynamicEliteAnnealingDE" - ).set_name("LLAMAEnhancedDynamicEliteAnnealingDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicEliteAnnealingDE").set_name("LLAMAEnhancedDynamicEliteAnnealingDE", register=True) except Exception as e: print("EnhancedDynamicEliteAnnealingDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicEscapeStrategyV32 import EnhancedDynamicEscapeStrategyV32 lama_register["EnhancedDynamicEscapeStrategyV32"] = EnhancedDynamicEscapeStrategyV32 - LLAMAEnhancedDynamicEscapeStrategyV32 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicEscapeStrategyV32" - ).set_name("LLAMAEnhancedDynamicEscapeStrategyV32", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEscapeStrategyV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicEscapeStrategyV32 = NonObjectOptimizer(method="LLAMAEnhancedDynamicEscapeStrategyV32").set_name("LLAMAEnhancedDynamicEscapeStrategyV32", register=True) except Exception as e: print("EnhancedDynamicEscapeStrategyV32 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicEvolutionStrategy import EnhancedDynamicEvolutionStrategy lama_register["EnhancedDynamicEvolutionStrategy"] = EnhancedDynamicEvolutionStrategy - LLAMAEnhancedDynamicEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedDynamicEvolutionStrategy" - ).set_name("LLAMAEnhancedDynamicEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedDynamicEvolutionStrategy").set_name("LLAMAEnhancedDynamicEvolutionStrategy", register=True) except Exception as e: print("EnhancedDynamicEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicExplorationOptimizer import ( - EnhancedDynamicExplorationOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicExplorationOptimizer import EnhancedDynamicExplorationOptimizer lama_register["EnhancedDynamicExplorationOptimizer"] = EnhancedDynamicExplorationOptimizer - LLAMAEnhancedDynamicExplorationOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicExplorationOptimizer" - ).set_name("LLAMAEnhancedDynamicExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicExplorationOptimizer").set_name("LLAMAEnhancedDynamicExplorationOptimizer", register=True) except Exception as e: print("EnhancedDynamicExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithm import EnhancedDynamicFireworkAlgorithm lama_register["EnhancedDynamicFireworkAlgorithm"] = EnhancedDynamicFireworkAlgorithm - LLAMAEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithm" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithm").set_name("LLAMAEnhancedDynamicFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmFinal import ( - EnhancedDynamicFireworkAlgorithmFinal, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmFinal import EnhancedDynamicFireworkAlgorithmFinal lama_register["EnhancedDynamicFireworkAlgorithmFinal"] = EnhancedDynamicFireworkAlgorithmFinal - LLAMAEnhancedDynamicFireworkAlgorithmFinal = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmFinal" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmFinal", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmFinal = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmFinal").set_name("LLAMAEnhancedDynamicFireworkAlgorithmFinal", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmFinal can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmImproved import ( - EnhancedDynamicFireworkAlgorithmImproved, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmImproved import EnhancedDynamicFireworkAlgorithmImproved lama_register["EnhancedDynamicFireworkAlgorithmImproved"] = EnhancedDynamicFireworkAlgorithmImproved - LLAMAEnhancedDynamicFireworkAlgorithmImproved = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmImproved" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmImproved", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedDynamicFireworkAlgorithmImproved", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRedesigned import ( - EnhancedDynamicFireworkAlgorithmRedesigned, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRedesigned import EnhancedDynamicFireworkAlgorithmRedesigned lama_register["EnhancedDynamicFireworkAlgorithmRedesigned"] = EnhancedDynamicFireworkAlgorithmRedesigned - LLAMAEnhancedDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned").set_name("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmRedesigned can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRefined import ( - EnhancedDynamicFireworkAlgorithmRefined, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRefined import EnhancedDynamicFireworkAlgorithmRefined lama_register["EnhancedDynamicFireworkAlgorithmRefined"] = EnhancedDynamicFireworkAlgorithmRefined - LLAMAEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmRefined" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRefined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRefined").set_name("LLAMAEnhancedDynamicFireworkAlgorithmRefined", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmV2 import ( - EnhancedDynamicFireworkAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmV2 import EnhancedDynamicFireworkAlgorithmV2 lama_register["EnhancedDynamicFireworkAlgorithmV2"] = EnhancedDynamicFireworkAlgorithmV2 - LLAMAEnhancedDynamicFireworkAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmV2" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmV2").set_name("LLAMAEnhancedDynamicFireworkAlgorithmV2", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) except Exception as e: print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 can not be imported: ", - e, - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 can not be imported: ", - e, - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 can not be imported: ", - e, - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 can not be imported: ", - e, - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9", register=True - ) -except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 can not be imported: ", e - ) - -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization import ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization, - ) - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization"] = ( - EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization", register=True) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10", register=True) except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization import ( - EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 - lama_register["EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization"] = ( - EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12", register=True) except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization import ( - EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 - lama_register["EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization"] = ( - EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization" - ).set_name( - "LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization", register=True - ) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2", register=True) except Exception as e: - print( - "EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization can not be imported: ", e - ) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithHybridSearch import ( - EnhancedDynamicFireworkAlgorithmWithHybridSearch, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 - lama_register["EnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( - EnhancedDynamicFireworkAlgorithmWithHybridSearch - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4", register=True) except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization import ( - EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 - lama_register["EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization"] = ( - EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization - ) - LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization" - ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6", register=True) except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolution import ( - EnhancedDynamicFireworkDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 - lama_register["EnhancedDynamicFireworkDifferentialEvolution"] = ( - EnhancedDynamicFireworkDifferentialEvolution - ) - LLAMAEnhancedDynamicFireworkDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkDifferentialEvolution" - ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolution", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8", register=True) except Exception as e: - print("EnhancedDynamicFireworkDifferentialEvolution can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV2 import ( - EnhancedDynamicFireworkDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization - lama_register["EnhancedDynamicFireworkDifferentialEvolutionV2"] = ( - EnhancedDynamicFireworkDifferentialEvolutionV2 - ) - LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2" - ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization", register=True) except Exception as e: - print("EnhancedDynamicFireworkDifferentialEvolutionV2 can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization import EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization + lama_register["EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization"] = EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV3 import ( - EnhancedDynamicFireworkDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization - lama_register["EnhancedDynamicFireworkDifferentialEvolutionV3"] = ( - EnhancedDynamicFireworkDifferentialEvolutionV3 - ) - LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3" - ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization", register=True) except Exception as e: - print("EnhancedDynamicFireworkDifferentialEvolutionV3 can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithHybridSearch import EnhancedDynamicFireworkAlgorithmWithHybridSearch + lama_register["EnhancedDynamicFireworkAlgorithmWithHybridSearch"] = EnhancedDynamicFireworkAlgorithmWithHybridSearch + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) +except Exception as e: + print("EnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealing import ( - EnhancedDynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization - lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( - EnhancedDynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization", register=True) except Exception as e: - print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) + print("EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolution import EnhancedDynamicFireworkDifferentialEvolution + + lama_register["EnhancedDynamicFireworkDifferentialEvolution"] = EnhancedDynamicFireworkDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolution").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolution", register=True) +except Exception as e: + print("EnhancedDynamicFireworkDifferentialEvolution can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV2 import EnhancedDynamicFireworkDifferentialEvolutionV2 + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV2"] = EnhancedDynamicFireworkDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2", register=True) +except Exception as e: + print("EnhancedDynamicFireworkDifferentialEvolutionV2 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( - EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus, - ) + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV3 import EnhancedDynamicFireworkDifferentialEvolutionV3 - lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( - EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus - ) - LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( - method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus" - ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV3"] = EnhancedDynamicFireworkDifferentialEvolutionV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3", register=True) except Exception as e: - print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) + print("EnhancedDynamicFireworkDifferentialEvolutionV3 can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealing import EnhancedDynamicGradientBoostedMemorySimulatedAnnealing + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealing"] = EnhancedDynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: + print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) +try: + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus import EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus + + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: + print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) try: from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithm import EnhancedDynamicHarmonyAlgorithm lama_register["EnhancedDynamicHarmonyAlgorithm"] = EnhancedDynamicHarmonyAlgorithm - LLAMAEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonyAlgorithm" - ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithm").set_name("LLAMAEnhancedDynamicHarmonyAlgorithm", register=True) except Exception as e: print("EnhancedDynamicHarmonyAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithmV2 import ( - EnhancedDynamicHarmonyAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithmV2 import EnhancedDynamicHarmonyAlgorithmV2 lama_register["EnhancedDynamicHarmonyAlgorithmV2"] = EnhancedDynamicHarmonyAlgorithmV2 - LLAMAEnhancedDynamicHarmonyAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonyAlgorithmV2" - ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonyAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithmV2").set_name("LLAMAEnhancedDynamicHarmonyAlgorithmV2", register=True) except Exception as e: print("EnhancedDynamicHarmonyAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonyFireworksSearch import ( - EnhancedDynamicHarmonyFireworksSearch, - ) + from nevergrad.optimization.lama.EnhancedDynamicHarmonyFireworksSearch import EnhancedDynamicHarmonyFireworksSearch lama_register["EnhancedDynamicHarmonyFireworksSearch"] = EnhancedDynamicHarmonyFireworksSearch - LLAMAEnhancedDynamicHarmonyFireworksSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonyFireworksSearch" - ).set_name("LLAMAEnhancedDynamicHarmonyFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyFireworksSearch").set_name("LLAMAEnhancedDynamicHarmonyFireworksSearch", register=True) except Exception as e: print("EnhancedDynamicHarmonyFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizer import ( - EnhancedDynamicHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizer import EnhancedDynamicHarmonySearchOptimizer lama_register["EnhancedDynamicHarmonySearchOptimizer"] = EnhancedDynamicHarmonySearchOptimizer - LLAMAEnhancedDynamicHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchOptimizer" - ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicHarmonySearchOptimizer", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizerV7 import ( - EnhancedDynamicHarmonySearchOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizerV7 import EnhancedDynamicHarmonySearchOptimizerV7 lama_register["EnhancedDynamicHarmonySearchOptimizerV7"] = EnhancedDynamicHarmonySearchOptimizerV7 - LLAMAEnhancedDynamicHarmonySearchOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7" - ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7").set_name("LLAMAEnhancedDynamicHarmonySearchOptimizerV7", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchOptimizerV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV5 import EnhancedDynamicHarmonySearchV5 lama_register["EnhancedDynamicHarmonySearchV5"] = EnhancedDynamicHarmonySearchV5 - LLAMAEnhancedDynamicHarmonySearchV5 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchV5" - ).set_name("LLAMAEnhancedDynamicHarmonySearchV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV5").set_name("LLAMAEnhancedDynamicHarmonySearchV5", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV6 import EnhancedDynamicHarmonySearchV6 lama_register["EnhancedDynamicHarmonySearchV6"] = EnhancedDynamicHarmonySearchV6 - LLAMAEnhancedDynamicHarmonySearchV6 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchV6" - ).set_name("LLAMAEnhancedDynamicHarmonySearchV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV6").set_name("LLAMAEnhancedDynamicHarmonySearchV6", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV7 import EnhancedDynamicHarmonySearchV7 lama_register["EnhancedDynamicHarmonySearchV7"] = EnhancedDynamicHarmonySearchV7 - LLAMAEnhancedDynamicHarmonySearchV7 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchV7" - ).set_name("LLAMAEnhancedDynamicHarmonySearchV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV7").set_name("LLAMAEnhancedDynamicHarmonySearchV7", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV8 import EnhancedDynamicHarmonySearchV8 lama_register["EnhancedDynamicHarmonySearchV8"] = EnhancedDynamicHarmonySearchV8 - LLAMAEnhancedDynamicHarmonySearchV8 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonySearchV8" - ).set_name("LLAMAEnhancedDynamicHarmonySearchV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonySearchV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV8").set_name("LLAMAEnhancedDynamicHarmonySearchV8", register=True) except Exception as e: print("EnhancedDynamicHarmonySearchV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHarmonyTabuSearch import EnhancedDynamicHarmonyTabuSearch lama_register["EnhancedDynamicHarmonyTabuSearch"] = EnhancedDynamicHarmonyTabuSearch - LLAMAEnhancedDynamicHarmonyTabuSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHarmonyTabuSearch" - ).set_name("LLAMAEnhancedDynamicHarmonyTabuSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyTabuSearch").set_name("LLAMAEnhancedDynamicHarmonyTabuSearch", register=True) except Exception as e: print("EnhancedDynamicHarmonyTabuSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHybridDEPSOWithEliteMemory import ( - EnhancedDynamicHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.EnhancedDynamicHybridDEPSOWithEliteMemory import EnhancedDynamicHybridDEPSOWithEliteMemory lama_register["EnhancedDynamicHybridDEPSOWithEliteMemory"] = EnhancedDynamicHybridDEPSOWithEliteMemory - LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory" - ).set_name("LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory").set_name("LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("EnhancedDynamicHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 import ( - EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21, - ) + from nevergrad.optimization.lama.EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 import EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 - lama_register["EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21"] = ( - EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 - ) - LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21" - ).set_name("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21", register=True) + lama_register["EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21"] = EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21").set_name("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21", register=True) except Exception as e: print("EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicHybridOptimization import ( - EnhancedDynamicHybridOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicHybridOptimization import EnhancedDynamicHybridOptimization lama_register["EnhancedDynamicHybridOptimization"] = EnhancedDynamicHybridOptimization - LLAMAEnhancedDynamicHybridOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHybridOptimization" - ).set_name("LLAMAEnhancedDynamicHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimization").set_name("LLAMAEnhancedDynamicHybridOptimization", register=True) except Exception as e: print("EnhancedDynamicHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicHybridOptimizer import EnhancedDynamicHybridOptimizer lama_register["EnhancedDynamicHybridOptimizer"] = EnhancedDynamicHybridOptimizer - LLAMAEnhancedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicHybridOptimizer" - ).set_name("LLAMAEnhancedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimizer").set_name("LLAMAEnhancedDynamicHybridOptimizer", register=True) except Exception as e: print("EnhancedDynamicHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearch import EnhancedDynamicLevyHarmonySearch lama_register["EnhancedDynamicLevyHarmonySearch"] = EnhancedDynamicLevyHarmonySearch - LLAMAEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLevyHarmonySearch" - ).set_name("LLAMAEnhancedDynamicLevyHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearch").set_name("LLAMAEnhancedDynamicLevyHarmonySearch", register=True) except Exception as e: print("EnhancedDynamicLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV2 import ( - EnhancedDynamicLevyHarmonySearchV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV2 import EnhancedDynamicLevyHarmonySearchV2 lama_register["EnhancedDynamicLevyHarmonySearchV2"] = EnhancedDynamicLevyHarmonySearchV2 - LLAMAEnhancedDynamicLevyHarmonySearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLevyHarmonySearchV2" - ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLevyHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV2").set_name("LLAMAEnhancedDynamicLevyHarmonySearchV2", register=True) except Exception as e: print("EnhancedDynamicLevyHarmonySearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV3 import ( - EnhancedDynamicLevyHarmonySearchV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV3 import EnhancedDynamicLevyHarmonySearchV3 lama_register["EnhancedDynamicLevyHarmonySearchV3"] = EnhancedDynamicLevyHarmonySearchV3 - LLAMAEnhancedDynamicLevyHarmonySearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLevyHarmonySearchV3" - ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLevyHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV3").set_name("LLAMAEnhancedDynamicLevyHarmonySearchV3", register=True) except Exception as e: print("EnhancedDynamicLevyHarmonySearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithm import ( - EnhancedDynamicLocalSearchFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithm import EnhancedDynamicLocalSearchFireworkAlgorithm lama_register["EnhancedDynamicLocalSearchFireworkAlgorithm"] = EnhancedDynamicLocalSearchFireworkAlgorithm - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm" - ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV2 import ( - EnhancedDynamicLocalSearchFireworkAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV2 import EnhancedDynamicLocalSearchFireworkAlgorithmV2 - lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV2"] = ( - EnhancedDynamicLocalSearchFireworkAlgorithmV2 - ) - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2" - ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2", register=True) + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV2"] = EnhancedDynamicLocalSearchFireworkAlgorithmV2 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2", register=True) except Exception as e: print("EnhancedDynamicLocalSearchFireworkAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV3 import ( - EnhancedDynamicLocalSearchFireworkAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV3 import EnhancedDynamicLocalSearchFireworkAlgorithmV3 - lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV3"] = ( - EnhancedDynamicLocalSearchFireworkAlgorithmV3 - ) - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3" - ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3", register=True) + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV3"] = EnhancedDynamicLocalSearchFireworkAlgorithmV3 + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3", register=True) except Exception as e: print("EnhancedDynamicLocalSearchFireworkAlgorithmV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicMemoryStrategyV51 import EnhancedDynamicMemoryStrategyV51 lama_register["EnhancedDynamicMemoryStrategyV51"] = EnhancedDynamicMemoryStrategyV51 - LLAMAEnhancedDynamicMemoryStrategyV51 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicMemoryStrategyV51" - ).set_name("LLAMAEnhancedDynamicMemoryStrategyV51", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMemoryStrategyV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicMemoryStrategyV51 = NonObjectOptimizer(method="LLAMAEnhancedDynamicMemoryStrategyV51").set_name("LLAMAEnhancedDynamicMemoryStrategyV51", register=True) except Exception as e: print("EnhancedDynamicMemoryStrategyV51 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicMultiPhaseAnnealingPlus import ( - EnhancedDynamicMultiPhaseAnnealingPlus, - ) + from nevergrad.optimization.lama.EnhancedDynamicMultiPhaseAnnealingPlus import EnhancedDynamicMultiPhaseAnnealingPlus lama_register["EnhancedDynamicMultiPhaseAnnealingPlus"] = EnhancedDynamicMultiPhaseAnnealingPlus - LLAMAEnhancedDynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( - method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus" - ).set_name("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicMultiPhaseAnnealingPlus = NonObjectOptimizer(method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus").set_name("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus", register=True) except Exception as e: print("EnhancedDynamicMultiPhaseAnnealingPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicMutationSearch import EnhancedDynamicMutationSearch lama_register["EnhancedDynamicMutationSearch"] = EnhancedDynamicMutationSearch - LLAMAEnhancedDynamicMutationSearch = NonObjectOptimizer( - method="LLAMAEnhancedDynamicMutationSearch" - ).set_name("LLAMAEnhancedDynamicMutationSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicMutationSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicMutationSearch").set_name("LLAMAEnhancedDynamicMutationSearch", register=True) except Exception as e: print("EnhancedDynamicMutationSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicNichePSO_DE_LS import EnhancedDynamicNichePSO_DE_LS lama_register["EnhancedDynamicNichePSO_DE_LS"] = EnhancedDynamicNichePSO_DE_LS - LLAMAEnhancedDynamicNichePSO_DE_LS = NonObjectOptimizer( - method="LLAMAEnhancedDynamicNichePSO_DE_LS" - ).set_name("LLAMAEnhancedDynamicNichePSO_DE_LS", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichePSO_DE_LS").set_name("LLAMAEnhancedDynamicNichePSO_DE_LS", register=True) except Exception as e: print("EnhancedDynamicNichePSO_DE_LS can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicNichingDEPSO import EnhancedDynamicNichingDEPSO lama_register["EnhancedDynamicNichingDEPSO"] = EnhancedDynamicNichingDEPSO - LLAMAEnhancedDynamicNichingDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO").set_name( - "LLAMAEnhancedDynamicNichingDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicNichingDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO").set_name("LLAMAEnhancedDynamicNichingDEPSO", register=True) except Exception as e: print("EnhancedDynamicNichingDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicPrecisionBalancedEvolution import ( - EnhancedDynamicPrecisionBalancedEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicPrecisionBalancedEvolution import EnhancedDynamicPrecisionBalancedEvolution lama_register["EnhancedDynamicPrecisionBalancedEvolution"] = EnhancedDynamicPrecisionBalancedEvolution - LLAMAEnhancedDynamicPrecisionBalancedEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicPrecisionBalancedEvolution" - ).set_name("LLAMAEnhancedDynamicPrecisionBalancedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicPrecisionBalancedEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionBalancedEvolution").set_name("LLAMAEnhancedDynamicPrecisionBalancedEvolution", register=True) except Exception as e: print("EnhancedDynamicPrecisionBalancedEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicPrecisionOptimizer import ( - EnhancedDynamicPrecisionOptimizer, - ) + from nevergrad.optimization.lama.EnhancedDynamicPrecisionOptimizer import EnhancedDynamicPrecisionOptimizer lama_register["EnhancedDynamicPrecisionOptimizer"] = EnhancedDynamicPrecisionOptimizer - LLAMAEnhancedDynamicPrecisionOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedDynamicPrecisionOptimizer" - ).set_name("LLAMAEnhancedDynamicPrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionOptimizer").set_name("LLAMAEnhancedDynamicPrecisionOptimizer", register=True) except Exception as e: print("EnhancedDynamicPrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolution import ( - EnhancedDynamicQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolution import EnhancedDynamicQuantumDifferentialEvolution lama_register["EnhancedDynamicQuantumDifferentialEvolution"] = EnhancedDynamicQuantumDifferentialEvolution - LLAMAEnhancedDynamicQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumDifferentialEvolution" - ).set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolution").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolution", register=True) except Exception as e: print("EnhancedDynamicQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory import ( - EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory import EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory - lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory"] = ( - EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory - ) - LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory" - ).set_name( - "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory", register=True - ) + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory"] = EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory", register=True) except Exception as e: - print( - "EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory can not be imported: ", - e, - ) - + print("EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart import ( - EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart import EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart - lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart"] = ( - EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart - ) - LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart" - ).set_name( - "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart", register=True - ) + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart"] = EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart", register=True) except Exception as e: - print( - "EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart can not be imported: ", - e, - ) - + print("EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimization import ( - EnhancedDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimization import EnhancedDynamicQuantumSwarmOptimization lama_register["EnhancedDynamicQuantumSwarmOptimization"] = EnhancedDynamicQuantumSwarmOptimization - LLAMAEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationFinal import ( - EnhancedDynamicQuantumSwarmOptimizationFinal, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationFinal import EnhancedDynamicQuantumSwarmOptimizationFinal - lama_register["EnhancedDynamicQuantumSwarmOptimizationFinal"] = ( - EnhancedDynamicQuantumSwarmOptimizationFinal - ) - LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal", register=True) + lama_register["EnhancedDynamicQuantumSwarmOptimizationFinal"] = EnhancedDynamicQuantumSwarmOptimizationFinal + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationFinal can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationImproved import ( - EnhancedDynamicQuantumSwarmOptimizationImproved, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationImproved import EnhancedDynamicQuantumSwarmOptimizationImproved - lama_register["EnhancedDynamicQuantumSwarmOptimizationImproved"] = ( - EnhancedDynamicQuantumSwarmOptimizationImproved - ) - LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved", register=True) + lama_register["EnhancedDynamicQuantumSwarmOptimizationImproved"] = EnhancedDynamicQuantumSwarmOptimizationImproved + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV10 import ( - EnhancedDynamicQuantumSwarmOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV10 import EnhancedDynamicQuantumSwarmOptimizationV10 lama_register["EnhancedDynamicQuantumSwarmOptimizationV10"] = EnhancedDynamicQuantumSwarmOptimizationV10 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV10", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV11 import ( - EnhancedDynamicQuantumSwarmOptimizationV11, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV11 import EnhancedDynamicQuantumSwarmOptimizationV11 lama_register["EnhancedDynamicQuantumSwarmOptimizationV11"] = EnhancedDynamicQuantumSwarmOptimizationV11 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV11 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV11", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV12 import ( - EnhancedDynamicQuantumSwarmOptimizationV12, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV12 import EnhancedDynamicQuantumSwarmOptimizationV12 lama_register["EnhancedDynamicQuantumSwarmOptimizationV12"] = EnhancedDynamicQuantumSwarmOptimizationV12 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV12 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV12", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV13 import ( - EnhancedDynamicQuantumSwarmOptimizationV13, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV13 import EnhancedDynamicQuantumSwarmOptimizationV13 lama_register["EnhancedDynamicQuantumSwarmOptimizationV13"] = EnhancedDynamicQuantumSwarmOptimizationV13 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV13 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV13", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV14 import ( - EnhancedDynamicQuantumSwarmOptimizationV14, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV14 import EnhancedDynamicQuantumSwarmOptimizationV14 lama_register["EnhancedDynamicQuantumSwarmOptimizationV14"] = EnhancedDynamicQuantumSwarmOptimizationV14 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV14 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV14", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV15 import ( - EnhancedDynamicQuantumSwarmOptimizationV15, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV15 import EnhancedDynamicQuantumSwarmOptimizationV15 lama_register["EnhancedDynamicQuantumSwarmOptimizationV15"] = EnhancedDynamicQuantumSwarmOptimizationV15 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV15 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV15", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV16 import ( - EnhancedDynamicQuantumSwarmOptimizationV16, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV16 import EnhancedDynamicQuantumSwarmOptimizationV16 lama_register["EnhancedDynamicQuantumSwarmOptimizationV16"] = EnhancedDynamicQuantumSwarmOptimizationV16 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV16 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV16", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV17 import ( - EnhancedDynamicQuantumSwarmOptimizationV17, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV17 import EnhancedDynamicQuantumSwarmOptimizationV17 lama_register["EnhancedDynamicQuantumSwarmOptimizationV17"] = EnhancedDynamicQuantumSwarmOptimizationV17 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV17 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV17", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV18 import ( - EnhancedDynamicQuantumSwarmOptimizationV18, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV18 import EnhancedDynamicQuantumSwarmOptimizationV18 lama_register["EnhancedDynamicQuantumSwarmOptimizationV18"] = EnhancedDynamicQuantumSwarmOptimizationV18 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV18 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV18 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV18", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV19 import ( - EnhancedDynamicQuantumSwarmOptimizationV19, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV19 import EnhancedDynamicQuantumSwarmOptimizationV19 lama_register["EnhancedDynamicQuantumSwarmOptimizationV19"] = EnhancedDynamicQuantumSwarmOptimizationV19 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV19 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV19 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV19", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV2 import ( - EnhancedDynamicQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV2 import EnhancedDynamicQuantumSwarmOptimizationV2 lama_register["EnhancedDynamicQuantumSwarmOptimizationV2"] = EnhancedDynamicQuantumSwarmOptimizationV2 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV20 import ( - EnhancedDynamicQuantumSwarmOptimizationV20, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV20 import EnhancedDynamicQuantumSwarmOptimizationV20 lama_register["EnhancedDynamicQuantumSwarmOptimizationV20"] = EnhancedDynamicQuantumSwarmOptimizationV20 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV20 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV20 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV20", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV21 import ( - EnhancedDynamicQuantumSwarmOptimizationV21, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV21 import EnhancedDynamicQuantumSwarmOptimizationV21 lama_register["EnhancedDynamicQuantumSwarmOptimizationV21"] = EnhancedDynamicQuantumSwarmOptimizationV21 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV21 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV21 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV21", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV22 import ( - EnhancedDynamicQuantumSwarmOptimizationV22, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV22 import EnhancedDynamicQuantumSwarmOptimizationV22 lama_register["EnhancedDynamicQuantumSwarmOptimizationV22"] = EnhancedDynamicQuantumSwarmOptimizationV22 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV22 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV22 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV22", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV23 import ( - EnhancedDynamicQuantumSwarmOptimizationV23, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV23 import EnhancedDynamicQuantumSwarmOptimizationV23 lama_register["EnhancedDynamicQuantumSwarmOptimizationV23"] = EnhancedDynamicQuantumSwarmOptimizationV23 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV23 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV23 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV23", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV24 import ( - EnhancedDynamicQuantumSwarmOptimizationV24, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV24 import EnhancedDynamicQuantumSwarmOptimizationV24 lama_register["EnhancedDynamicQuantumSwarmOptimizationV24"] = EnhancedDynamicQuantumSwarmOptimizationV24 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV24 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV24 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV24", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV25 import ( - EnhancedDynamicQuantumSwarmOptimizationV25, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV25 import EnhancedDynamicQuantumSwarmOptimizationV25 lama_register["EnhancedDynamicQuantumSwarmOptimizationV25"] = EnhancedDynamicQuantumSwarmOptimizationV25 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV25 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV25 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV25", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV26 import ( - EnhancedDynamicQuantumSwarmOptimizationV26, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV26 import EnhancedDynamicQuantumSwarmOptimizationV26 lama_register["EnhancedDynamicQuantumSwarmOptimizationV26"] = EnhancedDynamicQuantumSwarmOptimizationV26 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV26 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV26", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV26 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV26", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV27 import ( - EnhancedDynamicQuantumSwarmOptimizationV27, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV27 import EnhancedDynamicQuantumSwarmOptimizationV27 lama_register["EnhancedDynamicQuantumSwarmOptimizationV27"] = EnhancedDynamicQuantumSwarmOptimizationV27 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV27 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV27", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV27 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV27", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV28 import ( - EnhancedDynamicQuantumSwarmOptimizationV28, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV28 import EnhancedDynamicQuantumSwarmOptimizationV28 lama_register["EnhancedDynamicQuantumSwarmOptimizationV28"] = EnhancedDynamicQuantumSwarmOptimizationV28 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV28 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV28", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV28", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV3 import ( - EnhancedDynamicQuantumSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV3 import EnhancedDynamicQuantumSwarmOptimizationV3 lama_register["EnhancedDynamicQuantumSwarmOptimizationV3"] = EnhancedDynamicQuantumSwarmOptimizationV3 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV4 import ( - EnhancedDynamicQuantumSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV4 import EnhancedDynamicQuantumSwarmOptimizationV4 lama_register["EnhancedDynamicQuantumSwarmOptimizationV4"] = EnhancedDynamicQuantumSwarmOptimizationV4 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV5 import ( - EnhancedDynamicQuantumSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV5 import EnhancedDynamicQuantumSwarmOptimizationV5 lama_register["EnhancedDynamicQuantumSwarmOptimizationV5"] = EnhancedDynamicQuantumSwarmOptimizationV5 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV6 import ( - EnhancedDynamicQuantumSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV6 import EnhancedDynamicQuantumSwarmOptimizationV6 lama_register["EnhancedDynamicQuantumSwarmOptimizationV6"] = EnhancedDynamicQuantumSwarmOptimizationV6 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV7 import ( - EnhancedDynamicQuantumSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV7 import EnhancedDynamicQuantumSwarmOptimizationV7 lama_register["EnhancedDynamicQuantumSwarmOptimizationV7"] = EnhancedDynamicQuantumSwarmOptimizationV7 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV8 import ( - EnhancedDynamicQuantumSwarmOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV8 import EnhancedDynamicQuantumSwarmOptimizationV8 lama_register["EnhancedDynamicQuantumSwarmOptimizationV8"] = EnhancedDynamicQuantumSwarmOptimizationV8 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV8", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV9 import ( - EnhancedDynamicQuantumSwarmOptimizationV9, - ) + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV9 import EnhancedDynamicQuantumSwarmOptimizationV9 lama_register["EnhancedDynamicQuantumSwarmOptimizationV9"] = EnhancedDynamicQuantumSwarmOptimizationV9 - LLAMAEnhancedDynamicQuantumSwarmOptimizationV9 = NonObjectOptimizer( - method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9" - ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV9", register=True) except Exception as e: print("EnhancedDynamicQuantumSwarmOptimizationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( - EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing import EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing - lama_register["EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( - EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing - ) - LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicRefinementGradientBoostedMemoryAnnealing import ( - EnhancedDynamicRefinementGradientBoostedMemoryAnnealing, - ) + from nevergrad.optimization.lama.EnhancedDynamicRefinementGradientBoostedMemoryAnnealing import EnhancedDynamicRefinementGradientBoostedMemoryAnnealing - lama_register["EnhancedDynamicRefinementGradientBoostedMemoryAnnealing"] = ( - EnhancedDynamicRefinementGradientBoostedMemoryAnnealing - ) - LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing" - ).set_name("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing", register=True) + lama_register["EnhancedDynamicRefinementGradientBoostedMemoryAnnealing"] = EnhancedDynamicRefinementGradientBoostedMemoryAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing").set_name("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("EnhancedDynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedDynamicRestartAdaptiveDE import EnhancedDynamicRestartAdaptiveDE lama_register["EnhancedDynamicRestartAdaptiveDE"] = EnhancedDynamicRestartAdaptiveDE - LLAMAEnhancedDynamicRestartAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedDynamicRestartAdaptiveDE" - ).set_name("LLAMAEnhancedDynamicRestartAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicRestartAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicRestartAdaptiveDE").set_name("LLAMAEnhancedDynamicRestartAdaptiveDE", register=True) except Exception as e: print("EnhancedDynamicRestartAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicStrategyAdaptiveDE import ( - EnhancedDynamicStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.EnhancedDynamicStrategyAdaptiveDE import EnhancedDynamicStrategyAdaptiveDE lama_register["EnhancedDynamicStrategyAdaptiveDE"] = EnhancedDynamicStrategyAdaptiveDE - LLAMAEnhancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedDynamicStrategyAdaptiveDE" - ).set_name("LLAMAEnhancedDynamicStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicStrategyAdaptiveDE").set_name("LLAMAEnhancedDynamicStrategyAdaptiveDE", register=True) except Exception as e: print("EnhancedDynamicStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithm import ( - EnhancedDynamicallyAdaptiveFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithm import EnhancedDynamicallyAdaptiveFireworkAlgorithm - lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithm"] = ( - EnhancedDynamicallyAdaptiveFireworkAlgorithm - ) - LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm" - ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm", register=True) + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithm"] = EnhancedDynamicallyAdaptiveFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm", register=True) except Exception as e: print("EnhancedDynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved import ( - EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved, - ) + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved import EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved - lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved"] = ( - EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved - ) - LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved = NonObjectOptimizer( - method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved" - ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved", register=True) + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved"] = EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved + res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved").set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved", register=True) except Exception as e: print("EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteAdaptiveHybridDEPSO import EnhancedEliteAdaptiveHybridDEPSO lama_register["EnhancedEliteAdaptiveHybridDEPSO"] = EnhancedEliteAdaptiveHybridDEPSO - LLAMAEnhancedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAEnhancedEliteAdaptiveHybridDEPSO" - ).set_name("LLAMAEnhancedEliteAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveHybridDEPSO").set_name("LLAMAEnhancedEliteAdaptiveHybridDEPSO", register=True) except Exception as e: print("EnhancedEliteAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizer import ( - EnhancedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizer import EnhancedEliteAdaptiveMemoryHybridOptimizer lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizer"] = EnhancedEliteAdaptiveMemoryHybridOptimizer - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV2 import ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV2 import EnhancedEliteAdaptiveMemoryHybridOptimizerV2 - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV2"] = ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV2 - ) - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2" - ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2", register=True) + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV2"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2", register=True) except Exception as e: print("EnhancedEliteAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV6 import ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV6, - ) + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV6 import EnhancedEliteAdaptiveMemoryHybridOptimizerV6 - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV6"] = ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV6 - ) - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6" - ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6", register=True) + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV6"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV6 + res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6", register=True) except Exception as e: print("EnhancedEliteAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV7 import ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV7 import EnhancedEliteAdaptiveMemoryHybridOptimizerV7 - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV7"] = ( - EnhancedEliteAdaptiveMemoryHybridOptimizerV7 - ) - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7" - ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7", register=True) + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV7"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV7 + res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7", register=True) except Exception as e: print("EnhancedEliteAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteCrowdingMemoryHybridOptimizerV3 import ( - EnhancedEliteCrowdingMemoryHybridOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedEliteCrowdingMemoryHybridOptimizerV3 import EnhancedEliteCrowdingMemoryHybridOptimizerV3 - lama_register["EnhancedEliteCrowdingMemoryHybridOptimizerV3"] = ( - EnhancedEliteCrowdingMemoryHybridOptimizerV3 - ) - LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3" - ).set_name("LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3", register=True) + lama_register["EnhancedEliteCrowdingMemoryHybridOptimizerV3"] = EnhancedEliteCrowdingMemoryHybridOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3").set_name("LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3", register=True) except Exception as e: print("EnhancedEliteCrowdingMemoryHybridOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveDE import EnhancedEliteGuidedAdaptiveDE lama_register["EnhancedEliteGuidedAdaptiveDE"] = EnhancedEliteGuidedAdaptiveDE - LLAMAEnhancedEliteGuidedAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedAdaptiveDE" - ).set_name("LLAMAEnhancedEliteGuidedAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveDE").set_name("LLAMAEnhancedEliteGuidedAdaptiveDE", register=True) except Exception as e: print("EnhancedEliteGuidedAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveRestartDE import ( - EnhancedEliteGuidedAdaptiveRestartDE, - ) + from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveRestartDE import EnhancedEliteGuidedAdaptiveRestartDE lama_register["EnhancedEliteGuidedAdaptiveRestartDE"] = EnhancedEliteGuidedAdaptiveRestartDE - LLAMAEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE" - ).set_name("LLAMAEnhancedEliteGuidedAdaptiveRestartDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE").set_name("LLAMAEnhancedEliteGuidedAdaptiveRestartDE", register=True) except Exception as e: print("EnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteGuidedDualMutationDE import ( - EnhancedEliteGuidedDualMutationDE, - ) + from nevergrad.optimization.lama.EnhancedEliteGuidedDualMutationDE import EnhancedEliteGuidedDualMutationDE lama_register["EnhancedEliteGuidedDualMutationDE"] = EnhancedEliteGuidedDualMutationDE - LLAMAEnhancedEliteGuidedDualMutationDE = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedDualMutationDE" - ).set_name("LLAMAEnhancedEliteGuidedDualMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedDualMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedDualMutationDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedDualMutationDE").set_name("LLAMAEnhancedEliteGuidedDualMutationDE", register=True) except Exception as e: print("EnhancedEliteGuidedDualMutationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v81 import EnhancedEliteGuidedMassQGSA_v81 lama_register["EnhancedEliteGuidedMassQGSA_v81"] = EnhancedEliteGuidedMassQGSA_v81 - LLAMAEnhancedEliteGuidedMassQGSA_v81 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMassQGSA_v81" - ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v81", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v81")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMassQGSA_v81 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v81").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v81", register=True) except Exception as e: print("EnhancedEliteGuidedMassQGSA_v81 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v82 import EnhancedEliteGuidedMassQGSA_v82 lama_register["EnhancedEliteGuidedMassQGSA_v82"] = EnhancedEliteGuidedMassQGSA_v82 - LLAMAEnhancedEliteGuidedMassQGSA_v82 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMassQGSA_v82" - ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v82", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v82")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMassQGSA_v82 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v82").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v82", register=True) except Exception as e: print("EnhancedEliteGuidedMassQGSA_v82 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v83 import EnhancedEliteGuidedMassQGSA_v83 lama_register["EnhancedEliteGuidedMassQGSA_v83"] = EnhancedEliteGuidedMassQGSA_v83 - LLAMAEnhancedEliteGuidedMassQGSA_v83 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMassQGSA_v83" - ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v83", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v83")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMassQGSA_v83 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v83").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v83", register=True) except Exception as e: print("EnhancedEliteGuidedMassQGSA_v83 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v85 import EnhancedEliteGuidedMassQGSA_v85 lama_register["EnhancedEliteGuidedMassQGSA_v85"] = EnhancedEliteGuidedMassQGSA_v85 - LLAMAEnhancedEliteGuidedMassQGSA_v85 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMassQGSA_v85" - ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v85", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v85")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMassQGSA_v85 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v85").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v85", register=True) except Exception as e: print("EnhancedEliteGuidedMassQGSA_v85 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v86 import EnhancedEliteGuidedMassQGSA_v86 lama_register["EnhancedEliteGuidedMassQGSA_v86"] = EnhancedEliteGuidedMassQGSA_v86 - LLAMAEnhancedEliteGuidedMassQGSA_v86 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMassQGSA_v86" - ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v86", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v86")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMassQGSA_v86 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v86").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v86", register=True) except Exception as e: print("EnhancedEliteGuidedMassQGSA_v86 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteGuidedMutationDE_v2 import EnhancedEliteGuidedMutationDE_v2 lama_register["EnhancedEliteGuidedMutationDE_v2"] = EnhancedEliteGuidedMutationDE_v2 - LLAMAEnhancedEliteGuidedMutationDE_v2 = NonObjectOptimizer( - method="LLAMAEnhancedEliteGuidedMutationDE_v2" - ).set_name("LLAMAEnhancedEliteGuidedMutationDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMutationDE_v2").set_name("LLAMAEnhancedEliteGuidedMutationDE_v2", register=True) except Exception as e: print("EnhancedEliteGuidedMutationDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEliteHybridOptimizer import EnhancedEliteHybridOptimizer lama_register["EnhancedEliteHybridOptimizer"] = EnhancedEliteHybridOptimizer - LLAMAEnhancedEliteHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedEliteHybridOptimizer" - ).set_name("LLAMAEnhancedEliteHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEliteHybridOptimizer").set_name("LLAMAEnhancedEliteHybridOptimizer", register=True) except Exception as e: print("EnhancedEliteHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEliteQuantumAdaptiveExplorationOptimization import ( - EnhancedEliteQuantumAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedEliteQuantumAdaptiveExplorationOptimization import EnhancedEliteQuantumAdaptiveExplorationOptimization - lama_register["EnhancedEliteQuantumAdaptiveExplorationOptimization"] = ( - EnhancedEliteQuantumAdaptiveExplorationOptimization - ) - LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization" - ).set_name("LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization", register=True) + lama_register["EnhancedEliteQuantumAdaptiveExplorationOptimization"] = EnhancedEliteQuantumAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization").set_name("LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization", register=True) except Exception as e: print("EnhancedEliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 import ( - EnhancedEnhancedAdaptiveHarmonicTabuSearchV24, - ) + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 import EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 - lama_register["EnhancedEnhancedAdaptiveHarmonicTabuSearchV24"] = ( - EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 - ) - LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24" - ).set_name("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24", register=True) + lama_register["EnhancedEnhancedAdaptiveHarmonicTabuSearchV24"] = EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24", register=True) except Exception as e: print("EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 import ( - EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7, - ) + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 import EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 - lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7"] = ( - EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 - ) - LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7" - ).set_name( - "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7", register=True - ) + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7"] = EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7", register=True) except Exception as e: - print( - "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 can not be imported: ", - e, - ) - + print("EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 import ( - EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8, - ) + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 import EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 - lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8"] = ( - EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 - ) - LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8" - ).set_name( - "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8", register=True - ) + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8"] = EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8", register=True) except Exception as e: - print( - "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 can not be imported: ", - e, - ) - + print("EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution import ( - EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution import EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution - lama_register["EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( - EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution - ) - LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution" - ).set_name("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) + lama_register["EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution"] = EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 import ( - EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57, - ) + from nevergrad.optimization.lama.EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 import EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 - lama_register["EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57"] = ( - EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 - ) - LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57" - ).set_name("LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57", register=True) + lama_register["EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57"] = EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57").set_name("LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57", register=True) except Exception as e: print("EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( - EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence import EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence - lama_register["EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( - EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence - ) - LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" - ).set_name("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) + lama_register["EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) except Exception as e: print("EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedDynamicQuantumSwarmOptimization import ( - EnhancedEnhancedDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedEnhancedDynamicQuantumSwarmOptimization import EnhancedEnhancedDynamicQuantumSwarmOptimization - lama_register["EnhancedEnhancedDynamicQuantumSwarmOptimization"] = ( - EnhancedEnhancedDynamicQuantumSwarmOptimization - ) - LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization", register=True) + lama_register["EnhancedEnhancedDynamicQuantumSwarmOptimization"] = EnhancedEnhancedDynamicQuantumSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 import ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10, - ) + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10"] = ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 - ) - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10" - ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10", register=True) + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10", register=True) except Exception as e: print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 import ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6, - ) + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6"] = ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 - ) - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6" - ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6", register=True) + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6", register=True) except Exception as e: print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 import ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7"] = ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 - ) - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7" - ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7", register=True) + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7", register=True) except Exception as e: print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 import ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8, - ) + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8"] = ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 - ) - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8" - ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8", register=True) + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8", register=True) except Exception as e: print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 import ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9, - ) + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9"] = ( - EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 - ) - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9" - ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9", register=True) + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9", register=True) except Exception as e: print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization import ( - EnhancedEnhancedFireworkSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization import EnhancedEnhancedFireworkSwarmOptimization lama_register["EnhancedEnhancedFireworkSwarmOptimization"] = EnhancedEnhancedFireworkSwarmOptimization - LLAMAEnhancedEnhancedFireworkSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedFireworkSwarmOptimization" - ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization", register=True) except Exception as e: print("EnhancedEnhancedFireworkSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v2 import ( - EnhancedEnhancedFireworkSwarmOptimization_v2, - ) + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v2 import EnhancedEnhancedFireworkSwarmOptimization_v2 - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v2"] = ( - EnhancedEnhancedFireworkSwarmOptimization_v2 - ) - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2" - ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2", register=True) + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v2"] = EnhancedEnhancedFireworkSwarmOptimization_v2 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2", register=True) except Exception as e: print("EnhancedEnhancedFireworkSwarmOptimization_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v3 import ( - EnhancedEnhancedFireworkSwarmOptimization_v3, - ) + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v3 import EnhancedEnhancedFireworkSwarmOptimization_v3 - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v3"] = ( - EnhancedEnhancedFireworkSwarmOptimization_v3 - ) - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3" - ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3", register=True) + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v3"] = EnhancedEnhancedFireworkSwarmOptimization_v3 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3", register=True) except Exception as e: print("EnhancedEnhancedFireworkSwarmOptimization_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v4 import ( - EnhancedEnhancedFireworkSwarmOptimization_v4, - ) + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v4 import EnhancedEnhancedFireworkSwarmOptimization_v4 - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v4"] = ( - EnhancedEnhancedFireworkSwarmOptimization_v4 - ) - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4" - ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4", register=True) + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v4"] = EnhancedEnhancedFireworkSwarmOptimization_v4 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4", register=True) except Exception as e: print("EnhancedEnhancedFireworkSwarmOptimization_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v63 import ( - EnhancedEnhancedGuidedMassQGSA_v63, - ) + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v63 import EnhancedEnhancedGuidedMassQGSA_v63 lama_register["EnhancedEnhancedGuidedMassQGSA_v63"] = EnhancedEnhancedGuidedMassQGSA_v63 - LLAMAEnhancedEnhancedGuidedMassQGSA_v63 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63" - ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v63", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v63 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v63", register=True) except Exception as e: print("EnhancedEnhancedGuidedMassQGSA_v63 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v64 import ( - EnhancedEnhancedGuidedMassQGSA_v64, - ) + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v64 import EnhancedEnhancedGuidedMassQGSA_v64 lama_register["EnhancedEnhancedGuidedMassQGSA_v64"] = EnhancedEnhancedGuidedMassQGSA_v64 - LLAMAEnhancedEnhancedGuidedMassQGSA_v64 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64" - ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v64", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v64 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v64", register=True) except Exception as e: print("EnhancedEnhancedGuidedMassQGSA_v64 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v68 import ( - EnhancedEnhancedGuidedMassQGSA_v68, - ) + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v68 import EnhancedEnhancedGuidedMassQGSA_v68 lama_register["EnhancedEnhancedGuidedMassQGSA_v68"] = EnhancedEnhancedGuidedMassQGSA_v68 - LLAMAEnhancedEnhancedGuidedMassQGSA_v68 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68" - ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v68", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v68 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v68", register=True) except Exception as e: print("EnhancedEnhancedGuidedMassQGSA_v68 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration import ( - EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration import EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration - lama_register["EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration"] = ( - EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration - ) - LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration" - ).set_name("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration", register=True) + lama_register["EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration"] = EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration", register=True) except Exception as e: print("EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizer import ( - EnhancedEnhancedHybridMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizer import EnhancedEnhancedHybridMetaHeuristicOptimizer - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizer"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizer - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizer"] = EnhancedEnhancedHybridMetaHeuristicOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV10 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV10, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV10 import EnhancedEnhancedHybridMetaHeuristicOptimizerV10 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV10"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV10 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV10"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV10 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV11 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV11, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV11 import EnhancedEnhancedHybridMetaHeuristicOptimizerV11 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV11"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV11 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV11"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV11 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV12 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV12, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV12 import EnhancedEnhancedHybridMetaHeuristicOptimizerV12 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV12"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV12 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV12"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV12 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV13 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV13, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV13 import EnhancedEnhancedHybridMetaHeuristicOptimizerV13 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV13"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV13 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV13"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV13 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV14 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV14, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV14 import EnhancedEnhancedHybridMetaHeuristicOptimizerV14 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV14"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV14 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV14"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV14 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV2 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV2 import EnhancedEnhancedHybridMetaHeuristicOptimizerV2 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV2"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV2 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV2"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV3 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV3 import EnhancedEnhancedHybridMetaHeuristicOptimizerV3 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV3"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV3 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV3"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV4 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV4 import EnhancedEnhancedHybridMetaHeuristicOptimizerV4 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV4"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV4 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV4"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV5 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV5 import EnhancedEnhancedHybridMetaHeuristicOptimizerV5 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV5"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV5 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV5"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV5 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV6 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV6, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV6 import EnhancedEnhancedHybridMetaHeuristicOptimizerV6 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV6"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV6 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV6"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV6 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV7 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV7 import EnhancedEnhancedHybridMetaHeuristicOptimizerV7 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV7"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV7 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV7"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV7 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV8 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV8, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV8 import EnhancedEnhancedHybridMetaHeuristicOptimizerV8 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV8"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV8 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV8"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV8 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV9 import ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV9, - ) + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV9 import EnhancedEnhancedHybridMetaHeuristicOptimizerV9 - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV9"] = ( - EnhancedEnhancedHybridMetaHeuristicOptimizerV9 - ) - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9" - ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9", register=True) + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV9"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV9 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9", register=True) except Exception as e: print("EnhancedEnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedMetaHeuristicOptimizerV3 import ( - EnhancedEnhancedMetaHeuristicOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedEnhancedMetaHeuristicOptimizerV3 import EnhancedEnhancedMetaHeuristicOptimizerV3 lama_register["EnhancedEnhancedMetaHeuristicOptimizerV3"] = EnhancedEnhancedMetaHeuristicOptimizerV3 - LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3" - ).set_name("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3", register=True) except Exception as e: print("EnhancedEnhancedMetaHeuristicOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( - EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP, - ) + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP import EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP - lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = ( - EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP - ) - LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" - ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) except Exception as e: print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 import ( - EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4, - ) + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 import EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 - lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4"] = ( - EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 - ) - LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 = NonObjectOptimizer( - method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4" - ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4", register=True) + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4"] = EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 + res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4").set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4", register=True) except Exception as e: print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV1 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV1, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV1 import EnhancedEvolutionaryDifferentialSwarmOptimizerV1 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV1"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV1 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV1"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV1 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV12 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV12, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV12 import EnhancedEvolutionaryDifferentialSwarmOptimizerV12 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV12"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV12 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV12"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV12 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV13 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV13, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV13 import EnhancedEvolutionaryDifferentialSwarmOptimizerV13 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV13"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV13 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV13"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV13 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV14 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV14, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV14 import EnhancedEvolutionaryDifferentialSwarmOptimizerV14 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV14"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV14 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV14"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV14 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV15 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV15, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV15 import EnhancedEvolutionaryDifferentialSwarmOptimizerV15 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV15"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV15 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV15"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV15 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV16 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV16, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV16 import EnhancedEvolutionaryDifferentialSwarmOptimizerV16 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV16"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV16 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV16"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV16 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV17 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV17, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV17 import EnhancedEvolutionaryDifferentialSwarmOptimizerV17 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV17"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV17 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV17"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV17 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV18 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV18, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV18 import EnhancedEvolutionaryDifferentialSwarmOptimizerV18 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV18"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV18 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV18"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV18 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV19 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV19, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV19 import EnhancedEvolutionaryDifferentialSwarmOptimizerV19 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV19"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV19 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV19"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV19 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV2 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV2 import EnhancedEvolutionaryDifferentialSwarmOptimizerV2 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV2"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV2 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV2"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV20 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV20, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV20 import EnhancedEvolutionaryDifferentialSwarmOptimizerV20 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV20"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV20 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV20"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV20 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV21 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV21, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV21 import EnhancedEvolutionaryDifferentialSwarmOptimizerV21 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV21"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV21 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV21"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV21 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV22 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV22, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV22 import EnhancedEvolutionaryDifferentialSwarmOptimizerV22 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV22"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV22 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV22"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV22 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV23 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV23, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV23 import EnhancedEvolutionaryDifferentialSwarmOptimizerV23 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV23"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV23 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV23"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV23 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV24 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV24, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV24 import EnhancedEvolutionaryDifferentialSwarmOptimizerV24 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV24"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV24 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV24"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV24 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV25 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV25, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV25 import EnhancedEvolutionaryDifferentialSwarmOptimizerV25 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV25"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV25 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV25"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV25 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV26 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV26, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV26 import EnhancedEvolutionaryDifferentialSwarmOptimizerV26 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV26"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV26 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV26"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV26 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV27 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV27, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV27 import EnhancedEvolutionaryDifferentialSwarmOptimizerV27 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV27"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV27 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV27"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV27 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV28 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV28, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV28 import EnhancedEvolutionaryDifferentialSwarmOptimizerV28 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV28"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV28 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV28"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV28 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV29 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV29, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV29 import EnhancedEvolutionaryDifferentialSwarmOptimizerV29 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV29"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV29 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV29"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV29 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV3 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV3 import EnhancedEvolutionaryDifferentialSwarmOptimizerV3 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV3"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV3 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV3"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV30 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV30, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV30 import EnhancedEvolutionaryDifferentialSwarmOptimizerV30 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV30"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV30 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV30"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV30 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV4 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV4 import EnhancedEvolutionaryDifferentialSwarmOptimizerV4 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV4"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV4 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV4"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV5 import ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV5 import EnhancedEvolutionaryDifferentialSwarmOptimizerV5 - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV5"] = ( - EnhancedEvolutionaryDifferentialSwarmOptimizerV5 - ) - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5" - ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5", register=True) + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV5"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV5 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5", register=True) except Exception as e: print("EnhancedEvolutionaryDifferentialSwarmOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch import ( - EnhancedEvolutionaryFireworksSearch, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch import EnhancedEvolutionaryFireworksSearch lama_register["EnhancedEvolutionaryFireworksSearch"] = EnhancedEvolutionaryFireworksSearch - LLAMAEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch").set_name("LLAMAEnhancedEvolutionaryFireworksSearch", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v2 import ( - EnhancedEvolutionaryFireworksSearch_v2, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v2 import EnhancedEvolutionaryFireworksSearch_v2 lama_register["EnhancedEvolutionaryFireworksSearch_v2"] = EnhancedEvolutionaryFireworksSearch_v2 - LLAMAEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch_v2" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v2").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v2", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v3 import ( - EnhancedEvolutionaryFireworksSearch_v3, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v3 import EnhancedEvolutionaryFireworksSearch_v3 lama_register["EnhancedEvolutionaryFireworksSearch_v3"] = EnhancedEvolutionaryFireworksSearch_v3 - LLAMAEnhancedEvolutionaryFireworksSearch_v3 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch_v3" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v3").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v3", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v4 import ( - EnhancedEvolutionaryFireworksSearch_v4, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v4 import EnhancedEvolutionaryFireworksSearch_v4 lama_register["EnhancedEvolutionaryFireworksSearch_v4"] = EnhancedEvolutionaryFireworksSearch_v4 - LLAMAEnhancedEvolutionaryFireworksSearch_v4 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch_v4" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v4 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v4").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v4", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v5 import ( - EnhancedEvolutionaryFireworksSearch_v5, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v5 import EnhancedEvolutionaryFireworksSearch_v5 lama_register["EnhancedEvolutionaryFireworksSearch_v5"] = EnhancedEvolutionaryFireworksSearch_v5 - LLAMAEnhancedEvolutionaryFireworksSearch_v5 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch_v5" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v5 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v5").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v5", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch_v5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v6 import ( - EnhancedEvolutionaryFireworksSearch_v6, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v6 import EnhancedEvolutionaryFireworksSearch_v6 lama_register["EnhancedEvolutionaryFireworksSearch_v6"] = EnhancedEvolutionaryFireworksSearch_v6 - LLAMAEnhancedEvolutionaryFireworksSearch_v6 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryFireworksSearch_v6" - ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v6 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v6").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v6", register=True) except Exception as e: print("EnhancedEvolutionaryFireworksSearch_v6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryGradientSearch import ( - EnhancedEvolutionaryGradientSearch, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryGradientSearch import EnhancedEvolutionaryGradientSearch lama_register["EnhancedEvolutionaryGradientSearch"] = EnhancedEvolutionaryGradientSearch - LLAMAEnhancedEvolutionaryGradientSearch = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryGradientSearch" - ).set_name("LLAMAEnhancedEvolutionaryGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryGradientSearch").set_name("LLAMAEnhancedEvolutionaryGradientSearch", register=True) except Exception as e: print("EnhancedEvolutionaryGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizer import ( - EnhancedEvolutionaryParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizer import EnhancedEvolutionaryParticleSwarmOptimizer lama_register["EnhancedEvolutionaryParticleSwarmOptimizer"] = EnhancedEvolutionaryParticleSwarmOptimizer - LLAMAEnhancedEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedEvolutionaryParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV2 import ( - EnhancedEvolutionaryParticleSwarmOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV2 import EnhancedEvolutionaryParticleSwarmOptimizerV2 - lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV2"] = ( - EnhancedEvolutionaryParticleSwarmOptimizerV2 - ) - LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2" - ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2", register=True) + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV2"] = EnhancedEvolutionaryParticleSwarmOptimizerV2 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2", register=True) except Exception as e: print("EnhancedEvolutionaryParticleSwarmOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV3 import ( - EnhancedEvolutionaryParticleSwarmOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV3 import EnhancedEvolutionaryParticleSwarmOptimizerV3 - lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV3"] = ( - EnhancedEvolutionaryParticleSwarmOptimizerV3 - ) - LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3" - ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3", register=True) + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV3"] = EnhancedEvolutionaryParticleSwarmOptimizerV3 + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3", register=True) except Exception as e: print("EnhancedEvolutionaryParticleSwarmOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedEvolutionaryStrategy import EnhancedEvolutionaryStrategy lama_register["EnhancedEvolutionaryStrategy"] = EnhancedEvolutionaryStrategy - LLAMAEnhancedEvolutionaryStrategy = NonObjectOptimizer( - method="LLAMAEnhancedEvolutionaryStrategy" - ).set_name("LLAMAEnhancedEvolutionaryStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedEvolutionaryStrategy = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryStrategy").set_name("LLAMAEnhancedEvolutionaryStrategy", register=True) except Exception as e: print("EnhancedEvolutionaryStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimization import ( - EnhancedExplorationGravitationalSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimization import EnhancedExplorationGravitationalSwarmOptimization - lama_register["EnhancedExplorationGravitationalSwarmOptimization"] = ( - EnhancedExplorationGravitationalSwarmOptimization - ) - LLAMAEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedExplorationGravitationalSwarmOptimization" - ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimization", register=True) + lama_register["EnhancedExplorationGravitationalSwarmOptimization"] = EnhancedExplorationGravitationalSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimization").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimization", register=True) except Exception as e: print("EnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV2 import ( - EnhancedExplorationGravitationalSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV2 import EnhancedExplorationGravitationalSwarmOptimizationV2 - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV2"] = ( - EnhancedExplorationGravitationalSwarmOptimizationV2 - ) - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2" - ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2", register=True) + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV2"] = EnhancedExplorationGravitationalSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedExplorationGravitationalSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV3 import ( - EnhancedExplorationGravitationalSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV3 import EnhancedExplorationGravitationalSwarmOptimizationV3 - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV3"] = ( - EnhancedExplorationGravitationalSwarmOptimizationV3 - ) - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3" - ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3", register=True) + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV3"] = EnhancedExplorationGravitationalSwarmOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedExplorationGravitationalSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV4 import ( - EnhancedExplorationGravitationalSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV4 import EnhancedExplorationGravitationalSwarmOptimizationV4 - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV4"] = ( - EnhancedExplorationGravitationalSwarmOptimizationV4 - ) - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4" - ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4", register=True) + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV4"] = EnhancedExplorationGravitationalSwarmOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedExplorationGravitationalSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV5 import ( - EnhancedExplorationGravitationalSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV5 import EnhancedExplorationGravitationalSwarmOptimizationV5 - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV5"] = ( - EnhancedExplorationGravitationalSwarmOptimizationV5 - ) - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5" - ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5", register=True) + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV5"] = EnhancedExplorationGravitationalSwarmOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedExplorationGravitationalSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedExplorativeHarmonicSwarmOptimizer import ( - EnhancedExplorativeHarmonicSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedExplorativeHarmonicSwarmOptimizer import EnhancedExplorativeHarmonicSwarmOptimizer lama_register["EnhancedExplorativeHarmonicSwarmOptimizer"] = EnhancedExplorativeHarmonicSwarmOptimizer - LLAMAEnhancedExplorativeHarmonicSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer" - ).set_name("LLAMAEnhancedExplorativeHarmonicSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedExplorativeHarmonicSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer").set_name("LLAMAEnhancedExplorativeHarmonicSwarmOptimizer", register=True) except Exception as e: print("EnhancedExplorativeHarmonicSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedFireworkAlgorithm import EnhancedFireworkAlgorithm lama_register["EnhancedFireworkAlgorithm"] = EnhancedFireworkAlgorithm - LLAMAEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm").set_name( - "LLAMAEnhancedFireworkAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm").set_name("LLAMAEnhancedFireworkAlgorithm", register=True) except Exception as e: print("EnhancedFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization import ( - EnhancedFireworkAlgorithmOptimization, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization import EnhancedFireworkAlgorithmOptimization lama_register["EnhancedFireworkAlgorithmOptimization"] = EnhancedFireworkAlgorithmOptimization - LLAMAEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmOptimization" - ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization").set_name("LLAMAEnhancedFireworkAlgorithmOptimization", register=True) except Exception as e: print("EnhancedFireworkAlgorithmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization_v2 import ( - EnhancedFireworkAlgorithmOptimization_v2, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization_v2 import EnhancedFireworkAlgorithmOptimization_v2 lama_register["EnhancedFireworkAlgorithmOptimization_v2"] = EnhancedFireworkAlgorithmOptimization_v2 - LLAMAEnhancedFireworkAlgorithmOptimization_v2 = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmOptimization_v2" - ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization_v2").set_name("LLAMAEnhancedFireworkAlgorithmOptimization_v2", register=True) except Exception as e: print("EnhancedFireworkAlgorithmOptimization_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( - EnhancedFireworkAlgorithmWithAdaptiveLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearch import EnhancedFireworkAlgorithmWithAdaptiveLocalSearch - lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( - EnhancedFireworkAlgorithmWithAdaptiveLocalSearch - ) - LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = EnhancedFireworkAlgorithmWithAdaptiveLocalSearch + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined import ( - EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined import EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined - lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined"] = ( - EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined - ) - LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined", register=True) + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined"] = EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveMutation import ( - EnhancedFireworkAlgorithmWithAdaptiveMutation, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveMutation import EnhancedFireworkAlgorithmWithAdaptiveMutation - lama_register["EnhancedFireworkAlgorithmWithAdaptiveMutation"] = ( - EnhancedFireworkAlgorithmWithAdaptiveMutation - ) - LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation", register=True) + lama_register["EnhancedFireworkAlgorithmWithAdaptiveMutation"] = EnhancedFireworkAlgorithmWithAdaptiveMutation + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithDynamicMutation import ( - EnhancedFireworkAlgorithmWithDynamicMutation, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithDynamicMutation import EnhancedFireworkAlgorithmWithDynamicMutation - lama_register["EnhancedFireworkAlgorithmWithDynamicMutation"] = ( - EnhancedFireworkAlgorithmWithDynamicMutation - ) - LLAMAEnhancedFireworkAlgorithmWithDynamicMutation = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation", register=True) + lama_register["EnhancedFireworkAlgorithmWithDynamicMutation"] = EnhancedFireworkAlgorithmWithDynamicMutation + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithDynamicMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithDynamicMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithHybridLocalSearch import ( - EnhancedFireworkAlgorithmWithHybridLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithHybridLocalSearch import EnhancedFireworkAlgorithmWithHybridLocalSearch - lama_register["EnhancedFireworkAlgorithmWithHybridLocalSearch"] = ( - EnhancedFireworkAlgorithmWithHybridLocalSearch - ) - LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch", register=True) + lama_register["EnhancedFireworkAlgorithmWithHybridLocalSearch"] = EnhancedFireworkAlgorithmWithHybridLocalSearch + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithHybridLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithImprovedMutation import ( - EnhancedFireworkAlgorithmWithImprovedMutation, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithImprovedMutation import EnhancedFireworkAlgorithmWithImprovedMutation - lama_register["EnhancedFireworkAlgorithmWithImprovedMutation"] = ( - EnhancedFireworkAlgorithmWithImprovedMutation - ) - LLAMAEnhancedFireworkAlgorithmWithImprovedMutation = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation", register=True) + lama_register["EnhancedFireworkAlgorithmWithImprovedMutation"] = EnhancedFireworkAlgorithmWithImprovedMutation + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithImprovedMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithImprovedMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearch import ( - EnhancedFireworkAlgorithmWithLocalSearch, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearch import EnhancedFireworkAlgorithmWithLocalSearch lama_register["EnhancedFireworkAlgorithmWithLocalSearch"] = EnhancedFireworkAlgorithmWithLocalSearch - LLAMAEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearch", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinal import ( - EnhancedFireworkAlgorithmWithLocalSearchFinal, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinal import EnhancedFireworkAlgorithmWithLocalSearchFinal - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinal"] = ( - EnhancedFireworkAlgorithmWithLocalSearchFinal - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinal"] = EnhancedFireworkAlgorithmWithLocalSearchFinal + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchFinal can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized import ( - EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized import EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized"] = ( - EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized"] = EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalRefined import ( - EnhancedFireworkAlgorithmWithLocalSearchFinalRefined, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalRefined import EnhancedFireworkAlgorithmWithLocalSearchFinalRefined - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalRefined"] = ( - EnhancedFireworkAlgorithmWithLocalSearchFinalRefined - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalRefined"] = EnhancedFireworkAlgorithmWithLocalSearchFinalRefined + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchFinalRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchImproved import ( - EnhancedFireworkAlgorithmWithLocalSearchImproved, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchImproved import EnhancedFireworkAlgorithmWithLocalSearchImproved - lama_register["EnhancedFireworkAlgorithmWithLocalSearchImproved"] = ( - EnhancedFireworkAlgorithmWithLocalSearchImproved - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchImproved"] = EnhancedFireworkAlgorithmWithLocalSearchImproved + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchOptimized import ( - EnhancedFireworkAlgorithmWithLocalSearchOptimized, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchOptimized import EnhancedFireworkAlgorithmWithLocalSearchOptimized - lama_register["EnhancedFireworkAlgorithmWithLocalSearchOptimized"] = ( - EnhancedFireworkAlgorithmWithLocalSearchOptimized - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchOptimized"] = EnhancedFireworkAlgorithmWithLocalSearchOptimized + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchRefined import ( - EnhancedFireworkAlgorithmWithLocalSearchRefined, - ) + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchRefined import EnhancedFireworkAlgorithmWithLocalSearchRefined - lama_register["EnhancedFireworkAlgorithmWithLocalSearchRefined"] = ( - EnhancedFireworkAlgorithmWithLocalSearchRefined - ) - LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined = NonObjectOptimizer( - method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined" - ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined", register=True) + lama_register["EnhancedFireworkAlgorithmWithLocalSearchRefined"] = EnhancedFireworkAlgorithmWithLocalSearchRefined + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined", register=True) except Exception as e: print("EnhancedFireworkAlgorithmWithLocalSearchRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworkSwarmOptimization import ( - EnhancedFireworkSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedFireworkSwarmOptimization import EnhancedFireworkSwarmOptimization lama_register["EnhancedFireworkSwarmOptimization"] = EnhancedFireworkSwarmOptimization - LLAMAEnhancedFireworkSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedFireworkSwarmOptimization" - ).set_name("LLAMAEnhancedFireworkSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworkSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedFireworkSwarmOptimization").set_name("LLAMAEnhancedFireworkSwarmOptimization", register=True) except Exception as e: print("EnhancedFireworkSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedFireworksAlgorithm import EnhancedFireworksAlgorithm lama_register["EnhancedFireworksAlgorithm"] = EnhancedFireworksAlgorithm - LLAMAEnhancedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm").set_name( - "LLAMAEnhancedFireworksAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm").set_name("LLAMAEnhancedFireworksAlgorithm", register=True) except Exception as e: print("EnhancedFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFireworksSwarmOptimization_v4 import ( - EnhancedFireworksSwarmOptimization_v4, - ) + from nevergrad.optimization.lama.EnhancedFireworksSwarmOptimization_v4 import EnhancedFireworksSwarmOptimization_v4 lama_register["EnhancedFireworksSwarmOptimization_v4"] = EnhancedFireworksSwarmOptimization_v4 - LLAMAEnhancedFireworksSwarmOptimization_v4 = NonObjectOptimizer( - method="LLAMAEnhancedFireworksSwarmOptimization_v4" - ).set_name("LLAMAEnhancedFireworksSwarmOptimization_v4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFireworksSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFireworksSwarmOptimization_v4 = NonObjectOptimizer(method="LLAMAEnhancedFireworksSwarmOptimization_v4").set_name("LLAMAEnhancedFireworksSwarmOptimization_v4", register=True) except Exception as e: print("EnhancedFireworksSwarmOptimization_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedFocusedBalancedAdaptivePSO import ( - EnhancedFocusedBalancedAdaptivePSO, - ) + from nevergrad.optimization.lama.EnhancedFocusedBalancedAdaptivePSO import EnhancedFocusedBalancedAdaptivePSO lama_register["EnhancedFocusedBalancedAdaptivePSO"] = EnhancedFocusedBalancedAdaptivePSO - LLAMAEnhancedFocusedBalancedAdaptivePSO = NonObjectOptimizer( - method="LLAMAEnhancedFocusedBalancedAdaptivePSO" - ).set_name("LLAMAEnhancedFocusedBalancedAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedFocusedBalancedAdaptivePSO").set_name("LLAMAEnhancedFocusedBalancedAdaptivePSO", register=True) except Exception as e: print("EnhancedFocusedBalancedAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizer import EnhancedGlobalClimbingOptimizer lama_register["EnhancedGlobalClimbingOptimizer"] = EnhancedGlobalClimbingOptimizer - LLAMAEnhancedGlobalClimbingOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedGlobalClimbingOptimizer" - ).set_name("LLAMAEnhancedGlobalClimbingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGlobalClimbingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizer").set_name("LLAMAEnhancedGlobalClimbingOptimizer", register=True) except Exception as e: print("EnhancedGlobalClimbingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizerV3 import ( - EnhancedGlobalClimbingOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizerV3 import EnhancedGlobalClimbingOptimizerV3 lama_register["EnhancedGlobalClimbingOptimizerV3"] = EnhancedGlobalClimbingOptimizerV3 - LLAMAEnhancedGlobalClimbingOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedGlobalClimbingOptimizerV3" - ).set_name("LLAMAEnhancedGlobalClimbingOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGlobalClimbingOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizerV3").set_name("LLAMAEnhancedGlobalClimbingOptimizerV3", register=True) except Exception as e: print("EnhancedGlobalClimbingOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGlobalStructureAdaptiveEvolver import ( - EnhancedGlobalStructureAdaptiveEvolver, - ) + from nevergrad.optimization.lama.EnhancedGlobalStructureAdaptiveEvolver import EnhancedGlobalStructureAdaptiveEvolver lama_register["EnhancedGlobalStructureAdaptiveEvolver"] = EnhancedGlobalStructureAdaptiveEvolver - LLAMAEnhancedGlobalStructureAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAEnhancedGlobalStructureAdaptiveEvolver" - ).set_name("LLAMAEnhancedGlobalStructureAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGlobalStructureAdaptiveEvolver = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAdaptiveEvolver").set_name("LLAMAEnhancedGlobalStructureAdaptiveEvolver", register=True) except Exception as e: print("EnhancedGlobalStructureAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGlobalStructureAwareOptimizer import ( - EnhancedGlobalStructureAwareOptimizer, - ) + from nevergrad.optimization.lama.EnhancedGlobalStructureAwareOptimizer import EnhancedGlobalStructureAwareOptimizer lama_register["EnhancedGlobalStructureAwareOptimizer"] = EnhancedGlobalStructureAwareOptimizer - LLAMAEnhancedGlobalStructureAwareOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedGlobalStructureAwareOptimizer" - ).set_name("LLAMAEnhancedGlobalStructureAwareOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAwareOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGlobalStructureAwareOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAwareOptimizer").set_name("LLAMAEnhancedGlobalStructureAwareOptimizer", register=True) except Exception as e: print("EnhancedGlobalStructureAwareOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGlobalStructureOptimizer import EnhancedGlobalStructureOptimizer lama_register["EnhancedGlobalStructureOptimizer"] = EnhancedGlobalStructureOptimizer - LLAMAEnhancedGlobalStructureOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedGlobalStructureOptimizer" - ).set_name("LLAMAEnhancedGlobalStructureOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGlobalStructureOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureOptimizer").set_name("LLAMAEnhancedGlobalStructureOptimizer", register=True) except Exception as e: print("EnhancedGlobalStructureOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGradientBoostedAnnealingWithAdaptiveMemory import ( - EnhancedGradientBoostedAnnealingWithAdaptiveMemory, - ) + from nevergrad.optimization.lama.EnhancedGradientBoostedAnnealingWithAdaptiveMemory import EnhancedGradientBoostedAnnealingWithAdaptiveMemory - lama_register["EnhancedGradientBoostedAnnealingWithAdaptiveMemory"] = ( - EnhancedGradientBoostedAnnealingWithAdaptiveMemory - ) - LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory = NonObjectOptimizer( - method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory" - ).set_name("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory", register=True) + lama_register["EnhancedGradientBoostedAnnealingWithAdaptiveMemory"] = EnhancedGradientBoostedAnnealingWithAdaptiveMemory + res = NonObjectOptimizer(method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory = NonObjectOptimizer(method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory").set_name("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory", register=True) except Exception as e: print("EnhancedGradientBoostedAnnealingWithAdaptiveMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGradientGuidedClusterSearch import ( - EnhancedGradientGuidedClusterSearch, - ) + from nevergrad.optimization.lama.EnhancedGradientGuidedClusterSearch import EnhancedGradientGuidedClusterSearch lama_register["EnhancedGradientGuidedClusterSearch"] = EnhancedGradientGuidedClusterSearch - LLAMAEnhancedGradientGuidedClusterSearch = NonObjectOptimizer( - method="LLAMAEnhancedGradientGuidedClusterSearch" - ).set_name("LLAMAEnhancedGradientGuidedClusterSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedClusterSearch").set_name("LLAMAEnhancedGradientGuidedClusterSearch", register=True) except Exception as e: print("EnhancedGradientGuidedClusterSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGradientGuidedEvolution import EnhancedGradientGuidedEvolution lama_register["EnhancedGradientGuidedEvolution"] = EnhancedGradientGuidedEvolution - LLAMAEnhancedGradientGuidedEvolution = NonObjectOptimizer( - method="LLAMAEnhancedGradientGuidedEvolution" - ).set_name("LLAMAEnhancedGradientGuidedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGradientGuidedEvolution = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedEvolution").set_name("LLAMAEnhancedGradientGuidedEvolution", register=True) except Exception as e: print("EnhancedGradientGuidedEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGradientGuidedHybridPSO import EnhancedGradientGuidedHybridPSO lama_register["EnhancedGradientGuidedHybridPSO"] = EnhancedGradientGuidedHybridPSO - LLAMAEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( - method="LLAMAEnhancedGradientGuidedHybridPSO" - ).set_name("LLAMAEnhancedGradientGuidedHybridPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedHybridPSO").set_name("LLAMAEnhancedGradientGuidedHybridPSO", register=True) except Exception as e: print("EnhancedGradientGuidedHybridPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGradualAdaptiveRAMEDS import EnhancedGradualAdaptiveRAMEDS lama_register["EnhancedGradualAdaptiveRAMEDS"] = EnhancedGradualAdaptiveRAMEDS - LLAMAEnhancedGradualAdaptiveRAMEDS = NonObjectOptimizer( - method="LLAMAEnhancedGradualAdaptiveRAMEDS" - ).set_name("LLAMAEnhancedGradualAdaptiveRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGradualAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGradualAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedGradualAdaptiveRAMEDS").set_name("LLAMAEnhancedGradualAdaptiveRAMEDS", register=True) except Exception as e: print("EnhancedGradualAdaptiveRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimization import ( - EnhancedGravitationSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimization import EnhancedGravitationSwarmOptimization lama_register["EnhancedGravitationSwarmOptimization"] = EnhancedGravitationSwarmOptimization - LLAMAEnhancedGravitationSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedGravitationSwarmOptimization" - ).set_name("LLAMAEnhancedGravitationSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimization").set_name("LLAMAEnhancedGravitationSwarmOptimization", register=True) except Exception as e: print("EnhancedGravitationSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimizationV2 import ( - EnhancedGravitationSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimizationV2 import EnhancedGravitationSwarmOptimizationV2 lama_register["EnhancedGravitationSwarmOptimizationV2"] = EnhancedGravitationSwarmOptimizationV2 - LLAMAEnhancedGravitationSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationSwarmOptimizationV2" - ).set_name("LLAMAEnhancedGravitationSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimizationV2").set_name("LLAMAEnhancedGravitationSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedGravitationSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV10 import ( - EnhancedGravitationalSwarmIntelligenceV10, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV10 import EnhancedGravitationalSwarmIntelligenceV10 lama_register["EnhancedGravitationalSwarmIntelligenceV10"] = EnhancedGravitationalSwarmIntelligenceV10 - LLAMAEnhancedGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV10" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV10 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV10").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV10", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV11 import ( - EnhancedGravitationalSwarmIntelligenceV11, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV11 import EnhancedGravitationalSwarmIntelligenceV11 lama_register["EnhancedGravitationalSwarmIntelligenceV11"] = EnhancedGravitationalSwarmIntelligenceV11 - LLAMAEnhancedGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV11" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV11 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV11").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV11", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV12 import ( - EnhancedGravitationalSwarmIntelligenceV12, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV12 import EnhancedGravitationalSwarmIntelligenceV12 lama_register["EnhancedGravitationalSwarmIntelligenceV12"] = EnhancedGravitationalSwarmIntelligenceV12 - LLAMAEnhancedGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV12" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV12 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV12").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV12", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV13 import ( - EnhancedGravitationalSwarmIntelligenceV13, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV13 import EnhancedGravitationalSwarmIntelligenceV13 lama_register["EnhancedGravitationalSwarmIntelligenceV13"] = EnhancedGravitationalSwarmIntelligenceV13 - LLAMAEnhancedGravitationalSwarmIntelligenceV13 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV13" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV13 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV13").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV13", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV14 import ( - EnhancedGravitationalSwarmIntelligenceV14, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV14 import EnhancedGravitationalSwarmIntelligenceV14 lama_register["EnhancedGravitationalSwarmIntelligenceV14"] = EnhancedGravitationalSwarmIntelligenceV14 - LLAMAEnhancedGravitationalSwarmIntelligenceV14 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV14" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV14 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV14").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV14", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV15 import ( - EnhancedGravitationalSwarmIntelligenceV15, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV15 import EnhancedGravitationalSwarmIntelligenceV15 lama_register["EnhancedGravitationalSwarmIntelligenceV15"] = EnhancedGravitationalSwarmIntelligenceV15 - LLAMAEnhancedGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV15" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV15 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV15").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV15", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV16 import ( - EnhancedGravitationalSwarmIntelligenceV16, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV16 import EnhancedGravitationalSwarmIntelligenceV16 lama_register["EnhancedGravitationalSwarmIntelligenceV16"] = EnhancedGravitationalSwarmIntelligenceV16 - LLAMAEnhancedGravitationalSwarmIntelligenceV16 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV16" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV16 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV16").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV16", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV17 import ( - EnhancedGravitationalSwarmIntelligenceV17, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV17 import EnhancedGravitationalSwarmIntelligenceV17 lama_register["EnhancedGravitationalSwarmIntelligenceV17"] = EnhancedGravitationalSwarmIntelligenceV17 - LLAMAEnhancedGravitationalSwarmIntelligenceV17 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV17" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV17 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV17").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV17", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV18 import ( - EnhancedGravitationalSwarmIntelligenceV18, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV18 import EnhancedGravitationalSwarmIntelligenceV18 lama_register["EnhancedGravitationalSwarmIntelligenceV18"] = EnhancedGravitationalSwarmIntelligenceV18 - LLAMAEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV18" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV18").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV18", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV19 import ( - EnhancedGravitationalSwarmIntelligenceV19, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV19 import EnhancedGravitationalSwarmIntelligenceV19 lama_register["EnhancedGravitationalSwarmIntelligenceV19"] = EnhancedGravitationalSwarmIntelligenceV19 - LLAMAEnhancedGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV19" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV19 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV19").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV19", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV2 import ( - EnhancedGravitationalSwarmIntelligenceV2, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV2 import EnhancedGravitationalSwarmIntelligenceV2 lama_register["EnhancedGravitationalSwarmIntelligenceV2"] = EnhancedGravitationalSwarmIntelligenceV2 - LLAMAEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV2" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV2").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV2", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV20 import ( - EnhancedGravitationalSwarmIntelligenceV20, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV20 import EnhancedGravitationalSwarmIntelligenceV20 lama_register["EnhancedGravitationalSwarmIntelligenceV20"] = EnhancedGravitationalSwarmIntelligenceV20 - LLAMAEnhancedGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV20" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV20", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV20 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV20").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV20", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV21 import ( - EnhancedGravitationalSwarmIntelligenceV21, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV21 import EnhancedGravitationalSwarmIntelligenceV21 lama_register["EnhancedGravitationalSwarmIntelligenceV21"] = EnhancedGravitationalSwarmIntelligenceV21 - LLAMAEnhancedGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV21" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV21 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV21").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV21", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV22 import ( - EnhancedGravitationalSwarmIntelligenceV22, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV22 import EnhancedGravitationalSwarmIntelligenceV22 lama_register["EnhancedGravitationalSwarmIntelligenceV22"] = EnhancedGravitationalSwarmIntelligenceV22 - LLAMAEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV22" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV22", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV22").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV22", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV23 import ( - EnhancedGravitationalSwarmIntelligenceV23, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV23 import EnhancedGravitationalSwarmIntelligenceV23 lama_register["EnhancedGravitationalSwarmIntelligenceV23"] = EnhancedGravitationalSwarmIntelligenceV23 - LLAMAEnhancedGravitationalSwarmIntelligenceV23 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV23" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV23", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV23 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV23").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV23", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV24 import ( - EnhancedGravitationalSwarmIntelligenceV24, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV24 import EnhancedGravitationalSwarmIntelligenceV24 lama_register["EnhancedGravitationalSwarmIntelligenceV24"] = EnhancedGravitationalSwarmIntelligenceV24 - LLAMAEnhancedGravitationalSwarmIntelligenceV24 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV24" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV24", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV24 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV24").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV24", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV25 import ( - EnhancedGravitationalSwarmIntelligenceV25, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV25 import EnhancedGravitationalSwarmIntelligenceV25 lama_register["EnhancedGravitationalSwarmIntelligenceV25"] = EnhancedGravitationalSwarmIntelligenceV25 - LLAMAEnhancedGravitationalSwarmIntelligenceV25 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV25" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV25", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV25 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV25").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV25", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV3 import ( - EnhancedGravitationalSwarmIntelligenceV3, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV3 import EnhancedGravitationalSwarmIntelligenceV3 lama_register["EnhancedGravitationalSwarmIntelligenceV3"] = EnhancedGravitationalSwarmIntelligenceV3 - LLAMAEnhancedGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV3" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV3").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV3", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV30 import ( - EnhancedGravitationalSwarmIntelligenceV30, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV30 import EnhancedGravitationalSwarmIntelligenceV30 lama_register["EnhancedGravitationalSwarmIntelligenceV30"] = EnhancedGravitationalSwarmIntelligenceV30 - LLAMAEnhancedGravitationalSwarmIntelligenceV30 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV30" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV30", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV30 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV30").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV30", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV31 import ( - EnhancedGravitationalSwarmIntelligenceV31, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV31 import EnhancedGravitationalSwarmIntelligenceV31 lama_register["EnhancedGravitationalSwarmIntelligenceV31"] = EnhancedGravitationalSwarmIntelligenceV31 - LLAMAEnhancedGravitationalSwarmIntelligenceV31 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV31" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV31", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV31 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV31").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV31", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV32 import ( - EnhancedGravitationalSwarmIntelligenceV32, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV32 import EnhancedGravitationalSwarmIntelligenceV32 lama_register["EnhancedGravitationalSwarmIntelligenceV32"] = EnhancedGravitationalSwarmIntelligenceV32 - LLAMAEnhancedGravitationalSwarmIntelligenceV32 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV32" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV32", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV32 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV32").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV32", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV32 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV4 import ( - EnhancedGravitationalSwarmIntelligenceV4, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV4 import EnhancedGravitationalSwarmIntelligenceV4 lama_register["EnhancedGravitationalSwarmIntelligenceV4"] = EnhancedGravitationalSwarmIntelligenceV4 - LLAMAEnhancedGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV4" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV4").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV4", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV6 import ( - EnhancedGravitationalSwarmIntelligenceV6, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV6 import EnhancedGravitationalSwarmIntelligenceV6 lama_register["EnhancedGravitationalSwarmIntelligenceV6"] = EnhancedGravitationalSwarmIntelligenceV6 - LLAMAEnhancedGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV6" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV6 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV6").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV6", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV7 import ( - EnhancedGravitationalSwarmIntelligenceV7, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV7 import EnhancedGravitationalSwarmIntelligenceV7 lama_register["EnhancedGravitationalSwarmIntelligenceV7"] = EnhancedGravitationalSwarmIntelligenceV7 - LLAMAEnhancedGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV7" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV7 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV7").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV7", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV8 import ( - EnhancedGravitationalSwarmIntelligenceV8, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV8 import EnhancedGravitationalSwarmIntelligenceV8 lama_register["EnhancedGravitationalSwarmIntelligenceV8"] = EnhancedGravitationalSwarmIntelligenceV8 - LLAMAEnhancedGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV8" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV8 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV8").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV8", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV9 import ( - EnhancedGravitationalSwarmIntelligenceV9, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV9 import EnhancedGravitationalSwarmIntelligenceV9 lama_register["EnhancedGravitationalSwarmIntelligenceV9"] = EnhancedGravitationalSwarmIntelligenceV9 - LLAMAEnhancedGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmIntelligenceV9" - ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV9 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV9").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV9", register=True) except Exception as e: print("EnhancedGravitationalSwarmIntelligenceV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDiversityPreservation import ( - EnhancedGravitationalSwarmOptimizationWithDiversityPreservation, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDiversityPreservation import EnhancedGravitationalSwarmOptimizationWithDiversityPreservation - lama_register["EnhancedGravitationalSwarmOptimizationWithDiversityPreservation"] = ( - EnhancedGravitationalSwarmOptimizationWithDiversityPreservation - ) - LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation" - ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation", register=True) + lama_register["EnhancedGravitationalSwarmOptimizationWithDiversityPreservation"] = EnhancedGravitationalSwarmOptimizationWithDiversityPreservation + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation", register=True) except Exception as e: print("EnhancedGravitationalSwarmOptimizationWithDiversityPreservation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 import ( - EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 import EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 - lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2"] = ( - EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 - ) - LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2" - ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2", register=True) + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2"] = EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2", register=True) except Exception as e: print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 import ( - EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3, - ) + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 import EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 - lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3"] = ( - EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 - ) - LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 = NonObjectOptimizer( - method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3" - ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3", register=True) + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3"] = EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3", register=True) except Exception as e: print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v62 import EnhancedGuidedMassQGSA_v62 lama_register["EnhancedGuidedMassQGSA_v62"] = EnhancedGuidedMassQGSA_v62 - LLAMAEnhancedGuidedMassQGSA_v62 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62").set_name( - "LLAMAEnhancedGuidedMassQGSA_v62", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGuidedMassQGSA_v62 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62").set_name("LLAMAEnhancedGuidedMassQGSA_v62", register=True) except Exception as e: print("EnhancedGuidedMassQGSA_v62 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v94 import EnhancedGuidedMassQGSA_v94 lama_register["EnhancedGuidedMassQGSA_v94"] = EnhancedGuidedMassQGSA_v94 - LLAMAEnhancedGuidedMassQGSA_v94 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94").set_name( - "LLAMAEnhancedGuidedMassQGSA_v94", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedGuidedMassQGSA_v94 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94").set_name("LLAMAEnhancedGuidedMassQGSA_v94", register=True) except Exception as e: print("EnhancedGuidedMassQGSA_v94 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicFireworkAlgorithm import ( - EnhancedHarmonicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedHarmonicFireworkAlgorithm import EnhancedHarmonicFireworkAlgorithm lama_register["EnhancedHarmonicFireworkAlgorithm"] = EnhancedHarmonicFireworkAlgorithm - LLAMAEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicFireworkAlgorithm" - ).set_name("LLAMAEnhancedHarmonicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHarmonicFireworkAlgorithm").set_name("LLAMAEnhancedHarmonicFireworkAlgorithm", register=True) except Exception as e: print("EnhancedHarmonicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicLevyDolphinOptimization import ( - EnhancedHarmonicLevyDolphinOptimization, - ) + from nevergrad.optimization.lama.EnhancedHarmonicLevyDolphinOptimization import EnhancedHarmonicLevyDolphinOptimization lama_register["EnhancedHarmonicLevyDolphinOptimization"] = EnhancedHarmonicLevyDolphinOptimization - LLAMAEnhancedHarmonicLevyDolphinOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicLevyDolphinOptimization" - ).set_name("LLAMAEnhancedHarmonicLevyDolphinOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicLevyDolphinOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicLevyDolphinOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonicLevyDolphinOptimization").set_name("LLAMAEnhancedHarmonicLevyDolphinOptimization", register=True) except Exception as e: print("EnhancedHarmonicLevyDolphinOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizer import EnhancedHarmonicSearchOptimizer lama_register["EnhancedHarmonicSearchOptimizer"] = EnhancedHarmonicSearchOptimizer - LLAMAEnhancedHarmonicSearchOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSearchOptimizer" - ).set_name("LLAMAEnhancedHarmonicSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizer").set_name("LLAMAEnhancedHarmonicSearchOptimizer", register=True) except Exception as e: print("EnhancedHarmonicSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV2 import ( - EnhancedHarmonicSearchOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV2 import EnhancedHarmonicSearchOptimizerV2 lama_register["EnhancedHarmonicSearchOptimizerV2"] = EnhancedHarmonicSearchOptimizerV2 - LLAMAEnhancedHarmonicSearchOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSearchOptimizerV2" - ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV2").set_name("LLAMAEnhancedHarmonicSearchOptimizerV2", register=True) except Exception as e: print("EnhancedHarmonicSearchOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV3 import ( - EnhancedHarmonicSearchOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV3 import EnhancedHarmonicSearchOptimizerV3 lama_register["EnhancedHarmonicSearchOptimizerV3"] = EnhancedHarmonicSearchOptimizerV3 - LLAMAEnhancedHarmonicSearchOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSearchOptimizerV3" - ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV3").set_name("LLAMAEnhancedHarmonicSearchOptimizerV3", register=True) except Exception as e: print("EnhancedHarmonicSearchOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV4 import ( - EnhancedHarmonicSearchOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV4 import EnhancedHarmonicSearchOptimizerV4 lama_register["EnhancedHarmonicSearchOptimizerV4"] = EnhancedHarmonicSearchOptimizerV4 - LLAMAEnhancedHarmonicSearchOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSearchOptimizerV4" - ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV4").set_name("LLAMAEnhancedHarmonicSearchOptimizerV4", register=True) except Exception as e: print("EnhancedHarmonicSearchOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV5 import ( - EnhancedHarmonicSearchOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV5 import EnhancedHarmonicSearchOptimizerV5 lama_register["EnhancedHarmonicSearchOptimizerV5"] = EnhancedHarmonicSearchOptimizerV5 - LLAMAEnhancedHarmonicSearchOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSearchOptimizerV5" - ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV5").set_name("LLAMAEnhancedHarmonicSearchOptimizerV5", register=True) except Exception as e: print("EnhancedHarmonicSearchOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimization import ( - EnhancedHarmonicSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimization import EnhancedHarmonicSwarmOptimization lama_register["EnhancedHarmonicSwarmOptimization"] = EnhancedHarmonicSwarmOptimization - LLAMAEnhancedHarmonicSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSwarmOptimization" - ).set_name("LLAMAEnhancedHarmonicSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimization").set_name("LLAMAEnhancedHarmonicSwarmOptimization", register=True) except Exception as e: print("EnhancedHarmonicSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV2 import ( - EnhancedHarmonicSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV2 import EnhancedHarmonicSwarmOptimizationV2 lama_register["EnhancedHarmonicSwarmOptimizationV2"] = EnhancedHarmonicSwarmOptimizationV2 - LLAMAEnhancedHarmonicSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSwarmOptimizationV2" - ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV2").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedHarmonicSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV3 import ( - EnhancedHarmonicSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV3 import EnhancedHarmonicSwarmOptimizationV3 lama_register["EnhancedHarmonicSwarmOptimizationV3"] = EnhancedHarmonicSwarmOptimizationV3 - LLAMAEnhancedHarmonicSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSwarmOptimizationV3" - ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV3").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedHarmonicSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV4 import ( - EnhancedHarmonicSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV4 import EnhancedHarmonicSwarmOptimizationV4 lama_register["EnhancedHarmonicSwarmOptimizationV4"] = EnhancedHarmonicSwarmOptimizationV4 - LLAMAEnhancedHarmonicSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicSwarmOptimizationV4" - ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV4").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedHarmonicSwarmOptimizationV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV11 import EnhancedHarmonicTabuSearchV11 lama_register["EnhancedHarmonicTabuSearchV11"] = EnhancedHarmonicTabuSearchV11 - LLAMAEnhancedHarmonicTabuSearchV11 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV11" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV11 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV11").set_name("LLAMAEnhancedHarmonicTabuSearchV11", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV13 import EnhancedHarmonicTabuSearchV13 lama_register["EnhancedHarmonicTabuSearchV13"] = EnhancedHarmonicTabuSearchV13 - LLAMAEnhancedHarmonicTabuSearchV13 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV13" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV13 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV13").set_name("LLAMAEnhancedHarmonicTabuSearchV13", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV14 import EnhancedHarmonicTabuSearchV14 lama_register["EnhancedHarmonicTabuSearchV14"] = EnhancedHarmonicTabuSearchV14 - LLAMAEnhancedHarmonicTabuSearchV14 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV14" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV14 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV14").set_name("LLAMAEnhancedHarmonicTabuSearchV14", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV14 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV15 import EnhancedHarmonicTabuSearchV15 lama_register["EnhancedHarmonicTabuSearchV15"] = EnhancedHarmonicTabuSearchV15 - LLAMAEnhancedHarmonicTabuSearchV15 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV15" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV15 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV15").set_name("LLAMAEnhancedHarmonicTabuSearchV15", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV16 import EnhancedHarmonicTabuSearchV16 lama_register["EnhancedHarmonicTabuSearchV16"] = EnhancedHarmonicTabuSearchV16 - LLAMAEnhancedHarmonicTabuSearchV16 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV16" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV16 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV16").set_name("LLAMAEnhancedHarmonicTabuSearchV16", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV19 import EnhancedHarmonicTabuSearchV19 lama_register["EnhancedHarmonicTabuSearchV19"] = EnhancedHarmonicTabuSearchV19 - LLAMAEnhancedHarmonicTabuSearchV19 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonicTabuSearchV19" - ).set_name("LLAMAEnhancedHarmonicTabuSearchV19", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonicTabuSearchV19 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV19").set_name("LLAMAEnhancedHarmonicTabuSearchV19", register=True) except Exception as e: print("EnhancedHarmonicTabuSearchV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyDiversifiedCuckooAlgorithm import ( - EnhancedHarmonyDiversifiedCuckooAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedHarmonyDiversifiedCuckooAlgorithm import EnhancedHarmonyDiversifiedCuckooAlgorithm lama_register["EnhancedHarmonyDiversifiedCuckooAlgorithm"] = EnhancedHarmonyDiversifiedCuckooAlgorithm - LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm" - ).set_name("LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm").set_name("LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm", register=True) except Exception as e: print("EnhancedHarmonyDiversifiedCuckooAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyFireworkOptimizer import EnhancedHarmonyFireworkOptimizer lama_register["EnhancedHarmonyFireworkOptimizer"] = EnhancedHarmonyFireworkOptimizer - LLAMAEnhancedHarmonyFireworkOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyFireworkOptimizer" - ).set_name("LLAMAEnhancedHarmonyFireworkOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHarmonyFireworkOptimizer").set_name("LLAMAEnhancedHarmonyFireworkOptimizer", register=True) except Exception as e: print("EnhancedHarmonyFireworkOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV2 import ( - EnhancedHarmonyMemeticAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV2 import EnhancedHarmonyMemeticAlgorithmV2 lama_register["EnhancedHarmonyMemeticAlgorithmV2"] = EnhancedHarmonyMemeticAlgorithmV2 - LLAMAEnhancedHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticAlgorithmV2" - ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV2").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV2", register=True) except Exception as e: print("EnhancedHarmonyMemeticAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV3 import ( - EnhancedHarmonyMemeticAlgorithmV3, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV3 import EnhancedHarmonyMemeticAlgorithmV3 lama_register["EnhancedHarmonyMemeticAlgorithmV3"] = EnhancedHarmonyMemeticAlgorithmV3 - LLAMAEnhancedHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticAlgorithmV3" - ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV3").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV3", register=True) except Exception as e: print("EnhancedHarmonyMemeticAlgorithmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV4 import ( - EnhancedHarmonyMemeticAlgorithmV4, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV4 import EnhancedHarmonyMemeticAlgorithmV4 lama_register["EnhancedHarmonyMemeticAlgorithmV4"] = EnhancedHarmonyMemeticAlgorithmV4 - LLAMAEnhancedHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticAlgorithmV4" - ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV4").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV4", register=True) except Exception as e: print("EnhancedHarmonyMemeticAlgorithmV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV10 import ( - EnhancedHarmonyMemeticOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV10 import EnhancedHarmonyMemeticOptimizationV10 lama_register["EnhancedHarmonyMemeticOptimizationV10"] = EnhancedHarmonyMemeticOptimizationV10 - LLAMAEnhancedHarmonyMemeticOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV10" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV10").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV10", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV11 import ( - EnhancedHarmonyMemeticOptimizationV11, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV11 import EnhancedHarmonyMemeticOptimizationV11 lama_register["EnhancedHarmonyMemeticOptimizationV11"] = EnhancedHarmonyMemeticOptimizationV11 - LLAMAEnhancedHarmonyMemeticOptimizationV11 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV11" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV11").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV11", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV12 import ( - EnhancedHarmonyMemeticOptimizationV12, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV12 import EnhancedHarmonyMemeticOptimizationV12 lama_register["EnhancedHarmonyMemeticOptimizationV12"] = EnhancedHarmonyMemeticOptimizationV12 - LLAMAEnhancedHarmonyMemeticOptimizationV12 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV12" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV12").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV12", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV13 import ( - EnhancedHarmonyMemeticOptimizationV13, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV13 import EnhancedHarmonyMemeticOptimizationV13 lama_register["EnhancedHarmonyMemeticOptimizationV13"] = EnhancedHarmonyMemeticOptimizationV13 - LLAMAEnhancedHarmonyMemeticOptimizationV13 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV13" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV13").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV13", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV14 import ( - EnhancedHarmonyMemeticOptimizationV14, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV14 import EnhancedHarmonyMemeticOptimizationV14 lama_register["EnhancedHarmonyMemeticOptimizationV14"] = EnhancedHarmonyMemeticOptimizationV14 - LLAMAEnhancedHarmonyMemeticOptimizationV14 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV14" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV14", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV14").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV14", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV15 import ( - EnhancedHarmonyMemeticOptimizationV15, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV15 import EnhancedHarmonyMemeticOptimizationV15 lama_register["EnhancedHarmonyMemeticOptimizationV15"] = EnhancedHarmonyMemeticOptimizationV15 - LLAMAEnhancedHarmonyMemeticOptimizationV15 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV15" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV15").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV15", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV16 import ( - EnhancedHarmonyMemeticOptimizationV16, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV16 import EnhancedHarmonyMemeticOptimizationV16 lama_register["EnhancedHarmonyMemeticOptimizationV16"] = EnhancedHarmonyMemeticOptimizationV16 - LLAMAEnhancedHarmonyMemeticOptimizationV16 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV16" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV16", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV16").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV16", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV17 import ( - EnhancedHarmonyMemeticOptimizationV17, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV17 import EnhancedHarmonyMemeticOptimizationV17 lama_register["EnhancedHarmonyMemeticOptimizationV17"] = EnhancedHarmonyMemeticOptimizationV17 - LLAMAEnhancedHarmonyMemeticOptimizationV17 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV17" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV17", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV17").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV17", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV34 import ( - EnhancedHarmonyMemeticOptimizationV34, - ) + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV34 import EnhancedHarmonyMemeticOptimizationV34 lama_register["EnhancedHarmonyMemeticOptimizationV34"] = EnhancedHarmonyMemeticOptimizationV34 - LLAMAEnhancedHarmonyMemeticOptimizationV34 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticOptimizationV34" - ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV34", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticOptimizationV34 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV34").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV34", register=True) except Exception as e: print("EnhancedHarmonyMemeticOptimizationV34 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearch import EnhancedHarmonyMemeticSearch lama_register["EnhancedHarmonyMemeticSearch"] = EnhancedHarmonyMemeticSearch - LLAMAEnhancedHarmonyMemeticSearch = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticSearch" - ).set_name("LLAMAEnhancedHarmonyMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearch").set_name("LLAMAEnhancedHarmonyMemeticSearch", register=True) except Exception as e: print("EnhancedHarmonyMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV2 import EnhancedHarmonyMemeticSearchV2 lama_register["EnhancedHarmonyMemeticSearchV2"] = EnhancedHarmonyMemeticSearchV2 - LLAMAEnhancedHarmonyMemeticSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticSearchV2" - ).set_name("LLAMAEnhancedHarmonyMemeticSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV2").set_name("LLAMAEnhancedHarmonyMemeticSearchV2", register=True) except Exception as e: print("EnhancedHarmonyMemeticSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV3 import EnhancedHarmonyMemeticSearchV3 lama_register["EnhancedHarmonyMemeticSearchV3"] = EnhancedHarmonyMemeticSearchV3 - LLAMAEnhancedHarmonyMemeticSearchV3 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyMemeticSearchV3" - ).set_name("LLAMAEnhancedHarmonyMemeticSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyMemeticSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV3").set_name("LLAMAEnhancedHarmonyMemeticSearchV3", register=True) except Exception as e: print("EnhancedHarmonyMemeticSearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonySearchOB import EnhancedHarmonySearchOB lama_register["EnhancedHarmonySearchOB"] = EnhancedHarmonySearchOB - LLAMAEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB").set_name( - "LLAMAEnhancedHarmonySearchOB", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB").set_name("LLAMAEnhancedHarmonySearchOB", register=True) except Exception as e: print("EnhancedHarmonySearchOB can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( - EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, - ) + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( - EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - ) - LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" - ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) except Exception as e: print("EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightV2 import ( - EnhancedHarmonySearchWithAdaptiveLevyFlightV2, - ) + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightV2 import EnhancedHarmonySearchWithAdaptiveLevyFlightV2 - lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightV2"] = ( - EnhancedHarmonySearchWithAdaptiveLevyFlightV2 - ) - LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2" - ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2", register=True) + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightV2"] = EnhancedHarmonySearchWithAdaptiveLevyFlightV2 + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2").set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2", register=True) except Exception as e: print("EnhancedHarmonySearchWithAdaptiveLevyFlightV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimization import EnhancedHarmonyTabuOptimization lama_register["EnhancedHarmonyTabuOptimization"] = EnhancedHarmonyTabuOptimization - LLAMAEnhancedHarmonyTabuOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyTabuOptimization" - ).set_name("LLAMAEnhancedHarmonyTabuOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimization").set_name("LLAMAEnhancedHarmonyTabuOptimization", register=True) except Exception as e: print("EnhancedHarmonyTabuOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV2 import ( - EnhancedHarmonyTabuOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV2 import EnhancedHarmonyTabuOptimizationV2 lama_register["EnhancedHarmonyTabuOptimizationV2"] = EnhancedHarmonyTabuOptimizationV2 - LLAMAEnhancedHarmonyTabuOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyTabuOptimizationV2" - ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV2").set_name("LLAMAEnhancedHarmonyTabuOptimizationV2", register=True) except Exception as e: print("EnhancedHarmonyTabuOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV3 import ( - EnhancedHarmonyTabuOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV3 import EnhancedHarmonyTabuOptimizationV3 lama_register["EnhancedHarmonyTabuOptimizationV3"] = EnhancedHarmonyTabuOptimizationV3 - LLAMAEnhancedHarmonyTabuOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedHarmonyTabuOptimizationV3" - ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV3").set_name("LLAMAEnhancedHarmonyTabuOptimizationV3", register=True) except Exception as e: print("EnhancedHarmonyTabuOptimizationV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearch import EnhancedHarmonyTabuSearch lama_register["EnhancedHarmonyTabuSearch"] = EnhancedHarmonyTabuSearch - LLAMAEnhancedHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch").set_name( - "LLAMAEnhancedHarmonyTabuSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch").set_name("LLAMAEnhancedHarmonyTabuSearch", register=True) except Exception as e: print("EnhancedHarmonyTabuSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV2 import EnhancedHarmonyTabuSearchV2 lama_register["EnhancedHarmonyTabuSearchV2"] = EnhancedHarmonyTabuSearchV2 - LLAMAEnhancedHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2").set_name( - "LLAMAEnhancedHarmonyTabuSearchV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2").set_name("LLAMAEnhancedHarmonyTabuSearchV2", register=True) except Exception as e: print("EnhancedHarmonyTabuSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV3 import EnhancedHarmonyTabuSearchV3 lama_register["EnhancedHarmonyTabuSearchV3"] = EnhancedHarmonyTabuSearchV3 - LLAMAEnhancedHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3").set_name( - "LLAMAEnhancedHarmonyTabuSearchV3", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3").set_name("LLAMAEnhancedHarmonyTabuSearchV3", register=True) except Exception as e: print("EnhancedHarmonyTabuSearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV4 import EnhancedHarmonyTabuSearchV4 lama_register["EnhancedHarmonyTabuSearchV4"] = EnhancedHarmonyTabuSearchV4 - LLAMAEnhancedHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4").set_name( - "LLAMAEnhancedHarmonyTabuSearchV4", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4").set_name("LLAMAEnhancedHarmonyTabuSearchV4", register=True) except Exception as e: print("EnhancedHarmonyTabuSearchV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV6 import EnhancedHarmonyTabuSearchV6 lama_register["EnhancedHarmonyTabuSearchV6"] = EnhancedHarmonyTabuSearchV6 - LLAMAEnhancedHarmonyTabuSearchV6 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6").set_name( - "LLAMAEnhancedHarmonyTabuSearchV6", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearchV6 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6").set_name("LLAMAEnhancedHarmonyTabuSearchV6", register=True) except Exception as e: print("EnhancedHarmonyTabuSearchV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV7 import EnhancedHarmonyTabuSearchV7 lama_register["EnhancedHarmonyTabuSearchV7"] = EnhancedHarmonyTabuSearchV7 - LLAMAEnhancedHarmonyTabuSearchV7 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7").set_name( - "LLAMAEnhancedHarmonyTabuSearchV7", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHarmonyTabuSearchV7 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7").set_name("LLAMAEnhancedHarmonyTabuSearchV7", register=True) except Exception as e: print("EnhancedHarmonyTabuSearchV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHierarchicalCovarianceMatrixAdaptation import ( - EnhancedHierarchicalCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.EnhancedHierarchicalCovarianceMatrixAdaptation import EnhancedHierarchicalCovarianceMatrixAdaptation - lama_register["EnhancedHierarchicalCovarianceMatrixAdaptation"] = ( - EnhancedHierarchicalCovarianceMatrixAdaptation - ) - LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation" - ).set_name("LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation", register=True) + lama_register["EnhancedHierarchicalCovarianceMatrixAdaptation"] = EnhancedHierarchicalCovarianceMatrixAdaptation + res = NonObjectOptimizer(method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation").set_name("LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation", register=True) except Exception as e: print("EnhancedHierarchicalCovarianceMatrixAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveDifferentialEvolution import ( - EnhancedHybridAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveDifferentialEvolution import EnhancedHybridAdaptiveDifferentialEvolution lama_register["EnhancedHybridAdaptiveDifferentialEvolution"] = EnhancedHybridAdaptiveDifferentialEvolution - LLAMAEnhancedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedHybridAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedHybridAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedHybridAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveExplorationOptimizer import ( - EnhancedHybridAdaptiveExplorationOptimizer, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveExplorationOptimizer import EnhancedHybridAdaptiveExplorationOptimizer lama_register["EnhancedHybridAdaptiveExplorationOptimizer"] = EnhancedHybridAdaptiveExplorationOptimizer - LLAMAEnhancedHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer" - ).set_name("LLAMAEnhancedHybridAdaptiveExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer").set_name("LLAMAEnhancedHybridAdaptiveExplorationOptimizer", register=True) except Exception as e: print("EnhancedHybridAdaptiveExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveGeneticSwarmOptimizer import ( - EnhancedHybridAdaptiveGeneticSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveGeneticSwarmOptimizer import EnhancedHybridAdaptiveGeneticSwarmOptimizer lama_register["EnhancedHybridAdaptiveGeneticSwarmOptimizer"] = EnhancedHybridAdaptiveGeneticSwarmOptimizer - LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer" - ).set_name("LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer", register=True) except Exception as e: print("EnhancedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveHarmonicFireworksTabuSearch import ( - EnhancedHybridAdaptiveHarmonicFireworksTabuSearch, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveHarmonicFireworksTabuSearch import EnhancedHybridAdaptiveHarmonicFireworksTabuSearch - lama_register["EnhancedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( - EnhancedHybridAdaptiveHarmonicFireworksTabuSearch - ) - LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch" - ).set_name("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) + lama_register["EnhancedHybridAdaptiveHarmonicFireworksTabuSearch"] = EnhancedHybridAdaptiveHarmonicFireworksTabuSearch + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) except Exception as e: print("EnhancedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMemoryAnnealing import ( - EnhancedHybridAdaptiveMemoryAnnealing, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMemoryAnnealing import EnhancedHybridAdaptiveMemoryAnnealing lama_register["EnhancedHybridAdaptiveMemoryAnnealing"] = EnhancedHybridAdaptiveMemoryAnnealing - LLAMAEnhancedHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing" - ).set_name("LLAMAEnhancedHybridAdaptiveMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing").set_name("LLAMAEnhancedHybridAdaptiveMemoryAnnealing", register=True) except Exception as e: print("EnhancedHybridAdaptiveMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiPhaseEvolution import ( - EnhancedHybridAdaptiveMultiPhaseEvolution, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiPhaseEvolution import EnhancedHybridAdaptiveMultiPhaseEvolution lama_register["EnhancedHybridAdaptiveMultiPhaseEvolution"] = EnhancedHybridAdaptiveMultiPhaseEvolution - LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution" - ).set_name("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution").set_name("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution", register=True) except Exception as e: print("EnhancedHybridAdaptiveMultiPhaseEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiStageOptimization import ( - EnhancedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiStageOptimization import EnhancedHybridAdaptiveMultiStageOptimization - lama_register["EnhancedHybridAdaptiveMultiStageOptimization"] = ( - EnhancedHybridAdaptiveMultiStageOptimization - ) - LLAMAEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMAEnhancedHybridAdaptiveMultiStageOptimization", register=True) + lama_register["EnhancedHybridAdaptiveMultiStageOptimization"] = EnhancedHybridAdaptiveMultiStageOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization").set_name("LLAMAEnhancedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("EnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveQuantumOptimizer import ( - EnhancedHybridAdaptiveQuantumOptimizer, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveQuantumOptimizer import EnhancedHybridAdaptiveQuantumOptimizer lama_register["EnhancedHybridAdaptiveQuantumOptimizer"] = EnhancedHybridAdaptiveQuantumOptimizer - LLAMAEnhancedHybridAdaptiveQuantumOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer" - ).set_name("LLAMAEnhancedHybridAdaptiveQuantumOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveQuantumOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer").set_name("LLAMAEnhancedHybridAdaptiveQuantumOptimizer", register=True) except Exception as e: print("EnhancedHybridAdaptiveQuantumOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridAdaptiveSearch import EnhancedHybridAdaptiveSearch lama_register["EnhancedHybridAdaptiveSearch"] = EnhancedHybridAdaptiveSearch - LLAMAEnhancedHybridAdaptiveSearch = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveSearch" - ).set_name("LLAMAEnhancedHybridAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSearch").set_name("LLAMAEnhancedHybridAdaptiveSearch", register=True) except Exception as e: print("EnhancedHybridAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution import ( - EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution import EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution - lama_register["EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( - EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution - ) - LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) + lama_register["EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution"] = EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridCMAESDE import EnhancedHybridCMAESDE lama_register["EnhancedHybridCMAESDE"] = EnhancedHybridCMAESDE - LLAMAEnhancedHybridCMAESDE = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE").set_name( - "LLAMAEnhancedHybridCMAESDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridCMAESDE = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE").set_name("LLAMAEnhancedHybridCMAESDE", register=True) except Exception as e: print("EnhancedHybridCMAESDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridCovarianceMatrixDifferentialEvolution import ( - EnhancedHybridCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedHybridCovarianceMatrixDifferentialEvolution import EnhancedHybridCovarianceMatrixDifferentialEvolution - lama_register["EnhancedHybridCovarianceMatrixDifferentialEvolution"] = ( - EnhancedHybridCovarianceMatrixDifferentialEvolution - ) - LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution", register=True) + lama_register["EnhancedHybridCovarianceMatrixDifferentialEvolution"] = EnhancedHybridCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("EnhancedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOWithDynamicAdaptationV4 import ( - EnhancedHybridDEPSOWithDynamicAdaptationV4, - ) + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithDynamicAdaptationV4 import EnhancedHybridDEPSOWithDynamicAdaptationV4 lama_register["EnhancedHybridDEPSOWithDynamicAdaptationV4"] = EnhancedHybridDEPSOWithDynamicAdaptationV4 - LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4 = NonObjectOptimizer( - method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4" - ).set_name("LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4 = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4").set_name("LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4", register=True) except Exception as e: print("EnhancedHybridDEPSOWithDynamicAdaptationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOWithQuantumLevyFlight import ( - EnhancedHybridDEPSOWithQuantumLevyFlight, - ) + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithQuantumLevyFlight import EnhancedHybridDEPSOWithQuantumLevyFlight lama_register["EnhancedHybridDEPSOWithQuantumLevyFlight"] = EnhancedHybridDEPSOWithQuantumLevyFlight - LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight = NonObjectOptimizer( - method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight" - ).set_name("LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight").set_name("LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight", register=True) except Exception as e: print("EnhancedHybridDEPSOWithQuantumLevyFlight can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOwithAdaptiveRestart import ( - EnhancedHybridDEPSOwithAdaptiveRestart, - ) + from nevergrad.optimization.lama.EnhancedHybridDEPSOwithAdaptiveRestart import EnhancedHybridDEPSOwithAdaptiveRestart lama_register["EnhancedHybridDEPSOwithAdaptiveRestart"] = EnhancedHybridDEPSOwithAdaptiveRestart - LLAMAEnhancedHybridDEPSOwithAdaptiveRestart = NonObjectOptimizer( - method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart" - ).set_name("LLAMAEnhancedHybridDEPSOwithAdaptiveRestart", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridDEPSOwithAdaptiveRestart = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart").set_name("LLAMAEnhancedHybridDEPSOwithAdaptiveRestart", register=True) except Exception as e: print("EnhancedHybridDEPSOwithAdaptiveRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridDifferentialEvolutionMemeticOptimizer import ( - EnhancedHybridDifferentialEvolutionMemeticOptimizer, - ) + from nevergrad.optimization.lama.EnhancedHybridDifferentialEvolutionMemeticOptimizer import EnhancedHybridDifferentialEvolutionMemeticOptimizer - lama_register["EnhancedHybridDifferentialEvolutionMemeticOptimizer"] = ( - EnhancedHybridDifferentialEvolutionMemeticOptimizer - ) - LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer" - ).set_name("LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer", register=True) + lama_register["EnhancedHybridDifferentialEvolutionMemeticOptimizer"] = EnhancedHybridDifferentialEvolutionMemeticOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer").set_name("LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer", register=True) except Exception as e: print("EnhancedHybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridDynamicAdaptiveExplorationOptimization import ( - EnhancedHybridDynamicAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedHybridDynamicAdaptiveExplorationOptimization import EnhancedHybridDynamicAdaptiveExplorationOptimization - lama_register["EnhancedHybridDynamicAdaptiveExplorationOptimization"] = ( - EnhancedHybridDynamicAdaptiveExplorationOptimization - ) - LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization" - ).set_name("LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization", register=True) + lama_register["EnhancedHybridDynamicAdaptiveExplorationOptimization"] = EnhancedHybridDynamicAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization").set_name("LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization", register=True) except Exception as e: print("EnhancedHybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridExplorationOptimization import ( - EnhancedHybridExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedHybridExplorationOptimization import EnhancedHybridExplorationOptimization lama_register["EnhancedHybridExplorationOptimization"] = EnhancedHybridExplorationOptimization - LLAMAEnhancedHybridExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHybridExplorationOptimization" - ).set_name("LLAMAEnhancedHybridExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridExplorationOptimization").set_name("LLAMAEnhancedHybridExplorationOptimization", register=True) except Exception as e: print("EnhancedHybridExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridGradientAnnealingWithMemory import ( - EnhancedHybridGradientAnnealingWithMemory, - ) + from nevergrad.optimization.lama.EnhancedHybridGradientAnnealingWithMemory import EnhancedHybridGradientAnnealingWithMemory lama_register["EnhancedHybridGradientAnnealingWithMemory"] = EnhancedHybridGradientAnnealingWithMemory - LLAMAEnhancedHybridGradientAnnealingWithMemory = NonObjectOptimizer( - method="LLAMAEnhancedHybridGradientAnnealingWithMemory" - ).set_name("LLAMAEnhancedHybridGradientAnnealingWithMemory", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridGradientAnnealingWithMemory = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientAnnealingWithMemory").set_name("LLAMAEnhancedHybridGradientAnnealingWithMemory", register=True) except Exception as e: print("EnhancedHybridGradientAnnealingWithMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridGradientBasedStrategyV8 import ( - EnhancedHybridGradientBasedStrategyV8, - ) + from nevergrad.optimization.lama.EnhancedHybridGradientBasedStrategyV8 import EnhancedHybridGradientBasedStrategyV8 lama_register["EnhancedHybridGradientBasedStrategyV8"] = EnhancedHybridGradientBasedStrategyV8 - LLAMAEnhancedHybridGradientBasedStrategyV8 = NonObjectOptimizer( - method="LLAMAEnhancedHybridGradientBasedStrategyV8" - ).set_name("LLAMAEnhancedHybridGradientBasedStrategyV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientBasedStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridGradientBasedStrategyV8 = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientBasedStrategyV8").set_name("LLAMAEnhancedHybridGradientBasedStrategyV8", register=True) except Exception as e: print("EnhancedHybridGradientBasedStrategyV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridGradientPSO import EnhancedHybridGradientPSO lama_register["EnhancedHybridGradientPSO"] = EnhancedHybridGradientPSO - LLAMAEnhancedHybridGradientPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO").set_name( - "LLAMAEnhancedHybridGradientPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridGradientPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO").set_name("LLAMAEnhancedHybridGradientPSO", register=True) except Exception as e: print("EnhancedHybridGradientPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridHarmonySearchWithAdaptiveMutationV20 import ( - EnhancedHybridHarmonySearchWithAdaptiveMutationV20, - ) + from nevergrad.optimization.lama.EnhancedHybridHarmonySearchWithAdaptiveMutationV20 import EnhancedHybridHarmonySearchWithAdaptiveMutationV20 - lama_register["EnhancedHybridHarmonySearchWithAdaptiveMutationV20"] = ( - EnhancedHybridHarmonySearchWithAdaptiveMutationV20 - ) - LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20 = NonObjectOptimizer( - method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20" - ).set_name("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20", register=True) + lama_register["EnhancedHybridHarmonySearchWithAdaptiveMutationV20"] = EnhancedHybridHarmonySearchWithAdaptiveMutationV20 + res = NonObjectOptimizer(method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20 = NonObjectOptimizer(method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20").set_name("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20", register=True) except Exception as e: print("EnhancedHybridHarmonySearchWithAdaptiveMutationV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridMemoryAdaptiveDE import EnhancedHybridMemoryAdaptiveDE lama_register["EnhancedHybridMemoryAdaptiveDE"] = EnhancedHybridMemoryAdaptiveDE - LLAMAEnhancedHybridMemoryAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedHybridMemoryAdaptiveDE" - ).set_name("LLAMAEnhancedHybridMemoryAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryAdaptiveDE").set_name("LLAMAEnhancedHybridMemoryAdaptiveDE", register=True) except Exception as e: print("EnhancedHybridMemoryAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridMemoryPSO import EnhancedHybridMemoryPSO lama_register["EnhancedHybridMemoryPSO"] = EnhancedHybridMemoryPSO - LLAMAEnhancedHybridMemoryPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO").set_name( - "LLAMAEnhancedHybridMemoryPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMemoryPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO").set_name("LLAMAEnhancedHybridMemoryPSO", register=True) except Exception as e: print("EnhancedHybridMemoryPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizer import ( - EnhancedHybridMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizer import EnhancedHybridMetaHeuristicOptimizer lama_register["EnhancedHybridMetaHeuristicOptimizer"] = EnhancedHybridMetaHeuristicOptimizer - LLAMAEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizer" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizer", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV10 import ( - EnhancedHybridMetaHeuristicOptimizerV10, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV10 import EnhancedHybridMetaHeuristicOptimizerV10 lama_register["EnhancedHybridMetaHeuristicOptimizerV10"] = EnhancedHybridMetaHeuristicOptimizerV10 - LLAMAEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV10", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV11 import ( - EnhancedHybridMetaHeuristicOptimizerV11, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV11 import EnhancedHybridMetaHeuristicOptimizerV11 lama_register["EnhancedHybridMetaHeuristicOptimizerV11"] = EnhancedHybridMetaHeuristicOptimizerV11 - LLAMAEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV11", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV12 import ( - EnhancedHybridMetaHeuristicOptimizerV12, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV12 import EnhancedHybridMetaHeuristicOptimizerV12 lama_register["EnhancedHybridMetaHeuristicOptimizerV12"] = EnhancedHybridMetaHeuristicOptimizerV12 - LLAMAEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV12", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV15 import ( - EnhancedHybridMetaHeuristicOptimizerV15, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV15 import EnhancedHybridMetaHeuristicOptimizerV15 lama_register["EnhancedHybridMetaHeuristicOptimizerV15"] = EnhancedHybridMetaHeuristicOptimizerV15 - LLAMAEnhancedHybridMetaHeuristicOptimizerV15 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV15", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV15 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV15", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV2 import ( - EnhancedHybridMetaHeuristicOptimizerV2, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV2 import EnhancedHybridMetaHeuristicOptimizerV2 lama_register["EnhancedHybridMetaHeuristicOptimizerV2"] = EnhancedHybridMetaHeuristicOptimizerV2 - LLAMAEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV2", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV3 import ( - EnhancedHybridMetaHeuristicOptimizerV3, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV3 import EnhancedHybridMetaHeuristicOptimizerV3 lama_register["EnhancedHybridMetaHeuristicOptimizerV3"] = EnhancedHybridMetaHeuristicOptimizerV3 - LLAMAEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV3", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV4 import ( - EnhancedHybridMetaHeuristicOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV4 import EnhancedHybridMetaHeuristicOptimizerV4 lama_register["EnhancedHybridMetaHeuristicOptimizerV4"] = EnhancedHybridMetaHeuristicOptimizerV4 - LLAMAEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV4", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV5 import ( - EnhancedHybridMetaHeuristicOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV5 import EnhancedHybridMetaHeuristicOptimizerV5 lama_register["EnhancedHybridMetaHeuristicOptimizerV5"] = EnhancedHybridMetaHeuristicOptimizerV5 - LLAMAEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV5", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV6 import ( - EnhancedHybridMetaHeuristicOptimizerV6, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV6 import EnhancedHybridMetaHeuristicOptimizerV6 lama_register["EnhancedHybridMetaHeuristicOptimizerV6"] = EnhancedHybridMetaHeuristicOptimizerV6 - LLAMAEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV6", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV7 import ( - EnhancedHybridMetaHeuristicOptimizerV7, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV7 import EnhancedHybridMetaHeuristicOptimizerV7 lama_register["EnhancedHybridMetaHeuristicOptimizerV7"] = EnhancedHybridMetaHeuristicOptimizerV7 - LLAMAEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV7", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV8 import ( - EnhancedHybridMetaHeuristicOptimizerV8, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV8 import EnhancedHybridMetaHeuristicOptimizerV8 lama_register["EnhancedHybridMetaHeuristicOptimizerV8"] = EnhancedHybridMetaHeuristicOptimizerV8 - LLAMAEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV8", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV9 import ( - EnhancedHybridMetaHeuristicOptimizerV9, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV9 import EnhancedHybridMetaHeuristicOptimizerV9 lama_register["EnhancedHybridMetaHeuristicOptimizerV9"] = EnhancedHybridMetaHeuristicOptimizerV9 - LLAMAEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9" - ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV9", register=True) except Exception as e: print("EnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithm import ( - EnhancedHybridMetaOptimizationAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithm import EnhancedHybridMetaOptimizationAlgorithm lama_register["EnhancedHybridMetaOptimizationAlgorithm"] = EnhancedHybridMetaOptimizationAlgorithm - LLAMAEnhancedHybridMetaOptimizationAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaOptimizationAlgorithm" - ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithm").set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithm", register=True) except Exception as e: print("EnhancedHybridMetaOptimizationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithmV2 import ( - EnhancedHybridMetaOptimizationAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithmV2 import EnhancedHybridMetaOptimizationAlgorithmV2 lama_register["EnhancedHybridMetaOptimizationAlgorithmV2"] = EnhancedHybridMetaOptimizationAlgorithmV2 - LLAMAEnhancedHybridMetaOptimizationAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2" - ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridMetaOptimizationAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2").set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithmV2", register=True) except Exception as e: print("EnhancedHybridMetaOptimizationAlgorithmV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridOptimization import EnhancedHybridOptimization lama_register["EnhancedHybridOptimization"] = EnhancedHybridOptimization - LLAMAEnhancedHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization").set_name( - "LLAMAEnhancedHybridOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization").set_name("LLAMAEnhancedHybridOptimization", register=True) except Exception as e: print("EnhancedHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridOptimizer import EnhancedHybridOptimizer lama_register["EnhancedHybridOptimizer"] = EnhancedHybridOptimizer - LLAMAEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer").set_name( - "LLAMAEnhancedHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer").set_name("LLAMAEnhancedHybridOptimizer", register=True) except Exception as e: print("EnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridQuantumDifferentialPSO import ( - EnhancedHybridQuantumDifferentialPSO, - ) + from nevergrad.optimization.lama.EnhancedHybridQuantumDifferentialPSO import EnhancedHybridQuantumDifferentialPSO lama_register["EnhancedHybridQuantumDifferentialPSO"] = EnhancedHybridQuantumDifferentialPSO - LLAMAEnhancedHybridQuantumDifferentialPSO = NonObjectOptimizer( - method="LLAMAEnhancedHybridQuantumDifferentialPSO" - ).set_name("LLAMAEnhancedHybridQuantumDifferentialPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuantumDifferentialPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridQuantumDifferentialPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridQuantumDifferentialPSO").set_name("LLAMAEnhancedHybridQuantumDifferentialPSO", register=True) except Exception as e: print("EnhancedHybridQuantumDifferentialPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridQuasiRandomGradientDifferentialEvolution import ( - EnhancedHybridQuasiRandomGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedHybridQuasiRandomGradientDifferentialEvolution import EnhancedHybridQuasiRandomGradientDifferentialEvolution - lama_register["EnhancedHybridQuasiRandomGradientDifferentialEvolution"] = ( - EnhancedHybridQuasiRandomGradientDifferentialEvolution - ) - LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution" - ).set_name("LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution", register=True) + lama_register["EnhancedHybridQuasiRandomGradientDifferentialEvolution"] = EnhancedHybridQuasiRandomGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution").set_name("LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution", register=True) except Exception as e: print("EnhancedHybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHybridSearch import EnhancedHybridSearch lama_register["EnhancedHybridSearch"] = EnhancedHybridSearch - LLAMAEnhancedHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch").set_name( - "LLAMAEnhancedHybridSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch").set_name("LLAMAEnhancedHybridSearch", register=True) except Exception as e: print("EnhancedHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHybridSimulatedAnnealingOptimization import ( - EnhancedHybridSimulatedAnnealingOptimization, - ) + from nevergrad.optimization.lama.EnhancedHybridSimulatedAnnealingOptimization import EnhancedHybridSimulatedAnnealingOptimization - lama_register["EnhancedHybridSimulatedAnnealingOptimization"] = ( - EnhancedHybridSimulatedAnnealingOptimization - ) - LLAMAEnhancedHybridSimulatedAnnealingOptimization = NonObjectOptimizer( - method="LLAMAEnhancedHybridSimulatedAnnealingOptimization" - ).set_name("LLAMAEnhancedHybridSimulatedAnnealingOptimization", register=True) + lama_register["EnhancedHybridSimulatedAnnealingOptimization"] = EnhancedHybridSimulatedAnnealingOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedHybridSimulatedAnnealingOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHybridSimulatedAnnealingOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridSimulatedAnnealingOptimization").set_name("LLAMAEnhancedHybridSimulatedAnnealingOptimization", register=True) except Exception as e: print("EnhancedHybridSimulatedAnnealingOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedHyperAdaptiveHybridDEPSO import EnhancedHyperAdaptiveHybridDEPSO lama_register["EnhancedHyperAdaptiveHybridDEPSO"] = EnhancedHyperAdaptiveHybridDEPSO - LLAMAEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAEnhancedHyperAdaptiveHybridDEPSO" - ).set_name("LLAMAEnhancedHyperAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedHyperAdaptiveHybridDEPSO").set_name("LLAMAEnhancedHyperAdaptiveHybridDEPSO", register=True) except Exception as e: print("EnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 import ( - EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59, - ) + from nevergrad.optimization.lama.EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 import EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 - lama_register["EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59"] = ( - EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 - ) - LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 = NonObjectOptimizer( - method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59" - ).set_name("LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59", register=True) + lama_register["EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59"] = EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 + res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59").set_name("LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59", register=True) except Exception as e: print("EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 import ( - EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62, - ) + from nevergrad.optimization.lama.EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 import EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 - lama_register["EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62"] = ( - EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 - ) - LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 = NonObjectOptimizer( - method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62" - ).set_name("LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62", register=True) + lama_register["EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62"] = EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 + res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62").set_name("LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62", register=True) except Exception as e: print("EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHyperOptimizedMultiStrategicOptimizerV49 import ( - EnhancedHyperOptimizedMultiStrategicOptimizerV49, - ) + from nevergrad.optimization.lama.EnhancedHyperOptimizedMultiStrategicOptimizerV49 import EnhancedHyperOptimizedMultiStrategicOptimizerV49 - lama_register["EnhancedHyperOptimizedMultiStrategicOptimizerV49"] = ( - EnhancedHyperOptimizedMultiStrategicOptimizerV49 - ) - LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49 = NonObjectOptimizer( - method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49" - ).set_name("LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49", register=True) + lama_register["EnhancedHyperOptimizedMultiStrategicOptimizerV49"] = EnhancedHyperOptimizedMultiStrategicOptimizerV49 + res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49").set_name("LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49", register=True) except Exception as e: print("EnhancedHyperOptimizedMultiStrategicOptimizerV49 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 import ( - EnhancedHyperParameterTunedMetaHeuristicOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 import EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 - lama_register["EnhancedHyperParameterTunedMetaHeuristicOptimizerV4"] = ( - EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 - ) - LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4" - ).set_name("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4", register=True) + lama_register["EnhancedHyperParameterTunedMetaHeuristicOptimizerV4"] = EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4", register=True) except Exception as e: print("EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedHyperStrategicOptimizerV56 import ( - EnhancedHyperStrategicOptimizerV56, - ) + from nevergrad.optimization.lama.EnhancedHyperStrategicOptimizerV56 import EnhancedHyperStrategicOptimizerV56 lama_register["EnhancedHyperStrategicOptimizerV56"] = EnhancedHyperStrategicOptimizerV56 - LLAMAEnhancedHyperStrategicOptimizerV56 = NonObjectOptimizer( - method="LLAMAEnhancedHyperStrategicOptimizerV56" - ).set_name("LLAMAEnhancedHyperStrategicOptimizerV56", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedHyperStrategicOptimizerV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedHyperStrategicOptimizerV56 = NonObjectOptimizer(method="LLAMAEnhancedHyperStrategicOptimizerV56").set_name("LLAMAEnhancedHyperStrategicOptimizerV56", register=True) except Exception as e: print("EnhancedHyperStrategicOptimizerV56 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedImprovedDifferentialEvolutionLocalSearch_v58 import ( - EnhancedImprovedDifferentialEvolutionLocalSearch_v58, - ) + from nevergrad.optimization.lama.EnhancedImprovedDifferentialEvolutionLocalSearch_v58 import EnhancedImprovedDifferentialEvolutionLocalSearch_v58 - lama_register["EnhancedImprovedDifferentialEvolutionLocalSearch_v58"] = ( - EnhancedImprovedDifferentialEvolutionLocalSearch_v58 - ) - LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58 = NonObjectOptimizer( - method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58" - ).set_name("LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58", register=True) + lama_register["EnhancedImprovedDifferentialEvolutionLocalSearch_v58"] = EnhancedImprovedDifferentialEvolutionLocalSearch_v58 + res = NonObjectOptimizer(method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58 = NonObjectOptimizer(method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58").set_name("LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58", register=True) except Exception as e: print("EnhancedImprovedDifferentialEvolutionLocalSearch_v58 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer import ( - EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer import EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer - lama_register["EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer"] = ( - EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer - ) - LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer" - ).set_name("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer", register=True) + lama_register["EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer"] = EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer").set_name("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer", register=True) except Exception as e: print("EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 import ( - EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77, - ) + from nevergrad.optimization.lama.EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 import EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 - lama_register["EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77"] = ( - EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 - ) - LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 = NonObjectOptimizer( - method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77" - ).set_name("LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77", register=True) + lama_register["EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77"] = EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 + res = NonObjectOptimizer(method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 = NonObjectOptimizer(method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77").set_name("LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77", register=True) except Exception as e: print("EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 import ( - EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 import EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 - lama_register["EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7"] = ( - EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 - ) - LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7" - ).set_name("LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7", register=True) + lama_register["EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7"] = EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 + res = NonObjectOptimizer(method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategy import EnhancedIslandEvolutionStrategy lama_register["EnhancedIslandEvolutionStrategy"] = EnhancedIslandEvolutionStrategy - LLAMAEnhancedIslandEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedIslandEvolutionStrategy" - ).set_name("LLAMAEnhancedIslandEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedIslandEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategy").set_name("LLAMAEnhancedIslandEvolutionStrategy", register=True) except Exception as e: print("EnhancedIslandEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV10 import ( - EnhancedIslandEvolutionStrategyV10, - ) + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV10 import EnhancedIslandEvolutionStrategyV10 lama_register["EnhancedIslandEvolutionStrategyV10"] = EnhancedIslandEvolutionStrategyV10 - LLAMAEnhancedIslandEvolutionStrategyV10 = NonObjectOptimizer( - method="LLAMAEnhancedIslandEvolutionStrategyV10" - ).set_name("LLAMAEnhancedIslandEvolutionStrategyV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedIslandEvolutionStrategyV10 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV10").set_name("LLAMAEnhancedIslandEvolutionStrategyV10", register=True) except Exception as e: print("EnhancedIslandEvolutionStrategyV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV3 import ( - EnhancedIslandEvolutionStrategyV3, - ) + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV3 import EnhancedIslandEvolutionStrategyV3 lama_register["EnhancedIslandEvolutionStrategyV3"] = EnhancedIslandEvolutionStrategyV3 - LLAMAEnhancedIslandEvolutionStrategyV3 = NonObjectOptimizer( - method="LLAMAEnhancedIslandEvolutionStrategyV3" - ).set_name("LLAMAEnhancedIslandEvolutionStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedIslandEvolutionStrategyV3 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV3").set_name("LLAMAEnhancedIslandEvolutionStrategyV3", register=True) except Exception as e: print("EnhancedIslandEvolutionStrategyV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV7 import ( - EnhancedIslandEvolutionStrategyV7, - ) + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV7 import EnhancedIslandEvolutionStrategyV7 lama_register["EnhancedIslandEvolutionStrategyV7"] = EnhancedIslandEvolutionStrategyV7 - LLAMAEnhancedIslandEvolutionStrategyV7 = NonObjectOptimizer( - method="LLAMAEnhancedIslandEvolutionStrategyV7" - ).set_name("LLAMAEnhancedIslandEvolutionStrategyV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedIslandEvolutionStrategyV7 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV7").set_name("LLAMAEnhancedIslandEvolutionStrategyV7", register=True) except Exception as e: print("EnhancedIslandEvolutionStrategyV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV8 import ( - EnhancedIslandEvolutionStrategyV8, - ) + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV8 import EnhancedIslandEvolutionStrategyV8 lama_register["EnhancedIslandEvolutionStrategyV8"] = EnhancedIslandEvolutionStrategyV8 - LLAMAEnhancedIslandEvolutionStrategyV8 = NonObjectOptimizer( - method="LLAMAEnhancedIslandEvolutionStrategyV8" - ).set_name("LLAMAEnhancedIslandEvolutionStrategyV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedIslandEvolutionStrategyV8 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV8").set_name("LLAMAEnhancedIslandEvolutionStrategyV8", register=True) except Exception as e: print("EnhancedIslandEvolutionStrategyV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedLocalSearchAdaptiveStrategyV29 import ( - EnhancedLocalSearchAdaptiveStrategyV29, - ) + from nevergrad.optimization.lama.EnhancedLocalSearchAdaptiveStrategyV29 import EnhancedLocalSearchAdaptiveStrategyV29 lama_register["EnhancedLocalSearchAdaptiveStrategyV29"] = EnhancedLocalSearchAdaptiveStrategyV29 - LLAMAEnhancedLocalSearchAdaptiveStrategyV29 = NonObjectOptimizer( - method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29" - ).set_name("LLAMAEnhancedLocalSearchAdaptiveStrategyV29", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedLocalSearchAdaptiveStrategyV29 = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29").set_name("LLAMAEnhancedLocalSearchAdaptiveStrategyV29", register=True) except Exception as e: print("EnhancedLocalSearchAdaptiveStrategyV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedLocalSearchQuantumSimulatedAnnealingV6 import ( - EnhancedLocalSearchQuantumSimulatedAnnealingV6, - ) + from nevergrad.optimization.lama.EnhancedLocalSearchQuantumSimulatedAnnealingV6 import EnhancedLocalSearchQuantumSimulatedAnnealingV6 - lama_register["EnhancedLocalSearchQuantumSimulatedAnnealingV6"] = ( - EnhancedLocalSearchQuantumSimulatedAnnealingV6 - ) - LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6 = NonObjectOptimizer( - method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6" - ).set_name("LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6", register=True) + lama_register["EnhancedLocalSearchQuantumSimulatedAnnealingV6"] = EnhancedLocalSearchQuantumSimulatedAnnealingV6 + res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6 = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6").set_name("LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6", register=True) except Exception as e: print("EnhancedLocalSearchQuantumSimulatedAnnealingV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemeticDifferentialEvolution import ( - EnhancedMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedMemeticDifferentialEvolution import EnhancedMemeticDifferentialEvolution lama_register["EnhancedMemeticDifferentialEvolution"] = EnhancedMemeticDifferentialEvolution - LLAMAEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedMemeticDifferentialEvolution" - ).set_name("LLAMAEnhancedMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedMemeticDifferentialEvolution").set_name("LLAMAEnhancedMemeticDifferentialEvolution", register=True) except Exception as e: print("EnhancedMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemeticEvolutionarySearch import ( - EnhancedMemeticEvolutionarySearch, - ) + from nevergrad.optimization.lama.EnhancedMemeticEvolutionarySearch import EnhancedMemeticEvolutionarySearch lama_register["EnhancedMemeticEvolutionarySearch"] = EnhancedMemeticEvolutionarySearch - LLAMAEnhancedMemeticEvolutionarySearch = NonObjectOptimizer( - method="LLAMAEnhancedMemeticEvolutionarySearch" - ).set_name("LLAMAEnhancedMemeticEvolutionarySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemeticEvolutionarySearch = NonObjectOptimizer(method="LLAMAEnhancedMemeticEvolutionarySearch").set_name("LLAMAEnhancedMemeticEvolutionarySearch", register=True) except Exception as e: print("EnhancedMemeticEvolutionarySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemeticHarmonyOptimization import ( - EnhancedMemeticHarmonyOptimization, - ) + from nevergrad.optimization.lama.EnhancedMemeticHarmonyOptimization import EnhancedMemeticHarmonyOptimization lama_register["EnhancedMemeticHarmonyOptimization"] = EnhancedMemeticHarmonyOptimization - LLAMAEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( - method="LLAMAEnhancedMemeticHarmonyOptimization" - ).set_name("LLAMAEnhancedMemeticHarmonyOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedMemeticHarmonyOptimization").set_name("LLAMAEnhancedMemeticHarmonyOptimization", register=True) except Exception as e: print("EnhancedMemeticHarmonyOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemoryAdaptiveDynamicHybridOptimizer import ( - EnhancedMemoryAdaptiveDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMemoryAdaptiveDynamicHybridOptimizer import EnhancedMemoryAdaptiveDynamicHybridOptimizer - lama_register["EnhancedMemoryAdaptiveDynamicHybridOptimizer"] = ( - EnhancedMemoryAdaptiveDynamicHybridOptimizer - ) - LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer" - ).set_name("LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer", register=True) + lama_register["EnhancedMemoryAdaptiveDynamicHybridOptimizer"] = EnhancedMemoryAdaptiveDynamicHybridOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer").set_name("LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer", register=True) except Exception as e: print("EnhancedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 import ( - EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77, - ) + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 import EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 - lama_register["EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77"] = ( - EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 - ) - LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 = NonObjectOptimizer( - method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77" - ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77", register=True) + lama_register["EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77"] = EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 + res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77").set_name("LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77", register=True) except Exception as e: print("EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV41 import ( - EnhancedMemoryGuidedAdaptiveStrategyV41, - ) + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV41 import EnhancedMemoryGuidedAdaptiveStrategyV41 lama_register["EnhancedMemoryGuidedAdaptiveStrategyV41"] = EnhancedMemoryGuidedAdaptiveStrategyV41 - LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41 = NonObjectOptimizer( - method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41" - ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41").set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41", register=True) except Exception as e: print("EnhancedMemoryGuidedAdaptiveStrategyV41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV69 import ( - EnhancedMemoryGuidedAdaptiveStrategyV69, - ) + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV69 import EnhancedMemoryGuidedAdaptiveStrategyV69 lama_register["EnhancedMemoryGuidedAdaptiveStrategyV69"] = EnhancedMemoryGuidedAdaptiveStrategyV69 - LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69 = NonObjectOptimizer( - method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69" - ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69").set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69", register=True) except Exception as e: print("EnhancedMemoryGuidedAdaptiveStrategyV69 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaDynamicPrecisionOptimizerV1 import ( - EnhancedMetaDynamicPrecisionOptimizerV1, - ) + from nevergrad.optimization.lama.EnhancedMetaDynamicPrecisionOptimizerV1 import EnhancedMetaDynamicPrecisionOptimizerV1 lama_register["EnhancedMetaDynamicPrecisionOptimizerV1"] = EnhancedMetaDynamicPrecisionOptimizerV1 - LLAMAEnhancedMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( - method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1" - ).set_name("LLAMAEnhancedMetaDynamicPrecisionOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1").set_name("LLAMAEnhancedMetaDynamicPrecisionOptimizerV1", register=True) except Exception as e: print("EnhancedMetaDynamicPrecisionOptimizerV1 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaHeuristicOptimizerV2 import EnhancedMetaHeuristicOptimizerV2 lama_register["EnhancedMetaHeuristicOptimizerV2"] = EnhancedMetaHeuristicOptimizerV2 - LLAMAEnhancedMetaHeuristicOptimizerV2 = NonObjectOptimizer( - method="LLAMAEnhancedMetaHeuristicOptimizerV2" - ).set_name("LLAMAEnhancedMetaHeuristicOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedMetaHeuristicOptimizerV2", register=True) except Exception as e: print("EnhancedMetaHeuristicOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V1, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V1"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V2, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V2"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V3, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V3"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V4, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V4"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V5, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V5"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V6, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V6"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 import ( - EnhancedMetaNetAQAPSO_LS_DIW_AP_V7, - ) + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V7"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7 = NonObjectOptimizer( - method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7" - ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7", register=True) except Exception as e: print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv2 import EnhancedMetaNetAQAPSOv2 lama_register["EnhancedMetaNetAQAPSOv2"] = EnhancedMetaNetAQAPSOv2 - LLAMAEnhancedMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2").set_name( - "LLAMAEnhancedMetaNetAQAPSOv2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2").set_name("LLAMAEnhancedMetaNetAQAPSOv2", register=True) except Exception as e: print("EnhancedMetaNetAQAPSOv2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv3 import EnhancedMetaNetAQAPSOv3 lama_register["EnhancedMetaNetAQAPSOv3"] = EnhancedMetaNetAQAPSOv3 - LLAMAEnhancedMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3").set_name( - "LLAMAEnhancedMetaNetAQAPSOv3", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3").set_name("LLAMAEnhancedMetaNetAQAPSOv3", register=True) except Exception as e: print("EnhancedMetaNetAQAPSOv3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv4 import EnhancedMetaNetAQAPSOv4 lama_register["EnhancedMetaNetAQAPSOv4"] = EnhancedMetaNetAQAPSOv4 - LLAMAEnhancedMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4").set_name( - "LLAMAEnhancedMetaNetAQAPSOv4", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4").set_name("LLAMAEnhancedMetaNetAQAPSOv4", register=True) except Exception as e: print("EnhancedMetaNetAQAPSOv4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv5 import EnhancedMetaNetAQAPSOv5 lama_register["EnhancedMetaNetAQAPSOv5"] = EnhancedMetaNetAQAPSOv5 - LLAMAEnhancedMetaNetAQAPSOv5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5").set_name( - "LLAMAEnhancedMetaNetAQAPSOv5", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSOv5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5").set_name("LLAMAEnhancedMetaNetAQAPSOv5", register=True) except Exception as e: print("EnhancedMetaNetAQAPSOv5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv6 import EnhancedMetaNetAQAPSOv6 lama_register["EnhancedMetaNetAQAPSOv6"] = EnhancedMetaNetAQAPSOv6 - LLAMAEnhancedMetaNetAQAPSOv6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6").set_name( - "LLAMAEnhancedMetaNetAQAPSOv6", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetAQAPSOv6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6").set_name("LLAMAEnhancedMetaNetAQAPSOv6", register=True) except Exception as e: print("EnhancedMetaNetAQAPSOv6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetPSO import EnhancedMetaNetPSO lama_register["EnhancedMetaNetPSO"] = EnhancedMetaNetPSO - LLAMAEnhancedMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO").set_name( - "LLAMAEnhancedMetaNetPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO").set_name("LLAMAEnhancedMetaNetPSO", register=True) except Exception as e: print("EnhancedMetaNetPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMetaNetPSOv2 import EnhancedMetaNetPSOv2 lama_register["EnhancedMetaNetPSOv2"] = EnhancedMetaNetPSOv2 - LLAMAEnhancedMetaNetPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2").set_name( - "LLAMAEnhancedMetaNetPSOv2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaNetPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2").set_name("LLAMAEnhancedMetaNetPSOv2", register=True) except Exception as e: print("EnhancedMetaNetPSOv2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMetaPopulationAdaptiveGradientSearch import ( - EnhancedMetaPopulationAdaptiveGradientSearch, - ) + from nevergrad.optimization.lama.EnhancedMetaPopulationAdaptiveGradientSearch import EnhancedMetaPopulationAdaptiveGradientSearch - lama_register["EnhancedMetaPopulationAdaptiveGradientSearch"] = ( - EnhancedMetaPopulationAdaptiveGradientSearch - ) - LLAMAEnhancedMetaPopulationAdaptiveGradientSearch = NonObjectOptimizer( - method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch" - ).set_name("LLAMAEnhancedMetaPopulationAdaptiveGradientSearch", register=True) + lama_register["EnhancedMetaPopulationAdaptiveGradientSearch"] = EnhancedMetaPopulationAdaptiveGradientSearch + res = NonObjectOptimizer(method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMetaPopulationAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch").set_name("LLAMAEnhancedMetaPopulationAdaptiveGradientSearch", register=True) except Exception as e: print("EnhancedMetaPopulationAdaptiveGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiFocalAdaptiveOptimizer import ( - EnhancedMultiFocalAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMultiFocalAdaptiveOptimizer import EnhancedMultiFocalAdaptiveOptimizer lama_register["EnhancedMultiFocalAdaptiveOptimizer"] = EnhancedMultiFocalAdaptiveOptimizer - LLAMAEnhancedMultiFocalAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMultiFocalAdaptiveOptimizer" - ).set_name("LLAMAEnhancedMultiFocalAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiFocalAdaptiveOptimizer").set_name("LLAMAEnhancedMultiFocalAdaptiveOptimizer", register=True) except Exception as e: print("EnhancedMultiFocalAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiModalAdaptiveOptimizer import ( - EnhancedMultiModalAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMultiModalAdaptiveOptimizer import EnhancedMultiModalAdaptiveOptimizer lama_register["EnhancedMultiModalAdaptiveOptimizer"] = EnhancedMultiModalAdaptiveOptimizer - LLAMAEnhancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMultiModalAdaptiveOptimizer" - ).set_name("LLAMAEnhancedMultiModalAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiModalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalAdaptiveOptimizer").set_name("LLAMAEnhancedMultiModalAdaptiveOptimizer", register=True) except Exception as e: print("EnhancedMultiModalAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiModalConvergenceOptimizer import ( - EnhancedMultiModalConvergenceOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMultiModalConvergenceOptimizer import EnhancedMultiModalConvergenceOptimizer lama_register["EnhancedMultiModalConvergenceOptimizer"] = EnhancedMultiModalConvergenceOptimizer - LLAMAEnhancedMultiModalConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMultiModalConvergenceOptimizer" - ).set_name("LLAMAEnhancedMultiModalConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiModalConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalConvergenceOptimizer").set_name("LLAMAEnhancedMultiModalConvergenceOptimizer", register=True) except Exception as e: print("EnhancedMultiModalConvergenceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiModalExplorationStrategy import ( - EnhancedMultiModalExplorationStrategy, - ) + from nevergrad.optimization.lama.EnhancedMultiModalExplorationStrategy import EnhancedMultiModalExplorationStrategy lama_register["EnhancedMultiModalExplorationStrategy"] = EnhancedMultiModalExplorationStrategy - LLAMAEnhancedMultiModalExplorationStrategy = NonObjectOptimizer( - method="LLAMAEnhancedMultiModalExplorationStrategy" - ).set_name("LLAMAEnhancedMultiModalExplorationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalExplorationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiModalExplorationStrategy = NonObjectOptimizer(method="LLAMAEnhancedMultiModalExplorationStrategy").set_name("LLAMAEnhancedMultiModalExplorationStrategy", register=True) except Exception as e: print("EnhancedMultiModalExplorationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiModalMemoryHybridOptimizer import ( - EnhancedMultiModalMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMultiModalMemoryHybridOptimizer import EnhancedMultiModalMemoryHybridOptimizer lama_register["EnhancedMultiModalMemoryHybridOptimizer"] = EnhancedMultiModalMemoryHybridOptimizer - LLAMAEnhancedMultiModalMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMultiModalMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedMultiModalMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiModalMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalMemoryHybridOptimizer").set_name("LLAMAEnhancedMultiModalMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedMultiModalMemoryHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMultiOperatorSearch import EnhancedMultiOperatorSearch lama_register["EnhancedMultiOperatorSearch"] = EnhancedMultiOperatorSearch - LLAMAEnhancedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch").set_name( - "LLAMAEnhancedMultiOperatorSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch").set_name("LLAMAEnhancedMultiOperatorSearch", register=True) except Exception as e: print("EnhancedMultiOperatorSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMultiOperatorSearch2 import EnhancedMultiOperatorSearch2 lama_register["EnhancedMultiOperatorSearch2"] = EnhancedMultiOperatorSearch2 - LLAMAEnhancedMultiOperatorSearch2 = NonObjectOptimizer( - method="LLAMAEnhancedMultiOperatorSearch2" - ).set_name("LLAMAEnhancedMultiOperatorSearch2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiOperatorSearch2 = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch2").set_name("LLAMAEnhancedMultiOperatorSearch2", register=True) except Exception as e: print("EnhancedMultiOperatorSearch2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedMultiPhaseAdaptiveDE import EnhancedMultiPhaseAdaptiveDE lama_register["EnhancedMultiPhaseAdaptiveDE"] = EnhancedMultiPhaseAdaptiveDE - LLAMAEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedMultiPhaseAdaptiveDE" - ).set_name("LLAMAEnhancedMultiPhaseAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseAdaptiveDE").set_name("LLAMAEnhancedMultiPhaseAdaptiveDE", register=True) except Exception as e: print("EnhancedMultiPhaseAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiPhaseOptimizationAlgorithm import ( - EnhancedMultiPhaseOptimizationAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedMultiPhaseOptimizationAlgorithm import EnhancedMultiPhaseOptimizationAlgorithm lama_register["EnhancedMultiPhaseOptimizationAlgorithm"] = EnhancedMultiPhaseOptimizationAlgorithm - LLAMAEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm" - ).set_name("LLAMAEnhancedMultiPhaseOptimizationAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm").set_name("LLAMAEnhancedMultiPhaseOptimizationAlgorithm", register=True) except Exception as e: print("EnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiStageGradientBoostedAnnealing import ( - EnhancedMultiStageGradientBoostedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedMultiStageGradientBoostedAnnealing import EnhancedMultiStageGradientBoostedAnnealing lama_register["EnhancedMultiStageGradientBoostedAnnealing"] = EnhancedMultiStageGradientBoostedAnnealing - LLAMAEnhancedMultiStageGradientBoostedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedMultiStageGradientBoostedAnnealing" - ).set_name("LLAMAEnhancedMultiStageGradientBoostedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiStageGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiStageGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedMultiStageGradientBoostedAnnealing").set_name("LLAMAEnhancedMultiStageGradientBoostedAnnealing", register=True) except Exception as e: print("EnhancedMultiStageGradientBoostedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiStrategyDifferentialEvolution import ( - EnhancedMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedMultiStrategyDifferentialEvolution import EnhancedMultiStrategyDifferentialEvolution lama_register["EnhancedMultiStrategyDifferentialEvolution"] = EnhancedMultiStrategyDifferentialEvolution - LLAMAEnhancedMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedMultiStrategyDifferentialEvolution" - ).set_name("LLAMAEnhancedMultiStrategyDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("EnhancedMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedMultiStrategyQuantumLevyOptimizer import ( - EnhancedMultiStrategyQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.EnhancedMultiStrategyQuantumLevyOptimizer import EnhancedMultiStrategyQuantumLevyOptimizer lama_register["EnhancedMultiStrategyQuantumLevyOptimizer"] = EnhancedMultiStrategyQuantumLevyOptimizer - LLAMAEnhancedMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer" - ).set_name("LLAMAEnhancedMultiStrategyQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer").set_name("LLAMAEnhancedMultiStrategyQuantumLevyOptimizer", register=True) except Exception as e: print("EnhancedMultiStrategyQuantumLevyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedNicheDifferentialParticleSwarmOptimizer import ( - EnhancedNicheDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedNicheDifferentialParticleSwarmOptimizer import EnhancedNicheDifferentialParticleSwarmOptimizer - lama_register["EnhancedNicheDifferentialParticleSwarmOptimizer"] = ( - EnhancedNicheDifferentialParticleSwarmOptimizer - ) - LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer", register=True) + lama_register["EnhancedNicheDifferentialParticleSwarmOptimizer"] = EnhancedNicheDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOppositionBasedDifferentialEvolution import ( - EnhancedOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedOppositionBasedDifferentialEvolution import EnhancedOppositionBasedDifferentialEvolution - lama_register["EnhancedOppositionBasedDifferentialEvolution"] = ( - EnhancedOppositionBasedDifferentialEvolution - ) - LLAMAEnhancedOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedOppositionBasedDifferentialEvolution" - ).set_name("LLAMAEnhancedOppositionBasedDifferentialEvolution", register=True) + lama_register["EnhancedOppositionBasedDifferentialEvolution"] = EnhancedOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("EnhancedOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearch import ( - EnhancedOppositionBasedHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearch import EnhancedOppositionBasedHarmonySearch lama_register["EnhancedOppositionBasedHarmonySearch"] = EnhancedOppositionBasedHarmonySearch - LLAMAEnhancedOppositionBasedHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedOppositionBasedHarmonySearch" - ).set_name("LLAMAEnhancedOppositionBasedHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOppositionBasedHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearch").set_name("LLAMAEnhancedOppositionBasedHarmonySearch", register=True) except Exception as e: print("EnhancedOppositionBasedHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidth import ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidth, - ) + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidth import EnhancedOppositionBasedHarmonySearchDynamicBandwidth - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidth"] = ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidth - ) - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth = NonObjectOptimizer( - method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth" - ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth", register=True) + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidth"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidth + res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth", register=True) except Exception as e: print("EnhancedOppositionBasedHarmonySearchDynamicBandwidth can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC import ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC, - ) + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC import EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC"] = ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC - ) - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC = NonObjectOptimizer( - method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC" - ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC", register=True) + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC + res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC", register=True) except Exception as e: print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE, - ) + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE import EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( - EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE - ) - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( - method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE" - ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE + res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) except Exception as e: print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOptimalEvolutionaryGradientOptimizerV9 import ( - EnhancedOptimalEvolutionaryGradientOptimizerV9, - ) + from nevergrad.optimization.lama.EnhancedOptimalEvolutionaryGradientOptimizerV9 import EnhancedOptimalEvolutionaryGradientOptimizerV9 - lama_register["EnhancedOptimalEvolutionaryGradientOptimizerV9"] = ( - EnhancedOptimalEvolutionaryGradientOptimizerV9 - ) - LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9 = NonObjectOptimizer( - method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9" - ).set_name("LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9", register=True) + lama_register["EnhancedOptimalEvolutionaryGradientOptimizerV9"] = EnhancedOptimalEvolutionaryGradientOptimizerV9 + res = NonObjectOptimizer(method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9").set_name("LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9", register=True) except Exception as e: print("EnhancedOptimalEvolutionaryGradientOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOptimalPrecisionEvolutionaryThermalOptimizer import ( - EnhancedOptimalPrecisionEvolutionaryThermalOptimizer, - ) + from nevergrad.optimization.lama.EnhancedOptimalPrecisionEvolutionaryThermalOptimizer import EnhancedOptimalPrecisionEvolutionaryThermalOptimizer - lama_register["EnhancedOptimalPrecisionEvolutionaryThermalOptimizer"] = ( - EnhancedOptimalPrecisionEvolutionaryThermalOptimizer - ) - LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer" - ).set_name("LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer", register=True) + lama_register["EnhancedOptimalPrecisionEvolutionaryThermalOptimizer"] = EnhancedOptimalPrecisionEvolutionaryThermalOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer", register=True) except Exception as e: print("EnhancedOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOptimizedEvolutiveStrategy import ( - EnhancedOptimizedEvolutiveStrategy, - ) + from nevergrad.optimization.lama.EnhancedOptimizedEvolutiveStrategy import EnhancedOptimizedEvolutiveStrategy lama_register["EnhancedOptimizedEvolutiveStrategy"] = EnhancedOptimizedEvolutiveStrategy - LLAMAEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( - method="LLAMAEnhancedOptimizedEvolutiveStrategy" - ).set_name("LLAMAEnhancedOptimizedEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedOptimizedEvolutiveStrategy").set_name("LLAMAEnhancedOptimizedEvolutiveStrategy", register=True) except Exception as e: print("EnhancedOptimizedEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 import ( - EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46, - ) + from nevergrad.optimization.lama.EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 import EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 - lama_register["EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46"] = ( - EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 - ) - LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 = NonObjectOptimizer( - method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46" - ).set_name("LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46", register=True) + lama_register["EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46"] = EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 + res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 = NonObjectOptimizer(method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46").set_name("LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46", register=True) except Exception as e: print("EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedOrthogonalDE import EnhancedOrthogonalDE lama_register["EnhancedOrthogonalDE"] = EnhancedOrthogonalDE - LLAMAEnhancedOrthogonalDE = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE").set_name( - "LLAMAEnhancedOrthogonalDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDE = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE").set_name("LLAMAEnhancedOrthogonalDE", register=True) except Exception as e: print("EnhancedOrthogonalDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolution import ( - EnhancedOrthogonalDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolution import EnhancedOrthogonalDifferentialEvolution lama_register["EnhancedOrthogonalDifferentialEvolution"] = EnhancedOrthogonalDifferentialEvolution - LLAMAEnhancedOrthogonalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedOrthogonalDifferentialEvolution" - ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolution").set_name("LLAMAEnhancedOrthogonalDifferentialEvolution", register=True) except Exception as e: print("EnhancedOrthogonalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionImproved import ( - EnhancedOrthogonalDifferentialEvolutionImproved, - ) + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionImproved import EnhancedOrthogonalDifferentialEvolutionImproved - lama_register["EnhancedOrthogonalDifferentialEvolutionImproved"] = ( - EnhancedOrthogonalDifferentialEvolutionImproved - ) - LLAMAEnhancedOrthogonalDifferentialEvolutionImproved = NonObjectOptimizer( - method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved" - ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved", register=True) + lama_register["EnhancedOrthogonalDifferentialEvolutionImproved"] = EnhancedOrthogonalDifferentialEvolutionImproved + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved", register=True) except Exception as e: print("EnhancedOrthogonalDifferentialEvolutionImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV2 import ( - EnhancedOrthogonalDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV2 import EnhancedOrthogonalDifferentialEvolutionV2 lama_register["EnhancedOrthogonalDifferentialEvolutionV2"] = EnhancedOrthogonalDifferentialEvolutionV2 - LLAMAEnhancedOrthogonalDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2" - ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV2", register=True) except Exception as e: print("EnhancedOrthogonalDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV3 import ( - EnhancedOrthogonalDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV3 import EnhancedOrthogonalDifferentialEvolutionV3 lama_register["EnhancedOrthogonalDifferentialEvolutionV3"] = EnhancedOrthogonalDifferentialEvolutionV3 - LLAMAEnhancedOrthogonalDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3" - ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV3", register=True) except Exception as e: print("EnhancedOrthogonalDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV4 import ( - EnhancedOrthogonalDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV4 import EnhancedOrthogonalDifferentialEvolutionV4 lama_register["EnhancedOrthogonalDifferentialEvolutionV4"] = EnhancedOrthogonalDifferentialEvolutionV4 - LLAMAEnhancedOrthogonalDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4" - ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV4", register=True) except Exception as e: print("EnhancedOrthogonalDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedParallelDifferentialEvolution import ( - EnhancedParallelDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedParallelDifferentialEvolution import EnhancedParallelDifferentialEvolution lama_register["EnhancedParallelDifferentialEvolution"] = EnhancedParallelDifferentialEvolution - LLAMAEnhancedParallelDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedParallelDifferentialEvolution" - ).set_name("LLAMAEnhancedParallelDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedParallelDifferentialEvolution").set_name("LLAMAEnhancedParallelDifferentialEvolution", register=True) except Exception as e: print("EnhancedParallelDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedParticleSwarmOptimization import ( - EnhancedParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimization import EnhancedParticleSwarmOptimization lama_register["EnhancedParticleSwarmOptimization"] = EnhancedParticleSwarmOptimization - LLAMAEnhancedParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedParticleSwarmOptimization" - ).set_name("LLAMAEnhancedParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimization").set_name("LLAMAEnhancedParticleSwarmOptimization", register=True) except Exception as e: print("EnhancedParticleSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizer import EnhancedParticleSwarmOptimizer lama_register["EnhancedParticleSwarmOptimizer"] = EnhancedParticleSwarmOptimizer - LLAMAEnhancedParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedParticleSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizer").set_name("LLAMAEnhancedParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedParticleSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV4 import EnhancedParticleSwarmOptimizerV4 lama_register["EnhancedParticleSwarmOptimizerV4"] = EnhancedParticleSwarmOptimizerV4 - LLAMAEnhancedParticleSwarmOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedParticleSwarmOptimizerV4" - ).set_name("LLAMAEnhancedParticleSwarmOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParticleSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV4").set_name("LLAMAEnhancedParticleSwarmOptimizerV4", register=True) except Exception as e: print("EnhancedParticleSwarmOptimizerV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV5 import EnhancedParticleSwarmOptimizerV5 lama_register["EnhancedParticleSwarmOptimizerV5"] = EnhancedParticleSwarmOptimizerV5 - LLAMAEnhancedParticleSwarmOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedParticleSwarmOptimizerV5" - ).set_name("LLAMAEnhancedParticleSwarmOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParticleSwarmOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV5").set_name("LLAMAEnhancedParticleSwarmOptimizerV5", register=True) except Exception as e: print("EnhancedParticleSwarmOptimizerV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV6 import EnhancedParticleSwarmOptimizerV6 lama_register["EnhancedParticleSwarmOptimizerV6"] = EnhancedParticleSwarmOptimizerV6 - LLAMAEnhancedParticleSwarmOptimizerV6 = NonObjectOptimizer( - method="LLAMAEnhancedParticleSwarmOptimizerV6" - ).set_name("LLAMAEnhancedParticleSwarmOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedParticleSwarmOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV6").set_name("LLAMAEnhancedParticleSwarmOptimizerV6", register=True) except Exception as e: print("EnhancedParticleSwarmOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPhaseAdaptiveMemoryStrategyV75 import ( - EnhancedPhaseAdaptiveMemoryStrategyV75, - ) + from nevergrad.optimization.lama.EnhancedPhaseAdaptiveMemoryStrategyV75 import EnhancedPhaseAdaptiveMemoryStrategyV75 lama_register["EnhancedPhaseAdaptiveMemoryStrategyV75"] = EnhancedPhaseAdaptiveMemoryStrategyV75 - LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75 = NonObjectOptimizer( - method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75" - ).set_name("LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75 = NonObjectOptimizer(method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75").set_name("LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75", register=True) except Exception as e: print("EnhancedPhaseAdaptiveMemoryStrategyV75 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPhaseTransitionMemoryStrategyV82 import ( - EnhancedPhaseTransitionMemoryStrategyV82, - ) + from nevergrad.optimization.lama.EnhancedPhaseTransitionMemoryStrategyV82 import EnhancedPhaseTransitionMemoryStrategyV82 lama_register["EnhancedPhaseTransitionMemoryStrategyV82"] = EnhancedPhaseTransitionMemoryStrategyV82 - LLAMAEnhancedPhaseTransitionMemoryStrategyV82 = NonObjectOptimizer( - method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82" - ).set_name("LLAMAEnhancedPhaseTransitionMemoryStrategyV82", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPhaseTransitionMemoryStrategyV82 = NonObjectOptimizer(method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82").set_name("LLAMAEnhancedPhaseTransitionMemoryStrategyV82", register=True) except Exception as e: print("EnhancedPhaseTransitionMemoryStrategyV82 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveCohortOptimization import ( - EnhancedPrecisionAdaptiveCohortOptimization, - ) + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveCohortOptimization import EnhancedPrecisionAdaptiveCohortOptimization lama_register["EnhancedPrecisionAdaptiveCohortOptimization"] = EnhancedPrecisionAdaptiveCohortOptimization - LLAMAEnhancedPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization" - ).set_name("LLAMAEnhancedPrecisionAdaptiveCohortOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization").set_name("LLAMAEnhancedPrecisionAdaptiveCohortOptimization", register=True) except Exception as e: print("EnhancedPrecisionAdaptiveCohortOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveGradientClusteringPSO import ( - EnhancedPrecisionAdaptiveGradientClusteringPSO, - ) + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveGradientClusteringPSO import EnhancedPrecisionAdaptiveGradientClusteringPSO - lama_register["EnhancedPrecisionAdaptiveGradientClusteringPSO"] = ( - EnhancedPrecisionAdaptiveGradientClusteringPSO - ) - LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO" - ).set_name("LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO", register=True) + lama_register["EnhancedPrecisionAdaptiveGradientClusteringPSO"] = EnhancedPrecisionAdaptiveGradientClusteringPSO + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO").set_name("LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO", register=True) except Exception as e: print("EnhancedPrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionBoostedDifferentialEvolution import ( - EnhancedPrecisionBoostedDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedPrecisionBoostedDifferentialEvolution import EnhancedPrecisionBoostedDifferentialEvolution - lama_register["EnhancedPrecisionBoostedDifferentialEvolution"] = ( - EnhancedPrecisionBoostedDifferentialEvolution - ) - LLAMAEnhancedPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution" - ).set_name("LLAMAEnhancedPrecisionBoostedDifferentialEvolution", register=True) + lama_register["EnhancedPrecisionBoostedDifferentialEvolution"] = EnhancedPrecisionBoostedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionBoostedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution").set_name("LLAMAEnhancedPrecisionBoostedDifferentialEvolution", register=True) except Exception as e: print("EnhancedPrecisionBoostedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionConvergenceOptimizer import ( - EnhancedPrecisionConvergenceOptimizer, - ) + from nevergrad.optimization.lama.EnhancedPrecisionConvergenceOptimizer import EnhancedPrecisionConvergenceOptimizer lama_register["EnhancedPrecisionConvergenceOptimizer"] = EnhancedPrecisionConvergenceOptimizer - LLAMAEnhancedPrecisionConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionConvergenceOptimizer" - ).set_name("LLAMAEnhancedPrecisionConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedPrecisionConvergenceOptimizer").set_name("LLAMAEnhancedPrecisionConvergenceOptimizer", register=True) except Exception as e: print("EnhancedPrecisionConvergenceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV38 import ( - EnhancedPrecisionEvolutionaryOptimizerV38, - ) + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV38 import EnhancedPrecisionEvolutionaryOptimizerV38 lama_register["EnhancedPrecisionEvolutionaryOptimizerV38"] = EnhancedPrecisionEvolutionaryOptimizerV38 - LLAMAEnhancedPrecisionEvolutionaryOptimizerV38 = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38" - ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV38", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionEvolutionaryOptimizerV38 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38").set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV38", register=True) except Exception as e: print("EnhancedPrecisionEvolutionaryOptimizerV38 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV39 import ( - EnhancedPrecisionEvolutionaryOptimizerV39, - ) + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV39 import EnhancedPrecisionEvolutionaryOptimizerV39 lama_register["EnhancedPrecisionEvolutionaryOptimizerV39"] = EnhancedPrecisionEvolutionaryOptimizerV39 - LLAMAEnhancedPrecisionEvolutionaryOptimizerV39 = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39" - ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV39", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionEvolutionaryOptimizerV39 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39").set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV39", register=True) except Exception as e: print("EnhancedPrecisionEvolutionaryOptimizerV39 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionGuidedQuantumStrategy import ( - EnhancedPrecisionGuidedQuantumStrategy, - ) + from nevergrad.optimization.lama.EnhancedPrecisionGuidedQuantumStrategy import EnhancedPrecisionGuidedQuantumStrategy lama_register["EnhancedPrecisionGuidedQuantumStrategy"] = EnhancedPrecisionGuidedQuantumStrategy - LLAMAEnhancedPrecisionGuidedQuantumStrategy = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionGuidedQuantumStrategy" - ).set_name("LLAMAEnhancedPrecisionGuidedQuantumStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionGuidedQuantumStrategy = NonObjectOptimizer(method="LLAMAEnhancedPrecisionGuidedQuantumStrategy").set_name("LLAMAEnhancedPrecisionGuidedQuantumStrategy", register=True) except Exception as e: print("EnhancedPrecisionGuidedQuantumStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedPrecisionHybridSearchV2 import EnhancedPrecisionHybridSearchV2 lama_register["EnhancedPrecisionHybridSearchV2"] = EnhancedPrecisionHybridSearchV2 - LLAMAEnhancedPrecisionHybridSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionHybridSearchV2" - ).set_name("LLAMAEnhancedPrecisionHybridSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionHybridSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionHybridSearchV2").set_name("LLAMAEnhancedPrecisionHybridSearchV2", register=True) except Exception as e: print("EnhancedPrecisionHybridSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedPrecisionTunedCrossoverElitistStrategyV14 import ( - EnhancedPrecisionTunedCrossoverElitistStrategyV14, - ) + from nevergrad.optimization.lama.EnhancedPrecisionTunedCrossoverElitistStrategyV14 import EnhancedPrecisionTunedCrossoverElitistStrategyV14 - lama_register["EnhancedPrecisionTunedCrossoverElitistStrategyV14"] = ( - EnhancedPrecisionTunedCrossoverElitistStrategyV14 - ) - LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14 = NonObjectOptimizer( - method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14" - ).set_name("LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14", register=True) + lama_register["EnhancedPrecisionTunedCrossoverElitistStrategyV14"] = EnhancedPrecisionTunedCrossoverElitistStrategyV14 + res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14").set_name("LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14", register=True) except Exception as e: print("EnhancedPrecisionTunedCrossoverElitistStrategyV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedProgressiveAdaptiveDifferentialEvolution import ( - EnhancedProgressiveAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedProgressiveAdaptiveDifferentialEvolution import EnhancedProgressiveAdaptiveDifferentialEvolution - lama_register["EnhancedProgressiveAdaptiveDifferentialEvolution"] = ( - EnhancedProgressiveAdaptiveDifferentialEvolution - ) - LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution" - ).set_name("LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution", register=True) + lama_register["EnhancedProgressiveAdaptiveDifferentialEvolution"] = EnhancedProgressiveAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution", register=True) except Exception as e: print("EnhancedProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHR import EnhancedQAPSOAIRVCHR lama_register["EnhancedQAPSOAIRVCHR"] = EnhancedQAPSOAIRVCHR - LLAMAEnhancedQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR").set_name( - "LLAMAEnhancedQAPSOAIRVCHR", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR").set_name("LLAMAEnhancedQAPSOAIRVCHR", register=True) except Exception as e: print("EnhancedQAPSOAIRVCHR can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLS import EnhancedQAPSOAIRVCHRLS lama_register["EnhancedQAPSOAIRVCHRLS"] = EnhancedQAPSOAIRVCHRLS - LLAMAEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS").set_name( - "LLAMAEnhancedQAPSOAIRVCHRLS", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS").set_name("LLAMAEnhancedQAPSOAIRVCHRLS", register=True) except Exception as e: print("EnhancedQAPSOAIRVCHRLS can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLSDP import EnhancedQAPSOAIRVCHRLSDP lama_register["EnhancedQAPSOAIRVCHRLSDP"] = EnhancedQAPSOAIRVCHRLSDP - LLAMAEnhancedQAPSOAIRVCHRLSDP = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP").set_name( - "LLAMAEnhancedQAPSOAIRVCHRLSDP", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQAPSOAIRVCHRLSDP = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP").set_name("LLAMAEnhancedQAPSOAIRVCHRLSDP", register=True) except Exception as e: print("EnhancedQAPSOAIRVCHRLSDP can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumAdaptiveCrossover import EnhancedQuantumAdaptiveCrossover lama_register["EnhancedQuantumAdaptiveCrossover"] = EnhancedQuantumAdaptiveCrossover - LLAMAEnhancedQuantumAdaptiveCrossover = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveCrossover" - ).set_name("LLAMAEnhancedQuantumAdaptiveCrossover", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveCrossover = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveCrossover").set_name("LLAMAEnhancedQuantumAdaptiveCrossover", register=True) except Exception as e: print("EnhancedQuantumAdaptiveCrossover can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDE import EnhancedQuantumAdaptiveDE lama_register["EnhancedQuantumAdaptiveDE"] = EnhancedQuantumAdaptiveDE - LLAMAEnhancedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE").set_name( - "LLAMAEnhancedQuantumAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE").set_name("LLAMAEnhancedQuantumAdaptiveDE", register=True) except Exception as e: print("EnhancedQuantumAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( - EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - lama_register["EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( - EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - ) - LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" - ).set_name("LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) + lama_register["EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory").set_name("LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) except Exception as e: print("EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveEliteGuidedSearch import ( - EnhancedQuantumAdaptiveEliteGuidedSearch, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveEliteGuidedSearch import EnhancedQuantumAdaptiveEliteGuidedSearch lama_register["EnhancedQuantumAdaptiveEliteGuidedSearch"] = EnhancedQuantumAdaptiveEliteGuidedSearch - LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch" - ).set_name("LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch").set_name("LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch", register=True) except Exception as e: print("EnhancedQuantumAdaptiveEliteGuidedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveFireworksOptimizer import ( - EnhancedQuantumAdaptiveFireworksOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveFireworksOptimizer import EnhancedQuantumAdaptiveFireworksOptimizer lama_register["EnhancedQuantumAdaptiveFireworksOptimizer"] = EnhancedQuantumAdaptiveFireworksOptimizer - LLAMAEnhancedQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer" - ).set_name("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer").set_name("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer", register=True) except Exception as e: print("EnhancedQuantumAdaptiveFireworksOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveGradientDiversityExplorer import ( - EnhancedQuantumAdaptiveGradientDiversityExplorer, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveGradientDiversityExplorer import EnhancedQuantumAdaptiveGradientDiversityExplorer - lama_register["EnhancedQuantumAdaptiveGradientDiversityExplorer"] = ( - EnhancedQuantumAdaptiveGradientDiversityExplorer - ) - LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer" - ).set_name("LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer", register=True) + lama_register["EnhancedQuantumAdaptiveGradientDiversityExplorer"] = EnhancedQuantumAdaptiveGradientDiversityExplorer + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer").set_name("LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer", register=True) except Exception as e: print("EnhancedQuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridDEPSO_V4 import ( - EnhancedQuantumAdaptiveHybridDEPSO_V4, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridDEPSO_V4 import EnhancedQuantumAdaptiveHybridDEPSO_V4 lama_register["EnhancedQuantumAdaptiveHybridDEPSO_V4"] = EnhancedQuantumAdaptiveHybridDEPSO_V4 - LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4" - ).set_name("LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4").set_name("LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4", register=True) except Exception as e: print("EnhancedQuantumAdaptiveHybridDEPSO_V4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridSearchV2 import ( - EnhancedQuantumAdaptiveHybridSearchV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridSearchV2 import EnhancedQuantumAdaptiveHybridSearchV2 lama_register["EnhancedQuantumAdaptiveHybridSearchV2"] = EnhancedQuantumAdaptiveHybridSearchV2 - LLAMAEnhancedQuantumAdaptiveHybridSearchV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2" - ).set_name("LLAMAEnhancedQuantumAdaptiveHybridSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveHybridSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2").set_name("LLAMAEnhancedQuantumAdaptiveHybridSearchV2", register=True) except Exception as e: print("EnhancedQuantumAdaptiveHybridSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveLevySwarmOptimization import ( - EnhancedQuantumAdaptiveLevySwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveLevySwarmOptimization import EnhancedQuantumAdaptiveLevySwarmOptimization - lama_register["EnhancedQuantumAdaptiveLevySwarmOptimization"] = ( - EnhancedQuantumAdaptiveLevySwarmOptimization - ) - LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization" - ).set_name("LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization", register=True) + lama_register["EnhancedQuantumAdaptiveLevySwarmOptimization"] = EnhancedQuantumAdaptiveLevySwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization").set_name("LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization", register=True) except Exception as e: print("EnhancedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiPhaseDE_v3 import ( - EnhancedQuantumAdaptiveMultiPhaseDE_v3, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiPhaseDE_v3 import EnhancedQuantumAdaptiveMultiPhaseDE_v3 lama_register["EnhancedQuantumAdaptiveMultiPhaseDE_v3"] = EnhancedQuantumAdaptiveMultiPhaseDE_v3 - LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3" - ).set_name("LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3").set_name("LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3", register=True) except Exception as e: print("EnhancedQuantumAdaptiveMultiPhaseDE_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiStrategyEvolution import ( - EnhancedQuantumAdaptiveMultiStrategyEvolution, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiStrategyEvolution import EnhancedQuantumAdaptiveMultiStrategyEvolution - lama_register["EnhancedQuantumAdaptiveMultiStrategyEvolution"] = ( - EnhancedQuantumAdaptiveMultiStrategyEvolution - ) - LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution" - ).set_name("LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution", register=True) + lama_register["EnhancedQuantumAdaptiveMultiStrategyEvolution"] = EnhancedQuantumAdaptiveMultiStrategyEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution").set_name("LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution", register=True) except Exception as e: print("EnhancedQuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveNesterovStrategy import ( - EnhancedQuantumAdaptiveNesterovStrategy, - ) + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveNesterovStrategy import EnhancedQuantumAdaptiveNesterovStrategy lama_register["EnhancedQuantumAdaptiveNesterovStrategy"] = EnhancedQuantumAdaptiveNesterovStrategy - LLAMAEnhancedQuantumAdaptiveNesterovStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy" - ).set_name("LLAMAEnhancedQuantumAdaptiveNesterovStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveNesterovStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy").set_name("LLAMAEnhancedQuantumAdaptiveNesterovStrategy", register=True) except Exception as e: print("EnhancedQuantumAdaptiveNesterovStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumAdaptiveOptimizer import EnhancedQuantumAdaptiveOptimizer lama_register["EnhancedQuantumAdaptiveOptimizer"] = EnhancedQuantumAdaptiveOptimizer - LLAMAEnhancedQuantumAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAdaptiveOptimizer" - ).set_name("LLAMAEnhancedQuantumAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveOptimizer").set_name("LLAMAEnhancedQuantumAdaptiveOptimizer", register=True) except Exception as e: print("EnhancedQuantumAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumAnnealingOptimizer import ( - EnhancedQuantumAnnealingOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumAnnealingOptimizer import EnhancedQuantumAnnealingOptimizer lama_register["EnhancedQuantumAnnealingOptimizer"] = EnhancedQuantumAnnealingOptimizer - LLAMAEnhancedQuantumAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumAnnealingOptimizer" - ).set_name("LLAMAEnhancedQuantumAnnealingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumAnnealingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAnnealingOptimizer").set_name("LLAMAEnhancedQuantumAnnealingOptimizer", register=True) except Exception as e: print("EnhancedQuantumAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCognitionFocusedOptimizerV18 import ( - EnhancedQuantumCognitionFocusedOptimizerV18, - ) + from nevergrad.optimization.lama.EnhancedQuantumCognitionFocusedOptimizerV18 import EnhancedQuantumCognitionFocusedOptimizerV18 lama_register["EnhancedQuantumCognitionFocusedOptimizerV18"] = EnhancedQuantumCognitionFocusedOptimizerV18 - LLAMAEnhancedQuantumCognitionFocusedOptimizerV18 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18" - ).set_name("LLAMAEnhancedQuantumCognitionFocusedOptimizerV18", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCognitionFocusedOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18").set_name("LLAMAEnhancedQuantumCognitionFocusedOptimizerV18", register=True) except Exception as e: print("EnhancedQuantumCognitionFocusedOptimizerV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCognitionOptimizerV12 import ( - EnhancedQuantumCognitionOptimizerV12, - ) + from nevergrad.optimization.lama.EnhancedQuantumCognitionOptimizerV12 import EnhancedQuantumCognitionOptimizerV12 lama_register["EnhancedQuantumCognitionOptimizerV12"] = EnhancedQuantumCognitionOptimizerV12 - LLAMAEnhancedQuantumCognitionOptimizerV12 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCognitionOptimizerV12" - ).set_name("LLAMAEnhancedQuantumCognitionOptimizerV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCognitionOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionOptimizerV12").set_name("LLAMAEnhancedQuantumCognitionOptimizerV12", register=True) except Exception as e: print("EnhancedQuantumCognitionOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCooperativeStrategy import ( - EnhancedQuantumCooperativeStrategy, - ) + from nevergrad.optimization.lama.EnhancedQuantumCooperativeStrategy import EnhancedQuantumCooperativeStrategy lama_register["EnhancedQuantumCooperativeStrategy"] = EnhancedQuantumCooperativeStrategy - LLAMAEnhancedQuantumCooperativeStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCooperativeStrategy" - ).set_name("LLAMAEnhancedQuantumCooperativeStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCooperativeStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumCooperativeStrategy").set_name("LLAMAEnhancedQuantumCooperativeStrategy", register=True) except Exception as e: print("EnhancedQuantumCooperativeStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolution import ( - EnhancedQuantumCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolution import EnhancedQuantumCovarianceMatrixDifferentialEvolution - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( - EnhancedQuantumCovarianceMatrixDifferentialEvolution - ) - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolution"] = EnhancedQuantumCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("EnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus import ( - EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus import EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus"] = ( - EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus - ) - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus" - ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus", register=True) + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus"] = EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus", register=True) except Exception as e: print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( - EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( - EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - ) - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" - ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) except Exception as e: print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts import ( - EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts import EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts - lama_register["EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts"] = ( - EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts - ) - LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts" - ).set_name("LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts", register=True) + lama_register["EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts"] = EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts").set_name("LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts", register=True) except Exception as e: print("EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolution import ( - EnhancedQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolution import EnhancedQuantumDifferentialEvolution lama_register["EnhancedQuantumDifferentialEvolution"] = EnhancedQuantumDifferentialEvolution - LLAMAEnhancedQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialEvolution" - ).set_name("LLAMAEnhancedQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolution").set_name("LLAMAEnhancedQuantumDifferentialEvolution", register=True) except Exception as e: print("EnhancedQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart import ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart import EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart"] = ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart - ) - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart" - ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart", register=True) + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart", register=True) except Exception as e: print("EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts import ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts import EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts"] = ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts - ) - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts" - ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts", register=True) + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts", register=True) except Exception as e: print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory import ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory import EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory"] = ( - EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory - ) - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory" - ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory", register=True) + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory", register=True) except Exception as e: print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism import ( - EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism import EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism - lama_register["EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism"] = ( - EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism - ) - LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism" - ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism", register=True) + lama_register["EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism"] = EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism", register=True) except Exception as e: print("EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism import ( - EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism import EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism - lama_register["EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism"] = ( - EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism - ) - LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism" - ).set_name("LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism", register=True) + lama_register["EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism"] = EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism").set_name("LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism", register=True) except Exception as e: print("EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleSwarmOptimizer import ( - EnhancedQuantumDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleSwarmOptimizer import EnhancedQuantumDifferentialParticleSwarmOptimizer - lama_register["EnhancedQuantumDifferentialParticleSwarmOptimizer"] = ( - EnhancedQuantumDifferentialParticleSwarmOptimizer - ) - LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer", register=True) + lama_register["EnhancedQuantumDifferentialParticleSwarmOptimizer"] = EnhancedQuantumDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("EnhancedQuantumDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumDiversityDE import EnhancedQuantumDiversityDE lama_register["EnhancedQuantumDiversityDE"] = EnhancedQuantumDiversityDE - LLAMAEnhancedQuantumDiversityDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE").set_name( - "LLAMAEnhancedQuantumDiversityDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDiversityDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE").set_name("LLAMAEnhancedQuantumDiversityDE", register=True) except Exception as e: print("EnhancedQuantumDiversityDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDynamicAdaptiveHybridDEPSO import ( - EnhancedQuantumDynamicAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.EnhancedQuantumDynamicAdaptiveHybridDEPSO import EnhancedQuantumDynamicAdaptiveHybridDEPSO lama_register["EnhancedQuantumDynamicAdaptiveHybridDEPSO"] = EnhancedQuantumDynamicAdaptiveHybridDEPSO - LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO" - ).set_name("LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO").set_name("LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO", register=True) except Exception as e: print("EnhancedQuantumDynamicAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumDynamicBalanceOptimizer import ( - EnhancedQuantumDynamicBalanceOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumDynamicBalanceOptimizer import EnhancedQuantumDynamicBalanceOptimizer lama_register["EnhancedQuantumDynamicBalanceOptimizer"] = EnhancedQuantumDynamicBalanceOptimizer - LLAMAEnhancedQuantumDynamicBalanceOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDynamicBalanceOptimizer" - ).set_name("LLAMAEnhancedQuantumDynamicBalanceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicBalanceOptimizer").set_name("LLAMAEnhancedQuantumDynamicBalanceOptimizer", register=True) except Exception as e: print("EnhancedQuantumDynamicBalanceOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumDynamicOptimizer import EnhancedQuantumDynamicOptimizer lama_register["EnhancedQuantumDynamicOptimizer"] = EnhancedQuantumDynamicOptimizer - LLAMAEnhancedQuantumDynamicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumDynamicOptimizer" - ).set_name("LLAMAEnhancedQuantumDynamicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumDynamicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicOptimizer").set_name("LLAMAEnhancedQuantumDynamicOptimizer", register=True) except Exception as e: print("EnhancedQuantumDynamicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumEvolutionStrategy import EnhancedQuantumEvolutionStrategy lama_register["EnhancedQuantumEvolutionStrategy"] = EnhancedQuantumEvolutionStrategy - LLAMAEnhancedQuantumEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumEvolutionStrategy" - ).set_name("LLAMAEnhancedQuantumEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumEvolutionStrategy").set_name("LLAMAEnhancedQuantumEvolutionStrategy", register=True) except Exception as e: print("EnhancedQuantumEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithm import ( - EnhancedQuantumFireworksAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithm import EnhancedQuantumFireworksAlgorithm lama_register["EnhancedQuantumFireworksAlgorithm"] = EnhancedQuantumFireworksAlgorithm - LLAMAEnhancedQuantumFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedQuantumFireworksAlgorithm" - ).set_name("LLAMAEnhancedQuantumFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithm").set_name("LLAMAEnhancedQuantumFireworksAlgorithm", register=True) except Exception as e: print("EnhancedQuantumFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithmV2 import ( - EnhancedQuantumFireworksAlgorithmV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithmV2 import EnhancedQuantumFireworksAlgorithmV2 lama_register["EnhancedQuantumFireworksAlgorithmV2"] = EnhancedQuantumFireworksAlgorithmV2 - LLAMAEnhancedQuantumFireworksAlgorithmV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumFireworksAlgorithmV2" - ).set_name("LLAMAEnhancedQuantumFireworksAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithmV2").set_name("LLAMAEnhancedQuantumFireworksAlgorithmV2", register=True) except Exception as e: print("EnhancedQuantumFireworksAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimization import ( - EnhancedQuantumGradientAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimization import EnhancedQuantumGradientAdaptiveExplorationOptimization - lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimization"] = ( - EnhancedQuantumGradientAdaptiveExplorationOptimization - ) - LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization" - ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization", register=True) + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimization"] = EnhancedQuantumGradientAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization", register=True) except Exception as e: print("EnhancedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 import ( - EnhancedQuantumGradientAdaptiveExplorationOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 import EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 - lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimizationV5"] = ( - EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 - ) - LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5" - ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5", register=True) + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimizationV5"] = EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5").set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5", register=True) except Exception as e: print("EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimization import ( - EnhancedQuantumGradientExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimization import EnhancedQuantumGradientExplorationOptimization - lama_register["EnhancedQuantumGradientExplorationOptimization"] = ( - EnhancedQuantumGradientExplorationOptimization - ) - LLAMAEnhancedQuantumGradientExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientExplorationOptimization" - ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimization", register=True) + lama_register["EnhancedQuantumGradientExplorationOptimization"] = EnhancedQuantumGradientExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimization").set_name("LLAMAEnhancedQuantumGradientExplorationOptimization", register=True) except Exception as e: print("EnhancedQuantumGradientExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimizationV2 import ( - EnhancedQuantumGradientExplorationOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimizationV2 import EnhancedQuantumGradientExplorationOptimizationV2 - lama_register["EnhancedQuantumGradientExplorationOptimizationV2"] = ( - EnhancedQuantumGradientExplorationOptimizationV2 - ) - LLAMAEnhancedQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2" - ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimizationV2", register=True) + lama_register["EnhancedQuantumGradientExplorationOptimizationV2"] = EnhancedQuantumGradientExplorationOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2").set_name("LLAMAEnhancedQuantumGradientExplorationOptimizationV2", register=True) except Exception as e: print("EnhancedQuantumGradientExplorationOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientMemeticOptimizer import ( - EnhancedQuantumGradientMemeticOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientMemeticOptimizer import EnhancedQuantumGradientMemeticOptimizer lama_register["EnhancedQuantumGradientMemeticOptimizer"] = EnhancedQuantumGradientMemeticOptimizer - LLAMAEnhancedQuantumGradientMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientMemeticOptimizer" - ).set_name("LLAMAEnhancedQuantumGradientMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientMemeticOptimizer").set_name("LLAMAEnhancedQuantumGradientMemeticOptimizer", register=True) except Exception as e: print("EnhancedQuantumGradientMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumGradientOptimizerV5 import ( - EnhancedQuantumGradientOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedQuantumGradientOptimizerV5 import EnhancedQuantumGradientOptimizerV5 lama_register["EnhancedQuantumGradientOptimizerV5"] = EnhancedQuantumGradientOptimizerV5 - LLAMAEnhancedQuantumGradientOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumGradientOptimizerV5" - ).set_name("LLAMAEnhancedQuantumGradientOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumGradientOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientOptimizerV5").set_name("LLAMAEnhancedQuantumGradientOptimizerV5", register=True) except Exception as e: print("EnhancedQuantumGradientOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonicAdaptationStrategy import ( - EnhancedQuantumHarmonicAdaptationStrategy, - ) + from nevergrad.optimization.lama.EnhancedQuantumHarmonicAdaptationStrategy import EnhancedQuantumHarmonicAdaptationStrategy lama_register["EnhancedQuantumHarmonicAdaptationStrategy"] = EnhancedQuantumHarmonicAdaptationStrategy - LLAMAEnhancedQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy" - ).set_name("LLAMAEnhancedQuantumHarmonicAdaptationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonicAdaptationStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy").set_name("LLAMAEnhancedQuantumHarmonicAdaptationStrategy", register=True) except Exception as e: print("EnhancedQuantumHarmonicAdaptationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonyMemeticAlgorithm import ( - EnhancedQuantumHarmonyMemeticAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedQuantumHarmonyMemeticAlgorithm import EnhancedQuantumHarmonyMemeticAlgorithm lama_register["EnhancedQuantumHarmonyMemeticAlgorithm"] = EnhancedQuantumHarmonyMemeticAlgorithm - LLAMAEnhancedQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm" - ).set_name("LLAMAEnhancedQuantumHarmonyMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm").set_name("LLAMAEnhancedQuantumHarmonyMemeticAlgorithm", register=True) except Exception as e: print("EnhancedQuantumHarmonyMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumHarmonySearch import EnhancedQuantumHarmonySearch lama_register["EnhancedQuantumHarmonySearch"] = EnhancedQuantumHarmonySearch - LLAMAEnhancedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonySearch" - ).set_name("LLAMAEnhancedQuantumHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearch").set_name("LLAMAEnhancedQuantumHarmonySearch", register=True) except Exception as e: print("EnhancedQuantumHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchAB import EnhancedQuantumHarmonySearchAB lama_register["EnhancedQuantumHarmonySearchAB"] = EnhancedQuantumHarmonySearchAB - LLAMAEnhancedQuantumHarmonySearchAB = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonySearchAB" - ).set_name("LLAMAEnhancedQuantumHarmonySearchAB", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchAB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonySearchAB = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchAB").set_name("LLAMAEnhancedQuantumHarmonySearchAB", register=True) except Exception as e: print("EnhancedQuantumHarmonySearchAB can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGB import EnhancedQuantumHarmonySearchABGB lama_register["EnhancedQuantumHarmonySearchABGB"] = EnhancedQuantumHarmonySearchABGB - LLAMAEnhancedQuantumHarmonySearchABGB = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonySearchABGB" - ).set_name("LLAMAEnhancedQuantumHarmonySearchABGB", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonySearchABGB = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGB").set_name("LLAMAEnhancedQuantumHarmonySearchABGB", register=True) except Exception as e: print("EnhancedQuantumHarmonySearchABGB can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGBRefined import ( - EnhancedQuantumHarmonySearchABGBRefined, - ) + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGBRefined import EnhancedQuantumHarmonySearchABGBRefined lama_register["EnhancedQuantumHarmonySearchABGBRefined"] = EnhancedQuantumHarmonySearchABGBRefined - LLAMAEnhancedQuantumHarmonySearchABGBRefined = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHarmonySearchABGBRefined" - ).set_name("LLAMAEnhancedQuantumHarmonySearchABGBRefined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGBRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHarmonySearchABGBRefined = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGBRefined").set_name("LLAMAEnhancedQuantumHarmonySearchABGBRefined", register=True) except Exception as e: print("EnhancedQuantumHarmonySearchABGBRefined can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE import EnhancedQuantumHybridAdaptiveDE lama_register["EnhancedQuantumHybridAdaptiveDE"] = EnhancedQuantumHybridAdaptiveDE - LLAMAEnhancedQuantumHybridAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHybridAdaptiveDE" - ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE").set_name("LLAMAEnhancedQuantumHybridAdaptiveDE", register=True) except Exception as e: print("EnhancedQuantumHybridAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE_v2 import ( - EnhancedQuantumHybridAdaptiveDE_v2, - ) + from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE_v2 import EnhancedQuantumHybridAdaptiveDE_v2 lama_register["EnhancedQuantumHybridAdaptiveDE_v2"] = EnhancedQuantumHybridAdaptiveDE_v2 - LLAMAEnhancedQuantumHybridAdaptiveDE_v2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2" - ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumHybridAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2").set_name("LLAMAEnhancedQuantumHybridAdaptiveDE_v2", register=True) except Exception as e: print("EnhancedQuantumHybridAdaptiveDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumInformedGradientOptimizer import ( - EnhancedQuantumInformedGradientOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumInformedGradientOptimizer import EnhancedQuantumInformedGradientOptimizer lama_register["EnhancedQuantumInformedGradientOptimizer"] = EnhancedQuantumInformedGradientOptimizer - LLAMAEnhancedQuantumInformedGradientOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumInformedGradientOptimizer" - ).set_name("LLAMAEnhancedQuantumInformedGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumInformedGradientOptimizer").set_name("LLAMAEnhancedQuantumInformedGradientOptimizer", register=True) except Exception as e: print("EnhancedQuantumInformedGradientOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumInfusedAdaptiveStrategy import ( - EnhancedQuantumInfusedAdaptiveStrategy, - ) + from nevergrad.optimization.lama.EnhancedQuantumInfusedAdaptiveStrategy import EnhancedQuantumInfusedAdaptiveStrategy lama_register["EnhancedQuantumInfusedAdaptiveStrategy"] = EnhancedQuantumInfusedAdaptiveStrategy - LLAMAEnhancedQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy" - ).set_name("LLAMAEnhancedQuantumInfusedAdaptiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumInfusedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy").set_name("LLAMAEnhancedQuantumInfusedAdaptiveStrategy", register=True) except Exception as e: print("EnhancedQuantumInfusedAdaptiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumInspiredHybridOptimizer import ( - EnhancedQuantumInspiredHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumInspiredHybridOptimizer import EnhancedQuantumInspiredHybridOptimizer lama_register["EnhancedQuantumInspiredHybridOptimizer"] = EnhancedQuantumInspiredHybridOptimizer - LLAMAEnhancedQuantumInspiredHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumInspiredHybridOptimizer" - ).set_name("LLAMAEnhancedQuantumInspiredHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumInspiredHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumInspiredHybridOptimizer").set_name("LLAMAEnhancedQuantumInspiredHybridOptimizer", register=True) except Exception as e: print("EnhancedQuantumInspiredHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumIterativeRefinement import ( - EnhancedQuantumIterativeRefinement, - ) + from nevergrad.optimization.lama.EnhancedQuantumIterativeRefinement import EnhancedQuantumIterativeRefinement lama_register["EnhancedQuantumIterativeRefinement"] = EnhancedQuantumIterativeRefinement - LLAMAEnhancedQuantumIterativeRefinement = NonObjectOptimizer( - method="LLAMAEnhancedQuantumIterativeRefinement" - ).set_name("LLAMAEnhancedQuantumIterativeRefinement", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumIterativeRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumIterativeRefinement = NonObjectOptimizer(method="LLAMAEnhancedQuantumIterativeRefinement").set_name("LLAMAEnhancedQuantumIterativeRefinement", register=True) except Exception as e: print("EnhancedQuantumIterativeRefinement can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLeapGradientBoostPSO import ( - EnhancedQuantumLeapGradientBoostPSO, - ) + from nevergrad.optimization.lama.EnhancedQuantumLeapGradientBoostPSO import EnhancedQuantumLeapGradientBoostPSO lama_register["EnhancedQuantumLeapGradientBoostPSO"] = EnhancedQuantumLeapGradientBoostPSO - LLAMAEnhancedQuantumLeapGradientBoostPSO = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLeapGradientBoostPSO" - ).set_name("LLAMAEnhancedQuantumLeapGradientBoostPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapGradientBoostPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLeapGradientBoostPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapGradientBoostPSO").set_name("LLAMAEnhancedQuantumLeapGradientBoostPSO", register=True) except Exception as e: print("EnhancedQuantumLeapGradientBoostPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumLeapPSO import EnhancedQuantumLeapPSO lama_register["EnhancedQuantumLeapPSO"] = EnhancedQuantumLeapPSO - LLAMAEnhancedQuantumLeapPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO").set_name( - "LLAMAEnhancedQuantumLeapPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLeapPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO").set_name("LLAMAEnhancedQuantumLeapPSO", register=True) except Exception as e: print("EnhancedQuantumLeapPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialDynamicOptimizer import ( - EnhancedQuantumLevyDifferentialDynamicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialDynamicOptimizer import EnhancedQuantumLevyDifferentialDynamicOptimizer - lama_register["EnhancedQuantumLevyDifferentialDynamicOptimizer"] = ( - EnhancedQuantumLevyDifferentialDynamicOptimizer - ) - LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer" - ).set_name("LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer", register=True) + lama_register["EnhancedQuantumLevyDifferentialDynamicOptimizer"] = EnhancedQuantumLevyDifferentialDynamicOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer").set_name("LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer", register=True) except Exception as e: print("EnhancedQuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialOptimizer import ( - EnhancedQuantumLevyDifferentialOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialOptimizer import EnhancedQuantumLevyDifferentialOptimizer lama_register["EnhancedQuantumLevyDifferentialOptimizer"] = EnhancedQuantumLevyDifferentialOptimizer - LLAMAEnhancedQuantumLevyDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLevyDifferentialOptimizer" - ).set_name("LLAMAEnhancedQuantumLevyDifferentialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLevyDifferentialOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialOptimizer").set_name("LLAMAEnhancedQuantumLevyDifferentialOptimizer", register=True) except Exception as e: print("EnhancedQuantumLevyDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialSearch import ( - EnhancedQuantumLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialSearch import EnhancedQuantumLevyDifferentialSearch lama_register["EnhancedQuantumLevyDifferentialSearch"] = EnhancedQuantumLevyDifferentialSearch - LLAMAEnhancedQuantumLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLevyDifferentialSearch" - ).set_name("LLAMAEnhancedQuantumLevyDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialSearch").set_name("LLAMAEnhancedQuantumLevyDifferentialSearch", register=True) except Exception as e: print("EnhancedQuantumLevyDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLevyMemeticOptimizer import ( - EnhancedQuantumLevyMemeticOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumLevyMemeticOptimizer import EnhancedQuantumLevyMemeticOptimizer lama_register["EnhancedQuantumLevyMemeticOptimizer"] = EnhancedQuantumLevyMemeticOptimizer - LLAMAEnhancedQuantumLevyMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLevyMemeticOptimizer" - ).set_name("LLAMAEnhancedQuantumLevyMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyMemeticOptimizer").set_name("LLAMAEnhancedQuantumLevyMemeticOptimizer", register=True) except Exception as e: print("EnhancedQuantumLevyMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLevyParticleOptimization import ( - EnhancedQuantumLevyParticleOptimization, - ) + from nevergrad.optimization.lama.EnhancedQuantumLevyParticleOptimization import EnhancedQuantumLevyParticleOptimization lama_register["EnhancedQuantumLevyParticleOptimization"] = EnhancedQuantumLevyParticleOptimization - LLAMAEnhancedQuantumLevyParticleOptimization = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLevyParticleOptimization" - ).set_name("LLAMAEnhancedQuantumLevyParticleOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyParticleOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLevyParticleOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyParticleOptimization").set_name("LLAMAEnhancedQuantumLevyParticleOptimization", register=True) except Exception as e: print("EnhancedQuantumLevyParticleOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumLocalSearch import EnhancedQuantumLocalSearch lama_register["EnhancedQuantumLocalSearch"] = EnhancedQuantumLocalSearch - LLAMAEnhancedQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch").set_name( - "LLAMAEnhancedQuantumLocalSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch").set_name("LLAMAEnhancedQuantumLocalSearch", register=True) except Exception as e: print("EnhancedQuantumLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumLocalSearchImproved import ( - EnhancedQuantumLocalSearchImproved, - ) + from nevergrad.optimization.lama.EnhancedQuantumLocalSearchImproved import EnhancedQuantumLocalSearchImproved lama_register["EnhancedQuantumLocalSearchImproved"] = EnhancedQuantumLocalSearchImproved - LLAMAEnhancedQuantumLocalSearchImproved = NonObjectOptimizer( - method="LLAMAEnhancedQuantumLocalSearchImproved" - ).set_name("LLAMAEnhancedQuantumLocalSearchImproved", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumLocalSearchImproved = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearchImproved").set_name("LLAMAEnhancedQuantumLocalSearchImproved", register=True) except Exception as e: print("EnhancedQuantumLocalSearchImproved can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizer import EnhancedQuantumMemeticOptimizer lama_register["EnhancedQuantumMemeticOptimizer"] = EnhancedQuantumMemeticOptimizer - LLAMAEnhancedQuantumMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumMemeticOptimizer" - ).set_name("LLAMAEnhancedQuantumMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizer").set_name("LLAMAEnhancedQuantumMemeticOptimizer", register=True) except Exception as e: print("EnhancedQuantumMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizerV5 import ( - EnhancedQuantumMemeticOptimizerV5, - ) + from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizerV5 import EnhancedQuantumMemeticOptimizerV5 lama_register["EnhancedQuantumMemeticOptimizerV5"] = EnhancedQuantumMemeticOptimizerV5 - LLAMAEnhancedQuantumMemeticOptimizerV5 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumMemeticOptimizerV5" - ).set_name("LLAMAEnhancedQuantumMemeticOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumMemeticOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizerV5").set_name("LLAMAEnhancedQuantumMemeticOptimizerV5", register=True) except Exception as e: print("EnhancedQuantumMemeticOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumMultiPhaseAdaptiveDE_v10 import ( - EnhancedQuantumMultiPhaseAdaptiveDE_v10, - ) + from nevergrad.optimization.lama.EnhancedQuantumMultiPhaseAdaptiveDE_v10 import EnhancedQuantumMultiPhaseAdaptiveDE_v10 lama_register["EnhancedQuantumMultiPhaseAdaptiveDE_v10"] = EnhancedQuantumMultiPhaseAdaptiveDE_v10 - LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10" - ).set_name("LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10").set_name("LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10", register=True) except Exception as e: print("EnhancedQuantumMultiPhaseAdaptiveDE_v10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumMultiStrategyOptimization_v2 import ( - EnhancedQuantumMultiStrategyOptimization_v2, - ) + from nevergrad.optimization.lama.EnhancedQuantumMultiStrategyOptimization_v2 import EnhancedQuantumMultiStrategyOptimization_v2 lama_register["EnhancedQuantumMultiStrategyOptimization_v2"] = EnhancedQuantumMultiStrategyOptimization_v2 - LLAMAEnhancedQuantumMultiStrategyOptimization_v2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2" - ).set_name("LLAMAEnhancedQuantumMultiStrategyOptimization_v2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumMultiStrategyOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2").set_name("LLAMAEnhancedQuantumMultiStrategyOptimization_v2", register=True) except Exception as e: print("EnhancedQuantumMultiStrategyOptimization_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumPSO import EnhancedQuantumPSO lama_register["EnhancedQuantumPSO"] = EnhancedQuantumPSO - LLAMAEnhancedQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO").set_name( - "LLAMAEnhancedQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO").set_name("LLAMAEnhancedQuantumPSO", register=True) except Exception as e: print("EnhancedQuantumPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumReactiveCooperativeStrategy import ( - EnhancedQuantumReactiveCooperativeStrategy, - ) + from nevergrad.optimization.lama.EnhancedQuantumReactiveCooperativeStrategy import EnhancedQuantumReactiveCooperativeStrategy lama_register["EnhancedQuantumReactiveCooperativeStrategy"] = EnhancedQuantumReactiveCooperativeStrategy - LLAMAEnhancedQuantumReactiveCooperativeStrategy = NonObjectOptimizer( - method="LLAMAEnhancedQuantumReactiveCooperativeStrategy" - ).set_name("LLAMAEnhancedQuantumReactiveCooperativeStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumReactiveCooperativeStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumReactiveCooperativeStrategy").set_name("LLAMAEnhancedQuantumReactiveCooperativeStrategy", register=True) except Exception as e: print("EnhancedQuantumReactiveCooperativeStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumReinforcedNesterovAcceleratorV2 import ( - EnhancedQuantumReinforcedNesterovAcceleratorV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumReinforcedNesterovAcceleratorV2 import EnhancedQuantumReinforcedNesterovAcceleratorV2 - lama_register["EnhancedQuantumReinforcedNesterovAcceleratorV2"] = ( - EnhancedQuantumReinforcedNesterovAcceleratorV2 - ) - LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2" - ).set_name("LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2", register=True) + lama_register["EnhancedQuantumReinforcedNesterovAcceleratorV2"] = EnhancedQuantumReinforcedNesterovAcceleratorV2 + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2").set_name("LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2", register=True) except Exception as e: print("EnhancedQuantumReinforcedNesterovAcceleratorV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumResilientCrossoverStrategyV2 import ( - EnhancedQuantumResilientCrossoverStrategyV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumResilientCrossoverStrategyV2 import EnhancedQuantumResilientCrossoverStrategyV2 lama_register["EnhancedQuantumResilientCrossoverStrategyV2"] = EnhancedQuantumResilientCrossoverStrategyV2 - LLAMAEnhancedQuantumResilientCrossoverStrategyV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2" - ).set_name("LLAMAEnhancedQuantumResilientCrossoverStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumResilientCrossoverStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2").set_name("LLAMAEnhancedQuantumResilientCrossoverStrategyV2", register=True) except Exception as e: print("EnhancedQuantumResilientCrossoverStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealing import ( - EnhancedQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealing import EnhancedQuantumSimulatedAnnealing lama_register["EnhancedQuantumSimulatedAnnealing"] = EnhancedQuantumSimulatedAnnealing - LLAMAEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSimulatedAnnealing" - ).set_name("LLAMAEnhancedQuantumSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealing").set_name("LLAMAEnhancedQuantumSimulatedAnnealing", register=True) except Exception as e: print("EnhancedQuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingImproved import ( - EnhancedQuantumSimulatedAnnealingImproved, - ) + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingImproved import EnhancedQuantumSimulatedAnnealingImproved lama_register["EnhancedQuantumSimulatedAnnealingImproved"] = EnhancedQuantumSimulatedAnnealingImproved - LLAMAEnhancedQuantumSimulatedAnnealingImproved = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSimulatedAnnealingImproved" - ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingImproved", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSimulatedAnnealingImproved = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingImproved").set_name("LLAMAEnhancedQuantumSimulatedAnnealingImproved", register=True) except Exception as e: print("EnhancedQuantumSimulatedAnnealingImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingOptimized import ( - EnhancedQuantumSimulatedAnnealingOptimized, - ) + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingOptimized import EnhancedQuantumSimulatedAnnealingOptimized lama_register["EnhancedQuantumSimulatedAnnealingOptimized"] = EnhancedQuantumSimulatedAnnealingOptimized - LLAMAEnhancedQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized" - ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingOptimized", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSimulatedAnnealingOptimized = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized").set_name("LLAMAEnhancedQuantumSimulatedAnnealingOptimized", register=True) except Exception as e: print("EnhancedQuantumSimulatedAnnealingOptimized can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingV2 import ( - EnhancedQuantumSimulatedAnnealingV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingV2 import EnhancedQuantumSimulatedAnnealingV2 lama_register["EnhancedQuantumSimulatedAnnealingV2"] = EnhancedQuantumSimulatedAnnealingV2 - LLAMAEnhancedQuantumSimulatedAnnealingV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSimulatedAnnealingV2" - ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingV2").set_name("LLAMAEnhancedQuantumSimulatedAnnealingV2", register=True) except Exception as e: print("EnhancedQuantumSimulatedAnnealingV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumStateConvergenceOptimizer import ( - EnhancedQuantumStateConvergenceOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumStateConvergenceOptimizer import EnhancedQuantumStateConvergenceOptimizer lama_register["EnhancedQuantumStateConvergenceOptimizer"] = EnhancedQuantumStateConvergenceOptimizer - LLAMAEnhancedQuantumStateConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumStateConvergenceOptimizer" - ).set_name("LLAMAEnhancedQuantumStateConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumStateConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumStateConvergenceOptimizer").set_name("LLAMAEnhancedQuantumStateConvergenceOptimizer", register=True) except Exception as e: print("EnhancedQuantumStateConvergenceOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimization import EnhancedQuantumSwarmOptimization lama_register["EnhancedQuantumSwarmOptimization"] = EnhancedQuantumSwarmOptimization - LLAMAEnhancedQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimization").set_name("LLAMAEnhancedQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationRefined import ( - EnhancedQuantumSwarmOptimizationRefined, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationRefined import EnhancedQuantumSwarmOptimizationRefined lama_register["EnhancedQuantumSwarmOptimizationRefined"] = EnhancedQuantumSwarmOptimizationRefined - LLAMAEnhancedQuantumSwarmOptimizationRefined = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationRefined" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationRefined", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationRefined = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationRefined").set_name("LLAMAEnhancedQuantumSwarmOptimizationRefined", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV10 import ( - EnhancedQuantumSwarmOptimizationV10, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV10 import EnhancedQuantumSwarmOptimizationV10 lama_register["EnhancedQuantumSwarmOptimizationV10"] = EnhancedQuantumSwarmOptimizationV10 - LLAMAEnhancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV10" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV10", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedQuantumSwarmOptimizationV10", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV11 import ( - EnhancedQuantumSwarmOptimizationV11, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV11 import EnhancedQuantumSwarmOptimizationV11 lama_register["EnhancedQuantumSwarmOptimizationV11"] = EnhancedQuantumSwarmOptimizationV11 - LLAMAEnhancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV11" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV11", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedQuantumSwarmOptimizationV11", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV12 import ( - EnhancedQuantumSwarmOptimizationV12, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV12 import EnhancedQuantumSwarmOptimizationV12 lama_register["EnhancedQuantumSwarmOptimizationV12"] = EnhancedQuantumSwarmOptimizationV12 - LLAMAEnhancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV12" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV12", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedQuantumSwarmOptimizationV12", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV13 import ( - EnhancedQuantumSwarmOptimizationV13, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV13 import EnhancedQuantumSwarmOptimizationV13 lama_register["EnhancedQuantumSwarmOptimizationV13"] = EnhancedQuantumSwarmOptimizationV13 - LLAMAEnhancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV13" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV13", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedQuantumSwarmOptimizationV13", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV2 import ( - EnhancedQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV2 import EnhancedQuantumSwarmOptimizationV2 lama_register["EnhancedQuantumSwarmOptimizationV2"] = EnhancedQuantumSwarmOptimizationV2 - LLAMAEnhancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV2" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedQuantumSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV3 import ( - EnhancedQuantumSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV3 import EnhancedQuantumSwarmOptimizationV3 lama_register["EnhancedQuantumSwarmOptimizationV3"] = EnhancedQuantumSwarmOptimizationV3 - LLAMAEnhancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV3" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedQuantumSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV4 import ( - EnhancedQuantumSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV4 import EnhancedQuantumSwarmOptimizationV4 lama_register["EnhancedQuantumSwarmOptimizationV4"] = EnhancedQuantumSwarmOptimizationV4 - LLAMAEnhancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV4" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedQuantumSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV5 import ( - EnhancedQuantumSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV5 import EnhancedQuantumSwarmOptimizationV5 lama_register["EnhancedQuantumSwarmOptimizationV5"] = EnhancedQuantumSwarmOptimizationV5 - LLAMAEnhancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV5" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedQuantumSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV6 import ( - EnhancedQuantumSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV6 import EnhancedQuantumSwarmOptimizationV6 lama_register["EnhancedQuantumSwarmOptimizationV6"] = EnhancedQuantumSwarmOptimizationV6 - LLAMAEnhancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV6" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV6", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedQuantumSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV7 import ( - EnhancedQuantumSwarmOptimizationV7, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV7 import EnhancedQuantumSwarmOptimizationV7 lama_register["EnhancedQuantumSwarmOptimizationV7"] = EnhancedQuantumSwarmOptimizationV7 - LLAMAEnhancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV7" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV7", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedQuantumSwarmOptimizationV7", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV8 import ( - EnhancedQuantumSwarmOptimizationV8, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV8 import EnhancedQuantumSwarmOptimizationV8 lama_register["EnhancedQuantumSwarmOptimizationV8"] = EnhancedQuantumSwarmOptimizationV8 - LLAMAEnhancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV8" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedQuantumSwarmOptimizationV8", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV9 import ( - EnhancedQuantumSwarmOptimizationV9, - ) + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV9 import EnhancedQuantumSwarmOptimizationV9 lama_register["EnhancedQuantumSwarmOptimizationV9"] = EnhancedQuantumSwarmOptimizationV9 - LLAMAEnhancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizationV9" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedQuantumSwarmOptimizationV9", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizationV9 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizerV4 import EnhancedQuantumSwarmOptimizerV4 lama_register["EnhancedQuantumSwarmOptimizerV4"] = EnhancedQuantumSwarmOptimizerV4 - LLAMAEnhancedQuantumSwarmOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSwarmOptimizerV4" - ).set_name("LLAMAEnhancedQuantumSwarmOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizerV4").set_name("LLAMAEnhancedQuantumSwarmOptimizerV4", register=True) except Exception as e: print("EnhancedQuantumSwarmOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumSymbioticStrategyV5 import ( - EnhancedQuantumSymbioticStrategyV5, - ) + from nevergrad.optimization.lama.EnhancedQuantumSymbioticStrategyV5 import EnhancedQuantumSymbioticStrategyV5 lama_register["EnhancedQuantumSymbioticStrategyV5"] = EnhancedQuantumSymbioticStrategyV5 - LLAMAEnhancedQuantumSymbioticStrategyV5 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSymbioticStrategyV5" - ).set_name("LLAMAEnhancedQuantumSymbioticStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSymbioticStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSymbioticStrategyV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSymbioticStrategyV5").set_name("LLAMAEnhancedQuantumSymbioticStrategyV5", register=True) except Exception as e: print("EnhancedQuantumSymbioticStrategyV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedQuantumSynergyStrategyV2 import EnhancedQuantumSynergyStrategyV2 lama_register["EnhancedQuantumSynergyStrategyV2"] = EnhancedQuantumSynergyStrategyV2 - LLAMAEnhancedQuantumSynergyStrategyV2 = NonObjectOptimizer( - method="LLAMAEnhancedQuantumSynergyStrategyV2" - ).set_name("LLAMAEnhancedQuantumSynergyStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSynergyStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumSynergyStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSynergyStrategyV2").set_name("LLAMAEnhancedQuantumSynergyStrategyV2", register=True) except Exception as e: print("EnhancedQuantumSynergyStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedQuantumTunnelingOptimizer import ( - EnhancedQuantumTunnelingOptimizer, - ) + from nevergrad.optimization.lama.EnhancedQuantumTunnelingOptimizer import EnhancedQuantumTunnelingOptimizer lama_register["EnhancedQuantumTunnelingOptimizer"] = EnhancedQuantumTunnelingOptimizer - LLAMAEnhancedQuantumTunnelingOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedQuantumTunnelingOptimizer" - ).set_name("LLAMAEnhancedQuantumTunnelingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumTunnelingOptimizer").set_name("LLAMAEnhancedQuantumTunnelingOptimizer", register=True) except Exception as e: print("EnhancedQuantumTunnelingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRAMEDS import EnhancedRAMEDS lama_register["EnhancedRAMEDS"] = EnhancedRAMEDS - LLAMAEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS").set_name( - "LLAMAEnhancedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS").set_name("LLAMAEnhancedRAMEDS", register=True) except Exception as e: print("EnhancedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRAMEDSPro import EnhancedRAMEDSPro lama_register["EnhancedRAMEDSPro"] = EnhancedRAMEDSPro - LLAMAEnhancedRAMEDSPro = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro").set_name( - "LLAMAEnhancedRAMEDSPro", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRAMEDSPro = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro").set_name("LLAMAEnhancedRAMEDSPro", register=True) except Exception as e: print("EnhancedRAMEDSPro can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRAMEDSProV2 import EnhancedRAMEDSProV2 lama_register["EnhancedRAMEDSProV2"] = EnhancedRAMEDSProV2 - LLAMAEnhancedRAMEDSProV2 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2").set_name( - "LLAMAEnhancedRAMEDSProV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRAMEDSProV2 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2").set_name("LLAMAEnhancedRAMEDSProV2", register=True) except Exception as e: print("EnhancedRAMEDSProV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRAMEDSv3 import EnhancedRAMEDSv3 lama_register["EnhancedRAMEDSv3"] = EnhancedRAMEDSv3 - LLAMAEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3").set_name( - "LLAMAEnhancedRAMEDSv3", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3").set_name("LLAMAEnhancedRAMEDSv3", register=True) except Exception as e: print("EnhancedRAMEDSv3 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRAMEDSv4 import EnhancedRAMEDSv4 lama_register["EnhancedRAMEDSv4"] = EnhancedRAMEDSv4 - LLAMAEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4").set_name( - "LLAMAEnhancedRAMEDSv4", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4").set_name("LLAMAEnhancedRAMEDSv4", register=True) except Exception as e: print("EnhancedRAMEDSv4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolution import ( - EnhancedRefinedAdaptiveCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolution import EnhancedRefinedAdaptiveCovarianceMatrixEvolution - lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolution"] = ( - EnhancedRefinedAdaptiveCovarianceMatrixEvolution - ) - LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution" - ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution", register=True) + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolution"] = EnhancedRefinedAdaptiveCovarianceMatrixEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution").set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution", register=True) except Exception as e: print("EnhancedRefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus import ( - EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus import EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus - lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus"] = ( - EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus - ) - LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus" - ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus", register=True) + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus"] = EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus").set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus", register=True) except Exception as e: print("EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( - EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost import EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost - lama_register["EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( - EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost - ) - LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost" - ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) + lama_register["EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSearch import ( - EnhancedRefinedAdaptiveDifferentialSearch, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSearch import EnhancedRefinedAdaptiveDifferentialSearch lama_register["EnhancedRefinedAdaptiveDifferentialSearch"] = EnhancedRefinedAdaptiveDifferentialSearch - LLAMAEnhancedRefinedAdaptiveDifferentialSearch = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch" - ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSearch", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSpiralSearch import ( - EnhancedRefinedAdaptiveDifferentialSpiralSearch, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSpiralSearch import EnhancedRefinedAdaptiveDifferentialSpiralSearch - lama_register["EnhancedRefinedAdaptiveDifferentialSpiralSearch"] = ( - EnhancedRefinedAdaptiveDifferentialSpiralSearch - ) - LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch" - ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch", register=True) + lama_register["EnhancedRefinedAdaptiveDifferentialSpiralSearch"] = EnhancedRefinedAdaptiveDifferentialSpiralSearch + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDE import EnhancedRefinedAdaptiveDynamicDE lama_register["EnhancedRefinedAdaptiveDynamicDE"] = EnhancedRefinedAdaptiveDynamicDE - LLAMAEnhancedRefinedAdaptiveDynamicDE = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDynamicDE" - ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDE").set_name("LLAMAEnhancedRefinedAdaptiveDynamicDE", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 import ( - EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 import EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 - lama_register["EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15"] = ( - EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 - ) - LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15" - ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15", register=True) + lama_register["EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15"] = EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15").set_name("LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicExplorationOptimization import ( - EnhancedRefinedAdaptiveDynamicExplorationOptimization, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicExplorationOptimization import EnhancedRefinedAdaptiveDynamicExplorationOptimization - lama_register["EnhancedRefinedAdaptiveDynamicExplorationOptimization"] = ( - EnhancedRefinedAdaptiveDynamicExplorationOptimization - ) - LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization" - ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization", register=True) + lama_register["EnhancedRefinedAdaptiveDynamicExplorationOptimization"] = EnhancedRefinedAdaptiveDynamicExplorationOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( - EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution import EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveFocusedEvolutionStrategy import ( - EnhancedRefinedAdaptiveFocusedEvolutionStrategy, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveFocusedEvolutionStrategy import EnhancedRefinedAdaptiveFocusedEvolutionStrategy - lama_register["EnhancedRefinedAdaptiveFocusedEvolutionStrategy"] = ( - EnhancedRefinedAdaptiveFocusedEvolutionStrategy - ) - LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy" - ).set_name("LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy", register=True) + lama_register["EnhancedRefinedAdaptiveFocusedEvolutionStrategy"] = EnhancedRefinedAdaptiveFocusedEvolutionStrategy + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy").set_name("LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy", register=True) except Exception as e: print("EnhancedRefinedAdaptiveFocusedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveHarmonySearch import ( - EnhancedRefinedAdaptiveHarmonySearch, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveHarmonySearch import EnhancedRefinedAdaptiveHarmonySearch lama_register["EnhancedRefinedAdaptiveHarmonySearch"] = EnhancedRefinedAdaptiveHarmonySearch - LLAMAEnhancedRefinedAdaptiveHarmonySearch = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveHarmonySearch" - ).set_name("LLAMAEnhancedRefinedAdaptiveHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveHarmonySearch").set_name("LLAMAEnhancedRefinedAdaptiveHarmonySearch", register=True) except Exception as e: print("EnhancedRefinedAdaptiveHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMemeticDiverseOptimizer import ( - EnhancedRefinedAdaptiveMemeticDiverseOptimizer, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMemeticDiverseOptimizer import EnhancedRefinedAdaptiveMemeticDiverseOptimizer - lama_register["EnhancedRefinedAdaptiveMemeticDiverseOptimizer"] = ( - EnhancedRefinedAdaptiveMemeticDiverseOptimizer - ) - LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer" - ).set_name("LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer", register=True) + lama_register["EnhancedRefinedAdaptiveMemeticDiverseOptimizer"] = EnhancedRefinedAdaptiveMemeticDiverseOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer").set_name("LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer", register=True) except Exception as e: print("EnhancedRefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v4 import ( - EnhancedRefinedAdaptiveMetaNetPSO_v4, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v4 import EnhancedRefinedAdaptiveMetaNetPSO_v4 lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v4"] = EnhancedRefinedAdaptiveMetaNetPSO_v4 - LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4" - ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4").set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4", register=True) except Exception as e: print("EnhancedRefinedAdaptiveMetaNetPSO_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v5 import ( - EnhancedRefinedAdaptiveMetaNetPSO_v5, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v5 import EnhancedRefinedAdaptiveMetaNetPSO_v5 lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v5"] = EnhancedRefinedAdaptiveMetaNetPSO_v5 - LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5" - ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5").set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5", register=True) except Exception as e: print("EnhancedRefinedAdaptiveMetaNetPSO_v5 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v49 import EnhancedRefinedAdaptiveQGSA_v49 lama_register["EnhancedRefinedAdaptiveQGSA_v49"] = EnhancedRefinedAdaptiveQGSA_v49 - LLAMAEnhancedRefinedAdaptiveQGSA_v49 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v49" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v49", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v49 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v49").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v49", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v49 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v52 import EnhancedRefinedAdaptiveQGSA_v52 lama_register["EnhancedRefinedAdaptiveQGSA_v52"] = EnhancedRefinedAdaptiveQGSA_v52 - LLAMAEnhancedRefinedAdaptiveQGSA_v52 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v52" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v52", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v52 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v52").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v52", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v52 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v53 import EnhancedRefinedAdaptiveQGSA_v53 lama_register["EnhancedRefinedAdaptiveQGSA_v53"] = EnhancedRefinedAdaptiveQGSA_v53 - LLAMAEnhancedRefinedAdaptiveQGSA_v53 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v53" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v53", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v53 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v53").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v53", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v53 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v54 import EnhancedRefinedAdaptiveQGSA_v54 lama_register["EnhancedRefinedAdaptiveQGSA_v54"] = EnhancedRefinedAdaptiveQGSA_v54 - LLAMAEnhancedRefinedAdaptiveQGSA_v54 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v54" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v54", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v54 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v54").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v54", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v54 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v55 import EnhancedRefinedAdaptiveQGSA_v55 lama_register["EnhancedRefinedAdaptiveQGSA_v55"] = EnhancedRefinedAdaptiveQGSA_v55 - LLAMAEnhancedRefinedAdaptiveQGSA_v55 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v55" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v55", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v55 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v55").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v55", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v55 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v56 import EnhancedRefinedAdaptiveQGSA_v56 lama_register["EnhancedRefinedAdaptiveQGSA_v56"] = EnhancedRefinedAdaptiveQGSA_v56 - LLAMAEnhancedRefinedAdaptiveQGSA_v56 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v56" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v56", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v56 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v56").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v56", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v56 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v57 import EnhancedRefinedAdaptiveQGSA_v57 lama_register["EnhancedRefinedAdaptiveQGSA_v57"] = EnhancedRefinedAdaptiveQGSA_v57 - LLAMAEnhancedRefinedAdaptiveQGSA_v57 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v57" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v57", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v57 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v57").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v57", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v57 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v58 import EnhancedRefinedAdaptiveQGSA_v58 lama_register["EnhancedRefinedAdaptiveQGSA_v58"] = EnhancedRefinedAdaptiveQGSA_v58 - LLAMAEnhancedRefinedAdaptiveQGSA_v58 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v58" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v58", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v58 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v58").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v58", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v58 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v59 import EnhancedRefinedAdaptiveQGSA_v59 lama_register["EnhancedRefinedAdaptiveQGSA_v59"] = EnhancedRefinedAdaptiveQGSA_v59 - LLAMAEnhancedRefinedAdaptiveQGSA_v59 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v59" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v59", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v59 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v59").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v59", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v59 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v60 import EnhancedRefinedAdaptiveQGSA_v60 lama_register["EnhancedRefinedAdaptiveQGSA_v60"] = EnhancedRefinedAdaptiveQGSA_v60 - LLAMAEnhancedRefinedAdaptiveQGSA_v60 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveQGSA_v60" - ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v60", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v60 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v60").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v60", register=True) except Exception as e: print("EnhancedRefinedAdaptiveQGSA_v60 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSpiralGradientSearch import ( - EnhancedRefinedAdaptiveSpiralGradientSearch, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSpiralGradientSearch import EnhancedRefinedAdaptiveSpiralGradientSearch lama_register["EnhancedRefinedAdaptiveSpiralGradientSearch"] = EnhancedRefinedAdaptiveSpiralGradientSearch - LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch" - ).set_name("LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch").set_name("LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch", register=True) except Exception as e: print("EnhancedRefinedAdaptiveSpiralGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 import ( - EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3, - ) + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 import EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 - lama_register["EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3"] = ( - EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 - ) - LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3" - ).set_name("LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3", register=True) + lama_register["EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3"] = EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3").set_name("LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3", register=True) except Exception as e: print("EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedDualStrategyAdaptiveDE import ( - EnhancedRefinedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.EnhancedRefinedDualStrategyAdaptiveDE import EnhancedRefinedDualStrategyAdaptiveDE lama_register["EnhancedRefinedDualStrategyAdaptiveDE"] = EnhancedRefinedDualStrategyAdaptiveDE - LLAMAEnhancedRefinedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE" - ).set_name("LLAMAEnhancedRefinedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE").set_name("LLAMAEnhancedRefinedDualStrategyAdaptiveDE", register=True) except Exception as e: print("EnhancedRefinedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedDynamicFireworkAlgorithm import ( - EnhancedRefinedDynamicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedRefinedDynamicFireworkAlgorithm import EnhancedRefinedDynamicFireworkAlgorithm lama_register["EnhancedRefinedDynamicFireworkAlgorithm"] = EnhancedRefinedDynamicFireworkAlgorithm - LLAMAEnhancedRefinedDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm" - ).set_name("LLAMAEnhancedRefinedDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm").set_name("LLAMAEnhancedRefinedDynamicFireworkAlgorithm", register=True) except Exception as e: print("EnhancedRefinedDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( - EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing import EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing - lama_register["EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( - EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( - EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer import EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer - lama_register["EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( - EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer - ) - LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) + lama_register["EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedEliteDynamicMemoryHybridOptimizer import ( - EnhancedRefinedEliteDynamicMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.EnhancedRefinedEliteDynamicMemoryHybridOptimizer import EnhancedRefinedEliteDynamicMemoryHybridOptimizer - lama_register["EnhancedRefinedEliteDynamicMemoryHybridOptimizer"] = ( - EnhancedRefinedEliteDynamicMemoryHybridOptimizer - ) - LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer" - ).set_name("LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer", register=True) + lama_register["EnhancedRefinedEliteDynamicMemoryHybridOptimizer"] = EnhancedRefinedEliteDynamicMemoryHybridOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer").set_name("LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer", register=True) except Exception as e: print("EnhancedRefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 import ( - EnhancedRefinedEvolutionaryGradientHybridOptimizerV4, - ) + from nevergrad.optimization.lama.EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 import EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 - lama_register["EnhancedRefinedEvolutionaryGradientHybridOptimizerV4"] = ( - EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 - ) - LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4" - ).set_name("LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4", register=True) + lama_register["EnhancedRefinedEvolutionaryGradientHybridOptimizerV4"] = EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4").set_name("LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4", register=True) except Exception as e: print("EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGradientBoostedMemoryAnnealing import ( - EnhancedRefinedGradientBoostedMemoryAnnealing, - ) + from nevergrad.optimization.lama.EnhancedRefinedGradientBoostedMemoryAnnealing import EnhancedRefinedGradientBoostedMemoryAnnealing - lama_register["EnhancedRefinedGradientBoostedMemoryAnnealing"] = ( - EnhancedRefinedGradientBoostedMemoryAnnealing - ) - LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing" - ).set_name("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing", register=True) + lama_register["EnhancedRefinedGradientBoostedMemoryAnnealing"] = EnhancedRefinedGradientBoostedMemoryAnnealing + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing").set_name("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("EnhancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v88 import ( - EnhancedRefinedGuidedMassQGSA_v88, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v88 import EnhancedRefinedGuidedMassQGSA_v88 lama_register["EnhancedRefinedGuidedMassQGSA_v88"] = EnhancedRefinedGuidedMassQGSA_v88 - LLAMAEnhancedRefinedGuidedMassQGSA_v88 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v88" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v88", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v88")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v88 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v88").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v88", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v88 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v89 import ( - EnhancedRefinedGuidedMassQGSA_v89, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v89 import EnhancedRefinedGuidedMassQGSA_v89 lama_register["EnhancedRefinedGuidedMassQGSA_v89"] = EnhancedRefinedGuidedMassQGSA_v89 - LLAMAEnhancedRefinedGuidedMassQGSA_v89 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v89" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v89", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v89")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v89 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v89").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v89", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v89 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v90 import ( - EnhancedRefinedGuidedMassQGSA_v90, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v90 import EnhancedRefinedGuidedMassQGSA_v90 lama_register["EnhancedRefinedGuidedMassQGSA_v90"] = EnhancedRefinedGuidedMassQGSA_v90 - LLAMAEnhancedRefinedGuidedMassQGSA_v90 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v90" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v90", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v90")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v90 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v90").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v90", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v90 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v91 import ( - EnhancedRefinedGuidedMassQGSA_v91, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v91 import EnhancedRefinedGuidedMassQGSA_v91 lama_register["EnhancedRefinedGuidedMassQGSA_v91"] = EnhancedRefinedGuidedMassQGSA_v91 - LLAMAEnhancedRefinedGuidedMassQGSA_v91 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v91" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v91", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v91")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v91 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v91").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v91", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v91 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v92 import ( - EnhancedRefinedGuidedMassQGSA_v92, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v92 import EnhancedRefinedGuidedMassQGSA_v92 lama_register["EnhancedRefinedGuidedMassQGSA_v92"] = EnhancedRefinedGuidedMassQGSA_v92 - LLAMAEnhancedRefinedGuidedMassQGSA_v92 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v92" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v92", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v92")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v92 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v92").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v92", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v92 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v93 import ( - EnhancedRefinedGuidedMassQGSA_v93, - ) + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v93 import EnhancedRefinedGuidedMassQGSA_v93 lama_register["EnhancedRefinedGuidedMassQGSA_v93"] = EnhancedRefinedGuidedMassQGSA_v93 - LLAMAEnhancedRefinedGuidedMassQGSA_v93 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedGuidedMassQGSA_v93" - ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v93", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v93")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v93 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v93").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v93", register=True) except Exception as e: print("EnhancedRefinedGuidedMassQGSA_v93 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution import ( - EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution import EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution - lama_register["EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution"] = ( - EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution - ) - LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution", register=True) + lama_register["EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution"] = EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedHybridDEPSOWithDynamicAdaptation import ( - EnhancedRefinedHybridDEPSOWithDynamicAdaptation, - ) + from nevergrad.optimization.lama.EnhancedRefinedHybridDEPSOWithDynamicAdaptation import EnhancedRefinedHybridDEPSOWithDynamicAdaptation - lama_register["EnhancedRefinedHybridDEPSOWithDynamicAdaptation"] = ( - EnhancedRefinedHybridDEPSOWithDynamicAdaptation - ) - LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation" - ).set_name("LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", register=True) + lama_register["EnhancedRefinedHybridDEPSOWithDynamicAdaptation"] = EnhancedRefinedHybridDEPSOWithDynamicAdaptation + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation").set_name("LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", register=True) except Exception as e: print("EnhancedRefinedHybridDEPSOWithDynamicAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( - EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution import EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution - lama_register["EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( - EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution - ) - LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution" - ).set_name("LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) + lama_register["EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedHybridOptimizer import EnhancedRefinedHybridOptimizer lama_register["EnhancedRefinedHybridOptimizer"] = EnhancedRefinedHybridOptimizer - LLAMAEnhancedRefinedHybridOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHybridOptimizer" - ).set_name("LLAMAEnhancedRefinedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridOptimizer").set_name("LLAMAEnhancedRefinedHybridOptimizer", register=True) except Exception as e: print("EnhancedRefinedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 import ( - EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3, - ) + from nevergrad.optimization.lama.EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 import EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 - lama_register["EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3"] = ( - EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 - ) - LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3" - ).set_name("LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3", register=True) + lama_register["EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3"] = EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3").set_name("LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3", register=True) except Exception as e: print("EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer import ( - EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer import EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer - lama_register["EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( - EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer - ) - LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer" - ).set_name("LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) + lama_register["EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer"] = EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) except Exception as e: print("EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSO import EnhancedRefinedMetaNetAQAPSO lama_register["EnhancedRefinedMetaNetAQAPSO"] = EnhancedRefinedMetaNetAQAPSO - LLAMAEnhancedRefinedMetaNetAQAPSO = NonObjectOptimizer( - method="LLAMAEnhancedRefinedMetaNetAQAPSO" - ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSO").set_name("LLAMAEnhancedRefinedMetaNetAQAPSO", register=True) except Exception as e: print("EnhancedRefinedMetaNetAQAPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv8 import EnhancedRefinedMetaNetAQAPSOv8 lama_register["EnhancedRefinedMetaNetAQAPSOv8"] = EnhancedRefinedMetaNetAQAPSOv8 - LLAMAEnhancedRefinedMetaNetAQAPSOv8 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedMetaNetAQAPSOv8" - ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv8", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedMetaNetAQAPSOv8 = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv8").set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv8", register=True) except Exception as e: print("EnhancedRefinedMetaNetAQAPSOv8 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv9 import EnhancedRefinedMetaNetAQAPSOv9 lama_register["EnhancedRefinedMetaNetAQAPSOv9"] = EnhancedRefinedMetaNetAQAPSOv9 - LLAMAEnhancedRefinedMetaNetAQAPSOv9 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedMetaNetAQAPSOv9" - ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv9", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedMetaNetAQAPSOv9 = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv9").set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv9", register=True) except Exception as e: print("EnhancedRefinedMetaNetAQAPSOv9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 import ( - EnhancedRefinedOptimalDynamicPrecisionOptimizerV16, - ) + from nevergrad.optimization.lama.EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 import EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 - lama_register["EnhancedRefinedOptimalDynamicPrecisionOptimizerV16"] = ( - EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 - ) - LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16" - ).set_name("LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16", register=True) + lama_register["EnhancedRefinedOptimalDynamicPrecisionOptimizerV16"] = EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16 = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16").set_name("LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16", register=True) except Exception as e: print("EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization import ( - EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization import EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization - lama_register["EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( - EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization - ) - LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) + lama_register["EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization"] = EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRefinedSpatialOptimizer import EnhancedRefinedSpatialOptimizer lama_register["EnhancedRefinedSpatialOptimizer"] = EnhancedRefinedSpatialOptimizer - LLAMAEnhancedRefinedSpatialOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRefinedSpatialOptimizer" - ).set_name("LLAMAEnhancedRefinedSpatialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedSpatialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedSpatialOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedSpatialOptimizer").set_name("LLAMAEnhancedRefinedSpatialOptimizer", register=True) except Exception as e: print("EnhancedRefinedSpatialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 import ( - EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 import EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 - lama_register["EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35"] = ( - EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 - ) - LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35" - ).set_name("LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35", register=True) + lama_register["EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35"] = EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35").set_name("LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35", register=True) except Exception as e: print("EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v72 import ( - EnhancedRefinedUltimateGuidedMassQGSA_v72, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v72 import EnhancedRefinedUltimateGuidedMassQGSA_v72 lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v72"] = EnhancedRefinedUltimateGuidedMassQGSA_v72 - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72" - ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72", register=True) except Exception as e: print("EnhancedRefinedUltimateGuidedMassQGSA_v72 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v73 import ( - EnhancedRefinedUltimateGuidedMassQGSA_v73, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v73 import EnhancedRefinedUltimateGuidedMassQGSA_v73 lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v73"] = EnhancedRefinedUltimateGuidedMassQGSA_v73 - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73" - ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73", register=True) except Exception as e: print("EnhancedRefinedUltimateGuidedMassQGSA_v73 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v74 import ( - EnhancedRefinedUltimateGuidedMassQGSA_v74, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v74 import EnhancedRefinedUltimateGuidedMassQGSA_v74 lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v74"] = EnhancedRefinedUltimateGuidedMassQGSA_v74 - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74" - ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74", register=True) except Exception as e: print("EnhancedRefinedUltimateGuidedMassQGSA_v74 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v76 import ( - EnhancedRefinedUltimateGuidedMassQGSA_v76, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v76 import EnhancedRefinedUltimateGuidedMassQGSA_v76 lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v76"] = EnhancedRefinedUltimateGuidedMassQGSA_v76 - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76" - ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76", register=True) except Exception as e: print("EnhancedRefinedUltimateGuidedMassQGSA_v76 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 import ( - EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43, - ) + from nevergrad.optimization.lama.EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 import EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 - lama_register["EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43"] = ( - EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 - ) - LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( - method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43" - ).set_name("LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43", register=True) + lama_register["EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43"] = EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 + res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43").set_name("LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43", register=True) except Exception as e: print("EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedResilientAdaptivePSO import EnhancedResilientAdaptivePSO lama_register["EnhancedResilientAdaptivePSO"] = EnhancedResilientAdaptivePSO - LLAMAEnhancedResilientAdaptivePSO = NonObjectOptimizer( - method="LLAMAEnhancedResilientAdaptivePSO" - ).set_name("LLAMAEnhancedResilientAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedResilientAdaptivePSO").set_name("LLAMAEnhancedResilientAdaptivePSO", register=True) except Exception as e: print("EnhancedResilientAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch import ( - EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch, - ) + from nevergrad.optimization.lama.EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch import EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch - lama_register["EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch"] = ( - EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch - ) - LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( - method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch" - ).set_name("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch", register=True) + lama_register["EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch"] = EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch + res = NonObjectOptimizer(method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch").set_name("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch", register=True) except Exception as e: print("EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedRotationalClimbOptimizer import EnhancedRotationalClimbOptimizer lama_register["EnhancedRotationalClimbOptimizer"] = EnhancedRotationalClimbOptimizer - LLAMAEnhancedRotationalClimbOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedRotationalClimbOptimizer" - ).set_name("LLAMAEnhancedRotationalClimbOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRotationalClimbOptimizer").set_name("LLAMAEnhancedRotationalClimbOptimizer", register=True) except Exception as e: print("EnhancedRotationalClimbOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSelectiveEvolutionaryOptimizerV21 import ( - EnhancedSelectiveEvolutionaryOptimizerV21, - ) + from nevergrad.optimization.lama.EnhancedSelectiveEvolutionaryOptimizerV21 import EnhancedSelectiveEvolutionaryOptimizerV21 lama_register["EnhancedSelectiveEvolutionaryOptimizerV21"] = EnhancedSelectiveEvolutionaryOptimizerV21 - LLAMAEnhancedSelectiveEvolutionaryOptimizerV21 = NonObjectOptimizer( - method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21" - ).set_name("LLAMAEnhancedSelectiveEvolutionaryOptimizerV21", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSelectiveEvolutionaryOptimizerV21 = NonObjectOptimizer(method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21").set_name("LLAMAEnhancedSelectiveEvolutionaryOptimizerV21", register=True) except Exception as e: print("EnhancedSelectiveEvolutionaryOptimizerV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution import ( - EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution import EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution - lama_register["EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( - EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution - ) - LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) + lama_register["EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution"] = EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE import EnhancedSelfAdaptiveDE lama_register["EnhancedSelfAdaptiveDE"] = EnhancedSelfAdaptiveDE - LLAMAEnhancedSelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE").set_name( - "LLAMAEnhancedSelfAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE").set_name("LLAMAEnhancedSelfAdaptiveDE", register=True) except Exception as e: print("EnhancedSelfAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE2 import EnhancedSelfAdaptiveDE2 lama_register["EnhancedSelfAdaptiveDE2"] = EnhancedSelfAdaptiveDE2 - LLAMAEnhancedSelfAdaptiveDE2 = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2").set_name( - "LLAMAEnhancedSelfAdaptiveDE2", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSelfAdaptiveDE2 = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2").set_name("LLAMAEnhancedSelfAdaptiveDE2", register=True) except Exception as e: print("EnhancedSelfAdaptiveDE2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSelfAdaptiveMemeticAlgorithm import ( - EnhancedSelfAdaptiveMemeticAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedSelfAdaptiveMemeticAlgorithm import EnhancedSelfAdaptiveMemeticAlgorithm lama_register["EnhancedSelfAdaptiveMemeticAlgorithm"] = EnhancedSelfAdaptiveMemeticAlgorithm - LLAMAEnhancedSelfAdaptiveMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm" - ).set_name("LLAMAEnhancedSelfAdaptiveMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSelfAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm").set_name("LLAMAEnhancedSelfAdaptiveMemeticAlgorithm", register=True) except Exception as e: print("EnhancedSelfAdaptiveMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSequentialQuadraticAdaptiveEvolutionStrategy import ( - EnhancedSequentialQuadraticAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.EnhancedSequentialQuadraticAdaptiveEvolutionStrategy import EnhancedSequentialQuadraticAdaptiveEvolutionStrategy - lama_register["EnhancedSequentialQuadraticAdaptiveEvolutionStrategy"] = ( - EnhancedSequentialQuadraticAdaptiveEvolutionStrategy - ) - LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy" - ).set_name("LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy", register=True) + lama_register["EnhancedSequentialQuadraticAdaptiveEvolutionStrategy"] = EnhancedSequentialQuadraticAdaptiveEvolutionStrategy + res = NonObjectOptimizer(method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy").set_name("LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy", register=True) except Exception as e: print("EnhancedSequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSpatialAdaptiveEvolver import EnhancedSpatialAdaptiveEvolver lama_register["EnhancedSpatialAdaptiveEvolver"] = EnhancedSpatialAdaptiveEvolver - LLAMAEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAEnhancedSpatialAdaptiveEvolver" - ).set_name("LLAMAEnhancedSpatialAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveEvolver").set_name("LLAMAEnhancedSpatialAdaptiveEvolver", register=True) except Exception as e: print("EnhancedSpatialAdaptiveEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSpatialAdaptiveOptimizer import EnhancedSpatialAdaptiveOptimizer lama_register["EnhancedSpatialAdaptiveOptimizer"] = EnhancedSpatialAdaptiveOptimizer - LLAMAEnhancedSpatialAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedSpatialAdaptiveOptimizer" - ).set_name("LLAMAEnhancedSpatialAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSpatialAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveOptimizer").set_name("LLAMAEnhancedSpatialAdaptiveOptimizer", register=True) except Exception as e: print("EnhancedSpatialAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSpectralHybridOptimization import ( - EnhancedSpectralHybridOptimization, - ) + from nevergrad.optimization.lama.EnhancedSpectralHybridOptimization import EnhancedSpectralHybridOptimization lama_register["EnhancedSpectralHybridOptimization"] = EnhancedSpectralHybridOptimization - LLAMAEnhancedSpectralHybridOptimization = NonObjectOptimizer( - method="LLAMAEnhancedSpectralHybridOptimization" - ).set_name("LLAMAEnhancedSpectralHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSpectralHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSpectralHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedSpectralHybridOptimization").set_name("LLAMAEnhancedSpectralHybridOptimization", register=True) except Exception as e: print("EnhancedSpectralHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover import ( - EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover, - ) + from nevergrad.optimization.lama.EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover import EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover - lama_register["EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover"] = ( - EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover - ) - LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover = NonObjectOptimizer( - method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover" - ).set_name( - "LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover", register=True - ) + lama_register["EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover"] = EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover + res = NonObjectOptimizer(method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover = NonObjectOptimizer(method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover").set_name("LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover", register=True) except Exception as e: - print( - "EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover can not be imported: ", e - ) - + print("EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover can not be imported: ", e) try: - from nevergrad.optimization.lama.EnhancedStochasticGradientDifferentialEvolution import ( - EnhancedStochasticGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.EnhancedStochasticGradientDifferentialEvolution import EnhancedStochasticGradientDifferentialEvolution - lama_register["EnhancedStochasticGradientDifferentialEvolution"] = ( - EnhancedStochasticGradientDifferentialEvolution - ) - LLAMAEnhancedStochasticGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAEnhancedStochasticGradientDifferentialEvolution" - ).set_name("LLAMAEnhancedStochasticGradientDifferentialEvolution", register=True) + lama_register["EnhancedStochasticGradientDifferentialEvolution"] = EnhancedStochasticGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAEnhancedStochasticGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStochasticGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedStochasticGradientDifferentialEvolution").set_name("LLAMAEnhancedStochasticGradientDifferentialEvolution", register=True) except Exception as e: print("EnhancedStochasticGradientDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedStochasticMetaHeuristicOptimizer import ( - EnhancedStochasticMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.EnhancedStochasticMetaHeuristicOptimizer import EnhancedStochasticMetaHeuristicOptimizer lama_register["EnhancedStochasticMetaHeuristicOptimizer"] = EnhancedStochasticMetaHeuristicOptimizer - LLAMAEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedStochasticMetaHeuristicOptimizer" - ).set_name("LLAMAEnhancedStochasticMetaHeuristicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedStochasticMetaHeuristicOptimizer").set_name("LLAMAEnhancedStochasticMetaHeuristicOptimizer", register=True) except Exception as e: print("EnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedStrategicAdaptiveOptimizer import ( - EnhancedStrategicAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.EnhancedStrategicAdaptiveOptimizer import EnhancedStrategicAdaptiveOptimizer lama_register["EnhancedStrategicAdaptiveOptimizer"] = EnhancedStrategicAdaptiveOptimizer - LLAMAEnhancedStrategicAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAEnhancedStrategicAdaptiveOptimizer" - ).set_name("LLAMAEnhancedStrategicAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStrategicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedStrategicAdaptiveOptimizer").set_name("LLAMAEnhancedStrategicAdaptiveOptimizer", register=True) except Exception as e: print("EnhancedStrategicAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedStrategicMemoryAdaptiveStrategyV44 import ( - EnhancedStrategicMemoryAdaptiveStrategyV44, - ) + from nevergrad.optimization.lama.EnhancedStrategicMemoryAdaptiveStrategyV44 import EnhancedStrategicMemoryAdaptiveStrategyV44 lama_register["EnhancedStrategicMemoryAdaptiveStrategyV44"] = EnhancedStrategicMemoryAdaptiveStrategyV44 - LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44 = NonObjectOptimizer( - method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44" - ).set_name("LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44 = NonObjectOptimizer(method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44").set_name("LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44", register=True) except Exception as e: print("EnhancedStrategicMemoryAdaptiveStrategyV44 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedStrategicPSO import EnhancedStrategicPSO lama_register["EnhancedStrategicPSO"] = EnhancedStrategicPSO - LLAMAEnhancedStrategicPSO = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO").set_name( - "LLAMAEnhancedStrategicPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStrategicPSO = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO").set_name("LLAMAEnhancedStrategicPSO", register=True) except Exception as e: print("EnhancedStrategicPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedStrategyDE import EnhancedStrategyDE lama_register["EnhancedStrategyDE"] = EnhancedStrategyDE - LLAMAEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE").set_name( - "LLAMAEnhancedStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE").set_name("LLAMAEnhancedStrategyDE", register=True) except Exception as e: print("EnhancedStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimization import ( - EnhancedSuperDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimization import EnhancedSuperDynamicQuantumSwarmOptimization - lama_register["EnhancedSuperDynamicQuantumSwarmOptimization"] = ( - EnhancedSuperDynamicQuantumSwarmOptimization - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimization", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimization"] = EnhancedSuperDynamicQuantumSwarmOptimization + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV2 import ( - EnhancedSuperDynamicQuantumSwarmOptimizationV2, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV2 import EnhancedSuperDynamicQuantumSwarmOptimizationV2 - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV2"] = ( - EnhancedSuperDynamicQuantumSwarmOptimizationV2 - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV2"] = EnhancedSuperDynamicQuantumSwarmOptimizationV2 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV3 import ( - EnhancedSuperDynamicQuantumSwarmOptimizationV3, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV3 import EnhancedSuperDynamicQuantumSwarmOptimizationV3 - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV3"] = ( - EnhancedSuperDynamicQuantumSwarmOptimizationV3 - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV3"] = EnhancedSuperDynamicQuantumSwarmOptimizationV3 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV4 import ( - EnhancedSuperDynamicQuantumSwarmOptimizationV4, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV4 import EnhancedSuperDynamicQuantumSwarmOptimizationV4 - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV4"] = ( - EnhancedSuperDynamicQuantumSwarmOptimizationV4 - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV4"] = EnhancedSuperDynamicQuantumSwarmOptimizationV4 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV5 import ( - EnhancedSuperDynamicQuantumSwarmOptimizationV5, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV5 import EnhancedSuperDynamicQuantumSwarmOptimizationV5 - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV5"] = ( - EnhancedSuperDynamicQuantumSwarmOptimizationV5 - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV5"] = EnhancedSuperDynamicQuantumSwarmOptimizationV5 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV6 import ( - EnhancedSuperDynamicQuantumSwarmOptimizationV6, - ) + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV6 import EnhancedSuperDynamicQuantumSwarmOptimizationV6 - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV6"] = ( - EnhancedSuperDynamicQuantumSwarmOptimizationV6 - ) - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( - method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6" - ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6", register=True) + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV6"] = EnhancedSuperDynamicQuantumSwarmOptimizationV6 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6", register=True) except Exception as e: print("EnhancedSuperDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSuperRefinedRAMEDS import EnhancedSuperRefinedRAMEDS lama_register["EnhancedSuperRefinedRAMEDS"] = EnhancedSuperRefinedRAMEDS - LLAMAEnhancedSuperRefinedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS").set_name( - "LLAMAEnhancedSuperRefinedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperRefinedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS").set_name("LLAMAEnhancedSuperRefinedRAMEDS", register=True) except Exception as e: print("EnhancedSuperRefinedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 import ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9, - ) + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9"] = ( - EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 - ) - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 = NonObjectOptimizer( - method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9" - ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9", register=True) + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 + res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9", register=True) except Exception as e: print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSuperiorUltimateGuidedMassQGSA_v80 import ( - EnhancedSuperiorUltimateGuidedMassQGSA_v80, - ) + from nevergrad.optimization.lama.EnhancedSuperiorUltimateGuidedMassQGSA_v80 import EnhancedSuperiorUltimateGuidedMassQGSA_v80 lama_register["EnhancedSuperiorUltimateGuidedMassQGSA_v80"] = EnhancedSuperiorUltimateGuidedMassQGSA_v80 - LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80 = NonObjectOptimizer( - method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80" - ).set_name("LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80 = NonObjectOptimizer(method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80").set_name("LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80", register=True) except Exception as e: print("EnhancedSuperiorUltimateGuidedMassQGSA_v80 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedSupremeDynamicPrecisionOptimizerV1 import ( - EnhancedSupremeDynamicPrecisionOptimizerV1, - ) + from nevergrad.optimization.lama.EnhancedSupremeDynamicPrecisionOptimizerV1 import EnhancedSupremeDynamicPrecisionOptimizerV1 lama_register["EnhancedSupremeDynamicPrecisionOptimizerV1"] = EnhancedSupremeDynamicPrecisionOptimizerV1 - LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( - method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1" - ).set_name("LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1").set_name("LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1", register=True) except Exception as e: print("EnhancedSupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) - try: from nevergrad.optimization.lama.EnhancedSwarmHybridOptimization import EnhancedSwarmHybridOptimization lama_register["EnhancedSwarmHybridOptimization"] = EnhancedSwarmHybridOptimization - LLAMAEnhancedSwarmHybridOptimization = NonObjectOptimizer( - method="LLAMAEnhancedSwarmHybridOptimization" - ).set_name("LLAMAEnhancedSwarmHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedSwarmHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedSwarmHybridOptimization").set_name("LLAMAEnhancedSwarmHybridOptimization", register=True) except Exception as e: print("EnhancedSwarmHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedTwoPhaseDynamicStrategyV39 import ( - EnhancedTwoPhaseDynamicStrategyV39, - ) + from nevergrad.optimization.lama.EnhancedTwoPhaseDynamicStrategyV39 import EnhancedTwoPhaseDynamicStrategyV39 lama_register["EnhancedTwoPhaseDynamicStrategyV39"] = EnhancedTwoPhaseDynamicStrategyV39 - LLAMAEnhancedTwoPhaseDynamicStrategyV39 = NonObjectOptimizer( - method="LLAMAEnhancedTwoPhaseDynamicStrategyV39" - ).set_name("LLAMAEnhancedTwoPhaseDynamicStrategyV39", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedTwoPhaseDynamicStrategyV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedTwoPhaseDynamicStrategyV39 = NonObjectOptimizer(method="LLAMAEnhancedTwoPhaseDynamicStrategyV39").set_name("LLAMAEnhancedTwoPhaseDynamicStrategyV39", register=True) except Exception as e: print("EnhancedTwoPhaseDynamicStrategyV39 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithm import ( - EnhancedUltimateDynamicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithm import EnhancedUltimateDynamicFireworkAlgorithm lama_register["EnhancedUltimateDynamicFireworkAlgorithm"] = EnhancedUltimateDynamicFireworkAlgorithm - LLAMAEnhancedUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm" - ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm").set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithm", register=True) except Exception as e: print("EnhancedUltimateDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithmImproved import ( - EnhancedUltimateDynamicFireworkAlgorithmImproved, - ) + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithmImproved import EnhancedUltimateDynamicFireworkAlgorithmImproved - lama_register["EnhancedUltimateDynamicFireworkAlgorithmImproved"] = ( - EnhancedUltimateDynamicFireworkAlgorithmImproved - ) - LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( - method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved" - ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved", register=True) + lama_register["EnhancedUltimateDynamicFireworkAlgorithmImproved"] = EnhancedUltimateDynamicFireworkAlgorithmImproved + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved", register=True) except Exception as e: print("EnhancedUltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateEvolutionaryGradientOptimizerV36 import ( - EnhancedUltimateEvolutionaryGradientOptimizerV36, - ) + from nevergrad.optimization.lama.EnhancedUltimateEvolutionaryGradientOptimizerV36 import EnhancedUltimateEvolutionaryGradientOptimizerV36 - lama_register["EnhancedUltimateEvolutionaryGradientOptimizerV36"] = ( - EnhancedUltimateEvolutionaryGradientOptimizerV36 - ) - LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36 = NonObjectOptimizer( - method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36" - ).set_name("LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36", register=True) + lama_register["EnhancedUltimateEvolutionaryGradientOptimizerV36"] = EnhancedUltimateEvolutionaryGradientOptimizerV36 + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36 = NonObjectOptimizer(method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36").set_name("LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36", register=True) except Exception as e: print("EnhancedUltimateEvolutionaryGradientOptimizerV36 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP, - ) + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( - method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" - ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) except Exception as e: print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined import ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined, - ) + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined"] = ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined - ) - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( - method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined" - ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined", register=True) + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined", register=True) except Exception as e: print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 import ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2, - ) + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2"] = ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 - ) - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 = NonObjectOptimizer( - method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2" - ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2", register=True) + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2", register=True) except Exception as e: print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 import ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3, - ) + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3"] = ( - EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 - ) - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 = NonObjectOptimizer( - method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3" - ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3", register=True) + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 + res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3", register=True) except Exception as e: print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 import ( - EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44, - ) + from nevergrad.optimization.lama.EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 import EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 - lama_register["EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44"] = ( - EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 - ) - LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 = NonObjectOptimizer( - method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44" - ).set_name("LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44", register=True) + lama_register["EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44"] = EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 + res = NonObjectOptimizer(method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 = NonObjectOptimizer(method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44").set_name("LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44", register=True) except Exception as e: print("EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnsembleAdaptiveEvolutionaryAlgorithm import ( - EnsembleAdaptiveEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.EnsembleAdaptiveEvolutionaryAlgorithm import EnsembleAdaptiveEvolutionaryAlgorithm lama_register["EnsembleAdaptiveEvolutionaryAlgorithm"] = EnsembleAdaptiveEvolutionaryAlgorithm - LLAMAEnsembleAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm" - ).set_name("LLAMAEnsembleAdaptiveEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm").set_name("LLAMAEnsembleAdaptiveEvolutionaryAlgorithm", register=True) except Exception as e: print("EnsembleAdaptiveEvolutionaryAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleAdaptiveMemeticOptimizer import EnsembleAdaptiveMemeticOptimizer lama_register["EnsembleAdaptiveMemeticOptimizer"] = EnsembleAdaptiveMemeticOptimizer - LLAMAEnsembleAdaptiveMemeticOptimizer = NonObjectOptimizer( - method="LLAMAEnsembleAdaptiveMemeticOptimizer" - ).set_name("LLAMAEnsembleAdaptiveMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveMemeticOptimizer").set_name("LLAMAEnsembleAdaptiveMemeticOptimizer", register=True) except Exception as e: print("EnsembleAdaptiveMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleAdaptiveQuantumDE import EnsembleAdaptiveQuantumDE lama_register["EnsembleAdaptiveQuantumDE"] = EnsembleAdaptiveQuantumDE - LLAMAEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE").set_name( - "LLAMAEnsembleAdaptiveQuantumDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE").set_name("LLAMAEnsembleAdaptiveQuantumDE", register=True) except Exception as e: print("EnsembleAdaptiveQuantumDE can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleDE import EnsembleDE lama_register["EnsembleDE"] = EnsembleDE + res = NonObjectOptimizer(method="LLAMAEnsembleDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAEnsembleDE = NonObjectOptimizer(method="LLAMAEnsembleDE").set_name("LLAMAEnsembleDE", register=True) except Exception as e: print("EnsembleDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EnsembleEvolutionaryCulturalSearch import ( - EnsembleEvolutionaryCulturalSearch, - ) + from nevergrad.optimization.lama.EnsembleEvolutionaryCulturalSearch import EnsembleEvolutionaryCulturalSearch lama_register["EnsembleEvolutionaryCulturalSearch"] = EnsembleEvolutionaryCulturalSearch - LLAMAEnsembleEvolutionaryCulturalSearch = NonObjectOptimizer( - method="LLAMAEnsembleEvolutionaryCulturalSearch" - ).set_name("LLAMAEnsembleEvolutionaryCulturalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEnsembleEvolutionaryCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleEvolutionaryCulturalSearch = NonObjectOptimizer(method="LLAMAEnsembleEvolutionaryCulturalSearch").set_name("LLAMAEnsembleEvolutionaryCulturalSearch", register=True) except Exception as e: print("EnsembleEvolutionaryCulturalSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleHybridSearch import EnsembleHybridSearch lama_register["EnsembleHybridSearch"] = EnsembleHybridSearch - LLAMAEnsembleHybridSearch = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch").set_name( - "LLAMAEnsembleHybridSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleHybridSearch = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch").set_name("LLAMAEnsembleHybridSearch", register=True) except Exception as e: print("EnsembleHybridSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleMemeticAlgorithm import EnsembleMemeticAlgorithm lama_register["EnsembleMemeticAlgorithm"] = EnsembleMemeticAlgorithm - LLAMAEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm").set_name( - "LLAMAEnsembleMemeticAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm").set_name("LLAMAEnsembleMemeticAlgorithm", register=True) except Exception as e: print("EnsembleMemeticAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.EnsembleMutationAdaptiveDE import EnsembleMutationAdaptiveDE lama_register["EnsembleMutationAdaptiveDE"] = EnsembleMutationAdaptiveDE - LLAMAEnsembleMutationAdaptiveDE = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE").set_name( - "LLAMAEnsembleMutationAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEnsembleMutationAdaptiveDE = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE").set_name("LLAMAEnsembleMutationAdaptiveDE", register=True) except Exception as e: print("EnsembleMutationAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.EntropyEnhancedAdaptiveStrategyV61 import ( - EntropyEnhancedAdaptiveStrategyV61, - ) + from nevergrad.optimization.lama.EntropyEnhancedAdaptiveStrategyV61 import EntropyEnhancedAdaptiveStrategyV61 lama_register["EntropyEnhancedAdaptiveStrategyV61"] = EntropyEnhancedAdaptiveStrategyV61 - LLAMAEntropyEnhancedAdaptiveStrategyV61 = NonObjectOptimizer( - method="LLAMAEntropyEnhancedAdaptiveStrategyV61" - ).set_name("LLAMAEntropyEnhancedAdaptiveStrategyV61", register=True) + res = NonObjectOptimizer(method="LLAMAEntropyEnhancedAdaptiveStrategyV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEntropyEnhancedAdaptiveStrategyV61 = NonObjectOptimizer(method="LLAMAEntropyEnhancedAdaptiveStrategyV61").set_name("LLAMAEntropyEnhancedAdaptiveStrategyV61", register=True) except Exception as e: print("EntropyEnhancedAdaptiveStrategyV61 can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryConvergenceSpiralSearch import ( - EvolutionaryConvergenceSpiralSearch, - ) + from nevergrad.optimization.lama.EvolutionaryConvergenceSpiralSearch import EvolutionaryConvergenceSpiralSearch lama_register["EvolutionaryConvergenceSpiralSearch"] = EvolutionaryConvergenceSpiralSearch - LLAMAEvolutionaryConvergenceSpiralSearch = NonObjectOptimizer( - method="LLAMAEvolutionaryConvergenceSpiralSearch" - ).set_name("LLAMAEvolutionaryConvergenceSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryConvergenceSpiralSearch = NonObjectOptimizer(method="LLAMAEvolutionaryConvergenceSpiralSearch").set_name("LLAMAEvolutionaryConvergenceSpiralSearch", register=True) except Exception as e: print("EvolutionaryConvergenceSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryDynamicGradientSearch import ( - EvolutionaryDynamicGradientSearch, - ) + from nevergrad.optimization.lama.EvolutionaryDynamicGradientSearch import EvolutionaryDynamicGradientSearch lama_register["EvolutionaryDynamicGradientSearch"] = EvolutionaryDynamicGradientSearch - LLAMAEvolutionaryDynamicGradientSearch = NonObjectOptimizer( - method="LLAMAEvolutionaryDynamicGradientSearch" - ).set_name("LLAMAEvolutionaryDynamicGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryDynamicGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryDynamicGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryDynamicGradientSearch").set_name("LLAMAEvolutionaryDynamicGradientSearch", register=True) except Exception as e: print("EvolutionaryDynamicGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizer import ( - EvolutionaryGradientHybridOptimizer, - ) + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizer import EvolutionaryGradientHybridOptimizer lama_register["EvolutionaryGradientHybridOptimizer"] = EvolutionaryGradientHybridOptimizer - LLAMAEvolutionaryGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMAEvolutionaryGradientHybridOptimizer" - ).set_name("LLAMAEvolutionaryGradientHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizer").set_name("LLAMAEvolutionaryGradientHybridOptimizer", register=True) except Exception as e: print("EvolutionaryGradientHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizerV2 import ( - EvolutionaryGradientHybridOptimizerV2, - ) + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizerV2 import EvolutionaryGradientHybridOptimizerV2 lama_register["EvolutionaryGradientHybridOptimizerV2"] = EvolutionaryGradientHybridOptimizerV2 - LLAMAEvolutionaryGradientHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMAEvolutionaryGradientHybridOptimizerV2" - ).set_name("LLAMAEvolutionaryGradientHybridOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryGradientHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizerV2").set_name("LLAMAEvolutionaryGradientHybridOptimizerV2", register=True) except Exception as e: print("EvolutionaryGradientHybridOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.EvolutionaryGradientSearch import EvolutionaryGradientSearch lama_register["EvolutionaryGradientSearch"] = EvolutionaryGradientSearch - LLAMAEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch").set_name( - "LLAMAEvolutionaryGradientSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch").set_name("LLAMAEvolutionaryGradientSearch", register=True) except Exception as e: print("EvolutionaryGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryHarmonicFireworkAlgorithm import ( - EvolutionaryHarmonicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.EvolutionaryHarmonicFireworkAlgorithm import EvolutionaryHarmonicFireworkAlgorithm lama_register["EvolutionaryHarmonicFireworkAlgorithm"] = EvolutionaryHarmonicFireworkAlgorithm - LLAMAEvolutionaryHarmonicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAEvolutionaryHarmonicFireworkAlgorithm" - ).set_name("LLAMAEvolutionaryHarmonicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEvolutionaryHarmonicFireworkAlgorithm").set_name("LLAMAEvolutionaryHarmonicFireworkAlgorithm", register=True) except Exception as e: print("EvolutionaryHarmonicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.EvolutionaryParticleSwarmOptimizer import ( - EvolutionaryParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.EvolutionaryParticleSwarmOptimizer import EvolutionaryParticleSwarmOptimizer lama_register["EvolutionaryParticleSwarmOptimizer"] = EvolutionaryParticleSwarmOptimizer - LLAMAEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAEvolutionaryParticleSwarmOptimizer" - ).set_name("LLAMAEvolutionaryParticleSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEvolutionaryParticleSwarmOptimizer").set_name("LLAMAEvolutionaryParticleSwarmOptimizer", register=True) except Exception as e: print("EvolutionaryParticleSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ExDADe import ExDADe lama_register["ExDADe"] = ExDADe + res = NonObjectOptimizer(method="LLAMAExDADe")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAExDADe = NonObjectOptimizer(method="LLAMAExDADe").set_name("LLAMAExDADe", register=True) except Exception as e: print("ExDADe can not be imported: ", e) - try: from nevergrad.optimization.lama.FEDE import FEDE lama_register["FEDE"] = FEDE + res = NonObjectOptimizer(method="LLAMAFEDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAFEDE = NonObjectOptimizer(method="LLAMAFEDE").set_name("LLAMAFEDE", register=True) except Exception as e: print("FEDE can not be imported: ", e) - try: from nevergrad.optimization.lama.FTADEEM import FTADEEM lama_register["FTADEEM"] = FTADEEM + res = NonObjectOptimizer(method="LLAMAFTADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAFTADEEM = NonObjectOptimizer(method="LLAMAFTADEEM").set_name("LLAMAFTADEEM", register=True) except Exception as e: print("FTADEEM can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( - FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, - ) + from nevergrad.optimization.lama.FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - lama_register["FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( - FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - ) - LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( - method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" - ).set_name("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) + lama_register["FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) except Exception as e: print("FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalEnhancedDynamicLocalSearchFireworkAlgorithm import ( - FinalEnhancedDynamicLocalSearchFireworkAlgorithm, - ) + from nevergrad.optimization.lama.FinalEnhancedDynamicLocalSearchFireworkAlgorithm import FinalEnhancedDynamicLocalSearchFireworkAlgorithm - lama_register["FinalEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( - FinalEnhancedDynamicLocalSearchFireworkAlgorithm - ) - LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm" - ).set_name("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) + lama_register["FinalEnhancedDynamicLocalSearchFireworkAlgorithm"] = FinalEnhancedDynamicLocalSearchFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) except Exception as e: print("FinalEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( - FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, - ) + from nevergrad.optimization.lama.FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - lama_register["FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( - FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - ) - LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( - method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" - ).set_name("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) + lama_register["FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + res = NonObjectOptimizer(method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) except Exception as e: print("FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 import ( - FinalEnhancedRefinedUltimateGuidedMassQGSA_v75, - ) + from nevergrad.optimization.lama.FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 import FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 - lama_register["FinalEnhancedRefinedUltimateGuidedMassQGSA_v75"] = ( - FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 - ) - LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75 = NonObjectOptimizer( - method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75" - ).set_name("LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75", register=True) + lama_register["FinalEnhancedRefinedUltimateGuidedMassQGSA_v75"] = FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 + res = NonObjectOptimizer(method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75 = NonObjectOptimizer(method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75").set_name("LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75", register=True) except Exception as e: print("FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithm import ( - FinalOptimizedEnhancedDynamicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithm import FinalOptimizedEnhancedDynamicFireworkAlgorithm - lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithm"] = ( - FinalOptimizedEnhancedDynamicFireworkAlgorithm - ) - LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm" - ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm", register=True) + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithm"] = FinalOptimizedEnhancedDynamicFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm").set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm", register=True) except Exception as e: print("FinalOptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined import ( - FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined, - ) + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined import FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined - lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined"] = ( - FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined - ) - LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( - method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined" - ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined", register=True) + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined"] = FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined + res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined").set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined", register=True) except Exception as e: print("FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.FineTunedCohortDiversityOptimizer import ( - FineTunedCohortDiversityOptimizer, - ) + from nevergrad.optimization.lama.FineTunedCohortDiversityOptimizer import FineTunedCohortDiversityOptimizer lama_register["FineTunedCohortDiversityOptimizer"] = FineTunedCohortDiversityOptimizer - LLAMAFineTunedCohortDiversityOptimizer = NonObjectOptimizer( - method="LLAMAFineTunedCohortDiversityOptimizer" - ).set_name("LLAMAFineTunedCohortDiversityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAFineTunedCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFineTunedCohortDiversityOptimizer = NonObjectOptimizer(method="LLAMAFineTunedCohortDiversityOptimizer").set_name("LLAMAFineTunedCohortDiversityOptimizer", register=True) except Exception as e: print("FineTunedCohortDiversityOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.FineTunedFocusedAdaptiveOptimizer import ( - FineTunedFocusedAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.FineTunedFocusedAdaptiveOptimizer import FineTunedFocusedAdaptiveOptimizer lama_register["FineTunedFocusedAdaptiveOptimizer"] = FineTunedFocusedAdaptiveOptimizer - LLAMAFineTunedFocusedAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAFineTunedFocusedAdaptiveOptimizer" - ).set_name("LLAMAFineTunedFocusedAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAFineTunedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFineTunedFocusedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAFineTunedFocusedAdaptiveOptimizer").set_name("LLAMAFineTunedFocusedAdaptiveOptimizer", register=True) except Exception as e: print("FineTunedFocusedAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.FineTunedProgressiveAdaptiveSearch import ( - FineTunedProgressiveAdaptiveSearch, - ) + from nevergrad.optimization.lama.FineTunedProgressiveAdaptiveSearch import FineTunedProgressiveAdaptiveSearch lama_register["FineTunedProgressiveAdaptiveSearch"] = FineTunedProgressiveAdaptiveSearch - LLAMAFineTunedProgressiveAdaptiveSearch = NonObjectOptimizer( - method="LLAMAFineTunedProgressiveAdaptiveSearch" - ).set_name("LLAMAFineTunedProgressiveAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAFineTunedProgressiveAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFineTunedProgressiveAdaptiveSearch = NonObjectOptimizer(method="LLAMAFineTunedProgressiveAdaptiveSearch").set_name("LLAMAFineTunedProgressiveAdaptiveSearch", register=True) except Exception as e: print("FineTunedProgressiveAdaptiveSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.FocusedBalancedAdaptivePSO import FocusedBalancedAdaptivePSO lama_register["FocusedBalancedAdaptivePSO"] = FocusedBalancedAdaptivePSO - LLAMAFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO").set_name( - "LLAMAFocusedBalancedAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO").set_name("LLAMAFocusedBalancedAdaptivePSO", register=True) except Exception as e: print("FocusedBalancedAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.FocusedEvolutionStrategy import FocusedEvolutionStrategy lama_register["FocusedEvolutionStrategy"] = FocusedEvolutionStrategy - LLAMAFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy").set_name( - "LLAMAFocusedEvolutionStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy").set_name("LLAMAFocusedEvolutionStrategy", register=True) except Exception as e: print("FocusedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.FractionalOrderClusterHybridOptimization import ( - FractionalOrderClusterHybridOptimization, - ) + from nevergrad.optimization.lama.FractionalOrderClusterHybridOptimization import FractionalOrderClusterHybridOptimization lama_register["FractionalOrderClusterHybridOptimization"] = FractionalOrderClusterHybridOptimization - LLAMAFractionalOrderClusterHybridOptimization = NonObjectOptimizer( - method="LLAMAFractionalOrderClusterHybridOptimization" - ).set_name("LLAMAFractionalOrderClusterHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAFractionalOrderClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFractionalOrderClusterHybridOptimization = NonObjectOptimizer(method="LLAMAFractionalOrderClusterHybridOptimization").set_name("LLAMAFractionalOrderClusterHybridOptimization", register=True) except Exception as e: print("FractionalOrderClusterHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.FurtherEnhancedHybridMetaHeuristicOptimizerV13 import ( - FurtherEnhancedHybridMetaHeuristicOptimizerV13, - ) + from nevergrad.optimization.lama.FurtherEnhancedHybridMetaHeuristicOptimizerV13 import FurtherEnhancedHybridMetaHeuristicOptimizerV13 - lama_register["FurtherEnhancedHybridMetaHeuristicOptimizerV13"] = ( - FurtherEnhancedHybridMetaHeuristicOptimizerV13 - ) - LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( - method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13" - ).set_name("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13", register=True) + lama_register["FurtherEnhancedHybridMetaHeuristicOptimizerV13"] = FurtherEnhancedHybridMetaHeuristicOptimizerV13 + res = NonObjectOptimizer(method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer(method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13").set_name("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13", register=True) except Exception as e: print("FurtherEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.GEEA import GEEA lama_register["GEEA"] = GEEA + res = NonObjectOptimizer(method="LLAMAGEEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAGEEA = NonObjectOptimizer(method="LLAMAGEEA").set_name("LLAMAGEEA", register=True) except Exception as e: print("GEEA can not be imported: ", e) - try: from nevergrad.optimization.lama.GESA import GESA lama_register["GESA"] = GESA + res = NonObjectOptimizer(method="LLAMAGESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAGESA = NonObjectOptimizer(method="LLAMAGESA").set_name("LLAMAGESA", register=True) except Exception as e: print("GESA can not be imported: ", e) - try: from nevergrad.optimization.lama.GGAES import GGAES lama_register["GGAES"] = GGAES + res = NonObjectOptimizer(method="LLAMAGGAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAGGAES = NonObjectOptimizer(method="LLAMAGGAES").set_name("LLAMAGGAES", register=True) except Exception as e: print("GGAES can not be imported: ", e) - try: from nevergrad.optimization.lama.GIDE import GIDE lama_register["GIDE"] = GIDE + res = NonObjectOptimizer(method="LLAMAGIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAGIDE = NonObjectOptimizer(method="LLAMAGIDE").set_name("LLAMAGIDE", register=True) except Exception as e: print("GIDE can not be imported: ", e) - try: from nevergrad.optimization.lama.GaussianAdaptivePSO import GaussianAdaptivePSO lama_register["GaussianAdaptivePSO"] = GaussianAdaptivePSO - LLAMAGaussianAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO").set_name( - "LLAMAGaussianAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGaussianAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO").set_name("LLAMAGaussianAdaptivePSO", register=True) except Exception as e: print("GaussianAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.GaussianEnhancedAdaptivePSO import GaussianEnhancedAdaptivePSO lama_register["GaussianEnhancedAdaptivePSO"] = GaussianEnhancedAdaptivePSO - LLAMAGaussianEnhancedAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO").set_name( - "LLAMAGaussianEnhancedAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGaussianEnhancedAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO").set_name("LLAMAGaussianEnhancedAdaptivePSO", register=True) except Exception as e: print("GaussianEnhancedAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientAssistedDifferentialCrossover import ( - GradientAssistedDifferentialCrossover, - ) + from nevergrad.optimization.lama.GradientAssistedDifferentialCrossover import GradientAssistedDifferentialCrossover lama_register["GradientAssistedDifferentialCrossover"] = GradientAssistedDifferentialCrossover - LLAMAGradientAssistedDifferentialCrossover = NonObjectOptimizer( - method="LLAMAGradientAssistedDifferentialCrossover" - ).set_name("LLAMAGradientAssistedDifferentialCrossover", register=True) + res = NonObjectOptimizer(method="LLAMAGradientAssistedDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientAssistedDifferentialCrossover = NonObjectOptimizer(method="LLAMAGradientAssistedDifferentialCrossover").set_name("LLAMAGradientAssistedDifferentialCrossover", register=True) except Exception as e: print("GradientAssistedDifferentialCrossover can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientBalancedEvolutionStrategy import ( - GradientBalancedEvolutionStrategy, - ) + from nevergrad.optimization.lama.GradientBalancedEvolutionStrategy import GradientBalancedEvolutionStrategy lama_register["GradientBalancedEvolutionStrategy"] = GradientBalancedEvolutionStrategy - LLAMAGradientBalancedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAGradientBalancedEvolutionStrategy" - ).set_name("LLAMAGradientBalancedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGradientBalancedEvolutionStrategy").set_name("LLAMAGradientBalancedEvolutionStrategy", register=True) except Exception as e: print("GradientBalancedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientBasedAdaptiveCovarianceMatrixAdaptation import ( - GradientBasedAdaptiveCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.GradientBasedAdaptiveCovarianceMatrixAdaptation import GradientBasedAdaptiveCovarianceMatrixAdaptation - lama_register["GradientBasedAdaptiveCovarianceMatrixAdaptation"] = ( - GradientBasedAdaptiveCovarianceMatrixAdaptation - ) - LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation" - ).set_name("LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation", register=True) + lama_register["GradientBasedAdaptiveCovarianceMatrixAdaptation"] = GradientBasedAdaptiveCovarianceMatrixAdaptation + res = NonObjectOptimizer(method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation").set_name("LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation", register=True) except Exception as e: print("GradientBasedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientBoostedMemoryAnnealing import GradientBoostedMemoryAnnealing lama_register["GradientBoostedMemoryAnnealing"] = GradientBoostedMemoryAnnealing - LLAMAGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMAGradientBoostedMemoryAnnealing" - ).set_name("LLAMAGradientBoostedMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAGradientBoostedMemoryAnnealing").set_name("LLAMAGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("GradientBoostedMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientEnhancedAdaptiveAnnealing import ( - GradientEnhancedAdaptiveAnnealing, - ) + from nevergrad.optimization.lama.GradientEnhancedAdaptiveAnnealing import GradientEnhancedAdaptiveAnnealing lama_register["GradientEnhancedAdaptiveAnnealing"] = GradientEnhancedAdaptiveAnnealing - LLAMAGradientEnhancedAdaptiveAnnealing = NonObjectOptimizer( - method="LLAMAGradientEnhancedAdaptiveAnnealing" - ).set_name("LLAMAGradientEnhancedAdaptiveAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientEnhancedAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveAnnealing").set_name("LLAMAGradientEnhancedAdaptiveAnnealing", register=True) except Exception as e: print("GradientEnhancedAdaptiveAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientEnhancedAdaptiveDifferentialEvolution import ( - GradientEnhancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.GradientEnhancedAdaptiveDifferentialEvolution import GradientEnhancedAdaptiveDifferentialEvolution - lama_register["GradientEnhancedAdaptiveDifferentialEvolution"] = ( - GradientEnhancedAdaptiveDifferentialEvolution - ) - LLAMAGradientEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution" - ).set_name("LLAMAGradientEnhancedAdaptiveDifferentialEvolution", register=True) + lama_register["GradientEnhancedAdaptiveDifferentialEvolution"] = GradientEnhancedAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAGradientEnhancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("GradientEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientEstimationSearch import GradientEstimationSearch lama_register["GradientEstimationSearch"] = GradientEstimationSearch - LLAMAGradientEstimationSearch = NonObjectOptimizer(method="LLAMAGradientEstimationSearch").set_name( - "LLAMAGradientEstimationSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAGradientEstimationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientEstimationSearch = NonObjectOptimizer(method="LLAMAGradientEstimationSearch").set_name("LLAMAGradientEstimationSearch", register=True) except Exception as e: print("GradientEstimationSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientGuidedClusterSearch import GradientGuidedClusterSearch lama_register["GradientGuidedClusterSearch"] = GradientGuidedClusterSearch - LLAMAGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch").set_name( - "LLAMAGradientGuidedClusterSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch").set_name("LLAMAGradientGuidedClusterSearch", register=True) except Exception as e: print("GradientGuidedClusterSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientGuidedDifferentialEvolution import ( - GradientGuidedDifferentialEvolution, - ) + from nevergrad.optimization.lama.GradientGuidedDifferentialEvolution import GradientGuidedDifferentialEvolution lama_register["GradientGuidedDifferentialEvolution"] = GradientGuidedDifferentialEvolution - LLAMAGradientGuidedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAGradientGuidedDifferentialEvolution" - ).set_name("LLAMAGradientGuidedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAGradientGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAGradientGuidedDifferentialEvolution").set_name("LLAMAGradientGuidedDifferentialEvolution", register=True) except Exception as e: print("GradientGuidedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientGuidedEvolutionStrategy import GradientGuidedEvolutionStrategy lama_register["GradientGuidedEvolutionStrategy"] = GradientGuidedEvolutionStrategy - LLAMAGradientGuidedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAGradientGuidedEvolutionStrategy" - ).set_name("LLAMAGradientGuidedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGradientGuidedEvolutionStrategy").set_name("LLAMAGradientGuidedEvolutionStrategy", register=True) except Exception as e: print("GradientGuidedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientGuidedHybridPSO import GradientGuidedHybridPSO lama_register["GradientGuidedHybridPSO"] = GradientGuidedHybridPSO - LLAMAGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO").set_name( - "LLAMAGradientGuidedHybridPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO").set_name("LLAMAGradientGuidedHybridPSO", register=True) except Exception as e: print("GradientGuidedHybridPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientInformedAdaptiveDirectionSearch import ( - GradientInformedAdaptiveDirectionSearch, - ) + from nevergrad.optimization.lama.GradientInformedAdaptiveDirectionSearch import GradientInformedAdaptiveDirectionSearch lama_register["GradientInformedAdaptiveDirectionSearch"] = GradientInformedAdaptiveDirectionSearch - LLAMAGradientInformedAdaptiveDirectionSearch = NonObjectOptimizer( - method="LLAMAGradientInformedAdaptiveDirectionSearch" - ).set_name("LLAMAGradientInformedAdaptiveDirectionSearch", register=True) + res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveDirectionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientInformedAdaptiveDirectionSearch = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveDirectionSearch").set_name("LLAMAGradientInformedAdaptiveDirectionSearch", register=True) except Exception as e: print("GradientInformedAdaptiveDirectionSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.GradientInformedAdaptiveSearch import GradientInformedAdaptiveSearch lama_register["GradientInformedAdaptiveSearch"] = GradientInformedAdaptiveSearch - LLAMAGradientInformedAdaptiveSearch = NonObjectOptimizer( - method="LLAMAGradientInformedAdaptiveSearch" - ).set_name("LLAMAGradientInformedAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientInformedAdaptiveSearch = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveSearch").set_name("LLAMAGradientInformedAdaptiveSearch", register=True) except Exception as e: print("GradientInformedAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientInformedParticleOptimizer import ( - GradientInformedParticleOptimizer, - ) + from nevergrad.optimization.lama.GradientInformedParticleOptimizer import GradientInformedParticleOptimizer lama_register["GradientInformedParticleOptimizer"] = GradientInformedParticleOptimizer - LLAMAGradientInformedParticleOptimizer = NonObjectOptimizer( - method="LLAMAGradientInformedParticleOptimizer" - ).set_name("LLAMAGradientInformedParticleOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAGradientInformedParticleOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientInformedParticleOptimizer = NonObjectOptimizer(method="LLAMAGradientInformedParticleOptimizer").set_name("LLAMAGradientInformedParticleOptimizer", register=True) except Exception as e: print("GradientInformedParticleOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.GradientSpiralDifferentialEnhancerV5 import ( - GradientSpiralDifferentialEnhancerV5, - ) + from nevergrad.optimization.lama.GradientSpiralDifferentialEnhancerV5 import GradientSpiralDifferentialEnhancerV5 lama_register["GradientSpiralDifferentialEnhancerV5"] = GradientSpiralDifferentialEnhancerV5 - LLAMAGradientSpiralDifferentialEnhancerV5 = NonObjectOptimizer( - method="LLAMAGradientSpiralDifferentialEnhancerV5" - ).set_name("LLAMAGradientSpiralDifferentialEnhancerV5", register=True) + res = NonObjectOptimizer(method="LLAMAGradientSpiralDifferentialEnhancerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGradientSpiralDifferentialEnhancerV5 = NonObjectOptimizer(method="LLAMAGradientSpiralDifferentialEnhancerV5").set_name("LLAMAGradientSpiralDifferentialEnhancerV5", register=True) except Exception as e: print("GradientSpiralDifferentialEnhancerV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.GravitationalSwarmIntelligence import GravitationalSwarmIntelligence lama_register["GravitationalSwarmIntelligence"] = GravitationalSwarmIntelligence - LLAMAGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAGravitationalSwarmIntelligence" - ).set_name("LLAMAGravitationalSwarmIntelligence", register=True) + res = NonObjectOptimizer(method="LLAMAGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAGravitationalSwarmIntelligence").set_name("LLAMAGravitationalSwarmIntelligence", register=True) except Exception as e: print("GravitationalSwarmIntelligence can not be imported: ", e) - try: from nevergrad.optimization.lama.GreedyDiversityMultiStrategySADE import GreedyDiversityMultiStrategySADE lama_register["GreedyDiversityMultiStrategySADE"] = GreedyDiversityMultiStrategySADE - LLAMAGreedyDiversityMultiStrategySADE = NonObjectOptimizer( - method="LLAMAGreedyDiversityMultiStrategySADE" - ).set_name("LLAMAGreedyDiversityMultiStrategySADE", register=True) + res = NonObjectOptimizer(method="LLAMAGreedyDiversityMultiStrategySADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGreedyDiversityMultiStrategySADE = NonObjectOptimizer(method="LLAMAGreedyDiversityMultiStrategySADE").set_name("LLAMAGreedyDiversityMultiStrategySADE", register=True) except Exception as e: print("GreedyDiversityMultiStrategySADE can not be imported: ", e) - try: from nevergrad.optimization.lama.GreedyDynamicMultiStrategyDE import GreedyDynamicMultiStrategyDE lama_register["GreedyDynamicMultiStrategyDE"] = GreedyDynamicMultiStrategyDE - LLAMAGreedyDynamicMultiStrategyDE = NonObjectOptimizer( - method="LLAMAGreedyDynamicMultiStrategyDE" - ).set_name("LLAMAGreedyDynamicMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMAGreedyDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGreedyDynamicMultiStrategyDE = NonObjectOptimizer(method="LLAMAGreedyDynamicMultiStrategyDE").set_name("LLAMAGreedyDynamicMultiStrategyDE", register=True) except Exception as e: print("GreedyDynamicMultiStrategyDE can not be imported: ", e) - try: from nevergrad.optimization.lama.GuidedEvolutionStrategy import GuidedEvolutionStrategy lama_register["GuidedEvolutionStrategy"] = GuidedEvolutionStrategy - LLAMAGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy").set_name( - "LLAMAGuidedEvolutionStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy").set_name("LLAMAGuidedEvolutionStrategy", register=True) except Exception as e: print("GuidedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.GuidedMutationOptimizer import GuidedMutationOptimizer lama_register["GuidedMutationOptimizer"] = GuidedMutationOptimizer - LLAMAGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer").set_name( - "LLAMAGuidedMutationOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer").set_name("LLAMAGuidedMutationOptimizer", register=True) except Exception as e: print("GuidedMutationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HADE import HADE lama_register["HADE"] = HADE + res = NonObjectOptimizer(method="LLAMAHADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHADE = NonObjectOptimizer(method="LLAMAHADE").set_name("LLAMAHADE", register=True) except Exception as e: print("HADE can not be imported: ", e) - try: from nevergrad.optimization.lama.HADEEM import HADEEM lama_register["HADEEM"] = HADEEM + res = NonObjectOptimizer(method="LLAMAHADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHADEEM = NonObjectOptimizer(method="LLAMAHADEEM").set_name("LLAMAHADEEM", register=True) except Exception as e: print("HADEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.HADEMI import HADEMI lama_register["HADEMI"] = HADEMI + res = NonObjectOptimizer(method="LLAMAHADEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHADEMI = NonObjectOptimizer(method="LLAMAHADEMI").set_name("LLAMAHADEMI", register=True) except Exception as e: print("HADEMI can not be imported: ", e) - try: from nevergrad.optimization.lama.HAVCDE import HAVCDE lama_register["HAVCDE"] = HAVCDE + res = NonObjectOptimizer(method="LLAMAHAVCDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHAVCDE = NonObjectOptimizer(method="LLAMAHAVCDE").set_name("LLAMAHAVCDE", register=True) except Exception as e: print("HAVCDE can not be imported: ", e) - try: from nevergrad.optimization.lama.HEAS import HEAS lama_register["HEAS"] = HEAS + res = NonObjectOptimizer(method="LLAMAHEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHEAS = NonObjectOptimizer(method="LLAMAHEAS").set_name("LLAMAHEAS", register=True) except Exception as e: print("HEAS can not be imported: ", e) - try: from nevergrad.optimization.lama.HarmonyFireworkOptimizer import HarmonyFireworkOptimizer lama_register["HarmonyFireworkOptimizer"] = HarmonyFireworkOptimizer - LLAMAHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer").set_name( - "LLAMAHarmonyFireworkOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer").set_name("LLAMAHarmonyFireworkOptimizer", register=True) except Exception as e: print("HarmonyFireworkOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HarmonyTabuOptimization import HarmonyTabuOptimization lama_register["HarmonyTabuOptimization"] = HarmonyTabuOptimization - LLAMAHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization").set_name( - "LLAMAHarmonyTabuOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization").set_name("LLAMAHarmonyTabuOptimization", register=True) except Exception as e: print("HarmonyTabuOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.HierarchicalAdaptiveAnnealing import HierarchicalAdaptiveAnnealing lama_register["HierarchicalAdaptiveAnnealing"] = HierarchicalAdaptiveAnnealing - LLAMAHierarchicalAdaptiveAnnealing = NonObjectOptimizer( - method="LLAMAHierarchicalAdaptiveAnnealing" - ).set_name("LLAMAHierarchicalAdaptiveAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHierarchicalAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveAnnealing").set_name("LLAMAHierarchicalAdaptiveAnnealing", register=True) except Exception as e: print("HierarchicalAdaptiveAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.HierarchicalAdaptiveCovarianceMatrixAdaptation import ( - HierarchicalAdaptiveCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.HierarchicalAdaptiveCovarianceMatrixAdaptation import HierarchicalAdaptiveCovarianceMatrixAdaptation - lama_register["HierarchicalAdaptiveCovarianceMatrixAdaptation"] = ( - HierarchicalAdaptiveCovarianceMatrixAdaptation - ) - LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation" - ).set_name("LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation", register=True) + lama_register["HierarchicalAdaptiveCovarianceMatrixAdaptation"] = HierarchicalAdaptiveCovarianceMatrixAdaptation + res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation").set_name("LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation", register=True) except Exception as e: print("HierarchicalAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) - try: from nevergrad.optimization.lama.HierarchicalAdaptiveSearch import HierarchicalAdaptiveSearch lama_register["HierarchicalAdaptiveSearch"] = HierarchicalAdaptiveSearch - LLAMAHierarchicalAdaptiveSearch = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch").set_name( - "LLAMAHierarchicalAdaptiveSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHierarchicalAdaptiveSearch = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch").set_name("LLAMAHierarchicalAdaptiveSearch", register=True) except Exception as e: print("HierarchicalAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HierarchicalDiversityEnhancedCovarianceMatrixAdaptation import ( - HierarchicalDiversityEnhancedCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.HierarchicalDiversityEnhancedCovarianceMatrixAdaptation import HierarchicalDiversityEnhancedCovarianceMatrixAdaptation - lama_register["HierarchicalDiversityEnhancedCovarianceMatrixAdaptation"] = ( - HierarchicalDiversityEnhancedCovarianceMatrixAdaptation - ) - LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation" - ).set_name("LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation", register=True) + lama_register["HierarchicalDiversityEnhancedCovarianceMatrixAdaptation"] = HierarchicalDiversityEnhancedCovarianceMatrixAdaptation + res = NonObjectOptimizer(method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation").set_name("LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation", register=True) except Exception as e: print("HierarchicalDiversityEnhancedCovarianceMatrixAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.HighPerformanceAdaptiveDifferentialSearch import ( - HighPerformanceAdaptiveDifferentialSearch, - ) + from nevergrad.optimization.lama.HighPerformanceAdaptiveDifferentialSearch import HighPerformanceAdaptiveDifferentialSearch lama_register["HighPerformanceAdaptiveDifferentialSearch"] = HighPerformanceAdaptiveDifferentialSearch - LLAMAHighPerformanceAdaptiveDifferentialSearch = NonObjectOptimizer( - method="LLAMAHighPerformanceAdaptiveDifferentialSearch" - ).set_name("LLAMAHighPerformanceAdaptiveDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHighPerformanceAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHighPerformanceAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAHighPerformanceAdaptiveDifferentialSearch").set_name("LLAMAHighPerformanceAdaptiveDifferentialSearch", register=True) except Exception as e: print("HighPerformanceAdaptiveDifferentialSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HyGDAE import HyGDAE lama_register["HyGDAE"] = HyGDAE + res = NonObjectOptimizer(method="LLAMAHyGDAE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAHyGDAE = NonObjectOptimizer(method="LLAMAHyGDAE").set_name("LLAMAHyGDAE", register=True) except Exception as e: print("HyGDAE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveCovarianceMatrixDifferentialEvolution import ( - HybridAdaptiveCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveCovarianceMatrixDifferentialEvolution import HybridAdaptiveCovarianceMatrixDifferentialEvolution - lama_register["HybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( - HybridAdaptiveCovarianceMatrixDifferentialEvolution - ) - LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) + lama_register["HybridAdaptiveCovarianceMatrixDifferentialEvolution"] = HybridAdaptiveCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveCrossoverElitistStrategyV10 import ( - HybridAdaptiveCrossoverElitistStrategyV10, - ) + from nevergrad.optimization.lama.HybridAdaptiveCrossoverElitistStrategyV10 import HybridAdaptiveCrossoverElitistStrategyV10 lama_register["HybridAdaptiveCrossoverElitistStrategyV10"] = HybridAdaptiveCrossoverElitistStrategyV10 - LLAMAHybridAdaptiveCrossoverElitistStrategyV10 = NonObjectOptimizer( - method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10" - ).set_name("LLAMAHybridAdaptiveCrossoverElitistStrategyV10", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveCrossoverElitistStrategyV10 = NonObjectOptimizer(method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10").set_name("LLAMAHybridAdaptiveCrossoverElitistStrategyV10", register=True) except Exception as e: print("HybridAdaptiveCrossoverElitistStrategyV10 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveDE import HybridAdaptiveDE lama_register["HybridAdaptiveDE"] = HybridAdaptiveDE - LLAMAHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE").set_name( - "LLAMAHybridAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE").set_name("LLAMAHybridAdaptiveDE", register=True) except Exception as e: print("HybridAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolution import ( - HybridAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolution import HybridAdaptiveDifferentialEvolution lama_register["HybridAdaptiveDifferentialEvolution"] = HybridAdaptiveDifferentialEvolution - LLAMAHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolution").set_name("LLAMAHybridAdaptiveDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning import ( - HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning, - ) + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning import HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning - lama_register["HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning"] = ( - HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning - ) - LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning" - ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning", register=True) + lama_register["HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning"] = HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning").set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning", register=True) except Exception as e: print("HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch import ( - HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch, - ) + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch import HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch - lama_register["HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch"] = ( - HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch - ) - LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch" - ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch", register=True) + lama_register["HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch"] = HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch").set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch", register=True) except Exception as e: print("HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialQuantumSearch import ( - HybridAdaptiveDifferentialQuantumSearch, - ) + from nevergrad.optimization.lama.HybridAdaptiveDifferentialQuantumSearch import HybridAdaptiveDifferentialQuantumSearch lama_register["HybridAdaptiveDifferentialQuantumSearch"] = HybridAdaptiveDifferentialQuantumSearch - LLAMAHybridAdaptiveDifferentialQuantumSearch = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDifferentialQuantumSearch" - ).set_name("LLAMAHybridAdaptiveDifferentialQuantumSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDifferentialQuantumSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialQuantumSearch").set_name("LLAMAHybridAdaptiveDifferentialQuantumSearch", register=True) except Exception as e: print("HybridAdaptiveDifferentialQuantumSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveDifferentialSwarm import HybridAdaptiveDifferentialSwarm lama_register["HybridAdaptiveDifferentialSwarm"] = HybridAdaptiveDifferentialSwarm - LLAMAHybridAdaptiveDifferentialSwarm = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDifferentialSwarm" - ).set_name("LLAMAHybridAdaptiveDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialSwarm").set_name("LLAMAHybridAdaptiveDifferentialSwarm", register=True) except Exception as e: print("HybridAdaptiveDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDiversityMaintainingGradientEvolution import ( - HybridAdaptiveDiversityMaintainingGradientEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveDiversityMaintainingGradientEvolution import HybridAdaptiveDiversityMaintainingGradientEvolution - lama_register["HybridAdaptiveDiversityMaintainingGradientEvolution"] = ( - HybridAdaptiveDiversityMaintainingGradientEvolution - ) - LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution" - ).set_name("LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution", register=True) + lama_register["HybridAdaptiveDiversityMaintainingGradientEvolution"] = HybridAdaptiveDiversityMaintainingGradientEvolution + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution").set_name("LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution", register=True) except Exception as e: print("HybridAdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveDualPhaseStrategyV6 import ( - HybridAdaptiveDualPhaseStrategyV6, - ) + from nevergrad.optimization.lama.HybridAdaptiveDualPhaseStrategyV6 import HybridAdaptiveDualPhaseStrategyV6 lama_register["HybridAdaptiveDualPhaseStrategyV6"] = HybridAdaptiveDualPhaseStrategyV6 - LLAMAHybridAdaptiveDualPhaseStrategyV6 = NonObjectOptimizer( - method="LLAMAHybridAdaptiveDualPhaseStrategyV6" - ).set_name("LLAMAHybridAdaptiveDualPhaseStrategyV6", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDualPhaseStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveDualPhaseStrategyV6 = NonObjectOptimizer(method="LLAMAHybridAdaptiveDualPhaseStrategyV6").set_name("LLAMAHybridAdaptiveDualPhaseStrategyV6", register=True) except Exception as e: print("HybridAdaptiveDualPhaseStrategyV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveEvolutionaryOptimizer import ( - HybridAdaptiveEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.HybridAdaptiveEvolutionaryOptimizer import HybridAdaptiveEvolutionaryOptimizer lama_register["HybridAdaptiveEvolutionaryOptimizer"] = HybridAdaptiveEvolutionaryOptimizer - LLAMAHybridAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAHybridAdaptiveEvolutionaryOptimizer" - ).set_name("LLAMAHybridAdaptiveEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveEvolutionaryOptimizer").set_name("LLAMAHybridAdaptiveEvolutionaryOptimizer", register=True) except Exception as e: print("HybridAdaptiveEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveExplorationOptimizer import ( - HybridAdaptiveExplorationOptimizer, - ) + from nevergrad.optimization.lama.HybridAdaptiveExplorationOptimizer import HybridAdaptiveExplorationOptimizer lama_register["HybridAdaptiveExplorationOptimizer"] = HybridAdaptiveExplorationOptimizer - LLAMAHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( - method="LLAMAHybridAdaptiveExplorationOptimizer" - ).set_name("LLAMAHybridAdaptiveExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveExplorationOptimizer").set_name("LLAMAHybridAdaptiveExplorationOptimizer", register=True) except Exception as e: print("HybridAdaptiveExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizer import ( - HybridAdaptiveGeneticSwarmOptimizer, - ) + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizer import HybridAdaptiveGeneticSwarmOptimizer lama_register["HybridAdaptiveGeneticSwarmOptimizer"] = HybridAdaptiveGeneticSwarmOptimizer - LLAMAHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( - method="LLAMAHybridAdaptiveGeneticSwarmOptimizer" - ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizer", register=True) except Exception as e: print("HybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizerV2 import ( - HybridAdaptiveGeneticSwarmOptimizerV2, - ) + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizerV2 import HybridAdaptiveGeneticSwarmOptimizerV2 lama_register["HybridAdaptiveGeneticSwarmOptimizerV2"] = HybridAdaptiveGeneticSwarmOptimizerV2 - LLAMAHybridAdaptiveGeneticSwarmOptimizerV2 = NonObjectOptimizer( - method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2" - ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveGeneticSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2").set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizerV2", register=True) except Exception as e: print("HybridAdaptiveGeneticSwarmOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveGradientPSO import HybridAdaptiveGradientPSO lama_register["HybridAdaptiveGradientPSO"] = HybridAdaptiveGradientPSO - LLAMAHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO").set_name( - "LLAMAHybridAdaptiveGradientPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO").set_name("LLAMAHybridAdaptiveGradientPSO", register=True) except Exception as e: print("HybridAdaptiveGradientPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveHarmonicFireworksTabuSearch import ( - HybridAdaptiveHarmonicFireworksTabuSearch, - ) + from nevergrad.optimization.lama.HybridAdaptiveHarmonicFireworksTabuSearch import HybridAdaptiveHarmonicFireworksTabuSearch lama_register["HybridAdaptiveHarmonicFireworksTabuSearch"] = HybridAdaptiveHarmonicFireworksTabuSearch - LLAMAHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( - method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch" - ).set_name("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch", register=True) except Exception as e: print("HybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveMemeticAlgorithm import HybridAdaptiveMemeticAlgorithm lama_register["HybridAdaptiveMemeticAlgorithm"] = HybridAdaptiveMemeticAlgorithm - LLAMAHybridAdaptiveMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMemeticAlgorithm" - ).set_name("LLAMAHybridAdaptiveMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticAlgorithm").set_name("LLAMAHybridAdaptiveMemeticAlgorithm", register=True) except Exception as e: print("HybridAdaptiveMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism import ( - HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism, - ) + from nevergrad.optimization.lama.HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism import HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism - lama_register["HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism"] = ( - HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism - ) - LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism" - ).set_name("LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism", register=True) + lama_register["HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism"] = HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism").set_name("LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism", register=True) except Exception as e: print("HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveMemeticOptimizerV4 import HybridAdaptiveMemeticOptimizerV4 lama_register["HybridAdaptiveMemeticOptimizerV4"] = HybridAdaptiveMemeticOptimizerV4 - LLAMAHybridAdaptiveMemeticOptimizerV4 = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMemeticOptimizerV4" - ).set_name("LLAMAHybridAdaptiveMemeticOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMemeticOptimizerV4 = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticOptimizerV4").set_name("LLAMAHybridAdaptiveMemeticOptimizerV4", register=True) except Exception as e: print("HybridAdaptiveMemeticOptimizerV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveMemoryAnnealing import HybridAdaptiveMemoryAnnealing lama_register["HybridAdaptiveMemoryAnnealing"] = HybridAdaptiveMemoryAnnealing - LLAMAHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMemoryAnnealing" - ).set_name("LLAMAHybridAdaptiveMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemoryAnnealing").set_name("LLAMAHybridAdaptiveMemoryAnnealing", register=True) except Exception as e: print("HybridAdaptiveMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolution import ( - HybridAdaptiveMultiPhaseEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolution import HybridAdaptiveMultiPhaseEvolution lama_register["HybridAdaptiveMultiPhaseEvolution"] = HybridAdaptiveMultiPhaseEvolution - LLAMAHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMultiPhaseEvolution" - ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolution").set_name("LLAMAHybridAdaptiveMultiPhaseEvolution", register=True) except Exception as e: print("HybridAdaptiveMultiPhaseEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolutionV2 import ( - HybridAdaptiveMultiPhaseEvolutionV2, - ) + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolutionV2 import HybridAdaptiveMultiPhaseEvolutionV2 lama_register["HybridAdaptiveMultiPhaseEvolutionV2"] = HybridAdaptiveMultiPhaseEvolutionV2 - LLAMAHybridAdaptiveMultiPhaseEvolutionV2 = NonObjectOptimizer( - method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2" - ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolutionV2", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveMultiPhaseEvolutionV2 = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2").set_name("LLAMAHybridAdaptiveMultiPhaseEvolutionV2", register=True) except Exception as e: print("HybridAdaptiveMultiPhaseEvolutionV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveNesterovSynergy import HybridAdaptiveNesterovSynergy lama_register["HybridAdaptiveNesterovSynergy"] = HybridAdaptiveNesterovSynergy - LLAMAHybridAdaptiveNesterovSynergy = NonObjectOptimizer( - method="LLAMAHybridAdaptiveNesterovSynergy" - ).set_name("LLAMAHybridAdaptiveNesterovSynergy", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveNesterovSynergy = NonObjectOptimizer(method="LLAMAHybridAdaptiveNesterovSynergy").set_name("LLAMAHybridAdaptiveNesterovSynergy", register=True) except Exception as e: print("HybridAdaptiveNesterovSynergy can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveOptimization import HybridAdaptiveOptimization lama_register["HybridAdaptiveOptimization"] = HybridAdaptiveOptimization - LLAMAHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization").set_name( - "LLAMAHybridAdaptiveOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization").set_name("LLAMAHybridAdaptiveOptimization", register=True) except Exception as e: print("HybridAdaptiveOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveOrthogonalDifferentialEvolution import ( - HybridAdaptiveOrthogonalDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveOrthogonalDifferentialEvolution import HybridAdaptiveOrthogonalDifferentialEvolution - lama_register["HybridAdaptiveOrthogonalDifferentialEvolution"] = ( - HybridAdaptiveOrthogonalDifferentialEvolution - ) - LLAMAHybridAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution", register=True) + lama_register["HybridAdaptiveOrthogonalDifferentialEvolution"] = HybridAdaptiveOrthogonalDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveParallelDifferentialEvolution import ( - HybridAdaptiveParallelDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveParallelDifferentialEvolution import HybridAdaptiveParallelDifferentialEvolution lama_register["HybridAdaptiveParallelDifferentialEvolution"] = HybridAdaptiveParallelDifferentialEvolution - LLAMAHybridAdaptiveParallelDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveParallelDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveParallelDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveParallelDifferentialEvolution").set_name("LLAMAHybridAdaptiveParallelDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveParallelDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveParameterTuningOptimization import ( - HybridAdaptiveParameterTuningOptimization, - ) + from nevergrad.optimization.lama.HybridAdaptiveParameterTuningOptimization import HybridAdaptiveParameterTuningOptimization lama_register["HybridAdaptiveParameterTuningOptimization"] = HybridAdaptiveParameterTuningOptimization - LLAMAHybridAdaptiveParameterTuningOptimization = NonObjectOptimizer( - method="LLAMAHybridAdaptiveParameterTuningOptimization" - ).set_name("LLAMAHybridAdaptiveParameterTuningOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParameterTuningOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveParameterTuningOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveParameterTuningOptimization").set_name("LLAMAHybridAdaptiveParameterTuningOptimization", register=True) except Exception as e: print("HybridAdaptiveParameterTuningOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptivePopulationDE import HybridAdaptivePopulationDE lama_register["HybridAdaptivePopulationDE"] = HybridAdaptivePopulationDE - LLAMAHybridAdaptivePopulationDE = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE").set_name( - "LLAMAHybridAdaptivePopulationDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptivePopulationDE = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE").set_name("LLAMAHybridAdaptivePopulationDE", register=True) except Exception as e: print("HybridAdaptivePopulationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveQuantumLevySearch import HybridAdaptiveQuantumLevySearch lama_register["HybridAdaptiveQuantumLevySearch"] = HybridAdaptiveQuantumLevySearch - LLAMAHybridAdaptiveQuantumLevySearch = NonObjectOptimizer( - method="LLAMAHybridAdaptiveQuantumLevySearch" - ).set_name("LLAMAHybridAdaptiveQuantumLevySearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumLevySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveQuantumLevySearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumLevySearch").set_name("LLAMAHybridAdaptiveQuantumLevySearch", register=True) except Exception as e: print("HybridAdaptiveQuantumLevySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticDifferentialEvolution import ( - HybridAdaptiveQuantumMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticDifferentialEvolution import HybridAdaptiveQuantumMemeticDifferentialEvolution - lama_register["HybridAdaptiveQuantumMemeticDifferentialEvolution"] = ( - HybridAdaptiveQuantumMemeticDifferentialEvolution - ) - LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution", register=True) + lama_register["HybridAdaptiveQuantumMemeticDifferentialEvolution"] = HybridAdaptiveQuantumMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution").set_name("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveQuantumMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticOptimizer import ( - HybridAdaptiveQuantumMemeticOptimizer, - ) + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticOptimizer import HybridAdaptiveQuantumMemeticOptimizer lama_register["HybridAdaptiveQuantumMemeticOptimizer"] = HybridAdaptiveQuantumMemeticOptimizer - LLAMAHybridAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( - method="LLAMAHybridAdaptiveQuantumMemeticOptimizer" - ).set_name("LLAMAHybridAdaptiveQuantumMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticOptimizer").set_name("LLAMAHybridAdaptiveQuantumMemeticOptimizer", register=True) except Exception as e: print("HybridAdaptiveQuantumMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveQuantumPSO import HybridAdaptiveQuantumPSO lama_register["HybridAdaptiveQuantumPSO"] = HybridAdaptiveQuantumPSO - LLAMAHybridAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO").set_name( - "LLAMAHybridAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO").set_name("LLAMAHybridAdaptiveQuantumPSO", register=True) except Exception as e: print("HybridAdaptiveQuantumPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveSearch import HybridAdaptiveSearch lama_register["HybridAdaptiveSearch"] = HybridAdaptiveSearch - LLAMAHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch").set_name( - "LLAMAHybridAdaptiveSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch").set_name("LLAMAHybridAdaptiveSearch", register=True) except Exception as e: print("HybridAdaptiveSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridAdaptiveSearchStrategy import HybridAdaptiveSearchStrategy lama_register["HybridAdaptiveSearchStrategy"] = HybridAdaptiveSearchStrategy - LLAMAHybridAdaptiveSearchStrategy = NonObjectOptimizer( - method="LLAMAHybridAdaptiveSearchStrategy" - ).set_name("LLAMAHybridAdaptiveSearchStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearchStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveSearchStrategy = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearchStrategy").set_name("LLAMAHybridAdaptiveSearchStrategy", register=True) except Exception as e: print("HybridAdaptiveSearchStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveSelfAdaptiveDifferentialEvolution import ( - HybridAdaptiveSelfAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridAdaptiveSelfAdaptiveDifferentialEvolution import HybridAdaptiveSelfAdaptiveDifferentialEvolution - lama_register["HybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( - HybridAdaptiveSelfAdaptiveDifferentialEvolution - ) - LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution" - ).set_name("LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) + lama_register["HybridAdaptiveSelfAdaptiveDifferentialEvolution"] = HybridAdaptiveSelfAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution").set_name("LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) except Exception as e: print("HybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridAdaptiveSimulatedAnnealingDE import ( - HybridAdaptiveSimulatedAnnealingDE, - ) + from nevergrad.optimization.lama.HybridAdaptiveSimulatedAnnealingDE import HybridAdaptiveSimulatedAnnealingDE lama_register["HybridAdaptiveSimulatedAnnealingDE"] = HybridAdaptiveSimulatedAnnealingDE - LLAMAHybridAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( - method="LLAMAHybridAdaptiveSimulatedAnnealingDE" - ).set_name("LLAMAHybridAdaptiveSimulatedAnnealingDE", register=True) + res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridAdaptiveSimulatedAnnealingDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveSimulatedAnnealingDE").set_name("LLAMAHybridAdaptiveSimulatedAnnealingDE", register=True) except Exception as e: print("HybridAdaptiveSimulatedAnnealingDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridCosineSineDualPhaseStrategyV10 import ( - HybridCosineSineDualPhaseStrategyV10, - ) + from nevergrad.optimization.lama.HybridCosineSineDualPhaseStrategyV10 import HybridCosineSineDualPhaseStrategyV10 lama_register["HybridCosineSineDualPhaseStrategyV10"] = HybridCosineSineDualPhaseStrategyV10 - LLAMAHybridCosineSineDualPhaseStrategyV10 = NonObjectOptimizer( - method="LLAMAHybridCosineSineDualPhaseStrategyV10" - ).set_name("LLAMAHybridCosineSineDualPhaseStrategyV10", register=True) + res = NonObjectOptimizer(method="LLAMAHybridCosineSineDualPhaseStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridCosineSineDualPhaseStrategyV10 = NonObjectOptimizer(method="LLAMAHybridCosineSineDualPhaseStrategyV10").set_name("LLAMAHybridCosineSineDualPhaseStrategyV10", register=True) except Exception as e: print("HybridCosineSineDualPhaseStrategyV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptionDifferentialEvolution import ( - HybridCovarianceMatrixAdaptionDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptionDifferentialEvolution import HybridCovarianceMatrixAdaptionDifferentialEvolution - lama_register["HybridCovarianceMatrixAdaptionDifferentialEvolution"] = ( - HybridCovarianceMatrixAdaptionDifferentialEvolution - ) - LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution" - ).set_name("LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution", register=True) + lama_register["HybridCovarianceMatrixAdaptionDifferentialEvolution"] = HybridCovarianceMatrixAdaptionDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution").set_name("LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution", register=True) except Exception as e: print("HybridCovarianceMatrixAdaptionDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 import ( - HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 import HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 - lama_register["HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2"] = ( - HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 - ) - LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2" - ).set_name("LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2", register=True) + lama_register["HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2"] = HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2").set_name("LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2", register=True) except Exception as e: print("HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights import ( - HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights, - ) + from nevergrad.optimization.lama.HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights import HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights - lama_register["HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights"] = ( - HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights - ) - LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights = NonObjectOptimizer( - method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights" - ).set_name("LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights", register=True) + lama_register["HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights"] = HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights + res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights").set_name("LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights", register=True) except Exception as e: print("HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridCulturalDifferentialEvolution import ( - HybridCulturalDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridCulturalDifferentialEvolution import HybridCulturalDifferentialEvolution lama_register["HybridCulturalDifferentialEvolution"] = HybridCulturalDifferentialEvolution - LLAMAHybridCulturalDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridCulturalDifferentialEvolution" - ).set_name("LLAMAHybridCulturalDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridCulturalDifferentialEvolution").set_name("LLAMAHybridCulturalDifferentialEvolution", register=True) except Exception as e: print("HybridCulturalDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDEPSO import HybridDEPSO lama_register["HybridDEPSO"] = HybridDEPSO - LLAMAHybridDEPSO = NonObjectOptimizer(method="LLAMAHybridDEPSO").set_name( - "LLAMAHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDEPSO = NonObjectOptimizer(method="LLAMAHybridDEPSO").set_name("LLAMAHybridDEPSO", register=True) except Exception as e: print("HybridDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDEPSOWithDynamicAdaptation import HybridDEPSOWithDynamicAdaptation lama_register["HybridDEPSOWithDynamicAdaptation"] = HybridDEPSOWithDynamicAdaptation - LLAMAHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( - method="LLAMAHybridDEPSOWithDynamicAdaptation" - ).set_name("LLAMAHybridDEPSOWithDynamicAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer(method="LLAMAHybridDEPSOWithDynamicAdaptation").set_name("LLAMAHybridDEPSOWithDynamicAdaptation", register=True) except Exception as e: print("HybridDEPSOWithDynamicAdaptation can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDifferentialEvolution import HybridDifferentialEvolution lama_register["HybridDifferentialEvolution"] = HybridDifferentialEvolution - LLAMAHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution").set_name( - "LLAMAHybridDifferentialEvolution", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution").set_name("LLAMAHybridDifferentialEvolution", register=True) except Exception as e: print("HybridDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionMemeticOptimizer import ( - HybridDifferentialEvolutionMemeticOptimizer, - ) + from nevergrad.optimization.lama.HybridDifferentialEvolutionMemeticOptimizer import HybridDifferentialEvolutionMemeticOptimizer lama_register["HybridDifferentialEvolutionMemeticOptimizer"] = HybridDifferentialEvolutionMemeticOptimizer - LLAMAHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( - method="LLAMAHybridDifferentialEvolutionMemeticOptimizer" - ).set_name("LLAMAHybridDifferentialEvolutionMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionMemeticOptimizer").set_name("LLAMAHybridDifferentialEvolutionMemeticOptimizer", register=True) except Exception as e: print("HybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionParticleSwarmOptimizer import ( - HybridDifferentialEvolutionParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.HybridDifferentialEvolutionParticleSwarmOptimizer import HybridDifferentialEvolutionParticleSwarmOptimizer - lama_register["HybridDifferentialEvolutionParticleSwarmOptimizer"] = ( - HybridDifferentialEvolutionParticleSwarmOptimizer - ) - LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer" - ).set_name("LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer", register=True) + lama_register["HybridDifferentialEvolutionParticleSwarmOptimizer"] = HybridDifferentialEvolutionParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer").set_name("LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer", register=True) except Exception as e: print("HybridDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionWithLocalSearch import ( - HybridDifferentialEvolutionWithLocalSearch, - ) + from nevergrad.optimization.lama.HybridDifferentialEvolutionWithLocalSearch import HybridDifferentialEvolutionWithLocalSearch lama_register["HybridDifferentialEvolutionWithLocalSearch"] = HybridDifferentialEvolutionWithLocalSearch - LLAMAHybridDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( - method="LLAMAHybridDifferentialEvolutionWithLocalSearch" - ).set_name("LLAMAHybridDifferentialEvolutionWithLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionWithLocalSearch").set_name("LLAMAHybridDifferentialEvolutionWithLocalSearch", register=True) except Exception as e: print("HybridDifferentialEvolutionWithLocalSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDifferentialLocalSearch import HybridDifferentialLocalSearch lama_register["HybridDifferentialLocalSearch"] = HybridDifferentialLocalSearch - LLAMAHybridDifferentialLocalSearch = NonObjectOptimizer( - method="LLAMAHybridDifferentialLocalSearch" - ).set_name("LLAMAHybridDifferentialLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDifferentialLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDifferentialLocalSearch = NonObjectOptimizer(method="LLAMAHybridDifferentialLocalSearch").set_name("LLAMAHybridDifferentialLocalSearch", register=True) except Exception as e: print("HybridDifferentialLocalSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDualLocalOptimizationDE import HybridDualLocalOptimizationDE lama_register["HybridDualLocalOptimizationDE"] = HybridDualLocalOptimizationDE - LLAMAHybridDualLocalOptimizationDE = NonObjectOptimizer( - method="LLAMAHybridDualLocalOptimizationDE" - ).set_name("LLAMAHybridDualLocalOptimizationDE", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDualLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDualLocalOptimizationDE = NonObjectOptimizer(method="LLAMAHybridDualLocalOptimizationDE").set_name("LLAMAHybridDualLocalOptimizationDE", register=True) except Exception as e: print("HybridDualLocalOptimizationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDualPhaseParticleSwarmDifferentialEvolution import ( - HybridDualPhaseParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridDualPhaseParticleSwarmDifferentialEvolution import HybridDualPhaseParticleSwarmDifferentialEvolution - lama_register["HybridDualPhaseParticleSwarmDifferentialEvolution"] = ( - HybridDualPhaseParticleSwarmDifferentialEvolution - ) - LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution" - ).set_name("LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) + lama_register["HybridDualPhaseParticleSwarmDifferentialEvolution"] = HybridDualPhaseParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("HybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDynamicAdaptiveDE import HybridDynamicAdaptiveDE lama_register["HybridDynamicAdaptiveDE"] = HybridDynamicAdaptiveDE - LLAMAHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE").set_name( - "LLAMAHybridDynamicAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE").set_name("LLAMAHybridDynamicAdaptiveDE", register=True) except Exception as e: print("HybridDynamicAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDynamicAdaptiveExplorationOptimization import ( - HybridDynamicAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.HybridDynamicAdaptiveExplorationOptimization import HybridDynamicAdaptiveExplorationOptimization - lama_register["HybridDynamicAdaptiveExplorationOptimization"] = ( - HybridDynamicAdaptiveExplorationOptimization - ) - LLAMAHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAHybridDynamicAdaptiveExplorationOptimization" - ).set_name("LLAMAHybridDynamicAdaptiveExplorationOptimization", register=True) + lama_register["HybridDynamicAdaptiveExplorationOptimization"] = HybridDynamicAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveExplorationOptimization").set_name("LLAMAHybridDynamicAdaptiveExplorationOptimization", register=True) except Exception as e: print("HybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDynamicClusterOptimization import HybridDynamicClusterOptimization lama_register["HybridDynamicClusterOptimization"] = HybridDynamicClusterOptimization - LLAMAHybridDynamicClusterOptimization = NonObjectOptimizer( - method="LLAMAHybridDynamicClusterOptimization" - ).set_name("LLAMAHybridDynamicClusterOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicClusterOptimization = NonObjectOptimizer(method="LLAMAHybridDynamicClusterOptimization").set_name("LLAMAHybridDynamicClusterOptimization", register=True) except Exception as e: print("HybridDynamicClusterOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDynamicCuckooHarmonyAlgorithm import ( - HybridDynamicCuckooHarmonyAlgorithm, - ) + from nevergrad.optimization.lama.HybridDynamicCuckooHarmonyAlgorithm import HybridDynamicCuckooHarmonyAlgorithm lama_register["HybridDynamicCuckooHarmonyAlgorithm"] = HybridDynamicCuckooHarmonyAlgorithm - LLAMAHybridDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( - method="LLAMAHybridDynamicCuckooHarmonyAlgorithm" - ).set_name("LLAMAHybridDynamicCuckooHarmonyAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAHybridDynamicCuckooHarmonyAlgorithm").set_name("LLAMAHybridDynamicCuckooHarmonyAlgorithm", register=True) except Exception as e: print("HybridDynamicCuckooHarmonyAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDynamicDifferentialEvolution import ( - HybridDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolution import HybridDynamicDifferentialEvolution lama_register["HybridDynamicDifferentialEvolution"] = HybridDynamicDifferentialEvolution - LLAMAHybridDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridDynamicDifferentialEvolution" - ).set_name("LLAMAHybridDynamicDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolution").set_name("LLAMAHybridDynamicDifferentialEvolution", register=True) except Exception as e: print("HybridDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDynamicDifferentialEvolutionGradient import ( - HybridDynamicDifferentialEvolutionGradient, - ) + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolutionGradient import HybridDynamicDifferentialEvolutionGradient lama_register["HybridDynamicDifferentialEvolutionGradient"] = HybridDynamicDifferentialEvolutionGradient - LLAMAHybridDynamicDifferentialEvolutionGradient = NonObjectOptimizer( - method="LLAMAHybridDynamicDifferentialEvolutionGradient" - ).set_name("LLAMAHybridDynamicDifferentialEvolutionGradient", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolutionGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicDifferentialEvolutionGradient = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolutionGradient").set_name("LLAMAHybridDynamicDifferentialEvolutionGradient", register=True) except Exception as e: print("HybridDynamicDifferentialEvolutionGradient can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDynamicElitistDE import HybridDynamicElitistDE lama_register["HybridDynamicElitistDE"] = HybridDynamicElitistDE - LLAMAHybridDynamicElitistDE = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE").set_name( - "LLAMAHybridDynamicElitistDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicElitistDE = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE").set_name("LLAMAHybridDynamicElitistDE", register=True) except Exception as e: print("HybridDynamicElitistDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridDynamicQuantumLevyDifferentialSearch import ( - HybridDynamicQuantumLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.HybridDynamicQuantumLevyDifferentialSearch import HybridDynamicQuantumLevyDifferentialSearch lama_register["HybridDynamicQuantumLevyDifferentialSearch"] = HybridDynamicQuantumLevyDifferentialSearch - LLAMAHybridDynamicQuantumLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMAHybridDynamicQuantumLevyDifferentialSearch" - ).set_name("LLAMAHybridDynamicQuantumLevyDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridDynamicQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAHybridDynamicQuantumLevyDifferentialSearch").set_name("LLAMAHybridDynamicQuantumLevyDifferentialSearch", register=True) except Exception as e: print("HybridDynamicQuantumLevyDifferentialSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridDynamicSearch import HybridDynamicSearch lama_register["HybridDynamicSearch"] = HybridDynamicSearch - LLAMAHybridDynamicSearch = NonObjectOptimizer(method="LLAMAHybridDynamicSearch").set_name( - "LLAMAHybridDynamicSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridDynamicSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridDynamicSearch = NonObjectOptimizer(method="LLAMAHybridDynamicSearch").set_name("LLAMAHybridDynamicSearch", register=True) except Exception as e: print("HybridDynamicSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridEnhancedAdaptiveDifferentialEvolution import ( - HybridEnhancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridEnhancedAdaptiveDifferentialEvolution import HybridEnhancedAdaptiveDifferentialEvolution lama_register["HybridEnhancedAdaptiveDifferentialEvolution"] = HybridEnhancedAdaptiveDifferentialEvolution - LLAMAHybridEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution" - ).set_name("LLAMAHybridEnhancedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAHybridEnhancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("HybridEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridEnhancedDualPhaseAdaptiveOptimizationV6 import ( - HybridEnhancedDualPhaseAdaptiveOptimizationV6, - ) + from nevergrad.optimization.lama.HybridEnhancedDualPhaseAdaptiveOptimizationV6 import HybridEnhancedDualPhaseAdaptiveOptimizationV6 - lama_register["HybridEnhancedDualPhaseAdaptiveOptimizationV6"] = ( - HybridEnhancedDualPhaseAdaptiveOptimizationV6 - ) - LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6 = NonObjectOptimizer( - method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6" - ).set_name("LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6", register=True) + lama_register["HybridEnhancedDualPhaseAdaptiveOptimizationV6"] = HybridEnhancedDualPhaseAdaptiveOptimizationV6 + res = NonObjectOptimizer(method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6 = NonObjectOptimizer(method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6").set_name("LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6", register=True) except Exception as e: print("HybridEnhancedDualPhaseAdaptiveOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridEnhancedGravitationalSwarmIntelligence import ( - HybridEnhancedGravitationalSwarmIntelligence, - ) + from nevergrad.optimization.lama.HybridEnhancedGravitationalSwarmIntelligence import HybridEnhancedGravitationalSwarmIntelligence - lama_register["HybridEnhancedGravitationalSwarmIntelligence"] = ( - HybridEnhancedGravitationalSwarmIntelligence - ) - LLAMAHybridEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( - method="LLAMAHybridEnhancedGravitationalSwarmIntelligence" - ).set_name("LLAMAHybridEnhancedGravitationalSwarmIntelligence", register=True) + lama_register["HybridEnhancedGravitationalSwarmIntelligence"] = HybridEnhancedGravitationalSwarmIntelligence + res = NonObjectOptimizer(method="LLAMAHybridEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAHybridEnhancedGravitationalSwarmIntelligence").set_name("LLAMAHybridEnhancedGravitationalSwarmIntelligence", register=True) except Exception as e: print("HybridEnhancedGravitationalSwarmIntelligence can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridEvolutionaryAnnealingOptimizer import ( - HybridEvolutionaryAnnealingOptimizer, - ) + from nevergrad.optimization.lama.HybridEvolutionaryAnnealingOptimizer import HybridEvolutionaryAnnealingOptimizer lama_register["HybridEvolutionaryAnnealingOptimizer"] = HybridEvolutionaryAnnealingOptimizer - LLAMAHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAHybridEvolutionaryAnnealingOptimizer" - ).set_name("LLAMAHybridEvolutionaryAnnealingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAHybridEvolutionaryAnnealingOptimizer", register=True) except Exception as e: print("HybridEvolutionaryAnnealingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridEvolutionaryOptimization import HybridEvolutionaryOptimization lama_register["HybridEvolutionaryOptimization"] = HybridEvolutionaryOptimization - LLAMAHybridEvolutionaryOptimization = NonObjectOptimizer( - method="LLAMAHybridEvolutionaryOptimization" - ).set_name("LLAMAHybridEvolutionaryOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEvolutionaryOptimization = NonObjectOptimizer(method="LLAMAHybridEvolutionaryOptimization").set_name("LLAMAHybridEvolutionaryOptimization", register=True) except Exception as e: print("HybridEvolutionaryOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridEvolvingAdaptiveStrategyV28 import ( - HybridEvolvingAdaptiveStrategyV28, - ) + from nevergrad.optimization.lama.HybridEvolvingAdaptiveStrategyV28 import HybridEvolvingAdaptiveStrategyV28 lama_register["HybridEvolvingAdaptiveStrategyV28"] = HybridEvolvingAdaptiveStrategyV28 - LLAMAHybridEvolvingAdaptiveStrategyV28 = NonObjectOptimizer( - method="LLAMAHybridEvolvingAdaptiveStrategyV28" - ).set_name("LLAMAHybridEvolvingAdaptiveStrategyV28", register=True) + res = NonObjectOptimizer(method="LLAMAHybridEvolvingAdaptiveStrategyV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridEvolvingAdaptiveStrategyV28 = NonObjectOptimizer(method="LLAMAHybridEvolvingAdaptiveStrategyV28").set_name("LLAMAHybridEvolvingAdaptiveStrategyV28", register=True) except Exception as e: print("HybridEvolvingAdaptiveStrategyV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridExploitationExplorationGradientSearch import ( - HybridExploitationExplorationGradientSearch, - ) + from nevergrad.optimization.lama.HybridExploitationExplorationGradientSearch import HybridExploitationExplorationGradientSearch lama_register["HybridExploitationExplorationGradientSearch"] = HybridExploitationExplorationGradientSearch - LLAMAHybridExploitationExplorationGradientSearch = NonObjectOptimizer( - method="LLAMAHybridExploitationExplorationGradientSearch" - ).set_name("LLAMAHybridExploitationExplorationGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridExploitationExplorationGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridExploitationExplorationGradientSearch = NonObjectOptimizer(method="LLAMAHybridExploitationExplorationGradientSearch").set_name("LLAMAHybridExploitationExplorationGradientSearch", register=True) except Exception as e: print("HybridExploitationExplorationGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGradientAnnealingWithMemory import ( - HybridGradientAnnealingWithMemory, - ) + from nevergrad.optimization.lama.HybridGradientAnnealingWithMemory import HybridGradientAnnealingWithMemory lama_register["HybridGradientAnnealingWithMemory"] = HybridGradientAnnealingWithMemory - LLAMAHybridGradientAnnealingWithMemory = NonObjectOptimizer( - method="LLAMAHybridGradientAnnealingWithMemory" - ).set_name("LLAMAHybridGradientAnnealingWithMemory", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientAnnealingWithMemory = NonObjectOptimizer(method="LLAMAHybridGradientAnnealingWithMemory").set_name("LLAMAHybridGradientAnnealingWithMemory", register=True) except Exception as e: print("HybridGradientAnnealingWithMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGradientBoostedMemoryAnnealingPlus import ( - HybridGradientBoostedMemoryAnnealingPlus, - ) + from nevergrad.optimization.lama.HybridGradientBoostedMemoryAnnealingPlus import HybridGradientBoostedMemoryAnnealingPlus lama_register["HybridGradientBoostedMemoryAnnealingPlus"] = HybridGradientBoostedMemoryAnnealingPlus - LLAMAHybridGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( - method="LLAMAHybridGradientBoostedMemoryAnnealingPlus" - ).set_name("LLAMAHybridGradientBoostedMemoryAnnealingPlus", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer(method="LLAMAHybridGradientBoostedMemoryAnnealingPlus").set_name("LLAMAHybridGradientBoostedMemoryAnnealingPlus", register=True) except Exception as e: print("HybridGradientBoostedMemoryAnnealingPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGradientCrossoverOptimization import ( - HybridGradientCrossoverOptimization, - ) + from nevergrad.optimization.lama.HybridGradientCrossoverOptimization import HybridGradientCrossoverOptimization lama_register["HybridGradientCrossoverOptimization"] = HybridGradientCrossoverOptimization - LLAMAHybridGradientCrossoverOptimization = NonObjectOptimizer( - method="LLAMAHybridGradientCrossoverOptimization" - ).set_name("LLAMAHybridGradientCrossoverOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientCrossoverOptimization = NonObjectOptimizer(method="LLAMAHybridGradientCrossoverOptimization").set_name("LLAMAHybridGradientCrossoverOptimization", register=True) except Exception as e: print("HybridGradientCrossoverOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGradientDifferentialEvolution import ( - HybridGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridGradientDifferentialEvolution import HybridGradientDifferentialEvolution lama_register["HybridGradientDifferentialEvolution"] = HybridGradientDifferentialEvolution - LLAMAHybridGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridGradientDifferentialEvolution" - ).set_name("LLAMAHybridGradientDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridGradientDifferentialEvolution").set_name("LLAMAHybridGradientDifferentialEvolution", register=True) except Exception as e: print("HybridGradientDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridGradientEvolution import HybridGradientEvolution lama_register["HybridGradientEvolution"] = HybridGradientEvolution - LLAMAHybridGradientEvolution = NonObjectOptimizer(method="LLAMAHybridGradientEvolution").set_name( - "LLAMAHybridGradientEvolution", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientEvolution = NonObjectOptimizer(method="LLAMAHybridGradientEvolution").set_name("LLAMAHybridGradientEvolution", register=True) except Exception as e: print("HybridGradientEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridGradientMemoryAnnealing import HybridGradientMemoryAnnealing lama_register["HybridGradientMemoryAnnealing"] = HybridGradientMemoryAnnealing - LLAMAHybridGradientMemoryAnnealing = NonObjectOptimizer( - method="LLAMAHybridGradientMemoryAnnealing" - ).set_name("LLAMAHybridGradientMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientMemoryAnnealing = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealing").set_name("LLAMAHybridGradientMemoryAnnealing", register=True) except Exception as e: print("HybridGradientMemoryAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV2 import HybridGradientMemoryAnnealingV2 lama_register["HybridGradientMemoryAnnealingV2"] = HybridGradientMemoryAnnealingV2 - LLAMAHybridGradientMemoryAnnealingV2 = NonObjectOptimizer( - method="LLAMAHybridGradientMemoryAnnealingV2" - ).set_name("LLAMAHybridGradientMemoryAnnealingV2", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientMemoryAnnealingV2 = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV2").set_name("LLAMAHybridGradientMemoryAnnealingV2", register=True) except Exception as e: print("HybridGradientMemoryAnnealingV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV3 import HybridGradientMemoryAnnealingV3 lama_register["HybridGradientMemoryAnnealingV3"] = HybridGradientMemoryAnnealingV3 - LLAMAHybridGradientMemoryAnnealingV3 = NonObjectOptimizer( - method="LLAMAHybridGradientMemoryAnnealingV3" - ).set_name("LLAMAHybridGradientMemoryAnnealingV3", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientMemoryAnnealingV3 = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV3").set_name("LLAMAHybridGradientMemoryAnnealingV3", register=True) except Exception as e: print("HybridGradientMemoryAnnealingV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGradientMemorySimulatedAnnealing import ( - HybridGradientMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.HybridGradientMemorySimulatedAnnealing import HybridGradientMemorySimulatedAnnealing lama_register["HybridGradientMemorySimulatedAnnealing"] = HybridGradientMemorySimulatedAnnealing - LLAMAHybridGradientMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAHybridGradientMemorySimulatedAnnealing" - ).set_name("LLAMAHybridGradientMemorySimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAHybridGradientMemorySimulatedAnnealing").set_name("LLAMAHybridGradientMemorySimulatedAnnealing", register=True) except Exception as e: print("HybridGradientMemorySimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridGradientPSO import HybridGradientPSO lama_register["HybridGradientPSO"] = HybridGradientPSO - LLAMAHybridGradientPSO = NonObjectOptimizer(method="LLAMAHybridGradientPSO").set_name( - "LLAMAHybridGradientPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGradientPSO = NonObjectOptimizer(method="LLAMAHybridGradientPSO").set_name("LLAMAHybridGradientPSO", register=True) except Exception as e: print("HybridGradientPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridGuidedEvolutionaryOptimizer import ( - HybridGuidedEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.HybridGuidedEvolutionaryOptimizer import HybridGuidedEvolutionaryOptimizer lama_register["HybridGuidedEvolutionaryOptimizer"] = HybridGuidedEvolutionaryOptimizer - LLAMAHybridGuidedEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAHybridGuidedEvolutionaryOptimizer" - ).set_name("LLAMAHybridGuidedEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridGuidedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridGuidedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHybridGuidedEvolutionaryOptimizer").set_name("LLAMAHybridGuidedEvolutionaryOptimizer", register=True) except Exception as e: print("HybridGuidedEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridMemoryAdaptiveDE import HybridMemoryAdaptiveDE lama_register["HybridMemoryAdaptiveDE"] = HybridMemoryAdaptiveDE - LLAMAHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE").set_name( - "LLAMAHybridMemoryAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE").set_name("LLAMAHybridMemoryAdaptiveDE", register=True) except Exception as e: print("HybridMemoryAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridMultiDimensionalAnnealing import HybridMultiDimensionalAnnealing lama_register["HybridMultiDimensionalAnnealing"] = HybridMultiDimensionalAnnealing - LLAMAHybridMultiDimensionalAnnealing = NonObjectOptimizer( - method="LLAMAHybridMultiDimensionalAnnealing" - ).set_name("LLAMAHybridMultiDimensionalAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHybridMultiDimensionalAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridMultiDimensionalAnnealing = NonObjectOptimizer(method="LLAMAHybridMultiDimensionalAnnealing").set_name("LLAMAHybridMultiDimensionalAnnealing", register=True) except Exception as e: print("HybridMultiDimensionalAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridPSO_DE import HybridPSO_DE lama_register["HybridPSO_DE"] = HybridPSO_DE - LLAMAHybridPSO_DE = NonObjectOptimizer(method="LLAMAHybridPSO_DE").set_name( - "LLAMAHybridPSO_DE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridPSO_DE = NonObjectOptimizer(method="LLAMAHybridPSO_DE").set_name("LLAMAHybridPSO_DE", register=True) except Exception as e: print("HybridPSO_DE can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridPSO_DE_GradientOptimizer import HybridPSO_DE_GradientOptimizer lama_register["HybridPSO_DE_GradientOptimizer"] = HybridPSO_DE_GradientOptimizer - LLAMAHybridPSO_DE_GradientOptimizer = NonObjectOptimizer( - method="LLAMAHybridPSO_DE_GradientOptimizer" - ).set_name("LLAMAHybridPSO_DE_GradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHybridPSO_DE_GradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridPSO_DE_GradientOptimizer = NonObjectOptimizer(method="LLAMAHybridPSO_DE_GradientOptimizer").set_name("LLAMAHybridPSO_DE_GradientOptimizer", register=True) except Exception as e: print("HybridPSO_DE_GradientOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridParticleDE import HybridParticleDE lama_register["HybridParticleDE"] = HybridParticleDE - LLAMAHybridParticleDE = NonObjectOptimizer(method="LLAMAHybridParticleDE").set_name( - "LLAMAHybridParticleDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridParticleDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridParticleDE = NonObjectOptimizer(method="LLAMAHybridParticleDE").set_name("LLAMAHybridParticleDE", register=True) except Exception as e: print("HybridParticleDE can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridParticleDE_v2 import HybridParticleDE_v2 lama_register["HybridParticleDE_v2"] = HybridParticleDE_v2 - LLAMAHybridParticleDE_v2 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2").set_name( - "LLAMAHybridParticleDE_v2", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridParticleDE_v2 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2").set_name("LLAMAHybridParticleDE_v2", register=True) except Exception as e: print("HybridParticleDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridParticleDE_v3 import HybridParticleDE_v3 lama_register["HybridParticleDE_v3"] = HybridParticleDE_v3 - LLAMAHybridParticleDE_v3 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3").set_name( - "LLAMAHybridParticleDE_v3", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridParticleDE_v3 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3").set_name("LLAMAHybridParticleDE_v3", register=True) except Exception as e: print("HybridParticleDE_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridParticleSwarmDifferentialEvolutionOptimizer import ( - HybridParticleSwarmDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.HybridParticleSwarmDifferentialEvolutionOptimizer import HybridParticleSwarmDifferentialEvolutionOptimizer - lama_register["HybridParticleSwarmDifferentialEvolutionOptimizer"] = ( - HybridParticleSwarmDifferentialEvolutionOptimizer - ) - LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer" - ).set_name("LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer", register=True) + lama_register["HybridParticleSwarmDifferentialEvolutionOptimizer"] = HybridParticleSwarmDifferentialEvolutionOptimizer + res = NonObjectOptimizer(method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer").set_name("LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer", register=True) except Exception as e: print("HybridParticleSwarmDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumAdaptiveMemeticSearch import ( - HybridQuantumAdaptiveMemeticSearch, - ) + from nevergrad.optimization.lama.HybridQuantumAdaptiveMemeticSearch import HybridQuantumAdaptiveMemeticSearch lama_register["HybridQuantumAdaptiveMemeticSearch"] = HybridQuantumAdaptiveMemeticSearch - LLAMAHybridQuantumAdaptiveMemeticSearch = NonObjectOptimizer( - method="LLAMAHybridQuantumAdaptiveMemeticSearch" - ).set_name("LLAMAHybridQuantumAdaptiveMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumAdaptiveMemeticSearch = NonObjectOptimizer(method="LLAMAHybridQuantumAdaptiveMemeticSearch").set_name("LLAMAHybridQuantumAdaptiveMemeticSearch", register=True) except Exception as e: print("HybridQuantumAdaptiveMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolution import ( - HybridQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolution import HybridQuantumDifferentialEvolution lama_register["HybridQuantumDifferentialEvolution"] = HybridQuantumDifferentialEvolution - LLAMAHybridQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridQuantumDifferentialEvolution" - ).set_name("LLAMAHybridQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolution").set_name("LLAMAHybridQuantumDifferentialEvolution", register=True) except Exception as e: print("HybridQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart import ( - HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart, - ) + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart import HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart - lama_register["HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart"] = ( - HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart - ) - LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart = NonObjectOptimizer( - method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart" - ).set_name( - "LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart", register=True - ) + lama_register["HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart"] = HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart + res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart").set_name("LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart", register=True) except Exception as e: - print( - "HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart can not be imported: ", - e, - ) - + print("HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart can not be imported: ", e) try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch import ( - HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch, - ) + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch import HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch - lama_register["HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch"] = ( - HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch - ) - LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch = NonObjectOptimizer( - method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch" - ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch", register=True) + lama_register["HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch"] = HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch + res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch").set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch", register=True) except Exception as e: print("HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory import ( - HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory, - ) + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory import HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory - lama_register["HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory"] = ( - HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory - ) - LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory = NonObjectOptimizer( - method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory" - ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory", register=True) + lama_register["HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory"] = HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory + res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory").set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory", register=True) except Exception as e: print("HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumDifferentialParticleSwarmOptimization import ( - HybridQuantumDifferentialParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.HybridQuantumDifferentialParticleSwarmOptimization import HybridQuantumDifferentialParticleSwarmOptimization - lama_register["HybridQuantumDifferentialParticleSwarmOptimization"] = ( - HybridQuantumDifferentialParticleSwarmOptimization - ) - LLAMAHybridQuantumDifferentialParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization" - ).set_name("LLAMAHybridQuantumDifferentialParticleSwarmOptimization", register=True) + lama_register["HybridQuantumDifferentialParticleSwarmOptimization"] = HybridQuantumDifferentialParticleSwarmOptimization + res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization").set_name("LLAMAHybridQuantumDifferentialParticleSwarmOptimization", register=True) except Exception as e: print("HybridQuantumDifferentialParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuantumEnhancedMultiPhaseAdaptiveDE import ( - HybridQuantumEnhancedMultiPhaseAdaptiveDE, - ) + from nevergrad.optimization.lama.HybridQuantumEnhancedMultiPhaseAdaptiveDE import HybridQuantumEnhancedMultiPhaseAdaptiveDE lama_register["HybridQuantumEnhancedMultiPhaseAdaptiveDE"] = HybridQuantumEnhancedMultiPhaseAdaptiveDE - LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( - method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE" - ).set_name("LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE").set_name("LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE", register=True) except Exception as e: print("HybridQuantumEnhancedMultiPhaseAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridQuantumEvolution import HybridQuantumEvolution lama_register["HybridQuantumEvolution"] = HybridQuantumEvolution - LLAMAHybridQuantumEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution").set_name( - "LLAMAHybridQuantumEvolution", register=True - ) + res = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution").set_name("LLAMAHybridQuantumEvolution", register=True) except Exception as e: print("HybridQuantumEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridQuantumGradientEvolution import HybridQuantumGradientEvolution lama_register["HybridQuantumGradientEvolution"] = HybridQuantumGradientEvolution - LLAMAHybridQuantumGradientEvolution = NonObjectOptimizer( - method="LLAMAHybridQuantumGradientEvolution" - ).set_name("LLAMAHybridQuantumGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumGradientEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumGradientEvolution").set_name("LLAMAHybridQuantumGradientEvolution", register=True) except Exception as e: print("HybridQuantumGradientEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridQuantumLevyAdaptiveSwarmV2 import HybridQuantumLevyAdaptiveSwarmV2 lama_register["HybridQuantumLevyAdaptiveSwarmV2"] = HybridQuantumLevyAdaptiveSwarmV2 - LLAMAHybridQuantumLevyAdaptiveSwarmV2 = NonObjectOptimizer( - method="LLAMAHybridQuantumLevyAdaptiveSwarmV2" - ).set_name("LLAMAHybridQuantumLevyAdaptiveSwarmV2", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumLevyAdaptiveSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumLevyAdaptiveSwarmV2 = NonObjectOptimizer(method="LLAMAHybridQuantumLevyAdaptiveSwarmV2").set_name("LLAMAHybridQuantumLevyAdaptiveSwarmV2", register=True) except Exception as e: print("HybridQuantumLevyAdaptiveSwarmV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.HybridQuantumMemeticOptimization import HybridQuantumMemeticOptimization lama_register["HybridQuantumMemeticOptimization"] = HybridQuantumMemeticOptimization - LLAMAHybridQuantumMemeticOptimization = NonObjectOptimizer( - method="LLAMAHybridQuantumMemeticOptimization" - ).set_name("LLAMAHybridQuantumMemeticOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuantumMemeticOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuantumMemeticOptimization = NonObjectOptimizer(method="LLAMAHybridQuantumMemeticOptimization").set_name("LLAMAHybridQuantumMemeticOptimization", register=True) except Exception as e: print("HybridQuantumMemeticOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuasiRandomDEGradientAnnealing import ( - HybridQuasiRandomDEGradientAnnealing, - ) + from nevergrad.optimization.lama.HybridQuasiRandomDEGradientAnnealing import HybridQuasiRandomDEGradientAnnealing lama_register["HybridQuasiRandomDEGradientAnnealing"] = HybridQuasiRandomDEGradientAnnealing - LLAMAHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( - method="LLAMAHybridQuasiRandomDEGradientAnnealing" - ).set_name("LLAMAHybridQuasiRandomDEGradientAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMAHybridQuasiRandomDEGradientAnnealing").set_name("LLAMAHybridQuasiRandomDEGradientAnnealing", register=True) except Exception as e: print("HybridQuasiRandomDEGradientAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridQuasiRandomGradientDifferentialEvolution import ( - HybridQuasiRandomGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridQuasiRandomGradientDifferentialEvolution import HybridQuasiRandomGradientDifferentialEvolution - lama_register["HybridQuasiRandomGradientDifferentialEvolution"] = ( - HybridQuasiRandomGradientDifferentialEvolution - ) - LLAMAHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridQuasiRandomGradientDifferentialEvolution" - ).set_name("LLAMAHybridQuasiRandomGradientDifferentialEvolution", register=True) + lama_register["HybridQuasiRandomGradientDifferentialEvolution"] = HybridQuasiRandomGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridQuasiRandomGradientDifferentialEvolution").set_name("LLAMAHybridQuasiRandomGradientDifferentialEvolution", register=True) except Exception as e: print("HybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost import ( - HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost, - ) + from nevergrad.optimization.lama.HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost import HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost - lama_register["HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost"] = ( - HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost - ) - LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost = NonObjectOptimizer( - method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost" - ).set_name("LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost", register=True) + lama_register["HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost"] = HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost + res = NonObjectOptimizer(method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost = NonObjectOptimizer(method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost").set_name("LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost", register=True) except Exception as e: print("HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.HybridSelfAdaptiveDifferentialEvolution import ( - HybridSelfAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.HybridSelfAdaptiveDifferentialEvolution import HybridSelfAdaptiveDifferentialEvolution lama_register["HybridSelfAdaptiveDifferentialEvolution"] = HybridSelfAdaptiveDifferentialEvolution - LLAMAHybridSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAHybridSelfAdaptiveDifferentialEvolution" - ).set_name("LLAMAHybridSelfAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAHybridSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHybridSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridSelfAdaptiveDifferentialEvolution").set_name("LLAMAHybridSelfAdaptiveDifferentialEvolution", register=True) except Exception as e: print("HybridSelfAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperAdaptiveConvergenceStrategy import HyperAdaptiveConvergenceStrategy lama_register["HyperAdaptiveConvergenceStrategy"] = HyperAdaptiveConvergenceStrategy - LLAMAHyperAdaptiveConvergenceStrategy = NonObjectOptimizer( - method="LLAMAHyperAdaptiveConvergenceStrategy" - ).set_name("LLAMAHyperAdaptiveConvergenceStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveConvergenceStrategy = NonObjectOptimizer(method="LLAMAHyperAdaptiveConvergenceStrategy").set_name("LLAMAHyperAdaptiveConvergenceStrategy", register=True) except Exception as e: print("HyperAdaptiveConvergenceStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperAdaptiveGradientRAMEDS import HyperAdaptiveGradientRAMEDS lama_register["HyperAdaptiveGradientRAMEDS"] = HyperAdaptiveGradientRAMEDS - LLAMAHyperAdaptiveGradientRAMEDS = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS").set_name( - "LLAMAHyperAdaptiveGradientRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveGradientRAMEDS = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS").set_name("LLAMAHyperAdaptiveGradientRAMEDS", register=True) except Exception as e: print("HyperAdaptiveGradientRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperAdaptiveHybridDEPSOwithDynamicRestart import ( - HyperAdaptiveHybridDEPSOwithDynamicRestart, - ) + from nevergrad.optimization.lama.HyperAdaptiveHybridDEPSOwithDynamicRestart import HyperAdaptiveHybridDEPSOwithDynamicRestart lama_register["HyperAdaptiveHybridDEPSOwithDynamicRestart"] = HyperAdaptiveHybridDEPSOwithDynamicRestart - LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart = NonObjectOptimizer( - method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart" - ).set_name("LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart = NonObjectOptimizer(method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart").set_name("LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart", register=True) except Exception as e: print("HyperAdaptiveHybridDEPSOwithDynamicRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperAdaptiveMemoryGuidedStrategyV74 import ( - HyperAdaptiveMemoryGuidedStrategyV74, - ) + from nevergrad.optimization.lama.HyperAdaptiveMemoryGuidedStrategyV74 import HyperAdaptiveMemoryGuidedStrategyV74 lama_register["HyperAdaptiveMemoryGuidedStrategyV74"] = HyperAdaptiveMemoryGuidedStrategyV74 - LLAMAHyperAdaptiveMemoryGuidedStrategyV74 = NonObjectOptimizer( - method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74" - ).set_name("LLAMAHyperAdaptiveMemoryGuidedStrategyV74", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveMemoryGuidedStrategyV74 = NonObjectOptimizer(method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74").set_name("LLAMAHyperAdaptiveMemoryGuidedStrategyV74", register=True) except Exception as e: print("HyperAdaptiveMemoryGuidedStrategyV74 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperAdaptivePrecisionOptimizer import HyperAdaptivePrecisionOptimizer lama_register["HyperAdaptivePrecisionOptimizer"] = HyperAdaptivePrecisionOptimizer - LLAMAHyperAdaptivePrecisionOptimizer = NonObjectOptimizer( - method="LLAMAHyperAdaptivePrecisionOptimizer" - ).set_name("LLAMAHyperAdaptivePrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperAdaptivePrecisionOptimizer").set_name("LLAMAHyperAdaptivePrecisionOptimizer", register=True) except Exception as e: print("HyperAdaptivePrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperAdaptiveSinusoidalDifferentialSwarm import ( - HyperAdaptiveSinusoidalDifferentialSwarm, - ) + from nevergrad.optimization.lama.HyperAdaptiveSinusoidalDifferentialSwarm import HyperAdaptiveSinusoidalDifferentialSwarm lama_register["HyperAdaptiveSinusoidalDifferentialSwarm"] = HyperAdaptiveSinusoidalDifferentialSwarm - LLAMAHyperAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( - method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm" - ).set_name("LLAMAHyperAdaptiveSinusoidalDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAHyperAdaptiveSinusoidalDifferentialSwarm", register=True) except Exception as e: print("HyperAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperAdaptiveStrategyDE import HyperAdaptiveStrategyDE lama_register["HyperAdaptiveStrategyDE"] = HyperAdaptiveStrategyDE - LLAMAHyperAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE").set_name( - "LLAMAHyperAdaptiveStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE").set_name("LLAMAHyperAdaptiveStrategyDE", register=True) except Exception as e: print("HyperAdaptiveStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperAdvancedDynamicPrecisionOptimizerV41 import ( - HyperAdvancedDynamicPrecisionOptimizerV41, - ) + from nevergrad.optimization.lama.HyperAdvancedDynamicPrecisionOptimizerV41 import HyperAdvancedDynamicPrecisionOptimizerV41 lama_register["HyperAdvancedDynamicPrecisionOptimizerV41"] = HyperAdvancedDynamicPrecisionOptimizerV41 - LLAMAHyperAdvancedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( - method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41" - ).set_name("LLAMAHyperAdvancedDynamicPrecisionOptimizerV41", register=True) + res = NonObjectOptimizer(method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperAdvancedDynamicPrecisionOptimizerV41 = NonObjectOptimizer(method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41").set_name("LLAMAHyperAdvancedDynamicPrecisionOptimizerV41", register=True) except Exception as e: print("HyperAdvancedDynamicPrecisionOptimizerV41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperEvolvedDynamicPrecisionOptimizerV48 import ( - HyperEvolvedDynamicPrecisionOptimizerV48, - ) + from nevergrad.optimization.lama.HyperEvolvedDynamicPrecisionOptimizerV48 import HyperEvolvedDynamicPrecisionOptimizerV48 lama_register["HyperEvolvedDynamicPrecisionOptimizerV48"] = HyperEvolvedDynamicPrecisionOptimizerV48 - LLAMAHyperEvolvedDynamicPrecisionOptimizerV48 = NonObjectOptimizer( - method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48" - ).set_name("LLAMAHyperEvolvedDynamicPrecisionOptimizerV48", register=True) + res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperEvolvedDynamicPrecisionOptimizerV48 = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48").set_name("LLAMAHyperEvolvedDynamicPrecisionOptimizerV48", register=True) except Exception as e: print("HyperEvolvedDynamicPrecisionOptimizerV48 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperEvolvedDynamicRAMEDS import HyperEvolvedDynamicRAMEDS lama_register["HyperEvolvedDynamicRAMEDS"] = HyperEvolvedDynamicRAMEDS - LLAMAHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS").set_name( - "LLAMAHyperEvolvedDynamicRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS").set_name("LLAMAHyperEvolvedDynamicRAMEDS", register=True) except Exception as e: print("HyperEvolvedDynamicRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperEvolvedRAMEDS import HyperEvolvedRAMEDS lama_register["HyperEvolvedRAMEDS"] = HyperEvolvedRAMEDS - LLAMAHyperEvolvedRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS").set_name( - "LLAMAHyperEvolvedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperEvolvedRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS").set_name("LLAMAHyperEvolvedRAMEDS", register=True) except Exception as e: print("HyperEvolvedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperFocusedAdaptiveElitistStrategyV5 import ( - HyperFocusedAdaptiveElitistStrategyV5, - ) + from nevergrad.optimization.lama.HyperFocusedAdaptiveElitistStrategyV5 import HyperFocusedAdaptiveElitistStrategyV5 lama_register["HyperFocusedAdaptiveElitistStrategyV5"] = HyperFocusedAdaptiveElitistStrategyV5 - LLAMAHyperFocusedAdaptiveElitistStrategyV5 = NonObjectOptimizer( - method="LLAMAHyperFocusedAdaptiveElitistStrategyV5" - ).set_name("LLAMAHyperFocusedAdaptiveElitistStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAHyperFocusedAdaptiveElitistStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperFocusedAdaptiveElitistStrategyV5 = NonObjectOptimizer(method="LLAMAHyperFocusedAdaptiveElitistStrategyV5").set_name("LLAMAHyperFocusedAdaptiveElitistStrategyV5", register=True) except Exception as e: print("HyperFocusedAdaptiveElitistStrategyV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperOptimalRAMEDS import HyperOptimalRAMEDS lama_register["HyperOptimalRAMEDS"] = HyperOptimalRAMEDS - LLAMAHyperOptimalRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS").set_name( - "LLAMAHyperOptimalRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimalRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS").set_name("LLAMAHyperOptimalRAMEDS", register=True) except Exception as e: print("HyperOptimalRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimalStrategicEvolutionaryOptimizerV58 import ( - HyperOptimalStrategicEvolutionaryOptimizerV58, - ) + from nevergrad.optimization.lama.HyperOptimalStrategicEvolutionaryOptimizerV58 import HyperOptimalStrategicEvolutionaryOptimizerV58 - lama_register["HyperOptimalStrategicEvolutionaryOptimizerV58"] = ( - HyperOptimalStrategicEvolutionaryOptimizerV58 - ) - LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58 = NonObjectOptimizer( - method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58" - ).set_name("LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58", register=True) + lama_register["HyperOptimalStrategicEvolutionaryOptimizerV58"] = HyperOptimalStrategicEvolutionaryOptimizerV58 + res = NonObjectOptimizer(method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58 = NonObjectOptimizer(method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58").set_name("LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58", register=True) except Exception as e: print("HyperOptimalStrategicEvolutionaryOptimizerV58 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizer import ( - HyperOptimizedDynamicPrecisionOptimizer, - ) + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizer import HyperOptimizedDynamicPrecisionOptimizer lama_register["HyperOptimizedDynamicPrecisionOptimizer"] = HyperOptimizedDynamicPrecisionOptimizer - LLAMAHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( - method="LLAMAHyperOptimizedDynamicPrecisionOptimizer" - ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizer").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizer", register=True) except Exception as e: print("HyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV12 import ( - HyperOptimizedDynamicPrecisionOptimizerV12, - ) + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV12 import HyperOptimizedDynamicPrecisionOptimizerV12 lama_register["HyperOptimizedDynamicPrecisionOptimizerV12"] = HyperOptimizedDynamicPrecisionOptimizerV12 - LLAMAHyperOptimizedDynamicPrecisionOptimizerV12 = NonObjectOptimizer( - method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12" - ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV12", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV12 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV12", register=True) except Exception as e: print("HyperOptimizedDynamicPrecisionOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV42 import ( - HyperOptimizedDynamicPrecisionOptimizerV42, - ) + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV42 import HyperOptimizedDynamicPrecisionOptimizerV42 lama_register["HyperOptimizedDynamicPrecisionOptimizerV42"] = HyperOptimizedDynamicPrecisionOptimizerV42 - LLAMAHyperOptimizedDynamicPrecisionOptimizerV42 = NonObjectOptimizer( - method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42" - ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV42", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV42 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV42", register=True) except Exception as e: print("HyperOptimizedDynamicPrecisionOptimizerV42 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV43 import ( - HyperOptimizedDynamicPrecisionOptimizerV43, - ) + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV43 import HyperOptimizedDynamicPrecisionOptimizerV43 lama_register["HyperOptimizedDynamicPrecisionOptimizerV43"] = HyperOptimizedDynamicPrecisionOptimizerV43 - LLAMAHyperOptimizedDynamicPrecisionOptimizerV43 = NonObjectOptimizer( - method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43" - ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV43", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV43 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV43", register=True) except Exception as e: print("HyperOptimizedDynamicPrecisionOptimizerV43 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV57 import ( - HyperOptimizedDynamicPrecisionOptimizerV57, - ) + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV57 import HyperOptimizedDynamicPrecisionOptimizerV57 lama_register["HyperOptimizedDynamicPrecisionOptimizerV57"] = HyperOptimizedDynamicPrecisionOptimizerV57 - LLAMAHyperOptimizedDynamicPrecisionOptimizerV57 = NonObjectOptimizer( - method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57" - ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV57", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV57 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV57", register=True) except Exception as e: print("HyperOptimizedDynamicPrecisionOptimizerV57 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedEvolutionaryGradientOptimizerV61 import ( - HyperOptimizedEvolutionaryGradientOptimizerV61, - ) + from nevergrad.optimization.lama.HyperOptimizedEvolutionaryGradientOptimizerV61 import HyperOptimizedEvolutionaryGradientOptimizerV61 - lama_register["HyperOptimizedEvolutionaryGradientOptimizerV61"] = ( - HyperOptimizedEvolutionaryGradientOptimizerV61 - ) - LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61 = NonObjectOptimizer( - method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61" - ).set_name("LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61", register=True) + lama_register["HyperOptimizedEvolutionaryGradientOptimizerV61"] = HyperOptimizedEvolutionaryGradientOptimizerV61 + res = NonObjectOptimizer(method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61 = NonObjectOptimizer(method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61").set_name("LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61", register=True) except Exception as e: print("HyperOptimizedEvolutionaryGradientOptimizerV61 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedGradientEnhancedRAMEDS import ( - HyperOptimizedGradientEnhancedRAMEDS, - ) + from nevergrad.optimization.lama.HyperOptimizedGradientEnhancedRAMEDS import HyperOptimizedGradientEnhancedRAMEDS lama_register["HyperOptimizedGradientEnhancedRAMEDS"] = HyperOptimizedGradientEnhancedRAMEDS - LLAMAHyperOptimizedGradientEnhancedRAMEDS = NonObjectOptimizer( - method="LLAMAHyperOptimizedGradientEnhancedRAMEDS" - ).set_name("LLAMAHyperOptimizedGradientEnhancedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedGradientEnhancedRAMEDS").set_name("LLAMAHyperOptimizedGradientEnhancedRAMEDS", register=True) except Exception as e: print("HyperOptimizedGradientEnhancedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 import ( - HyperOptimizedMultiStrategicEvolutionaryOptimizerV47, - ) + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 import HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 - lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV47"] = ( - HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 - ) - LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47 = NonObjectOptimizer( - method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47" - ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47", register=True) + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV47"] = HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 + res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47 = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47").set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47", register=True) except Exception as e: print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 import ( - HyperOptimizedMultiStrategicEvolutionaryOptimizerV48, - ) + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 import HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 - lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV48"] = ( - HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 - ) - LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48 = NonObjectOptimizer( - method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48" - ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48", register=True) + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV48"] = HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 + res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48 = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48").set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48", register=True) except Exception as e: print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperOptimizedRAMEDS import HyperOptimizedRAMEDS lama_register["HyperOptimizedRAMEDS"] = HyperOptimizedRAMEDS - LLAMAHyperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS").set_name( - "LLAMAHyperOptimizedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS").set_name("LLAMAHyperOptimizedRAMEDS", register=True) except Exception as e: print("HyperOptimizedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedSpiralDifferentialOptimizerV8 import ( - HyperOptimizedSpiralDifferentialOptimizerV8, - ) + from nevergrad.optimization.lama.HyperOptimizedSpiralDifferentialOptimizerV8 import HyperOptimizedSpiralDifferentialOptimizerV8 lama_register["HyperOptimizedSpiralDifferentialOptimizerV8"] = HyperOptimizedSpiralDifferentialOptimizerV8 - LLAMAHyperOptimizedSpiralDifferentialOptimizerV8 = NonObjectOptimizer( - method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8" - ).set_name("LLAMAHyperOptimizedSpiralDifferentialOptimizerV8", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedSpiralDifferentialOptimizerV8 = NonObjectOptimizer(method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8").set_name("LLAMAHyperOptimizedSpiralDifferentialOptimizerV8", register=True) except Exception as e: print("HyperOptimizedSpiralDifferentialOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperOptimizedThermalEvolutionaryOptimizer import ( - HyperOptimizedThermalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.HyperOptimizedThermalEvolutionaryOptimizer import HyperOptimizedThermalEvolutionaryOptimizer lama_register["HyperOptimizedThermalEvolutionaryOptimizer"] = HyperOptimizedThermalEvolutionaryOptimizer - LLAMAHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer" - ).set_name("LLAMAHyperOptimizedThermalEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMAHyperOptimizedThermalEvolutionaryOptimizer", register=True) except Exception as e: print("HyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperOptimizedUltraRefinedRAMEDS import HyperOptimizedUltraRefinedRAMEDS lama_register["HyperOptimizedUltraRefinedRAMEDS"] = HyperOptimizedUltraRefinedRAMEDS - LLAMAHyperOptimizedUltraRefinedRAMEDS = NonObjectOptimizer( - method="LLAMAHyperOptimizedUltraRefinedRAMEDS" - ).set_name("LLAMAHyperOptimizedUltraRefinedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAHyperOptimizedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperOptimizedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedUltraRefinedRAMEDS").set_name("LLAMAHyperOptimizedUltraRefinedRAMEDS", register=True) except Exception as e: print("HyperOptimizedUltraRefinedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperPreciseEvolutionaryOptimizer import ( - HyperPreciseEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.HyperPreciseEvolutionaryOptimizer import HyperPreciseEvolutionaryOptimizer lama_register["HyperPreciseEvolutionaryOptimizer"] = HyperPreciseEvolutionaryOptimizer - LLAMAHyperPreciseEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAHyperPreciseEvolutionaryOptimizer" - ).set_name("LLAMAHyperPreciseEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperPreciseEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperPreciseEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHyperPreciseEvolutionaryOptimizer").set_name("LLAMAHyperPreciseEvolutionaryOptimizer", register=True) except Exception as e: print("HyperPreciseEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperPrecisionEvolutionaryOptimizerV23 import ( - HyperPrecisionEvolutionaryOptimizerV23, - ) + from nevergrad.optimization.lama.HyperPrecisionEvolutionaryOptimizerV23 import HyperPrecisionEvolutionaryOptimizerV23 lama_register["HyperPrecisionEvolutionaryOptimizerV23"] = HyperPrecisionEvolutionaryOptimizerV23 - LLAMAHyperPrecisionEvolutionaryOptimizerV23 = NonObjectOptimizer( - method="LLAMAHyperPrecisionEvolutionaryOptimizerV23" - ).set_name("LLAMAHyperPrecisionEvolutionaryOptimizerV23", register=True) + res = NonObjectOptimizer(method="LLAMAHyperPrecisionEvolutionaryOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperPrecisionEvolutionaryOptimizerV23 = NonObjectOptimizer(method="LLAMAHyperPrecisionEvolutionaryOptimizerV23").set_name("LLAMAHyperPrecisionEvolutionaryOptimizerV23", register=True) except Exception as e: print("HyperPrecisionEvolutionaryOptimizerV23 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperQuantumConvergenceOptimizer import HyperQuantumConvergenceOptimizer lama_register["HyperQuantumConvergenceOptimizer"] = HyperQuantumConvergenceOptimizer - LLAMAHyperQuantumConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAHyperQuantumConvergenceOptimizer" - ).set_name("LLAMAHyperQuantumConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperQuantumConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperQuantumConvergenceOptimizer = NonObjectOptimizer(method="LLAMAHyperQuantumConvergenceOptimizer").set_name("LLAMAHyperQuantumConvergenceOptimizer", register=True) except Exception as e: print("HyperQuantumConvergenceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperQuantumStateCrossoverOptimization import ( - HyperQuantumStateCrossoverOptimization, - ) + from nevergrad.optimization.lama.HyperQuantumStateCrossoverOptimization import HyperQuantumStateCrossoverOptimization lama_register["HyperQuantumStateCrossoverOptimization"] = HyperQuantumStateCrossoverOptimization - LLAMAHyperQuantumStateCrossoverOptimization = NonObjectOptimizer( - method="LLAMAHyperQuantumStateCrossoverOptimization" - ).set_name("LLAMAHyperQuantumStateCrossoverOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAHyperQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAHyperQuantumStateCrossoverOptimization").set_name("LLAMAHyperQuantumStateCrossoverOptimization", register=True) except Exception as e: print("HyperQuantumStateCrossoverOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperRAMEDS import HyperRAMEDS lama_register["HyperRAMEDS"] = HyperRAMEDS - LLAMAHyperRAMEDS = NonObjectOptimizer(method="LLAMAHyperRAMEDS").set_name( - "LLAMAHyperRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRAMEDS = NonObjectOptimizer(method="LLAMAHyperRAMEDS").set_name("LLAMAHyperRAMEDS", register=True) except Exception as e: print("HyperRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 import ( - HyperRefinedAdaptiveDynamicPrecisionOptimizerV52, - ) + from nevergrad.optimization.lama.HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 import HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 - lama_register["HyperRefinedAdaptiveDynamicPrecisionOptimizerV52"] = ( - HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 - ) - LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52 = NonObjectOptimizer( - method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52" - ).set_name("LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52", register=True) + lama_register["HyperRefinedAdaptiveDynamicPrecisionOptimizerV52"] = HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 + res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52 = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52").set_name("LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52", register=True) except Exception as e: print("HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedAdaptiveGuidedMutationOptimizer import ( - HyperRefinedAdaptiveGuidedMutationOptimizer, - ) + from nevergrad.optimization.lama.HyperRefinedAdaptiveGuidedMutationOptimizer import HyperRefinedAdaptiveGuidedMutationOptimizer lama_register["HyperRefinedAdaptiveGuidedMutationOptimizer"] = HyperRefinedAdaptiveGuidedMutationOptimizer - LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( - method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer" - ).set_name("LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer").set_name("LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer", register=True) except Exception as e: print("HyperRefinedAdaptiveGuidedMutationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionOptimizer import ( - HyperRefinedAdaptivePrecisionOptimizer, - ) + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionOptimizer import HyperRefinedAdaptivePrecisionOptimizer lama_register["HyperRefinedAdaptivePrecisionOptimizer"] = HyperRefinedAdaptivePrecisionOptimizer - LLAMAHyperRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( - method="LLAMAHyperRefinedAdaptivePrecisionOptimizer" - ).set_name("LLAMAHyperRefinedAdaptivePrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionOptimizer").set_name("LLAMAHyperRefinedAdaptivePrecisionOptimizer", register=True) except Exception as e: print("HyperRefinedAdaptivePrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionSearch import ( - HyperRefinedAdaptivePrecisionSearch, - ) + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionSearch import HyperRefinedAdaptivePrecisionSearch lama_register["HyperRefinedAdaptivePrecisionSearch"] = HyperRefinedAdaptivePrecisionSearch - LLAMAHyperRefinedAdaptivePrecisionSearch = NonObjectOptimizer( - method="LLAMAHyperRefinedAdaptivePrecisionSearch" - ).set_name("LLAMAHyperRefinedAdaptivePrecisionSearch", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionSearch").set_name("LLAMAHyperRefinedAdaptivePrecisionSearch", register=True) except Exception as e: print("HyperRefinedAdaptivePrecisionSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV3 import ( - HyperRefinedDynamicPrecisionOptimizerV3, - ) + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV3 import HyperRefinedDynamicPrecisionOptimizerV3 lama_register["HyperRefinedDynamicPrecisionOptimizerV3"] = HyperRefinedDynamicPrecisionOptimizerV3 - LLAMAHyperRefinedDynamicPrecisionOptimizerV3 = NonObjectOptimizer( - method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3" - ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedDynamicPrecisionOptimizerV3 = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3").set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV3", register=True) except Exception as e: print("HyperRefinedDynamicPrecisionOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV49 import ( - HyperRefinedDynamicPrecisionOptimizerV49, - ) + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV49 import HyperRefinedDynamicPrecisionOptimizerV49 lama_register["HyperRefinedDynamicPrecisionOptimizerV49"] = HyperRefinedDynamicPrecisionOptimizerV49 - LLAMAHyperRefinedDynamicPrecisionOptimizerV49 = NonObjectOptimizer( - method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49" - ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV49", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedDynamicPrecisionOptimizerV49 = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49").set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV49", register=True) except Exception as e: print("HyperRefinedDynamicPrecisionOptimizerV49 can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperRefinedEnhancedRAMEDS import HyperRefinedEnhancedRAMEDS lama_register["HyperRefinedEnhancedRAMEDS"] = HyperRefinedEnhancedRAMEDS - LLAMAHyperRefinedEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS").set_name( - "LLAMAHyperRefinedEnhancedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS").set_name("LLAMAHyperRefinedEnhancedRAMEDS", register=True) except Exception as e: print("HyperRefinedEnhancedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.HyperRefinedQuantumVelocityOptimizer import ( - HyperRefinedQuantumVelocityOptimizer, - ) + from nevergrad.optimization.lama.HyperRefinedQuantumVelocityOptimizer import HyperRefinedQuantumVelocityOptimizer lama_register["HyperRefinedQuantumVelocityOptimizer"] = HyperRefinedQuantumVelocityOptimizer - LLAMAHyperRefinedQuantumVelocityOptimizer = NonObjectOptimizer( - method="LLAMAHyperRefinedQuantumVelocityOptimizer" - ).set_name("LLAMAHyperRefinedQuantumVelocityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAHyperRefinedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperRefinedQuantumVelocityOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedQuantumVelocityOptimizer").set_name("LLAMAHyperRefinedQuantumVelocityOptimizer", register=True) except Exception as e: print("HyperRefinedQuantumVelocityOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperSpiralDifferentialClimber import HyperSpiralDifferentialClimber lama_register["HyperSpiralDifferentialClimber"] = HyperSpiralDifferentialClimber - LLAMAHyperSpiralDifferentialClimber = NonObjectOptimizer( - method="LLAMAHyperSpiralDifferentialClimber" - ).set_name("LLAMAHyperSpiralDifferentialClimber", register=True) + res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperSpiralDifferentialClimber = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimber").set_name("LLAMAHyperSpiralDifferentialClimber", register=True) except Exception as e: print("HyperSpiralDifferentialClimber can not be imported: ", e) - try: from nevergrad.optimization.lama.HyperSpiralDifferentialClimberV2 import HyperSpiralDifferentialClimberV2 lama_register["HyperSpiralDifferentialClimberV2"] = HyperSpiralDifferentialClimberV2 - LLAMAHyperSpiralDifferentialClimberV2 = NonObjectOptimizer( - method="LLAMAHyperSpiralDifferentialClimberV2" - ).set_name("LLAMAHyperSpiralDifferentialClimberV2", register=True) + res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimberV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAHyperSpiralDifferentialClimberV2 = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimberV2").set_name("LLAMAHyperSpiralDifferentialClimberV2", register=True) except Exception as e: print("HyperSpiralDifferentialClimberV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.IADEA import IADEA lama_register["IADEA"] = IADEA + res = NonObjectOptimizer(method="LLAMAIADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAIADEA = NonObjectOptimizer(method="LLAMAIADEA").set_name("LLAMAIADEA", register=True) except Exception as e: print("IADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.IAGEA import IAGEA lama_register["IAGEA"] = IAGEA + res = NonObjectOptimizer(method="LLAMAIAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAIAGEA = NonObjectOptimizer(method="LLAMAIAGEA").set_name("LLAMAIAGEA", register=True) except Exception as e: print("IAGEA can not be imported: ", e) - try: from nevergrad.optimization.lama.IALNF import IALNF lama_register["IALNF"] = IALNF + res = NonObjectOptimizer(method="LLAMAIALNF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAIALNF = NonObjectOptimizer(method="LLAMAIALNF").set_name("LLAMAIALNF", register=True) except Exception as e: print("IALNF can not be imported: ", e) - try: from nevergrad.optimization.lama.IASDD import IASDD lama_register["IASDD"] = IASDD + res = NonObjectOptimizer(method="LLAMAIASDD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAIASDD = NonObjectOptimizer(method="LLAMAIASDD").set_name("LLAMAIASDD", register=True) except Exception as e: print("IASDD can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveCovarianceGradientSearch import ( - ImprovedAdaptiveCovarianceGradientSearch, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveCovarianceGradientSearch import ImprovedAdaptiveCovarianceGradientSearch lama_register["ImprovedAdaptiveCovarianceGradientSearch"] = ImprovedAdaptiveCovarianceGradientSearch - LLAMAImprovedAdaptiveCovarianceGradientSearch = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveCovarianceGradientSearch" - ).set_name("LLAMAImprovedAdaptiveCovarianceGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveCovarianceGradientSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveCovarianceGradientSearch").set_name("LLAMAImprovedAdaptiveCovarianceGradientSearch", register=True) except Exception as e: print("ImprovedAdaptiveCovarianceGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveDifferentialEvolution import ( - ImprovedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveDifferentialEvolution import ImprovedAdaptiveDifferentialEvolution lama_register["ImprovedAdaptiveDifferentialEvolution"] = ImprovedAdaptiveDifferentialEvolution - LLAMAImprovedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveDifferentialEvolution" - ).set_name("LLAMAImprovedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDifferentialEvolution").set_name("LLAMAImprovedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ImprovedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( - ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution import ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveEliteGuidedRestartDE import ( - ImprovedAdaptiveEliteGuidedRestartDE, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveEliteGuidedRestartDE import ImprovedAdaptiveEliteGuidedRestartDE lama_register["ImprovedAdaptiveEliteGuidedRestartDE"] = ImprovedAdaptiveEliteGuidedRestartDE - LLAMAImprovedAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveEliteGuidedRestartDE" - ).set_name("LLAMAImprovedAdaptiveEliteGuidedRestartDE", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveEliteGuidedRestartDE = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEliteGuidedRestartDE").set_name("LLAMAImprovedAdaptiveEliteGuidedRestartDE", register=True) except Exception as e: print("ImprovedAdaptiveEliteGuidedRestartDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveEnhancedQuantumHarmonySearch import ( - ImprovedAdaptiveEnhancedQuantumHarmonySearch, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveEnhancedQuantumHarmonySearch import ImprovedAdaptiveEnhancedQuantumHarmonySearch - lama_register["ImprovedAdaptiveEnhancedQuantumHarmonySearch"] = ( - ImprovedAdaptiveEnhancedQuantumHarmonySearch - ) - LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch" - ).set_name("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch", register=True) + lama_register["ImprovedAdaptiveEnhancedQuantumHarmonySearch"] = ImprovedAdaptiveEnhancedQuantumHarmonySearch + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch").set_name("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch", register=True) except Exception as e: print("ImprovedAdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveEvolutionaryHyperHeuristic import ( - ImprovedAdaptiveEvolutionaryHyperHeuristic, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveEvolutionaryHyperHeuristic import ImprovedAdaptiveEvolutionaryHyperHeuristic lama_register["ImprovedAdaptiveEvolutionaryHyperHeuristic"] = ImprovedAdaptiveEvolutionaryHyperHeuristic - LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic" - ).set_name("LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic").set_name("LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic", register=True) except Exception as e: print("ImprovedAdaptiveEvolutionaryHyperHeuristic can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveExplorationExploitationAlgorithm import ( - ImprovedAdaptiveExplorationExploitationAlgorithm, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveExplorationExploitationAlgorithm import ImprovedAdaptiveExplorationExploitationAlgorithm - lama_register["ImprovedAdaptiveExplorationExploitationAlgorithm"] = ( - ImprovedAdaptiveExplorationExploitationAlgorithm - ) - LLAMAImprovedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm" - ).set_name("LLAMAImprovedAdaptiveExplorationExploitationAlgorithm", register=True) + lama_register["ImprovedAdaptiveExplorationExploitationAlgorithm"] = ImprovedAdaptiveExplorationExploitationAlgorithm + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAImprovedAdaptiveExplorationExploitationAlgorithm", register=True) except Exception as e: print("ImprovedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveHarmonyMemeticAlgorithmV17 import ( - ImprovedAdaptiveHarmonyMemeticAlgorithmV17, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonyMemeticAlgorithmV17 import ImprovedAdaptiveHarmonyMemeticAlgorithmV17 lama_register["ImprovedAdaptiveHarmonyMemeticAlgorithmV17"] = ImprovedAdaptiveHarmonyMemeticAlgorithmV17 - LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17 = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17" - ).set_name("LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17 = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17").set_name("LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17", register=True) except Exception as e: print("ImprovedAdaptiveHarmonyMemeticAlgorithmV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveHarmonySearchWithCuckooInspiration import ( - ImprovedAdaptiveHarmonySearchWithCuckooInspiration, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonySearchWithCuckooInspiration import ImprovedAdaptiveHarmonySearchWithCuckooInspiration - lama_register["ImprovedAdaptiveHarmonySearchWithCuckooInspiration"] = ( - ImprovedAdaptiveHarmonySearchWithCuckooInspiration - ) - LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration" - ).set_name("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration", register=True) + lama_register["ImprovedAdaptiveHarmonySearchWithCuckooInspiration"] = ImprovedAdaptiveHarmonySearchWithCuckooInspiration + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration").set_name("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration", register=True) except Exception as e: print("ImprovedAdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridMetaOptimizer import ( - ImprovedAdaptiveHybridMetaOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveHybridMetaOptimizer import ImprovedAdaptiveHybridMetaOptimizer lama_register["ImprovedAdaptiveHybridMetaOptimizer"] = ImprovedAdaptiveHybridMetaOptimizer - LLAMAImprovedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHybridMetaOptimizer" - ).set_name("LLAMAImprovedAdaptiveHybridMetaOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridMetaOptimizer").set_name("LLAMAImprovedAdaptiveHybridMetaOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveHybridMetaOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimization import ( - ImprovedAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimization import ImprovedAdaptiveHybridOptimization lama_register["ImprovedAdaptiveHybridOptimization"] = ImprovedAdaptiveHybridOptimization - LLAMAImprovedAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHybridOptimization" - ).set_name("LLAMAImprovedAdaptiveHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimization").set_name("LLAMAImprovedAdaptiveHybridOptimization", register=True) except Exception as e: print("ImprovedAdaptiveHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimizer import ImprovedAdaptiveHybridOptimizer lama_register["ImprovedAdaptiveHybridOptimizer"] = ImprovedAdaptiveHybridOptimizer - LLAMAImprovedAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHybridOptimizer" - ).set_name("LLAMAImprovedAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimizer").set_name("LLAMAImprovedAdaptiveHybridOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridSearchOptimizer import ( - ImprovedAdaptiveHybridSearchOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveHybridSearchOptimizer import ImprovedAdaptiveHybridSearchOptimizer lama_register["ImprovedAdaptiveHybridSearchOptimizer"] = ImprovedAdaptiveHybridSearchOptimizer - LLAMAImprovedAdaptiveHybridSearchOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveHybridSearchOptimizer" - ).set_name("LLAMAImprovedAdaptiveHybridSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveHybridSearchOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridSearchOptimizer").set_name("LLAMAImprovedAdaptiveHybridSearchOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveHybridSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveLevyHarmonySearch import ( - ImprovedAdaptiveLevyHarmonySearch, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveLevyHarmonySearch import ImprovedAdaptiveLevyHarmonySearch lama_register["ImprovedAdaptiveLevyHarmonySearch"] = ImprovedAdaptiveLevyHarmonySearch - LLAMAImprovedAdaptiveLevyHarmonySearch = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveLevyHarmonySearch" - ).set_name("LLAMAImprovedAdaptiveLevyHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveLevyHarmonySearch").set_name("LLAMAImprovedAdaptiveLevyHarmonySearch", register=True) except Exception as e: print("ImprovedAdaptiveLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveMemeticHybridOptimizer import ( - ImprovedAdaptiveMemeticHybridOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveMemeticHybridOptimizer import ImprovedAdaptiveMemeticHybridOptimizer lama_register["ImprovedAdaptiveMemeticHybridOptimizer"] = ImprovedAdaptiveMemeticHybridOptimizer - LLAMAImprovedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveMemeticHybridOptimizer" - ).set_name("LLAMAImprovedAdaptiveMemeticHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMemeticHybridOptimizer").set_name("LLAMAImprovedAdaptiveMemeticHybridOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveMemeticHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiOperatorSearch import ( - ImprovedAdaptiveMultiOperatorSearch, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveMultiOperatorSearch import ImprovedAdaptiveMultiOperatorSearch lama_register["ImprovedAdaptiveMultiOperatorSearch"] = ImprovedAdaptiveMultiOperatorSearch - LLAMAImprovedAdaptiveMultiOperatorSearch = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveMultiOperatorSearch" - ).set_name("LLAMAImprovedAdaptiveMultiOperatorSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiOperatorSearch").set_name("LLAMAImprovedAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("ImprovedAdaptiveMultiOperatorSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyDifferentialEvolution import ( - ImprovedAdaptiveMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyDifferentialEvolution import ImprovedAdaptiveMultiStrategyDifferentialEvolution - lama_register["ImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( - ImprovedAdaptiveMultiStrategyDifferentialEvolution - ) - LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution" - ).set_name("LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) + lama_register["ImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ImprovedAdaptiveMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("ImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyOptimizer import ( - ImprovedAdaptiveMultiStrategyOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyOptimizer import ImprovedAdaptiveMultiStrategyOptimizer lama_register["ImprovedAdaptiveMultiStrategyOptimizer"] = ImprovedAdaptiveMultiStrategyOptimizer - LLAMAImprovedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveMultiStrategyOptimizer" - ).set_name("LLAMAImprovedAdaptiveMultiStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyOptimizer").set_name("LLAMAImprovedAdaptiveMultiStrategyOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveMultiStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveParticleSwarmOptimization import ( - ImprovedAdaptiveParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveParticleSwarmOptimization import ImprovedAdaptiveParticleSwarmOptimization lama_register["ImprovedAdaptiveParticleSwarmOptimization"] = ImprovedAdaptiveParticleSwarmOptimization - LLAMAImprovedAdaptiveParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveParticleSwarmOptimization" - ).set_name("LLAMAImprovedAdaptiveParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveParticleSwarmOptimization").set_name("LLAMAImprovedAdaptiveParticleSwarmOptimization", register=True) except Exception as e: print("ImprovedAdaptiveParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptivePopulationMemeticOptimizer import ( - ImprovedAdaptivePopulationMemeticOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptivePopulationMemeticOptimizer import ImprovedAdaptivePopulationMemeticOptimizer lama_register["ImprovedAdaptivePopulationMemeticOptimizer"] = ImprovedAdaptivePopulationMemeticOptimizer - LLAMAImprovedAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptivePopulationMemeticOptimizer" - ).set_name("LLAMAImprovedAdaptivePopulationMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptivePopulationMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptivePopulationMemeticOptimizer").set_name("LLAMAImprovedAdaptivePopulationMemeticOptimizer", register=True) except Exception as e: print("ImprovedAdaptivePopulationMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( - ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - lama_register["ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( - ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - ) - LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" - ).set_name("LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) + lama_register["ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch").set_name("LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) except Exception as e: print("ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedAdaptiveQuantumEntropyDE import ImprovedAdaptiveQuantumEntropyDE lama_register["ImprovedAdaptiveQuantumEntropyDE"] = ImprovedAdaptiveQuantumEntropyDE - LLAMAImprovedAdaptiveQuantumEntropyDE = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveQuantumEntropyDE" - ).set_name("LLAMAImprovedAdaptiveQuantumEntropyDE", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumEntropyDE").set_name("LLAMAImprovedAdaptiveQuantumEntropyDE", register=True) except Exception as e: print("ImprovedAdaptiveQuantumEntropyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumLevyOptimizer import ( - ImprovedAdaptiveQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumLevyOptimizer import ImprovedAdaptiveQuantumLevyOptimizer lama_register["ImprovedAdaptiveQuantumLevyOptimizer"] = ImprovedAdaptiveQuantumLevyOptimizer - LLAMAImprovedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveQuantumLevyOptimizer" - ).set_name("LLAMAImprovedAdaptiveQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumLevyOptimizer").set_name("LLAMAImprovedAdaptiveQuantumLevyOptimizer", register=True) except Exception as e: print("ImprovedAdaptiveQuantumLevyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedAdaptiveQuantumPSO import ImprovedAdaptiveQuantumPSO lama_register["ImprovedAdaptiveQuantumPSO"] = ImprovedAdaptiveQuantumPSO - LLAMAImprovedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO").set_name( - "LLAMAImprovedAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO").set_name("LLAMAImprovedAdaptiveQuantumPSO", register=True) except Exception as e: print("ImprovedAdaptiveQuantumPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumSwarmOptimization import ( - ImprovedAdaptiveQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumSwarmOptimization import ImprovedAdaptiveQuantumSwarmOptimization lama_register["ImprovedAdaptiveQuantumSwarmOptimization"] = ImprovedAdaptiveQuantumSwarmOptimization - LLAMAImprovedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedAdaptiveQuantumSwarmOptimization" - ).set_name("LLAMAImprovedAdaptiveQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumSwarmOptimization").set_name("LLAMAImprovedAdaptiveQuantumSwarmOptimization", register=True) except Exception as e: print("ImprovedAdaptiveQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedAdvancedHybridAdaptiveOptimization import ( - ImprovedAdvancedHybridAdaptiveOptimization, - ) + from nevergrad.optimization.lama.ImprovedAdvancedHybridAdaptiveOptimization import ImprovedAdvancedHybridAdaptiveOptimization lama_register["ImprovedAdvancedHybridAdaptiveOptimization"] = ImprovedAdvancedHybridAdaptiveOptimization - LLAMAImprovedAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( - method="LLAMAImprovedAdvancedHybridAdaptiveOptimization" - ).set_name("LLAMAImprovedAdvancedHybridAdaptiveOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedAdvancedHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAImprovedAdvancedHybridAdaptiveOptimization").set_name("LLAMAImprovedAdvancedHybridAdaptiveOptimization", register=True) except Exception as e: print("ImprovedAdvancedHybridAdaptiveOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedBalancedQuantumLevyDifferentialSearch import ( - ImprovedBalancedQuantumLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.ImprovedBalancedQuantumLevyDifferentialSearch import ImprovedBalancedQuantumLevyDifferentialSearch - lama_register["ImprovedBalancedQuantumLevyDifferentialSearch"] = ( - ImprovedBalancedQuantumLevyDifferentialSearch - ) - LLAMAImprovedBalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch" - ).set_name("LLAMAImprovedBalancedQuantumLevyDifferentialSearch", register=True) + lama_register["ImprovedBalancedQuantumLevyDifferentialSearch"] = ImprovedBalancedQuantumLevyDifferentialSearch + res = NonObjectOptimizer(method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedBalancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch").set_name("LLAMAImprovedBalancedQuantumLevyDifferentialSearch", register=True) except Exception as e: print("ImprovedBalancedQuantumLevyDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedCooperativeAdaptiveEvolutionaryOptimizer import ( - ImprovedCooperativeAdaptiveEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.ImprovedCooperativeAdaptiveEvolutionaryOptimizer import ImprovedCooperativeAdaptiveEvolutionaryOptimizer - lama_register["ImprovedCooperativeAdaptiveEvolutionaryOptimizer"] = ( - ImprovedCooperativeAdaptiveEvolutionaryOptimizer - ) - LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer" - ).set_name("LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer", register=True) + lama_register["ImprovedCooperativeAdaptiveEvolutionaryOptimizer"] = ImprovedCooperativeAdaptiveEvolutionaryOptimizer + res = NonObjectOptimizer(method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer").set_name("LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer", register=True) except Exception as e: print("ImprovedCooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedCulturalDifferentialMemeticEvolution import ( - ImprovedCulturalDifferentialMemeticEvolution, - ) + from nevergrad.optimization.lama.ImprovedCulturalDifferentialMemeticEvolution import ImprovedCulturalDifferentialMemeticEvolution - lama_register["ImprovedCulturalDifferentialMemeticEvolution"] = ( - ImprovedCulturalDifferentialMemeticEvolution - ) - LLAMAImprovedCulturalDifferentialMemeticEvolution = NonObjectOptimizer( - method="LLAMAImprovedCulturalDifferentialMemeticEvolution" - ).set_name("LLAMAImprovedCulturalDifferentialMemeticEvolution", register=True) + lama_register["ImprovedCulturalDifferentialMemeticEvolution"] = ImprovedCulturalDifferentialMemeticEvolution + res = NonObjectOptimizer(method="LLAMAImprovedCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedCulturalDifferentialMemeticEvolution = NonObjectOptimizer(method="LLAMAImprovedCulturalDifferentialMemeticEvolution").set_name("LLAMAImprovedCulturalDifferentialMemeticEvolution", register=True) except Exception as e: print("ImprovedCulturalDifferentialMemeticEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedCulturalEvolutionaryOptimizer import ( - ImprovedCulturalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.ImprovedCulturalEvolutionaryOptimizer import ImprovedCulturalEvolutionaryOptimizer lama_register["ImprovedCulturalEvolutionaryOptimizer"] = ImprovedCulturalEvolutionaryOptimizer - LLAMAImprovedCulturalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAImprovedCulturalEvolutionaryOptimizer" - ).set_name("LLAMAImprovedCulturalEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedCulturalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAImprovedCulturalEvolutionaryOptimizer").set_name("LLAMAImprovedCulturalEvolutionaryOptimizer", register=True) except Exception as e: print("ImprovedCulturalEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDiversifiedHarmonySearchOptimizer import ( - ImprovedDiversifiedHarmonySearchOptimizer, - ) + from nevergrad.optimization.lama.ImprovedDiversifiedHarmonySearchOptimizer import ImprovedDiversifiedHarmonySearchOptimizer lama_register["ImprovedDiversifiedHarmonySearchOptimizer"] = ImprovedDiversifiedHarmonySearchOptimizer - LLAMAImprovedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( - method="LLAMAImprovedDiversifiedHarmonySearchOptimizer" - ).set_name("LLAMAImprovedDiversifiedHarmonySearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAImprovedDiversifiedHarmonySearchOptimizer").set_name("LLAMAImprovedDiversifiedHarmonySearchOptimizer", register=True) except Exception as e: print("ImprovedDiversifiedHarmonySearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveMemoryStrategyV58 import ( - ImprovedDualPhaseAdaptiveMemoryStrategyV58, - ) + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveMemoryStrategyV58 import ImprovedDualPhaseAdaptiveMemoryStrategyV58 lama_register["ImprovedDualPhaseAdaptiveMemoryStrategyV58"] = ImprovedDualPhaseAdaptiveMemoryStrategyV58 - LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58 = NonObjectOptimizer( - method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58" - ).set_name("LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58 = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58").set_name("LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58", register=True) except Exception as e: print("ImprovedDualPhaseAdaptiveMemoryStrategyV58 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 import ( - ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1, - ) + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 import ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 - lama_register["ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1"] = ( - ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 - ) - LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 = NonObjectOptimizer( - method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1" - ).set_name("LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1", register=True) + lama_register["ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1"] = ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 + res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1").set_name("LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1", register=True) except Exception as e: print("ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 import ( - ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2, - ) + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 import ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 - lama_register["ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2"] = ( - ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 - ) - LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 = NonObjectOptimizer( - method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2" - ).set_name("LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2", register=True) + lama_register["ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2"] = ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 + res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2").set_name("LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2", register=True) except Exception as e: print("ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveExplorationOptimization import ( - ImprovedDynamicAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveExplorationOptimization import ImprovedDynamicAdaptiveExplorationOptimization - lama_register["ImprovedDynamicAdaptiveExplorationOptimization"] = ( - ImprovedDynamicAdaptiveExplorationOptimization - ) - LLAMAImprovedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAImprovedDynamicAdaptiveExplorationOptimization" - ).set_name("LLAMAImprovedDynamicAdaptiveExplorationOptimization", register=True) + lama_register["ImprovedDynamicAdaptiveExplorationOptimization"] = ImprovedDynamicAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveExplorationOptimization").set_name("LLAMAImprovedDynamicAdaptiveExplorationOptimization", register=True) except Exception as e: print("ImprovedDynamicAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSO import ( - ImprovedDynamicAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSO import ImprovedDynamicAdaptiveHybridDEPSO lama_register["ImprovedDynamicAdaptiveHybridDEPSO"] = ImprovedDynamicAdaptiveHybridDEPSO - LLAMAImprovedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAImprovedDynamicAdaptiveHybridDEPSO" - ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSO").set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSO", register=True) except Exception as e: print("ImprovedDynamicAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( - ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory import ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory - lama_register["ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( - ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory - ) - LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory" - ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) + lama_register["ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory + res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicHarmonyFireworksSearch import ( - ImprovedDynamicHarmonyFireworksSearch, - ) + from nevergrad.optimization.lama.ImprovedDynamicHarmonyFireworksSearch import ImprovedDynamicHarmonyFireworksSearch lama_register["ImprovedDynamicHarmonyFireworksSearch"] = ImprovedDynamicHarmonyFireworksSearch - LLAMAImprovedDynamicHarmonyFireworksSearch = NonObjectOptimizer( - method="LLAMAImprovedDynamicHarmonyFireworksSearch" - ).set_name("LLAMAImprovedDynamicHarmonyFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAImprovedDynamicHarmonyFireworksSearch").set_name("LLAMAImprovedDynamicHarmonyFireworksSearch", register=True) except Exception as e: print("ImprovedDynamicHarmonyFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicHybridDEPSOWithEliteMemoryV3 import ( - ImprovedDynamicHybridDEPSOWithEliteMemoryV3, - ) + from nevergrad.optimization.lama.ImprovedDynamicHybridDEPSOWithEliteMemoryV3 import ImprovedDynamicHybridDEPSOWithEliteMemoryV3 lama_register["ImprovedDynamicHybridDEPSOWithEliteMemoryV3"] = ImprovedDynamicHybridDEPSOWithEliteMemoryV3 - LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3 = NonObjectOptimizer( - method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3" - ).set_name("LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3 = NonObjectOptimizer(method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3").set_name("LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3", register=True) except Exception as e: print("ImprovedDynamicHybridDEPSOWithEliteMemoryV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedDynamicQuantumSwarmOptimization import ( - ImprovedDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedDynamicQuantumSwarmOptimization import ImprovedDynamicQuantumSwarmOptimization lama_register["ImprovedDynamicQuantumSwarmOptimization"] = ImprovedDynamicQuantumSwarmOptimization - LLAMAImprovedDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedDynamicQuantumSwarmOptimization" - ).set_name("LLAMAImprovedDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("ImprovedDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 import ( - ImprovedEliteAdaptiveCrowdingHybridOptimizerV2, - ) + from nevergrad.optimization.lama.ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 import ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 - lama_register["ImprovedEliteAdaptiveCrowdingHybridOptimizerV2"] = ( - ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 - ) - LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2" - ).set_name("LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2", register=True) + lama_register["ImprovedEliteAdaptiveCrowdingHybridOptimizerV2"] = ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 + res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2").set_name("LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2", register=True) except Exception as e: print("ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemeticDifferentialEvolution import ( - ImprovedEliteAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemeticDifferentialEvolution import ImprovedEliteAdaptiveMemeticDifferentialEvolution - lama_register["ImprovedEliteAdaptiveMemeticDifferentialEvolution"] = ( - ImprovedEliteAdaptiveMemeticDifferentialEvolution - ) - LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution", register=True) + lama_register["ImprovedEliteAdaptiveMemeticDifferentialEvolution"] = ImprovedEliteAdaptiveMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution").set_name("LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("ImprovedEliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemoryHybridOptimizer import ( - ImprovedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemoryHybridOptimizer import ImprovedEliteAdaptiveMemoryHybridOptimizer lama_register["ImprovedEliteAdaptiveMemoryHybridOptimizer"] = ImprovedEliteAdaptiveMemoryHybridOptimizer - LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("ImprovedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEliteGuidedHybridAdaptiveDE import ( - ImprovedEliteGuidedHybridAdaptiveDE, - ) + from nevergrad.optimization.lama.ImprovedEliteGuidedHybridAdaptiveDE import ImprovedEliteGuidedHybridAdaptiveDE lama_register["ImprovedEliteGuidedHybridAdaptiveDE"] = ImprovedEliteGuidedHybridAdaptiveDE - LLAMAImprovedEliteGuidedHybridAdaptiveDE = NonObjectOptimizer( - method="LLAMAImprovedEliteGuidedHybridAdaptiveDE" - ).set_name("LLAMAImprovedEliteGuidedHybridAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedHybridAdaptiveDE").set_name("LLAMAImprovedEliteGuidedHybridAdaptiveDE", register=True) except Exception as e: print("ImprovedEliteGuidedHybridAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE import ImprovedEliteGuidedMutationDE lama_register["ImprovedEliteGuidedMutationDE"] = ImprovedEliteGuidedMutationDE - LLAMAImprovedEliteGuidedMutationDE = NonObjectOptimizer( - method="LLAMAImprovedEliteGuidedMutationDE" - ).set_name("LLAMAImprovedEliteGuidedMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE").set_name("LLAMAImprovedEliteGuidedMutationDE", register=True) except Exception as e: print("ImprovedEliteGuidedMutationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE_v2 import ImprovedEliteGuidedMutationDE_v2 lama_register["ImprovedEliteGuidedMutationDE_v2"] = ImprovedEliteGuidedMutationDE_v2 - LLAMAImprovedEliteGuidedMutationDE_v2 = NonObjectOptimizer( - method="LLAMAImprovedEliteGuidedMutationDE_v2" - ).set_name("LLAMAImprovedEliteGuidedMutationDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE_v2").set_name("LLAMAImprovedEliteGuidedMutationDE_v2", register=True) except Exception as e: print("ImprovedEliteGuidedMutationDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEliteQuantumDifferentialMemeticOptimizer import ( - ImprovedEliteQuantumDifferentialMemeticOptimizer, - ) + from nevergrad.optimization.lama.ImprovedEliteQuantumDifferentialMemeticOptimizer import ImprovedEliteQuantumDifferentialMemeticOptimizer - lama_register["ImprovedEliteQuantumDifferentialMemeticOptimizer"] = ( - ImprovedEliteQuantumDifferentialMemeticOptimizer - ) - LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( - method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer" - ).set_name("LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer", register=True) + lama_register["ImprovedEliteQuantumDifferentialMemeticOptimizer"] = ImprovedEliteQuantumDifferentialMemeticOptimizer + res = NonObjectOptimizer(method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer").set_name("LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer", register=True) except Exception as e: print("ImprovedEliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 import ( - ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 import ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 - lama_register["ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6"] = ( - ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 - ) - LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6" - ).set_name("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6", register=True) + lama_register["ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6"] = ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6").set_name("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6", register=True) except Exception as e: print("ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 import ( - ImprovedEnhancedAdaptiveDynamicHarmonySearchV4, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 import ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 - lama_register["ImprovedEnhancedAdaptiveDynamicHarmonySearchV4"] = ( - ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 - ) - LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4" - ).set_name("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4", register=True) + lama_register["ImprovedEnhancedAdaptiveDynamicHarmonySearchV4"] = ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4").set_name("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4", register=True) except Exception as e: print("ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 import ( - ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 import ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 - lama_register["ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19"] = ( - ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 - ) - LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19" - ).set_name("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19", register=True) + lama_register["ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19"] = ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19").set_name("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19", register=True) except Exception as e: print("ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveLevyHarmonySearchV4 import ( - ImprovedEnhancedAdaptiveLevyHarmonySearchV4, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveLevyHarmonySearchV4 import ImprovedEnhancedAdaptiveLevyHarmonySearchV4 lama_register["ImprovedEnhancedAdaptiveLevyHarmonySearchV4"] = ImprovedEnhancedAdaptiveLevyHarmonySearchV4 - LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4" - ).set_name("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4").set_name("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4", register=True) except Exception as e: print("ImprovedEnhancedAdaptiveLevyHarmonySearchV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 import ( - ImprovedEnhancedAdaptiveMetaNetAQAPSOv4, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 import ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 lama_register["ImprovedEnhancedAdaptiveMetaNetAQAPSOv4"] = ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 - LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4" - ).set_name("LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4").set_name("LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4", register=True) except Exception as e: print("ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 import ( - ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15, - ) + from nevergrad.optimization.lama.ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 import ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 - lama_register["ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15"] = ( - ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 - ) - LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15" - ).set_name("LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15", register=True) + lama_register["ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15"] = ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15").set_name("LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15", register=True) except Exception as e: print("ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 import ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v54, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v54"] = ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 - ) - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54" - ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54", register=True) + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v54"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54", register=True) except Exception as e: print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 import ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v61, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v61"] = ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 - ) - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61" - ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61", register=True) + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v61"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61", register=True) except Exception as e: print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 import ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v65, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v65"] = ( - ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 - ) - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65" - ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65", register=True) + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v65"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65", register=True) except Exception as e: print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDiversifiedGravitationalSwarmOptimization import ( - ImprovedEnhancedDiversifiedGravitationalSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDiversifiedGravitationalSwarmOptimization import ImprovedEnhancedDiversifiedGravitationalSwarmOptimization - lama_register["ImprovedEnhancedDiversifiedGravitationalSwarmOptimization"] = ( - ImprovedEnhancedDiversifiedGravitationalSwarmOptimization - ) - LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization" - ).set_name("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization", register=True) + lama_register["ImprovedEnhancedDiversifiedGravitationalSwarmOptimization"] = ImprovedEnhancedDiversifiedGravitationalSwarmOptimization + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization").set_name("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization", register=True) except Exception as e: print("ImprovedEnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicDifferentialEvolution import ( - ImprovedEnhancedDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDynamicDifferentialEvolution import ImprovedEnhancedDynamicDifferentialEvolution - lama_register["ImprovedEnhancedDynamicDifferentialEvolution"] = ( - ImprovedEnhancedDynamicDifferentialEvolution - ) - LLAMAImprovedEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDynamicDifferentialEvolution" - ).set_name("LLAMAImprovedEnhancedDynamicDifferentialEvolution", register=True) + lama_register["ImprovedEnhancedDynamicDifferentialEvolution"] = ImprovedEnhancedDynamicDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicDifferentialEvolution").set_name("LLAMAImprovedEnhancedDynamicDifferentialEvolution", register=True) except Exception as e: print("ImprovedEnhancedDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicHarmonyAlgorithm import ( - ImprovedEnhancedDynamicHarmonyAlgorithm, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDynamicHarmonyAlgorithm import ImprovedEnhancedDynamicHarmonyAlgorithm lama_register["ImprovedEnhancedDynamicHarmonyAlgorithm"] = ImprovedEnhancedDynamicHarmonyAlgorithm - LLAMAImprovedEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm" - ).set_name("LLAMAImprovedEnhancedDynamicHarmonyAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm").set_name("LLAMAImprovedEnhancedDynamicHarmonyAlgorithm", register=True) except Exception as e: print("ImprovedEnhancedDynamicHarmonyAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicLevyHarmonySearch import ( - ImprovedEnhancedDynamicLevyHarmonySearch, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLevyHarmonySearch import ImprovedEnhancedDynamicLevyHarmonySearch lama_register["ImprovedEnhancedDynamicLevyHarmonySearch"] = ImprovedEnhancedDynamicLevyHarmonySearch - LLAMAImprovedEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch" - ).set_name("LLAMAImprovedEnhancedDynamicLevyHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch").set_name("LLAMAImprovedEnhancedDynamicLevyHarmonySearch", register=True) except Exception as e: print("ImprovedEnhancedDynamicLevyHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm import ( - ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm import ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm - lama_register["ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( - ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm - ) - LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm" - ).set_name("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) + lama_register["ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm"] = ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) except Exception as e: print("ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicQuantumSwarmOptimization import ( - ImprovedEnhancedDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedEnhancedDynamicQuantumSwarmOptimization import ImprovedEnhancedDynamicQuantumSwarmOptimization - lama_register["ImprovedEnhancedDynamicQuantumSwarmOptimization"] = ( - ImprovedEnhancedDynamicQuantumSwarmOptimization - ) - LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization" - ).set_name("LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization", register=True) + lama_register["ImprovedEnhancedDynamicQuantumSwarmOptimization"] = ImprovedEnhancedDynamicQuantumSwarmOptimization + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("ImprovedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedEliteGuidedMassQGSA_v84 import ( - ImprovedEnhancedEliteGuidedMassQGSA_v84, - ) + from nevergrad.optimization.lama.ImprovedEnhancedEliteGuidedMassQGSA_v84 import ImprovedEnhancedEliteGuidedMassQGSA_v84 lama_register["ImprovedEnhancedEliteGuidedMassQGSA_v84"] = ImprovedEnhancedEliteGuidedMassQGSA_v84 - LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84" - ).set_name("LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84 = NonObjectOptimizer(method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84").set_name("LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84", register=True) except Exception as e: print("ImprovedEnhancedEliteGuidedMassQGSA_v84 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 import ( - ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11, - ) + from nevergrad.optimization.lama.ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 import ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 - lama_register["ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11"] = ( - ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 - ) - LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11" - ).set_name("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11", register=True) + lama_register["ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11"] = ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 = NonObjectOptimizer(method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11").set_name("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11", register=True) except Exception as e: print("ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedEvolutionaryFireworksSearch import ( - ImprovedEnhancedEvolutionaryFireworksSearch, - ) + from nevergrad.optimization.lama.ImprovedEnhancedEvolutionaryFireworksSearch import ImprovedEnhancedEvolutionaryFireworksSearch lama_register["ImprovedEnhancedEvolutionaryFireworksSearch"] = ImprovedEnhancedEvolutionaryFireworksSearch - LLAMAImprovedEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( - method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch" - ).set_name("LLAMAImprovedEnhancedEvolutionaryFireworksSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch").set_name("LLAMAImprovedEnhancedEvolutionaryFireworksSearch", register=True) except Exception as e: print("ImprovedEnhancedEvolutionaryFireworksSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmOptimization import ( - ImprovedEnhancedFireworkAlgorithmOptimization, - ) + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmOptimization import ImprovedEnhancedFireworkAlgorithmOptimization - lama_register["ImprovedEnhancedFireworkAlgorithmOptimization"] = ( - ImprovedEnhancedFireworkAlgorithmOptimization - ) - LLAMAImprovedEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( - method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization" - ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmOptimization", register=True) + lama_register["ImprovedEnhancedFireworkAlgorithmOptimization"] = ImprovedEnhancedFireworkAlgorithmOptimization + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization").set_name("LLAMAImprovedEnhancedFireworkAlgorithmOptimization", register=True) except Exception as e: print("ImprovedEnhancedFireworkAlgorithmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( - ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, - ) + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - lama_register["ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( - ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - ) - LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( - method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" - ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) + lama_register["ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) except Exception as e: print("ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedGradientDifferentialEvolution import ( - ImprovedEnhancedGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedEnhancedGradientDifferentialEvolution import ImprovedEnhancedGradientDifferentialEvolution - lama_register["ImprovedEnhancedGradientDifferentialEvolution"] = ( - ImprovedEnhancedGradientDifferentialEvolution - ) - LLAMAImprovedEnhancedGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedEnhancedGradientDifferentialEvolution" - ).set_name("LLAMAImprovedEnhancedGradientDifferentialEvolution", register=True) + lama_register["ImprovedEnhancedGradientDifferentialEvolution"] = ImprovedEnhancedGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedGradientDifferentialEvolution").set_name("LLAMAImprovedEnhancedGradientDifferentialEvolution", register=True) except Exception as e: print("ImprovedEnhancedGradientDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchOB import ImprovedEnhancedHarmonySearchOB lama_register["ImprovedEnhancedHarmonySearchOB"] = ImprovedEnhancedHarmonySearchOB - LLAMAImprovedEnhancedHarmonySearchOB = NonObjectOptimizer( - method="LLAMAImprovedEnhancedHarmonySearchOB" - ).set_name("LLAMAImprovedEnhancedHarmonySearchOB", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchOB").set_name("LLAMAImprovedEnhancedHarmonySearchOB", register=True) except Exception as e: print("ImprovedEnhancedHarmonySearchOB can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( - ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, - ) + from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - lama_register["ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( - ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - ) - LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( - method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" - ).set_name("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) + lama_register["ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) except Exception as e: print("ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedMemeticHarmonyOptimization import ( - ImprovedEnhancedMemeticHarmonyOptimization, - ) + from nevergrad.optimization.lama.ImprovedEnhancedMemeticHarmonyOptimization import ImprovedEnhancedMemeticHarmonyOptimization lama_register["ImprovedEnhancedMemeticHarmonyOptimization"] = ImprovedEnhancedMemeticHarmonyOptimization - LLAMAImprovedEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( - method="LLAMAImprovedEnhancedMemeticHarmonyOptimization" - ).set_name("LLAMAImprovedEnhancedMemeticHarmonyOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedMemeticHarmonyOptimization").set_name("LLAMAImprovedEnhancedMemeticHarmonyOptimization", register=True) except Exception as e: print("ImprovedEnhancedMemeticHarmonyOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution import ( - ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution import ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution - lama_register["ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( - ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution - ) - LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) + lama_register["ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedQuantumHarmonySearch import ( - ImprovedEnhancedQuantumHarmonySearch, - ) + from nevergrad.optimization.lama.ImprovedEnhancedQuantumHarmonySearch import ImprovedEnhancedQuantumHarmonySearch lama_register["ImprovedEnhancedQuantumHarmonySearch"] = ImprovedEnhancedQuantumHarmonySearch - LLAMAImprovedEnhancedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAImprovedEnhancedQuantumHarmonySearch" - ).set_name("LLAMAImprovedEnhancedQuantumHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumHarmonySearch").set_name("LLAMAImprovedEnhancedQuantumHarmonySearch", register=True) except Exception as e: print("ImprovedEnhancedQuantumHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedRefinedAdaptiveQGSA_v61 import ( - ImprovedEnhancedRefinedAdaptiveQGSA_v61, - ) + from nevergrad.optimization.lama.ImprovedEnhancedRefinedAdaptiveQGSA_v61 import ImprovedEnhancedRefinedAdaptiveQGSA_v61 lama_register["ImprovedEnhancedRefinedAdaptiveQGSA_v61"] = ImprovedEnhancedRefinedAdaptiveQGSA_v61 - LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61 = NonObjectOptimizer( - method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61" - ).set_name("LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61 = NonObjectOptimizer(method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61").set_name("LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61", register=True) except Exception as e: print("ImprovedEnhancedRefinedAdaptiveQGSA_v61 can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedEnhancedSADE import ImprovedEnhancedSADE lama_register["ImprovedEnhancedSADE"] = ImprovedEnhancedSADE - LLAMAImprovedEnhancedSADE = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE").set_name( - "LLAMAImprovedEnhancedSADE", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedSADE = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE").set_name("LLAMAImprovedEnhancedSADE", register=True) except Exception as e: print("ImprovedEnhancedSADE can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedEnhancedStochasticMetaHeuristicOptimizer import ( - ImprovedEnhancedStochasticMetaHeuristicOptimizer, - ) + from nevergrad.optimization.lama.ImprovedEnhancedStochasticMetaHeuristicOptimizer import ImprovedEnhancedStochasticMetaHeuristicOptimizer - lama_register["ImprovedEnhancedStochasticMetaHeuristicOptimizer"] = ( - ImprovedEnhancedStochasticMetaHeuristicOptimizer - ) - LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( - method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer" - ).set_name("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer", register=True) + lama_register["ImprovedEnhancedStochasticMetaHeuristicOptimizer"] = ImprovedEnhancedStochasticMetaHeuristicOptimizer + res = NonObjectOptimizer(method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer").set_name("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer", register=True) except Exception as e: print("ImprovedEnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedEnsembleMemeticOptimizer import ImprovedEnsembleMemeticOptimizer lama_register["ImprovedEnsembleMemeticOptimizer"] = ImprovedEnsembleMemeticOptimizer - LLAMAImprovedEnsembleMemeticOptimizer = NonObjectOptimizer( - method="LLAMAImprovedEnsembleMemeticOptimizer" - ).set_name("LLAMAImprovedEnsembleMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedEnsembleMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedEnsembleMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedEnsembleMemeticOptimizer").set_name("LLAMAImprovedEnsembleMemeticOptimizer", register=True) except Exception as e: print("ImprovedEnsembleMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedFireworkAlgorithm import ImprovedFireworkAlgorithm lama_register["ImprovedFireworkAlgorithm"] = ImprovedFireworkAlgorithm - LLAMAImprovedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm").set_name( - "LLAMAImprovedFireworkAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm").set_name("LLAMAImprovedFireworkAlgorithm", register=True) except Exception as e: print("ImprovedFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveDifferentialEvolution import ( - ImprovedHybridAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedHybridAdaptiveDifferentialEvolution import ImprovedHybridAdaptiveDifferentialEvolution lama_register["ImprovedHybridAdaptiveDifferentialEvolution"] = ImprovedHybridAdaptiveDifferentialEvolution - LLAMAImprovedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedHybridAdaptiveDifferentialEvolution" - ).set_name("LLAMAImprovedHybridAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveDifferentialEvolution").set_name("LLAMAImprovedHybridAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ImprovedHybridAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveGeneticSwarmOptimizer import ( - ImprovedHybridAdaptiveGeneticSwarmOptimizer, - ) + from nevergrad.optimization.lama.ImprovedHybridAdaptiveGeneticSwarmOptimizer import ImprovedHybridAdaptiveGeneticSwarmOptimizer lama_register["ImprovedHybridAdaptiveGeneticSwarmOptimizer"] = ImprovedHybridAdaptiveGeneticSwarmOptimizer - LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( - method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer" - ).set_name("LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer", register=True) except Exception as e: print("ImprovedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveHarmonicFireworksTabuSearch import ( - ImprovedHybridAdaptiveHarmonicFireworksTabuSearch, - ) + from nevergrad.optimization.lama.ImprovedHybridAdaptiveHarmonicFireworksTabuSearch import ImprovedHybridAdaptiveHarmonicFireworksTabuSearch - lama_register["ImprovedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( - ImprovedHybridAdaptiveHarmonicFireworksTabuSearch - ) - LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( - method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch" - ).set_name("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) + lama_register["ImprovedHybridAdaptiveHarmonicFireworksTabuSearch"] = ImprovedHybridAdaptiveHarmonicFireworksTabuSearch + res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) except Exception as e: print("ImprovedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedHybridCMAESDE import ImprovedHybridCMAESDE lama_register["ImprovedHybridCMAESDE"] = ImprovedHybridCMAESDE - LLAMAImprovedHybridCMAESDE = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE").set_name( - "LLAMAImprovedHybridCMAESDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridCMAESDE = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE").set_name("LLAMAImprovedHybridCMAESDE", register=True) except Exception as e: print("ImprovedHybridCMAESDE can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedHybridGeneticPSO import ImprovedHybridGeneticPSO lama_register["ImprovedHybridGeneticPSO"] = ImprovedHybridGeneticPSO - LLAMAImprovedHybridGeneticPSO = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO").set_name( - "LLAMAImprovedHybridGeneticPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridGeneticPSO = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO").set_name("LLAMAImprovedHybridGeneticPSO", register=True) except Exception as e: print("ImprovedHybridGeneticPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedHybridPSODEOptimizer import ImprovedHybridPSODEOptimizer lama_register["ImprovedHybridPSODEOptimizer"] = ImprovedHybridPSODEOptimizer - LLAMAImprovedHybridPSODEOptimizer = NonObjectOptimizer( - method="LLAMAImprovedHybridPSODEOptimizer" - ).set_name("LLAMAImprovedHybridPSODEOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMAImprovedHybridPSODEOptimizer").set_name("LLAMAImprovedHybridPSODEOptimizer", register=True) except Exception as e: print("ImprovedHybridPSODEOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedIterativeAdaptiveGradientEvolver import ( - ImprovedIterativeAdaptiveGradientEvolver, - ) + from nevergrad.optimization.lama.ImprovedIterativeAdaptiveGradientEvolver import ImprovedIterativeAdaptiveGradientEvolver lama_register["ImprovedIterativeAdaptiveGradientEvolver"] = ImprovedIterativeAdaptiveGradientEvolver - LLAMAImprovedIterativeAdaptiveGradientEvolver = NonObjectOptimizer( - method="LLAMAImprovedIterativeAdaptiveGradientEvolver" - ).set_name("LLAMAImprovedIterativeAdaptiveGradientEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedIterativeAdaptiveGradientEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedIterativeAdaptiveGradientEvolver = NonObjectOptimizer(method="LLAMAImprovedIterativeAdaptiveGradientEvolver").set_name("LLAMAImprovedIterativeAdaptiveGradientEvolver", register=True) except Exception as e: print("ImprovedIterativeAdaptiveGradientEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedMetaDynamicQuantumSwarmOptimization import ( - ImprovedMetaDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.ImprovedMetaDynamicQuantumSwarmOptimization import ImprovedMetaDynamicQuantumSwarmOptimization lama_register["ImprovedMetaDynamicQuantumSwarmOptimization"] = ImprovedMetaDynamicQuantumSwarmOptimization - LLAMAImprovedMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization" - ).set_name("LLAMAImprovedMetaDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedMetaDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("ImprovedMetaDynamicQuantumSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedMultiOperatorSearch import ImprovedMultiOperatorSearch lama_register["ImprovedMultiOperatorSearch"] = ImprovedMultiOperatorSearch - LLAMAImprovedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch").set_name( - "LLAMAImprovedMultiOperatorSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch").set_name("LLAMAImprovedMultiOperatorSearch", register=True) except Exception as e: print("ImprovedMultiOperatorSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedMultiStrategySelfAdaptiveDE import ( - ImprovedMultiStrategySelfAdaptiveDE, - ) + from nevergrad.optimization.lama.ImprovedMultiStrategySelfAdaptiveDE import ImprovedMultiStrategySelfAdaptiveDE lama_register["ImprovedMultiStrategySelfAdaptiveDE"] = ImprovedMultiStrategySelfAdaptiveDE - LLAMAImprovedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( - method="LLAMAImprovedMultiStrategySelfAdaptiveDE" - ).set_name("LLAMAImprovedMultiStrategySelfAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAImprovedMultiStrategySelfAdaptiveDE").set_name("LLAMAImprovedMultiStrategySelfAdaptiveDE", register=True) except Exception as e: print("ImprovedMultiStrategySelfAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedOppositionBasedDifferentialEvolution import ( - ImprovedOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedOppositionBasedDifferentialEvolution import ImprovedOppositionBasedDifferentialEvolution - lama_register["ImprovedOppositionBasedDifferentialEvolution"] = ( - ImprovedOppositionBasedDifferentialEvolution - ) - LLAMAImprovedOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedOppositionBasedDifferentialEvolution" - ).set_name("LLAMAImprovedOppositionBasedDifferentialEvolution", register=True) + lama_register["ImprovedOppositionBasedDifferentialEvolution"] = ImprovedOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedOppositionBasedDifferentialEvolution").set_name("LLAMAImprovedOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("ImprovedOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedPrecisionAdaptiveEvolutiveStrategy import ( - ImprovedPrecisionAdaptiveEvolutiveStrategy, - ) + from nevergrad.optimization.lama.ImprovedPrecisionAdaptiveEvolutiveStrategy import ImprovedPrecisionAdaptiveEvolutiveStrategy lama_register["ImprovedPrecisionAdaptiveEvolutiveStrategy"] = ImprovedPrecisionAdaptiveEvolutiveStrategy - LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy = NonObjectOptimizer( - method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy" - ).set_name("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy = NonObjectOptimizer(method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy").set_name("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy", register=True) except Exception as e: print("ImprovedPrecisionAdaptiveEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning import ( - ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning, - ) + from nevergrad.optimization.lama.ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning import ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning - lama_register["ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( - ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning - ) - LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( - method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning" - ).set_name("LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) + lama_register["ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning + res = NonObjectOptimizer(method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer(method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning").set_name("LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) except Exception as e: print("ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedQuantumEnhancedDynamicDifferentialEvolution import ( - ImprovedQuantumEnhancedDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedQuantumEnhancedDynamicDifferentialEvolution import ImprovedQuantumEnhancedDynamicDifferentialEvolution - lama_register["ImprovedQuantumEnhancedDynamicDifferentialEvolution"] = ( - ImprovedQuantumEnhancedDynamicDifferentialEvolution - ) - LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution" - ).set_name("LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution", register=True) + lama_register["ImprovedQuantumEnhancedDynamicDifferentialEvolution"] = ImprovedQuantumEnhancedDynamicDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution").set_name("LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution", register=True) except Exception as e: print("ImprovedQuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.ImprovedQuantumHarmonySearch import ImprovedQuantumHarmonySearch lama_register["ImprovedQuantumHarmonySearch"] = ImprovedQuantumHarmonySearch - LLAMAImprovedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAImprovedQuantumHarmonySearch" - ).set_name("LLAMAImprovedQuantumHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedQuantumHarmonySearch").set_name("LLAMAImprovedQuantumHarmonySearch", register=True) except Exception as e: print("ImprovedQuantumHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedQuantumLevyAdaptiveHybridSearch import ( - ImprovedQuantumLevyAdaptiveHybridSearch, - ) + from nevergrad.optimization.lama.ImprovedQuantumLevyAdaptiveHybridSearch import ImprovedQuantumLevyAdaptiveHybridSearch lama_register["ImprovedQuantumLevyAdaptiveHybridSearch"] = ImprovedQuantumLevyAdaptiveHybridSearch - LLAMAImprovedQuantumLevyAdaptiveHybridSearch = NonObjectOptimizer( - method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch" - ).set_name("LLAMAImprovedQuantumLevyAdaptiveHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedQuantumLevyAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch").set_name("LLAMAImprovedQuantumLevyAdaptiveHybridSearch", register=True) except Exception as e: print("ImprovedQuantumLevyAdaptiveHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedQuantumSimulatedAnnealing import ( - ImprovedQuantumSimulatedAnnealing, - ) + from nevergrad.optimization.lama.ImprovedQuantumSimulatedAnnealing import ImprovedQuantumSimulatedAnnealing lama_register["ImprovedQuantumSimulatedAnnealing"] = ImprovedQuantumSimulatedAnnealing - LLAMAImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAImprovedQuantumSimulatedAnnealing" - ).set_name("LLAMAImprovedQuantumSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAImprovedQuantumSimulatedAnnealing").set_name("LLAMAImprovedQuantumSimulatedAnnealing", register=True) except Exception as e: print("ImprovedQuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedAdaptiveDynamicExplorationOptimization import ( - ImprovedRefinedAdaptiveDynamicExplorationOptimization, - ) + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveDynamicExplorationOptimization import ImprovedRefinedAdaptiveDynamicExplorationOptimization - lama_register["ImprovedRefinedAdaptiveDynamicExplorationOptimization"] = ( - ImprovedRefinedAdaptiveDynamicExplorationOptimization - ) - LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( - method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization" - ).set_name("LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization", register=True) + lama_register["ImprovedRefinedAdaptiveDynamicExplorationOptimization"] = ImprovedRefinedAdaptiveDynamicExplorationOptimization + res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization", register=True) except Exception as e: print("ImprovedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedAdaptiveMultiOperatorSearch import ( - ImprovedRefinedAdaptiveMultiOperatorSearch, - ) + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveMultiOperatorSearch import ImprovedRefinedAdaptiveMultiOperatorSearch lama_register["ImprovedRefinedAdaptiveMultiOperatorSearch"] = ImprovedRefinedAdaptiveMultiOperatorSearch - LLAMAImprovedRefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( - method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch" - ).set_name("LLAMAImprovedRefinedAdaptiveMultiOperatorSearch", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch").set_name("LLAMAImprovedRefinedAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("ImprovedRefinedAdaptiveMultiOperatorSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( - ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution import ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution - lama_register["ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( - ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution - ) - LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution" - ).set_name("LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) + lama_register["ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization import ( - ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization import ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization - lama_register["ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( - ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization - ) - LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization" - ).set_name("LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) + lama_register["ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization"] = ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization + res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) except Exception as e: print("ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 import ( - ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 import ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 - lama_register["ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4"] = ( - ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 - ) - LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4" - ).set_name("LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4", register=True) + lama_register["ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4"] = ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 + res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4", register=True) except Exception as e: print("ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO import ( - ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO import ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO - lama_register["ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO"] = ( - ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO - ) - LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO" - ).set_name("LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO", register=True) + lama_register["ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO"] = ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO + res = NonObjectOptimizer(method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO", register=True) except Exception as e: print("ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveDifferentialEvolution import ( - ImprovedSelfAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedSelfAdaptiveDifferentialEvolution import ImprovedSelfAdaptiveDifferentialEvolution lama_register["ImprovedSelfAdaptiveDifferentialEvolution"] = ImprovedSelfAdaptiveDifferentialEvolution - LLAMAImprovedSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedSelfAdaptiveDifferentialEvolution" - ).set_name("LLAMAImprovedSelfAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveDifferentialEvolution").set_name("LLAMAImprovedSelfAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ImprovedSelfAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveHybridOptimizer import ( - ImprovedSelfAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.ImprovedSelfAdaptiveHybridOptimizer import ImprovedSelfAdaptiveHybridOptimizer lama_register["ImprovedSelfAdaptiveHybridOptimizer"] = ImprovedSelfAdaptiveHybridOptimizer - LLAMAImprovedSelfAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAImprovedSelfAdaptiveHybridOptimizer" - ).set_name("LLAMAImprovedSelfAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedSelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveHybridOptimizer").set_name("LLAMAImprovedSelfAdaptiveHybridOptimizer", register=True) except Exception as e: print("ImprovedSelfAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution import ( - ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution, - ) + from nevergrad.optimization.lama.ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution import ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution - lama_register["ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution"] = ( - ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution - ) - LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution" - ).set_name("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution", register=True) + lama_register["ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution"] = ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution", register=True) except Exception as e: print("ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ImprovedUnifiedAdaptiveMemeticOptimizer import ( - ImprovedUnifiedAdaptiveMemeticOptimizer, - ) + from nevergrad.optimization.lama.ImprovedUnifiedAdaptiveMemeticOptimizer import ImprovedUnifiedAdaptiveMemeticOptimizer lama_register["ImprovedUnifiedAdaptiveMemeticOptimizer"] = ImprovedUnifiedAdaptiveMemeticOptimizer - LLAMAImprovedUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( - method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer" - ).set_name("LLAMAImprovedUnifiedAdaptiveMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAImprovedUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer").set_name("LLAMAImprovedUnifiedAdaptiveMemeticOptimizer", register=True) except Exception as e: print("ImprovedUnifiedAdaptiveMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.IncrementalCrossoverOptimization import IncrementalCrossoverOptimization lama_register["IncrementalCrossoverOptimization"] = IncrementalCrossoverOptimization - LLAMAIncrementalCrossoverOptimization = NonObjectOptimizer( - method="LLAMAIncrementalCrossoverOptimization" - ).set_name("LLAMAIncrementalCrossoverOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAIncrementalCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIncrementalCrossoverOptimization = NonObjectOptimizer(method="LLAMAIncrementalCrossoverOptimization").set_name("LLAMAIncrementalCrossoverOptimization", register=True) except Exception as e: print("IncrementalCrossoverOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.IntelligentDynamicDualPhaseStrategyV39 import ( - IntelligentDynamicDualPhaseStrategyV39, - ) + from nevergrad.optimization.lama.IntelligentDynamicDualPhaseStrategyV39 import IntelligentDynamicDualPhaseStrategyV39 lama_register["IntelligentDynamicDualPhaseStrategyV39"] = IntelligentDynamicDualPhaseStrategyV39 - LLAMAIntelligentDynamicDualPhaseStrategyV39 = NonObjectOptimizer( - method="LLAMAIntelligentDynamicDualPhaseStrategyV39" - ).set_name("LLAMAIntelligentDynamicDualPhaseStrategyV39", register=True) + res = NonObjectOptimizer(method="LLAMAIntelligentDynamicDualPhaseStrategyV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIntelligentDynamicDualPhaseStrategyV39 = NonObjectOptimizer(method="LLAMAIntelligentDynamicDualPhaseStrategyV39").set_name("LLAMAIntelligentDynamicDualPhaseStrategyV39", register=True) except Exception as e: print("IntelligentDynamicDualPhaseStrategyV39 can not be imported: ", e) - try: - from nevergrad.optimization.lama.IntelligentEvolvingAdaptiveStrategyV34 import ( - IntelligentEvolvingAdaptiveStrategyV34, - ) + from nevergrad.optimization.lama.IntelligentEvolvingAdaptiveStrategyV34 import IntelligentEvolvingAdaptiveStrategyV34 lama_register["IntelligentEvolvingAdaptiveStrategyV34"] = IntelligentEvolvingAdaptiveStrategyV34 - LLAMAIntelligentEvolvingAdaptiveStrategyV34 = NonObjectOptimizer( - method="LLAMAIntelligentEvolvingAdaptiveStrategyV34" - ).set_name("LLAMAIntelligentEvolvingAdaptiveStrategyV34", register=True) + res = NonObjectOptimizer(method="LLAMAIntelligentEvolvingAdaptiveStrategyV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIntelligentEvolvingAdaptiveStrategyV34 = NonObjectOptimizer(method="LLAMAIntelligentEvolvingAdaptiveStrategyV34").set_name("LLAMAIntelligentEvolvingAdaptiveStrategyV34", register=True) except Exception as e: print("IntelligentEvolvingAdaptiveStrategyV34 can not be imported: ", e) - try: from nevergrad.optimization.lama.IntelligentPerturbationSearch import IntelligentPerturbationSearch lama_register["IntelligentPerturbationSearch"] = IntelligentPerturbationSearch - LLAMAIntelligentPerturbationSearch = NonObjectOptimizer( - method="LLAMAIntelligentPerturbationSearch" - ).set_name("LLAMAIntelligentPerturbationSearch", register=True) + res = NonObjectOptimizer(method="LLAMAIntelligentPerturbationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIntelligentPerturbationSearch = NonObjectOptimizer(method="LLAMAIntelligentPerturbationSearch").set_name("LLAMAIntelligentPerturbationSearch", register=True) except Exception as e: print("IntelligentPerturbationSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.IterativeAdaptiveDifferentialEvolution import ( - IterativeAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.IterativeAdaptiveDifferentialEvolution import IterativeAdaptiveDifferentialEvolution lama_register["IterativeAdaptiveDifferentialEvolution"] = IterativeAdaptiveDifferentialEvolution - LLAMAIterativeAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAIterativeAdaptiveDifferentialEvolution" - ).set_name("LLAMAIterativeAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAIterativeAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIterativeAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAIterativeAdaptiveDifferentialEvolution").set_name("LLAMAIterativeAdaptiveDifferentialEvolution", register=True) except Exception as e: print("IterativeAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.IterativeProgressiveDifferentialEvolution import ( - IterativeProgressiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.IterativeProgressiveDifferentialEvolution import IterativeProgressiveDifferentialEvolution lama_register["IterativeProgressiveDifferentialEvolution"] = IterativeProgressiveDifferentialEvolution - LLAMAIterativeProgressiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAIterativeProgressiveDifferentialEvolution" - ).set_name("LLAMAIterativeProgressiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAIterativeProgressiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAIterativeProgressiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAIterativeProgressiveDifferentialEvolution").set_name("LLAMAIterativeProgressiveDifferentialEvolution", register=True) except Exception as e: print("IterativeProgressiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.LADESA import LADESA lama_register["LADESA"] = LADESA + res = NonObjectOptimizer(method="LLAMALADESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMALADESA = NonObjectOptimizer(method="LLAMALADESA").set_name("LLAMALADESA", register=True) except Exception as e: print("LADESA can not be imported: ", e) - try: from nevergrad.optimization.lama.LAOS import LAOS lama_register["LAOS"] = LAOS + res = NonObjectOptimizer(method="LLAMALAOS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMALAOS = NonObjectOptimizer(method="LLAMALAOS").set_name("LLAMALAOS", register=True) except Exception as e: print("LAOS can not be imported: ", e) - try: - from nevergrad.optimization.lama.LearningAdaptiveMemoryEnhancedStrategyV42 import ( - LearningAdaptiveMemoryEnhancedStrategyV42, - ) + from nevergrad.optimization.lama.LearningAdaptiveMemoryEnhancedStrategyV42 import LearningAdaptiveMemoryEnhancedStrategyV42 lama_register["LearningAdaptiveMemoryEnhancedStrategyV42"] = LearningAdaptiveMemoryEnhancedStrategyV42 - LLAMALearningAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( - method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42" - ).set_name("LLAMALearningAdaptiveMemoryEnhancedStrategyV42", register=True) + res = NonObjectOptimizer(method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMALearningAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer(method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42").set_name("LLAMALearningAdaptiveMemoryEnhancedStrategyV42", register=True) except Exception as e: print("LearningAdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) - try: from nevergrad.optimization.lama.LearningAdaptiveStrategyV24 import LearningAdaptiveStrategyV24 lama_register["LearningAdaptiveStrategyV24"] = LearningAdaptiveStrategyV24 - LLAMALearningAdaptiveStrategyV24 = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24").set_name( - "LLAMALearningAdaptiveStrategyV24", register=True - ) + res = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMALearningAdaptiveStrategyV24 = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24").set_name("LLAMALearningAdaptiveStrategyV24", register=True) except Exception as e: print("LearningAdaptiveStrategyV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.LevyEnhancedAdaptiveSimulatedAnnealingDE import ( - LevyEnhancedAdaptiveSimulatedAnnealingDE, - ) + from nevergrad.optimization.lama.LevyEnhancedAdaptiveSimulatedAnnealingDE import LevyEnhancedAdaptiveSimulatedAnnealingDE lama_register["LevyEnhancedAdaptiveSimulatedAnnealingDE"] = LevyEnhancedAdaptiveSimulatedAnnealingDE - LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( - method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE" - ).set_name("LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE", register=True) + res = NonObjectOptimizer(method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE = NonObjectOptimizer(method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE").set_name("LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE", register=True) except Exception as e: print("LevyEnhancedAdaptiveSimulatedAnnealingDE can not be imported: ", e) - try: from nevergrad.optimization.lama.MADE import MADE lama_register["MADE"] = MADE + res = NonObjectOptimizer(method="LLAMAMADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAMADE = NonObjectOptimizer(method="LLAMAMADE").set_name("LLAMAMADE", register=True) except Exception as e: print("MADE can not be imported: ", e) - try: from nevergrad.optimization.lama.MIDEAT import MIDEAT lama_register["MIDEAT"] = MIDEAT + res = NonObjectOptimizer(method="LLAMAMIDEAT")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAMIDEAT = NonObjectOptimizer(method="LLAMAMIDEAT").set_name("LLAMAMIDEAT", register=True) except Exception as e: print("MIDEAT can not be imported: ", e) - try: from nevergrad.optimization.lama.MSADE import MSADE lama_register["MSADE"] = MSADE + res = NonObjectOptimizer(method="LLAMAMSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAMSADE = NonObjectOptimizer(method="LLAMAMSADE").set_name("LLAMAMSADE", register=True) except Exception as e: print("MSADE can not be imported: ", e) - try: from nevergrad.optimization.lama.MSEAS import MSEAS lama_register["MSEAS"] = MSEAS + res = NonObjectOptimizer(method="LLAMAMSEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAMSEAS = NonObjectOptimizer(method="LLAMAMSEAS").set_name("LLAMAMSEAS", register=True) except Exception as e: print("MSEAS can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemeticAdaptiveDifferentialEvolution import ( - MemeticAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.MemeticAdaptiveDifferentialEvolution import MemeticAdaptiveDifferentialEvolution lama_register["MemeticAdaptiveDifferentialEvolution"] = MemeticAdaptiveDifferentialEvolution - LLAMAMemeticAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAMemeticAdaptiveDifferentialEvolution" - ).set_name("LLAMAMemeticAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAMemeticAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemeticAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAMemeticAdaptiveDifferentialEvolution").set_name("LLAMAMemeticAdaptiveDifferentialEvolution", register=True) except Exception as e: print("MemeticAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemeticDifferentialEvolutionOptimizer import ( - MemeticDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.MemeticDifferentialEvolutionOptimizer import MemeticDifferentialEvolutionOptimizer lama_register["MemeticDifferentialEvolutionOptimizer"] = MemeticDifferentialEvolutionOptimizer - LLAMAMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAMemeticDifferentialEvolutionOptimizer" - ).set_name("LLAMAMemeticDifferentialEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAMemeticDifferentialEvolutionOptimizer").set_name("LLAMAMemeticDifferentialEvolutionOptimizer", register=True) except Exception as e: print("MemeticDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemeticElitistDifferentialEvolutionWithDynamicFandCR import ( - MemeticElitistDifferentialEvolutionWithDynamicFandCR, - ) + from nevergrad.optimization.lama.MemeticElitistDifferentialEvolutionWithDynamicFandCR import MemeticElitistDifferentialEvolutionWithDynamicFandCR - lama_register["MemeticElitistDifferentialEvolutionWithDynamicFandCR"] = ( - MemeticElitistDifferentialEvolutionWithDynamicFandCR - ) - LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR = NonObjectOptimizer( - method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR" - ).set_name("LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR", register=True) + lama_register["MemeticElitistDifferentialEvolutionWithDynamicFandCR"] = MemeticElitistDifferentialEvolutionWithDynamicFandCR + res = NonObjectOptimizer(method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR = NonObjectOptimizer(method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR").set_name("LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR", register=True) except Exception as e: print("MemeticElitistDifferentialEvolutionWithDynamicFandCR can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemeticEnhancedParticleSwarmOptimization import ( - MemeticEnhancedParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.MemeticEnhancedParticleSwarmOptimization import MemeticEnhancedParticleSwarmOptimization lama_register["MemeticEnhancedParticleSwarmOptimization"] = MemeticEnhancedParticleSwarmOptimization - LLAMAMemeticEnhancedParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAMemeticEnhancedParticleSwarmOptimization" - ).set_name("LLAMAMemeticEnhancedParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAMemeticEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemeticEnhancedParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAMemeticEnhancedParticleSwarmOptimization").set_name("LLAMAMemeticEnhancedParticleSwarmOptimization", register=True) except Exception as e: print("MemeticEnhancedParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemeticSpatialDifferentialEvolution import ( - MemeticSpatialDifferentialEvolution, - ) + from nevergrad.optimization.lama.MemeticSpatialDifferentialEvolution import MemeticSpatialDifferentialEvolution lama_register["MemeticSpatialDifferentialEvolution"] = MemeticSpatialDifferentialEvolution - LLAMAMemeticSpatialDifferentialEvolution = NonObjectOptimizer( - method="LLAMAMemeticSpatialDifferentialEvolution" - ).set_name("LLAMAMemeticSpatialDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAMemeticSpatialDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemeticSpatialDifferentialEvolution = NonObjectOptimizer(method="LLAMAMemeticSpatialDifferentialEvolution").set_name("LLAMAMemeticSpatialDifferentialEvolution", register=True) except Exception as e: print("MemeticSpatialDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.MemoryBasedSimulatedAnnealing import MemoryBasedSimulatedAnnealing lama_register["MemoryBasedSimulatedAnnealing"] = MemoryBasedSimulatedAnnealing - LLAMAMemoryBasedSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAMemoryBasedSimulatedAnnealing" - ).set_name("LLAMAMemoryBasedSimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAMemoryBasedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryBasedSimulatedAnnealing = NonObjectOptimizer(method="LLAMAMemoryBasedSimulatedAnnealing").set_name("LLAMAMemoryBasedSimulatedAnnealing", register=True) except Exception as e: print("MemoryBasedSimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.MemoryEnhancedAdaptiveAnnealing import MemoryEnhancedAdaptiveAnnealing lama_register["MemoryEnhancedAdaptiveAnnealing"] = MemoryEnhancedAdaptiveAnnealing - LLAMAMemoryEnhancedAdaptiveAnnealing = NonObjectOptimizer( - method="LLAMAMemoryEnhancedAdaptiveAnnealing" - ).set_name("LLAMAMemoryEnhancedAdaptiveAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryEnhancedAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveAnnealing").set_name("LLAMAMemoryEnhancedAdaptiveAnnealing", register=True) except Exception as e: print("MemoryEnhancedAdaptiveAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealing import ( - MemoryEnhancedAdaptiveMultiPhaseAnnealing, - ) + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealing import MemoryEnhancedAdaptiveMultiPhaseAnnealing lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealing"] = MemoryEnhancedAdaptiveMultiPhaseAnnealing - LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( - method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing" - ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing").set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing", register=True) except Exception as e: print("MemoryEnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( - MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient, - ) + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient import MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient - lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( - MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient - ) - LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( - method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient" - ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) + lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient + res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient").set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) except Exception as e: print("MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemoryEnhancedDynamicHybridOptimizer import ( - MemoryEnhancedDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.MemoryEnhancedDynamicHybridOptimizer import MemoryEnhancedDynamicHybridOptimizer lama_register["MemoryEnhancedDynamicHybridOptimizer"] = MemoryEnhancedDynamicHybridOptimizer - LLAMAMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMAMemoryEnhancedDynamicHybridOptimizer" - ).set_name("LLAMAMemoryEnhancedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMAMemoryEnhancedDynamicHybridOptimizer", register=True) except Exception as e: print("MemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.MemoryGuidedAdaptiveDualPhaseStrategyV40 import ( - MemoryGuidedAdaptiveDualPhaseStrategyV40, - ) + from nevergrad.optimization.lama.MemoryGuidedAdaptiveDualPhaseStrategyV40 import MemoryGuidedAdaptiveDualPhaseStrategyV40 lama_register["MemoryGuidedAdaptiveDualPhaseStrategyV40"] = MemoryGuidedAdaptiveDualPhaseStrategyV40 - LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40 = NonObjectOptimizer( - method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40" - ).set_name("LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40", register=True) + res = NonObjectOptimizer(method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40 = NonObjectOptimizer(method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40").set_name("LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40", register=True) except Exception as e: print("MemoryGuidedAdaptiveDualPhaseStrategyV40 can not be imported: ", e) - try: from nevergrad.optimization.lama.MemoryHybridAdaptiveDE import MemoryHybridAdaptiveDE lama_register["MemoryHybridAdaptiveDE"] = MemoryHybridAdaptiveDE - LLAMAMemoryHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE").set_name( - "LLAMAMemoryHybridAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMemoryHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE").set_name("LLAMAMemoryHybridAdaptiveDE", register=True) except Exception as e: print("MemoryHybridAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.MetaDynamicPrecisionOptimizerV1 import MetaDynamicPrecisionOptimizerV1 lama_register["MetaDynamicPrecisionOptimizerV1"] = MetaDynamicPrecisionOptimizerV1 - LLAMAMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( - method="LLAMAMetaDynamicPrecisionOptimizerV1" - ).set_name("LLAMAMetaDynamicPrecisionOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMAMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAMetaDynamicPrecisionOptimizerV1").set_name("LLAMAMetaDynamicPrecisionOptimizerV1", register=True) except Exception as e: print("MetaDynamicPrecisionOptimizerV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.MetaDynamicQuantumSwarmOptimization import ( - MetaDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.MetaDynamicQuantumSwarmOptimization import MetaDynamicQuantumSwarmOptimization lama_register["MetaDynamicQuantumSwarmOptimization"] = MetaDynamicQuantumSwarmOptimization - LLAMAMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAMetaDynamicQuantumSwarmOptimization" - ).set_name("LLAMAMetaDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAMetaDynamicQuantumSwarmOptimization").set_name("LLAMAMetaDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("MetaDynamicQuantumSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.MetaHarmonicSearch import MetaHarmonicSearch lama_register["MetaHarmonicSearch"] = MetaHarmonicSearch - LLAMAMetaHarmonicSearch = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch").set_name( - "LLAMAMetaHarmonicSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMetaHarmonicSearch = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch").set_name("LLAMAMetaHarmonicSearch", register=True) except Exception as e: print("MetaHarmonicSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.MetaHarmonicSearch2 import MetaHarmonicSearch2 lama_register["MetaHarmonicSearch2"] = MetaHarmonicSearch2 - LLAMAMetaHarmonicSearch2 = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2").set_name( - "LLAMAMetaHarmonicSearch2", register=True - ) + res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMetaHarmonicSearch2 = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2").set_name("LLAMAMetaHarmonicSearch2", register=True) except Exception as e: print("MetaHarmonicSearch2 can not be imported: ", e) - try: from nevergrad.optimization.lama.MetaNetAQAPSO import MetaNetAQAPSO lama_register["MetaNetAQAPSO"] = MetaNetAQAPSO - LLAMAMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO").set_name( - "LLAMAMetaNetAQAPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO").set_name("LLAMAMetaNetAQAPSO", register=True) except Exception as e: print("MetaNetAQAPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.MomentumGradientExploration import MomentumGradientExploration lama_register["MomentumGradientExploration"] = MomentumGradientExploration - LLAMAMomentumGradientExploration = NonObjectOptimizer(method="LLAMAMomentumGradientExploration").set_name( - "LLAMAMomentumGradientExploration", register=True - ) + res = NonObjectOptimizer(method="LLAMAMomentumGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMomentumGradientExploration = NonObjectOptimizer(method="LLAMAMomentumGradientExploration").set_name("LLAMAMomentumGradientExploration", register=True) except Exception as e: print("MomentumGradientExploration can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiFacetAdaptiveSearch import MultiFacetAdaptiveSearch lama_register["MultiFacetAdaptiveSearch"] = MultiFacetAdaptiveSearch - LLAMAMultiFacetAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch").set_name( - "LLAMAMultiFacetAdaptiveSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiFacetAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch").set_name("LLAMAMultiFacetAdaptiveSearch", register=True) except Exception as e: print("MultiFacetAdaptiveSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiFocalAdaptiveOptimizer import MultiFocalAdaptiveOptimizer lama_register["MultiFocalAdaptiveOptimizer"] = MultiFocalAdaptiveOptimizer - LLAMAMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer").set_name( - "LLAMAMultiFocalAdaptiveOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer").set_name("LLAMAMultiFocalAdaptiveOptimizer", register=True) except Exception as e: print("MultiFocalAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiLayeredAdaptiveCovarianceMatrixEvolution import ( - MultiLayeredAdaptiveCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.MultiLayeredAdaptiveCovarianceMatrixEvolution import MultiLayeredAdaptiveCovarianceMatrixEvolution - lama_register["MultiLayeredAdaptiveCovarianceMatrixEvolution"] = ( - MultiLayeredAdaptiveCovarianceMatrixEvolution - ) - LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution" - ).set_name("LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution", register=True) + lama_register["MultiLayeredAdaptiveCovarianceMatrixEvolution"] = MultiLayeredAdaptiveCovarianceMatrixEvolution + res = NonObjectOptimizer(method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution").set_name("LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution", register=True) except Exception as e: print("MultiLayeredAdaptiveCovarianceMatrixEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiModalMemoryEnhancedHybridOptimizer import ( - MultiModalMemoryEnhancedHybridOptimizer, - ) + from nevergrad.optimization.lama.MultiModalMemoryEnhancedHybridOptimizer import MultiModalMemoryEnhancedHybridOptimizer lama_register["MultiModalMemoryEnhancedHybridOptimizer"] = MultiModalMemoryEnhancedHybridOptimizer - LLAMAMultiModalMemoryEnhancedHybridOptimizer = NonObjectOptimizer( - method="LLAMAMultiModalMemoryEnhancedHybridOptimizer" - ).set_name("LLAMAMultiModalMemoryEnhancedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAMultiModalMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiModalMemoryEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAMultiModalMemoryEnhancedHybridOptimizer").set_name("LLAMAMultiModalMemoryEnhancedHybridOptimizer", register=True) except Exception as e: print("MultiModalMemoryEnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 import ( - MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66, - ) + from nevergrad.optimization.lama.MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 import MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 - lama_register["MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66"] = ( - MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 - ) - LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 = NonObjectOptimizer( - method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66" - ).set_name("LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66", register=True) + lama_register["MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66"] = MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 + res = NonObjectOptimizer(method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 = NonObjectOptimizer(method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66").set_name("LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66", register=True) except Exception as e: print("MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 import ( - MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67, - ) + from nevergrad.optimization.lama.MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 import MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 - lama_register["MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67"] = ( - MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 - ) - LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 = NonObjectOptimizer( - method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67" - ).set_name("LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67", register=True) + lama_register["MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67"] = MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 + res = NonObjectOptimizer(method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 = NonObjectOptimizer(method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67").set_name("LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67", register=True) except Exception as e: print("MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiOperatorSearch import MultiOperatorSearch lama_register["MultiOperatorSearch"] = MultiOperatorSearch - LLAMAMultiOperatorSearch = NonObjectOptimizer(method="LLAMAMultiOperatorSearch").set_name( - "LLAMAMultiOperatorSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiOperatorSearch = NonObjectOptimizer(method="LLAMAMultiOperatorSearch").set_name("LLAMAMultiOperatorSearch", register=True) except Exception as e: print("MultiOperatorSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiPhaseAdaptiveDE import MultiPhaseAdaptiveDE lama_register["MultiPhaseAdaptiveDE"] = MultiPhaseAdaptiveDE - LLAMAMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE").set_name( - "LLAMAMultiPhaseAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE").set_name("LLAMAMultiPhaseAdaptiveDE", register=True) except Exception as e: print("MultiPhaseAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiPhaseAdaptiveDifferentialEvolution import ( - MultiPhaseAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.MultiPhaseAdaptiveDifferentialEvolution import MultiPhaseAdaptiveDifferentialEvolution lama_register["MultiPhaseAdaptiveDifferentialEvolution"] = MultiPhaseAdaptiveDifferentialEvolution - LLAMAMultiPhaseAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAMultiPhaseAdaptiveDifferentialEvolution" - ).set_name("LLAMAMultiPhaseAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPhaseAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDifferentialEvolution").set_name("LLAMAMultiPhaseAdaptiveDifferentialEvolution", register=True) except Exception as e: print("MultiPhaseAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiPhaseAdaptiveExplorationOptimization import ( - MultiPhaseAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.MultiPhaseAdaptiveExplorationOptimization import MultiPhaseAdaptiveExplorationOptimization lama_register["MultiPhaseAdaptiveExplorationOptimization"] = MultiPhaseAdaptiveExplorationOptimization - LLAMAMultiPhaseAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAMultiPhaseAdaptiveExplorationOptimization" - ).set_name("LLAMAMultiPhaseAdaptiveExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPhaseAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveExplorationOptimization").set_name("LLAMAMultiPhaseAdaptiveExplorationOptimization", register=True) except Exception as e: print("MultiPhaseAdaptiveExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiPhaseAdaptiveHybridDEPSO import MultiPhaseAdaptiveHybridDEPSO lama_register["MultiPhaseAdaptiveHybridDEPSO"] = MultiPhaseAdaptiveHybridDEPSO - LLAMAMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAMultiPhaseAdaptiveHybridDEPSO" - ).set_name("LLAMAMultiPhaseAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMAMultiPhaseAdaptiveHybridDEPSO", register=True) except Exception as e: print("MultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiPhaseDiversityAdaptiveDE import MultiPhaseDiversityAdaptiveDE lama_register["MultiPhaseDiversityAdaptiveDE"] = MultiPhaseDiversityAdaptiveDE - LLAMAMultiPhaseDiversityAdaptiveDE = NonObjectOptimizer( - method="LLAMAMultiPhaseDiversityAdaptiveDE" - ).set_name("LLAMAMultiPhaseDiversityAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAMultiPhaseDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPhaseDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseDiversityAdaptiveDE").set_name("LLAMAMultiPhaseDiversityAdaptiveDE", register=True) except Exception as e: print("MultiPhaseDiversityAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiPopulationAdaptiveMemorySearch import ( - MultiPopulationAdaptiveMemorySearch, - ) + from nevergrad.optimization.lama.MultiPopulationAdaptiveMemorySearch import MultiPopulationAdaptiveMemorySearch lama_register["MultiPopulationAdaptiveMemorySearch"] = MultiPopulationAdaptiveMemorySearch - LLAMAMultiPopulationAdaptiveMemorySearch = NonObjectOptimizer( - method="LLAMAMultiPopulationAdaptiveMemorySearch" - ).set_name("LLAMAMultiPopulationAdaptiveMemorySearch", register=True) + res = NonObjectOptimizer(method="LLAMAMultiPopulationAdaptiveMemorySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiPopulationAdaptiveMemorySearch = NonObjectOptimizer(method="LLAMAMultiPopulationAdaptiveMemorySearch").set_name("LLAMAMultiPopulationAdaptiveMemorySearch", register=True) except Exception as e: print("MultiPopulationAdaptiveMemorySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiScaleAdaptiveHybridOptimization import ( - MultiScaleAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.MultiScaleAdaptiveHybridOptimization import MultiScaleAdaptiveHybridOptimization lama_register["MultiScaleAdaptiveHybridOptimization"] = MultiScaleAdaptiveHybridOptimization - LLAMAMultiScaleAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMAMultiScaleAdaptiveHybridOptimization" - ).set_name("LLAMAMultiScaleAdaptiveHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAMultiScaleAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiScaleAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAMultiScaleAdaptiveHybridOptimization").set_name("LLAMAMultiScaleAdaptiveHybridOptimization", register=True) except Exception as e: print("MultiScaleAdaptiveHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiScaleGradientExploration import MultiScaleGradientExploration lama_register["MultiScaleGradientExploration"] = MultiScaleGradientExploration - LLAMAMultiScaleGradientExploration = NonObjectOptimizer( - method="LLAMAMultiScaleGradientExploration" - ).set_name("LLAMAMultiScaleGradientExploration", register=True) + res = NonObjectOptimizer(method="LLAMAMultiScaleGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiScaleGradientExploration = NonObjectOptimizer(method="LLAMAMultiScaleGradientExploration").set_name("LLAMAMultiScaleGradientExploration", register=True) except Exception as e: print("MultiScaleGradientExploration can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiScaleGradientSearch import MultiScaleGradientSearch lama_register["MultiScaleGradientSearch"] = MultiScaleGradientSearch - LLAMAMultiScaleGradientSearch = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch").set_name( - "LLAMAMultiScaleGradientSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiScaleGradientSearch = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch").set_name("LLAMAMultiScaleGradientSearch", register=True) except Exception as e: print("MultiScaleGradientSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiScaleQuadraticSearch import MultiScaleQuadraticSearch lama_register["MultiScaleQuadraticSearch"] = MultiScaleQuadraticSearch - LLAMAMultiScaleQuadraticSearch = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch").set_name( - "LLAMAMultiScaleQuadraticSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiScaleQuadraticSearch = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch").set_name("LLAMAMultiScaleQuadraticSearch", register=True) except Exception as e: print("MultiScaleQuadraticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiStageAdaptiveSearch import MultiStageAdaptiveSearch lama_register["MultiStageAdaptiveSearch"] = MultiStageAdaptiveSearch - LLAMAMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch").set_name( - "LLAMAMultiStageAdaptiveSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch").set_name("LLAMAMultiStageAdaptiveSearch", register=True) except Exception as e: print("MultiStageAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStageHybridGradientBoostedAnnealing import ( - MultiStageHybridGradientBoostedAnnealing, - ) + from nevergrad.optimization.lama.MultiStageHybridGradientBoostedAnnealing import MultiStageHybridGradientBoostedAnnealing lama_register["MultiStageHybridGradientBoostedAnnealing"] = MultiStageHybridGradientBoostedAnnealing - LLAMAMultiStageHybridGradientBoostedAnnealing = NonObjectOptimizer( - method="LLAMAMultiStageHybridGradientBoostedAnnealing" - ).set_name("LLAMAMultiStageHybridGradientBoostedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStageHybridGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStageHybridGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAMultiStageHybridGradientBoostedAnnealing").set_name("LLAMAMultiStageHybridGradientBoostedAnnealing", register=True) except Exception as e: print("MultiStageHybridGradientBoostedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStrategyAdaptiveGradientEvolution import ( - MultiStrategyAdaptiveGradientEvolution, - ) + from nevergrad.optimization.lama.MultiStrategyAdaptiveGradientEvolution import MultiStrategyAdaptiveGradientEvolution lama_register["MultiStrategyAdaptiveGradientEvolution"] = MultiStrategyAdaptiveGradientEvolution - LLAMAMultiStrategyAdaptiveGradientEvolution = NonObjectOptimizer( - method="LLAMAMultiStrategyAdaptiveGradientEvolution" - ).set_name("LLAMAMultiStrategyAdaptiveGradientEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveGradientEvolution").set_name("LLAMAMultiStrategyAdaptiveGradientEvolution", register=True) except Exception as e: print("MultiStrategyAdaptiveGradientEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStrategyAdaptiveSwarmDifferentialEvolution import ( - MultiStrategyAdaptiveSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.MultiStrategyAdaptiveSwarmDifferentialEvolution import MultiStrategyAdaptiveSwarmDifferentialEvolution - lama_register["MultiStrategyAdaptiveSwarmDifferentialEvolution"] = ( - MultiStrategyAdaptiveSwarmDifferentialEvolution - ) - LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution" - ).set_name("LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution", register=True) + lama_register["MultiStrategyAdaptiveSwarmDifferentialEvolution"] = MultiStrategyAdaptiveSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution").set_name("LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution", register=True) except Exception as e: print("MultiStrategyAdaptiveSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStrategyDifferentialEvolution import ( - MultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.MultiStrategyDifferentialEvolution import MultiStrategyDifferentialEvolution lama_register["MultiStrategyDifferentialEvolution"] = MultiStrategyDifferentialEvolution - LLAMAMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMAMultiStrategyDifferentialEvolution" - ).set_name("LLAMAMultiStrategyDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyDifferentialEvolution").set_name("LLAMAMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("MultiStrategyDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiStrategyMemeticAlgorithm import MultiStrategyMemeticAlgorithm lama_register["MultiStrategyMemeticAlgorithm"] = MultiStrategyMemeticAlgorithm - LLAMAMultiStrategyMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAMultiStrategyMemeticAlgorithm" - ).set_name("LLAMAMultiStrategyMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStrategyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAMultiStrategyMemeticAlgorithm").set_name("LLAMAMultiStrategyMemeticAlgorithm", register=True) except Exception as e: print("MultiStrategyMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStrategyQuantumCognitionOptimizerV9 import ( - MultiStrategyQuantumCognitionOptimizerV9, - ) + from nevergrad.optimization.lama.MultiStrategyQuantumCognitionOptimizerV9 import MultiStrategyQuantumCognitionOptimizerV9 lama_register["MultiStrategyQuantumCognitionOptimizerV9"] = MultiStrategyQuantumCognitionOptimizerV9 - LLAMAMultiStrategyQuantumCognitionOptimizerV9 = NonObjectOptimizer( - method="LLAMAMultiStrategyQuantumCognitionOptimizerV9" - ).set_name("LLAMAMultiStrategyQuantumCognitionOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumCognitionOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyQuantumCognitionOptimizerV9 = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumCognitionOptimizerV9").set_name("LLAMAMultiStrategyQuantumCognitionOptimizerV9", register=True) except Exception as e: print("MultiStrategyQuantumCognitionOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.MultiStrategyQuantumLevyOptimizer import ( - MultiStrategyQuantumLevyOptimizer, - ) + from nevergrad.optimization.lama.MultiStrategyQuantumLevyOptimizer import MultiStrategyQuantumLevyOptimizer lama_register["MultiStrategyQuantumLevyOptimizer"] = MultiStrategyQuantumLevyOptimizer - LLAMAMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( - method="LLAMAMultiStrategyQuantumLevyOptimizer" - ).set_name("LLAMAMultiStrategyQuantumLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumLevyOptimizer").set_name("LLAMAMultiStrategyQuantumLevyOptimizer", register=True) except Exception as e: print("MultiStrategyQuantumLevyOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiStrategySelfAdaptiveDE import MultiStrategySelfAdaptiveDE lama_register["MultiStrategySelfAdaptiveDE"] = MultiStrategySelfAdaptiveDE - LLAMAMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE").set_name( - "LLAMAMultiStrategySelfAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE").set_name("LLAMAMultiStrategySelfAdaptiveDE", register=True) except Exception as e: print("MultiStrategySelfAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.MultiSwarmAdaptiveDE_PSO import MultiSwarmAdaptiveDE_PSO lama_register["MultiSwarmAdaptiveDE_PSO"] = MultiSwarmAdaptiveDE_PSO - LLAMAMultiSwarmAdaptiveDE_PSO = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO").set_name( - "LLAMAMultiSwarmAdaptiveDE_PSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAMultiSwarmAdaptiveDE_PSO = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO").set_name("LLAMAMultiSwarmAdaptiveDE_PSO", register=True) except Exception as e: print("MultiSwarmAdaptiveDE_PSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.NovelAdaptiveHarmonicFireworksTabuSearch import ( - NovelAdaptiveHarmonicFireworksTabuSearch, - ) + from nevergrad.optimization.lama.NovelAdaptiveHarmonicFireworksTabuSearch import NovelAdaptiveHarmonicFireworksTabuSearch lama_register["NovelAdaptiveHarmonicFireworksTabuSearch"] = NovelAdaptiveHarmonicFireworksTabuSearch - LLAMANovelAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( - method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch" - ).set_name("LLAMANovelAdaptiveHarmonicFireworksTabuSearch", register=True) + res = NonObjectOptimizer(method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMANovelAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMANovelAdaptiveHarmonicFireworksTabuSearch", register=True) except Exception as e: print("NovelAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.NovelDynamicFireworkAlgorithm import NovelDynamicFireworkAlgorithm lama_register["NovelDynamicFireworkAlgorithm"] = NovelDynamicFireworkAlgorithm - LLAMANovelDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMANovelDynamicFireworkAlgorithm" - ).set_name("LLAMANovelDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMANovelDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMANovelDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMANovelDynamicFireworkAlgorithm").set_name("LLAMANovelDynamicFireworkAlgorithm", register=True) except Exception as e: print("NovelDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 import ( - NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2, - ) + from nevergrad.optimization.lama.NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 import NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 - lama_register["NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2"] = ( - NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 - ) - LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( - method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2" - ).set_name("LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2", register=True) + lama_register["NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2"] = NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 + res = NonObjectOptimizer(method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2", register=True) except Exception as e: print("NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.NovelHarmonyTabuSearch import NovelHarmonyTabuSearch lama_register["NovelHarmonyTabuSearch"] = NovelHarmonyTabuSearch - LLAMANovelHarmonyTabuSearch = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch").set_name( - "LLAMANovelHarmonyTabuSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMANovelHarmonyTabuSearch = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch").set_name("LLAMANovelHarmonyTabuSearch", register=True) except Exception as e: print("NovelHarmonyTabuSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.ODEMF import ODEMF lama_register["ODEMF"] = ODEMF + res = NonObjectOptimizer(method="LLAMAODEMF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAODEMF = NonObjectOptimizer(method="LLAMAODEMF").set_name("LLAMAODEMF", register=True) except Exception as e: print("ODEMF can not be imported: ", e) - try: from nevergrad.optimization.lama.ORAMED import ORAMED lama_register["ORAMED"] = ORAMED + res = NonObjectOptimizer(method="LLAMAORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAORAMED = NonObjectOptimizer(method="LLAMAORAMED").set_name("LLAMAORAMED", register=True) except Exception as e: print("ORAMED can not be imported: ", e) - try: from nevergrad.optimization.lama.OctopusSwarmAlgorithm import OctopusSwarmAlgorithm lama_register["OctopusSwarmAlgorithm"] = OctopusSwarmAlgorithm - LLAMAOctopusSwarmAlgorithm = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm").set_name( - "LLAMAOctopusSwarmAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOctopusSwarmAlgorithm = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm").set_name("LLAMAOctopusSwarmAlgorithm", register=True) except Exception as e: print("OctopusSwarmAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalAdaptiveDifferentialEvolution import ( - OptimalAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialEvolution import OptimalAdaptiveDifferentialEvolution lama_register["OptimalAdaptiveDifferentialEvolution"] = OptimalAdaptiveDifferentialEvolution - LLAMAOptimalAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAOptimalAdaptiveDifferentialEvolution" - ).set_name("LLAMAOptimalAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialEvolution").set_name("LLAMAOptimalAdaptiveDifferentialEvolution", register=True) except Exception as e: print("OptimalAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalAdaptiveDifferentialSearch import ( - OptimalAdaptiveDifferentialSearch, - ) + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialSearch import OptimalAdaptiveDifferentialSearch lama_register["OptimalAdaptiveDifferentialSearch"] = OptimalAdaptiveDifferentialSearch - LLAMAOptimalAdaptiveDifferentialSearch = NonObjectOptimizer( - method="LLAMAOptimalAdaptiveDifferentialSearch" - ).set_name("LLAMAOptimalAdaptiveDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialSearch").set_name("LLAMAOptimalAdaptiveDifferentialSearch", register=True) except Exception as e: print("OptimalAdaptiveDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalAdaptiveMutationEnhancedSearch import ( - OptimalAdaptiveMutationEnhancedSearch, - ) + from nevergrad.optimization.lama.OptimalAdaptiveMutationEnhancedSearch import OptimalAdaptiveMutationEnhancedSearch lama_register["OptimalAdaptiveMutationEnhancedSearch"] = OptimalAdaptiveMutationEnhancedSearch - LLAMAOptimalAdaptiveMutationEnhancedSearch = NonObjectOptimizer( - method="LLAMAOptimalAdaptiveMutationEnhancedSearch" - ).set_name("LLAMAOptimalAdaptiveMutationEnhancedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveMutationEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalAdaptiveMutationEnhancedSearch = NonObjectOptimizer(method="LLAMAOptimalAdaptiveMutationEnhancedSearch").set_name("LLAMAOptimalAdaptiveMutationEnhancedSearch", register=True) except Exception as e: print("OptimalAdaptiveMutationEnhancedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalAdaptiveSwarmDifferentialEvolution import ( - OptimalAdaptiveSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.OptimalAdaptiveSwarmDifferentialEvolution import OptimalAdaptiveSwarmDifferentialEvolution lama_register["OptimalAdaptiveSwarmDifferentialEvolution"] = OptimalAdaptiveSwarmDifferentialEvolution - LLAMAOptimalAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution" - ).set_name("LLAMAOptimalAdaptiveSwarmDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution").set_name("LLAMAOptimalAdaptiveSwarmDifferentialEvolution", register=True) except Exception as e: print("OptimalAdaptiveSwarmDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalBalanceSearch import OptimalBalanceSearch lama_register["OptimalBalanceSearch"] = OptimalBalanceSearch - LLAMAOptimalBalanceSearch = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch").set_name( - "LLAMAOptimalBalanceSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalBalanceSearch = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch").set_name("LLAMAOptimalBalanceSearch", register=True) except Exception as e: print("OptimalBalanceSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalCohortDiversityOptimizer import OptimalCohortDiversityOptimizer lama_register["OptimalCohortDiversityOptimizer"] = OptimalCohortDiversityOptimizer - LLAMAOptimalCohortDiversityOptimizer = NonObjectOptimizer( - method="LLAMAOptimalCohortDiversityOptimizer" - ).set_name("LLAMAOptimalCohortDiversityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalCohortDiversityOptimizer = NonObjectOptimizer(method="LLAMAOptimalCohortDiversityOptimizer").set_name("LLAMAOptimalCohortDiversityOptimizer", register=True) except Exception as e: print("OptimalCohortDiversityOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalConvergenceDE import OptimalConvergenceDE lama_register["OptimalConvergenceDE"] = OptimalConvergenceDE - LLAMAOptimalConvergenceDE = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE").set_name( - "LLAMAOptimalConvergenceDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalConvergenceDE = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE").set_name("LLAMAOptimalConvergenceDE", register=True) except Exception as e: print("OptimalConvergenceDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalDynamicAdaptiveEvolutionOptimizer import ( - OptimalDynamicAdaptiveEvolutionOptimizer, - ) + from nevergrad.optimization.lama.OptimalDynamicAdaptiveEvolutionOptimizer import OptimalDynamicAdaptiveEvolutionOptimizer lama_register["OptimalDynamicAdaptiveEvolutionOptimizer"] = OptimalDynamicAdaptiveEvolutionOptimizer - LLAMAOptimalDynamicAdaptiveEvolutionOptimizer = NonObjectOptimizer( - method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer" - ).set_name("LLAMAOptimalDynamicAdaptiveEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalDynamicAdaptiveEvolutionOptimizer = NonObjectOptimizer(method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer").set_name("LLAMAOptimalDynamicAdaptiveEvolutionOptimizer", register=True) except Exception as e: print("OptimalDynamicAdaptiveEvolutionOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalDynamicMutationSearch import OptimalDynamicMutationSearch lama_register["OptimalDynamicMutationSearch"] = OptimalDynamicMutationSearch - LLAMAOptimalDynamicMutationSearch = NonObjectOptimizer( - method="LLAMAOptimalDynamicMutationSearch" - ).set_name("LLAMAOptimalDynamicMutationSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalDynamicMutationSearch = NonObjectOptimizer(method="LLAMAOptimalDynamicMutationSearch").set_name("LLAMAOptimalDynamicMutationSearch", register=True) except Exception as e: print("OptimalDynamicMutationSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV14 import ( - OptimalDynamicPrecisionOptimizerV14, - ) + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV14 import OptimalDynamicPrecisionOptimizerV14 lama_register["OptimalDynamicPrecisionOptimizerV14"] = OptimalDynamicPrecisionOptimizerV14 - LLAMAOptimalDynamicPrecisionOptimizerV14 = NonObjectOptimizer( - method="LLAMAOptimalDynamicPrecisionOptimizerV14" - ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV14", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalDynamicPrecisionOptimizerV14 = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV14").set_name("LLAMAOptimalDynamicPrecisionOptimizerV14", register=True) except Exception as e: print("OptimalDynamicPrecisionOptimizerV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV21 import ( - OptimalDynamicPrecisionOptimizerV21, - ) + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV21 import OptimalDynamicPrecisionOptimizerV21 lama_register["OptimalDynamicPrecisionOptimizerV21"] = OptimalDynamicPrecisionOptimizerV21 - LLAMAOptimalDynamicPrecisionOptimizerV21 = NonObjectOptimizer( - method="LLAMAOptimalDynamicPrecisionOptimizerV21" - ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV21", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalDynamicPrecisionOptimizerV21 = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV21").set_name("LLAMAOptimalDynamicPrecisionOptimizerV21", register=True) except Exception as e: print("OptimalDynamicPrecisionOptimizerV21 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalEnhancedRAMEDS import OptimalEnhancedRAMEDS lama_register["OptimalEnhancedRAMEDS"] = OptimalEnhancedRAMEDS - LLAMAOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS").set_name( - "LLAMAOptimalEnhancedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS").set_name("LLAMAOptimalEnhancedRAMEDS", register=True) except Exception as e: print("OptimalEnhancedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalEnhancedStrategyDE import OptimalEnhancedStrategyDE lama_register["OptimalEnhancedStrategyDE"] = OptimalEnhancedStrategyDE - LLAMAOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE").set_name( - "LLAMAOptimalEnhancedStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE").set_name("LLAMAOptimalEnhancedStrategyDE", register=True) except Exception as e: print("OptimalEnhancedStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientHybridOptimizerV8 import ( - OptimalEvolutionaryGradientHybridOptimizerV8, - ) + from nevergrad.optimization.lama.OptimalEvolutionaryGradientHybridOptimizerV8 import OptimalEvolutionaryGradientHybridOptimizerV8 - lama_register["OptimalEvolutionaryGradientHybridOptimizerV8"] = ( - OptimalEvolutionaryGradientHybridOptimizerV8 - ) - LLAMAOptimalEvolutionaryGradientHybridOptimizerV8 = NonObjectOptimizer( - method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8" - ).set_name("LLAMAOptimalEvolutionaryGradientHybridOptimizerV8", register=True) + lama_register["OptimalEvolutionaryGradientHybridOptimizerV8"] = OptimalEvolutionaryGradientHybridOptimizerV8 + res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalEvolutionaryGradientHybridOptimizerV8 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8").set_name("LLAMAOptimalEvolutionaryGradientHybridOptimizerV8", register=True) except Exception as e: print("OptimalEvolutionaryGradientHybridOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV11 import ( - OptimalEvolutionaryGradientOptimizerV11, - ) + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV11 import OptimalEvolutionaryGradientOptimizerV11 lama_register["OptimalEvolutionaryGradientOptimizerV11"] = OptimalEvolutionaryGradientOptimizerV11 - LLAMAOptimalEvolutionaryGradientOptimizerV11 = NonObjectOptimizer( - method="LLAMAOptimalEvolutionaryGradientOptimizerV11" - ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalEvolutionaryGradientOptimizerV11 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV11").set_name("LLAMAOptimalEvolutionaryGradientOptimizerV11", register=True) except Exception as e: print("OptimalEvolutionaryGradientOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV25 import ( - OptimalEvolutionaryGradientOptimizerV25, - ) + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV25 import OptimalEvolutionaryGradientOptimizerV25 lama_register["OptimalEvolutionaryGradientOptimizerV25"] = OptimalEvolutionaryGradientOptimizerV25 - LLAMAOptimalEvolutionaryGradientOptimizerV25 = NonObjectOptimizer( - method="LLAMAOptimalEvolutionaryGradientOptimizerV25" - ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV25", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalEvolutionaryGradientOptimizerV25 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV25").set_name("LLAMAOptimalEvolutionaryGradientOptimizerV25", register=True) except Exception as e: print("OptimalEvolutionaryGradientOptimizerV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalHybridDifferentialAnnealingOptimizer import ( - OptimalHybridDifferentialAnnealingOptimizer, - ) + from nevergrad.optimization.lama.OptimalHybridDifferentialAnnealingOptimizer import OptimalHybridDifferentialAnnealingOptimizer lama_register["OptimalHybridDifferentialAnnealingOptimizer"] = OptimalHybridDifferentialAnnealingOptimizer - LLAMAOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAOptimalHybridDifferentialAnnealingOptimizer" - ).set_name("LLAMAOptimalHybridDifferentialAnnealingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer(method="LLAMAOptimalHybridDifferentialAnnealingOptimizer").set_name("LLAMAOptimalHybridDifferentialAnnealingOptimizer", register=True) except Exception as e: print("OptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalHyperStrategicOptimizerV51 import ( - OptimalHyperStrategicOptimizerV51, - ) + from nevergrad.optimization.lama.OptimalHyperStrategicOptimizerV51 import OptimalHyperStrategicOptimizerV51 lama_register["OptimalHyperStrategicOptimizerV51"] = OptimalHyperStrategicOptimizerV51 - LLAMAOptimalHyperStrategicOptimizerV51 = NonObjectOptimizer( - method="LLAMAOptimalHyperStrategicOptimizerV51" - ).set_name("LLAMAOptimalHyperStrategicOptimizerV51", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalHyperStrategicOptimizerV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalHyperStrategicOptimizerV51 = NonObjectOptimizer(method="LLAMAOptimalHyperStrategicOptimizerV51").set_name("LLAMAOptimalHyperStrategicOptimizerV51", register=True) except Exception as e: print("OptimalHyperStrategicOptimizerV51 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalPrecisionDynamicAdaptationOptimizer import ( - OptimalPrecisionDynamicAdaptationOptimizer, - ) + from nevergrad.optimization.lama.OptimalPrecisionDynamicAdaptationOptimizer import OptimalPrecisionDynamicAdaptationOptimizer lama_register["OptimalPrecisionDynamicAdaptationOptimizer"] = OptimalPrecisionDynamicAdaptationOptimizer - LLAMAOptimalPrecisionDynamicAdaptationOptimizer = NonObjectOptimizer( - method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer" - ).set_name("LLAMAOptimalPrecisionDynamicAdaptationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalPrecisionDynamicAdaptationOptimizer = NonObjectOptimizer(method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer").set_name("LLAMAOptimalPrecisionDynamicAdaptationOptimizer", register=True) except Exception as e: print("OptimalPrecisionDynamicAdaptationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryOptimizerV37 import ( - OptimalPrecisionEvolutionaryOptimizerV37, - ) + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryOptimizerV37 import OptimalPrecisionEvolutionaryOptimizerV37 lama_register["OptimalPrecisionEvolutionaryOptimizerV37"] = OptimalPrecisionEvolutionaryOptimizerV37 - LLAMAOptimalPrecisionEvolutionaryOptimizerV37 = NonObjectOptimizer( - method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37" - ).set_name("LLAMAOptimalPrecisionEvolutionaryOptimizerV37", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalPrecisionEvolutionaryOptimizerV37 = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37").set_name("LLAMAOptimalPrecisionEvolutionaryOptimizerV37", register=True) except Exception as e: print("OptimalPrecisionEvolutionaryOptimizerV37 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryThermalOptimizer import ( - OptimalPrecisionEvolutionaryThermalOptimizer, - ) + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryThermalOptimizer import OptimalPrecisionEvolutionaryThermalOptimizer - lama_register["OptimalPrecisionEvolutionaryThermalOptimizer"] = ( - OptimalPrecisionEvolutionaryThermalOptimizer - ) - LLAMAOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( - method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer" - ).set_name("LLAMAOptimalPrecisionEvolutionaryThermalOptimizer", register=True) + lama_register["OptimalPrecisionEvolutionaryThermalOptimizer"] = OptimalPrecisionEvolutionaryThermalOptimizer + res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAOptimalPrecisionEvolutionaryThermalOptimizer", register=True) except Exception as e: print("OptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalPrecisionHybridSearchV3 import OptimalPrecisionHybridSearchV3 lama_register["OptimalPrecisionHybridSearchV3"] = OptimalPrecisionHybridSearchV3 - LLAMAOptimalPrecisionHybridSearchV3 = NonObjectOptimizer( - method="LLAMAOptimalPrecisionHybridSearchV3" - ).set_name("LLAMAOptimalPrecisionHybridSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalPrecisionHybridSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalPrecisionHybridSearchV3 = NonObjectOptimizer(method="LLAMAOptimalPrecisionHybridSearchV3").set_name("LLAMAOptimalPrecisionHybridSearchV3", register=True) except Exception as e: print("OptimalPrecisionHybridSearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalQuantumSynergyStrategy import OptimalQuantumSynergyStrategy lama_register["OptimalQuantumSynergyStrategy"] = OptimalQuantumSynergyStrategy - LLAMAOptimalQuantumSynergyStrategy = NonObjectOptimizer( - method="LLAMAOptimalQuantumSynergyStrategy" - ).set_name("LLAMAOptimalQuantumSynergyStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalQuantumSynergyStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalQuantumSynergyStrategy = NonObjectOptimizer(method="LLAMAOptimalQuantumSynergyStrategy").set_name("LLAMAOptimalQuantumSynergyStrategy", register=True) except Exception as e: print("OptimalQuantumSynergyStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalRefinedEnhancedUltraRefinedRAMEDS import ( - OptimalRefinedEnhancedUltraRefinedRAMEDS, - ) + from nevergrad.optimization.lama.OptimalRefinedEnhancedUltraRefinedRAMEDS import OptimalRefinedEnhancedUltraRefinedRAMEDS lama_register["OptimalRefinedEnhancedUltraRefinedRAMEDS"] = OptimalRefinedEnhancedUltraRefinedRAMEDS - LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( - method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS" - ).set_name("LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS").set_name("LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS", register=True) except Exception as e: print("OptimalRefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalSelectiveEvolutionaryOptimizerV20 import ( - OptimalSelectiveEvolutionaryOptimizerV20, - ) + from nevergrad.optimization.lama.OptimalSelectiveEvolutionaryOptimizerV20 import OptimalSelectiveEvolutionaryOptimizerV20 lama_register["OptimalSelectiveEvolutionaryOptimizerV20"] = OptimalSelectiveEvolutionaryOptimizerV20 - LLAMAOptimalSelectiveEvolutionaryOptimizerV20 = NonObjectOptimizer( - method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20" - ).set_name("LLAMAOptimalSelectiveEvolutionaryOptimizerV20", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalSelectiveEvolutionaryOptimizerV20 = NonObjectOptimizer(method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20").set_name("LLAMAOptimalSelectiveEvolutionaryOptimizerV20", register=True) except Exception as e: print("OptimalSelectiveEvolutionaryOptimizerV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalSmartRefinedRAMEDS import OptimalSmartRefinedRAMEDS lama_register["OptimalSmartRefinedRAMEDS"] = OptimalSmartRefinedRAMEDS - LLAMAOptimalSmartRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS").set_name( - "LLAMAOptimalSmartRefinedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalSmartRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS").set_name("LLAMAOptimalSmartRefinedRAMEDS", register=True) except Exception as e: print("OptimalSmartRefinedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalSpiralCentroidSearch import OptimalSpiralCentroidSearch lama_register["OptimalSpiralCentroidSearch"] = OptimalSpiralCentroidSearch - LLAMAOptimalSpiralCentroidSearch = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch").set_name( - "LLAMAOptimalSpiralCentroidSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalSpiralCentroidSearch = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch").set_name("LLAMAOptimalSpiralCentroidSearch", register=True) except Exception as e: print("OptimalSpiralCentroidSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimalStrategicAdaptiveOptimizer import ( - OptimalStrategicAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.OptimalStrategicAdaptiveOptimizer import OptimalStrategicAdaptiveOptimizer lama_register["OptimalStrategicAdaptiveOptimizer"] = OptimalStrategicAdaptiveOptimizer - LLAMAOptimalStrategicAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAOptimalStrategicAdaptiveOptimizer" - ).set_name("LLAMAOptimalStrategicAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimalStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalStrategicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAOptimalStrategicAdaptiveOptimizer").set_name("LLAMAOptimalStrategicAdaptiveOptimizer", register=True) except Exception as e: print("OptimalStrategicAdaptiveOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimalStrategicHybridDE import OptimalStrategicHybridDE lama_register["OptimalStrategicHybridDE"] = OptimalStrategicHybridDE - LLAMAOptimalStrategicHybridDE = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE").set_name( - "LLAMAOptimalStrategicHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimalStrategicHybridDE = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE").set_name("LLAMAOptimalStrategicHybridDE", register=True) except Exception as e: print("OptimalStrategicHybridDE can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimallyBalancedQuantumStrategy import OptimallyBalancedQuantumStrategy lama_register["OptimallyBalancedQuantumStrategy"] = OptimallyBalancedQuantumStrategy - LLAMAOptimallyBalancedQuantumStrategy = NonObjectOptimizer( - method="LLAMAOptimallyBalancedQuantumStrategy" - ).set_name("LLAMAOptimallyBalancedQuantumStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimallyBalancedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimallyBalancedQuantumStrategy = NonObjectOptimizer(method="LLAMAOptimallyBalancedQuantumStrategy").set_name("LLAMAOptimallyBalancedQuantumStrategy", register=True) except Exception as e: print("OptimallyBalancedQuantumStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveDifferentialClimber import ( - OptimizedAdaptiveDifferentialClimber, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveDifferentialClimber import OptimizedAdaptiveDifferentialClimber lama_register["OptimizedAdaptiveDifferentialClimber"] = OptimizedAdaptiveDifferentialClimber - LLAMAOptimizedAdaptiveDifferentialClimber = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveDifferentialClimber" - ).set_name("LLAMAOptimizedAdaptiveDifferentialClimber", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDifferentialClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveDifferentialClimber = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDifferentialClimber").set_name("LLAMAOptimizedAdaptiveDifferentialClimber", register=True) except Exception as e: print("OptimizedAdaptiveDifferentialClimber can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategy import ( - OptimizedAdaptiveDualPhaseStrategy, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategy import OptimizedAdaptiveDualPhaseStrategy lama_register["OptimizedAdaptiveDualPhaseStrategy"] = OptimizedAdaptiveDualPhaseStrategy - LLAMAOptimizedAdaptiveDualPhaseStrategy = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveDualPhaseStrategy" - ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategy").set_name("LLAMAOptimizedAdaptiveDualPhaseStrategy", register=True) except Exception as e: print("OptimizedAdaptiveDualPhaseStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategyV4 import ( - OptimizedAdaptiveDualPhaseStrategyV4, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategyV4 import OptimizedAdaptiveDualPhaseStrategyV4 lama_register["OptimizedAdaptiveDualPhaseStrategyV4"] = OptimizedAdaptiveDualPhaseStrategyV4 - LLAMAOptimizedAdaptiveDualPhaseStrategyV4 = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4" - ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveDualPhaseStrategyV4 = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4").set_name("LLAMAOptimizedAdaptiveDualPhaseStrategyV4", register=True) except Exception as e: print("OptimizedAdaptiveDualPhaseStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveDynamicStrategyV34 import ( - OptimizedAdaptiveDynamicStrategyV34, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveDynamicStrategyV34 import OptimizedAdaptiveDynamicStrategyV34 lama_register["OptimizedAdaptiveDynamicStrategyV34"] = OptimizedAdaptiveDynamicStrategyV34 - LLAMAOptimizedAdaptiveDynamicStrategyV34 = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveDynamicStrategyV34" - ).set_name("LLAMAOptimizedAdaptiveDynamicStrategyV34", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDynamicStrategyV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveDynamicStrategyV34 = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDynamicStrategyV34").set_name("LLAMAOptimizedAdaptiveDynamicStrategyV34", register=True) except Exception as e: print("OptimizedAdaptiveDynamicStrategyV34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveGlobalLocalSearch import ( - OptimizedAdaptiveGlobalLocalSearch, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveGlobalLocalSearch import OptimizedAdaptiveGlobalLocalSearch lama_register["OptimizedAdaptiveGlobalLocalSearch"] = OptimizedAdaptiveGlobalLocalSearch - LLAMAOptimizedAdaptiveGlobalLocalSearch = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveGlobalLocalSearch" - ).set_name("LLAMAOptimizedAdaptiveGlobalLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveGlobalLocalSearch = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveGlobalLocalSearch").set_name("LLAMAOptimizedAdaptiveGlobalLocalSearch", register=True) except Exception as e: print("OptimizedAdaptiveGlobalLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveQuantumGradientHybridStrategy import ( - OptimizedAdaptiveQuantumGradientHybridStrategy, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveQuantumGradientHybridStrategy import OptimizedAdaptiveQuantumGradientHybridStrategy - lama_register["OptimizedAdaptiveQuantumGradientHybridStrategy"] = ( - OptimizedAdaptiveQuantumGradientHybridStrategy - ) - LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy" - ).set_name("LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy", register=True) + lama_register["OptimizedAdaptiveQuantumGradientHybridStrategy"] = OptimizedAdaptiveQuantumGradientHybridStrategy + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy").set_name("LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy", register=True) except Exception as e: print("OptimizedAdaptiveQuantumGradientHybridStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedAdaptiveSimulatedAnnealingWithSmartMemory import ( - OptimizedAdaptiveSimulatedAnnealingWithSmartMemory, - ) + from nevergrad.optimization.lama.OptimizedAdaptiveSimulatedAnnealingWithSmartMemory import OptimizedAdaptiveSimulatedAnnealingWithSmartMemory - lama_register["OptimizedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( - OptimizedAdaptiveSimulatedAnnealingWithSmartMemory - ) - LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( - method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory" - ).set_name("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) + lama_register["OptimizedAdaptiveSimulatedAnnealingWithSmartMemory"] = OptimizedAdaptiveSimulatedAnnealingWithSmartMemory + res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) except Exception as e: print("OptimizedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedBalancedDualStrategyAdaptiveDE import ( - OptimizedBalancedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.OptimizedBalancedDualStrategyAdaptiveDE import OptimizedBalancedDualStrategyAdaptiveDE lama_register["OptimizedBalancedDualStrategyAdaptiveDE"] = OptimizedBalancedDualStrategyAdaptiveDE - LLAMAOptimizedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE" - ).set_name("LLAMAOptimizedBalancedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedBalancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("OptimizedBalancedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedConvergenceIslandStrategy import ( - OptimizedConvergenceIslandStrategy, - ) + from nevergrad.optimization.lama.OptimizedConvergenceIslandStrategy import OptimizedConvergenceIslandStrategy lama_register["OptimizedConvergenceIslandStrategy"] = OptimizedConvergenceIslandStrategy - LLAMAOptimizedConvergenceIslandStrategy = NonObjectOptimizer( - method="LLAMAOptimizedConvergenceIslandStrategy" - ).set_name("LLAMAOptimizedConvergenceIslandStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedConvergenceIslandStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedConvergenceIslandStrategy = NonObjectOptimizer(method="LLAMAOptimizedConvergenceIslandStrategy").set_name("LLAMAOptimizedConvergenceIslandStrategy", register=True) except Exception as e: print("OptimizedConvergenceIslandStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedConvergentAdaptiveEvolver import ( - OptimizedConvergentAdaptiveEvolver, - ) + from nevergrad.optimization.lama.OptimizedConvergentAdaptiveEvolver import OptimizedConvergentAdaptiveEvolver lama_register["OptimizedConvergentAdaptiveEvolver"] = OptimizedConvergentAdaptiveEvolver - LLAMAOptimizedConvergentAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAOptimizedConvergentAdaptiveEvolver" - ).set_name("LLAMAOptimizedConvergentAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedConvergentAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedConvergentAdaptiveEvolver = NonObjectOptimizer(method="LLAMAOptimizedConvergentAdaptiveEvolver").set_name("LLAMAOptimizedConvergentAdaptiveEvolver", register=True) except Exception as e: print("OptimizedConvergentAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedCrossoverElitistStrategyV8 import ( - OptimizedCrossoverElitistStrategyV8, - ) + from nevergrad.optimization.lama.OptimizedCrossoverElitistStrategyV8 import OptimizedCrossoverElitistStrategyV8 lama_register["OptimizedCrossoverElitistStrategyV8"] = OptimizedCrossoverElitistStrategyV8 - LLAMAOptimizedCrossoverElitistStrategyV8 = NonObjectOptimizer( - method="LLAMAOptimizedCrossoverElitistStrategyV8" - ).set_name("LLAMAOptimizedCrossoverElitistStrategyV8", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedCrossoverElitistStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedCrossoverElitistStrategyV8 = NonObjectOptimizer(method="LLAMAOptimizedCrossoverElitistStrategyV8").set_name("LLAMAOptimizedCrossoverElitistStrategyV8", register=True) except Exception as e: print("OptimizedCrossoverElitistStrategyV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedDifferentialEvolution import OptimizedDifferentialEvolution lama_register["OptimizedDifferentialEvolution"] = OptimizedDifferentialEvolution - LLAMAOptimizedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAOptimizedDifferentialEvolution" - ).set_name("LLAMAOptimizedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimizedDifferentialEvolution").set_name("LLAMAOptimizedDifferentialEvolution", register=True) except Exception as e: print("OptimizedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDualPhaseAdaptiveHybridOptimizationV4 import ( - OptimizedDualPhaseAdaptiveHybridOptimizationV4, - ) + from nevergrad.optimization.lama.OptimizedDualPhaseAdaptiveHybridOptimizationV4 import OptimizedDualPhaseAdaptiveHybridOptimizationV4 - lama_register["OptimizedDualPhaseAdaptiveHybridOptimizationV4"] = ( - OptimizedDualPhaseAdaptiveHybridOptimizationV4 - ) - LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4 = NonObjectOptimizer( - method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4" - ).set_name("LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4", register=True) + lama_register["OptimizedDualPhaseAdaptiveHybridOptimizationV4"] = OptimizedDualPhaseAdaptiveHybridOptimizationV4 + res = NonObjectOptimizer(method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4 = NonObjectOptimizer(method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4").set_name("LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4", register=True) except Exception as e: print("OptimizedDualPhaseAdaptiveHybridOptimizationV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedDualStrategyAdaptiveDE import OptimizedDualStrategyAdaptiveDE lama_register["OptimizedDualStrategyAdaptiveDE"] = OptimizedDualStrategyAdaptiveDE - LLAMAOptimizedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAOptimizedDualStrategyAdaptiveDE" - ).set_name("LLAMAOptimizedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedDualStrategyAdaptiveDE", register=True) except Exception as e: print("OptimizedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicAdaptiveHybridOptimizer import ( - OptimizedDynamicAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.OptimizedDynamicAdaptiveHybridOptimizer import OptimizedDynamicAdaptiveHybridOptimizer lama_register["OptimizedDynamicAdaptiveHybridOptimizer"] = OptimizedDynamicAdaptiveHybridOptimizer - LLAMAOptimizedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer" - ).set_name("LLAMAOptimizedDynamicAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer").set_name("LLAMAOptimizedDynamicAdaptiveHybridOptimizer", register=True) except Exception as e: print("OptimizedDynamicAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicDualPhaseStrategyV13 import ( - OptimizedDynamicDualPhaseStrategyV13, - ) + from nevergrad.optimization.lama.OptimizedDynamicDualPhaseStrategyV13 import OptimizedDynamicDualPhaseStrategyV13 lama_register["OptimizedDynamicDualPhaseStrategyV13"] = OptimizedDynamicDualPhaseStrategyV13 - LLAMAOptimizedDynamicDualPhaseStrategyV13 = NonObjectOptimizer( - method="LLAMAOptimizedDynamicDualPhaseStrategyV13" - ).set_name("LLAMAOptimizedDynamicDualPhaseStrategyV13", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicDualPhaseStrategyV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicDualPhaseStrategyV13 = NonObjectOptimizer(method="LLAMAOptimizedDynamicDualPhaseStrategyV13").set_name("LLAMAOptimizedDynamicDualPhaseStrategyV13", register=True) except Exception as e: print("OptimizedDynamicDualPhaseStrategyV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( - OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus, - ) + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus import OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus - lama_register["OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( - OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus - ) - LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( - method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus" - ).set_name("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) + lama_register["OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) except Exception as e: print("OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedSimulatedAnnealing import ( - OptimizedDynamicGradientBoostedSimulatedAnnealing, - ) + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedSimulatedAnnealing import OptimizedDynamicGradientBoostedSimulatedAnnealing - lama_register["OptimizedDynamicGradientBoostedSimulatedAnnealing"] = ( - OptimizedDynamicGradientBoostedSimulatedAnnealing - ) - LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing = NonObjectOptimizer( - method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing" - ).set_name("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing", register=True) + lama_register["OptimizedDynamicGradientBoostedSimulatedAnnealing"] = OptimizedDynamicGradientBoostedSimulatedAnnealing + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing").set_name("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing", register=True) except Exception as e: print("OptimizedDynamicGradientBoostedSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicQuantumSwarmOptimization import ( - OptimizedDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.OptimizedDynamicQuantumSwarmOptimization import OptimizedDynamicQuantumSwarmOptimization lama_register["OptimizedDynamicQuantumSwarmOptimization"] = OptimizedDynamicQuantumSwarmOptimization - LLAMAOptimizedDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMAOptimizedDynamicQuantumSwarmOptimization" - ).set_name("LLAMAOptimizedDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAOptimizedDynamicQuantumSwarmOptimization").set_name("LLAMAOptimizedDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("OptimizedDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedDynamicRestartAdaptiveDE import ( - OptimizedDynamicRestartAdaptiveDE, - ) + from nevergrad.optimization.lama.OptimizedDynamicRestartAdaptiveDE import OptimizedDynamicRestartAdaptiveDE lama_register["OptimizedDynamicRestartAdaptiveDE"] = OptimizedDynamicRestartAdaptiveDE - LLAMAOptimizedDynamicRestartAdaptiveDE = NonObjectOptimizer( - method="LLAMAOptimizedDynamicRestartAdaptiveDE" - ).set_name("LLAMAOptimizedDynamicRestartAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedDynamicRestartAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedDynamicRestartAdaptiveDE").set_name("LLAMAOptimizedDynamicRestartAdaptiveDE", register=True) except Exception as e: print("OptimizedDynamicRestartAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedEliteAdaptiveMemoryHybridOptimizer import ( - OptimizedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.OptimizedEliteAdaptiveMemoryHybridOptimizer import OptimizedEliteAdaptiveMemoryHybridOptimizer lama_register["OptimizedEliteAdaptiveMemoryHybridOptimizer"] = OptimizedEliteAdaptiveMemoryHybridOptimizer - LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("OptimizedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedEnhancedAdaptiveMetaNetAQAPSO import ( - OptimizedEnhancedAdaptiveMetaNetAQAPSO, - ) + from nevergrad.optimization.lama.OptimizedEnhancedAdaptiveMetaNetAQAPSO import OptimizedEnhancedAdaptiveMetaNetAQAPSO lama_register["OptimizedEnhancedAdaptiveMetaNetAQAPSO"] = OptimizedEnhancedAdaptiveMetaNetAQAPSO - LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( - method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO" - ).set_name("LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO", register=True) except Exception as e: print("OptimizedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedEnhancedDualStrategyAdaptiveDE import ( - OptimizedEnhancedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.OptimizedEnhancedDualStrategyAdaptiveDE import OptimizedEnhancedDualStrategyAdaptiveDE lama_register["OptimizedEnhancedDualStrategyAdaptiveDE"] = OptimizedEnhancedDualStrategyAdaptiveDE - LLAMAOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE" - ).set_name("LLAMAOptimizedEnhancedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedEnhancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("OptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedEnhancedDynamicFireworkAlgorithm import ( - OptimizedEnhancedDynamicFireworkAlgorithm, - ) + from nevergrad.optimization.lama.OptimizedEnhancedDynamicFireworkAlgorithm import OptimizedEnhancedDynamicFireworkAlgorithm lama_register["OptimizedEnhancedDynamicFireworkAlgorithm"] = OptimizedEnhancedDynamicFireworkAlgorithm - LLAMAOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm" - ).set_name("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm").set_name("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm", register=True) except Exception as e: print("OptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedEvolutiveStrategy import OptimizedEvolutiveStrategy lama_register["OptimizedEvolutiveStrategy"] = OptimizedEvolutiveStrategy - LLAMAOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy").set_name( - "LLAMAOptimizedEvolutiveStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy").set_name("LLAMAOptimizedEvolutiveStrategy", register=True) except Exception as e: print("OptimizedEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedExplorationConvergenceStrategy import ( - OptimizedExplorationConvergenceStrategy, - ) + from nevergrad.optimization.lama.OptimizedExplorationConvergenceStrategy import OptimizedExplorationConvergenceStrategy lama_register["OptimizedExplorationConvergenceStrategy"] = OptimizedExplorationConvergenceStrategy - LLAMAOptimizedExplorationConvergenceStrategy = NonObjectOptimizer( - method="LLAMAOptimizedExplorationConvergenceStrategy" - ).set_name("LLAMAOptimizedExplorationConvergenceStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedExplorationConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedExplorationConvergenceStrategy = NonObjectOptimizer(method="LLAMAOptimizedExplorationConvergenceStrategy").set_name("LLAMAOptimizedExplorationConvergenceStrategy", register=True) except Exception as e: print("OptimizedExplorationConvergenceStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedGlobalStructureAwareEvolver import ( - OptimizedGlobalStructureAwareEvolver, - ) + from nevergrad.optimization.lama.OptimizedGlobalStructureAwareEvolver import OptimizedGlobalStructureAwareEvolver lama_register["OptimizedGlobalStructureAwareEvolver"] = OptimizedGlobalStructureAwareEvolver - LLAMAOptimizedGlobalStructureAwareEvolver = NonObjectOptimizer( - method="LLAMAOptimizedGlobalStructureAwareEvolver" - ).set_name("LLAMAOptimizedGlobalStructureAwareEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedGlobalStructureAwareEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedGlobalStructureAwareEvolver = NonObjectOptimizer(method="LLAMAOptimizedGlobalStructureAwareEvolver").set_name("LLAMAOptimizedGlobalStructureAwareEvolver", register=True) except Exception as e: print("OptimizedGlobalStructureAwareEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedGradientBalancedPSO import OptimizedGradientBalancedPSO lama_register["OptimizedGradientBalancedPSO"] = OptimizedGradientBalancedPSO - LLAMAOptimizedGradientBalancedPSO = NonObjectOptimizer( - method="LLAMAOptimizedGradientBalancedPSO" - ).set_name("LLAMAOptimizedGradientBalancedPSO", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedGradientBalancedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedGradientBalancedPSO = NonObjectOptimizer(method="LLAMAOptimizedGradientBalancedPSO").set_name("LLAMAOptimizedGradientBalancedPSO", register=True) except Exception as e: print("OptimizedGradientBalancedPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch import ( - OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch, - ) + from nevergrad.optimization.lama.OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch import OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch - lama_register["OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch"] = ( - OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch - ) - LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch = NonObjectOptimizer( - method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch" - ).set_name("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch", register=True) + lama_register["OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch"] = OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch + res = NonObjectOptimizer(method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch = NonObjectOptimizer(method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch").set_name("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch", register=True) except Exception as e: print("OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedGradientMemorySimulatedAnnealing import ( - OptimizedGradientMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.OptimizedGradientMemorySimulatedAnnealing import OptimizedGradientMemorySimulatedAnnealing lama_register["OptimizedGradientMemorySimulatedAnnealing"] = OptimizedGradientMemorySimulatedAnnealing - LLAMAOptimizedGradientMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMAOptimizedGradientMemorySimulatedAnnealing" - ).set_name("LLAMAOptimizedGradientMemorySimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedGradientMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAOptimizedGradientMemorySimulatedAnnealing").set_name("LLAMAOptimizedGradientMemorySimulatedAnnealing", register=True) except Exception as e: print("OptimizedGradientMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedHybridAdaptiveDualPhaseStrategyV7 import ( - OptimizedHybridAdaptiveDualPhaseStrategyV7, - ) + from nevergrad.optimization.lama.OptimizedHybridAdaptiveDualPhaseStrategyV7 import OptimizedHybridAdaptiveDualPhaseStrategyV7 lama_register["OptimizedHybridAdaptiveDualPhaseStrategyV7"] = OptimizedHybridAdaptiveDualPhaseStrategyV7 - LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7 = NonObjectOptimizer( - method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7" - ).set_name("LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7 = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7").set_name("LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7", register=True) except Exception as e: print("OptimizedHybridAdaptiveDualPhaseStrategyV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedHybridAdaptiveMultiStageOptimization import ( - OptimizedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.OptimizedHybridAdaptiveMultiStageOptimization import OptimizedHybridAdaptiveMultiStageOptimization - lama_register["OptimizedHybridAdaptiveMultiStageOptimization"] = ( - OptimizedHybridAdaptiveMultiStageOptimization - ) - LLAMAOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMAOptimizedHybridAdaptiveMultiStageOptimization", register=True) + lama_register["OptimizedHybridAdaptiveMultiStageOptimization"] = OptimizedHybridAdaptiveMultiStageOptimization + res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMAOptimizedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("OptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedHybridExplorationOptimization import ( - OptimizedHybridExplorationOptimization, - ) + from nevergrad.optimization.lama.OptimizedHybridExplorationOptimization import OptimizedHybridExplorationOptimization lama_register["OptimizedHybridExplorationOptimization"] = OptimizedHybridExplorationOptimization - LLAMAOptimizedHybridExplorationOptimization = NonObjectOptimizer( - method="LLAMAOptimizedHybridExplorationOptimization" - ).set_name("LLAMAOptimizedHybridExplorationOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMAOptimizedHybridExplorationOptimization").set_name("LLAMAOptimizedHybridExplorationOptimization", register=True) except Exception as e: print("OptimizedHybridExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedHybridSearch import OptimizedHybridSearch lama_register["OptimizedHybridSearch"] = OptimizedHybridSearch - LLAMAOptimizedHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch").set_name( - "LLAMAOptimizedHybridSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch").set_name("LLAMAOptimizedHybridSearch", register=True) except Exception as e: print("OptimizedHybridSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedHybridStrategyDE import OptimizedHybridStrategyDE lama_register["OptimizedHybridStrategyDE"] = OptimizedHybridStrategyDE - LLAMAOptimizedHybridStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE").set_name( - "LLAMAOptimizedHybridStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHybridStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE").set_name("LLAMAOptimizedHybridStrategyDE", register=True) except Exception as e: print("OptimizedHybridStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedHyperStrategicOptimizerV53 import ( - OptimizedHyperStrategicOptimizerV53, - ) + from nevergrad.optimization.lama.OptimizedHyperStrategicOptimizerV53 import OptimizedHyperStrategicOptimizerV53 lama_register["OptimizedHyperStrategicOptimizerV53"] = OptimizedHyperStrategicOptimizerV53 - LLAMAOptimizedHyperStrategicOptimizerV53 = NonObjectOptimizer( - method="LLAMAOptimizedHyperStrategicOptimizerV53" - ).set_name("LLAMAOptimizedHyperStrategicOptimizerV53", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedHyperStrategicOptimizerV53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedHyperStrategicOptimizerV53 = NonObjectOptimizer(method="LLAMAOptimizedHyperStrategicOptimizerV53").set_name("LLAMAOptimizedHyperStrategicOptimizerV53", register=True) except Exception as e: print("OptimizedHyperStrategicOptimizerV53 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedIslandEvolutionStrategyV4 import ( - OptimizedIslandEvolutionStrategyV4, - ) + from nevergrad.optimization.lama.OptimizedIslandEvolutionStrategyV4 import OptimizedIslandEvolutionStrategyV4 lama_register["OptimizedIslandEvolutionStrategyV4"] = OptimizedIslandEvolutionStrategyV4 - LLAMAOptimizedIslandEvolutionStrategyV4 = NonObjectOptimizer( - method="LLAMAOptimizedIslandEvolutionStrategyV4" - ).set_name("LLAMAOptimizedIslandEvolutionStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedIslandEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedIslandEvolutionStrategyV4 = NonObjectOptimizer(method="LLAMAOptimizedIslandEvolutionStrategyV4").set_name("LLAMAOptimizedIslandEvolutionStrategyV4", register=True) except Exception as e: print("OptimizedIslandEvolutionStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedMemoryEnhancedAdaptiveStrategyV70 import ( - OptimizedMemoryEnhancedAdaptiveStrategyV70, - ) + from nevergrad.optimization.lama.OptimizedMemoryEnhancedAdaptiveStrategyV70 import OptimizedMemoryEnhancedAdaptiveStrategyV70 lama_register["OptimizedMemoryEnhancedAdaptiveStrategyV70"] = OptimizedMemoryEnhancedAdaptiveStrategyV70 - LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70 = NonObjectOptimizer( - method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70" - ).set_name("LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70 = NonObjectOptimizer(method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70").set_name("LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70", register=True) except Exception as e: print("OptimizedMemoryEnhancedAdaptiveStrategyV70 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedMemoryGuidedAdaptiveStrategyV81 import ( - OptimizedMemoryGuidedAdaptiveStrategyV81, - ) + from nevergrad.optimization.lama.OptimizedMemoryGuidedAdaptiveStrategyV81 import OptimizedMemoryGuidedAdaptiveStrategyV81 lama_register["OptimizedMemoryGuidedAdaptiveStrategyV81"] = OptimizedMemoryGuidedAdaptiveStrategyV81 - LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81 = NonObjectOptimizer( - method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81" - ).set_name("LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81 = NonObjectOptimizer(method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81").set_name("LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81", register=True) except Exception as e: print("OptimizedMemoryGuidedAdaptiveStrategyV81 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedMemoryResponsiveAdaptiveStrategyV78 import ( - OptimizedMemoryResponsiveAdaptiveStrategyV78, - ) + from nevergrad.optimization.lama.OptimizedMemoryResponsiveAdaptiveStrategyV78 import OptimizedMemoryResponsiveAdaptiveStrategyV78 - lama_register["OptimizedMemoryResponsiveAdaptiveStrategyV78"] = ( - OptimizedMemoryResponsiveAdaptiveStrategyV78 - ) - LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78 = NonObjectOptimizer( - method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78" - ).set_name("LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78", register=True) + lama_register["OptimizedMemoryResponsiveAdaptiveStrategyV78"] = OptimizedMemoryResponsiveAdaptiveStrategyV78 + res = NonObjectOptimizer(method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78 = NonObjectOptimizer(method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78").set_name("LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78", register=True) except Exception as e: print("OptimizedMemoryResponsiveAdaptiveStrategyV78 can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedParallelStrategyDE import OptimizedParallelStrategyDE lama_register["OptimizedParallelStrategyDE"] = OptimizedParallelStrategyDE - LLAMAOptimizedParallelStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE").set_name( - "LLAMAOptimizedParallelStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedParallelStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE").set_name("LLAMAOptimizedParallelStrategyDE", register=True) except Exception as e: print("OptimizedParallelStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedPrecisionAdaptiveStrategy import ( - OptimizedPrecisionAdaptiveStrategy, - ) + from nevergrad.optimization.lama.OptimizedPrecisionAdaptiveStrategy import OptimizedPrecisionAdaptiveStrategy lama_register["OptimizedPrecisionAdaptiveStrategy"] = OptimizedPrecisionAdaptiveStrategy - LLAMAOptimizedPrecisionAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAOptimizedPrecisionAdaptiveStrategy" - ).set_name("LLAMAOptimizedPrecisionAdaptiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedPrecisionAdaptiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedPrecisionAdaptiveStrategy").set_name("LLAMAOptimizedPrecisionAdaptiveStrategy", register=True) except Exception as e: print("OptimizedPrecisionAdaptiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedPrecisionTunedCrossoverElitistStrategyV13 import ( - OptimizedPrecisionTunedCrossoverElitistStrategyV13, - ) + from nevergrad.optimization.lama.OptimizedPrecisionTunedCrossoverElitistStrategyV13 import OptimizedPrecisionTunedCrossoverElitistStrategyV13 - lama_register["OptimizedPrecisionTunedCrossoverElitistStrategyV13"] = ( - OptimizedPrecisionTunedCrossoverElitistStrategyV13 - ) - LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13 = NonObjectOptimizer( - method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13" - ).set_name("LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13", register=True) + lama_register["OptimizedPrecisionTunedCrossoverElitistStrategyV13"] = OptimizedPrecisionTunedCrossoverElitistStrategyV13 + res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13 = NonObjectOptimizer(method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13").set_name("LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13", register=True) except Exception as e: print("OptimizedPrecisionTunedCrossoverElitistStrategyV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 import ( - OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 import OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 - lama_register["OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3"] = ( - OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 - ) - LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3" - ).set_name("LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3", register=True) + lama_register["OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3"] = OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3", register=True) except Exception as e: print("OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedQuantumFluxDifferentialSwarm import ( - OptimizedQuantumFluxDifferentialSwarm, - ) + from nevergrad.optimization.lama.OptimizedQuantumFluxDifferentialSwarm import OptimizedQuantumFluxDifferentialSwarm lama_register["OptimizedQuantumFluxDifferentialSwarm"] = OptimizedQuantumFluxDifferentialSwarm - LLAMAOptimizedQuantumFluxDifferentialSwarm = NonObjectOptimizer( - method="LLAMAOptimizedQuantumFluxDifferentialSwarm" - ).set_name("LLAMAOptimizedQuantumFluxDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMAOptimizedQuantumFluxDifferentialSwarm").set_name("LLAMAOptimizedQuantumFluxDifferentialSwarm", register=True) except Exception as e: print("OptimizedQuantumFluxDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedQuantumGradientExplorationOptimization import ( - OptimizedQuantumGradientExplorationOptimization, - ) + from nevergrad.optimization.lama.OptimizedQuantumGradientExplorationOptimization import OptimizedQuantumGradientExplorationOptimization - lama_register["OptimizedQuantumGradientExplorationOptimization"] = ( - OptimizedQuantumGradientExplorationOptimization - ) - LLAMAOptimizedQuantumGradientExplorationOptimization = NonObjectOptimizer( - method="LLAMAOptimizedQuantumGradientExplorationOptimization" - ).set_name("LLAMAOptimizedQuantumGradientExplorationOptimization", register=True) + lama_register["OptimizedQuantumGradientExplorationOptimization"] = OptimizedQuantumGradientExplorationOptimization + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAOptimizedQuantumGradientExplorationOptimization").set_name("LLAMAOptimizedQuantumGradientExplorationOptimization", register=True) except Exception as e: print("OptimizedQuantumGradientExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedQuantumHarmonySearch import OptimizedQuantumHarmonySearch lama_register["OptimizedQuantumHarmonySearch"] = OptimizedQuantumHarmonySearch - LLAMAOptimizedQuantumHarmonySearch = NonObjectOptimizer( - method="LLAMAOptimizedQuantumHarmonySearch" - ).set_name("LLAMAOptimizedQuantumHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAOptimizedQuantumHarmonySearch").set_name("LLAMAOptimizedQuantumHarmonySearch", register=True) except Exception as e: print("OptimizedQuantumHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedQuantumHybridDEPSO import OptimizedQuantumHybridDEPSO lama_register["OptimizedQuantumHybridDEPSO"] = OptimizedQuantumHybridDEPSO - LLAMAOptimizedQuantumHybridDEPSO = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO").set_name( - "LLAMAOptimizedQuantumHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumHybridDEPSO = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO").set_name("LLAMAOptimizedQuantumHybridDEPSO", register=True) except Exception as e: print("OptimizedQuantumHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedQuantumLevyDifferentialSearch import ( - OptimizedQuantumLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.OptimizedQuantumLevyDifferentialSearch import OptimizedQuantumLevyDifferentialSearch lama_register["OptimizedQuantumLevyDifferentialSearch"] = OptimizedQuantumLevyDifferentialSearch - LLAMAOptimizedQuantumLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMAOptimizedQuantumLevyDifferentialSearch" - ).set_name("LLAMAOptimizedQuantumLevyDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAOptimizedQuantumLevyDifferentialSearch").set_name("LLAMAOptimizedQuantumLevyDifferentialSearch", register=True) except Exception as e: print("OptimizedQuantumLevyDifferentialSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedRAMEDS import OptimizedRAMEDS lama_register["OptimizedRAMEDS"] = OptimizedRAMEDS - LLAMAOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS").set_name( - "LLAMAOptimizedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS").set_name("LLAMAOptimizedRAMEDS", register=True) except Exception as e: print("OptimizedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( - OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO, - ) + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO import OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO - lama_register["OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( - OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO - ) - LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( - method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO" - ).set_name("LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) + lama_register["OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) except Exception as e: print("OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveHybridSearch import ( - OptimizedRefinedAdaptiveHybridSearch, - ) + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveHybridSearch import OptimizedRefinedAdaptiveHybridSearch lama_register["OptimizedRefinedAdaptiveHybridSearch"] = OptimizedRefinedAdaptiveHybridSearch - LLAMAOptimizedRefinedAdaptiveHybridSearch = NonObjectOptimizer( - method="LLAMAOptimizedRefinedAdaptiveHybridSearch" - ).set_name("LLAMAOptimizedRefinedAdaptiveHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveHybridSearch").set_name("LLAMAOptimizedRefinedAdaptiveHybridSearch", register=True) except Exception as e: print("OptimizedRefinedAdaptiveHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveMultiStrategyDE import ( - OptimizedRefinedAdaptiveMultiStrategyDE, - ) + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveMultiStrategyDE import OptimizedRefinedAdaptiveMultiStrategyDE lama_register["OptimizedRefinedAdaptiveMultiStrategyDE"] = OptimizedRefinedAdaptiveMultiStrategyDE - LLAMAOptimizedRefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( - method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE" - ).set_name("LLAMAOptimizedRefinedAdaptiveMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE").set_name("LLAMAOptimizedRefinedAdaptiveMultiStrategyDE", register=True) except Exception as e: print("OptimizedRefinedAdaptiveMultiStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveRefinementPSO import ( - OptimizedRefinedAdaptiveRefinementPSO, - ) + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveRefinementPSO import OptimizedRefinedAdaptiveRefinementPSO lama_register["OptimizedRefinedAdaptiveRefinementPSO"] = OptimizedRefinedAdaptiveRefinementPSO - LLAMAOptimizedRefinedAdaptiveRefinementPSO = NonObjectOptimizer( - method="LLAMAOptimizedRefinedAdaptiveRefinementPSO" - ).set_name("LLAMAOptimizedRefinedAdaptiveRefinementPSO", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveRefinementPSO").set_name("LLAMAOptimizedRefinedAdaptiveRefinementPSO", register=True) except Exception as e: print("OptimizedRefinedAdaptiveRefinementPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.OptimizedRefinedEnhancedRAMEDSv5 import OptimizedRefinedEnhancedRAMEDSv5 lama_register["OptimizedRefinedEnhancedRAMEDSv5"] = OptimizedRefinedEnhancedRAMEDSv5 - LLAMAOptimizedRefinedEnhancedRAMEDSv5 = NonObjectOptimizer( - method="LLAMAOptimizedRefinedEnhancedRAMEDSv5" - ).set_name("LLAMAOptimizedRefinedEnhancedRAMEDSv5", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedEnhancedRAMEDSv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedEnhancedRAMEDSv5 = NonObjectOptimizer(method="LLAMAOptimizedRefinedEnhancedRAMEDSv5").set_name("LLAMAOptimizedRefinedEnhancedRAMEDSv5", register=True) except Exception as e: print("OptimizedRefinedEnhancedRAMEDSv5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedRefinedMemoryDualPhaseStrategyV65 import ( - OptimizedRefinedMemoryDualPhaseStrategyV65, - ) + from nevergrad.optimization.lama.OptimizedRefinedMemoryDualPhaseStrategyV65 import OptimizedRefinedMemoryDualPhaseStrategyV65 lama_register["OptimizedRefinedMemoryDualPhaseStrategyV65"] = OptimizedRefinedMemoryDualPhaseStrategyV65 - LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65 = NonObjectOptimizer( - method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65" - ).set_name("LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65", register=True) + res = NonObjectOptimizer(method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65 = NonObjectOptimizer(method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65").set_name("LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65", register=True) except Exception as e: print("OptimizedRefinedMemoryDualPhaseStrategyV65 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 import ( - OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45, - ) + from nevergrad.optimization.lama.OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 import OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 - lama_register["OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45"] = ( - OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 - ) - LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 = NonObjectOptimizer( - method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45" - ).set_name("LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45", register=True) + lama_register["OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45"] = OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 + res = NonObjectOptimizer(method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 = NonObjectOptimizer(method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45").set_name("LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45", register=True) except Exception as e: print("OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 can not be imported: ", e) - try: - from nevergrad.optimization.lama.OscillatoryCrossoverDifferentialEvolution import ( - OscillatoryCrossoverDifferentialEvolution, - ) + from nevergrad.optimization.lama.OscillatoryCrossoverDifferentialEvolution import OscillatoryCrossoverDifferentialEvolution lama_register["OscillatoryCrossoverDifferentialEvolution"] = OscillatoryCrossoverDifferentialEvolution - LLAMAOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( - method="LLAMAOscillatoryCrossoverDifferentialEvolution" - ).set_name("LLAMAOscillatoryCrossoverDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer(method="LLAMAOscillatoryCrossoverDifferentialEvolution").set_name("LLAMAOscillatoryCrossoverDifferentialEvolution", register=True) except Exception as e: print("OscillatoryCrossoverDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.PADE import PADE lama_register["PADE"] = PADE + res = NonObjectOptimizer(method="LLAMAPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPADE = NonObjectOptimizer(method="LLAMAPADE").set_name("LLAMAPADE", register=True) except Exception as e: print("PADE can not be imported: ", e) - try: from nevergrad.optimization.lama.PAMDMDESM import PAMDMDESM lama_register["PAMDMDESM"] = PAMDMDESM + res = NonObjectOptimizer(method="LLAMAPAMDMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPAMDMDESM = NonObjectOptimizer(method="LLAMAPAMDMDESM").set_name("LLAMAPAMDMDESM", register=True) except Exception as e: print("PAMDMDESM can not be imported: ", e) - try: from nevergrad.optimization.lama.PDEAF import PDEAF lama_register["PDEAF"] = PDEAF + res = NonObjectOptimizer(method="LLAMAPDEAF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPDEAF = NonObjectOptimizer(method="LLAMAPDEAF").set_name("LLAMAPDEAF", register=True) except Exception as e: print("PDEAF can not be imported: ", e) - try: from nevergrad.optimization.lama.PGDE import PGDE lama_register["PGDE"] = PGDE + res = NonObjectOptimizer(method="LLAMAPGDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPGDE = NonObjectOptimizer(method="LLAMAPGDE").set_name("LLAMAPGDE", register=True) except Exception as e: print("PGDE can not be imported: ", e) - try: from nevergrad.optimization.lama.PMFSA import PMFSA lama_register["PMFSA"] = PMFSA + res = NonObjectOptimizer(method="LLAMAPMFSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPMFSA = NonObjectOptimizer(method="LLAMAPMFSA").set_name("LLAMAPMFSA", register=True) except Exception as e: print("PMFSA can not be imported: ", e) - try: from nevergrad.optimization.lama.PPDE import PPDE lama_register["PPDE"] = PPDE + res = NonObjectOptimizer(method="LLAMAPPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPPDE = NonObjectOptimizer(method="LLAMAPPDE").set_name("LLAMAPPDE", register=True) except Exception as e: print("PPDE can not be imported: ", e) - try: from nevergrad.optimization.lama.PWDE import PWDE lama_register["PWDE"] = PWDE + res = NonObjectOptimizer(method="LLAMAPWDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAPWDE = NonObjectOptimizer(method="LLAMAPWDE").set_name("LLAMAPWDE", register=True) except Exception as e: print("PWDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimization import ( - PrecisionAdaptiveCohortOptimization, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimization import PrecisionAdaptiveCohortOptimization lama_register["PrecisionAdaptiveCohortOptimization"] = PrecisionAdaptiveCohortOptimization - LLAMAPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveCohortOptimization" - ).set_name("LLAMAPrecisionAdaptiveCohortOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimization").set_name("LLAMAPrecisionAdaptiveCohortOptimization", register=True) except Exception as e: print("PrecisionAdaptiveCohortOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimizationV2 import ( - PrecisionAdaptiveCohortOptimizationV2, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimizationV2 import PrecisionAdaptiveCohortOptimizationV2 lama_register["PrecisionAdaptiveCohortOptimizationV2"] = PrecisionAdaptiveCohortOptimizationV2 - LLAMAPrecisionAdaptiveCohortOptimizationV2 = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveCohortOptimizationV2" - ).set_name("LLAMAPrecisionAdaptiveCohortOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveCohortOptimizationV2 = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimizationV2").set_name("LLAMAPrecisionAdaptiveCohortOptimizationV2", register=True) except Exception as e: print("PrecisionAdaptiveCohortOptimizationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionAdaptiveDecayOptimizer import PrecisionAdaptiveDecayOptimizer lama_register["PrecisionAdaptiveDecayOptimizer"] = PrecisionAdaptiveDecayOptimizer - LLAMAPrecisionAdaptiveDecayOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveDecayOptimizer" - ).set_name("LLAMAPrecisionAdaptiveDecayOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDecayOptimizer").set_name("LLAMAPrecisionAdaptiveDecayOptimizer", register=True) except Exception as e: print("PrecisionAdaptiveDecayOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveDifferentialEvolutionPlus import ( - PrecisionAdaptiveDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveDifferentialEvolutionPlus import PrecisionAdaptiveDifferentialEvolutionPlus lama_register["PrecisionAdaptiveDifferentialEvolutionPlus"] = PrecisionAdaptiveDifferentialEvolutionPlus - LLAMAPrecisionAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus" - ).set_name("LLAMAPrecisionAdaptiveDifferentialEvolutionPlus", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus").set_name("LLAMAPrecisionAdaptiveDifferentialEvolutionPlus", register=True) except Exception as e: print("PrecisionAdaptiveDifferentialEvolutionPlus can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveDynamicStrategyV33 import ( - PrecisionAdaptiveDynamicStrategyV33, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveDynamicStrategyV33 import PrecisionAdaptiveDynamicStrategyV33 lama_register["PrecisionAdaptiveDynamicStrategyV33"] = PrecisionAdaptiveDynamicStrategyV33 - LLAMAPrecisionAdaptiveDynamicStrategyV33 = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveDynamicStrategyV33" - ).set_name("LLAMAPrecisionAdaptiveDynamicStrategyV33", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDynamicStrategyV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveDynamicStrategyV33 = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDynamicStrategyV33").set_name("LLAMAPrecisionAdaptiveDynamicStrategyV33", register=True) except Exception as e: print("PrecisionAdaptiveDynamicStrategyV33 can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveGlobalClimbingEnhancer import ( - PrecisionAdaptiveGlobalClimbingEnhancer, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveGlobalClimbingEnhancer import PrecisionAdaptiveGlobalClimbingEnhancer lama_register["PrecisionAdaptiveGlobalClimbingEnhancer"] = PrecisionAdaptiveGlobalClimbingEnhancer - LLAMAPrecisionAdaptiveGlobalClimbingEnhancer = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer" - ).set_name("LLAMAPrecisionAdaptiveGlobalClimbingEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveGlobalClimbingEnhancer = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer").set_name("LLAMAPrecisionAdaptiveGlobalClimbingEnhancer", register=True) except Exception as e: print("PrecisionAdaptiveGlobalClimbingEnhancer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionAdaptiveGradientClusteringPSO import ( - PrecisionAdaptiveGradientClusteringPSO, - ) + from nevergrad.optimization.lama.PrecisionAdaptiveGradientClusteringPSO import PrecisionAdaptiveGradientClusteringPSO lama_register["PrecisionAdaptiveGradientClusteringPSO"] = PrecisionAdaptiveGradientClusteringPSO - LLAMAPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( - method="LLAMAPrecisionAdaptiveGradientClusteringPSO" - ).set_name("LLAMAPrecisionAdaptiveGradientClusteringPSO", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGradientClusteringPSO").set_name("LLAMAPrecisionAdaptiveGradientClusteringPSO", register=True) except Exception as e: print("PrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionAdaptivePSO import PrecisionAdaptivePSO lama_register["PrecisionAdaptivePSO"] = PrecisionAdaptivePSO - LLAMAPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO").set_name( - "LLAMAPrecisionAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO").set_name("LLAMAPrecisionAdaptivePSO", register=True) except Exception as e: print("PrecisionAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionBalancedAdaptivePSO import PrecisionBalancedAdaptivePSO lama_register["PrecisionBalancedAdaptivePSO"] = PrecisionBalancedAdaptivePSO - LLAMAPrecisionBalancedAdaptivePSO = NonObjectOptimizer( - method="LLAMAPrecisionBalancedAdaptivePSO" - ).set_name("LLAMAPrecisionBalancedAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionBalancedAdaptivePSO").set_name("LLAMAPrecisionBalancedAdaptivePSO", register=True) except Exception as e: print("PrecisionBalancedAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionBalancedEvolutionStrategy import ( - PrecisionBalancedEvolutionStrategy, - ) + from nevergrad.optimization.lama.PrecisionBalancedEvolutionStrategy import PrecisionBalancedEvolutionStrategy lama_register["PrecisionBalancedEvolutionStrategy"] = PrecisionBalancedEvolutionStrategy - LLAMAPrecisionBalancedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAPrecisionBalancedEvolutionStrategy" - ).set_name("LLAMAPrecisionBalancedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionBalancedEvolutionStrategy").set_name("LLAMAPrecisionBalancedEvolutionStrategy", register=True) except Exception as e: print("PrecisionBalancedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionBalancedOptimizer import PrecisionBalancedOptimizer lama_register["PrecisionBalancedOptimizer"] = PrecisionBalancedOptimizer - LLAMAPrecisionBalancedOptimizer = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer").set_name( - "LLAMAPrecisionBalancedOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionBalancedOptimizer = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer").set_name("LLAMAPrecisionBalancedOptimizer", register=True) except Exception as e: print("PrecisionBalancedOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionBoostedDifferentialEvolution import ( - PrecisionBoostedDifferentialEvolution, - ) + from nevergrad.optimization.lama.PrecisionBoostedDifferentialEvolution import PrecisionBoostedDifferentialEvolution lama_register["PrecisionBoostedDifferentialEvolution"] = PrecisionBoostedDifferentialEvolution - LLAMAPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAPrecisionBoostedDifferentialEvolution" - ).set_name("LLAMAPrecisionBoostedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionBoostedDifferentialEvolution = NonObjectOptimizer(method="LLAMAPrecisionBoostedDifferentialEvolution").set_name("LLAMAPrecisionBoostedDifferentialEvolution", register=True) except Exception as e: print("PrecisionBoostedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionCosineAdaptiveDifferentialSwarm import ( - PrecisionCosineAdaptiveDifferentialSwarm, - ) + from nevergrad.optimization.lama.PrecisionCosineAdaptiveDifferentialSwarm import PrecisionCosineAdaptiveDifferentialSwarm lama_register["PrecisionCosineAdaptiveDifferentialSwarm"] = PrecisionCosineAdaptiveDifferentialSwarm - LLAMAPrecisionCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( - method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm" - ).set_name("LLAMAPrecisionCosineAdaptiveDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm").set_name("LLAMAPrecisionCosineAdaptiveDifferentialSwarm", register=True) except Exception as e: print("PrecisionCosineAdaptiveDifferentialSwarm can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionDifferentialEvolution import PrecisionDifferentialEvolution lama_register["PrecisionDifferentialEvolution"] = PrecisionDifferentialEvolution - LLAMAPrecisionDifferentialEvolution = NonObjectOptimizer( - method="LLAMAPrecisionDifferentialEvolution" - ).set_name("LLAMAPrecisionDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMAPrecisionDifferentialEvolution").set_name("LLAMAPrecisionDifferentialEvolution", register=True) except Exception as e: print("PrecisionDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionDynamicAdaptiveOptimizerV6 import ( - PrecisionDynamicAdaptiveOptimizerV6, - ) + from nevergrad.optimization.lama.PrecisionDynamicAdaptiveOptimizerV6 import PrecisionDynamicAdaptiveOptimizerV6 lama_register["PrecisionDynamicAdaptiveOptimizerV6"] = PrecisionDynamicAdaptiveOptimizerV6 - LLAMAPrecisionDynamicAdaptiveOptimizerV6 = NonObjectOptimizer( - method="LLAMAPrecisionDynamicAdaptiveOptimizerV6" - ).set_name("LLAMAPrecisionDynamicAdaptiveOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionDynamicAdaptiveOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionDynamicAdaptiveOptimizerV6 = NonObjectOptimizer(method="LLAMAPrecisionDynamicAdaptiveOptimizerV6").set_name("LLAMAPrecisionDynamicAdaptiveOptimizerV6", register=True) except Exception as e: print("PrecisionDynamicAdaptiveOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEnhancedDualStrategyOptimizer import ( - PrecisionEnhancedDualStrategyOptimizer, - ) + from nevergrad.optimization.lama.PrecisionEnhancedDualStrategyOptimizer import PrecisionEnhancedDualStrategyOptimizer lama_register["PrecisionEnhancedDualStrategyOptimizer"] = PrecisionEnhancedDualStrategyOptimizer - LLAMAPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionEnhancedDualStrategyOptimizer" - ).set_name("LLAMAPrecisionEnhancedDualStrategyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDualStrategyOptimizer").set_name("LLAMAPrecisionEnhancedDualStrategyOptimizer", register=True) except Exception as e: print("PrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEnhancedDynamicOptimizerV13 import ( - PrecisionEnhancedDynamicOptimizerV13, - ) + from nevergrad.optimization.lama.PrecisionEnhancedDynamicOptimizerV13 import PrecisionEnhancedDynamicOptimizerV13 lama_register["PrecisionEnhancedDynamicOptimizerV13"] = PrecisionEnhancedDynamicOptimizerV13 - LLAMAPrecisionEnhancedDynamicOptimizerV13 = NonObjectOptimizer( - method="LLAMAPrecisionEnhancedDynamicOptimizerV13" - ).set_name("LLAMAPrecisionEnhancedDynamicOptimizerV13", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDynamicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedDynamicOptimizerV13 = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDynamicOptimizerV13").set_name("LLAMAPrecisionEnhancedDynamicOptimizerV13", register=True) except Exception as e: print("PrecisionEnhancedDynamicOptimizerV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionEnhancedSearch import PrecisionEnhancedSearch lama_register["PrecisionEnhancedSearch"] = PrecisionEnhancedSearch - LLAMAPrecisionEnhancedSearch = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch").set_name( - "LLAMAPrecisionEnhancedSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedSearch = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch").set_name("LLAMAPrecisionEnhancedSearch", register=True) except Exception as e: print("PrecisionEnhancedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEnhancedSpatialAdaptiveEvolver import ( - PrecisionEnhancedSpatialAdaptiveEvolver, - ) + from nevergrad.optimization.lama.PrecisionEnhancedSpatialAdaptiveEvolver import PrecisionEnhancedSpatialAdaptiveEvolver lama_register["PrecisionEnhancedSpatialAdaptiveEvolver"] = PrecisionEnhancedSpatialAdaptiveEvolver - LLAMAPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver" - ).set_name("LLAMAPrecisionEnhancedSpatialAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver").set_name("LLAMAPrecisionEnhancedSpatialAdaptiveEvolver", register=True) except Exception as e: print("PrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEnhancedSpiralDifferentialClimberV4 import ( - PrecisionEnhancedSpiralDifferentialClimberV4, - ) + from nevergrad.optimization.lama.PrecisionEnhancedSpiralDifferentialClimberV4 import PrecisionEnhancedSpiralDifferentialClimberV4 - lama_register["PrecisionEnhancedSpiralDifferentialClimberV4"] = ( - PrecisionEnhancedSpiralDifferentialClimberV4 - ) - LLAMAPrecisionEnhancedSpiralDifferentialClimberV4 = NonObjectOptimizer( - method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4" - ).set_name("LLAMAPrecisionEnhancedSpiralDifferentialClimberV4", register=True) + lama_register["PrecisionEnhancedSpiralDifferentialClimberV4"] = PrecisionEnhancedSpiralDifferentialClimberV4 + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedSpiralDifferentialClimberV4 = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4").set_name("LLAMAPrecisionEnhancedSpiralDifferentialClimberV4", register=True) except Exception as e: print("PrecisionEnhancedSpiralDifferentialClimberV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEnhancedStrategicOptimizer import ( - PrecisionEnhancedStrategicOptimizer, - ) + from nevergrad.optimization.lama.PrecisionEnhancedStrategicOptimizer import PrecisionEnhancedStrategicOptimizer lama_register["PrecisionEnhancedStrategicOptimizer"] = PrecisionEnhancedStrategicOptimizer - LLAMAPrecisionEnhancedStrategicOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionEnhancedStrategicOptimizer" - ).set_name("LLAMAPrecisionEnhancedStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEnhancedStrategicOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEnhancedStrategicOptimizer").set_name("LLAMAPrecisionEnhancedStrategicOptimizer", register=True) except Exception as e: print("PrecisionEnhancedStrategicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionEvolutionaryThermalOptimizer import ( - PrecisionEvolutionaryThermalOptimizer, - ) + from nevergrad.optimization.lama.PrecisionEvolutionaryThermalOptimizer import PrecisionEvolutionaryThermalOptimizer lama_register["PrecisionEvolutionaryThermalOptimizer"] = PrecisionEvolutionaryThermalOptimizer - LLAMAPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionEvolutionaryThermalOptimizer" - ).set_name("LLAMAPrecisionEvolutionaryThermalOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAPrecisionEvolutionaryThermalOptimizer", register=True) except Exception as e: print("PrecisionEvolutionaryThermalOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionFocusedAdaptivePSO import PrecisionFocusedAdaptivePSO lama_register["PrecisionFocusedAdaptivePSO"] = PrecisionFocusedAdaptivePSO - LLAMAPrecisionFocusedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO").set_name( - "LLAMAPrecisionFocusedAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionFocusedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO").set_name("LLAMAPrecisionFocusedAdaptivePSO", register=True) except Exception as e: print("PrecisionFocusedAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionGuidedEvolutionStrategy import PrecisionGuidedEvolutionStrategy lama_register["PrecisionGuidedEvolutionStrategy"] = PrecisionGuidedEvolutionStrategy - LLAMAPrecisionGuidedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAPrecisionGuidedEvolutionStrategy" - ).set_name("LLAMAPrecisionGuidedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionStrategy").set_name("LLAMAPrecisionGuidedEvolutionStrategy", register=True) except Exception as e: print("PrecisionGuidedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionGuidedEvolutionaryAlgorithm import ( - PrecisionGuidedEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.PrecisionGuidedEvolutionaryAlgorithm import PrecisionGuidedEvolutionaryAlgorithm lama_register["PrecisionGuidedEvolutionaryAlgorithm"] = PrecisionGuidedEvolutionaryAlgorithm - LLAMAPrecisionGuidedEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAPrecisionGuidedEvolutionaryAlgorithm" - ).set_name("LLAMAPrecisionGuidedEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionGuidedEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionaryAlgorithm").set_name("LLAMAPrecisionGuidedEvolutionaryAlgorithm", register=True) except Exception as e: print("PrecisionGuidedEvolutionaryAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionGuidedQuantumStrategy import PrecisionGuidedQuantumStrategy lama_register["PrecisionGuidedQuantumStrategy"] = PrecisionGuidedQuantumStrategy - LLAMAPrecisionGuidedQuantumStrategy = NonObjectOptimizer( - method="LLAMAPrecisionGuidedQuantumStrategy" - ).set_name("LLAMAPrecisionGuidedQuantumStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionGuidedQuantumStrategy = NonObjectOptimizer(method="LLAMAPrecisionGuidedQuantumStrategy").set_name("LLAMAPrecisionGuidedQuantumStrategy", register=True) except Exception as e: print("PrecisionGuidedQuantumStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionIncrementalEvolutionStrategy import ( - PrecisionIncrementalEvolutionStrategy, - ) + from nevergrad.optimization.lama.PrecisionIncrementalEvolutionStrategy import PrecisionIncrementalEvolutionStrategy lama_register["PrecisionIncrementalEvolutionStrategy"] = PrecisionIncrementalEvolutionStrategy - LLAMAPrecisionIncrementalEvolutionStrategy = NonObjectOptimizer( - method="LLAMAPrecisionIncrementalEvolutionStrategy" - ).set_name("LLAMAPrecisionIncrementalEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionIncrementalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionIncrementalEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionIncrementalEvolutionStrategy").set_name("LLAMAPrecisionIncrementalEvolutionStrategy", register=True) except Exception as e: print("PrecisionIncrementalEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionOptimizedEvolutionaryOptimizerV22 import ( - PrecisionOptimizedEvolutionaryOptimizerV22, - ) + from nevergrad.optimization.lama.PrecisionOptimizedEvolutionaryOptimizerV22 import PrecisionOptimizedEvolutionaryOptimizerV22 lama_register["PrecisionOptimizedEvolutionaryOptimizerV22"] = PrecisionOptimizedEvolutionaryOptimizerV22 - LLAMAPrecisionOptimizedEvolutionaryOptimizerV22 = NonObjectOptimizer( - method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22" - ).set_name("LLAMAPrecisionOptimizedEvolutionaryOptimizerV22", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionOptimizedEvolutionaryOptimizerV22 = NonObjectOptimizer(method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22").set_name("LLAMAPrecisionOptimizedEvolutionaryOptimizerV22", register=True) except Exception as e: print("PrecisionOptimizedEvolutionaryOptimizerV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionRotationalClimbOptimizer import ( - PrecisionRotationalClimbOptimizer, - ) + from nevergrad.optimization.lama.PrecisionRotationalClimbOptimizer import PrecisionRotationalClimbOptimizer lama_register["PrecisionRotationalClimbOptimizer"] = PrecisionRotationalClimbOptimizer - LLAMAPrecisionRotationalClimbOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionRotationalClimbOptimizer" - ).set_name("LLAMAPrecisionRotationalClimbOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAPrecisionRotationalClimbOptimizer").set_name("LLAMAPrecisionRotationalClimbOptimizer", register=True) except Exception as e: print("PrecisionRotationalClimbOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionScaledEvolutionarySearch import ( - PrecisionScaledEvolutionarySearch, - ) + from nevergrad.optimization.lama.PrecisionScaledEvolutionarySearch import PrecisionScaledEvolutionarySearch lama_register["PrecisionScaledEvolutionarySearch"] = PrecisionScaledEvolutionarySearch - LLAMAPrecisionScaledEvolutionarySearch = NonObjectOptimizer( - method="LLAMAPrecisionScaledEvolutionarySearch" - ).set_name("LLAMAPrecisionScaledEvolutionarySearch", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionScaledEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionScaledEvolutionarySearch = NonObjectOptimizer(method="LLAMAPrecisionScaledEvolutionarySearch").set_name("LLAMAPrecisionScaledEvolutionarySearch", register=True) except Exception as e: print("PrecisionScaledEvolutionarySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionSpiralDifferentialOptimizerV6 import ( - PrecisionSpiralDifferentialOptimizerV6, - ) + from nevergrad.optimization.lama.PrecisionSpiralDifferentialOptimizerV6 import PrecisionSpiralDifferentialOptimizerV6 lama_register["PrecisionSpiralDifferentialOptimizerV6"] = PrecisionSpiralDifferentialOptimizerV6 - LLAMAPrecisionSpiralDifferentialOptimizerV6 = NonObjectOptimizer( - method="LLAMAPrecisionSpiralDifferentialOptimizerV6" - ).set_name("LLAMAPrecisionSpiralDifferentialOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionSpiralDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionSpiralDifferentialOptimizerV6 = NonObjectOptimizer(method="LLAMAPrecisionSpiralDifferentialOptimizerV6").set_name("LLAMAPrecisionSpiralDifferentialOptimizerV6", register=True) except Exception as e: print("PrecisionSpiralDifferentialOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionTunedCrossoverElitistStrategyV11 import ( - PrecisionTunedCrossoverElitistStrategyV11, - ) + from nevergrad.optimization.lama.PrecisionTunedCrossoverElitistStrategyV11 import PrecisionTunedCrossoverElitistStrategyV11 lama_register["PrecisionTunedCrossoverElitistStrategyV11"] = PrecisionTunedCrossoverElitistStrategyV11 - LLAMAPrecisionTunedCrossoverElitistStrategyV11 = NonObjectOptimizer( - method="LLAMAPrecisionTunedCrossoverElitistStrategyV11" - ).set_name("LLAMAPrecisionTunedCrossoverElitistStrategyV11", register=True) + res = NonObjectOptimizer(method="LLAMAPrecisionTunedCrossoverElitistStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionTunedCrossoverElitistStrategyV11 = NonObjectOptimizer(method="LLAMAPrecisionTunedCrossoverElitistStrategyV11").set_name("LLAMAPrecisionTunedCrossoverElitistStrategyV11", register=True) except Exception as e: print("PrecisionTunedCrossoverElitistStrategyV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionTunedEvolver import PrecisionTunedEvolver lama_register["PrecisionTunedEvolver"] = PrecisionTunedEvolver - LLAMAPrecisionTunedEvolver = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver").set_name( - "LLAMAPrecisionTunedEvolver", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionTunedEvolver = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver").set_name("LLAMAPrecisionTunedEvolver", register=True) except Exception as e: print("PrecisionTunedEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionTunedHybridSearch import PrecisionTunedHybridSearch lama_register["PrecisionTunedHybridSearch"] = PrecisionTunedHybridSearch - LLAMAPrecisionTunedHybridSearch = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch").set_name( - "LLAMAPrecisionTunedHybridSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionTunedHybridSearch = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch").set_name("LLAMAPrecisionTunedHybridSearch", register=True) except Exception as e: print("PrecisionTunedHybridSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.PrecisionTunedPSO import PrecisionTunedPSO lama_register["PrecisionTunedPSO"] = PrecisionTunedPSO - LLAMAPrecisionTunedPSO = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO").set_name( - "LLAMAPrecisionTunedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionTunedPSO = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO").set_name("LLAMAPrecisionTunedPSO", register=True) except Exception as e: print("PrecisionTunedPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.PrecisionTunedQuantumHarmonicFeedbackOptimizer import ( - PrecisionTunedQuantumHarmonicFeedbackOptimizer, - ) + from nevergrad.optimization.lama.PrecisionTunedQuantumHarmonicFeedbackOptimizer import PrecisionTunedQuantumHarmonicFeedbackOptimizer - lama_register["PrecisionTunedQuantumHarmonicFeedbackOptimizer"] = ( - PrecisionTunedQuantumHarmonicFeedbackOptimizer - ) - LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( - method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer" - ).set_name("LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer", register=True) + lama_register["PrecisionTunedQuantumHarmonicFeedbackOptimizer"] = PrecisionTunedQuantumHarmonicFeedbackOptimizer + res = NonObjectOptimizer(method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer").set_name("LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer", register=True) except Exception as e: print("PrecisionTunedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveAdaptiveDifferentialEvolution import ( - ProgressiveAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ProgressiveAdaptiveDifferentialEvolution import ProgressiveAdaptiveDifferentialEvolution lama_register["ProgressiveAdaptiveDifferentialEvolution"] = ProgressiveAdaptiveDifferentialEvolution - LLAMAProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAProgressiveAdaptiveDifferentialEvolution" - ).set_name("LLAMAProgressiveAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveDifferentialEvolution").set_name("LLAMAProgressiveAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveAdaptiveGlobalLocalSearch import ( - ProgressiveAdaptiveGlobalLocalSearch, - ) + from nevergrad.optimization.lama.ProgressiveAdaptiveGlobalLocalSearch import ProgressiveAdaptiveGlobalLocalSearch lama_register["ProgressiveAdaptiveGlobalLocalSearch"] = ProgressiveAdaptiveGlobalLocalSearch - LLAMAProgressiveAdaptiveGlobalLocalSearch = NonObjectOptimizer( - method="LLAMAProgressiveAdaptiveGlobalLocalSearch" - ).set_name("LLAMAProgressiveAdaptiveGlobalLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveAdaptiveGlobalLocalSearch = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveGlobalLocalSearch").set_name("LLAMAProgressiveAdaptiveGlobalLocalSearch", register=True) except Exception as e: print("ProgressiveAdaptiveGlobalLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveCohortDiversityOptimization import ( - ProgressiveCohortDiversityOptimization, - ) + from nevergrad.optimization.lama.ProgressiveCohortDiversityOptimization import ProgressiveCohortDiversityOptimization lama_register["ProgressiveCohortDiversityOptimization"] = ProgressiveCohortDiversityOptimization - LLAMAProgressiveCohortDiversityOptimization = NonObjectOptimizer( - method="LLAMAProgressiveCohortDiversityOptimization" - ).set_name("LLAMAProgressiveCohortDiversityOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveCohortDiversityOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveCohortDiversityOptimization = NonObjectOptimizer(method="LLAMAProgressiveCohortDiversityOptimization").set_name("LLAMAProgressiveCohortDiversityOptimization", register=True) except Exception as e: print("ProgressiveCohortDiversityOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.ProgressiveDimensionalOptimizer import ProgressiveDimensionalOptimizer lama_register["ProgressiveDimensionalOptimizer"] = ProgressiveDimensionalOptimizer - LLAMAProgressiveDimensionalOptimizer = NonObjectOptimizer( - method="LLAMAProgressiveDimensionalOptimizer" - ).set_name("LLAMAProgressiveDimensionalOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveDimensionalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveDimensionalOptimizer = NonObjectOptimizer(method="LLAMAProgressiveDimensionalOptimizer").set_name("LLAMAProgressiveDimensionalOptimizer", register=True) except Exception as e: print("ProgressiveDimensionalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveEvolutionaryFireworkAlgorithm import ( - ProgressiveEvolutionaryFireworkAlgorithm, - ) + from nevergrad.optimization.lama.ProgressiveEvolutionaryFireworkAlgorithm import ProgressiveEvolutionaryFireworkAlgorithm lama_register["ProgressiveEvolutionaryFireworkAlgorithm"] = ProgressiveEvolutionaryFireworkAlgorithm - LLAMAProgressiveEvolutionaryFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAProgressiveEvolutionaryFireworkAlgorithm" - ).set_name("LLAMAProgressiveEvolutionaryFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveEvolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveEvolutionaryFireworkAlgorithm = NonObjectOptimizer(method="LLAMAProgressiveEvolutionaryFireworkAlgorithm").set_name("LLAMAProgressiveEvolutionaryFireworkAlgorithm", register=True) except Exception as e: print("ProgressiveEvolutionaryFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveHybridAdaptiveDifferentialEvolution import ( - ProgressiveHybridAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.ProgressiveHybridAdaptiveDifferentialEvolution import ProgressiveHybridAdaptiveDifferentialEvolution - lama_register["ProgressiveHybridAdaptiveDifferentialEvolution"] = ( - ProgressiveHybridAdaptiveDifferentialEvolution - ) - LLAMAProgressiveHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution" - ).set_name("LLAMAProgressiveHybridAdaptiveDifferentialEvolution", register=True) + lama_register["ProgressiveHybridAdaptiveDifferentialEvolution"] = ProgressiveHybridAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution").set_name("LLAMAProgressiveHybridAdaptiveDifferentialEvolution", register=True) except Exception as e: print("ProgressiveHybridAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveParticleSwarmOptimization import ( - ProgressiveParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.ProgressiveParticleSwarmOptimization import ProgressiveParticleSwarmOptimization lama_register["ProgressiveParticleSwarmOptimization"] = ProgressiveParticleSwarmOptimization - LLAMAProgressiveParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAProgressiveParticleSwarmOptimization" - ).set_name("LLAMAProgressiveParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAProgressiveParticleSwarmOptimization").set_name("LLAMAProgressiveParticleSwarmOptimization", register=True) except Exception as e: print("ProgressiveParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressivePopulationRefinementStrategy import ( - ProgressivePopulationRefinementStrategy, - ) + from nevergrad.optimization.lama.ProgressivePopulationRefinementStrategy import ProgressivePopulationRefinementStrategy lama_register["ProgressivePopulationRefinementStrategy"] = ProgressivePopulationRefinementStrategy - LLAMAProgressivePopulationRefinementStrategy = NonObjectOptimizer( - method="LLAMAProgressivePopulationRefinementStrategy" - ).set_name("LLAMAProgressivePopulationRefinementStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAProgressivePopulationRefinementStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressivePopulationRefinementStrategy = NonObjectOptimizer(method="LLAMAProgressivePopulationRefinementStrategy").set_name("LLAMAProgressivePopulationRefinementStrategy", register=True) except Exception as e: print("ProgressivePopulationRefinementStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.ProgressiveQuorumEvolutionStrategy import ( - ProgressiveQuorumEvolutionStrategy, - ) + from nevergrad.optimization.lama.ProgressiveQuorumEvolutionStrategy import ProgressiveQuorumEvolutionStrategy lama_register["ProgressiveQuorumEvolutionStrategy"] = ProgressiveQuorumEvolutionStrategy - LLAMAProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( - method="LLAMAProgressiveQuorumEvolutionStrategy" - ).set_name("LLAMAProgressiveQuorumEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveQuorumEvolutionStrategy = NonObjectOptimizer(method="LLAMAProgressiveQuorumEvolutionStrategy").set_name("LLAMAProgressiveQuorumEvolutionStrategy", register=True) except Exception as e: print("ProgressiveQuorumEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.ProgressiveRefinementSearch import ProgressiveRefinementSearch lama_register["ProgressiveRefinementSearch"] = ProgressiveRefinementSearch - LLAMAProgressiveRefinementSearch = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch").set_name( - "LLAMAProgressiveRefinementSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAProgressiveRefinementSearch = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch").set_name("LLAMAProgressiveRefinementSearch", register=True) except Exception as e: print("ProgressiveRefinementSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSO import QAPSO lama_register["QAPSO"] = QAPSO + res = NonObjectOptimizer(method="LLAMAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQAPSO = NonObjectOptimizer(method="LLAMAQAPSO").set_name("LLAMAQAPSO", register=True) except Exception as e: print("QAPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSOAIR import QAPSOAIR lama_register["QAPSOAIR"] = QAPSOAIR + res = NonObjectOptimizer(method="LLAMAQAPSOAIR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQAPSOAIR = NonObjectOptimizer(method="LLAMAQAPSOAIR").set_name("LLAMAQAPSOAIR", register=True) except Exception as e: print("QAPSOAIR can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSOAIRVC import QAPSOAIRVC lama_register["QAPSOAIRVC"] = QAPSOAIRVC + res = NonObjectOptimizer(method="LLAMAQAPSOAIRVC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQAPSOAIRVC = NonObjectOptimizer(method="LLAMAQAPSOAIRVC").set_name("LLAMAQAPSOAIRVC", register=True) except Exception as e: print("QAPSOAIRVC can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSOAIRVCHR import QAPSOAIRVCHR lama_register["QAPSOAIRVCHR"] = QAPSOAIRVCHR - LLAMAQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR").set_name( - "LLAMAQAPSOAIRVCHR", register=True - ) + res = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR").set_name("LLAMAQAPSOAIRVCHR", register=True) except Exception as e: print("QAPSOAIRVCHR can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSOAIW import QAPSOAIW lama_register["QAPSOAIW"] = QAPSOAIW + res = NonObjectOptimizer(method="LLAMAQAPSOAIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQAPSOAIW = NonObjectOptimizer(method="LLAMAQAPSOAIW").set_name("LLAMAQAPSOAIW", register=True) except Exception as e: print("QAPSOAIW can not be imported: ", e) - try: from nevergrad.optimization.lama.QAPSOAIWRR import QAPSOAIWRR lama_register["QAPSOAIWRR"] = QAPSOAIWRR + res = NonObjectOptimizer(method="LLAMAQAPSOAIWRR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQAPSOAIWRR = NonObjectOptimizer(method="LLAMAQAPSOAIWRR").set_name("LLAMAQAPSOAIWRR", register=True) except Exception as e: print("QAPSOAIWRR can not be imported: ", e) - try: from nevergrad.optimization.lama.QPSO import QPSO lama_register["QPSO"] = QPSO + res = NonObjectOptimizer(method="LLAMAQPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAQPSO = NonObjectOptimizer(method="LLAMAQPSO").set_name("LLAMAQPSO", register=True) except Exception as e: print("QPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAcceleratedEvolutionStrategy import ( - QuantumAcceleratedEvolutionStrategy, - ) + from nevergrad.optimization.lama.QuantumAcceleratedEvolutionStrategy import QuantumAcceleratedEvolutionStrategy lama_register["QuantumAcceleratedEvolutionStrategy"] = QuantumAcceleratedEvolutionStrategy - LLAMAQuantumAcceleratedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumAcceleratedEvolutionStrategy" - ).set_name("LLAMAQuantumAcceleratedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAcceleratedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumAcceleratedEvolutionStrategy").set_name("LLAMAQuantumAcceleratedEvolutionStrategy", register=True) except Exception as e: print("QuantumAcceleratedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAcceleratedNesterovOptimizer import ( - QuantumAcceleratedNesterovOptimizer, - ) + from nevergrad.optimization.lama.QuantumAcceleratedNesterovOptimizer import QuantumAcceleratedNesterovOptimizer lama_register["QuantumAcceleratedNesterovOptimizer"] = QuantumAcceleratedNesterovOptimizer - LLAMAQuantumAcceleratedNesterovOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAcceleratedNesterovOptimizer" - ).set_name("LLAMAQuantumAcceleratedNesterovOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAcceleratedNesterovOptimizer = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovOptimizer").set_name("LLAMAQuantumAcceleratedNesterovOptimizer", register=True) except Exception as e: print("QuantumAcceleratedNesterovOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAcceleratedNesterovPlusOptimizer import ( - QuantumAcceleratedNesterovPlusOptimizer, - ) + from nevergrad.optimization.lama.QuantumAcceleratedNesterovPlusOptimizer import QuantumAcceleratedNesterovPlusOptimizer lama_register["QuantumAcceleratedNesterovPlusOptimizer"] = QuantumAcceleratedNesterovPlusOptimizer - LLAMAQuantumAcceleratedNesterovPlusOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAcceleratedNesterovPlusOptimizer" - ).set_name("LLAMAQuantumAcceleratedNesterovPlusOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovPlusOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAcceleratedNesterovPlusOptimizer = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovPlusOptimizer").set_name("LLAMAQuantumAcceleratedNesterovPlusOptimizer", register=True) except Exception as e: print("QuantumAcceleratedNesterovPlusOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV5 import ( - QuantumAdaptiveCognitionOptimizerV5, - ) + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV5 import QuantumAdaptiveCognitionOptimizerV5 lama_register["QuantumAdaptiveCognitionOptimizerV5"] = QuantumAdaptiveCognitionOptimizerV5 - LLAMAQuantumAdaptiveCognitionOptimizerV5 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveCognitionOptimizerV5" - ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveCognitionOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV5").set_name("LLAMAQuantumAdaptiveCognitionOptimizerV5", register=True) except Exception as e: print("QuantumAdaptiveCognitionOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV6 import ( - QuantumAdaptiveCognitionOptimizerV6, - ) + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV6 import QuantumAdaptiveCognitionOptimizerV6 lama_register["QuantumAdaptiveCognitionOptimizerV6"] = QuantumAdaptiveCognitionOptimizerV6 - LLAMAQuantumAdaptiveCognitionOptimizerV6 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveCognitionOptimizerV6" - ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveCognitionOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV6").set_name("LLAMAQuantumAdaptiveCognitionOptimizerV6", register=True) except Exception as e: print("QuantumAdaptiveCognitionOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveConvergenceOptimizer import ( - QuantumAdaptiveConvergenceOptimizer, - ) + from nevergrad.optimization.lama.QuantumAdaptiveConvergenceOptimizer import QuantumAdaptiveConvergenceOptimizer lama_register["QuantumAdaptiveConvergenceOptimizer"] = QuantumAdaptiveConvergenceOptimizer - LLAMAQuantumAdaptiveConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveConvergenceOptimizer" - ).set_name("LLAMAQuantumAdaptiveConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveConvergenceOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveConvergenceOptimizer").set_name("LLAMAQuantumAdaptiveConvergenceOptimizer", register=True) except Exception as e: print("QuantumAdaptiveConvergenceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveCrossoverRefinement import ( - QuantumAdaptiveCrossoverRefinement, - ) + from nevergrad.optimization.lama.QuantumAdaptiveCrossoverRefinement import QuantumAdaptiveCrossoverRefinement lama_register["QuantumAdaptiveCrossoverRefinement"] = QuantumAdaptiveCrossoverRefinement - LLAMAQuantumAdaptiveCrossoverRefinement = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveCrossoverRefinement" - ).set_name("LLAMAQuantumAdaptiveCrossoverRefinement", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCrossoverRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveCrossoverRefinement = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCrossoverRefinement").set_name("LLAMAQuantumAdaptiveCrossoverRefinement", register=True) except Exception as e: print("QuantumAdaptiveCrossoverRefinement can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( - QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - lama_register["QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( - QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - ) - LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" - ).set_name("LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) + lama_register["QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory").set_name("LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) except Exception as e: print("QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolution import ( - QuantumAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolution import QuantumAdaptiveDifferentialEvolution lama_register["QuantumAdaptiveDifferentialEvolution"] = QuantumAdaptiveDifferentialEvolution - LLAMAQuantumAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialEvolution" - ).set_name("LLAMAQuantumAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolution").set_name("LLAMAQuantumAdaptiveDifferentialEvolution", register=True) except Exception as e: print("QuantumAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV3 import ( - QuantumAdaptiveDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV3 import QuantumAdaptiveDifferentialEvolutionV3 lama_register["QuantumAdaptiveDifferentialEvolutionV3"] = QuantumAdaptiveDifferentialEvolutionV3 - LLAMAQuantumAdaptiveDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialEvolutionV3" - ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV3").set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV3", register=True) except Exception as e: print("QuantumAdaptiveDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV4 import ( - QuantumAdaptiveDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV4 import QuantumAdaptiveDifferentialEvolutionV4 lama_register["QuantumAdaptiveDifferentialEvolutionV4"] = QuantumAdaptiveDifferentialEvolutionV4 - LLAMAQuantumAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialEvolutionV4" - ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV4").set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV4", register=True) except Exception as e: print("QuantumAdaptiveDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV10 import ( - QuantumAdaptiveDifferentialStrategyV10, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV10 import QuantumAdaptiveDifferentialStrategyV10 lama_register["QuantumAdaptiveDifferentialStrategyV10"] = QuantumAdaptiveDifferentialStrategyV10 - LLAMAQuantumAdaptiveDifferentialStrategyV10 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialStrategyV10" - ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialStrategyV10 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV10").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV10", register=True) except Exception as e: print("QuantumAdaptiveDifferentialStrategyV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV11 import ( - QuantumAdaptiveDifferentialStrategyV11, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV11 import QuantumAdaptiveDifferentialStrategyV11 lama_register["QuantumAdaptiveDifferentialStrategyV11"] = QuantumAdaptiveDifferentialStrategyV11 - LLAMAQuantumAdaptiveDifferentialStrategyV11 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialStrategyV11" - ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV11", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialStrategyV11 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV11").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV11", register=True) except Exception as e: print("QuantumAdaptiveDifferentialStrategyV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV12 import ( - QuantumAdaptiveDifferentialStrategyV12, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV12 import QuantumAdaptiveDifferentialStrategyV12 lama_register["QuantumAdaptiveDifferentialStrategyV12"] = QuantumAdaptiveDifferentialStrategyV12 - LLAMAQuantumAdaptiveDifferentialStrategyV12 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDifferentialStrategyV12" - ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV12", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDifferentialStrategyV12 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV12").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV12", register=True) except Exception as e: print("QuantumAdaptiveDifferentialStrategyV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV11 import ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV11, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV11 import QuantumAdaptiveDiversifiedDynamicHybridSearchV11 - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV11"] = ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV11 - ) - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11" - ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11", register=True) + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV11"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV11 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedDynamicHybridSearchV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV12 import ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV12, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV12 import QuantumAdaptiveDiversifiedDynamicHybridSearchV12 - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV12"] = ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV12 - ) - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12" - ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12", register=True) + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV12"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV12 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedDynamicHybridSearchV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV13 import ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV13, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV13 import QuantumAdaptiveDiversifiedDynamicHybridSearchV13 - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV13"] = ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV13 - ) - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13" - ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13", register=True) + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV13"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV13 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedDynamicHybridSearchV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV14 import ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV14, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV14 import QuantumAdaptiveDiversifiedDynamicHybridSearchV14 - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV14"] = ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV14 - ) - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14" - ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14", register=True) + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV14"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV14 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedDynamicHybridSearchV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV15 import ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV15, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV15 import QuantumAdaptiveDiversifiedDynamicHybridSearchV15 - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV15"] = ( - QuantumAdaptiveDiversifiedDynamicHybridSearchV15 - ) - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15" - ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15", register=True) + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV15"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV15 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedDynamicHybridSearchV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedHybridSearchV10 import ( - QuantumAdaptiveDiversifiedHybridSearchV10, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedHybridSearchV10 import QuantumAdaptiveDiversifiedHybridSearchV10 lama_register["QuantumAdaptiveDiversifiedHybridSearchV10"] = QuantumAdaptiveDiversifiedHybridSearchV10 - LLAMAQuantumAdaptiveDiversifiedHybridSearchV10 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10" - ).set_name("LLAMAQuantumAdaptiveDiversifiedHybridSearchV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDiversifiedHybridSearchV10 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10").set_name("LLAMAQuantumAdaptiveDiversifiedHybridSearchV10", register=True) except Exception as e: print("QuantumAdaptiveDiversifiedHybridSearchV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExploration import ( - QuantumAdaptiveDynamicExploration, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExploration import QuantumAdaptiveDynamicExploration lama_register["QuantumAdaptiveDynamicExploration"] = QuantumAdaptiveDynamicExploration - LLAMAQuantumAdaptiveDynamicExploration = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExploration" - ).set_name("LLAMAQuantumAdaptiveDynamicExploration", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExploration = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExploration").set_name("LLAMAQuantumAdaptiveDynamicExploration", register=True) except Exception as e: print("QuantumAdaptiveDynamicExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV2 import ( - QuantumAdaptiveDynamicExplorationV2, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV2 import QuantumAdaptiveDynamicExplorationV2 lama_register["QuantumAdaptiveDynamicExplorationV2"] = QuantumAdaptiveDynamicExplorationV2 - LLAMAQuantumAdaptiveDynamicExplorationV2 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV2" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV2").set_name("LLAMAQuantumAdaptiveDynamicExplorationV2", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV3 import ( - QuantumAdaptiveDynamicExplorationV3, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV3 import QuantumAdaptiveDynamicExplorationV3 lama_register["QuantumAdaptiveDynamicExplorationV3"] = QuantumAdaptiveDynamicExplorationV3 - LLAMAQuantumAdaptiveDynamicExplorationV3 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV3" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV3").set_name("LLAMAQuantumAdaptiveDynamicExplorationV3", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV4 import ( - QuantumAdaptiveDynamicExplorationV4, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV4 import QuantumAdaptiveDynamicExplorationV4 lama_register["QuantumAdaptiveDynamicExplorationV4"] = QuantumAdaptiveDynamicExplorationV4 - LLAMAQuantumAdaptiveDynamicExplorationV4 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV4" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV4").set_name("LLAMAQuantumAdaptiveDynamicExplorationV4", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV5 import ( - QuantumAdaptiveDynamicExplorationV5, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV5 import QuantumAdaptiveDynamicExplorationV5 lama_register["QuantumAdaptiveDynamicExplorationV5"] = QuantumAdaptiveDynamicExplorationV5 - LLAMAQuantumAdaptiveDynamicExplorationV5 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV5" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV5 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV5").set_name("LLAMAQuantumAdaptiveDynamicExplorationV5", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV6 import ( - QuantumAdaptiveDynamicExplorationV6, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV6 import QuantumAdaptiveDynamicExplorationV6 lama_register["QuantumAdaptiveDynamicExplorationV6"] = QuantumAdaptiveDynamicExplorationV6 - LLAMAQuantumAdaptiveDynamicExplorationV6 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV6" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV6").set_name("LLAMAQuantumAdaptiveDynamicExplorationV6", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV7 import ( - QuantumAdaptiveDynamicExplorationV7, - ) + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV7 import QuantumAdaptiveDynamicExplorationV7 lama_register["QuantumAdaptiveDynamicExplorationV7"] = QuantumAdaptiveDynamicExplorationV7 - LLAMAQuantumAdaptiveDynamicExplorationV7 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicExplorationV7" - ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicExplorationV7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV7").set_name("LLAMAQuantumAdaptiveDynamicExplorationV7", register=True) except Exception as e: print("QuantumAdaptiveDynamicExplorationV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveDynamicStrategyV7 import QuantumAdaptiveDynamicStrategyV7 lama_register["QuantumAdaptiveDynamicStrategyV7"] = QuantumAdaptiveDynamicStrategyV7 - LLAMAQuantumAdaptiveDynamicStrategyV7 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveDynamicStrategyV7" - ).set_name("LLAMAQuantumAdaptiveDynamicStrategyV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveDynamicStrategyV7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicStrategyV7").set_name("LLAMAQuantumAdaptiveDynamicStrategyV7", register=True) except Exception as e: print("QuantumAdaptiveDynamicStrategyV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveEliteGuidedSearch import QuantumAdaptiveEliteGuidedSearch lama_register["QuantumAdaptiveEliteGuidedSearch"] = QuantumAdaptiveEliteGuidedSearch - LLAMAQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveEliteGuidedSearch" - ).set_name("LLAMAQuantumAdaptiveEliteGuidedSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveEliteGuidedSearch").set_name("LLAMAQuantumAdaptiveEliteGuidedSearch", register=True) except Exception as e: print("QuantumAdaptiveEliteGuidedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveFireworksOptimizer import ( - QuantumAdaptiveFireworksOptimizer, - ) + from nevergrad.optimization.lama.QuantumAdaptiveFireworksOptimizer import QuantumAdaptiveFireworksOptimizer lama_register["QuantumAdaptiveFireworksOptimizer"] = QuantumAdaptiveFireworksOptimizer - LLAMAQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveFireworksOptimizer" - ).set_name("LLAMAQuantumAdaptiveFireworksOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveFireworksOptimizer").set_name("LLAMAQuantumAdaptiveFireworksOptimizer", register=True) except Exception as e: print("QuantumAdaptiveFireworksOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveGradientDiversityExplorer import ( - QuantumAdaptiveGradientDiversityExplorer, - ) + from nevergrad.optimization.lama.QuantumAdaptiveGradientDiversityExplorer import QuantumAdaptiveGradientDiversityExplorer lama_register["QuantumAdaptiveGradientDiversityExplorer"] = QuantumAdaptiveGradientDiversityExplorer - LLAMAQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveGradientDiversityExplorer" - ).set_name("LLAMAQuantumAdaptiveGradientDiversityExplorer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientDiversityExplorer").set_name("LLAMAQuantumAdaptiveGradientDiversityExplorer", register=True) except Exception as e: print("QuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveGradientSearch import QuantumAdaptiveGradientSearch lama_register["QuantumAdaptiveGradientSearch"] = QuantumAdaptiveGradientSearch - LLAMAQuantumAdaptiveGradientSearch = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveGradientSearch" - ).set_name("LLAMAQuantumAdaptiveGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientSearch").set_name("LLAMAQuantumAdaptiveGradientSearch", register=True) except Exception as e: print("QuantumAdaptiveGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveHarmonicOptimizerV8 import ( - QuantumAdaptiveHarmonicOptimizerV8, - ) + from nevergrad.optimization.lama.QuantumAdaptiveHarmonicOptimizerV8 import QuantumAdaptiveHarmonicOptimizerV8 lama_register["QuantumAdaptiveHarmonicOptimizerV8"] = QuantumAdaptiveHarmonicOptimizerV8 - LLAMAQuantumAdaptiveHarmonicOptimizerV8 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveHarmonicOptimizerV8" - ).set_name("LLAMAQuantumAdaptiveHarmonicOptimizerV8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHarmonicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveHarmonicOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHarmonicOptimizerV8").set_name("LLAMAQuantumAdaptiveHarmonicOptimizerV8", register=True) except Exception as e: print("QuantumAdaptiveHarmonicOptimizerV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveHybridDEPSO_V7 import QuantumAdaptiveHybridDEPSO_V7 lama_register["QuantumAdaptiveHybridDEPSO_V7"] = QuantumAdaptiveHybridDEPSO_V7 - LLAMAQuantumAdaptiveHybridDEPSO_V7 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveHybridDEPSO_V7" - ).set_name("LLAMAQuantumAdaptiveHybridDEPSO_V7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridDEPSO_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveHybridDEPSO_V7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridDEPSO_V7").set_name("LLAMAQuantumAdaptiveHybridDEPSO_V7", register=True) except Exception as e: print("QuantumAdaptiveHybridDEPSO_V7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizer import QuantumAdaptiveHybridOptimizer lama_register["QuantumAdaptiveHybridOptimizer"] = QuantumAdaptiveHybridOptimizer - LLAMAQuantumAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveHybridOptimizer" - ).set_name("LLAMAQuantumAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizer").set_name("LLAMAQuantumAdaptiveHybridOptimizer", register=True) except Exception as e: print("QuantumAdaptiveHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizerV3 import QuantumAdaptiveHybridOptimizerV3 lama_register["QuantumAdaptiveHybridOptimizerV3"] = QuantumAdaptiveHybridOptimizerV3 - LLAMAQuantumAdaptiveHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveHybridOptimizerV3" - ).set_name("LLAMAQuantumAdaptiveHybridOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizerV3").set_name("LLAMAQuantumAdaptiveHybridOptimizerV3", register=True) except Exception as e: print("QuantumAdaptiveHybridOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveHybridStrategyV4 import QuantumAdaptiveHybridStrategyV4 lama_register["QuantumAdaptiveHybridStrategyV4"] = QuantumAdaptiveHybridStrategyV4 - LLAMAQuantumAdaptiveHybridStrategyV4 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveHybridStrategyV4" - ).set_name("LLAMAQuantumAdaptiveHybridStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveHybridStrategyV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridStrategyV4").set_name("LLAMAQuantumAdaptiveHybridStrategyV4", register=True) except Exception as e: print("QuantumAdaptiveHybridStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveLevyDifferentialSearch import ( - QuantumAdaptiveLevyDifferentialSearch, - ) + from nevergrad.optimization.lama.QuantumAdaptiveLevyDifferentialSearch import QuantumAdaptiveLevyDifferentialSearch lama_register["QuantumAdaptiveLevyDifferentialSearch"] = QuantumAdaptiveLevyDifferentialSearch - LLAMAQuantumAdaptiveLevyDifferentialSearch = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveLevyDifferentialSearch" - ).set_name("LLAMAQuantumAdaptiveLevyDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDifferentialSearch").set_name("LLAMAQuantumAdaptiveLevyDifferentialSearch", register=True) except Exception as e: print("QuantumAdaptiveLevyDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveLevyDynamicDifferentialSwarmV4 import ( - QuantumAdaptiveLevyDynamicDifferentialSwarmV4, - ) + from nevergrad.optimization.lama.QuantumAdaptiveLevyDynamicDifferentialSwarmV4 import QuantumAdaptiveLevyDynamicDifferentialSwarmV4 - lama_register["QuantumAdaptiveLevyDynamicDifferentialSwarmV4"] = ( - QuantumAdaptiveLevyDynamicDifferentialSwarmV4 - ) - LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4" - ).set_name("LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4", register=True) + lama_register["QuantumAdaptiveLevyDynamicDifferentialSwarmV4"] = QuantumAdaptiveLevyDynamicDifferentialSwarmV4 + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4").set_name("LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4", register=True) except Exception as e: print("QuantumAdaptiveLevyDynamicDifferentialSwarmV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveLevyMemeticSearch import QuantumAdaptiveLevyMemeticSearch lama_register["QuantumAdaptiveLevyMemeticSearch"] = QuantumAdaptiveLevyMemeticSearch - LLAMAQuantumAdaptiveLevyMemeticSearch = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveLevyMemeticSearch" - ).set_name("LLAMAQuantumAdaptiveLevyMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveLevyMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyMemeticSearch").set_name("LLAMAQuantumAdaptiveLevyMemeticSearch", register=True) except Exception as e: print("QuantumAdaptiveLevyMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveLevyOptimizer import QuantumAdaptiveLevyOptimizer lama_register["QuantumAdaptiveLevyOptimizer"] = QuantumAdaptiveLevyOptimizer - LLAMAQuantumAdaptiveLevyOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveLevyOptimizer" - ).set_name("LLAMAQuantumAdaptiveLevyOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveLevyOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyOptimizer").set_name("LLAMAQuantumAdaptiveLevyOptimizer", register=True) except Exception as e: print("QuantumAdaptiveLevyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveLevySwarmOptimizationV2 import ( - QuantumAdaptiveLevySwarmOptimizationV2, - ) + from nevergrad.optimization.lama.QuantumAdaptiveLevySwarmOptimizationV2 import QuantumAdaptiveLevySwarmOptimizationV2 lama_register["QuantumAdaptiveLevySwarmOptimizationV2"] = QuantumAdaptiveLevySwarmOptimizationV2 - LLAMAQuantumAdaptiveLevySwarmOptimizationV2 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2" - ).set_name("LLAMAQuantumAdaptiveLevySwarmOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveLevySwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2").set_name("LLAMAQuantumAdaptiveLevySwarmOptimizationV2", register=True) except Exception as e: print("QuantumAdaptiveLevySwarmOptimizationV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithm import QuantumAdaptiveMemeticAlgorithm lama_register["QuantumAdaptiveMemeticAlgorithm"] = QuantumAdaptiveMemeticAlgorithm - LLAMAQuantumAdaptiveMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMemeticAlgorithm" - ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithm").set_name("LLAMAQuantumAdaptiveMemeticAlgorithm", register=True) except Exception as e: print("QuantumAdaptiveMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithmV2 import ( - QuantumAdaptiveMemeticAlgorithmV2, - ) + from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithmV2 import QuantumAdaptiveMemeticAlgorithmV2 lama_register["QuantumAdaptiveMemeticAlgorithmV2"] = QuantumAdaptiveMemeticAlgorithmV2 - LLAMAQuantumAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMemeticAlgorithmV2" - ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithmV2").set_name("LLAMAQuantumAdaptiveMemeticAlgorithmV2", register=True) except Exception as e: print("QuantumAdaptiveMemeticAlgorithmV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveMemeticSearchV2 import QuantumAdaptiveMemeticSearchV2 lama_register["QuantumAdaptiveMemeticSearchV2"] = QuantumAdaptiveMemeticSearchV2 - LLAMAQuantumAdaptiveMemeticSearchV2 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMemeticSearchV2" - ).set_name("LLAMAQuantumAdaptiveMemeticSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMemeticSearchV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticSearchV2").set_name("LLAMAQuantumAdaptiveMemeticSearchV2", register=True) except Exception as e: print("QuantumAdaptiveMemeticSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveMultiPhaseDE_v6 import QuantumAdaptiveMultiPhaseDE_v6 lama_register["QuantumAdaptiveMultiPhaseDE_v6"] = QuantumAdaptiveMultiPhaseDE_v6 - LLAMAQuantumAdaptiveMultiPhaseDE_v6 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMultiPhaseDE_v6" - ).set_name("LLAMAQuantumAdaptiveMultiPhaseDE_v6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPhaseDE_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMultiPhaseDE_v6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPhaseDE_v6").set_name("LLAMAQuantumAdaptiveMultiPhaseDE_v6", register=True) except Exception as e: print("QuantumAdaptiveMultiPhaseDE_v6 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveMultiPopulationDE import QuantumAdaptiveMultiPopulationDE lama_register["QuantumAdaptiveMultiPopulationDE"] = QuantumAdaptiveMultiPopulationDE - LLAMAQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMultiPopulationDE" - ).set_name("LLAMAQuantumAdaptiveMultiPopulationDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPopulationDE").set_name("LLAMAQuantumAdaptiveMultiPopulationDE", register=True) except Exception as e: print("QuantumAdaptiveMultiPopulationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveMultiStrategyEvolution import ( - QuantumAdaptiveMultiStrategyEvolution, - ) + from nevergrad.optimization.lama.QuantumAdaptiveMultiStrategyEvolution import QuantumAdaptiveMultiStrategyEvolution lama_register["QuantumAdaptiveMultiStrategyEvolution"] = QuantumAdaptiveMultiStrategyEvolution - LLAMAQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveMultiStrategyEvolution" - ).set_name("LLAMAQuantumAdaptiveMultiStrategyEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiStrategyEvolution").set_name("LLAMAQuantumAdaptiveMultiStrategyEvolution", register=True) except Exception as e: print("QuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveNesterovGradientEnhancer import ( - QuantumAdaptiveNesterovGradientEnhancer, - ) + from nevergrad.optimization.lama.QuantumAdaptiveNesterovGradientEnhancer import QuantumAdaptiveNesterovGradientEnhancer lama_register["QuantumAdaptiveNesterovGradientEnhancer"] = QuantumAdaptiveNesterovGradientEnhancer - LLAMAQuantumAdaptiveNesterovGradientEnhancer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveNesterovGradientEnhancer" - ).set_name("LLAMAQuantumAdaptiveNesterovGradientEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovGradientEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveNesterovGradientEnhancer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovGradientEnhancer").set_name("LLAMAQuantumAdaptiveNesterovGradientEnhancer", register=True) except Exception as e: print("QuantumAdaptiveNesterovGradientEnhancer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveNesterovSynergy import QuantumAdaptiveNesterovSynergy lama_register["QuantumAdaptiveNesterovSynergy"] = QuantumAdaptiveNesterovSynergy - LLAMAQuantumAdaptiveNesterovSynergy = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveNesterovSynergy" - ).set_name("LLAMAQuantumAdaptiveNesterovSynergy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveNesterovSynergy = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovSynergy").set_name("LLAMAQuantumAdaptiveNesterovSynergy", register=True) except Exception as e: print("QuantumAdaptiveNesterovSynergy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementOptimizer import ( - QuantumAdaptiveRefinementOptimizer, - ) + from nevergrad.optimization.lama.QuantumAdaptiveRefinementOptimizer import QuantumAdaptiveRefinementOptimizer lama_register["QuantumAdaptiveRefinementOptimizer"] = QuantumAdaptiveRefinementOptimizer - LLAMAQuantumAdaptiveRefinementOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveRefinementOptimizer" - ).set_name("LLAMAQuantumAdaptiveRefinementOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementOptimizer").set_name("LLAMAQuantumAdaptiveRefinementOptimizer", register=True) except Exception as e: print("QuantumAdaptiveRefinementOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategy import ( - QuantumAdaptiveRefinementStrategy, - ) + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategy import QuantumAdaptiveRefinementStrategy lama_register["QuantumAdaptiveRefinementStrategy"] = QuantumAdaptiveRefinementStrategy - LLAMAQuantumAdaptiveRefinementStrategy = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveRefinementStrategy" - ).set_name("LLAMAQuantumAdaptiveRefinementStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveRefinementStrategy = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategy").set_name("LLAMAQuantumAdaptiveRefinementStrategy", register=True) except Exception as e: print("QuantumAdaptiveRefinementStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategyV2 import ( - QuantumAdaptiveRefinementStrategyV2, - ) + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategyV2 import QuantumAdaptiveRefinementStrategyV2 lama_register["QuantumAdaptiveRefinementStrategyV2"] = QuantumAdaptiveRefinementStrategyV2 - LLAMAQuantumAdaptiveRefinementStrategyV2 = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveRefinementStrategyV2" - ).set_name("LLAMAQuantumAdaptiveRefinementStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveRefinementStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategyV2").set_name("LLAMAQuantumAdaptiveRefinementStrategyV2", register=True) except Exception as e: print("QuantumAdaptiveRefinementStrategyV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveStrategicEnhancer import QuantumAdaptiveStrategicEnhancer lama_register["QuantumAdaptiveStrategicEnhancer"] = QuantumAdaptiveStrategicEnhancer - LLAMAQuantumAdaptiveStrategicEnhancer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveStrategicEnhancer" - ).set_name("LLAMAQuantumAdaptiveStrategicEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveStrategicEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveStrategicEnhancer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveStrategicEnhancer").set_name("LLAMAQuantumAdaptiveStrategicEnhancer", register=True) except Exception as e: print("QuantumAdaptiveStrategicEnhancer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAdaptiveVelocityOptimizer import QuantumAdaptiveVelocityOptimizer lama_register["QuantumAdaptiveVelocityOptimizer"] = QuantumAdaptiveVelocityOptimizer - LLAMAQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( - method="LLAMAQuantumAdaptiveVelocityOptimizer" - ).set_name("LLAMAQuantumAdaptiveVelocityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveVelocityOptimizer").set_name("LLAMAQuantumAdaptiveVelocityOptimizer", register=True) except Exception as e: print("QuantumAdaptiveVelocityOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumAnnealingDifferentialEvolution import ( - QuantumAnnealingDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumAnnealingDifferentialEvolution import QuantumAnnealingDifferentialEvolution lama_register["QuantumAnnealingDifferentialEvolution"] = QuantumAnnealingDifferentialEvolution - LLAMAQuantumAnnealingDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumAnnealingDifferentialEvolution" - ).set_name("LLAMAQuantumAnnealingDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAnnealingDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumAnnealingDifferentialEvolution").set_name("LLAMAQuantumAnnealingDifferentialEvolution", register=True) except Exception as e: print("QuantumAnnealingDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumAssistedHybridOptimizerV1 import QuantumAssistedHybridOptimizerV1 lama_register["QuantumAssistedHybridOptimizerV1"] = QuantumAssistedHybridOptimizerV1 - LLAMAQuantumAssistedHybridOptimizerV1 = NonObjectOptimizer( - method="LLAMAQuantumAssistedHybridOptimizerV1" - ).set_name("LLAMAQuantumAssistedHybridOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumAssistedHybridOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumAssistedHybridOptimizerV1 = NonObjectOptimizer(method="LLAMAQuantumAssistedHybridOptimizerV1").set_name("LLAMAQuantumAssistedHybridOptimizerV1", register=True) except Exception as e: print("QuantumAssistedHybridOptimizerV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumBalancedAdaptiveNesterovStrategy import ( - QuantumBalancedAdaptiveNesterovStrategy, - ) + from nevergrad.optimization.lama.QuantumBalancedAdaptiveNesterovStrategy import QuantumBalancedAdaptiveNesterovStrategy lama_register["QuantumBalancedAdaptiveNesterovStrategy"] = QuantumBalancedAdaptiveNesterovStrategy - LLAMAQuantumBalancedAdaptiveNesterovStrategy = NonObjectOptimizer( - method="LLAMAQuantumBalancedAdaptiveNesterovStrategy" - ).set_name("LLAMAQuantumBalancedAdaptiveNesterovStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumBalancedAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumBalancedAdaptiveNesterovStrategy = NonObjectOptimizer(method="LLAMAQuantumBalancedAdaptiveNesterovStrategy").set_name("LLAMAQuantumBalancedAdaptiveNesterovStrategy", register=True) except Exception as e: print("QuantumBalancedAdaptiveNesterovStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumBalancedEvolutionStrategy import QuantumBalancedEvolutionStrategy lama_register["QuantumBalancedEvolutionStrategy"] = QuantumBalancedEvolutionStrategy - LLAMAQuantumBalancedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumBalancedEvolutionStrategy" - ).set_name("LLAMAQuantumBalancedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumBalancedEvolutionStrategy").set_name("LLAMAQuantumBalancedEvolutionStrategy", register=True) except Exception as e: print("QuantumBalancedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancedOptimizerV16 import ( - QuantumCognitionAdaptiveEnhancedOptimizerV16, - ) + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancedOptimizerV16 import QuantumCognitionAdaptiveEnhancedOptimizerV16 - lama_register["QuantumCognitionAdaptiveEnhancedOptimizerV16"] = ( - QuantumCognitionAdaptiveEnhancedOptimizerV16 - ) - LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16 = NonObjectOptimizer( - method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16" - ).set_name("LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16", register=True) + lama_register["QuantumCognitionAdaptiveEnhancedOptimizerV16"] = QuantumCognitionAdaptiveEnhancedOptimizerV16 + res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16").set_name("LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16", register=True) except Exception as e: print("QuantumCognitionAdaptiveEnhancedOptimizerV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancerV8 import ( - QuantumCognitionAdaptiveEnhancerV8, - ) + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancerV8 import QuantumCognitionAdaptiveEnhancerV8 lama_register["QuantumCognitionAdaptiveEnhancerV8"] = QuantumCognitionAdaptiveEnhancerV8 - LLAMAQuantumCognitionAdaptiveEnhancerV8 = NonObjectOptimizer( - method="LLAMAQuantumCognitionAdaptiveEnhancerV8" - ).set_name("LLAMAQuantumCognitionAdaptiveEnhancerV8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionAdaptiveEnhancerV8 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancerV8").set_name("LLAMAQuantumCognitionAdaptiveEnhancerV8", register=True) except Exception as e: print("QuantumCognitionAdaptiveEnhancerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveTuningOptimizerV14 import ( - QuantumCognitionAdaptiveTuningOptimizerV14, - ) + from nevergrad.optimization.lama.QuantumCognitionAdaptiveTuningOptimizerV14 import QuantumCognitionAdaptiveTuningOptimizerV14 lama_register["QuantumCognitionAdaptiveTuningOptimizerV14"] = QuantumCognitionAdaptiveTuningOptimizerV14 - LLAMAQuantumCognitionAdaptiveTuningOptimizerV14 = NonObjectOptimizer( - method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14" - ).set_name("LLAMAQuantumCognitionAdaptiveTuningOptimizerV14", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionAdaptiveTuningOptimizerV14 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14").set_name("LLAMAQuantumCognitionAdaptiveTuningOptimizerV14", register=True) except Exception as e: print("QuantumCognitionAdaptiveTuningOptimizerV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionDynamicAdaptationOptimizerV30 import ( - QuantumCognitionDynamicAdaptationOptimizerV30, - ) + from nevergrad.optimization.lama.QuantumCognitionDynamicAdaptationOptimizerV30 import QuantumCognitionDynamicAdaptationOptimizerV30 - lama_register["QuantumCognitionDynamicAdaptationOptimizerV30"] = ( - QuantumCognitionDynamicAdaptationOptimizerV30 - ) - LLAMAQuantumCognitionDynamicAdaptationOptimizerV30 = NonObjectOptimizer( - method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30" - ).set_name("LLAMAQuantumCognitionDynamicAdaptationOptimizerV30", register=True) + lama_register["QuantumCognitionDynamicAdaptationOptimizerV30"] = QuantumCognitionDynamicAdaptationOptimizerV30 + res = NonObjectOptimizer(method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionDynamicAdaptationOptimizerV30 = NonObjectOptimizer(method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30").set_name("LLAMAQuantumCognitionDynamicAdaptationOptimizerV30", register=True) except Exception as e: print("QuantumCognitionDynamicAdaptationOptimizerV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionEnhancedOptimizerV7 import ( - QuantumCognitionEnhancedOptimizerV7, - ) + from nevergrad.optimization.lama.QuantumCognitionEnhancedOptimizerV7 import QuantumCognitionEnhancedOptimizerV7 lama_register["QuantumCognitionEnhancedOptimizerV7"] = QuantumCognitionEnhancedOptimizerV7 - LLAMAQuantumCognitionEnhancedOptimizerV7 = NonObjectOptimizer( - method="LLAMAQuantumCognitionEnhancedOptimizerV7" - ).set_name("LLAMAQuantumCognitionEnhancedOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionEnhancedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionEnhancedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumCognitionEnhancedOptimizerV7").set_name("LLAMAQuantumCognitionEnhancedOptimizerV7", register=True) except Exception as e: print("QuantumCognitionEnhancedOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionFocusedHybridOptimizerV21 import ( - QuantumCognitionFocusedHybridOptimizerV21, - ) + from nevergrad.optimization.lama.QuantumCognitionFocusedHybridOptimizerV21 import QuantumCognitionFocusedHybridOptimizerV21 lama_register["QuantumCognitionFocusedHybridOptimizerV21"] = QuantumCognitionFocusedHybridOptimizerV21 - LLAMAQuantumCognitionFocusedHybridOptimizerV21 = NonObjectOptimizer( - method="LLAMAQuantumCognitionFocusedHybridOptimizerV21" - ).set_name("LLAMAQuantumCognitionFocusedHybridOptimizerV21", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedHybridOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionFocusedHybridOptimizerV21 = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedHybridOptimizerV21").set_name("LLAMAQuantumCognitionFocusedHybridOptimizerV21", register=True) except Exception as e: print("QuantumCognitionFocusedHybridOptimizerV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionFocusedOptimizerV17 import ( - QuantumCognitionFocusedOptimizerV17, - ) + from nevergrad.optimization.lama.QuantumCognitionFocusedOptimizerV17 import QuantumCognitionFocusedOptimizerV17 lama_register["QuantumCognitionFocusedOptimizerV17"] = QuantumCognitionFocusedOptimizerV17 - LLAMAQuantumCognitionFocusedOptimizerV17 = NonObjectOptimizer( - method="LLAMAQuantumCognitionFocusedOptimizerV17" - ).set_name("LLAMAQuantumCognitionFocusedOptimizerV17", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionFocusedOptimizerV17 = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedOptimizerV17").set_name("LLAMAQuantumCognitionFocusedOptimizerV17", register=True) except Exception as e: print("QuantumCognitionFocusedOptimizerV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV19 import ( - QuantumCognitionHybridEvolutionaryOptimizerV19, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV19 import QuantumCognitionHybridEvolutionaryOptimizerV19 - lama_register["QuantumCognitionHybridEvolutionaryOptimizerV19"] = ( - QuantumCognitionHybridEvolutionaryOptimizerV19 - ) - LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19" - ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19", register=True) + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV19"] = QuantumCognitionHybridEvolutionaryOptimizerV19 + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19").set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19", register=True) except Exception as e: print("QuantumCognitionHybridEvolutionaryOptimizerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV20 import ( - QuantumCognitionHybridEvolutionaryOptimizerV20, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV20 import QuantumCognitionHybridEvolutionaryOptimizerV20 - lama_register["QuantumCognitionHybridEvolutionaryOptimizerV20"] = ( - QuantumCognitionHybridEvolutionaryOptimizerV20 - ) - LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20" - ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20", register=True) + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV20"] = QuantumCognitionHybridEvolutionaryOptimizerV20 + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20").set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20", register=True) except Exception as e: print("QuantumCognitionHybridEvolutionaryOptimizerV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV23 import ( - QuantumCognitionHybridOptimizerV23, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV23 import QuantumCognitionHybridOptimizerV23 lama_register["QuantumCognitionHybridOptimizerV23"] = QuantumCognitionHybridOptimizerV23 - LLAMAQuantumCognitionHybridOptimizerV23 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridOptimizerV23" - ).set_name("LLAMAQuantumCognitionHybridOptimizerV23", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridOptimizerV23 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV23").set_name("LLAMAQuantumCognitionHybridOptimizerV23", register=True) except Exception as e: print("QuantumCognitionHybridOptimizerV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV24 import ( - QuantumCognitionHybridOptimizerV24, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV24 import QuantumCognitionHybridOptimizerV24 lama_register["QuantumCognitionHybridOptimizerV24"] = QuantumCognitionHybridOptimizerV24 - LLAMAQuantumCognitionHybridOptimizerV24 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridOptimizerV24" - ).set_name("LLAMAQuantumCognitionHybridOptimizerV24", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridOptimizerV24 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV24").set_name("LLAMAQuantumCognitionHybridOptimizerV24", register=True) except Exception as e: print("QuantumCognitionHybridOptimizerV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV25 import ( - QuantumCognitionHybridOptimizerV25, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV25 import QuantumCognitionHybridOptimizerV25 lama_register["QuantumCognitionHybridOptimizerV25"] = QuantumCognitionHybridOptimizerV25 - LLAMAQuantumCognitionHybridOptimizerV25 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridOptimizerV25" - ).set_name("LLAMAQuantumCognitionHybridOptimizerV25", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridOptimizerV25 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV25").set_name("LLAMAQuantumCognitionHybridOptimizerV25", register=True) except Exception as e: print("QuantumCognitionHybridOptimizerV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV26 import ( - QuantumCognitionHybridOptimizerV26, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV26 import QuantumCognitionHybridOptimizerV26 lama_register["QuantumCognitionHybridOptimizerV26"] = QuantumCognitionHybridOptimizerV26 - LLAMAQuantumCognitionHybridOptimizerV26 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridOptimizerV26" - ).set_name("LLAMAQuantumCognitionHybridOptimizerV26", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridOptimizerV26 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV26").set_name("LLAMAQuantumCognitionHybridOptimizerV26", register=True) except Exception as e: print("QuantumCognitionHybridOptimizerV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV27 import ( - QuantumCognitionHybridOptimizerV27, - ) + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV27 import QuantumCognitionHybridOptimizerV27 lama_register["QuantumCognitionHybridOptimizerV27"] = QuantumCognitionHybridOptimizerV27 - LLAMAQuantumCognitionHybridOptimizerV27 = NonObjectOptimizer( - method="LLAMAQuantumCognitionHybridOptimizerV27" - ).set_name("LLAMAQuantumCognitionHybridOptimizerV27", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionHybridOptimizerV27 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV27").set_name("LLAMAQuantumCognitionHybridOptimizerV27", register=True) except Exception as e: print("QuantumCognitionHybridOptimizerV27 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumCognitionOptimizerV2 import QuantumCognitionOptimizerV2 lama_register["QuantumCognitionOptimizerV2"] = QuantumCognitionOptimizerV2 - LLAMAQuantumCognitionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2").set_name( - "LLAMAQuantumCognitionOptimizerV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2").set_name("LLAMAQuantumCognitionOptimizerV2", register=True) except Exception as e: print("QuantumCognitionOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitionTrajectoryOptimizerV28 import ( - QuantumCognitionTrajectoryOptimizerV28, - ) + from nevergrad.optimization.lama.QuantumCognitionTrajectoryOptimizerV28 import QuantumCognitionTrajectoryOptimizerV28 lama_register["QuantumCognitionTrajectoryOptimizerV28"] = QuantumCognitionTrajectoryOptimizerV28 - LLAMAQuantumCognitionTrajectoryOptimizerV28 = NonObjectOptimizer( - method="LLAMAQuantumCognitionTrajectoryOptimizerV28" - ).set_name("LLAMAQuantumCognitionTrajectoryOptimizerV28", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitionTrajectoryOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitionTrajectoryOptimizerV28 = NonObjectOptimizer(method="LLAMAQuantumCognitionTrajectoryOptimizerV28").set_name("LLAMAQuantumCognitionTrajectoryOptimizerV28", register=True) except Exception as e: print("QuantumCognitionTrajectoryOptimizerV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCognitiveAdaptiveOptimizer import ( - QuantumCognitiveAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.QuantumCognitiveAdaptiveOptimizer import QuantumCognitiveAdaptiveOptimizer lama_register["QuantumCognitiveAdaptiveOptimizer"] = QuantumCognitiveAdaptiveOptimizer - LLAMAQuantumCognitiveAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAQuantumCognitiveAdaptiveOptimizer" - ).set_name("LLAMAQuantumCognitiveAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCognitiveAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCognitiveAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumCognitiveAdaptiveOptimizer").set_name("LLAMAQuantumCognitiveAdaptiveOptimizer", register=True) except Exception as e: print("QuantumCognitiveAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumControlledDiversityStrategy import ( - QuantumControlledDiversityStrategy, - ) + from nevergrad.optimization.lama.QuantumControlledDiversityStrategy import QuantumControlledDiversityStrategy lama_register["QuantumControlledDiversityStrategy"] = QuantumControlledDiversityStrategy - LLAMAQuantumControlledDiversityStrategy = NonObjectOptimizer( - method="LLAMAQuantumControlledDiversityStrategy" - ).set_name("LLAMAQuantumControlledDiversityStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumControlledDiversityStrategy = NonObjectOptimizer(method="LLAMAQuantumControlledDiversityStrategy").set_name("LLAMAQuantumControlledDiversityStrategy", register=True) except Exception as e: print("QuantumControlledDiversityStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCooperativeCrossoverStrategy import ( - QuantumCooperativeCrossoverStrategy, - ) + from nevergrad.optimization.lama.QuantumCooperativeCrossoverStrategy import QuantumCooperativeCrossoverStrategy lama_register["QuantumCooperativeCrossoverStrategy"] = QuantumCooperativeCrossoverStrategy - LLAMAQuantumCooperativeCrossoverStrategy = NonObjectOptimizer( - method="LLAMAQuantumCooperativeCrossoverStrategy" - ).set_name("LLAMAQuantumCooperativeCrossoverStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumCooperativeCrossoverStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCooperativeCrossoverStrategy = NonObjectOptimizer(method="LLAMAQuantumCooperativeCrossoverStrategy").set_name("LLAMAQuantumCooperativeCrossoverStrategy", register=True) except Exception as e: print("QuantumCooperativeCrossoverStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolution import ( - QuantumCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolution import QuantumCovarianceMatrixDifferentialEvolution - lama_register["QuantumCovarianceMatrixDifferentialEvolution"] = ( - QuantumCovarianceMatrixDifferentialEvolution - ) - LLAMAQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolution", register=True) + lama_register["QuantumCovarianceMatrixDifferentialEvolution"] = QuantumCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("QuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( - QuantumCovarianceMatrixDifferentialEvolutionRefinedV2, - ) + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - lama_register["QuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( - QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - ) - LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( - method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" - ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) + lama_register["QuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2").set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) except Exception as e: print("QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch import ( - QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch - lama_register["QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch"] = ( - QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart import ( - QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart import QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart - lama_register["QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart"] = ( - QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart"] = QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch import ( - QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch - lama_register["QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch"] = ( - QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning import ( - QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning import QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning"] = ( - QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning"] = QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement import ( - QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement import QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement"] = ( - QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement"] = QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning import ( - QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning import QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning"] = ( - QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning - ) - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning" - ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning", register=True) + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning"] = QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning import ( - QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning import QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning - lama_register["QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning"] = ( - QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning - ) - LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning" - ).set_name( - "LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning", register=True - ) + lama_register["QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning"] = QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning", register=True) except Exception as e: - print( - "QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning can not be imported: ", e - ) - + print("QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning can not be imported: ", e) try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts import ( - QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts import QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts - lama_register["QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts"] = ( - QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts - ) - LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts" - ).set_name("LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts", register=True) + lama_register["QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts"] = QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch import ( - QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch import QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch - lama_register["QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch"] = ( - QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch - ) - LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch"] = QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicElitismAndRestarts import ( - QuantumDifferentialEvolutionWithDynamicElitismAndRestarts, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicElitismAndRestarts import QuantumDifferentialEvolutionWithDynamicElitismAndRestarts - lama_register["QuantumDifferentialEvolutionWithDynamicElitismAndRestarts"] = ( - QuantumDifferentialEvolutionWithDynamicElitismAndRestarts - ) - LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts" - ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts", register=True) + lama_register["QuantumDifferentialEvolutionWithDynamicElitismAndRestarts"] = QuantumDifferentialEvolutionWithDynamicElitismAndRestarts + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithDynamicElitismAndRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart import ( - QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart import QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart - lama_register["QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart"] = ( - QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart - ) - LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart" - ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart", register=True) + lama_register["QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart"] = QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEliteGuidance import ( - QuantumDifferentialEvolutionWithEliteGuidance, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEliteGuidance import QuantumDifferentialEvolutionWithEliteGuidance - lama_register["QuantumDifferentialEvolutionWithEliteGuidance"] = ( - QuantumDifferentialEvolutionWithEliteGuidance - ) - LLAMAQuantumDifferentialEvolutionWithEliteGuidance = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance" - ).set_name("LLAMAQuantumDifferentialEvolutionWithEliteGuidance", register=True) + lama_register["QuantumDifferentialEvolutionWithEliteGuidance"] = QuantumDifferentialEvolutionWithEliteGuidance + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithEliteGuidance = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance").set_name("LLAMAQuantumDifferentialEvolutionWithEliteGuidance", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithEliteGuidance can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitism import ( - QuantumDifferentialEvolutionWithElitism, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitism import QuantumDifferentialEvolutionWithElitism lama_register["QuantumDifferentialEvolutionWithElitism"] = QuantumDifferentialEvolutionWithElitism - LLAMAQuantumDifferentialEvolutionWithElitism = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithElitism" - ).set_name("LLAMAQuantumDifferentialEvolutionWithElitism", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithElitism = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitism").set_name("LLAMAQuantumDifferentialEvolutionWithElitism", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch import ( - QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch - lama_register["QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch"] = ( - QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch - ) - LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch import ( - QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch import QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch - lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch"] = ( - QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch - ) - LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch import ( - QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch import QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch - lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch"] = ( - QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch - ) - LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch" - ).set_name( - "LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch", register=True - ) + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch", register=True) except Exception as e: - print( - "QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch can not be imported: ", - e, - ) - + print("QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch can not be imported: ", e) try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch import ( - QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch import QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch - lama_register["QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch"] = ( - QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch - ) - LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts import ( - QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts import QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts - lama_register["QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts"] = ( - QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts - ) - LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts" - ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts", register=True) + lama_register["QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts"] = QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch import ( - QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch import QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch - lama_register["QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch"] = ( - QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch - ) - LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch" - ).set_name("LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch", register=True) + lama_register["QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch"] = QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithMultiStrategyLearning import ( - QuantumDifferentialEvolutionWithMultiStrategyLearning, - ) + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithMultiStrategyLearning import QuantumDifferentialEvolutionWithMultiStrategyLearning - lama_register["QuantumDifferentialEvolutionWithMultiStrategyLearning"] = ( - QuantumDifferentialEvolutionWithMultiStrategyLearning - ) - LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning = NonObjectOptimizer( - method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning" - ).set_name("LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning", register=True) + lama_register["QuantumDifferentialEvolutionWithMultiStrategyLearning"] = QuantumDifferentialEvolutionWithMultiStrategyLearning + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning").set_name("LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning", register=True) except Exception as e: print("QuantumDifferentialEvolutionWithMultiStrategyLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithAdaptiveRestarts import ( - QuantumDifferentialParticleOptimizerWithAdaptiveRestarts, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithAdaptiveRestarts import QuantumDifferentialParticleOptimizerWithAdaptiveRestarts - lama_register["QuantumDifferentialParticleOptimizerWithAdaptiveRestarts"] = ( - QuantumDifferentialParticleOptimizerWithAdaptiveRestarts - ) - LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts" - ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts", register=True) + lama_register["QuantumDifferentialParticleOptimizerWithAdaptiveRestarts"] = QuantumDifferentialParticleOptimizerWithAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts").set_name("LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts", register=True) except Exception as e: print("QuantumDifferentialParticleOptimizerWithAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteGuidedMutation import ( - QuantumDifferentialParticleOptimizerWithEliteGuidedMutation, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteGuidedMutation import QuantumDifferentialParticleOptimizerWithEliteGuidedMutation - lama_register["QuantumDifferentialParticleOptimizerWithEliteGuidedMutation"] = ( - QuantumDifferentialParticleOptimizerWithEliteGuidedMutation - ) - LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation" - ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation", register=True) + lama_register["QuantumDifferentialParticleOptimizerWithEliteGuidedMutation"] = QuantumDifferentialParticleOptimizerWithEliteGuidedMutation + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation", register=True) except Exception as e: print("QuantumDifferentialParticleOptimizerWithEliteGuidedMutation can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteRefinement import ( - QuantumDifferentialParticleOptimizerWithEliteRefinement, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteRefinement import QuantumDifferentialParticleOptimizerWithEliteRefinement - lama_register["QuantumDifferentialParticleOptimizerWithEliteRefinement"] = ( - QuantumDifferentialParticleOptimizerWithEliteRefinement - ) - LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement" - ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement", register=True) + lama_register["QuantumDifferentialParticleOptimizerWithEliteRefinement"] = QuantumDifferentialParticleOptimizerWithEliteRefinement + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement", register=True) except Exception as e: print("QuantumDifferentialParticleOptimizerWithEliteRefinement can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithElitism import ( - QuantumDifferentialParticleOptimizerWithElitism, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithElitism import QuantumDifferentialParticleOptimizerWithElitism - lama_register["QuantumDifferentialParticleOptimizerWithElitism"] = ( - QuantumDifferentialParticleOptimizerWithElitism - ) - LLAMAQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleOptimizerWithElitism" - ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithElitism", register=True) + lama_register["QuantumDifferentialParticleOptimizerWithElitism"] = QuantumDifferentialParticleOptimizerWithElitism + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithElitism").set_name("LLAMAQuantumDifferentialParticleOptimizerWithElitism", register=True) except Exception as e: print("QuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts import ( - QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts import QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts - lama_register["QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts"] = ( - QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts - ) - LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts" - ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts", register=True) + lama_register["QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts"] = QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts", register=True) except Exception as e: print("QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDifferentialParticleSwarmRefinement import ( - QuantumDifferentialParticleSwarmRefinement, - ) + from nevergrad.optimization.lama.QuantumDifferentialParticleSwarmRefinement import QuantumDifferentialParticleSwarmRefinement lama_register["QuantumDifferentialParticleSwarmRefinement"] = QuantumDifferentialParticleSwarmRefinement - LLAMAQuantumDifferentialParticleSwarmRefinement = NonObjectOptimizer( - method="LLAMAQuantumDifferentialParticleSwarmRefinement" - ).set_name("LLAMAQuantumDifferentialParticleSwarmRefinement", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleSwarmRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDifferentialParticleSwarmRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleSwarmRefinement").set_name("LLAMAQuantumDifferentialParticleSwarmRefinement", register=True) except Exception as e: print("QuantumDifferentialParticleSwarmRefinement can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalAcceleratorV19 import QuantumDirectionalAcceleratorV19 lama_register["QuantumDirectionalAcceleratorV19"] = QuantumDirectionalAcceleratorV19 - LLAMAQuantumDirectionalAcceleratorV19 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalAcceleratorV19" - ).set_name("LLAMAQuantumDirectionalAcceleratorV19", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalAcceleratorV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalAcceleratorV19 = NonObjectOptimizer(method="LLAMAQuantumDirectionalAcceleratorV19").set_name("LLAMAQuantumDirectionalAcceleratorV19", register=True) except Exception as e: print("QuantumDirectionalAcceleratorV19 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancer import QuantumDirectionalEnhancer lama_register["QuantumDirectionalEnhancer"] = QuantumDirectionalEnhancer - LLAMAQuantumDirectionalEnhancer = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer").set_name( - "LLAMAQuantumDirectionalEnhancer", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancer = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer").set_name("LLAMAQuantumDirectionalEnhancer", register=True) except Exception as e: print("QuantumDirectionalEnhancer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV10 import QuantumDirectionalEnhancerV10 lama_register["QuantumDirectionalEnhancerV10"] = QuantumDirectionalEnhancerV10 - LLAMAQuantumDirectionalEnhancerV10 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV10" - ).set_name("LLAMAQuantumDirectionalEnhancerV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV10 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV10").set_name("LLAMAQuantumDirectionalEnhancerV10", register=True) except Exception as e: print("QuantumDirectionalEnhancerV10 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV11 import QuantumDirectionalEnhancerV11 lama_register["QuantumDirectionalEnhancerV11"] = QuantumDirectionalEnhancerV11 - LLAMAQuantumDirectionalEnhancerV11 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV11" - ).set_name("LLAMAQuantumDirectionalEnhancerV11", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV11 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV11").set_name("LLAMAQuantumDirectionalEnhancerV11", register=True) except Exception as e: print("QuantumDirectionalEnhancerV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV12 import QuantumDirectionalEnhancerV12 lama_register["QuantumDirectionalEnhancerV12"] = QuantumDirectionalEnhancerV12 - LLAMAQuantumDirectionalEnhancerV12 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV12" - ).set_name("LLAMAQuantumDirectionalEnhancerV12", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV12 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV12").set_name("LLAMAQuantumDirectionalEnhancerV12", register=True) except Exception as e: print("QuantumDirectionalEnhancerV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV13 import QuantumDirectionalEnhancerV13 lama_register["QuantumDirectionalEnhancerV13"] = QuantumDirectionalEnhancerV13 - LLAMAQuantumDirectionalEnhancerV13 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV13" - ).set_name("LLAMAQuantumDirectionalEnhancerV13", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV13 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV13").set_name("LLAMAQuantumDirectionalEnhancerV13", register=True) except Exception as e: print("QuantumDirectionalEnhancerV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV14 import QuantumDirectionalEnhancerV14 lama_register["QuantumDirectionalEnhancerV14"] = QuantumDirectionalEnhancerV14 - LLAMAQuantumDirectionalEnhancerV14 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV14" - ).set_name("LLAMAQuantumDirectionalEnhancerV14", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV14 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV14").set_name("LLAMAQuantumDirectionalEnhancerV14", register=True) except Exception as e: print("QuantumDirectionalEnhancerV14 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV15 import QuantumDirectionalEnhancerV15 lama_register["QuantumDirectionalEnhancerV15"] = QuantumDirectionalEnhancerV15 - LLAMAQuantumDirectionalEnhancerV15 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV15" - ).set_name("LLAMAQuantumDirectionalEnhancerV15", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV15 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV15").set_name("LLAMAQuantumDirectionalEnhancerV15", register=True) except Exception as e: print("QuantumDirectionalEnhancerV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV16 import QuantumDirectionalEnhancerV16 lama_register["QuantumDirectionalEnhancerV16"] = QuantumDirectionalEnhancerV16 - LLAMAQuantumDirectionalEnhancerV16 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV16" - ).set_name("LLAMAQuantumDirectionalEnhancerV16", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV16 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV16").set_name("LLAMAQuantumDirectionalEnhancerV16", register=True) except Exception as e: print("QuantumDirectionalEnhancerV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV17 import QuantumDirectionalEnhancerV17 lama_register["QuantumDirectionalEnhancerV17"] = QuantumDirectionalEnhancerV17 - LLAMAQuantumDirectionalEnhancerV17 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV17" - ).set_name("LLAMAQuantumDirectionalEnhancerV17", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV17 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV17").set_name("LLAMAQuantumDirectionalEnhancerV17", register=True) except Exception as e: print("QuantumDirectionalEnhancerV17 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV18 import QuantumDirectionalEnhancerV18 lama_register["QuantumDirectionalEnhancerV18"] = QuantumDirectionalEnhancerV18 - LLAMAQuantumDirectionalEnhancerV18 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV18" - ).set_name("LLAMAQuantumDirectionalEnhancerV18", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV18 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV18").set_name("LLAMAQuantumDirectionalEnhancerV18", register=True) except Exception as e: print("QuantumDirectionalEnhancerV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV2 import QuantumDirectionalEnhancerV2 lama_register["QuantumDirectionalEnhancerV2"] = QuantumDirectionalEnhancerV2 - LLAMAQuantumDirectionalEnhancerV2 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV2" - ).set_name("LLAMAQuantumDirectionalEnhancerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV2 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV2").set_name("LLAMAQuantumDirectionalEnhancerV2", register=True) except Exception as e: print("QuantumDirectionalEnhancerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV3 import QuantumDirectionalEnhancerV3 lama_register["QuantumDirectionalEnhancerV3"] = QuantumDirectionalEnhancerV3 - LLAMAQuantumDirectionalEnhancerV3 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV3" - ).set_name("LLAMAQuantumDirectionalEnhancerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV3 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV3").set_name("LLAMAQuantumDirectionalEnhancerV3", register=True) except Exception as e: print("QuantumDirectionalEnhancerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV4 import QuantumDirectionalEnhancerV4 lama_register["QuantumDirectionalEnhancerV4"] = QuantumDirectionalEnhancerV4 - LLAMAQuantumDirectionalEnhancerV4 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV4" - ).set_name("LLAMAQuantumDirectionalEnhancerV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV4 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV4").set_name("LLAMAQuantumDirectionalEnhancerV4", register=True) except Exception as e: print("QuantumDirectionalEnhancerV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV5 import QuantumDirectionalEnhancerV5 lama_register["QuantumDirectionalEnhancerV5"] = QuantumDirectionalEnhancerV5 - LLAMAQuantumDirectionalEnhancerV5 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV5" - ).set_name("LLAMAQuantumDirectionalEnhancerV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV5 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV5").set_name("LLAMAQuantumDirectionalEnhancerV5", register=True) except Exception as e: print("QuantumDirectionalEnhancerV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV6 import QuantumDirectionalEnhancerV6 lama_register["QuantumDirectionalEnhancerV6"] = QuantumDirectionalEnhancerV6 - LLAMAQuantumDirectionalEnhancerV6 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV6" - ).set_name("LLAMAQuantumDirectionalEnhancerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV6 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV6").set_name("LLAMAQuantumDirectionalEnhancerV6", register=True) except Exception as e: print("QuantumDirectionalEnhancerV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV7 import QuantumDirectionalEnhancerV7 lama_register["QuantumDirectionalEnhancerV7"] = QuantumDirectionalEnhancerV7 - LLAMAQuantumDirectionalEnhancerV7 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV7" - ).set_name("LLAMAQuantumDirectionalEnhancerV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV7 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV7").set_name("LLAMAQuantumDirectionalEnhancerV7", register=True) except Exception as e: print("QuantumDirectionalEnhancerV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV8 import QuantumDirectionalEnhancerV8 lama_register["QuantumDirectionalEnhancerV8"] = QuantumDirectionalEnhancerV8 - LLAMAQuantumDirectionalEnhancerV8 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV8" - ).set_name("LLAMAQuantumDirectionalEnhancerV8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV8 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV8").set_name("LLAMAQuantumDirectionalEnhancerV8", register=True) except Exception as e: print("QuantumDirectionalEnhancerV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalEnhancerV9 import QuantumDirectionalEnhancerV9 lama_register["QuantumDirectionalEnhancerV9"] = QuantumDirectionalEnhancerV9 - LLAMAQuantumDirectionalEnhancerV9 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalEnhancerV9" - ).set_name("LLAMAQuantumDirectionalEnhancerV9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalEnhancerV9 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV9").set_name("LLAMAQuantumDirectionalEnhancerV9", register=True) except Exception as e: print("QuantumDirectionalEnhancerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizer import ( - QuantumDirectionalFusionOptimizer, - ) + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizer import QuantumDirectionalFusionOptimizer lama_register["QuantumDirectionalFusionOptimizer"] = QuantumDirectionalFusionOptimizer - LLAMAQuantumDirectionalFusionOptimizer = NonObjectOptimizer( - method="LLAMAQuantumDirectionalFusionOptimizer" - ).set_name("LLAMAQuantumDirectionalFusionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalFusionOptimizer = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizer").set_name("LLAMAQuantumDirectionalFusionOptimizer", register=True) except Exception as e: print("QuantumDirectionalFusionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizerV2 import ( - QuantumDirectionalFusionOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizerV2 import QuantumDirectionalFusionOptimizerV2 lama_register["QuantumDirectionalFusionOptimizerV2"] = QuantumDirectionalFusionOptimizerV2 - LLAMAQuantumDirectionalFusionOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalFusionOptimizerV2" - ).set_name("LLAMAQuantumDirectionalFusionOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalFusionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizerV2").set_name("LLAMAQuantumDirectionalFusionOptimizerV2", register=True) except Exception as e: print("QuantumDirectionalFusionOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV20 import QuantumDirectionalRefinerV20 lama_register["QuantumDirectionalRefinerV20"] = QuantumDirectionalRefinerV20 - LLAMAQuantumDirectionalRefinerV20 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV20" - ).set_name("LLAMAQuantumDirectionalRefinerV20", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV20 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV20").set_name("LLAMAQuantumDirectionalRefinerV20", register=True) except Exception as e: print("QuantumDirectionalRefinerV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV21 import QuantumDirectionalRefinerV21 lama_register["QuantumDirectionalRefinerV21"] = QuantumDirectionalRefinerV21 - LLAMAQuantumDirectionalRefinerV21 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV21" - ).set_name("LLAMAQuantumDirectionalRefinerV21", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV21 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV21").set_name("LLAMAQuantumDirectionalRefinerV21", register=True) except Exception as e: print("QuantumDirectionalRefinerV21 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV22 import QuantumDirectionalRefinerV22 lama_register["QuantumDirectionalRefinerV22"] = QuantumDirectionalRefinerV22 - LLAMAQuantumDirectionalRefinerV22 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV22" - ).set_name("LLAMAQuantumDirectionalRefinerV22", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV22 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV22").set_name("LLAMAQuantumDirectionalRefinerV22", register=True) except Exception as e: print("QuantumDirectionalRefinerV22 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV23 import QuantumDirectionalRefinerV23 lama_register["QuantumDirectionalRefinerV23"] = QuantumDirectionalRefinerV23 - LLAMAQuantumDirectionalRefinerV23 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV23" - ).set_name("LLAMAQuantumDirectionalRefinerV23", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV23 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV23").set_name("LLAMAQuantumDirectionalRefinerV23", register=True) except Exception as e: print("QuantumDirectionalRefinerV23 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV24 import QuantumDirectionalRefinerV24 lama_register["QuantumDirectionalRefinerV24"] = QuantumDirectionalRefinerV24 - LLAMAQuantumDirectionalRefinerV24 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV24" - ).set_name("LLAMAQuantumDirectionalRefinerV24", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV24 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV24").set_name("LLAMAQuantumDirectionalRefinerV24", register=True) except Exception as e: print("QuantumDirectionalRefinerV24 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV25 import QuantumDirectionalRefinerV25 lama_register["QuantumDirectionalRefinerV25"] = QuantumDirectionalRefinerV25 - LLAMAQuantumDirectionalRefinerV25 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV25" - ).set_name("LLAMAQuantumDirectionalRefinerV25", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV25 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV25").set_name("LLAMAQuantumDirectionalRefinerV25", register=True) except Exception as e: print("QuantumDirectionalRefinerV25 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV26 import QuantumDirectionalRefinerV26 lama_register["QuantumDirectionalRefinerV26"] = QuantumDirectionalRefinerV26 - LLAMAQuantumDirectionalRefinerV26 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV26" - ).set_name("LLAMAQuantumDirectionalRefinerV26", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV26 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV26").set_name("LLAMAQuantumDirectionalRefinerV26", register=True) except Exception as e: print("QuantumDirectionalRefinerV26 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV27 import QuantumDirectionalRefinerV27 lama_register["QuantumDirectionalRefinerV27"] = QuantumDirectionalRefinerV27 - LLAMAQuantumDirectionalRefinerV27 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV27" - ).set_name("LLAMAQuantumDirectionalRefinerV27", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV27 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV27").set_name("LLAMAQuantumDirectionalRefinerV27", register=True) except Exception as e: print("QuantumDirectionalRefinerV27 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV28 import QuantumDirectionalRefinerV28 lama_register["QuantumDirectionalRefinerV28"] = QuantumDirectionalRefinerV28 - LLAMAQuantumDirectionalRefinerV28 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV28" - ).set_name("LLAMAQuantumDirectionalRefinerV28", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV28 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV28").set_name("LLAMAQuantumDirectionalRefinerV28", register=True) except Exception as e: print("QuantumDirectionalRefinerV28 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV29 import QuantumDirectionalRefinerV29 lama_register["QuantumDirectionalRefinerV29"] = QuantumDirectionalRefinerV29 - LLAMAQuantumDirectionalRefinerV29 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV29" - ).set_name("LLAMAQuantumDirectionalRefinerV29", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV29 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV29").set_name("LLAMAQuantumDirectionalRefinerV29", register=True) except Exception as e: print("QuantumDirectionalRefinerV29 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV30 import QuantumDirectionalRefinerV30 lama_register["QuantumDirectionalRefinerV30"] = QuantumDirectionalRefinerV30 - LLAMAQuantumDirectionalRefinerV30 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV30" - ).set_name("LLAMAQuantumDirectionalRefinerV30", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV30 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV30").set_name("LLAMAQuantumDirectionalRefinerV30", register=True) except Exception as e: print("QuantumDirectionalRefinerV30 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV31 import QuantumDirectionalRefinerV31 lama_register["QuantumDirectionalRefinerV31"] = QuantumDirectionalRefinerV31 - LLAMAQuantumDirectionalRefinerV31 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV31" - ).set_name("LLAMAQuantumDirectionalRefinerV31", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV31 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV31").set_name("LLAMAQuantumDirectionalRefinerV31", register=True) except Exception as e: print("QuantumDirectionalRefinerV31 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV32 import QuantumDirectionalRefinerV32 lama_register["QuantumDirectionalRefinerV32"] = QuantumDirectionalRefinerV32 - LLAMAQuantumDirectionalRefinerV32 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV32" - ).set_name("LLAMAQuantumDirectionalRefinerV32", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV32 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV32").set_name("LLAMAQuantumDirectionalRefinerV32", register=True) except Exception as e: print("QuantumDirectionalRefinerV32 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDirectionalRefinerV33 import QuantumDirectionalRefinerV33 lama_register["QuantumDirectionalRefinerV33"] = QuantumDirectionalRefinerV33 - LLAMAQuantumDirectionalRefinerV33 = NonObjectOptimizer( - method="LLAMAQuantumDirectionalRefinerV33" - ).set_name("LLAMAQuantumDirectionalRefinerV33", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDirectionalRefinerV33 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV33").set_name("LLAMAQuantumDirectionalRefinerV33", register=True) except Exception as e: print("QuantumDirectionalRefinerV33 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDualStrategyAdaptiveDE import QuantumDualStrategyAdaptiveDE lama_register["QuantumDualStrategyAdaptiveDE"] = QuantumDualStrategyAdaptiveDE - LLAMAQuantumDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMAQuantumDualStrategyAdaptiveDE" - ).set_name("LLAMAQuantumDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumDualStrategyAdaptiveDE").set_name("LLAMAQuantumDualStrategyAdaptiveDE", register=True) except Exception as e: print("QuantumDualStrategyAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDynamicAdaptationStrategy import QuantumDynamicAdaptationStrategy lama_register["QuantumDynamicAdaptationStrategy"] = QuantumDynamicAdaptationStrategy - LLAMAQuantumDynamicAdaptationStrategy = NonObjectOptimizer( - method="LLAMAQuantumDynamicAdaptationStrategy" - ).set_name("LLAMAQuantumDynamicAdaptationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicAdaptationStrategy = NonObjectOptimizer(method="LLAMAQuantumDynamicAdaptationStrategy").set_name("LLAMAQuantumDynamicAdaptationStrategy", register=True) except Exception as e: print("QuantumDynamicAdaptationStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDynamicBalanceOptimizer import QuantumDynamicBalanceOptimizer lama_register["QuantumDynamicBalanceOptimizer"] = QuantumDynamicBalanceOptimizer - LLAMAQuantumDynamicBalanceOptimizer = NonObjectOptimizer( - method="LLAMAQuantumDynamicBalanceOptimizer" - ).set_name("LLAMAQuantumDynamicBalanceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAQuantumDynamicBalanceOptimizer").set_name("LLAMAQuantumDynamicBalanceOptimizer", register=True) except Exception as e: print("QuantumDynamicBalanceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDynamicBalancedOptimizerV7 import ( - QuantumDynamicBalancedOptimizerV7, - ) + from nevergrad.optimization.lama.QuantumDynamicBalancedOptimizerV7 import QuantumDynamicBalancedOptimizerV7 lama_register["QuantumDynamicBalancedOptimizerV7"] = QuantumDynamicBalancedOptimizerV7 - LLAMAQuantumDynamicBalancedOptimizerV7 = NonObjectOptimizer( - method="LLAMAQuantumDynamicBalancedOptimizerV7" - ).set_name("LLAMAQuantumDynamicBalancedOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalancedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicBalancedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumDynamicBalancedOptimizerV7").set_name("LLAMAQuantumDynamicBalancedOptimizerV7", register=True) except Exception as e: print("QuantumDynamicBalancedOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDynamicExplorationOptimizerV6 import ( - QuantumDynamicExplorationOptimizerV6, - ) + from nevergrad.optimization.lama.QuantumDynamicExplorationOptimizerV6 import QuantumDynamicExplorationOptimizerV6 lama_register["QuantumDynamicExplorationOptimizerV6"] = QuantumDynamicExplorationOptimizerV6 - LLAMAQuantumDynamicExplorationOptimizerV6 = NonObjectOptimizer( - method="LLAMAQuantumDynamicExplorationOptimizerV6" - ).set_name("LLAMAQuantumDynamicExplorationOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicExplorationOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicExplorationOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumDynamicExplorationOptimizerV6").set_name("LLAMAQuantumDynamicExplorationOptimizerV6", register=True) except Exception as e: print("QuantumDynamicExplorationOptimizerV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDynamicGradientClimberV2 import QuantumDynamicGradientClimberV2 lama_register["QuantumDynamicGradientClimberV2"] = QuantumDynamicGradientClimberV2 - LLAMAQuantumDynamicGradientClimberV2 = NonObjectOptimizer( - method="LLAMAQuantumDynamicGradientClimberV2" - ).set_name("LLAMAQuantumDynamicGradientClimberV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicGradientClimberV2 = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV2").set_name("LLAMAQuantumDynamicGradientClimberV2", register=True) except Exception as e: print("QuantumDynamicGradientClimberV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumDynamicGradientClimberV3 import QuantumDynamicGradientClimberV3 lama_register["QuantumDynamicGradientClimberV3"] = QuantumDynamicGradientClimberV3 - LLAMAQuantumDynamicGradientClimberV3 = NonObjectOptimizer( - method="LLAMAQuantumDynamicGradientClimberV3" - ).set_name("LLAMAQuantumDynamicGradientClimberV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicGradientClimberV3 = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV3").set_name("LLAMAQuantumDynamicGradientClimberV3", register=True) except Exception as e: print("QuantumDynamicGradientClimberV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumDynamicallyAdaptiveFireworksAlgorithm import ( - QuantumDynamicallyAdaptiveFireworksAlgorithm, - ) + from nevergrad.optimization.lama.QuantumDynamicallyAdaptiveFireworksAlgorithm import QuantumDynamicallyAdaptiveFireworksAlgorithm - lama_register["QuantumDynamicallyAdaptiveFireworksAlgorithm"] = ( - QuantumDynamicallyAdaptiveFireworksAlgorithm - ) - LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm" - ).set_name("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm", register=True) + lama_register["QuantumDynamicallyAdaptiveFireworksAlgorithm"] = QuantumDynamicallyAdaptiveFireworksAlgorithm + res = NonObjectOptimizer(method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm").set_name("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm", register=True) except Exception as e: print("QuantumDynamicallyAdaptiveFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEliteMemeticAdaptiveSearch import ( - QuantumEliteMemeticAdaptiveSearch, - ) + from nevergrad.optimization.lama.QuantumEliteMemeticAdaptiveSearch import QuantumEliteMemeticAdaptiveSearch lama_register["QuantumEliteMemeticAdaptiveSearch"] = QuantumEliteMemeticAdaptiveSearch - LLAMAQuantumEliteMemeticAdaptiveSearch = NonObjectOptimizer( - method="LLAMAQuantumEliteMemeticAdaptiveSearch" - ).set_name("LLAMAQuantumEliteMemeticAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEliteMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEliteMemeticAdaptiveSearch = NonObjectOptimizer(method="LLAMAQuantumEliteMemeticAdaptiveSearch").set_name("LLAMAQuantumEliteMemeticAdaptiveSearch", register=True) except Exception as e: print("QuantumEliteMemeticAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v4 import ( - QuantumEnhancedAdaptiveDifferentialEvolution_v4, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v4 import QuantumEnhancedAdaptiveDifferentialEvolution_v4 - lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v4"] = ( - QuantumEnhancedAdaptiveDifferentialEvolution_v4 - ) - LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4" - ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4", register=True) + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v4"] = QuantumEnhancedAdaptiveDifferentialEvolution_v4 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4").set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4", register=True) except Exception as e: print("QuantumEnhancedAdaptiveDifferentialEvolution_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v5 import ( - QuantumEnhancedAdaptiveDifferentialEvolution_v5, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v5 import QuantumEnhancedAdaptiveDifferentialEvolution_v5 - lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v5"] = ( - QuantumEnhancedAdaptiveDifferentialEvolution_v5 - ) - LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5" - ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5", register=True) + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v5"] = QuantumEnhancedAdaptiveDifferentialEvolution_v5 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5").set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5", register=True) except Exception as e: print("QuantumEnhancedAdaptiveDifferentialEvolution_v5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDiversityStrategyV6 import ( - QuantumEnhancedAdaptiveDiversityStrategyV6, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDiversityStrategyV6 import QuantumEnhancedAdaptiveDiversityStrategyV6 lama_register["QuantumEnhancedAdaptiveDiversityStrategyV6"] = QuantumEnhancedAdaptiveDiversityStrategyV6 - LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6" - ).set_name("LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6").set_name("LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6", register=True) except Exception as e: print("QuantumEnhancedAdaptiveDiversityStrategyV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDualStrategyDE import ( - QuantumEnhancedAdaptiveDualStrategyDE, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDualStrategyDE import QuantumEnhancedAdaptiveDualStrategyDE lama_register["QuantumEnhancedAdaptiveDualStrategyDE"] = QuantumEnhancedAdaptiveDualStrategyDE - LLAMAQuantumEnhancedAdaptiveDualStrategyDE = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE" - ).set_name("LLAMAQuantumEnhancedAdaptiveDualStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveDualStrategyDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE").set_name("LLAMAQuantumEnhancedAdaptiveDualStrategyDE", register=True) except Exception as e: print("QuantumEnhancedAdaptiveDualStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveExplorationOptimization import ( - QuantumEnhancedAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveExplorationOptimization import QuantumEnhancedAdaptiveExplorationOptimization - lama_register["QuantumEnhancedAdaptiveExplorationOptimization"] = ( - QuantumEnhancedAdaptiveExplorationOptimization - ) - LLAMAQuantumEnhancedAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization" - ).set_name("LLAMAQuantumEnhancedAdaptiveExplorationOptimization", register=True) + lama_register["QuantumEnhancedAdaptiveExplorationOptimization"] = QuantumEnhancedAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization").set_name("LLAMAQuantumEnhancedAdaptiveExplorationOptimization", register=True) except Exception as e: print("QuantumEnhancedAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE import ( - QuantumEnhancedAdaptiveMultiPhaseDE, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE import QuantumEnhancedAdaptiveMultiPhaseDE lama_register["QuantumEnhancedAdaptiveMultiPhaseDE"] = QuantumEnhancedAdaptiveMultiPhaseDE - LLAMAQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE" - ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE").set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE", register=True) except Exception as e: print("QuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE_v7 import ( - QuantumEnhancedAdaptiveMultiPhaseDE_v7, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE_v7 import QuantumEnhancedAdaptiveMultiPhaseDE_v7 lama_register["QuantumEnhancedAdaptiveMultiPhaseDE_v7"] = QuantumEnhancedAdaptiveMultiPhaseDE_v7 - LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7" - ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7").set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7", register=True) except Exception as e: print("QuantumEnhancedAdaptiveMultiPhaseDE_v7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedAdaptiveOptimizer import QuantumEnhancedAdaptiveOptimizer lama_register["QuantumEnhancedAdaptiveOptimizer"] = QuantumEnhancedAdaptiveOptimizer - LLAMAQuantumEnhancedAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveOptimizer" - ).set_name("LLAMAQuantumEnhancedAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveOptimizer").set_name("LLAMAQuantumEnhancedAdaptiveOptimizer", register=True) except Exception as e: print("QuantumEnhancedAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveSwarmOptimization import ( - QuantumEnhancedAdaptiveSwarmOptimization, - ) + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveSwarmOptimization import QuantumEnhancedAdaptiveSwarmOptimization lama_register["QuantumEnhancedAdaptiveSwarmOptimization"] = QuantumEnhancedAdaptiveSwarmOptimization - LLAMAQuantumEnhancedAdaptiveSwarmOptimization = NonObjectOptimizer( - method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization" - ).set_name("LLAMAQuantumEnhancedAdaptiveSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedAdaptiveSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization").set_name("LLAMAQuantumEnhancedAdaptiveSwarmOptimization", register=True) except Exception as e: print("QuantumEnhancedAdaptiveSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolution import ( - QuantumEnhancedDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolution import QuantumEnhancedDifferentialEvolution lama_register["QuantumEnhancedDifferentialEvolution"] = QuantumEnhancedDifferentialEvolution - LLAMAQuantumEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDifferentialEvolution" - ).set_name("LLAMAQuantumEnhancedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolution").set_name("LLAMAQuantumEnhancedDifferentialEvolution", register=True) except Exception as e: print("QuantumEnhancedDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart import ( - QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart, - ) + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart import QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart - lama_register["QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart"] = ( - QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart - ) - LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart" - ).set_name("LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart", register=True) + lama_register["QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart"] = QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart").set_name("LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart", register=True) except Exception as e: print("QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDiversityExplorerV8 import ( - QuantumEnhancedDiversityExplorerV8, - ) + from nevergrad.optimization.lama.QuantumEnhancedDiversityExplorerV8 import QuantumEnhancedDiversityExplorerV8 lama_register["QuantumEnhancedDiversityExplorerV8"] = QuantumEnhancedDiversityExplorerV8 - LLAMAQuantumEnhancedDiversityExplorerV8 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDiversityExplorerV8" - ).set_name("LLAMAQuantumEnhancedDiversityExplorerV8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDiversityExplorerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDiversityExplorerV8 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDiversityExplorerV8").set_name("LLAMAQuantumEnhancedDiversityExplorerV8", register=True) except Exception as e: print("QuantumEnhancedDiversityExplorerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO import ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO import QuantumEnhancedDynamicAdaptiveHybridDEPSO lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO" - ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO", register=True) except Exception as e: print("QuantumEnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 import ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2"] = ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 - ) - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2" - ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2", register=True) + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2", register=True) except Exception as e: print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 import ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3"] = ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 - ) - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3" - ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3", register=True) + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3", register=True) except Exception as e: print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 import ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4"] = ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 - ) - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4" - ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4", register=True) + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4", register=True) except Exception as e: print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 import ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5"] = ( - QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 - ) - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5" - ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5", register=True) + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5", register=True) except Exception as e: print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution import ( - QuantumEnhancedDynamicDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution import QuantumEnhancedDynamicDifferentialEvolution lama_register["QuantumEnhancedDynamicDifferentialEvolution"] = QuantumEnhancedDynamicDifferentialEvolution - LLAMAQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicDifferentialEvolution" - ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution", register=True) except Exception as e: print("QuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v2 import ( - QuantumEnhancedDynamicDifferentialEvolution_v2, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v2 import QuantumEnhancedDynamicDifferentialEvolution_v2 - lama_register["QuantumEnhancedDynamicDifferentialEvolution_v2"] = ( - QuantumEnhancedDynamicDifferentialEvolution_v2 - ) - LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2" - ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2", register=True) + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v2"] = QuantumEnhancedDynamicDifferentialEvolution_v2 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2", register=True) except Exception as e: print("QuantumEnhancedDynamicDifferentialEvolution_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v3 import ( - QuantumEnhancedDynamicDifferentialEvolution_v3, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v3 import QuantumEnhancedDynamicDifferentialEvolution_v3 - lama_register["QuantumEnhancedDynamicDifferentialEvolution_v3"] = ( - QuantumEnhancedDynamicDifferentialEvolution_v3 - ) - LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3" - ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3", register=True) + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v3"] = QuantumEnhancedDynamicDifferentialEvolution_v3 + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3", register=True) except Exception as e: print("QuantumEnhancedDynamicDifferentialEvolution_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicHybridSearchV9 import ( - QuantumEnhancedDynamicHybridSearchV9, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicHybridSearchV9 import QuantumEnhancedDynamicHybridSearchV9 lama_register["QuantumEnhancedDynamicHybridSearchV9"] = QuantumEnhancedDynamicHybridSearchV9 - LLAMAQuantumEnhancedDynamicHybridSearchV9 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicHybridSearchV9" - ).set_name("LLAMAQuantumEnhancedDynamicHybridSearchV9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicHybridSearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicHybridSearchV9 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicHybridSearchV9").set_name("LLAMAQuantumEnhancedDynamicHybridSearchV9", register=True) except Exception as e: print("QuantumEnhancedDynamicHybridSearchV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE import ( - QuantumEnhancedDynamicMultiStrategyDE, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE import QuantumEnhancedDynamicMultiStrategyDE lama_register["QuantumEnhancedDynamicMultiStrategyDE"] = QuantumEnhancedDynamicMultiStrategyDE - LLAMAQuantumEnhancedDynamicMultiStrategyDE = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicMultiStrategyDE" - ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicMultiStrategyDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE").set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE", register=True) except Exception as e: print("QuantumEnhancedDynamicMultiStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE_v2 import ( - QuantumEnhancedDynamicMultiStrategyDE_v2, - ) + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE_v2 import QuantumEnhancedDynamicMultiStrategyDE_v2 lama_register["QuantumEnhancedDynamicMultiStrategyDE_v2"] = QuantumEnhancedDynamicMultiStrategyDE_v2 - LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2" - ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2").set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2", register=True) except Exception as e: print("QuantumEnhancedDynamicMultiStrategyDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedGlobalTacticalOptimizer import ( - QuantumEnhancedGlobalTacticalOptimizer, - ) + from nevergrad.optimization.lama.QuantumEnhancedGlobalTacticalOptimizer import QuantumEnhancedGlobalTacticalOptimizer lama_register["QuantumEnhancedGlobalTacticalOptimizer"] = QuantumEnhancedGlobalTacticalOptimizer - LLAMAQuantumEnhancedGlobalTacticalOptimizer = NonObjectOptimizer( - method="LLAMAQuantumEnhancedGlobalTacticalOptimizer" - ).set_name("LLAMAQuantumEnhancedGlobalTacticalOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGlobalTacticalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedGlobalTacticalOptimizer = NonObjectOptimizer(method="LLAMAQuantumEnhancedGlobalTacticalOptimizer").set_name("LLAMAQuantumEnhancedGlobalTacticalOptimizer", register=True) except Exception as e: print("QuantumEnhancedGlobalTacticalOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedGradientClimber import QuantumEnhancedGradientClimber lama_register["QuantumEnhancedGradientClimber"] = QuantumEnhancedGradientClimber - LLAMAQuantumEnhancedGradientClimber = NonObjectOptimizer( - method="LLAMAQuantumEnhancedGradientClimber" - ).set_name("LLAMAQuantumEnhancedGradientClimber", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGradientClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedGradientClimber = NonObjectOptimizer(method="LLAMAQuantumEnhancedGradientClimber").set_name("LLAMAQuantumEnhancedGradientClimber", register=True) except Exception as e: print("QuantumEnhancedGradientClimber can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedHybridDEPSO import QuantumEnhancedHybridDEPSO lama_register["QuantumEnhancedHybridDEPSO"] = QuantumEnhancedHybridDEPSO - LLAMAQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO").set_name( - "LLAMAQuantumEnhancedHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO").set_name("LLAMAQuantumEnhancedHybridDEPSO", register=True) except Exception as e: print("QuantumEnhancedHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedMemeticAdaptiveSearch import ( - QuantumEnhancedMemeticAdaptiveSearch, - ) + from nevergrad.optimization.lama.QuantumEnhancedMemeticAdaptiveSearch import QuantumEnhancedMemeticAdaptiveSearch lama_register["QuantumEnhancedMemeticAdaptiveSearch"] = QuantumEnhancedMemeticAdaptiveSearch - LLAMAQuantumEnhancedMemeticAdaptiveSearch = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMemeticAdaptiveSearch" - ).set_name("LLAMAQuantumEnhancedMemeticAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMemeticAdaptiveSearch = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticAdaptiveSearch").set_name("LLAMAQuantumEnhancedMemeticAdaptiveSearch", register=True) except Exception as e: print("QuantumEnhancedMemeticAdaptiveSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMemeticSearch import QuantumEnhancedMemeticSearch lama_register["QuantumEnhancedMemeticSearch"] = QuantumEnhancedMemeticSearch - LLAMAQuantumEnhancedMemeticSearch = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMemeticSearch" - ).set_name("LLAMAQuantumEnhancedMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticSearch").set_name("LLAMAQuantumEnhancedMemeticSearch", register=True) except Exception as e: print("QuantumEnhancedMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v8 import ( - QuantumEnhancedMultiPhaseAdaptiveDE_v8, - ) + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v8 import QuantumEnhancedMultiPhaseAdaptiveDE_v8 lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v8"] = QuantumEnhancedMultiPhaseAdaptiveDE_v8 - LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8" - ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8").set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseAdaptiveDE_v8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v9 import ( - QuantumEnhancedMultiPhaseAdaptiveDE_v9, - ) + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v9 import QuantumEnhancedMultiPhaseAdaptiveDE_v9 lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v9"] = QuantumEnhancedMultiPhaseAdaptiveDE_v9 - LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9" - ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9").set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseAdaptiveDE_v9 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE import QuantumEnhancedMultiPhaseDE lama_register["QuantumEnhancedMultiPhaseDE"] = QuantumEnhancedMultiPhaseDE - LLAMAQuantumEnhancedMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE").set_name( - "LLAMAQuantumEnhancedMultiPhaseDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE").set_name("LLAMAQuantumEnhancedMultiPhaseDE", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseDE can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v2 import QuantumEnhancedMultiPhaseDE_v2 lama_register["QuantumEnhancedMultiPhaseDE_v2"] = QuantumEnhancedMultiPhaseDE_v2 - LLAMAQuantumEnhancedMultiPhaseDE_v2 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseDE_v2" - ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseDE_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v2").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v2", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v3 import QuantumEnhancedMultiPhaseDE_v3 lama_register["QuantumEnhancedMultiPhaseDE_v3"] = QuantumEnhancedMultiPhaseDE_v3 - LLAMAQuantumEnhancedMultiPhaseDE_v3 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseDE_v3" - ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseDE_v3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v3").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v3", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v4 import QuantumEnhancedMultiPhaseDE_v4 lama_register["QuantumEnhancedMultiPhaseDE_v4"] = QuantumEnhancedMultiPhaseDE_v4 - LLAMAQuantumEnhancedMultiPhaseDE_v4 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseDE_v4" - ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseDE_v4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v4").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v4", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseDE_v4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v5 import QuantumEnhancedMultiPhaseDE_v5 lama_register["QuantumEnhancedMultiPhaseDE_v5"] = QuantumEnhancedMultiPhaseDE_v5 - LLAMAQuantumEnhancedMultiPhaseDE_v5 = NonObjectOptimizer( - method="LLAMAQuantumEnhancedMultiPhaseDE_v5" - ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedMultiPhaseDE_v5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v5").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v5", register=True) except Exception as e: print("QuantumEnhancedMultiPhaseDE_v5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEnhancedRefinedAdaptiveExplorationOptimization import ( - QuantumEnhancedRefinedAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.QuantumEnhancedRefinedAdaptiveExplorationOptimization import QuantumEnhancedRefinedAdaptiveExplorationOptimization - lama_register["QuantumEnhancedRefinedAdaptiveExplorationOptimization"] = ( - QuantumEnhancedRefinedAdaptiveExplorationOptimization - ) - LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization" - ).set_name("LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization", register=True) + lama_register["QuantumEnhancedRefinedAdaptiveExplorationOptimization"] = QuantumEnhancedRefinedAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization").set_name("LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization", register=True) except Exception as e: print("QuantumEnhancedRefinedAdaptiveExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEntropyEnhancedDE import QuantumEntropyEnhancedDE lama_register["QuantumEntropyEnhancedDE"] = QuantumEntropyEnhancedDE - LLAMAQuantumEntropyEnhancedDE = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE").set_name( - "LLAMAQuantumEntropyEnhancedDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEntropyEnhancedDE = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE").set_name("LLAMAQuantumEntropyEnhancedDE", register=True) except Exception as e: print("QuantumEntropyEnhancedDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolutionaryAdaptiveOptimizer import ( - QuantumEvolutionaryAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.QuantumEvolutionaryAdaptiveOptimizer import QuantumEvolutionaryAdaptiveOptimizer lama_register["QuantumEvolutionaryAdaptiveOptimizer"] = QuantumEvolutionaryAdaptiveOptimizer - LLAMAQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAQuantumEvolutionaryAdaptiveOptimizer" - ).set_name("LLAMAQuantumEvolutionaryAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryAdaptiveOptimizer").set_name("LLAMAQuantumEvolutionaryAdaptiveOptimizer", register=True) except Exception as e: print("QuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategy import ( - QuantumEvolutionaryConvergenceStrategy, - ) + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategy import QuantumEvolutionaryConvergenceStrategy lama_register["QuantumEvolutionaryConvergenceStrategy"] = QuantumEvolutionaryConvergenceStrategy - LLAMAQuantumEvolutionaryConvergenceStrategy = NonObjectOptimizer( - method="LLAMAQuantumEvolutionaryConvergenceStrategy" - ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolutionaryConvergenceStrategy = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategy").set_name("LLAMAQuantumEvolutionaryConvergenceStrategy", register=True) except Exception as e: print("QuantumEvolutionaryConvergenceStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategyV2 import ( - QuantumEvolutionaryConvergenceStrategyV2, - ) + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategyV2 import QuantumEvolutionaryConvergenceStrategyV2 lama_register["QuantumEvolutionaryConvergenceStrategyV2"] = QuantumEvolutionaryConvergenceStrategyV2 - LLAMAQuantumEvolutionaryConvergenceStrategyV2 = NonObjectOptimizer( - method="LLAMAQuantumEvolutionaryConvergenceStrategyV2" - ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolutionaryConvergenceStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategyV2").set_name("LLAMAQuantumEvolutionaryConvergenceStrategyV2", register=True) except Exception as e: print("QuantumEvolutionaryConvergenceStrategyV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumEvolutionaryOptimization import QuantumEvolutionaryOptimization lama_register["QuantumEvolutionaryOptimization"] = QuantumEvolutionaryOptimization - LLAMAQuantumEvolutionaryOptimization = NonObjectOptimizer( - method="LLAMAQuantumEvolutionaryOptimization" - ).set_name("LLAMAQuantumEvolutionaryOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolutionaryOptimization = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryOptimization").set_name("LLAMAQuantumEvolutionaryOptimization", register=True) except Exception as e: print("QuantumEvolutionaryOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV10 import ( - QuantumEvolvedDiversityExplorerV10, - ) + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV10 import QuantumEvolvedDiversityExplorerV10 lama_register["QuantumEvolvedDiversityExplorerV10"] = QuantumEvolvedDiversityExplorerV10 - LLAMAQuantumEvolvedDiversityExplorerV10 = NonObjectOptimizer( - method="LLAMAQuantumEvolvedDiversityExplorerV10" - ).set_name("LLAMAQuantumEvolvedDiversityExplorerV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolvedDiversityExplorerV10 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV10").set_name("LLAMAQuantumEvolvedDiversityExplorerV10", register=True) except Exception as e: print("QuantumEvolvedDiversityExplorerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV11 import ( - QuantumEvolvedDiversityExplorerV11, - ) + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV11 import QuantumEvolvedDiversityExplorerV11 lama_register["QuantumEvolvedDiversityExplorerV11"] = QuantumEvolvedDiversityExplorerV11 - LLAMAQuantumEvolvedDiversityExplorerV11 = NonObjectOptimizer( - method="LLAMAQuantumEvolvedDiversityExplorerV11" - ).set_name("LLAMAQuantumEvolvedDiversityExplorerV11", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolvedDiversityExplorerV11 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV11").set_name("LLAMAQuantumEvolvedDiversityExplorerV11", register=True) except Exception as e: print("QuantumEvolvedDiversityExplorerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV12 import ( - QuantumEvolvedDiversityExplorerV12, - ) + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV12 import QuantumEvolvedDiversityExplorerV12 lama_register["QuantumEvolvedDiversityExplorerV12"] = QuantumEvolvedDiversityExplorerV12 - LLAMAQuantumEvolvedDiversityExplorerV12 = NonObjectOptimizer( - method="LLAMAQuantumEvolvedDiversityExplorerV12" - ).set_name("LLAMAQuantumEvolvedDiversityExplorerV12", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolvedDiversityExplorerV12 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV12").set_name("LLAMAQuantumEvolvedDiversityExplorerV12", register=True) except Exception as e: print("QuantumEvolvedDiversityExplorerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV9 import ( - QuantumEvolvedDiversityExplorerV9, - ) + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV9 import QuantumEvolvedDiversityExplorerV9 lama_register["QuantumEvolvedDiversityExplorerV9"] = QuantumEvolvedDiversityExplorerV9 - LLAMAQuantumEvolvedDiversityExplorerV9 = NonObjectOptimizer( - method="LLAMAQuantumEvolvedDiversityExplorerV9" - ).set_name("LLAMAQuantumEvolvedDiversityExplorerV9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumEvolvedDiversityExplorerV9 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV9").set_name("LLAMAQuantumEvolvedDiversityExplorerV9", register=True) except Exception as e: print("QuantumEvolvedDiversityExplorerV9 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumFeedbackEvolutionStrategy import QuantumFeedbackEvolutionStrategy lama_register["QuantumFeedbackEvolutionStrategy"] = QuantumFeedbackEvolutionStrategy - LLAMAQuantumFeedbackEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumFeedbackEvolutionStrategy" - ).set_name("LLAMAQuantumFeedbackEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumFeedbackEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumFeedbackEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumFeedbackEvolutionStrategy").set_name("LLAMAQuantumFeedbackEvolutionStrategy", register=True) except Exception as e: print("QuantumFeedbackEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumFireworksAlgorithm import QuantumFireworksAlgorithm lama_register["QuantumFireworksAlgorithm"] = QuantumFireworksAlgorithm - LLAMAQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm").set_name( - "LLAMAQuantumFireworksAlgorithm", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm").set_name("LLAMAQuantumFireworksAlgorithm", register=True) except Exception as e: print("QuantumFireworksAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumFluxDifferentialSwarm import QuantumFluxDifferentialSwarm lama_register["QuantumFluxDifferentialSwarm"] = QuantumFluxDifferentialSwarm - LLAMAQuantumFluxDifferentialSwarm = NonObjectOptimizer( - method="LLAMAQuantumFluxDifferentialSwarm" - ).set_name("LLAMAQuantumFluxDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumFluxDifferentialSwarm").set_name("LLAMAQuantumFluxDifferentialSwarm", register=True) except Exception as e: print("QuantumFluxDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGeneticDifferentialEvolution import ( - QuantumGeneticDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumGeneticDifferentialEvolution import QuantumGeneticDifferentialEvolution lama_register["QuantumGeneticDifferentialEvolution"] = QuantumGeneticDifferentialEvolution - LLAMAQuantumGeneticDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumGeneticDifferentialEvolution" - ).set_name("LLAMAQuantumGeneticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGeneticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGeneticDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumGeneticDifferentialEvolution").set_name("LLAMAQuantumGeneticDifferentialEvolution", register=True) except Exception as e: print("QuantumGeneticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimization import ( - QuantumGradientAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimization import QuantumGradientAdaptiveExplorationOptimization - lama_register["QuantumGradientAdaptiveExplorationOptimization"] = ( - QuantumGradientAdaptiveExplorationOptimization - ) - LLAMAQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationOptimization" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimization", register=True) + lama_register["QuantumGradientAdaptiveExplorationOptimization"] = QuantumGradientAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimization", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV2 import ( - QuantumGradientAdaptiveExplorationOptimizationV2, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV2 import QuantumGradientAdaptiveExplorationOptimizationV2 - lama_register["QuantumGradientAdaptiveExplorationOptimizationV2"] = ( - QuantumGradientAdaptiveExplorationOptimizationV2 - ) - LLAMAQuantumGradientAdaptiveExplorationOptimizationV2 = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV2", register=True) + lama_register["QuantumGradientAdaptiveExplorationOptimizationV2"] = QuantumGradientAdaptiveExplorationOptimizationV2 + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV2", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV3 import ( - QuantumGradientAdaptiveExplorationOptimizationV3, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV3 import QuantumGradientAdaptiveExplorationOptimizationV3 - lama_register["QuantumGradientAdaptiveExplorationOptimizationV3"] = ( - QuantumGradientAdaptiveExplorationOptimizationV3 - ) - LLAMAQuantumGradientAdaptiveExplorationOptimizationV3 = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV3", register=True) + lama_register["QuantumGradientAdaptiveExplorationOptimizationV3"] = QuantumGradientAdaptiveExplorationOptimizationV3 + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV3", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV4 import ( - QuantumGradientAdaptiveExplorationOptimizationV4, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV4 import QuantumGradientAdaptiveExplorationOptimizationV4 - lama_register["QuantumGradientAdaptiveExplorationOptimizationV4"] = ( - QuantumGradientAdaptiveExplorationOptimizationV4 - ) - LLAMAQuantumGradientAdaptiveExplorationOptimizationV4 = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV4", register=True) + lama_register["QuantumGradientAdaptiveExplorationOptimizationV4"] = QuantumGradientAdaptiveExplorationOptimizationV4 + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV4 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV4", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV5 import ( - QuantumGradientAdaptiveExplorationOptimizationV5, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV5 import QuantumGradientAdaptiveExplorationOptimizationV5 - lama_register["QuantumGradientAdaptiveExplorationOptimizationV5"] = ( - QuantumGradientAdaptiveExplorationOptimizationV5 - ) - LLAMAQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV5", register=True) + lama_register["QuantumGradientAdaptiveExplorationOptimizationV5"] = QuantumGradientAdaptiveExplorationOptimizationV5 + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV5", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationRefinedOptimization import ( - QuantumGradientAdaptiveExplorationRefinedOptimization, - ) + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationRefinedOptimization import QuantumGradientAdaptiveExplorationRefinedOptimization - lama_register["QuantumGradientAdaptiveExplorationRefinedOptimization"] = ( - QuantumGradientAdaptiveExplorationRefinedOptimization - ) - LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization = NonObjectOptimizer( - method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization" - ).set_name("LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization", register=True) + lama_register["QuantumGradientAdaptiveExplorationRefinedOptimization"] = QuantumGradientAdaptiveExplorationRefinedOptimization + res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization").set_name("LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization", register=True) except Exception as e: print("QuantumGradientAdaptiveExplorationRefinedOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientBalancedOptimizerV6 import ( - QuantumGradientBalancedOptimizerV6, - ) + from nevergrad.optimization.lama.QuantumGradientBalancedOptimizerV6 import QuantumGradientBalancedOptimizerV6 lama_register["QuantumGradientBalancedOptimizerV6"] = QuantumGradientBalancedOptimizerV6 - LLAMAQuantumGradientBalancedOptimizerV6 = NonObjectOptimizer( - method="LLAMAQuantumGradientBalancedOptimizerV6" - ).set_name("LLAMAQuantumGradientBalancedOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientBalancedOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientBalancedOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumGradientBalancedOptimizerV6").set_name("LLAMAQuantumGradientBalancedOptimizerV6", register=True) except Exception as e: print("QuantumGradientBalancedOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientBoostedMemeticSearch import ( - QuantumGradientBoostedMemeticSearch, - ) + from nevergrad.optimization.lama.QuantumGradientBoostedMemeticSearch import QuantumGradientBoostedMemeticSearch lama_register["QuantumGradientBoostedMemeticSearch"] = QuantumGradientBoostedMemeticSearch - LLAMAQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( - method="LLAMAQuantumGradientBoostedMemeticSearch" - ).set_name("LLAMAQuantumGradientBoostedMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumGradientBoostedMemeticSearch").set_name("LLAMAQuantumGradientBoostedMemeticSearch", register=True) except Exception as e: print("QuantumGradientBoostedMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientEnhancedExplorationOptimization import ( - QuantumGradientEnhancedExplorationOptimization, - ) + from nevergrad.optimization.lama.QuantumGradientEnhancedExplorationOptimization import QuantumGradientEnhancedExplorationOptimization - lama_register["QuantumGradientEnhancedExplorationOptimization"] = ( - QuantumGradientEnhancedExplorationOptimization - ) - LLAMAQuantumGradientEnhancedExplorationOptimization = NonObjectOptimizer( - method="LLAMAQuantumGradientEnhancedExplorationOptimization" - ).set_name("LLAMAQuantumGradientEnhancedExplorationOptimization", register=True) + lama_register["QuantumGradientEnhancedExplorationOptimization"] = QuantumGradientEnhancedExplorationOptimization + res = NonObjectOptimizer(method="LLAMAQuantumGradientEnhancedExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientEnhancedExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientEnhancedExplorationOptimization").set_name("LLAMAQuantumGradientEnhancedExplorationOptimization", register=True) except Exception as e: print("QuantumGradientEnhancedExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientFusionOptimizer import QuantumGradientFusionOptimizer lama_register["QuantumGradientFusionOptimizer"] = QuantumGradientFusionOptimizer - LLAMAQuantumGradientFusionOptimizer = NonObjectOptimizer( - method="LLAMAQuantumGradientFusionOptimizer" - ).set_name("LLAMAQuantumGradientFusionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientFusionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientFusionOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientFusionOptimizer").set_name("LLAMAQuantumGradientFusionOptimizer", register=True) except Exception as e: print("QuantumGradientFusionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientGuidedFireworksAlgorithm import ( - QuantumGradientGuidedFireworksAlgorithm, - ) + from nevergrad.optimization.lama.QuantumGradientGuidedFireworksAlgorithm import QuantumGradientGuidedFireworksAlgorithm lama_register["QuantumGradientGuidedFireworksAlgorithm"] = QuantumGradientGuidedFireworksAlgorithm - LLAMAQuantumGradientGuidedFireworksAlgorithm = NonObjectOptimizer( - method="LLAMAQuantumGradientGuidedFireworksAlgorithm" - ).set_name("LLAMAQuantumGradientGuidedFireworksAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientGuidedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientGuidedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumGradientGuidedFireworksAlgorithm").set_name("LLAMAQuantumGradientGuidedFireworksAlgorithm", register=True) except Exception as e: print("QuantumGradientGuidedFireworksAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimization import ( - QuantumGradientHybridOptimization, - ) + from nevergrad.optimization.lama.QuantumGradientHybridOptimization import QuantumGradientHybridOptimization lama_register["QuantumGradientHybridOptimization"] = QuantumGradientHybridOptimization - LLAMAQuantumGradientHybridOptimization = NonObjectOptimizer( - method="LLAMAQuantumGradientHybridOptimization" - ).set_name("LLAMAQuantumGradientHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientHybridOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimization").set_name("LLAMAQuantumGradientHybridOptimization", register=True) except Exception as e: print("QuantumGradientHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV2 import ( - QuantumGradientHybridOptimizationV2, - ) + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV2 import QuantumGradientHybridOptimizationV2 lama_register["QuantumGradientHybridOptimizationV2"] = QuantumGradientHybridOptimizationV2 - LLAMAQuantumGradientHybridOptimizationV2 = NonObjectOptimizer( - method="LLAMAQuantumGradientHybridOptimizationV2" - ).set_name("LLAMAQuantumGradientHybridOptimizationV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV2").set_name("LLAMAQuantumGradientHybridOptimizationV2", register=True) except Exception as e: print("QuantumGradientHybridOptimizationV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV3 import ( - QuantumGradientHybridOptimizationV3, - ) + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV3 import QuantumGradientHybridOptimizationV3 lama_register["QuantumGradientHybridOptimizationV3"] = QuantumGradientHybridOptimizationV3 - LLAMAQuantumGradientHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMAQuantumGradientHybridOptimizationV3" - ).set_name("LLAMAQuantumGradientHybridOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV3").set_name("LLAMAQuantumGradientHybridOptimizationV3", register=True) except Exception as e: print("QuantumGradientHybridOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV4 import ( - QuantumGradientHybridOptimizationV4, - ) + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV4 import QuantumGradientHybridOptimizationV4 lama_register["QuantumGradientHybridOptimizationV4"] = QuantumGradientHybridOptimizationV4 - LLAMAQuantumGradientHybridOptimizationV4 = NonObjectOptimizer( - method="LLAMAQuantumGradientHybridOptimizationV4" - ).set_name("LLAMAQuantumGradientHybridOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientHybridOptimizationV4 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV4").set_name("LLAMAQuantumGradientHybridOptimizationV4", register=True) except Exception as e: print("QuantumGradientHybridOptimizationV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientHybridOptimizer import QuantumGradientHybridOptimizer lama_register["QuantumGradientHybridOptimizer"] = QuantumGradientHybridOptimizer - LLAMAQuantumGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumGradientHybridOptimizer" - ).set_name("LLAMAQuantumGradientHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizer").set_name("LLAMAQuantumGradientHybridOptimizer", register=True) except Exception as e: print("QuantumGradientHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientMemeticOptimizer import QuantumGradientMemeticOptimizer lama_register["QuantumGradientMemeticOptimizer"] = QuantumGradientMemeticOptimizer - LLAMAQuantumGradientMemeticOptimizer = NonObjectOptimizer( - method="LLAMAQuantumGradientMemeticOptimizer" - ).set_name("LLAMAQuantumGradientMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticOptimizer").set_name("LLAMAQuantumGradientMemeticOptimizer", register=True) except Exception as e: print("QuantumGradientMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientMemeticSearch import QuantumGradientMemeticSearch lama_register["QuantumGradientMemeticSearch"] = QuantumGradientMemeticSearch - LLAMAQuantumGradientMemeticSearch = NonObjectOptimizer( - method="LLAMAQuantumGradientMemeticSearch" - ).set_name("LLAMAQuantumGradientMemeticSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearch").set_name("LLAMAQuantumGradientMemeticSearch", register=True) except Exception as e: print("QuantumGradientMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientMemeticSearchV2 import QuantumGradientMemeticSearchV2 lama_register["QuantumGradientMemeticSearchV2"] = QuantumGradientMemeticSearchV2 - LLAMAQuantumGradientMemeticSearchV2 = NonObjectOptimizer( - method="LLAMAQuantumGradientMemeticSearchV2" - ).set_name("LLAMAQuantumGradientMemeticSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientMemeticSearchV2 = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV2").set_name("LLAMAQuantumGradientMemeticSearchV2", register=True) except Exception as e: print("QuantumGradientMemeticSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGradientMemeticSearchV3 import QuantumGradientMemeticSearchV3 lama_register["QuantumGradientMemeticSearchV3"] = QuantumGradientMemeticSearchV3 - LLAMAQuantumGradientMemeticSearchV3 = NonObjectOptimizer( - method="LLAMAQuantumGradientMemeticSearchV3" - ).set_name("LLAMAQuantumGradientMemeticSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGradientMemeticSearchV3 = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV3").set_name("LLAMAQuantumGradientMemeticSearchV3", register=True) except Exception as e: print("QuantumGradientMemeticSearchV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGuidedAdaptiveStrategy import QuantumGuidedAdaptiveStrategy lama_register["QuantumGuidedAdaptiveStrategy"] = QuantumGuidedAdaptiveStrategy - LLAMAQuantumGuidedAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAQuantumGuidedAdaptiveStrategy" - ).set_name("LLAMAQuantumGuidedAdaptiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGuidedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGuidedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumGuidedAdaptiveStrategy").set_name("LLAMAQuantumGuidedAdaptiveStrategy", register=True) except Exception as e: print("QuantumGuidedAdaptiveStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGuidedCrossoverAdaptation import QuantumGuidedCrossoverAdaptation lama_register["QuantumGuidedCrossoverAdaptation"] = QuantumGuidedCrossoverAdaptation - LLAMAQuantumGuidedCrossoverAdaptation = NonObjectOptimizer( - method="LLAMAQuantumGuidedCrossoverAdaptation" - ).set_name("LLAMAQuantumGuidedCrossoverAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGuidedCrossoverAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGuidedCrossoverAdaptation = NonObjectOptimizer(method="LLAMAQuantumGuidedCrossoverAdaptation").set_name("LLAMAQuantumGuidedCrossoverAdaptation", register=True) except Exception as e: print("QuantumGuidedCrossoverAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumGuidedHybridDifferentialSwarm import ( - QuantumGuidedHybridDifferentialSwarm, - ) + from nevergrad.optimization.lama.QuantumGuidedHybridDifferentialSwarm import QuantumGuidedHybridDifferentialSwarm lama_register["QuantumGuidedHybridDifferentialSwarm"] = QuantumGuidedHybridDifferentialSwarm - LLAMAQuantumGuidedHybridDifferentialSwarm = NonObjectOptimizer( - method="LLAMAQuantumGuidedHybridDifferentialSwarm" - ).set_name("LLAMAQuantumGuidedHybridDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGuidedHybridDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGuidedHybridDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumGuidedHybridDifferentialSwarm").set_name("LLAMAQuantumGuidedHybridDifferentialSwarm", register=True) except Exception as e: print("QuantumGuidedHybridDifferentialSwarm can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumGuidedLevyAdaptiveSwarm import QuantumGuidedLevyAdaptiveSwarm lama_register["QuantumGuidedLevyAdaptiveSwarm"] = QuantumGuidedLevyAdaptiveSwarm - LLAMAQuantumGuidedLevyAdaptiveSwarm = NonObjectOptimizer( - method="LLAMAQuantumGuidedLevyAdaptiveSwarm" - ).set_name("LLAMAQuantumGuidedLevyAdaptiveSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumGuidedLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumGuidedLevyAdaptiveSwarm = NonObjectOptimizer(method="LLAMAQuantumGuidedLevyAdaptiveSwarm").set_name("LLAMAQuantumGuidedLevyAdaptiveSwarm", register=True) except Exception as e: print("QuantumGuidedLevyAdaptiveSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptationStrategy import ( - QuantumHarmonicAdaptationStrategy, - ) + from nevergrad.optimization.lama.QuantumHarmonicAdaptationStrategy import QuantumHarmonicAdaptationStrategy lama_register["QuantumHarmonicAdaptationStrategy"] = QuantumHarmonicAdaptationStrategy - LLAMAQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( - method="LLAMAQuantumHarmonicAdaptationStrategy" - ).set_name("LLAMAQuantumHarmonicAdaptationStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicAdaptationStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptationStrategy").set_name("LLAMAQuantumHarmonicAdaptationStrategy", register=True) except Exception as e: print("QuantumHarmonicAdaptationStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptiveFeedbackOptimizer import ( - QuantumHarmonicAdaptiveFeedbackOptimizer, - ) + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveFeedbackOptimizer import QuantumHarmonicAdaptiveFeedbackOptimizer lama_register["QuantumHarmonicAdaptiveFeedbackOptimizer"] = QuantumHarmonicAdaptiveFeedbackOptimizer - LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer" - ).set_name("LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer", register=True) except Exception as e: print("QuantumHarmonicAdaptiveFeedbackOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicAdaptiveOptimizer import QuantumHarmonicAdaptiveOptimizer lama_register["QuantumHarmonicAdaptiveOptimizer"] = QuantumHarmonicAdaptiveOptimizer - LLAMAQuantumHarmonicAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicAdaptiveOptimizer" - ).set_name("LLAMAQuantumHarmonicAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveOptimizer", register=True) except Exception as e: print("QuantumHarmonicAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptiveRefinementOptimizer import ( - QuantumHarmonicAdaptiveRefinementOptimizer, - ) + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveRefinementOptimizer import QuantumHarmonicAdaptiveRefinementOptimizer lama_register["QuantumHarmonicAdaptiveRefinementOptimizer"] = QuantumHarmonicAdaptiveRefinementOptimizer - LLAMAQuantumHarmonicAdaptiveRefinementOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer" - ).set_name("LLAMAQuantumHarmonicAdaptiveRefinementOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicAdaptiveRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveRefinementOptimizer", register=True) except Exception as e: print("QuantumHarmonicAdaptiveRefinementOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicDynamicAdaptation import QuantumHarmonicDynamicAdaptation lama_register["QuantumHarmonicDynamicAdaptation"] = QuantumHarmonicDynamicAdaptation - LLAMAQuantumHarmonicDynamicAdaptation = NonObjectOptimizer( - method="LLAMAQuantumHarmonicDynamicAdaptation" - ).set_name("LLAMAQuantumHarmonicDynamicAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicDynamicAdaptation = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicAdaptation").set_name("LLAMAQuantumHarmonicDynamicAdaptation", register=True) except Exception as e: print("QuantumHarmonicDynamicAdaptation can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicDynamicOptimizer import QuantumHarmonicDynamicOptimizer lama_register["QuantumHarmonicDynamicOptimizer"] = QuantumHarmonicDynamicOptimizer - LLAMAQuantumHarmonicDynamicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicDynamicOptimizer" - ).set_name("LLAMAQuantumHarmonicDynamicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicOptimizer").set_name("LLAMAQuantumHarmonicDynamicOptimizer", register=True) except Exception as e: print("QuantumHarmonicDynamicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicEvolutionStrategy import QuantumHarmonicEvolutionStrategy lama_register["QuantumHarmonicEvolutionStrategy"] = QuantumHarmonicEvolutionStrategy - LLAMAQuantumHarmonicEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumHarmonicEvolutionStrategy" - ).set_name("LLAMAQuantumHarmonicEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicEvolutionStrategy").set_name("LLAMAQuantumHarmonicEvolutionStrategy", register=True) except Exception as e: print("QuantumHarmonicEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicFeedbackOptimizer import QuantumHarmonicFeedbackOptimizer lama_register["QuantumHarmonicFeedbackOptimizer"] = QuantumHarmonicFeedbackOptimizer - LLAMAQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFeedbackOptimizer" - ).set_name("LLAMAQuantumHarmonicFeedbackOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicFeedbackOptimizer").set_name("LLAMAQuantumHarmonicFeedbackOptimizer", register=True) except Exception as e: print("QuantumHarmonicFeedbackOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizer import QuantumHarmonicFocusedOptimizer lama_register["QuantumHarmonicFocusedOptimizer"] = QuantumHarmonicFocusedOptimizer - LLAMAQuantumHarmonicFocusedOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizer" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizer").set_name("LLAMAQuantumHarmonicFocusedOptimizer", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV2 import ( - QuantumHarmonicFocusedOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV2 import QuantumHarmonicFocusedOptimizerV2 lama_register["QuantumHarmonicFocusedOptimizerV2"] = QuantumHarmonicFocusedOptimizerV2 - LLAMAQuantumHarmonicFocusedOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV2" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV2").set_name("LLAMAQuantumHarmonicFocusedOptimizerV2", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV3 import ( - QuantumHarmonicFocusedOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV3 import QuantumHarmonicFocusedOptimizerV3 lama_register["QuantumHarmonicFocusedOptimizerV3"] = QuantumHarmonicFocusedOptimizerV3 - LLAMAQuantumHarmonicFocusedOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV3" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV3").set_name("LLAMAQuantumHarmonicFocusedOptimizerV3", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV4 import ( - QuantumHarmonicFocusedOptimizerV4, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV4 import QuantumHarmonicFocusedOptimizerV4 lama_register["QuantumHarmonicFocusedOptimizerV4"] = QuantumHarmonicFocusedOptimizerV4 - LLAMAQuantumHarmonicFocusedOptimizerV4 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV4" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV4").set_name("LLAMAQuantumHarmonicFocusedOptimizerV4", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV5 import ( - QuantumHarmonicFocusedOptimizerV5, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV5 import QuantumHarmonicFocusedOptimizerV5 lama_register["QuantumHarmonicFocusedOptimizerV5"] = QuantumHarmonicFocusedOptimizerV5 - LLAMAQuantumHarmonicFocusedOptimizerV5 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV5" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV5").set_name("LLAMAQuantumHarmonicFocusedOptimizerV5", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV6 import ( - QuantumHarmonicFocusedOptimizerV6, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV6 import QuantumHarmonicFocusedOptimizerV6 lama_register["QuantumHarmonicFocusedOptimizerV6"] = QuantumHarmonicFocusedOptimizerV6 - LLAMAQuantumHarmonicFocusedOptimizerV6 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV6" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV6").set_name("LLAMAQuantumHarmonicFocusedOptimizerV6", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV7 import ( - QuantumHarmonicFocusedOptimizerV7, - ) + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV7 import QuantumHarmonicFocusedOptimizerV7 lama_register["QuantumHarmonicFocusedOptimizerV7"] = QuantumHarmonicFocusedOptimizerV7 - LLAMAQuantumHarmonicFocusedOptimizerV7 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicFocusedOptimizerV7" - ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicFocusedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV7").set_name("LLAMAQuantumHarmonicFocusedOptimizerV7", register=True) except Exception as e: print("QuantumHarmonicFocusedOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicImpulseOptimizerV9 import ( - QuantumHarmonicImpulseOptimizerV9, - ) + from nevergrad.optimization.lama.QuantumHarmonicImpulseOptimizerV9 import QuantumHarmonicImpulseOptimizerV9 lama_register["QuantumHarmonicImpulseOptimizerV9"] = QuantumHarmonicImpulseOptimizerV9 - LLAMAQuantumHarmonicImpulseOptimizerV9 = NonObjectOptimizer( - method="LLAMAQuantumHarmonicImpulseOptimizerV9" - ).set_name("LLAMAQuantumHarmonicImpulseOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicImpulseOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicImpulseOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumHarmonicImpulseOptimizerV9").set_name("LLAMAQuantumHarmonicImpulseOptimizerV9", register=True) except Exception as e: print("QuantumHarmonicImpulseOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicPrecisionOptimizer import ( - QuantumHarmonicPrecisionOptimizer, - ) + from nevergrad.optimization.lama.QuantumHarmonicPrecisionOptimizer import QuantumHarmonicPrecisionOptimizer lama_register["QuantumHarmonicPrecisionOptimizer"] = QuantumHarmonicPrecisionOptimizer - LLAMAQuantumHarmonicPrecisionOptimizer = NonObjectOptimizer( - method="LLAMAQuantumHarmonicPrecisionOptimizer" - ).set_name("LLAMAQuantumHarmonicPrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicPrecisionOptimizer").set_name("LLAMAQuantumHarmonicPrecisionOptimizer", register=True) except Exception as e: print("QuantumHarmonicPrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonicResilientEvolutionStrategy import ( - QuantumHarmonicResilientEvolutionStrategy, - ) + from nevergrad.optimization.lama.QuantumHarmonicResilientEvolutionStrategy import QuantumHarmonicResilientEvolutionStrategy lama_register["QuantumHarmonicResilientEvolutionStrategy"] = QuantumHarmonicResilientEvolutionStrategy - LLAMAQuantumHarmonicResilientEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumHarmonicResilientEvolutionStrategy" - ).set_name("LLAMAQuantumHarmonicResilientEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonicResilientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonicResilientEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicResilientEvolutionStrategy").set_name("LLAMAQuantumHarmonicResilientEvolutionStrategy", register=True) except Exception as e: print("QuantumHarmonicResilientEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonizedPSO import QuantumHarmonizedPSO lama_register["QuantumHarmonizedPSO"] = QuantumHarmonizedPSO - LLAMAQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO").set_name( - "LLAMAQuantumHarmonizedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO").set_name("LLAMAQuantumHarmonizedPSO", register=True) except Exception as e: print("QuantumHarmonizedPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithm import QuantumHarmonyMemeticAlgorithm lama_register["QuantumHarmonyMemeticAlgorithm"] = QuantumHarmonyMemeticAlgorithm - LLAMAQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( - method="LLAMAQuantumHarmonyMemeticAlgorithm" - ).set_name("LLAMAQuantumHarmonyMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithm").set_name("LLAMAQuantumHarmonyMemeticAlgorithm", register=True) except Exception as e: print("QuantumHarmonyMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmImproved import ( - QuantumHarmonyMemeticAlgorithmImproved, - ) + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmImproved import QuantumHarmonyMemeticAlgorithmImproved lama_register["QuantumHarmonyMemeticAlgorithmImproved"] = QuantumHarmonyMemeticAlgorithmImproved - LLAMAQuantumHarmonyMemeticAlgorithmImproved = NonObjectOptimizer( - method="LLAMAQuantumHarmonyMemeticAlgorithmImproved" - ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmImproved", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonyMemeticAlgorithmImproved = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmImproved").set_name("LLAMAQuantumHarmonyMemeticAlgorithmImproved", register=True) except Exception as e: print("QuantumHarmonyMemeticAlgorithmImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmRefined import ( - QuantumHarmonyMemeticAlgorithmRefined, - ) + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmRefined import QuantumHarmonyMemeticAlgorithmRefined lama_register["QuantumHarmonyMemeticAlgorithmRefined"] = QuantumHarmonyMemeticAlgorithmRefined - LLAMAQuantumHarmonyMemeticAlgorithmRefined = NonObjectOptimizer( - method="LLAMAQuantumHarmonyMemeticAlgorithmRefined" - ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmRefined", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonyMemeticAlgorithmRefined = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmRefined").set_name("LLAMAQuantumHarmonyMemeticAlgorithmRefined", register=True) except Exception as e: print("QuantumHarmonyMemeticAlgorithmRefined can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHarmonySearch import QuantumHarmonySearch lama_register["QuantumHarmonySearch"] = QuantumHarmonySearch - LLAMAQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch").set_name( - "LLAMAQuantumHarmonySearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch").set_name("LLAMAQuantumHarmonySearch", register=True) except Exception as e: print("QuantumHarmonySearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategy import QuantumHybridAdaptiveStrategy lama_register["QuantumHybridAdaptiveStrategy"] = QuantumHybridAdaptiveStrategy - LLAMAQuantumHybridAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAQuantumHybridAdaptiveStrategy" - ).set_name("LLAMAQuantumHybridAdaptiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategy").set_name("LLAMAQuantumHybridAdaptiveStrategy", register=True) except Exception as e: print("QuantumHybridAdaptiveStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV2 import QuantumHybridAdaptiveStrategyV2 lama_register["QuantumHybridAdaptiveStrategyV2"] = QuantumHybridAdaptiveStrategyV2 - LLAMAQuantumHybridAdaptiveStrategyV2 = NonObjectOptimizer( - method="LLAMAQuantumHybridAdaptiveStrategyV2" - ).set_name("LLAMAQuantumHybridAdaptiveStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridAdaptiveStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV2").set_name("LLAMAQuantumHybridAdaptiveStrategyV2", register=True) except Exception as e: print("QuantumHybridAdaptiveStrategyV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV8 import QuantumHybridAdaptiveStrategyV8 lama_register["QuantumHybridAdaptiveStrategyV8"] = QuantumHybridAdaptiveStrategyV8 - LLAMAQuantumHybridAdaptiveStrategyV8 = NonObjectOptimizer( - method="LLAMAQuantumHybridAdaptiveStrategyV8" - ).set_name("LLAMAQuantumHybridAdaptiveStrategyV8", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridAdaptiveStrategyV8 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV8").set_name("LLAMAQuantumHybridAdaptiveStrategyV8", register=True) except Exception as e: print("QuantumHybridAdaptiveStrategyV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV9 import QuantumHybridAdaptiveStrategyV9 lama_register["QuantumHybridAdaptiveStrategyV9"] = QuantumHybridAdaptiveStrategyV9 - LLAMAQuantumHybridAdaptiveStrategyV9 = NonObjectOptimizer( - method="LLAMAQuantumHybridAdaptiveStrategyV9" - ).set_name("LLAMAQuantumHybridAdaptiveStrategyV9", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridAdaptiveStrategyV9 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV9").set_name("LLAMAQuantumHybridAdaptiveStrategyV9", register=True) except Exception as e: print("QuantumHybridAdaptiveStrategyV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHybridDifferentialEvolution import ( - QuantumHybridDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumHybridDifferentialEvolution import QuantumHybridDifferentialEvolution lama_register["QuantumHybridDifferentialEvolution"] = QuantumHybridDifferentialEvolution - LLAMAQuantumHybridDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumHybridDifferentialEvolution" - ).set_name("LLAMAQuantumHybridDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumHybridDifferentialEvolution").set_name("LLAMAQuantumHybridDifferentialEvolution", register=True) except Exception as e: print("QuantumHybridDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE import QuantumHybridDynamicAdaptiveDE lama_register["QuantumHybridDynamicAdaptiveDE"] = QuantumHybridDynamicAdaptiveDE - LLAMAQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( - method="LLAMAQuantumHybridDynamicAdaptiveDE" - ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE").set_name("LLAMAQuantumHybridDynamicAdaptiveDE", register=True) except Exception as e: print("QuantumHybridDynamicAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v2 import ( - QuantumHybridDynamicAdaptiveDE_v2, - ) + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v2 import QuantumHybridDynamicAdaptiveDE_v2 lama_register["QuantumHybridDynamicAdaptiveDE_v2"] = QuantumHybridDynamicAdaptiveDE_v2 - LLAMAQuantumHybridDynamicAdaptiveDE_v2 = NonObjectOptimizer( - method="LLAMAQuantumHybridDynamicAdaptiveDE_v2" - ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridDynamicAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v2").set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v2", register=True) except Exception as e: print("QuantumHybridDynamicAdaptiveDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v3 import ( - QuantumHybridDynamicAdaptiveDE_v3, - ) + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v3 import QuantumHybridDynamicAdaptiveDE_v3 lama_register["QuantumHybridDynamicAdaptiveDE_v3"] = QuantumHybridDynamicAdaptiveDE_v3 - LLAMAQuantumHybridDynamicAdaptiveDE_v3 = NonObjectOptimizer( - method="LLAMAQuantumHybridDynamicAdaptiveDE_v3" - ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridDynamicAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v3").set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v3", register=True) except Exception as e: print("QuantumHybridDynamicAdaptiveDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE import QuantumHybridEliteAdaptiveDE lama_register["QuantumHybridEliteAdaptiveDE"] = QuantumHybridEliteAdaptiveDE - LLAMAQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE").set_name("LLAMAQuantumHybridEliteAdaptiveDE", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v2 import QuantumHybridEliteAdaptiveDE_v2 lama_register["QuantumHybridEliteAdaptiveDE_v2"] = QuantumHybridEliteAdaptiveDE_v2 - LLAMAQuantumHybridEliteAdaptiveDE_v2 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v2" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v2").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v2", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v3 import QuantumHybridEliteAdaptiveDE_v3 lama_register["QuantumHybridEliteAdaptiveDE_v3"] = QuantumHybridEliteAdaptiveDE_v3 - LLAMAQuantumHybridEliteAdaptiveDE_v3 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v3" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v3").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v3", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v4 import QuantumHybridEliteAdaptiveDE_v4 lama_register["QuantumHybridEliteAdaptiveDE_v4"] = QuantumHybridEliteAdaptiveDE_v4 - LLAMAQuantumHybridEliteAdaptiveDE_v4 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v4" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v4 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v4").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v4", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v5 import QuantumHybridEliteAdaptiveDE_v5 lama_register["QuantumHybridEliteAdaptiveDE_v5"] = QuantumHybridEliteAdaptiveDE_v5 - LLAMAQuantumHybridEliteAdaptiveDE_v5 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v5" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v5 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v5").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v5", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v5 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v6 import QuantumHybridEliteAdaptiveDE_v6 lama_register["QuantumHybridEliteAdaptiveDE_v6"] = QuantumHybridEliteAdaptiveDE_v6 - LLAMAQuantumHybridEliteAdaptiveDE_v6 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v6" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v6 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v6").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v6", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v6 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v7 import QuantumHybridEliteAdaptiveDE_v7 lama_register["QuantumHybridEliteAdaptiveDE_v7"] = QuantumHybridEliteAdaptiveDE_v7 - LLAMAQuantumHybridEliteAdaptiveDE_v7 = NonObjectOptimizer( - method="LLAMAQuantumHybridEliteAdaptiveDE_v7" - ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v7", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridEliteAdaptiveDE_v7 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v7").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v7", register=True) except Exception as e: print("QuantumHybridEliteAdaptiveDE_v7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumHybridImprovedDE import QuantumHybridImprovedDE lama_register["QuantumHybridImprovedDE"] = QuantumHybridImprovedDE - LLAMAQuantumHybridImprovedDE = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE").set_name( - "LLAMAQuantumHybridImprovedDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridImprovedDE = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE").set_name("LLAMAQuantumHybridImprovedDE", register=True) except Exception as e: print("QuantumHybridImprovedDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumHybridParticleDifferentialSearch import ( - QuantumHybridParticleDifferentialSearch, - ) + from nevergrad.optimization.lama.QuantumHybridParticleDifferentialSearch import QuantumHybridParticleDifferentialSearch lama_register["QuantumHybridParticleDifferentialSearch"] = QuantumHybridParticleDifferentialSearch - LLAMAQuantumHybridParticleDifferentialSearch = NonObjectOptimizer( - method="LLAMAQuantumHybridParticleDifferentialSearch" - ).set_name("LLAMAQuantumHybridParticleDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumHybridParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumHybridParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAQuantumHybridParticleDifferentialSearch").set_name("LLAMAQuantumHybridParticleDifferentialSearch", register=True) except Exception as e: print("QuantumHybridParticleDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInfluenceCrossoverOptimizer import ( - QuantumInfluenceCrossoverOptimizer, - ) + from nevergrad.optimization.lama.QuantumInfluenceCrossoverOptimizer import QuantumInfluenceCrossoverOptimizer lama_register["QuantumInfluenceCrossoverOptimizer"] = QuantumInfluenceCrossoverOptimizer - LLAMAQuantumInfluenceCrossoverOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInfluenceCrossoverOptimizer" - ).set_name("LLAMAQuantumInfluenceCrossoverOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInfluenceCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInfluenceCrossoverOptimizer = NonObjectOptimizer(method="LLAMAQuantumInfluenceCrossoverOptimizer").set_name("LLAMAQuantumInfluenceCrossoverOptimizer", register=True) except Exception as e: print("QuantumInfluenceCrossoverOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInfluencedAdaptiveDifferentialSwarm import ( - QuantumInfluencedAdaptiveDifferentialSwarm, - ) + from nevergrad.optimization.lama.QuantumInfluencedAdaptiveDifferentialSwarm import QuantumInfluencedAdaptiveDifferentialSwarm lama_register["QuantumInfluencedAdaptiveDifferentialSwarm"] = QuantumInfluencedAdaptiveDifferentialSwarm - LLAMAQuantumInfluencedAdaptiveDifferentialSwarm = NonObjectOptimizer( - method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm" - ).set_name("LLAMAQuantumInfluencedAdaptiveDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInfluencedAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm").set_name("LLAMAQuantumInfluencedAdaptiveDifferentialSwarm", register=True) except Exception as e: print("QuantumInfluencedAdaptiveDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearch import ( - QuantumInformedAdaptiveHybridSearch, - ) + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearch import QuantumInformedAdaptiveHybridSearch lama_register["QuantumInformedAdaptiveHybridSearch"] = QuantumInformedAdaptiveHybridSearch - LLAMAQuantumInformedAdaptiveHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveHybridSearch" - ).set_name("LLAMAQuantumInformedAdaptiveHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearch").set_name("LLAMAQuantumInformedAdaptiveHybridSearch", register=True) except Exception as e: print("QuantumInformedAdaptiveHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearchV4 import ( - QuantumInformedAdaptiveHybridSearchV4, - ) + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearchV4 import QuantumInformedAdaptiveHybridSearchV4 lama_register["QuantumInformedAdaptiveHybridSearchV4"] = QuantumInformedAdaptiveHybridSearchV4 - LLAMAQuantumInformedAdaptiveHybridSearchV4 = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveHybridSearchV4" - ).set_name("LLAMAQuantumInformedAdaptiveHybridSearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveHybridSearchV4 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearchV4").set_name("LLAMAQuantumInformedAdaptiveHybridSearchV4", register=True) except Exception as e: print("QuantumInformedAdaptiveHybridSearchV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveInertiaOptimizer import ( - QuantumInformedAdaptiveInertiaOptimizer, - ) + from nevergrad.optimization.lama.QuantumInformedAdaptiveInertiaOptimizer import QuantumInformedAdaptiveInertiaOptimizer lama_register["QuantumInformedAdaptiveInertiaOptimizer"] = QuantumInformedAdaptiveInertiaOptimizer - LLAMAQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveInertiaOptimizer" - ).set_name("LLAMAQuantumInformedAdaptiveInertiaOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveInertiaOptimizer").set_name("LLAMAQuantumInformedAdaptiveInertiaOptimizer", register=True) except Exception as e: print("QuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedAdaptivePSO import QuantumInformedAdaptivePSO lama_register["QuantumInformedAdaptivePSO"] = QuantumInformedAdaptivePSO - LLAMAQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO").set_name( - "LLAMAQuantumInformedAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO").set_name("LLAMAQuantumInformedAdaptivePSO", register=True) except Exception as e: print("QuantumInformedAdaptivePSO can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV4 import QuantumInformedAdaptiveSearchV4 lama_register["QuantumInformedAdaptiveSearchV4"] = QuantumInformedAdaptiveSearchV4 - LLAMAQuantumInformedAdaptiveSearchV4 = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveSearchV4" - ).set_name("LLAMAQuantumInformedAdaptiveSearchV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveSearchV4 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV4").set_name("LLAMAQuantumInformedAdaptiveSearchV4", register=True) except Exception as e: print("QuantumInformedAdaptiveSearchV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV5 import QuantumInformedAdaptiveSearchV5 lama_register["QuantumInformedAdaptiveSearchV5"] = QuantumInformedAdaptiveSearchV5 - LLAMAQuantumInformedAdaptiveSearchV5 = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveSearchV5" - ).set_name("LLAMAQuantumInformedAdaptiveSearchV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveSearchV5 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV5").set_name("LLAMAQuantumInformedAdaptiveSearchV5", register=True) except Exception as e: print("QuantumInformedAdaptiveSearchV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV6 import QuantumInformedAdaptiveSearchV6 lama_register["QuantumInformedAdaptiveSearchV6"] = QuantumInformedAdaptiveSearchV6 - LLAMAQuantumInformedAdaptiveSearchV6 = NonObjectOptimizer( - method="LLAMAQuantumInformedAdaptiveSearchV6" - ).set_name("LLAMAQuantumInformedAdaptiveSearchV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedAdaptiveSearchV6 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV6").set_name("LLAMAQuantumInformedAdaptiveSearchV6", register=True) except Exception as e: print("QuantumInformedAdaptiveSearchV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedCooperativeSearchV1 import ( - QuantumInformedCooperativeSearchV1, - ) + from nevergrad.optimization.lama.QuantumInformedCooperativeSearchV1 import QuantumInformedCooperativeSearchV1 lama_register["QuantumInformedCooperativeSearchV1"] = QuantumInformedCooperativeSearchV1 - LLAMAQuantumInformedCooperativeSearchV1 = NonObjectOptimizer( - method="LLAMAQuantumInformedCooperativeSearchV1" - ).set_name("LLAMAQuantumInformedCooperativeSearchV1", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedCooperativeSearchV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedCooperativeSearchV1 = NonObjectOptimizer(method="LLAMAQuantumInformedCooperativeSearchV1").set_name("LLAMAQuantumInformedCooperativeSearchV1", register=True) except Exception as e: print("QuantumInformedCooperativeSearchV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedCrossoverEvolution import ( - QuantumInformedCrossoverEvolution, - ) + from nevergrad.optimization.lama.QuantumInformedCrossoverEvolution import QuantumInformedCrossoverEvolution lama_register["QuantumInformedCrossoverEvolution"] = QuantumInformedCrossoverEvolution - LLAMAQuantumInformedCrossoverEvolution = NonObjectOptimizer( - method="LLAMAQuantumInformedCrossoverEvolution" - ).set_name("LLAMAQuantumInformedCrossoverEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedCrossoverEvolution = NonObjectOptimizer(method="LLAMAQuantumInformedCrossoverEvolution").set_name("LLAMAQuantumInformedCrossoverEvolution", register=True) except Exception as e: print("QuantumInformedCrossoverEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedDifferentialStrategy import ( - QuantumInformedDifferentialStrategy, - ) + from nevergrad.optimization.lama.QuantumInformedDifferentialStrategy import QuantumInformedDifferentialStrategy lama_register["QuantumInformedDifferentialStrategy"] = QuantumInformedDifferentialStrategy - LLAMAQuantumInformedDifferentialStrategy = NonObjectOptimizer( - method="LLAMAQuantumInformedDifferentialStrategy" - ).set_name("LLAMAQuantumInformedDifferentialStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedDifferentialStrategy = NonObjectOptimizer(method="LLAMAQuantumInformedDifferentialStrategy").set_name("LLAMAQuantumInformedDifferentialStrategy", register=True) except Exception as e: print("QuantumInformedDifferentialStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedDynamicSwarmOptimizer import ( - QuantumInformedDynamicSwarmOptimizer, - ) + from nevergrad.optimization.lama.QuantumInformedDynamicSwarmOptimizer import QuantumInformedDynamicSwarmOptimizer lama_register["QuantumInformedDynamicSwarmOptimizer"] = QuantumInformedDynamicSwarmOptimizer - LLAMAQuantumInformedDynamicSwarmOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedDynamicSwarmOptimizer" - ).set_name("LLAMAQuantumInformedDynamicSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedDynamicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedDynamicSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedDynamicSwarmOptimizer").set_name("LLAMAQuantumInformedDynamicSwarmOptimizer", register=True) except Exception as e: print("QuantumInformedDynamicSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedEvolutionStrategy import QuantumInformedEvolutionStrategy lama_register["QuantumInformedEvolutionStrategy"] = QuantumInformedEvolutionStrategy - LLAMAQuantumInformedEvolutionStrategy = NonObjectOptimizer( - method="LLAMAQuantumInformedEvolutionStrategy" - ).set_name("LLAMAQuantumInformedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumInformedEvolutionStrategy").set_name("LLAMAQuantumInformedEvolutionStrategy", register=True) except Exception as e: print("QuantumInformedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedGradientOptimizer import QuantumInformedGradientOptimizer lama_register["QuantumInformedGradientOptimizer"] = QuantumInformedGradientOptimizer - LLAMAQuantumInformedGradientOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedGradientOptimizer" - ).set_name("LLAMAQuantumInformedGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedGradientOptimizer").set_name("LLAMAQuantumInformedGradientOptimizer", register=True) except Exception as e: print("QuantumInformedGradientOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedHyperStrategicOptimizer import ( - QuantumInformedHyperStrategicOptimizer, - ) + from nevergrad.optimization.lama.QuantumInformedHyperStrategicOptimizer import QuantumInformedHyperStrategicOptimizer lama_register["QuantumInformedHyperStrategicOptimizer"] = QuantumInformedHyperStrategicOptimizer - LLAMAQuantumInformedHyperStrategicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedHyperStrategicOptimizer" - ).set_name("LLAMAQuantumInformedHyperStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedHyperStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedHyperStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedHyperStrategicOptimizer").set_name("LLAMAQuantumInformedHyperStrategicOptimizer", register=True) except Exception as e: print("QuantumInformedHyperStrategicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedOptimizer import QuantumInformedOptimizer lama_register["QuantumInformedOptimizer"] = QuantumInformedOptimizer - LLAMAQuantumInformedOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer").set_name( - "LLAMAQuantumInformedOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer").set_name("LLAMAQuantumInformedOptimizer", register=True) except Exception as e: print("QuantumInformedOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInformedPSO import QuantumInformedPSO lama_register["QuantumInformedPSO"] = QuantumInformedPSO - LLAMAQuantumInformedPSO = NonObjectOptimizer(method="LLAMAQuantumInformedPSO").set_name( - "LLAMAQuantumInformedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedPSO = NonObjectOptimizer(method="LLAMAQuantumInformedPSO").set_name("LLAMAQuantumInformedPSO", register=True) except Exception as e: print("QuantumInformedPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedParticleSwarmOptimizer import ( - QuantumInformedParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.QuantumInformedParticleSwarmOptimizer import QuantumInformedParticleSwarmOptimizer lama_register["QuantumInformedParticleSwarmOptimizer"] = QuantumInformedParticleSwarmOptimizer - LLAMAQuantumInformedParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedParticleSwarmOptimizer" - ).set_name("LLAMAQuantumInformedParticleSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedParticleSwarmOptimizer").set_name("LLAMAQuantumInformedParticleSwarmOptimizer", register=True) except Exception as e: print("QuantumInformedParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInformedStrategicOptimizer import ( - QuantumInformedStrategicOptimizer, - ) + from nevergrad.optimization.lama.QuantumInformedStrategicOptimizer import QuantumInformedStrategicOptimizer lama_register["QuantumInformedStrategicOptimizer"] = QuantumInformedStrategicOptimizer - LLAMAQuantumInformedStrategicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInformedStrategicOptimizer" - ).set_name("LLAMAQuantumInformedStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInformedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInformedStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedStrategicOptimizer").set_name("LLAMAQuantumInformedStrategicOptimizer", register=True) except Exception as e: print("QuantumInformedStrategicOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInfusedAdaptiveStrategy import QuantumInfusedAdaptiveStrategy lama_register["QuantumInfusedAdaptiveStrategy"] = QuantumInfusedAdaptiveStrategy - LLAMAQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( - method="LLAMAQuantumInfusedAdaptiveStrategy" - ).set_name("LLAMAQuantumInfusedAdaptiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInfusedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumInfusedAdaptiveStrategy").set_name("LLAMAQuantumInfusedAdaptiveStrategy", register=True) except Exception as e: print("QuantumInfusedAdaptiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEElitistLocalSearch import ( - QuantumInspiredAdaptiveDEElitistLocalSearch, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEElitistLocalSearch import QuantumInspiredAdaptiveDEElitistLocalSearch lama_register["QuantumInspiredAdaptiveDEElitistLocalSearch"] = QuantumInspiredAdaptiveDEElitistLocalSearch - LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch" - ).set_name("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch", register=True) except Exception as e: print("QuantumInspiredAdaptiveDEElitistLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEHybridLocalSearch import ( - QuantumInspiredAdaptiveDEHybridLocalSearch, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEHybridLocalSearch import QuantumInspiredAdaptiveDEHybridLocalSearch lama_register["QuantumInspiredAdaptiveDEHybridLocalSearch"] = QuantumInspiredAdaptiveDEHybridLocalSearch - LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch" - ).set_name("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch", register=True) except Exception as e: print("QuantumInspiredAdaptiveDEHybridLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning import ( - QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning import QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning - lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning"] = ( - QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning - ) - LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning" - ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning", register=True) + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning"] = QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning").set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning", register=True) except Exception as e: print("QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch import ( - QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch import QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch - lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch"] = ( - QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch - ) - LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch" - ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch", register=True) + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch"] = QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch", register=True) except Exception as e: print("QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridDEPSO import ( - QuantumInspiredAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridDEPSO import QuantumInspiredAdaptiveHybridDEPSO lama_register["QuantumInspiredAdaptiveHybridDEPSO"] = QuantumInspiredAdaptiveHybridDEPSO - LLAMAQuantumInspiredAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveHybridDEPSO" - ).set_name("LLAMAQuantumInspiredAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridDEPSO").set_name("LLAMAQuantumInspiredAdaptiveHybridDEPSO", register=True) except Exception as e: print("QuantumInspiredAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridOptimizer import ( - QuantumInspiredAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridOptimizer import QuantumInspiredAdaptiveHybridOptimizer lama_register["QuantumInspiredAdaptiveHybridOptimizer"] = QuantumInspiredAdaptiveHybridOptimizer - LLAMAQuantumInspiredAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveHybridOptimizer" - ).set_name("LLAMAQuantumInspiredAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridOptimizer").set_name("LLAMAQuantumInspiredAdaptiveHybridOptimizer", register=True) except Exception as e: print("QuantumInspiredAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveMemeticOptimizer import ( - QuantumInspiredAdaptiveMemeticOptimizer, - ) + from nevergrad.optimization.lama.QuantumInspiredAdaptiveMemeticOptimizer import QuantumInspiredAdaptiveMemeticOptimizer lama_register["QuantumInspiredAdaptiveMemeticOptimizer"] = QuantumInspiredAdaptiveMemeticOptimizer - LLAMAQuantumInspiredAdaptiveMemeticOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer" - ).set_name("LLAMAQuantumInspiredAdaptiveMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer").set_name("LLAMAQuantumInspiredAdaptiveMemeticOptimizer", register=True) except Exception as e: print("QuantumInspiredAdaptiveMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredDifferentialEvolution import ( - QuantumInspiredDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumInspiredDifferentialEvolution import QuantumInspiredDifferentialEvolution lama_register["QuantumInspiredDifferentialEvolution"] = QuantumInspiredDifferentialEvolution - LLAMAQuantumInspiredDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumInspiredDifferentialEvolution" - ).set_name("LLAMAQuantumInspiredDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialEvolution").set_name("LLAMAQuantumInspiredDifferentialEvolution", register=True) except Exception as e: print("QuantumInspiredDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumInspiredDifferentialParticleSwarmOptimizer import ( - QuantumInspiredDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.QuantumInspiredDifferentialParticleSwarmOptimizer import QuantumInspiredDifferentialParticleSwarmOptimizer - lama_register["QuantumInspiredDifferentialParticleSwarmOptimizer"] = ( - QuantumInspiredDifferentialParticleSwarmOptimizer - ) - LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer" - ).set_name("LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer", register=True) + lama_register["QuantumInspiredDifferentialParticleSwarmOptimizer"] = QuantumInspiredDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer").set_name("LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("QuantumInspiredDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInspiredHybridOptimizer import QuantumInspiredHybridOptimizer lama_register["QuantumInspiredHybridOptimizer"] = QuantumInspiredHybridOptimizer - LLAMAQuantumInspiredHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInspiredHybridOptimizer" - ).set_name("LLAMAQuantumInspiredHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredHybridOptimizer").set_name("LLAMAQuantumInspiredHybridOptimizer", register=True) except Exception as e: print("QuantumInspiredHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInspiredMetaheuristic import QuantumInspiredMetaheuristic lama_register["QuantumInspiredMetaheuristic"] = QuantumInspiredMetaheuristic - LLAMAQuantumInspiredMetaheuristic = NonObjectOptimizer( - method="LLAMAQuantumInspiredMetaheuristic" - ).set_name("LLAMAQuantumInspiredMetaheuristic", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredMetaheuristic = NonObjectOptimizer(method="LLAMAQuantumInspiredMetaheuristic").set_name("LLAMAQuantumInspiredMetaheuristic", register=True) except Exception as e: print("QuantumInspiredMetaheuristic can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInspiredOptimization import QuantumInspiredOptimization lama_register["QuantumInspiredOptimization"] = QuantumInspiredOptimization - LLAMAQuantumInspiredOptimization = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization").set_name( - "LLAMAQuantumInspiredOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredOptimization = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization").set_name("LLAMAQuantumInspiredOptimization", register=True) except Exception as e: print("QuantumInspiredOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumInspiredSpiralOptimizer import QuantumInspiredSpiralOptimizer lama_register["QuantumInspiredSpiralOptimizer"] = QuantumInspiredSpiralOptimizer - LLAMAQuantumInspiredSpiralOptimizer = NonObjectOptimizer( - method="LLAMAQuantumInspiredSpiralOptimizer" - ).set_name("LLAMAQuantumInspiredSpiralOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumInspiredSpiralOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumInspiredSpiralOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredSpiralOptimizer").set_name("LLAMAQuantumInspiredSpiralOptimizer", register=True) except Exception as e: print("QuantumInspiredSpiralOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumIterativeDeepeningHybridSearch import ( - QuantumIterativeDeepeningHybridSearch, - ) + from nevergrad.optimization.lama.QuantumIterativeDeepeningHybridSearch import QuantumIterativeDeepeningHybridSearch lama_register["QuantumIterativeDeepeningHybridSearch"] = QuantumIterativeDeepeningHybridSearch - LLAMAQuantumIterativeDeepeningHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumIterativeDeepeningHybridSearch" - ).set_name("LLAMAQuantumIterativeDeepeningHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumIterativeDeepeningHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumIterativeDeepeningHybridSearch = NonObjectOptimizer(method="LLAMAQuantumIterativeDeepeningHybridSearch").set_name("LLAMAQuantumIterativeDeepeningHybridSearch", register=True) except Exception as e: print("QuantumIterativeDeepeningHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumIterativeRefinementOptimizer import ( - QuantumIterativeRefinementOptimizer, - ) + from nevergrad.optimization.lama.QuantumIterativeRefinementOptimizer import QuantumIterativeRefinementOptimizer lama_register["QuantumIterativeRefinementOptimizer"] = QuantumIterativeRefinementOptimizer - LLAMAQuantumIterativeRefinementOptimizer = NonObjectOptimizer( - method="LLAMAQuantumIterativeRefinementOptimizer" - ).set_name("LLAMAQuantumIterativeRefinementOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumIterativeRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumIterativeRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumIterativeRefinementOptimizer").set_name("LLAMAQuantumIterativeRefinementOptimizer", register=True) except Exception as e: print("QuantumIterativeRefinementOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLeapOptimizer import QuantumLeapOptimizer lama_register["QuantumLeapOptimizer"] = QuantumLeapOptimizer - LLAMAQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer").set_name( - "LLAMAQuantumLeapOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer").set_name("LLAMAQuantumLeapOptimizer", register=True) except Exception as e: print("QuantumLeapOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLeapOptimizerV2 import QuantumLeapOptimizerV2 lama_register["QuantumLeapOptimizerV2"] = QuantumLeapOptimizerV2 - LLAMAQuantumLeapOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2").set_name( - "LLAMAQuantumLeapOptimizerV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLeapOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2").set_name("LLAMAQuantumLeapOptimizerV2", register=True) except Exception as e: print("QuantumLeapOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDEHybridLocalSearch import ( - QuantumLevyAdaptiveDEHybridLocalSearch, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDEHybridLocalSearch import QuantumLevyAdaptiveDEHybridLocalSearch lama_register["QuantumLevyAdaptiveDEHybridLocalSearch"] = QuantumLevyAdaptiveDEHybridLocalSearch - LLAMAQuantumLevyAdaptiveDEHybridLocalSearch = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch" - ).set_name("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDEHybridLocalSearch = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch").set_name("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch", register=True) except Exception as e: print("QuantumLevyAdaptiveDEHybridLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV2 import ( - QuantumLevyAdaptiveDifferentialOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV2 import QuantumLevyAdaptiveDifferentialOptimizerV2 lama_register["QuantumLevyAdaptiveDifferentialOptimizerV2"] = QuantumLevyAdaptiveDifferentialOptimizerV2 - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2" - ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2", register=True) except Exception as e: print("QuantumLevyAdaptiveDifferentialOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV3 import ( - QuantumLevyAdaptiveDifferentialOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV3 import QuantumLevyAdaptiveDifferentialOptimizerV3 lama_register["QuantumLevyAdaptiveDifferentialOptimizerV3"] = QuantumLevyAdaptiveDifferentialOptimizerV3 - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3" - ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3", register=True) except Exception as e: print("QuantumLevyAdaptiveDifferentialOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV4 import ( - QuantumLevyAdaptiveDifferentialOptimizerV4, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV4 import QuantumLevyAdaptiveDifferentialOptimizerV4 lama_register["QuantumLevyAdaptiveDifferentialOptimizerV4"] = QuantumLevyAdaptiveDifferentialOptimizerV4 - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4" - ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4", register=True) except Exception as e: print("QuantumLevyAdaptiveDifferentialOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV5 import ( - QuantumLevyAdaptiveDifferentialOptimizerV5, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV5 import QuantumLevyAdaptiveDifferentialOptimizerV5 lama_register["QuantumLevyAdaptiveDifferentialOptimizerV5"] = QuantumLevyAdaptiveDifferentialOptimizerV5 - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5" - ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5", register=True) except Exception as e: print("QuantumLevyAdaptiveDifferentialOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV6 import ( - QuantumLevyAdaptiveDifferentialOptimizerV6, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV6 import QuantumLevyAdaptiveDifferentialOptimizerV6 lama_register["QuantumLevyAdaptiveDifferentialOptimizerV6"] = QuantumLevyAdaptiveDifferentialOptimizerV6 - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6" - ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6", register=True) except Exception as e: print("QuantumLevyAdaptiveDifferentialOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveMemeticOptimizerV3 import ( - QuantumLevyAdaptiveMemeticOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumLevyAdaptiveMemeticOptimizerV3 import QuantumLevyAdaptiveMemeticOptimizerV3 lama_register["QuantumLevyAdaptiveMemeticOptimizerV3"] = QuantumLevyAdaptiveMemeticOptimizerV3 - LLAMAQuantumLevyAdaptiveMemeticOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3" - ).set_name("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyAdaptiveMemeticOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3").set_name("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3", register=True) except Exception as e: print("QuantumLevyAdaptiveMemeticOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizer import ( - QuantumLevyDifferentialDynamicOptimizer, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizer import QuantumLevyDifferentialDynamicOptimizer lama_register["QuantumLevyDifferentialDynamicOptimizer"] = QuantumLevyDifferentialDynamicOptimizer - LLAMAQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialDynamicOptimizer" - ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizer").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizer", register=True) except Exception as e: print("QuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV2 import ( - QuantumLevyDifferentialDynamicOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV2 import QuantumLevyDifferentialDynamicOptimizerV2 lama_register["QuantumLevyDifferentialDynamicOptimizerV2"] = QuantumLevyDifferentialDynamicOptimizerV2 - LLAMAQuantumLevyDifferentialDynamicOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2" - ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialDynamicOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV2", register=True) except Exception as e: print("QuantumLevyDifferentialDynamicOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV3 import ( - QuantumLevyDifferentialDynamicOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV3 import QuantumLevyDifferentialDynamicOptimizerV3 lama_register["QuantumLevyDifferentialDynamicOptimizerV3"] = QuantumLevyDifferentialDynamicOptimizerV3 - LLAMAQuantumLevyDifferentialDynamicOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3" - ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialDynamicOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV3", register=True) except Exception as e: print("QuantumLevyDifferentialDynamicOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizer import ( - QuantumLevyDifferentialHybridOptimizer, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizer import QuantumLevyDifferentialHybridOptimizer lama_register["QuantumLevyDifferentialHybridOptimizer"] = QuantumLevyDifferentialHybridOptimizer - LLAMAQuantumLevyDifferentialHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialHybridOptimizer" - ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizer").set_name("LLAMAQuantumLevyDifferentialHybridOptimizer", register=True) except Exception as e: print("QuantumLevyDifferentialHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizerV2 import ( - QuantumLevyDifferentialHybridOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizerV2 import QuantumLevyDifferentialHybridOptimizerV2 lama_register["QuantumLevyDifferentialHybridOptimizerV2"] = QuantumLevyDifferentialHybridOptimizerV2 - LLAMAQuantumLevyDifferentialHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialHybridOptimizerV2" - ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizerV2").set_name("LLAMAQuantumLevyDifferentialHybridOptimizerV2", register=True) except Exception as e: print("QuantumLevyDifferentialHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridSearch import ( - QuantumLevyDifferentialHybridSearch, - ) + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridSearch import QuantumLevyDifferentialHybridSearch lama_register["QuantumLevyDifferentialHybridSearch"] = QuantumLevyDifferentialHybridSearch - LLAMAQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( - method="LLAMAQuantumLevyDifferentialHybridSearch" - ).set_name("LLAMAQuantumLevyDifferentialHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDifferentialHybridSearch = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridSearch").set_name("LLAMAQuantumLevyDifferentialHybridSearch", register=True) except Exception as e: print("QuantumLevyDifferentialHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmOptimizerV3 import ( - QuantumLevyDynamicDifferentialSwarmOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmOptimizerV3 import QuantumLevyDynamicDifferentialSwarmOptimizerV3 - lama_register["QuantumLevyDynamicDifferentialSwarmOptimizerV3"] = ( - QuantumLevyDynamicDifferentialSwarmOptimizerV3 - ) - LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3" - ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3", register=True) + lama_register["QuantumLevyDynamicDifferentialSwarmOptimizerV3"] = QuantumLevyDynamicDifferentialSwarmOptimizerV3 + res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3").set_name("LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3", register=True) except Exception as e: print("QuantumLevyDynamicDifferentialSwarmOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmV5 import ( - QuantumLevyDynamicDifferentialSwarmV5, - ) + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmV5 import QuantumLevyDynamicDifferentialSwarmV5 lama_register["QuantumLevyDynamicDifferentialSwarmV5"] = QuantumLevyDynamicDifferentialSwarmV5 - LLAMAQuantumLevyDynamicDifferentialSwarmV5 = NonObjectOptimizer( - method="LLAMAQuantumLevyDynamicDifferentialSwarmV5" - ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDynamicDifferentialSwarmV5 = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmV5").set_name("LLAMAQuantumLevyDynamicDifferentialSwarmV5", register=True) except Exception as e: print("QuantumLevyDynamicDifferentialSwarmV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLevyDynamicParticleSwarm import QuantumLevyDynamicParticleSwarm lama_register["QuantumLevyDynamicParticleSwarm"] = QuantumLevyDynamicParticleSwarm - LLAMAQuantumLevyDynamicParticleSwarm = NonObjectOptimizer( - method="LLAMAQuantumLevyDynamicParticleSwarm" - ).set_name("LLAMAQuantumLevyDynamicParticleSwarm", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicParticleSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDynamicParticleSwarm = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicParticleSwarm").set_name("LLAMAQuantumLevyDynamicParticleSwarm", register=True) except Exception as e: print("QuantumLevyDynamicParticleSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyDynamicSwarmOptimization import ( - QuantumLevyDynamicSwarmOptimization, - ) + from nevergrad.optimization.lama.QuantumLevyDynamicSwarmOptimization import QuantumLevyDynamicSwarmOptimization lama_register["QuantumLevyDynamicSwarmOptimization"] = QuantumLevyDynamicSwarmOptimization - LLAMAQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( - method="LLAMAQuantumLevyDynamicSwarmOptimization" - ).set_name("LLAMAQuantumLevyDynamicSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicSwarmOptimization").set_name("LLAMAQuantumLevyDynamicSwarmOptimization", register=True) except Exception as e: print("QuantumLevyDynamicSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyEliteMemeticDEHybridOptimizer import ( - QuantumLevyEliteMemeticDEHybridOptimizer, - ) + from nevergrad.optimization.lama.QuantumLevyEliteMemeticDEHybridOptimizer import QuantumLevyEliteMemeticDEHybridOptimizer lama_register["QuantumLevyEliteMemeticDEHybridOptimizer"] = QuantumLevyEliteMemeticDEHybridOptimizer - LLAMAQuantumLevyEliteMemeticDEHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer" - ).set_name("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEliteMemeticDEHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer").set_name("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer", register=True) except Exception as e: print("QuantumLevyEliteMemeticDEHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLevyEliteMemeticOptimizer import QuantumLevyEliteMemeticOptimizer lama_register["QuantumLevyEliteMemeticOptimizer"] = QuantumLevyEliteMemeticOptimizer - LLAMAQuantumLevyEliteMemeticOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyEliteMemeticOptimizer" - ).set_name("LLAMAQuantumLevyEliteMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticOptimizer").set_name("LLAMAQuantumLevyEliteMemeticOptimizer", register=True) except Exception as e: print("QuantumLevyEliteMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveDifferentialOptimizer import ( - QuantumLevyEnhancedAdaptiveDifferentialOptimizer, - ) + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveDifferentialOptimizer import QuantumLevyEnhancedAdaptiveDifferentialOptimizer - lama_register["QuantumLevyEnhancedAdaptiveDifferentialOptimizer"] = ( - QuantumLevyEnhancedAdaptiveDifferentialOptimizer - ) - LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer" - ).set_name("LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer", register=True) + lama_register["QuantumLevyEnhancedAdaptiveDifferentialOptimizer"] = QuantumLevyEnhancedAdaptiveDifferentialOptimizer + res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer").set_name("LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer", register=True) except Exception as e: print("QuantumLevyEnhancedAdaptiveDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveOptimizerV2 import ( - QuantumLevyEnhancedAdaptiveOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveOptimizerV2 import QuantumLevyEnhancedAdaptiveOptimizerV2 lama_register["QuantumLevyEnhancedAdaptiveOptimizerV2"] = QuantumLevyEnhancedAdaptiveOptimizerV2 - LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2" - ).set_name("LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2").set_name("LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2", register=True) except Exception as e: print("QuantumLevyEnhancedAdaptiveOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyEnhancedDifferentialOptimizer import ( - QuantumLevyEnhancedDifferentialOptimizer, - ) + from nevergrad.optimization.lama.QuantumLevyEnhancedDifferentialOptimizer import QuantumLevyEnhancedDifferentialOptimizer lama_register["QuantumLevyEnhancedDifferentialOptimizer"] = QuantumLevyEnhancedDifferentialOptimizer - LLAMAQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( - method="LLAMAQuantumLevyEnhancedDifferentialOptimizer" - ).set_name("LLAMAQuantumLevyEnhancedDifferentialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedDifferentialOptimizer").set_name("LLAMAQuantumLevyEnhancedDifferentialOptimizer", register=True) except Exception as e: print("QuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyEnhancedMemeticOptimizerV2 import ( - QuantumLevyEnhancedMemeticOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumLevyEnhancedMemeticOptimizerV2 import QuantumLevyEnhancedMemeticOptimizerV2 lama_register["QuantumLevyEnhancedMemeticOptimizerV2"] = QuantumLevyEnhancedMemeticOptimizerV2 - LLAMAQuantumLevyEnhancedMemeticOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2" - ).set_name("LLAMAQuantumLevyEnhancedMemeticOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyEnhancedMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2").set_name("LLAMAQuantumLevyEnhancedMemeticOptimizerV2", register=True) except Exception as e: print("QuantumLevyEnhancedMemeticOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyImprovedDifferentialSwarmOptimization import ( - QuantumLevyImprovedDifferentialSwarmOptimization, - ) + from nevergrad.optimization.lama.QuantumLevyImprovedDifferentialSwarmOptimization import QuantumLevyImprovedDifferentialSwarmOptimization - lama_register["QuantumLevyImprovedDifferentialSwarmOptimization"] = ( - QuantumLevyImprovedDifferentialSwarmOptimization - ) - LLAMAQuantumLevyImprovedDifferentialSwarmOptimization = NonObjectOptimizer( - method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization" - ).set_name("LLAMAQuantumLevyImprovedDifferentialSwarmOptimization", register=True) + lama_register["QuantumLevyImprovedDifferentialSwarmOptimization"] = QuantumLevyImprovedDifferentialSwarmOptimization + res = NonObjectOptimizer(method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyImprovedDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization").set_name("LLAMAQuantumLevyImprovedDifferentialSwarmOptimization", register=True) except Exception as e: print("QuantumLevyImprovedDifferentialSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumLevyParticleAdaptiveOptimization import ( - QuantumLevyParticleAdaptiveOptimization, - ) + from nevergrad.optimization.lama.QuantumLevyParticleAdaptiveOptimization import QuantumLevyParticleAdaptiveOptimization lama_register["QuantumLevyParticleAdaptiveOptimization"] = QuantumLevyParticleAdaptiveOptimization - LLAMAQuantumLevyParticleAdaptiveOptimization = NonObjectOptimizer( - method="LLAMAQuantumLevyParticleAdaptiveOptimization" - ).set_name("LLAMAQuantumLevyParticleAdaptiveOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevyParticleAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevyParticleAdaptiveOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyParticleAdaptiveOptimization").set_name("LLAMAQuantumLevyParticleAdaptiveOptimization", register=True) except Exception as e: print("QuantumLevyParticleAdaptiveOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLevySwarmOptimizationV3 import QuantumLevySwarmOptimizationV3 lama_register["QuantumLevySwarmOptimizationV3"] = QuantumLevySwarmOptimizationV3 - LLAMAQuantumLevySwarmOptimizationV3 = NonObjectOptimizer( - method="LLAMAQuantumLevySwarmOptimizationV3" - ).set_name("LLAMAQuantumLevySwarmOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumLevySwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLevySwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumLevySwarmOptimizationV3").set_name("LLAMAQuantumLevySwarmOptimizationV3", register=True) except Exception as e: print("QuantumLevySwarmOptimizationV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLocustSearch import QuantumLocustSearch lama_register["QuantumLocustSearch"] = QuantumLocustSearch - LLAMAQuantumLocustSearch = NonObjectOptimizer(method="LLAMAQuantumLocustSearch").set_name( - "LLAMAQuantumLocustSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumLocustSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLocustSearch = NonObjectOptimizer(method="LLAMAQuantumLocustSearch").set_name("LLAMAQuantumLocustSearch", register=True) except Exception as e: print("QuantumLocustSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumLocustSearchV2 import QuantumLocustSearchV2 lama_register["QuantumLocustSearchV2"] = QuantumLocustSearchV2 - LLAMAQuantumLocustSearchV2 = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2").set_name( - "LLAMAQuantumLocustSearchV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumLocustSearchV2 = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2").set_name("LLAMAQuantumLocustSearchV2", register=True) except Exception as e: print("QuantumLocustSearchV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalAdaptiveCrossoverOptimizerV20 import ( - QuantumOrbitalAdaptiveCrossoverOptimizerV20, - ) + from nevergrad.optimization.lama.QuantumOrbitalAdaptiveCrossoverOptimizerV20 import QuantumOrbitalAdaptiveCrossoverOptimizerV20 lama_register["QuantumOrbitalAdaptiveCrossoverOptimizerV20"] = QuantumOrbitalAdaptiveCrossoverOptimizerV20 - LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20" - ).set_name("LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20 = NonObjectOptimizer(method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20").set_name("LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20", register=True) except Exception as e: print("QuantumOrbitalAdaptiveCrossoverOptimizerV20 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV12 import QuantumOrbitalDynamicEnhancerV12 lama_register["QuantumOrbitalDynamicEnhancerV12"] = QuantumOrbitalDynamicEnhancerV12 - LLAMAQuantumOrbitalDynamicEnhancerV12 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV12" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV12", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV12 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV12").set_name("LLAMAQuantumOrbitalDynamicEnhancerV12", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV13 import QuantumOrbitalDynamicEnhancerV13 lama_register["QuantumOrbitalDynamicEnhancerV13"] = QuantumOrbitalDynamicEnhancerV13 - LLAMAQuantumOrbitalDynamicEnhancerV13 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV13" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV13", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV13 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV13").set_name("LLAMAQuantumOrbitalDynamicEnhancerV13", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV14 import QuantumOrbitalDynamicEnhancerV14 lama_register["QuantumOrbitalDynamicEnhancerV14"] = QuantumOrbitalDynamicEnhancerV14 - LLAMAQuantumOrbitalDynamicEnhancerV14 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV14" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV14", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV14 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV14").set_name("LLAMAQuantumOrbitalDynamicEnhancerV14", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV14 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV15 import QuantumOrbitalDynamicEnhancerV15 lama_register["QuantumOrbitalDynamicEnhancerV15"] = QuantumOrbitalDynamicEnhancerV15 - LLAMAQuantumOrbitalDynamicEnhancerV15 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV15" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV15", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV15 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV15").set_name("LLAMAQuantumOrbitalDynamicEnhancerV15", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV16 import QuantumOrbitalDynamicEnhancerV16 lama_register["QuantumOrbitalDynamicEnhancerV16"] = QuantumOrbitalDynamicEnhancerV16 - LLAMAQuantumOrbitalDynamicEnhancerV16 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV16" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV16", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV16 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV16").set_name("LLAMAQuantumOrbitalDynamicEnhancerV16", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV17 import QuantumOrbitalDynamicEnhancerV17 lama_register["QuantumOrbitalDynamicEnhancerV17"] = QuantumOrbitalDynamicEnhancerV17 - LLAMAQuantumOrbitalDynamicEnhancerV17 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV17" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV17", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV17 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV17").set_name("LLAMAQuantumOrbitalDynamicEnhancerV17", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV17 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV18 import QuantumOrbitalDynamicEnhancerV18 lama_register["QuantumOrbitalDynamicEnhancerV18"] = QuantumOrbitalDynamicEnhancerV18 - LLAMAQuantumOrbitalDynamicEnhancerV18 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV18" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV18", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV18 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV18").set_name("LLAMAQuantumOrbitalDynamicEnhancerV18", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV24 import QuantumOrbitalDynamicEnhancerV24 lama_register["QuantumOrbitalDynamicEnhancerV24"] = QuantumOrbitalDynamicEnhancerV24 - LLAMAQuantumOrbitalDynamicEnhancerV24 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV24" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV24", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV24 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV24").set_name("LLAMAQuantumOrbitalDynamicEnhancerV24", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV24 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV25 import QuantumOrbitalDynamicEnhancerV25 lama_register["QuantumOrbitalDynamicEnhancerV25"] = QuantumOrbitalDynamicEnhancerV25 - LLAMAQuantumOrbitalDynamicEnhancerV25 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV25" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV25", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV25 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV25").set_name("LLAMAQuantumOrbitalDynamicEnhancerV25", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV25 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV26 import QuantumOrbitalDynamicEnhancerV26 lama_register["QuantumOrbitalDynamicEnhancerV26"] = QuantumOrbitalDynamicEnhancerV26 - LLAMAQuantumOrbitalDynamicEnhancerV26 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV26" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV26", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV26 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV26").set_name("LLAMAQuantumOrbitalDynamicEnhancerV26", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV26 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV27 import QuantumOrbitalDynamicEnhancerV27 lama_register["QuantumOrbitalDynamicEnhancerV27"] = QuantumOrbitalDynamicEnhancerV27 - LLAMAQuantumOrbitalDynamicEnhancerV27 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV27" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV27", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV27 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV27").set_name("LLAMAQuantumOrbitalDynamicEnhancerV27", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV27 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV28 import QuantumOrbitalDynamicEnhancerV28 lama_register["QuantumOrbitalDynamicEnhancerV28"] = QuantumOrbitalDynamicEnhancerV28 - LLAMAQuantumOrbitalDynamicEnhancerV28 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV28" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV28", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV28 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV28").set_name("LLAMAQuantumOrbitalDynamicEnhancerV28", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV28 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV29 import QuantumOrbitalDynamicEnhancerV29 lama_register["QuantumOrbitalDynamicEnhancerV29"] = QuantumOrbitalDynamicEnhancerV29 - LLAMAQuantumOrbitalDynamicEnhancerV29 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV29" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV29", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV29 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV29").set_name("LLAMAQuantumOrbitalDynamicEnhancerV29", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV29 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV30 import QuantumOrbitalDynamicEnhancerV30 lama_register["QuantumOrbitalDynamicEnhancerV30"] = QuantumOrbitalDynamicEnhancerV30 - LLAMAQuantumOrbitalDynamicEnhancerV30 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV30" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV30", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV30 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV30").set_name("LLAMAQuantumOrbitalDynamicEnhancerV30", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV30 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV31 import QuantumOrbitalDynamicEnhancerV31 lama_register["QuantumOrbitalDynamicEnhancerV31"] = QuantumOrbitalDynamicEnhancerV31 - LLAMAQuantumOrbitalDynamicEnhancerV31 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV31" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV31", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV31 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV31").set_name("LLAMAQuantumOrbitalDynamicEnhancerV31", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV31 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV32 import QuantumOrbitalDynamicEnhancerV32 lama_register["QuantumOrbitalDynamicEnhancerV32"] = QuantumOrbitalDynamicEnhancerV32 - LLAMAQuantumOrbitalDynamicEnhancerV32 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV32" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV32", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV32 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV32").set_name("LLAMAQuantumOrbitalDynamicEnhancerV32", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV32 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV33 import QuantumOrbitalDynamicEnhancerV33 lama_register["QuantumOrbitalDynamicEnhancerV33"] = QuantumOrbitalDynamicEnhancerV33 - LLAMAQuantumOrbitalDynamicEnhancerV33 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV33" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV33", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV33 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV33").set_name("LLAMAQuantumOrbitalDynamicEnhancerV33", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV33 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV34 import QuantumOrbitalDynamicEnhancerV34 lama_register["QuantumOrbitalDynamicEnhancerV34"] = QuantumOrbitalDynamicEnhancerV34 - LLAMAQuantumOrbitalDynamicEnhancerV34 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicEnhancerV34" - ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV34", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicEnhancerV34 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV34").set_name("LLAMAQuantumOrbitalDynamicEnhancerV34", register=True) except Exception as e: print("QuantumOrbitalDynamicEnhancerV34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalDynamicOptimizerV11 import ( - QuantumOrbitalDynamicOptimizerV11, - ) + from nevergrad.optimization.lama.QuantumOrbitalDynamicOptimizerV11 import QuantumOrbitalDynamicOptimizerV11 lama_register["QuantumOrbitalDynamicOptimizerV11"] = QuantumOrbitalDynamicOptimizerV11 - LLAMAQuantumOrbitalDynamicOptimizerV11 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalDynamicOptimizerV11" - ).set_name("LLAMAQuantumOrbitalDynamicOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalDynamicOptimizerV11 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicOptimizerV11").set_name("LLAMAQuantumOrbitalDynamicOptimizerV11", register=True) except Exception as e: print("QuantumOrbitalDynamicOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalEnhancedCrossoverOptimizerV22 import ( - QuantumOrbitalEnhancedCrossoverOptimizerV22, - ) + from nevergrad.optimization.lama.QuantumOrbitalEnhancedCrossoverOptimizerV22 import QuantumOrbitalEnhancedCrossoverOptimizerV22 lama_register["QuantumOrbitalEnhancedCrossoverOptimizerV22"] = QuantumOrbitalEnhancedCrossoverOptimizerV22 - LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22" - ).set_name("LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22 = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22").set_name("LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22", register=True) except Exception as e: print("QuantumOrbitalEnhancedCrossoverOptimizerV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalEnhancedDynamicEnhancerV19 import ( - QuantumOrbitalEnhancedDynamicEnhancerV19, - ) + from nevergrad.optimization.lama.QuantumOrbitalEnhancedDynamicEnhancerV19 import QuantumOrbitalEnhancedDynamicEnhancerV19 lama_register["QuantumOrbitalEnhancedDynamicEnhancerV19"] = QuantumOrbitalEnhancedDynamicEnhancerV19 - LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19" - ).set_name("LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19 = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19").set_name("LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19", register=True) except Exception as e: print("QuantumOrbitalEnhancedDynamicEnhancerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalHarmonicOptimizerV10 import ( - QuantumOrbitalHarmonicOptimizerV10, - ) + from nevergrad.optimization.lama.QuantumOrbitalHarmonicOptimizerV10 import QuantumOrbitalHarmonicOptimizerV10 lama_register["QuantumOrbitalHarmonicOptimizerV10"] = QuantumOrbitalHarmonicOptimizerV10 - LLAMAQuantumOrbitalHarmonicOptimizerV10 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalHarmonicOptimizerV10" - ).set_name("LLAMAQuantumOrbitalHarmonicOptimizerV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalHarmonicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalHarmonicOptimizerV10 = NonObjectOptimizer(method="LLAMAQuantumOrbitalHarmonicOptimizerV10").set_name("LLAMAQuantumOrbitalHarmonicOptimizerV10", register=True) except Exception as e: print("QuantumOrbitalHarmonicOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalPrecisionOptimizerV34 import ( - QuantumOrbitalPrecisionOptimizerV34, - ) + from nevergrad.optimization.lama.QuantumOrbitalPrecisionOptimizerV34 import QuantumOrbitalPrecisionOptimizerV34 lama_register["QuantumOrbitalPrecisionOptimizerV34"] = QuantumOrbitalPrecisionOptimizerV34 - LLAMAQuantumOrbitalPrecisionOptimizerV34 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalPrecisionOptimizerV34" - ).set_name("LLAMAQuantumOrbitalPrecisionOptimizerV34", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalPrecisionOptimizerV34 = NonObjectOptimizer(method="LLAMAQuantumOrbitalPrecisionOptimizerV34").set_name("LLAMAQuantumOrbitalPrecisionOptimizerV34", register=True) except Exception as e: print("QuantumOrbitalPrecisionOptimizerV34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV21 import ( - QuantumOrbitalRefinedCrossoverOptimizerV21, - ) + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV21 import QuantumOrbitalRefinedCrossoverOptimizerV21 lama_register["QuantumOrbitalRefinedCrossoverOptimizerV21"] = QuantumOrbitalRefinedCrossoverOptimizerV21 - LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21" - ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21 = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21").set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21", register=True) except Exception as e: print("QuantumOrbitalRefinedCrossoverOptimizerV21 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV23 import ( - QuantumOrbitalRefinedCrossoverOptimizerV23, - ) + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV23 import QuantumOrbitalRefinedCrossoverOptimizerV23 lama_register["QuantumOrbitalRefinedCrossoverOptimizerV23"] = QuantumOrbitalRefinedCrossoverOptimizerV23 - LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23 = NonObjectOptimizer( - method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23" - ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23 = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23").set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23", register=True) except Exception as e: print("QuantumOrbitalRefinedCrossoverOptimizerV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumParticleSwarmDifferentialEvolution import ( - QuantumParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.QuantumParticleSwarmDifferentialEvolution import QuantumParticleSwarmDifferentialEvolution lama_register["QuantumParticleSwarmDifferentialEvolution"] = QuantumParticleSwarmDifferentialEvolution - LLAMAQuantumParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMAQuantumParticleSwarmDifferentialEvolution" - ).set_name("LLAMAQuantumParticleSwarmDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmDifferentialEvolution").set_name("LLAMAQuantumParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("QuantumParticleSwarmDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumParticleSwarmOptimization import QuantumParticleSwarmOptimization lama_register["QuantumParticleSwarmOptimization"] = QuantumParticleSwarmOptimization - LLAMAQuantumParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMAQuantumParticleSwarmOptimization" - ).set_name("LLAMAQuantumParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmOptimization").set_name("LLAMAQuantumParticleSwarmOptimization", register=True) except Exception as e: print("QuantumParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumReactiveCooperativeStrategy import ( - QuantumReactiveCooperativeStrategy, - ) + from nevergrad.optimization.lama.QuantumReactiveCooperativeStrategy import QuantumReactiveCooperativeStrategy lama_register["QuantumReactiveCooperativeStrategy"] = QuantumReactiveCooperativeStrategy - LLAMAQuantumReactiveCooperativeStrategy = NonObjectOptimizer( - method="LLAMAQuantumReactiveCooperativeStrategy" - ).set_name("LLAMAQuantumReactiveCooperativeStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumReactiveCooperativeStrategy = NonObjectOptimizer(method="LLAMAQuantumReactiveCooperativeStrategy").set_name("LLAMAQuantumReactiveCooperativeStrategy", register=True) except Exception as e: print("QuantumReactiveCooperativeStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveExplorationOptimization import ( - QuantumRefinedAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.QuantumRefinedAdaptiveExplorationOptimization import QuantumRefinedAdaptiveExplorationOptimization - lama_register["QuantumRefinedAdaptiveExplorationOptimization"] = ( - QuantumRefinedAdaptiveExplorationOptimization - ) - LLAMAQuantumRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMAQuantumRefinedAdaptiveExplorationOptimization" - ).set_name("LLAMAQuantumRefinedAdaptiveExplorationOptimization", register=True) + lama_register["QuantumRefinedAdaptiveExplorationOptimization"] = QuantumRefinedAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumRefinedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveExplorationOptimization").set_name("LLAMAQuantumRefinedAdaptiveExplorationOptimization", register=True) except Exception as e: print("QuantumRefinedAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveHybridStrategyV5 import ( - QuantumRefinedAdaptiveHybridStrategyV5, - ) + from nevergrad.optimization.lama.QuantumRefinedAdaptiveHybridStrategyV5 import QuantumRefinedAdaptiveHybridStrategyV5 lama_register["QuantumRefinedAdaptiveHybridStrategyV5"] = QuantumRefinedAdaptiveHybridStrategyV5 - LLAMAQuantumRefinedAdaptiveHybridStrategyV5 = NonObjectOptimizer( - method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5" - ).set_name("LLAMAQuantumRefinedAdaptiveHybridStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumRefinedAdaptiveHybridStrategyV5 = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5").set_name("LLAMAQuantumRefinedAdaptiveHybridStrategyV5", register=True) except Exception as e: print("QuantumRefinedAdaptiveHybridStrategyV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveStrategicOptimizer import ( - QuantumRefinedAdaptiveStrategicOptimizer, - ) + from nevergrad.optimization.lama.QuantumRefinedAdaptiveStrategicOptimizer import QuantumRefinedAdaptiveStrategicOptimizer lama_register["QuantumRefinedAdaptiveStrategicOptimizer"] = QuantumRefinedAdaptiveStrategicOptimizer - LLAMAQuantumRefinedAdaptiveStrategicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer" - ).set_name("LLAMAQuantumRefinedAdaptiveStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumRefinedAdaptiveStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer").set_name("LLAMAQuantumRefinedAdaptiveStrategicOptimizer", register=True) except Exception as e: print("QuantumRefinedAdaptiveStrategicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumRefinedDynamicAdaptiveHybridDEPSO import ( - QuantumRefinedDynamicAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.QuantumRefinedDynamicAdaptiveHybridDEPSO import QuantumRefinedDynamicAdaptiveHybridDEPSO lama_register["QuantumRefinedDynamicAdaptiveHybridDEPSO"] = QuantumRefinedDynamicAdaptiveHybridDEPSO - LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO" - ).set_name("LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO").set_name("LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO", register=True) except Exception as e: print("QuantumRefinedDynamicAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumReinforcedNesterovAccelerator import ( - QuantumReinforcedNesterovAccelerator, - ) + from nevergrad.optimization.lama.QuantumReinforcedNesterovAccelerator import QuantumReinforcedNesterovAccelerator lama_register["QuantumReinforcedNesterovAccelerator"] = QuantumReinforcedNesterovAccelerator - LLAMAQuantumReinforcedNesterovAccelerator = NonObjectOptimizer( - method="LLAMAQuantumReinforcedNesterovAccelerator" - ).set_name("LLAMAQuantumReinforcedNesterovAccelerator", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumReinforcedNesterovAccelerator")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumReinforcedNesterovAccelerator = NonObjectOptimizer(method="LLAMAQuantumReinforcedNesterovAccelerator").set_name("LLAMAQuantumReinforcedNesterovAccelerator", register=True) except Exception as e: print("QuantumReinforcedNesterovAccelerator can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumResonanceEvolutionaryStrategy import ( - QuantumResonanceEvolutionaryStrategy, - ) + from nevergrad.optimization.lama.QuantumResonanceEvolutionaryStrategy import QuantumResonanceEvolutionaryStrategy lama_register["QuantumResonanceEvolutionaryStrategy"] = QuantumResonanceEvolutionaryStrategy - LLAMAQuantumResonanceEvolutionaryStrategy = NonObjectOptimizer( - method="LLAMAQuantumResonanceEvolutionaryStrategy" - ).set_name("LLAMAQuantumResonanceEvolutionaryStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumResonanceEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumResonanceEvolutionaryStrategy = NonObjectOptimizer(method="LLAMAQuantumResonanceEvolutionaryStrategy").set_name("LLAMAQuantumResonanceEvolutionaryStrategy", register=True) except Exception as e: print("QuantumResonanceEvolutionaryStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumSearch import QuantumSearch lama_register["QuantumSearch"] = QuantumSearch - LLAMAQuantumSearch = NonObjectOptimizer(method="LLAMAQuantumSearch").set_name( - "LLAMAQuantumSearch", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSearch = NonObjectOptimizer(method="LLAMAQuantumSearch").set_name("LLAMAQuantumSearch", register=True) except Exception as e: print("QuantumSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumSimulatedAnnealing import QuantumSimulatedAnnealing lama_register["QuantumSimulatedAnnealing"] = QuantumSimulatedAnnealing - LLAMAQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing").set_name( - "LLAMAQuantumSimulatedAnnealing", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing").set_name("LLAMAQuantumSimulatedAnnealing", register=True) except Exception as e: print("QuantumSimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSimulatedAnnealingHybridOptimizer import ( - QuantumSimulatedAnnealingHybridOptimizer, - ) + from nevergrad.optimization.lama.QuantumSimulatedAnnealingHybridOptimizer import QuantumSimulatedAnnealingHybridOptimizer lama_register["QuantumSimulatedAnnealingHybridOptimizer"] = QuantumSimulatedAnnealingHybridOptimizer - LLAMAQuantumSimulatedAnnealingHybridOptimizer = NonObjectOptimizer( - method="LLAMAQuantumSimulatedAnnealingHybridOptimizer" - ).set_name("LLAMAQuantumSimulatedAnnealingHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSimulatedAnnealingHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingHybridOptimizer").set_name("LLAMAQuantumSimulatedAnnealingHybridOptimizer", register=True) except Exception as e: print("QuantumSimulatedAnnealingHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSimulatedAnnealingImproved import ( - QuantumSimulatedAnnealingImproved, - ) + from nevergrad.optimization.lama.QuantumSimulatedAnnealingImproved import QuantumSimulatedAnnealingImproved lama_register["QuantumSimulatedAnnealingImproved"] = QuantumSimulatedAnnealingImproved - LLAMAQuantumSimulatedAnnealingImproved = NonObjectOptimizer( - method="LLAMAQuantumSimulatedAnnealingImproved" - ).set_name("LLAMAQuantumSimulatedAnnealingImproved", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSimulatedAnnealingImproved = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingImproved").set_name("LLAMAQuantumSimulatedAnnealingImproved", register=True) except Exception as e: print("QuantumSimulatedAnnealingImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveHybridStrategy import ( - QuantumSpectralAdaptiveHybridStrategy, - ) + from nevergrad.optimization.lama.QuantumSpectralAdaptiveHybridStrategy import QuantumSpectralAdaptiveHybridStrategy lama_register["QuantumSpectralAdaptiveHybridStrategy"] = QuantumSpectralAdaptiveHybridStrategy - LLAMAQuantumSpectralAdaptiveHybridStrategy = NonObjectOptimizer( - method="LLAMAQuantumSpectralAdaptiveHybridStrategy" - ).set_name("LLAMAQuantumSpectralAdaptiveHybridStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralAdaptiveHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveHybridStrategy").set_name("LLAMAQuantumSpectralAdaptiveHybridStrategy", register=True) except Exception as e: print("QuantumSpectralAdaptiveHybridStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV2 import ( - QuantumSpectralAdaptiveOptimizerV2, - ) + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV2 import QuantumSpectralAdaptiveOptimizerV2 lama_register["QuantumSpectralAdaptiveOptimizerV2"] = QuantumSpectralAdaptiveOptimizerV2 - LLAMAQuantumSpectralAdaptiveOptimizerV2 = NonObjectOptimizer( - method="LLAMAQuantumSpectralAdaptiveOptimizerV2" - ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV2").set_name("LLAMAQuantumSpectralAdaptiveOptimizerV2", register=True) except Exception as e: print("QuantumSpectralAdaptiveOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV3 import ( - QuantumSpectralAdaptiveOptimizerV3, - ) + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV3 import QuantumSpectralAdaptiveOptimizerV3 lama_register["QuantumSpectralAdaptiveOptimizerV3"] = QuantumSpectralAdaptiveOptimizerV3 - LLAMAQuantumSpectralAdaptiveOptimizerV3 = NonObjectOptimizer( - method="LLAMAQuantumSpectralAdaptiveOptimizerV3" - ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralAdaptiveOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV3").set_name("LLAMAQuantumSpectralAdaptiveOptimizerV3", register=True) except Exception as e: print("QuantumSpectralAdaptiveOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumSpectralDynamicOptimizer import QuantumSpectralDynamicOptimizer lama_register["QuantumSpectralDynamicOptimizer"] = QuantumSpectralDynamicOptimizer - LLAMAQuantumSpectralDynamicOptimizer = NonObjectOptimizer( - method="LLAMAQuantumSpectralDynamicOptimizer" - ).set_name("LLAMAQuantumSpectralDynamicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumSpectralDynamicOptimizer").set_name("LLAMAQuantumSpectralDynamicOptimizer", register=True) except Exception as e: print("QuantumSpectralDynamicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSpectralEnhancedOptimizerV5 import ( - QuantumSpectralEnhancedOptimizerV5, - ) + from nevergrad.optimization.lama.QuantumSpectralEnhancedOptimizerV5 import QuantumSpectralEnhancedOptimizerV5 lama_register["QuantumSpectralEnhancedOptimizerV5"] = QuantumSpectralEnhancedOptimizerV5 - LLAMAQuantumSpectralEnhancedOptimizerV5 = NonObjectOptimizer( - method="LLAMAQuantumSpectralEnhancedOptimizerV5" - ).set_name("LLAMAQuantumSpectralEnhancedOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralEnhancedOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralEnhancedOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumSpectralEnhancedOptimizerV5").set_name("LLAMAQuantumSpectralEnhancedOptimizerV5", register=True) except Exception as e: print("QuantumSpectralEnhancedOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSpectralRefinedOptimizerV4 import ( - QuantumSpectralRefinedOptimizerV4, - ) + from nevergrad.optimization.lama.QuantumSpectralRefinedOptimizerV4 import QuantumSpectralRefinedOptimizerV4 lama_register["QuantumSpectralRefinedOptimizerV4"] = QuantumSpectralRefinedOptimizerV4 - LLAMAQuantumSpectralRefinedOptimizerV4 = NonObjectOptimizer( - method="LLAMAQuantumSpectralRefinedOptimizerV4" - ).set_name("LLAMAQuantumSpectralRefinedOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSpectralRefinedOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSpectralRefinedOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumSpectralRefinedOptimizerV4").set_name("LLAMAQuantumSpectralRefinedOptimizerV4", register=True) except Exception as e: print("QuantumSpectralRefinedOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumStabilizedDynamicBalanceOptimizer import ( - QuantumStabilizedDynamicBalanceOptimizer, - ) + from nevergrad.optimization.lama.QuantumStabilizedDynamicBalanceOptimizer import QuantumStabilizedDynamicBalanceOptimizer lama_register["QuantumStabilizedDynamicBalanceOptimizer"] = QuantumStabilizedDynamicBalanceOptimizer - LLAMAQuantumStabilizedDynamicBalanceOptimizer = NonObjectOptimizer( - method="LLAMAQuantumStabilizedDynamicBalanceOptimizer" - ).set_name("LLAMAQuantumStabilizedDynamicBalanceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStabilizedDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStabilizedDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAQuantumStabilizedDynamicBalanceOptimizer").set_name("LLAMAQuantumStabilizedDynamicBalanceOptimizer", register=True) except Exception as e: print("QuantumStabilizedDynamicBalanceOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumStateConvergenceOptimizer import QuantumStateConvergenceOptimizer lama_register["QuantumStateConvergenceOptimizer"] = QuantumStateConvergenceOptimizer - LLAMAQuantumStateConvergenceOptimizer = NonObjectOptimizer( - method="LLAMAQuantumStateConvergenceOptimizer" - ).set_name("LLAMAQuantumStateConvergenceOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStateConvergenceOptimizer = NonObjectOptimizer(method="LLAMAQuantumStateConvergenceOptimizer").set_name("LLAMAQuantumStateConvergenceOptimizer", register=True) except Exception as e: print("QuantumStateConvergenceOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumStateCrossoverOptimization import ( - QuantumStateCrossoverOptimization, - ) + from nevergrad.optimization.lama.QuantumStateCrossoverOptimization import QuantumStateCrossoverOptimization lama_register["QuantumStateCrossoverOptimization"] = QuantumStateCrossoverOptimization - LLAMAQuantumStateCrossoverOptimization = NonObjectOptimizer( - method="LLAMAQuantumStateCrossoverOptimization" - ).set_name("LLAMAQuantumStateCrossoverOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAQuantumStateCrossoverOptimization").set_name("LLAMAQuantumStateCrossoverOptimization", register=True) except Exception as e: print("QuantumStateCrossoverOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumStateHybridStrategy import QuantumStateHybridStrategy lama_register["QuantumStateHybridStrategy"] = QuantumStateHybridStrategy - LLAMAQuantumStateHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy").set_name( - "LLAMAQuantumStateHybridStrategy", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStateHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy").set_name("LLAMAQuantumStateHybridStrategy", register=True) except Exception as e: print("QuantumStateHybridStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumStateRefinedHybridStrategy import ( - QuantumStateRefinedHybridStrategy, - ) + from nevergrad.optimization.lama.QuantumStateRefinedHybridStrategy import QuantumStateRefinedHybridStrategy lama_register["QuantumStateRefinedHybridStrategy"] = QuantumStateRefinedHybridStrategy - LLAMAQuantumStateRefinedHybridStrategy = NonObjectOptimizer( - method="LLAMAQuantumStateRefinedHybridStrategy" - ).set_name("LLAMAQuantumStateRefinedHybridStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStateRefinedHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStateRefinedHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateRefinedHybridStrategy").set_name("LLAMAQuantumStateRefinedHybridStrategy", register=True) except Exception as e: print("QuantumStateRefinedHybridStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumStochasticGradientDescentFireworks import ( - QuantumStochasticGradientDescentFireworks, - ) + from nevergrad.optimization.lama.QuantumStochasticGradientDescentFireworks import QuantumStochasticGradientDescentFireworks lama_register["QuantumStochasticGradientDescentFireworks"] = QuantumStochasticGradientDescentFireworks - LLAMAQuantumStochasticGradientDescentFireworks = NonObjectOptimizer( - method="LLAMAQuantumStochasticGradientDescentFireworks" - ).set_name("LLAMAQuantumStochasticGradientDescentFireworks", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientDescentFireworks")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStochasticGradientDescentFireworks = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientDescentFireworks").set_name("LLAMAQuantumStochasticGradientDescentFireworks", register=True) except Exception as e: print("QuantumStochasticGradientDescentFireworks can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumStochasticGradientOptimizer import ( - QuantumStochasticGradientOptimizer, - ) + from nevergrad.optimization.lama.QuantumStochasticGradientOptimizer import QuantumStochasticGradientOptimizer lama_register["QuantumStochasticGradientOptimizer"] = QuantumStochasticGradientOptimizer - LLAMAQuantumStochasticGradientOptimizer = NonObjectOptimizer( - method="LLAMAQuantumStochasticGradientOptimizer" - ).set_name("LLAMAQuantumStochasticGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumStochasticGradientOptimizer = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientOptimizer").set_name("LLAMAQuantumStochasticGradientOptimizer", register=True) except Exception as e: print("QuantumStochasticGradientOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumSwarmOptimization import QuantumSwarmOptimization lama_register["QuantumSwarmOptimization"] = QuantumSwarmOptimization - LLAMAQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization").set_name( - "LLAMAQuantumSwarmOptimization", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization").set_name("LLAMAQuantumSwarmOptimization", register=True) except Exception as e: print("QuantumSwarmOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumSwarmOptimizationImproved import QuantumSwarmOptimizationImproved lama_register["QuantumSwarmOptimizationImproved"] = QuantumSwarmOptimizationImproved - LLAMAQuantumSwarmOptimizationImproved = NonObjectOptimizer( - method="LLAMAQuantumSwarmOptimizationImproved" - ).set_name("LLAMAQuantumSwarmOptimizationImproved", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimizationImproved").set_name("LLAMAQuantumSwarmOptimizationImproved", register=True) except Exception as e: print("QuantumSwarmOptimizationImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.QuantumSymbioticEnhancedStrategyV3 import ( - QuantumSymbioticEnhancedStrategyV3, - ) + from nevergrad.optimization.lama.QuantumSymbioticEnhancedStrategyV3 import QuantumSymbioticEnhancedStrategyV3 lama_register["QuantumSymbioticEnhancedStrategyV3"] = QuantumSymbioticEnhancedStrategyV3 - LLAMAQuantumSymbioticEnhancedStrategyV3 = NonObjectOptimizer( - method="LLAMAQuantumSymbioticEnhancedStrategyV3" - ).set_name("LLAMAQuantumSymbioticEnhancedStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumSymbioticEnhancedStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumSymbioticEnhancedStrategyV3 = NonObjectOptimizer(method="LLAMAQuantumSymbioticEnhancedStrategyV3").set_name("LLAMAQuantumSymbioticEnhancedStrategyV3", register=True) except Exception as e: print("QuantumSymbioticEnhancedStrategyV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunedGradientSearchV2 import QuantumTunedGradientSearchV2 lama_register["QuantumTunedGradientSearchV2"] = QuantumTunedGradientSearchV2 - LLAMAQuantumTunedGradientSearchV2 = NonObjectOptimizer( - method="LLAMAQuantumTunedGradientSearchV2" - ).set_name("LLAMAQuantumTunedGradientSearchV2", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunedGradientSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunedGradientSearchV2 = NonObjectOptimizer(method="LLAMAQuantumTunedGradientSearchV2").set_name("LLAMAQuantumTunedGradientSearchV2", register=True) except Exception as e: print("QuantumTunedGradientSearchV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizer import QuantumTunnelingOptimizer lama_register["QuantumTunnelingOptimizer"] = QuantumTunnelingOptimizer - LLAMAQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer").set_name( - "LLAMAQuantumTunnelingOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer").set_name("LLAMAQuantumTunnelingOptimizer", register=True) except Exception as e: print("QuantumTunnelingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV10 import QuantumTunnelingOptimizerV10 lama_register["QuantumTunnelingOptimizerV10"] = QuantumTunnelingOptimizerV10 - LLAMAQuantumTunnelingOptimizerV10 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV10" - ).set_name("LLAMAQuantumTunnelingOptimizerV10", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV10 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV10").set_name("LLAMAQuantumTunnelingOptimizerV10", register=True) except Exception as e: print("QuantumTunnelingOptimizerV10 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV11 import QuantumTunnelingOptimizerV11 lama_register["QuantumTunnelingOptimizerV11"] = QuantumTunnelingOptimizerV11 - LLAMAQuantumTunnelingOptimizerV11 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV11" - ).set_name("LLAMAQuantumTunnelingOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV11 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV11").set_name("LLAMAQuantumTunnelingOptimizerV11", register=True) except Exception as e: print("QuantumTunnelingOptimizerV11 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV12 import QuantumTunnelingOptimizerV12 lama_register["QuantumTunnelingOptimizerV12"] = QuantumTunnelingOptimizerV12 - LLAMAQuantumTunnelingOptimizerV12 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV12" - ).set_name("LLAMAQuantumTunnelingOptimizerV12", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV12 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV12").set_name("LLAMAQuantumTunnelingOptimizerV12", register=True) except Exception as e: print("QuantumTunnelingOptimizerV12 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV13 import QuantumTunnelingOptimizerV13 lama_register["QuantumTunnelingOptimizerV13"] = QuantumTunnelingOptimizerV13 - LLAMAQuantumTunnelingOptimizerV13 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV13" - ).set_name("LLAMAQuantumTunnelingOptimizerV13", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV13 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV13").set_name("LLAMAQuantumTunnelingOptimizerV13", register=True) except Exception as e: print("QuantumTunnelingOptimizerV13 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV14 import QuantumTunnelingOptimizerV14 lama_register["QuantumTunnelingOptimizerV14"] = QuantumTunnelingOptimizerV14 - LLAMAQuantumTunnelingOptimizerV14 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV14" - ).set_name("LLAMAQuantumTunnelingOptimizerV14", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV14 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV14").set_name("LLAMAQuantumTunnelingOptimizerV14", register=True) except Exception as e: print("QuantumTunnelingOptimizerV14 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV15 import QuantumTunnelingOptimizerV15 lama_register["QuantumTunnelingOptimizerV15"] = QuantumTunnelingOptimizerV15 - LLAMAQuantumTunnelingOptimizerV15 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV15" - ).set_name("LLAMAQuantumTunnelingOptimizerV15", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV15 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV15").set_name("LLAMAQuantumTunnelingOptimizerV15", register=True) except Exception as e: print("QuantumTunnelingOptimizerV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV16 import QuantumTunnelingOptimizerV16 lama_register["QuantumTunnelingOptimizerV16"] = QuantumTunnelingOptimizerV16 - LLAMAQuantumTunnelingOptimizerV16 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV16" - ).set_name("LLAMAQuantumTunnelingOptimizerV16", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV16 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV16").set_name("LLAMAQuantumTunnelingOptimizerV16", register=True) except Exception as e: print("QuantumTunnelingOptimizerV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV17 import QuantumTunnelingOptimizerV17 lama_register["QuantumTunnelingOptimizerV17"] = QuantumTunnelingOptimizerV17 - LLAMAQuantumTunnelingOptimizerV17 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV17" - ).set_name("LLAMAQuantumTunnelingOptimizerV17", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV17 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV17").set_name("LLAMAQuantumTunnelingOptimizerV17", register=True) except Exception as e: print("QuantumTunnelingOptimizerV17 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV18 import QuantumTunnelingOptimizerV18 lama_register["QuantumTunnelingOptimizerV18"] = QuantumTunnelingOptimizerV18 - LLAMAQuantumTunnelingOptimizerV18 = NonObjectOptimizer( - method="LLAMAQuantumTunnelingOptimizerV18" - ).set_name("LLAMAQuantumTunnelingOptimizerV18", register=True) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV18 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV18").set_name("LLAMAQuantumTunnelingOptimizerV18", register=True) except Exception as e: print("QuantumTunnelingOptimizerV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV2 import QuantumTunnelingOptimizerV2 lama_register["QuantumTunnelingOptimizerV2"] = QuantumTunnelingOptimizerV2 - LLAMAQuantumTunnelingOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2").set_name( - "LLAMAQuantumTunnelingOptimizerV2", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2").set_name("LLAMAQuantumTunnelingOptimizerV2", register=True) except Exception as e: print("QuantumTunnelingOptimizerV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV3 import QuantumTunnelingOptimizerV3 lama_register["QuantumTunnelingOptimizerV3"] = QuantumTunnelingOptimizerV3 - LLAMAQuantumTunnelingOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3").set_name( - "LLAMAQuantumTunnelingOptimizerV3", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3").set_name("LLAMAQuantumTunnelingOptimizerV3", register=True) except Exception as e: print("QuantumTunnelingOptimizerV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV4 import QuantumTunnelingOptimizerV4 lama_register["QuantumTunnelingOptimizerV4"] = QuantumTunnelingOptimizerV4 - LLAMAQuantumTunnelingOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4").set_name( - "LLAMAQuantumTunnelingOptimizerV4", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4").set_name("LLAMAQuantumTunnelingOptimizerV4", register=True) except Exception as e: print("QuantumTunnelingOptimizerV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV5 import QuantumTunnelingOptimizerV5 lama_register["QuantumTunnelingOptimizerV5"] = QuantumTunnelingOptimizerV5 - LLAMAQuantumTunnelingOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5").set_name( - "LLAMAQuantumTunnelingOptimizerV5", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5").set_name("LLAMAQuantumTunnelingOptimizerV5", register=True) except Exception as e: print("QuantumTunnelingOptimizerV5 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV6 import QuantumTunnelingOptimizerV6 lama_register["QuantumTunnelingOptimizerV6"] = QuantumTunnelingOptimizerV6 - LLAMAQuantumTunnelingOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6").set_name( - "LLAMAQuantumTunnelingOptimizerV6", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6").set_name("LLAMAQuantumTunnelingOptimizerV6", register=True) except Exception as e: print("QuantumTunnelingOptimizerV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV7 import QuantumTunnelingOptimizerV7 lama_register["QuantumTunnelingOptimizerV7"] = QuantumTunnelingOptimizerV7 - LLAMAQuantumTunnelingOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7").set_name( - "LLAMAQuantumTunnelingOptimizerV7", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7").set_name("LLAMAQuantumTunnelingOptimizerV7", register=True) except Exception as e: print("QuantumTunnelingOptimizerV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV8 import QuantumTunnelingOptimizerV8 lama_register["QuantumTunnelingOptimizerV8"] = QuantumTunnelingOptimizerV8 - LLAMAQuantumTunnelingOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8").set_name( - "LLAMAQuantumTunnelingOptimizerV8", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8").set_name("LLAMAQuantumTunnelingOptimizerV8", register=True) except Exception as e: print("QuantumTunnelingOptimizerV8 can not be imported: ", e) - try: from nevergrad.optimization.lama.QuantumTunnelingOptimizerV9 import QuantumTunnelingOptimizerV9 lama_register["QuantumTunnelingOptimizerV9"] = QuantumTunnelingOptimizerV9 - LLAMAQuantumTunnelingOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9").set_name( - "LLAMAQuantumTunnelingOptimizerV9", register=True - ) + res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAQuantumTunnelingOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9").set_name("LLAMAQuantumTunnelingOptimizerV9", register=True) except Exception as e: print("QuantumTunnelingOptimizerV9 can not be imported: ", e) - try: from nevergrad.optimization.lama.RADE import RADE lama_register["RADE"] = RADE + res = NonObjectOptimizer(method="LLAMARADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADE = NonObjectOptimizer(method="LLAMARADE").set_name("LLAMARADE", register=True) except Exception as e: print("RADE can not be imported: ", e) - try: from nevergrad.optimization.lama.RADEA import RADEA lama_register["RADEA"] = RADEA + res = NonObjectOptimizer(method="LLAMARADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADEA = NonObjectOptimizer(method="LLAMARADEA").set_name("LLAMARADEA", register=True) except Exception as e: print("RADEA can not be imported: ", e) - try: from nevergrad.optimization.lama.RADECM import RADECM lama_register["RADECM"] = RADECM + res = NonObjectOptimizer(method="LLAMARADECM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADECM = NonObjectOptimizer(method="LLAMARADECM").set_name("LLAMARADECM", register=True) except Exception as e: print("RADECM can not be imported: ", e) - try: from nevergrad.optimization.lama.RADEDM import RADEDM lama_register["RADEDM"] = RADEDM + res = NonObjectOptimizer(method="LLAMARADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADEDM = NonObjectOptimizer(method="LLAMARADEDM").set_name("LLAMARADEDM", register=True) except Exception as e: print("RADEDM can not be imported: ", e) - try: from nevergrad.optimization.lama.RADEEM import RADEEM lama_register["RADEEM"] = RADEEM + res = NonObjectOptimizer(method="LLAMARADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADEEM = NonObjectOptimizer(method="LLAMARADEEM").set_name("LLAMARADEEM", register=True) except Exception as e: print("RADEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.RADEPM import RADEPM lama_register["RADEPM"] = RADEPM + res = NonObjectOptimizer(method="LLAMARADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARADEPM = NonObjectOptimizer(method="LLAMARADEPM").set_name("LLAMARADEPM", register=True) except Exception as e: print("RADEPM can not be imported: ", e) - try: from nevergrad.optimization.lama.RADSDiffEvo import RADSDiffEvo lama_register["RADSDiffEvo"] = RADSDiffEvo - LLAMARADSDiffEvo = NonObjectOptimizer(method="LLAMARADSDiffEvo").set_name( - "LLAMARADSDiffEvo", register=True - ) + res = NonObjectOptimizer(method="LLAMARADSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARADSDiffEvo = NonObjectOptimizer(method="LLAMARADSDiffEvo").set_name("LLAMARADSDiffEvo", register=True) except Exception as e: print("RADSDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.RAGCES import RAGCES lama_register["RAGCES"] = RAGCES + res = NonObjectOptimizer(method="LLAMARAGCES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAGCES = NonObjectOptimizer(method="LLAMARAGCES").set_name("LLAMARAGCES", register=True) except Exception as e: print("RAGCES can not be imported: ", e) - try: from nevergrad.optimization.lama.RAGEA import RAGEA lama_register["RAGEA"] = RAGEA + res = NonObjectOptimizer(method="LLAMARAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAGEA = NonObjectOptimizer(method="LLAMARAGEA").set_name("LLAMARAGEA", register=True) except Exception as e: print("RAGEA can not be imported: ", e) - try: from nevergrad.optimization.lama.RAHDEMI import RAHDEMI lama_register["RAHDEMI"] = RAHDEMI + res = NonObjectOptimizer(method="LLAMARAHDEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAHDEMI = NonObjectOptimizer(method="LLAMARAHDEMI").set_name("LLAMARAHDEMI", register=True) except Exception as e: print("RAHDEMI can not be imported: ", e) - try: from nevergrad.optimization.lama.RALES import RALES lama_register["RALES"] = RALES + res = NonObjectOptimizer(method="LLAMARALES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARALES = NonObjectOptimizer(method="LLAMARALES").set_name("LLAMARALES", register=True) except Exception as e: print("RALES can not be imported: ", e) - try: from nevergrad.optimization.lama.RAMDE import RAMDE lama_register["RAMDE"] = RAMDE + res = NonObjectOptimizer(method="LLAMARAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAMDE = NonObjectOptimizer(method="LLAMARAMDE").set_name("LLAMARAMDE", register=True) except Exception as e: print("RAMDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RAMEDS import RAMEDS lama_register["RAMEDS"] = RAMEDS + res = NonObjectOptimizer(method="LLAMARAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAMEDS = NonObjectOptimizer(method="LLAMARAMEDS").set_name("LLAMARAMEDS", register=True) except Exception as e: print("RAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.RAMEDSPlus import RAMEDSPlus lama_register["RAMEDSPlus"] = RAMEDSPlus + res = NonObjectOptimizer(method="LLAMARAMEDSPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAMEDSPlus = NonObjectOptimizer(method="LLAMARAMEDSPlus").set_name("LLAMARAMEDSPlus", register=True) except Exception as e: print("RAMEDSPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.RAMEDSPro import RAMEDSPro lama_register["RAMEDSPro"] = RAMEDSPro + res = NonObjectOptimizer(method="LLAMARAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAMEDSPro = NonObjectOptimizer(method="LLAMARAMEDSPro").set_name("LLAMARAMEDSPro", register=True) except Exception as e: print("RAMEDSPro can not be imported: ", e) - try: from nevergrad.optimization.lama.RAMSDiffEvo import RAMSDiffEvo lama_register["RAMSDiffEvo"] = RAMSDiffEvo - LLAMARAMSDiffEvo = NonObjectOptimizer(method="LLAMARAMSDiffEvo").set_name( - "LLAMARAMSDiffEvo", register=True - ) + res = NonObjectOptimizer(method="LLAMARAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARAMSDiffEvo = NonObjectOptimizer(method="LLAMARAMSDiffEvo").set_name("LLAMARAMSDiffEvo", register=True) except Exception as e: print("RAMSDiffEvo can not be imported: ", e) - try: from nevergrad.optimization.lama.RAPDE import RAPDE lama_register["RAPDE"] = RAPDE + res = NonObjectOptimizer(method="LLAMARAPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAPDE = NonObjectOptimizer(method="LLAMARAPDE").set_name("LLAMARAPDE", register=True) except Exception as e: print("RAPDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RASES import RASES lama_register["RASES"] = RASES + res = NonObjectOptimizer(method="LLAMARASES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARASES = NonObjectOptimizer(method="LLAMARASES").set_name("LLAMARASES", register=True) except Exception as e: print("RASES can not be imported: ", e) - try: from nevergrad.optimization.lama.RAVDE import RAVDE lama_register["RAVDE"] = RAVDE + res = NonObjectOptimizer(method="LLAMARAVDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARAVDE = NonObjectOptimizer(method="LLAMARAVDE").set_name("LLAMARAVDE", register=True) except Exception as e: print("RAVDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RDACE import RDACE lama_register["RDACE"] = RDACE + res = NonObjectOptimizer(method="LLAMARDACE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARDACE = NonObjectOptimizer(method="LLAMARDACE").set_name("LLAMARDACE", register=True) except Exception as e: print("RDACE can not be imported: ", e) - try: from nevergrad.optimization.lama.RDSAS import RDSAS lama_register["RDSAS"] = RDSAS + res = NonObjectOptimizer(method="LLAMARDSAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARDSAS = NonObjectOptimizer(method="LLAMARDSAS").set_name("LLAMARDSAS", register=True) except Exception as e: print("RDSAS can not be imported: ", e) - try: from nevergrad.optimization.lama.READEPMC import READEPMC lama_register["READEPMC"] = READEPMC + res = NonObjectOptimizer(method="LLAMAREADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAREADEPMC = NonObjectOptimizer(method="LLAMAREADEPMC").set_name("LLAMAREADEPMC", register=True) except Exception as e: print("READEPMC can not be imported: ", e) - try: from nevergrad.optimization.lama.REAMSEA import REAMSEA lama_register["REAMSEA"] = REAMSEA + res = NonObjectOptimizer(method="LLAMAREAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAREAMSEA = NonObjectOptimizer(method="LLAMAREAMSEA").set_name("LLAMAREAMSEA", register=True) except Exception as e: print("REAMSEA can not be imported: ", e) - try: from nevergrad.optimization.lama.RE_ADMMMS import RE_ADMMMS lama_register["RE_ADMMMS"] = RE_ADMMMS + res = NonObjectOptimizer(method="LLAMARE_ADMMMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARE_ADMMMS = NonObjectOptimizer(method="LLAMARE_ADMMMS").set_name("LLAMARE_ADMMMS", register=True) except Exception as e: print("RE_ADMMMS can not be imported: ", e) - try: from nevergrad.optimization.lama.RPWDE import RPWDE lama_register["RPWDE"] = RPWDE + res = NonObjectOptimizer(method="LLAMARPWDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMARPWDE = NonObjectOptimizer(method="LLAMARPWDE").set_name("LLAMARPWDE", register=True) except Exception as e: print("RPWDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RankingDifferentialEvolution import RankingDifferentialEvolution lama_register["RankingDifferentialEvolution"] = RankingDifferentialEvolution - LLAMARankingDifferentialEvolution = NonObjectOptimizer( - method="LLAMARankingDifferentialEvolution" - ).set_name("LLAMARankingDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARankingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARankingDifferentialEvolution = NonObjectOptimizer(method="LLAMARankingDifferentialEvolution").set_name("LLAMARankingDifferentialEvolution", register=True) except Exception as e: print("RankingDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveClusteredDifferentialEvolution import ( - RefinedAdaptiveClusteredDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveClusteredDifferentialEvolution import RefinedAdaptiveClusteredDifferentialEvolution - lama_register["RefinedAdaptiveClusteredDifferentialEvolution"] = ( - RefinedAdaptiveClusteredDifferentialEvolution - ) - LLAMARefinedAdaptiveClusteredDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveClusteredDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveClusteredDifferentialEvolution", register=True) + lama_register["RefinedAdaptiveClusteredDifferentialEvolution"] = RefinedAdaptiveClusteredDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveClusteredDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveClusteredDifferentialEvolution").set_name("LLAMARefinedAdaptiveClusteredDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveClusteredDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixAdaptation import ( - RefinedAdaptiveCovarianceMatrixAdaptation, - ) + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixAdaptation import RefinedAdaptiveCovarianceMatrixAdaptation lama_register["RefinedAdaptiveCovarianceMatrixAdaptation"] = RefinedAdaptiveCovarianceMatrixAdaptation - LLAMARefinedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( - method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation" - ).set_name("LLAMARefinedAdaptiveCovarianceMatrixAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation").set_name("LLAMARefinedAdaptiveCovarianceMatrixAdaptation", register=True) except Exception as e: print("RefinedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixEvolution import ( - RefinedAdaptiveCovarianceMatrixEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixEvolution import RefinedAdaptiveCovarianceMatrixEvolution lama_register["RefinedAdaptiveCovarianceMatrixEvolution"] = RefinedAdaptiveCovarianceMatrixEvolution - LLAMARefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveCovarianceMatrixEvolution" - ).set_name("LLAMARefinedAdaptiveCovarianceMatrixEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixEvolution").set_name("LLAMARefinedAdaptiveCovarianceMatrixEvolution", register=True) except Exception as e: print("RefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveCrossoverElitistStrategyV7 import ( - RefinedAdaptiveCrossoverElitistStrategyV7, - ) + from nevergrad.optimization.lama.RefinedAdaptiveCrossoverElitistStrategyV7 import RefinedAdaptiveCrossoverElitistStrategyV7 lama_register["RefinedAdaptiveCrossoverElitistStrategyV7"] = RefinedAdaptiveCrossoverElitistStrategyV7 - LLAMARefinedAdaptiveCrossoverElitistStrategyV7 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7" - ).set_name("LLAMARefinedAdaptiveCrossoverElitistStrategyV7", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveCrossoverElitistStrategyV7 = NonObjectOptimizer(method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7").set_name("LLAMARefinedAdaptiveCrossoverElitistStrategyV7", register=True) except Exception as e: print("RefinedAdaptiveCrossoverElitistStrategyV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolution import ( - RefinedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolution import RefinedAdaptiveDifferentialEvolution lama_register["RefinedAdaptiveDifferentialEvolution"] = RefinedAdaptiveDifferentialEvolution - LLAMARefinedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolution").set_name("LLAMARefinedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionStrategy import ( - RefinedAdaptiveDifferentialEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionStrategy import RefinedAdaptiveDifferentialEvolutionStrategy - lama_register["RefinedAdaptiveDifferentialEvolutionStrategy"] = ( - RefinedAdaptiveDifferentialEvolutionStrategy - ) - LLAMARefinedAdaptiveDifferentialEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy" - ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionStrategy", register=True) + lama_register["RefinedAdaptiveDifferentialEvolutionStrategy"] = RefinedAdaptiveDifferentialEvolutionStrategy + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy").set_name("LLAMARefinedAdaptiveDifferentialEvolutionStrategy", register=True) except Exception as e: print("RefinedAdaptiveDifferentialEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( - RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation import RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation - lama_register["RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( - RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation - ) - LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation" - ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) + lama_register["RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation").set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) except Exception as e: print("RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( - RefinedAdaptiveDifferentialEvolutionWithGradientBoost, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithGradientBoost import RefinedAdaptiveDifferentialEvolutionWithGradientBoost - lama_register["RefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( - RefinedAdaptiveDifferentialEvolutionWithGradientBoost - ) - LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost" - ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) + lama_register["RefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = RefinedAdaptiveDifferentialEvolutionWithGradientBoost + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) except Exception as e: print("RefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSearch import ( - RefinedAdaptiveDifferentialSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSearch import RefinedAdaptiveDifferentialSearch lama_register["RefinedAdaptiveDifferentialSearch"] = RefinedAdaptiveDifferentialSearch - LLAMARefinedAdaptiveDifferentialSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialSearch" - ).set_name("LLAMARefinedAdaptiveDifferentialSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSearch").set_name("LLAMARefinedAdaptiveDifferentialSearch", register=True) except Exception as e: print("RefinedAdaptiveDifferentialSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSpiralSearch import ( - RefinedAdaptiveDifferentialSpiralSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSpiralSearch import RefinedAdaptiveDifferentialSpiralSearch lama_register["RefinedAdaptiveDifferentialSpiralSearch"] = RefinedAdaptiveDifferentialSpiralSearch - LLAMARefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDifferentialSpiralSearch" - ).set_name("LLAMARefinedAdaptiveDifferentialSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSpiralSearch").set_name("LLAMARefinedAdaptiveDifferentialSpiralSearch", register=True) except Exception as e: print("RefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDimensionalClimbingStrategy import ( - RefinedAdaptiveDimensionalClimbingStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalClimbingStrategy import RefinedAdaptiveDimensionalClimbingStrategy lama_register["RefinedAdaptiveDimensionalClimbingStrategy"] = RefinedAdaptiveDimensionalClimbingStrategy - LLAMARefinedAdaptiveDimensionalClimbingStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDimensionalClimbingStrategy" - ).set_name("LLAMARefinedAdaptiveDimensionalClimbingStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDimensionalClimbingStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalClimbingStrategy").set_name("LLAMARefinedAdaptiveDimensionalClimbingStrategy", register=True) except Exception as e: print("RefinedAdaptiveDimensionalClimbingStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDimensionalCrossoverEvolver import ( - RefinedAdaptiveDimensionalCrossoverEvolver, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalCrossoverEvolver import RefinedAdaptiveDimensionalCrossoverEvolver lama_register["RefinedAdaptiveDimensionalCrossoverEvolver"] = RefinedAdaptiveDimensionalCrossoverEvolver - LLAMARefinedAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver" - ).set_name("LLAMARefinedAdaptiveDimensionalCrossoverEvolver", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver").set_name("LLAMARefinedAdaptiveDimensionalCrossoverEvolver", register=True) except Exception as e: print("RefinedAdaptiveDimensionalCrossoverEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDirectionalBiasQuorumOptimization import ( - RefinedAdaptiveDirectionalBiasQuorumOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDirectionalBiasQuorumOptimization import RefinedAdaptiveDirectionalBiasQuorumOptimization - lama_register["RefinedAdaptiveDirectionalBiasQuorumOptimization"] = ( - RefinedAdaptiveDirectionalBiasQuorumOptimization - ) - LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization" - ).set_name("LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization", register=True) + lama_register["RefinedAdaptiveDirectionalBiasQuorumOptimization"] = RefinedAdaptiveDirectionalBiasQuorumOptimization + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization", register=True) except Exception as e: print("RefinedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDivergenceClusteringSearch import ( - RefinedAdaptiveDivergenceClusteringSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDivergenceClusteringSearch import RefinedAdaptiveDivergenceClusteringSearch lama_register["RefinedAdaptiveDivergenceClusteringSearch"] = RefinedAdaptiveDivergenceClusteringSearch - LLAMARefinedAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDivergenceClusteringSearch" - ).set_name("LLAMARefinedAdaptiveDivergenceClusteringSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDivergenceClusteringSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDivergenceClusteringSearch").set_name("LLAMARefinedAdaptiveDivergenceClusteringSearch", register=True) except Exception as e: print("RefinedAdaptiveDivergenceClusteringSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveDiversityPSO import RefinedAdaptiveDiversityPSO lama_register["RefinedAdaptiveDiversityPSO"] = RefinedAdaptiveDiversityPSO - LLAMARefinedAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO").set_name( - "LLAMARefinedAdaptiveDiversityPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO").set_name("LLAMARefinedAdaptiveDiversityPSO", register=True) except Exception as e: print("RefinedAdaptiveDiversityPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategy import RefinedAdaptiveDualPhaseStrategy lama_register["RefinedAdaptiveDualPhaseStrategy"] = RefinedAdaptiveDualPhaseStrategy - LLAMARefinedAdaptiveDualPhaseStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDualPhaseStrategy" - ).set_name("LLAMARefinedAdaptiveDualPhaseStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategy").set_name("LLAMARefinedAdaptiveDualPhaseStrategy", register=True) except Exception as e: print("RefinedAdaptiveDualPhaseStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategyV3 import ( - RefinedAdaptiveDualPhaseStrategyV3, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategyV3 import RefinedAdaptiveDualPhaseStrategyV3 lama_register["RefinedAdaptiveDualPhaseStrategyV3"] = RefinedAdaptiveDualPhaseStrategyV3 - LLAMARefinedAdaptiveDualPhaseStrategyV3 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDualPhaseStrategyV3" - ).set_name("LLAMARefinedAdaptiveDualPhaseStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDualPhaseStrategyV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategyV3").set_name("LLAMARefinedAdaptiveDualPhaseStrategyV3", register=True) except Exception as e: print("RefinedAdaptiveDualPhaseStrategyV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveDynamicDE import RefinedAdaptiveDynamicDE lama_register["RefinedAdaptiveDynamicDE"] = RefinedAdaptiveDynamicDE - LLAMARefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE").set_name( - "LLAMARefinedAdaptiveDynamicDE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE").set_name("LLAMARefinedAdaptiveDynamicDE", register=True) except Exception as e: print("RefinedAdaptiveDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV14 import ( - RefinedAdaptiveDynamicDualPhaseStrategyV14, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV14 import RefinedAdaptiveDynamicDualPhaseStrategyV14 lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV14"] = RefinedAdaptiveDynamicDualPhaseStrategyV14 - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14" - ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14", register=True) except Exception as e: print("RefinedAdaptiveDynamicDualPhaseStrategyV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV17 import ( - RefinedAdaptiveDynamicDualPhaseStrategyV17, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV17 import RefinedAdaptiveDynamicDualPhaseStrategyV17 lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV17"] = RefinedAdaptiveDynamicDualPhaseStrategyV17 - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17" - ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17", register=True) except Exception as e: print("RefinedAdaptiveDynamicDualPhaseStrategyV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV20 import ( - RefinedAdaptiveDynamicDualPhaseStrategyV20, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV20 import RefinedAdaptiveDynamicDualPhaseStrategyV20 lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV20"] = RefinedAdaptiveDynamicDualPhaseStrategyV20 - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20" - ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20", register=True) except Exception as e: print("RefinedAdaptiveDynamicDualPhaseStrategyV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicExplorationOptimization import ( - RefinedAdaptiveDynamicExplorationOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicExplorationOptimization import RefinedAdaptiveDynamicExplorationOptimization - lama_register["RefinedAdaptiveDynamicExplorationOptimization"] = ( - RefinedAdaptiveDynamicExplorationOptimization - ) - LLAMARefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicExplorationOptimization" - ).set_name("LLAMARefinedAdaptiveDynamicExplorationOptimization", register=True) + lama_register["RefinedAdaptiveDynamicExplorationOptimization"] = RefinedAdaptiveDynamicExplorationOptimization + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMARefinedAdaptiveDynamicExplorationOptimization", register=True) except Exception as e: print("RefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( - RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm import RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm - lama_register["RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( - RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm - ) - LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm" - ).set_name("LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) + lama_register["RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicStrategyV25 import ( - RefinedAdaptiveDynamicStrategyV25, - ) + from nevergrad.optimization.lama.RefinedAdaptiveDynamicStrategyV25 import RefinedAdaptiveDynamicStrategyV25 lama_register["RefinedAdaptiveDynamicStrategyV25"] = RefinedAdaptiveDynamicStrategyV25 - LLAMARefinedAdaptiveDynamicStrategyV25 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveDynamicStrategyV25" - ).set_name("LLAMARefinedAdaptiveDynamicStrategyV25", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicStrategyV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveDynamicStrategyV25 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicStrategyV25").set_name("LLAMARefinedAdaptiveDynamicStrategyV25", register=True) except Exception as e: print("RefinedAdaptiveDynamicStrategyV25 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedDE import RefinedAdaptiveEliteGuidedDE lama_register["RefinedAdaptiveEliteGuidedDE"] = RefinedAdaptiveEliteGuidedDE - LLAMARefinedAdaptiveEliteGuidedDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEliteGuidedDE" - ).set_name("LLAMARefinedAdaptiveEliteGuidedDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEliteGuidedDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedDE").set_name("LLAMARefinedAdaptiveEliteGuidedDE", register=True) except Exception as e: print("RefinedAdaptiveEliteGuidedDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE import ( - RefinedAdaptiveEliteGuidedMutationDE, - ) + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE import RefinedAdaptiveEliteGuidedMutationDE lama_register["RefinedAdaptiveEliteGuidedMutationDE"] = RefinedAdaptiveEliteGuidedMutationDE - LLAMARefinedAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEliteGuidedMutationDE" - ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE").set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE", register=True) except Exception as e: print("RefinedAdaptiveEliteGuidedMutationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE_v5 import ( - RefinedAdaptiveEliteGuidedMutationDE_v5, - ) + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE_v5 import RefinedAdaptiveEliteGuidedMutationDE_v5 lama_register["RefinedAdaptiveEliteGuidedMutationDE_v5"] = RefinedAdaptiveEliteGuidedMutationDE_v5 - LLAMARefinedAdaptiveEliteGuidedMutationDE_v5 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5" - ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE_v5", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEliteGuidedMutationDE_v5 = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5").set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE_v5", register=True) except Exception as e: print("RefinedAdaptiveEliteGuidedMutationDE_v5 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveElitistDE_v4 import RefinedAdaptiveElitistDE_v4 lama_register["RefinedAdaptiveElitistDE_v4"] = RefinedAdaptiveElitistDE_v4 - LLAMARefinedAdaptiveElitistDE_v4 = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4").set_name( - "LLAMARefinedAdaptiveElitistDE_v4", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveElitistDE_v4 = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4").set_name("LLAMARefinedAdaptiveElitistDE_v4", register=True) except Exception as e: print("RefinedAdaptiveElitistDE_v4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( - RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch import RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch - lama_register["RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( - RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch - ) - LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" - ).set_name("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) + lama_register["RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) except Exception as e: print("RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( - RefinedAdaptiveEnhancedGradientGuidedHybridPSO, - ) + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedGradientGuidedHybridPSO import RefinedAdaptiveEnhancedGradientGuidedHybridPSO - lama_register["RefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( - RefinedAdaptiveEnhancedGradientGuidedHybridPSO - ) - LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO" - ).set_name("LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) + lama_register["RefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = RefinedAdaptiveEnhancedGradientGuidedHybridPSO + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) except Exception as e: print("RefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 import ( - RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2, - ) + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 import RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 - lama_register["RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2"] = ( - RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 - ) - LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2" - ).set_name("LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2", register=True) + lama_register["RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2"] = RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2").set_name("LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2", register=True) except Exception as e: print("RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveEvolutionStrategy import RefinedAdaptiveEvolutionStrategy lama_register["RefinedAdaptiveEvolutionStrategy"] = RefinedAdaptiveEvolutionStrategy - LLAMARefinedAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveEvolutionStrategy" - ).set_name("LLAMARefinedAdaptiveEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveEvolutionStrategy").set_name("LLAMARefinedAdaptiveEvolutionStrategy", register=True) except Exception as e: print("RefinedAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveExplorationOptimizer import ( - RefinedAdaptiveExplorationOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptiveExplorationOptimizer import RefinedAdaptiveExplorationOptimizer lama_register["RefinedAdaptiveExplorationOptimizer"] = RefinedAdaptiveExplorationOptimizer - LLAMARefinedAdaptiveExplorationOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveExplorationOptimizer" - ).set_name("LLAMARefinedAdaptiveExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveExplorationOptimizer").set_name("LLAMARefinedAdaptiveExplorationOptimizer", register=True) except Exception as e: print("RefinedAdaptiveExplorationOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingOptimizerV5 import ( - RefinedAdaptiveGlobalClimbingOptimizerV5, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingOptimizerV5 import RefinedAdaptiveGlobalClimbingOptimizerV5 lama_register["RefinedAdaptiveGlobalClimbingOptimizerV5"] = RefinedAdaptiveGlobalClimbingOptimizerV5 - LLAMARefinedAdaptiveGlobalClimbingOptimizerV5 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5" - ).set_name("LLAMARefinedAdaptiveGlobalClimbingOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGlobalClimbingOptimizerV5 = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5").set_name("LLAMARefinedAdaptiveGlobalClimbingOptimizerV5", register=True) except Exception as e: print("RefinedAdaptiveGlobalClimbingOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingStrategy import ( - RefinedAdaptiveGlobalClimbingStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingStrategy import RefinedAdaptiveGlobalClimbingStrategy lama_register["RefinedAdaptiveGlobalClimbingStrategy"] = RefinedAdaptiveGlobalClimbingStrategy - LLAMARefinedAdaptiveGlobalClimbingStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGlobalClimbingStrategy" - ).set_name("LLAMARefinedAdaptiveGlobalClimbingStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGlobalClimbingStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingStrategy").set_name("LLAMARefinedAdaptiveGlobalClimbingStrategy", register=True) except Exception as e: print("RefinedAdaptiveGlobalClimbingStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveGradientCrossover import RefinedAdaptiveGradientCrossover lama_register["RefinedAdaptiveGradientCrossover"] = RefinedAdaptiveGradientCrossover - LLAMARefinedAdaptiveGradientCrossover = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientCrossover" - ).set_name("LLAMARefinedAdaptiveGradientCrossover", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientCrossover = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientCrossover").set_name("LLAMARefinedAdaptiveGradientCrossover", register=True) except Exception as e: print("RefinedAdaptiveGradientCrossover can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientDifferentialEvolution import ( - RefinedAdaptiveGradientDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGradientDifferentialEvolution import RefinedAdaptiveGradientDifferentialEvolution - lama_register["RefinedAdaptiveGradientDifferentialEvolution"] = ( - RefinedAdaptiveGradientDifferentialEvolution - ) - LLAMARefinedAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveGradientDifferentialEvolution", register=True) + lama_register["RefinedAdaptiveGradientDifferentialEvolution"] = RefinedAdaptiveGradientDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientDifferentialEvolution").set_name("LLAMARefinedAdaptiveGradientDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveGradientDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientEnhancedRAMEDS import ( - RefinedAdaptiveGradientEnhancedRAMEDS, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGradientEnhancedRAMEDS import RefinedAdaptiveGradientEnhancedRAMEDS lama_register["RefinedAdaptiveGradientEnhancedRAMEDS"] = RefinedAdaptiveGradientEnhancedRAMEDS - LLAMARefinedAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS" - ).set_name("LLAMARefinedAdaptiveGradientEnhancedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS").set_name("LLAMARefinedAdaptiveGradientEnhancedRAMEDS", register=True) except Exception as e: print("RefinedAdaptiveGradientEnhancedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveGradientEvolverV2 import RefinedAdaptiveGradientEvolverV2 lama_register["RefinedAdaptiveGradientEvolverV2"] = RefinedAdaptiveGradientEvolverV2 - LLAMARefinedAdaptiveGradientEvolverV2 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientEvolverV2" - ).set_name("LLAMARefinedAdaptiveGradientEvolverV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientEvolverV2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEvolverV2").set_name("LLAMARefinedAdaptiveGradientEvolverV2", register=True) except Exception as e: print("RefinedAdaptiveGradientEvolverV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientGuidedEvolution import ( - RefinedAdaptiveGradientGuidedEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGradientGuidedEvolution import RefinedAdaptiveGradientGuidedEvolution lama_register["RefinedAdaptiveGradientGuidedEvolution"] = RefinedAdaptiveGradientGuidedEvolution - LLAMARefinedAdaptiveGradientGuidedEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientGuidedEvolution" - ).set_name("LLAMARefinedAdaptiveGradientGuidedEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientGuidedEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientGuidedEvolution").set_name("LLAMARefinedAdaptiveGradientGuidedEvolution", register=True) except Exception as e: print("RefinedAdaptiveGradientGuidedEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientHybridOptimizer import ( - RefinedAdaptiveGradientHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGradientHybridOptimizer import RefinedAdaptiveGradientHybridOptimizer lama_register["RefinedAdaptiveGradientHybridOptimizer"] = RefinedAdaptiveGradientHybridOptimizer - LLAMARefinedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGradientHybridOptimizer" - ).set_name("LLAMARefinedAdaptiveGradientHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGradientHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientHybridOptimizer").set_name("LLAMARefinedAdaptiveGradientHybridOptimizer", register=True) except Exception as e: print("RefinedAdaptiveGradientHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveGuidedEvolutionStrategy import ( - RefinedAdaptiveGuidedEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptiveGuidedEvolutionStrategy import RefinedAdaptiveGuidedEvolutionStrategy lama_register["RefinedAdaptiveGuidedEvolutionStrategy"] = RefinedAdaptiveGuidedEvolutionStrategy - LLAMARefinedAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveGuidedEvolutionStrategy" - ).set_name("LLAMARefinedAdaptiveGuidedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveGuidedEvolutionStrategy").set_name("LLAMARefinedAdaptiveGuidedEvolutionStrategy", register=True) except Exception as e: print("RefinedAdaptiveGuidedEvolutionStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveHybridDE import RefinedAdaptiveHybridDE lama_register["RefinedAdaptiveHybridDE"] = RefinedAdaptiveHybridDE - LLAMARefinedAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE").set_name( - "LLAMARefinedAdaptiveHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE").set_name("LLAMARefinedAdaptiveHybridDE", register=True) except Exception as e: print("RefinedAdaptiveHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridEvolutionStrategyV6 import ( - RefinedAdaptiveHybridEvolutionStrategyV6, - ) + from nevergrad.optimization.lama.RefinedAdaptiveHybridEvolutionStrategyV6 import RefinedAdaptiveHybridEvolutionStrategyV6 lama_register["RefinedAdaptiveHybridEvolutionStrategyV6"] = RefinedAdaptiveHybridEvolutionStrategyV6 - LLAMARefinedAdaptiveHybridEvolutionStrategyV6 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6" - ).set_name("LLAMARefinedAdaptiveHybridEvolutionStrategyV6", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridEvolutionStrategyV6 = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6").set_name("LLAMARefinedAdaptiveHybridEvolutionStrategyV6", register=True) except Exception as e: print("RefinedAdaptiveHybridEvolutionStrategyV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimization import ( - RefinedAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimization import RefinedAdaptiveHybridOptimization lama_register["RefinedAdaptiveHybridOptimization"] = RefinedAdaptiveHybridOptimization - LLAMARefinedAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridOptimization" - ).set_name("LLAMARefinedAdaptiveHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimization").set_name("LLAMARefinedAdaptiveHybridOptimization", register=True) except Exception as e: print("RefinedAdaptiveHybridOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimizer import RefinedAdaptiveHybridOptimizer lama_register["RefinedAdaptiveHybridOptimizer"] = RefinedAdaptiveHybridOptimizer - LLAMARefinedAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridOptimizer" - ).set_name("LLAMARefinedAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimizer").set_name("LLAMARefinedAdaptiveHybridOptimizer", register=True) except Exception as e: print("RefinedAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridParticleSwarmDifferentialEvolution import ( - RefinedAdaptiveHybridParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveHybridParticleSwarmDifferentialEvolution import RefinedAdaptiveHybridParticleSwarmDifferentialEvolution - lama_register["RefinedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( - RefinedAdaptiveHybridParticleSwarmDifferentialEvolution - ) - LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) + lama_register["RefinedAdaptiveHybridParticleSwarmDifferentialEvolution"] = RefinedAdaptiveHybridParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridQuasiRandomGradientDE import ( - RefinedAdaptiveHybridQuasiRandomGradientDE, - ) + from nevergrad.optimization.lama.RefinedAdaptiveHybridQuasiRandomGradientDE import RefinedAdaptiveHybridQuasiRandomGradientDE lama_register["RefinedAdaptiveHybridQuasiRandomGradientDE"] = RefinedAdaptiveHybridQuasiRandomGradientDE - LLAMARefinedAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE" - ).set_name("LLAMARefinedAdaptiveHybridQuasiRandomGradientDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE").set_name("LLAMARefinedAdaptiveHybridQuasiRandomGradientDE", register=True) except Exception as e: print("RefinedAdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridSwarmEvolutionOptimization import ( - RefinedAdaptiveHybridSwarmEvolutionOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveHybridSwarmEvolutionOptimization import RefinedAdaptiveHybridSwarmEvolutionOptimization - lama_register["RefinedAdaptiveHybridSwarmEvolutionOptimization"] = ( - RefinedAdaptiveHybridSwarmEvolutionOptimization - ) - LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization" - ).set_name("LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization", register=True) + lama_register["RefinedAdaptiveHybridSwarmEvolutionOptimization"] = RefinedAdaptiveHybridSwarmEvolutionOptimization + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization").set_name("LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization", register=True) except Exception as e: print("RefinedAdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveIncrementalCrossover import ( - RefinedAdaptiveIncrementalCrossover, - ) + from nevergrad.optimization.lama.RefinedAdaptiveIncrementalCrossover import RefinedAdaptiveIncrementalCrossover lama_register["RefinedAdaptiveIncrementalCrossover"] = RefinedAdaptiveIncrementalCrossover - LLAMARefinedAdaptiveIncrementalCrossover = NonObjectOptimizer( - method="LLAMARefinedAdaptiveIncrementalCrossover" - ).set_name("LLAMARefinedAdaptiveIncrementalCrossover", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIncrementalCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveIncrementalCrossover = NonObjectOptimizer(method="LLAMARefinedAdaptiveIncrementalCrossover").set_name("LLAMARefinedAdaptiveIncrementalCrossover", register=True) except Exception as e: print("RefinedAdaptiveIncrementalCrossover can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveIslandEvolutionStrategy import ( - RefinedAdaptiveIslandEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptiveIslandEvolutionStrategy import RefinedAdaptiveIslandEvolutionStrategy lama_register["RefinedAdaptiveIslandEvolutionStrategy"] = RefinedAdaptiveIslandEvolutionStrategy - LLAMARefinedAdaptiveIslandEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptiveIslandEvolutionStrategy" - ).set_name("LLAMARefinedAdaptiveIslandEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveIslandEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveIslandEvolutionStrategy").set_name("LLAMARefinedAdaptiveIslandEvolutionStrategy", register=True) except Exception as e: print("RefinedAdaptiveIslandEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMemeticDifferentialEvolution import ( - RefinedAdaptiveMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDifferentialEvolution import RefinedAdaptiveMemeticDifferentialEvolution lama_register["RefinedAdaptiveMemeticDifferentialEvolution"] = RefinedAdaptiveMemeticDifferentialEvolution - LLAMARefinedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMemeticDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDifferentialEvolution").set_name("LLAMARefinedAdaptiveMemeticDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMemeticDiverseOptimizer import ( - RefinedAdaptiveMemeticDiverseOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDiverseOptimizer import RefinedAdaptiveMemeticDiverseOptimizer lama_register["RefinedAdaptiveMemeticDiverseOptimizer"] = RefinedAdaptiveMemeticDiverseOptimizer - LLAMARefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMemeticDiverseOptimizer" - ).set_name("LLAMARefinedAdaptiveMemeticDiverseOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDiverseOptimizer").set_name("LLAMARefinedAdaptiveMemeticDiverseOptimizer", register=True) except Exception as e: print("RefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedSearch import ( - RefinedAdaptiveMemoryEnhancedSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedSearch import RefinedAdaptiveMemoryEnhancedSearch lama_register["RefinedAdaptiveMemoryEnhancedSearch"] = RefinedAdaptiveMemoryEnhancedSearch - LLAMARefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMemoryEnhancedSearch" - ).set_name("LLAMARefinedAdaptiveMemoryEnhancedSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedSearch").set_name("LLAMARefinedAdaptiveMemoryEnhancedSearch", register=True) except Exception as e: print("RefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedStrategyV55 import ( - RefinedAdaptiveMemoryEnhancedStrategyV55, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedStrategyV55 import RefinedAdaptiveMemoryEnhancedStrategyV55 lama_register["RefinedAdaptiveMemoryEnhancedStrategyV55"] = RefinedAdaptiveMemoryEnhancedStrategyV55 - LLAMARefinedAdaptiveMemoryEnhancedStrategyV55 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55" - ).set_name("LLAMARefinedAdaptiveMemoryEnhancedStrategyV55", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMemoryEnhancedStrategyV55 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55").set_name("LLAMARefinedAdaptiveMemoryEnhancedStrategyV55", register=True) except Exception as e: print("RefinedAdaptiveMemoryEnhancedStrategyV55 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveMemoryStrategyV67 import RefinedAdaptiveMemoryStrategyV67 lama_register["RefinedAdaptiveMemoryStrategyV67"] = RefinedAdaptiveMemoryStrategyV67 - LLAMARefinedAdaptiveMemoryStrategyV67 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMemoryStrategyV67" - ).set_name("LLAMARefinedAdaptiveMemoryStrategyV67", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryStrategyV67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMemoryStrategyV67 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryStrategyV67").set_name("LLAMARefinedAdaptiveMemoryStrategyV67", register=True) except Exception as e: print("RefinedAdaptiveMemoryStrategyV67 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiOperatorSearch import ( - RefinedAdaptiveMultiOperatorSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMultiOperatorSearch import RefinedAdaptiveMultiOperatorSearch lama_register["RefinedAdaptiveMultiOperatorSearch"] = RefinedAdaptiveMultiOperatorSearch - LLAMARefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMultiOperatorSearch" - ).set_name("LLAMARefinedAdaptiveMultiOperatorSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiOperatorSearch").set_name("LLAMARefinedAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("RefinedAdaptiveMultiOperatorSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE import RefinedAdaptiveMultiStrategyDE lama_register["RefinedAdaptiveMultiStrategyDE"] = RefinedAdaptiveMultiStrategyDE - LLAMARefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMultiStrategyDE" - ).set_name("LLAMARefinedAdaptiveMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE").set_name("LLAMARefinedAdaptiveMultiStrategyDE", register=True) except Exception as e: print("RefinedAdaptiveMultiStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE_v2 import ( - RefinedAdaptiveMultiStrategyDE_v2, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE_v2 import RefinedAdaptiveMultiStrategyDE_v2 lama_register["RefinedAdaptiveMultiStrategyDE_v2"] = RefinedAdaptiveMultiStrategyDE_v2 - LLAMARefinedAdaptiveMultiStrategyDE_v2 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMultiStrategyDE_v2" - ).set_name("LLAMARefinedAdaptiveMultiStrategyDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMultiStrategyDE_v2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE_v2").set_name("LLAMARefinedAdaptiveMultiStrategyDE_v2", register=True) except Exception as e: print("RefinedAdaptiveMultiStrategyDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolution import ( - RefinedAdaptiveMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolution import RefinedAdaptiveMultiStrategyDifferentialEvolution - lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolution"] = ( - RefinedAdaptiveMultiStrategyDifferentialEvolution - ) - LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution", register=True) + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolution"] = RefinedAdaptiveMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 import ( - RefinedAdaptiveMultiStrategyDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 import RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 - lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolutionV2"] = ( - RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 - ) - LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2" - ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2", register=True) + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolutionV2"] = RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2").set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2", register=True) except Exception as e: print("RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveParameterStrategyV38 import ( - RefinedAdaptiveParameterStrategyV38, - ) + from nevergrad.optimization.lama.RefinedAdaptiveParameterStrategyV38 import RefinedAdaptiveParameterStrategyV38 lama_register["RefinedAdaptiveParameterStrategyV38"] = RefinedAdaptiveParameterStrategyV38 - LLAMARefinedAdaptiveParameterStrategyV38 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveParameterStrategyV38" - ).set_name("LLAMARefinedAdaptiveParameterStrategyV38", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveParameterStrategyV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveParameterStrategyV38 = NonObjectOptimizer(method="LLAMARefinedAdaptiveParameterStrategyV38").set_name("LLAMARefinedAdaptiveParameterStrategyV38", register=True) except Exception as e: print("RefinedAdaptiveParameterStrategyV38 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( - RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - lama_register["RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( - RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - ) - LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" - ).set_name( - "LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True - ) + lama_register["RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) except Exception as e: - print( - "RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e - ) - + print("RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) try: - from nevergrad.optimization.lama.RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( - RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - lama_register["RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( - RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - ) - LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" - ).set_name("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) + lama_register["RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) except Exception as e: print("RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionBalanceStrategy import ( - RefinedAdaptivePrecisionBalanceStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionBalanceStrategy import RefinedAdaptivePrecisionBalanceStrategy lama_register["RefinedAdaptivePrecisionBalanceStrategy"] = RefinedAdaptivePrecisionBalanceStrategy - LLAMARefinedAdaptivePrecisionBalanceStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionBalanceStrategy" - ).set_name("LLAMARefinedAdaptivePrecisionBalanceStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionBalanceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionBalanceStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionBalanceStrategy").set_name("LLAMARefinedAdaptivePrecisionBalanceStrategy", register=True) except Exception as e: print("RefinedAdaptivePrecisionBalanceStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV4 import ( - RefinedAdaptivePrecisionCohortOptimizationV4, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV4 import RefinedAdaptivePrecisionCohortOptimizationV4 - lama_register["RefinedAdaptivePrecisionCohortOptimizationV4"] = ( - RefinedAdaptivePrecisionCohortOptimizationV4 - ) - LLAMARefinedAdaptivePrecisionCohortOptimizationV4 = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4" - ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV4", register=True) + lama_register["RefinedAdaptivePrecisionCohortOptimizationV4"] = RefinedAdaptivePrecisionCohortOptimizationV4 + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionCohortOptimizationV4 = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4").set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV4", register=True) except Exception as e: print("RefinedAdaptivePrecisionCohortOptimizationV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV6 import ( - RefinedAdaptivePrecisionCohortOptimizationV6, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV6 import RefinedAdaptivePrecisionCohortOptimizationV6 - lama_register["RefinedAdaptivePrecisionCohortOptimizationV6"] = ( - RefinedAdaptivePrecisionCohortOptimizationV6 - ) - LLAMARefinedAdaptivePrecisionCohortOptimizationV6 = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6" - ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV6", register=True) + lama_register["RefinedAdaptivePrecisionCohortOptimizationV6"] = RefinedAdaptivePrecisionCohortOptimizationV6 + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionCohortOptimizationV6 = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6").set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV6", register=True) except Exception as e: print("RefinedAdaptivePrecisionCohortOptimizationV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionDifferentialEvolution import ( - RefinedAdaptivePrecisionDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDifferentialEvolution import RefinedAdaptivePrecisionDifferentialEvolution - lama_register["RefinedAdaptivePrecisionDifferentialEvolution"] = ( - RefinedAdaptivePrecisionDifferentialEvolution - ) - LLAMARefinedAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionDifferentialEvolution" - ).set_name("LLAMARefinedAdaptivePrecisionDifferentialEvolution", register=True) + lama_register["RefinedAdaptivePrecisionDifferentialEvolution"] = RefinedAdaptivePrecisionDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDifferentialEvolution").set_name("LLAMARefinedAdaptivePrecisionDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptivePrecisionDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionDivideSearch import ( - RefinedAdaptivePrecisionDivideSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDivideSearch import RefinedAdaptivePrecisionDivideSearch lama_register["RefinedAdaptivePrecisionDivideSearch"] = RefinedAdaptivePrecisionDivideSearch - LLAMARefinedAdaptivePrecisionDivideSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionDivideSearch" - ).set_name("LLAMARefinedAdaptivePrecisionDivideSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionDivideSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDivideSearch").set_name("LLAMARefinedAdaptivePrecisionDivideSearch", register=True) except Exception as e: print("RefinedAdaptivePrecisionDivideSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionEvolutionStrategy import ( - RefinedAdaptivePrecisionEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionEvolutionStrategy import RefinedAdaptivePrecisionEvolutionStrategy lama_register["RefinedAdaptivePrecisionEvolutionStrategy"] = RefinedAdaptivePrecisionEvolutionStrategy - LLAMARefinedAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionEvolutionStrategy" - ).set_name("LLAMARefinedAdaptivePrecisionEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionEvolutionStrategy").set_name("LLAMARefinedAdaptivePrecisionEvolutionStrategy", register=True) except Exception as e: print("RefinedAdaptivePrecisionEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionFocalHybrid import ( - RefinedAdaptivePrecisionFocalHybrid, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionFocalHybrid import RefinedAdaptivePrecisionFocalHybrid lama_register["RefinedAdaptivePrecisionFocalHybrid"] = RefinedAdaptivePrecisionFocalHybrid - LLAMARefinedAdaptivePrecisionFocalHybrid = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionFocalHybrid" - ).set_name("LLAMARefinedAdaptivePrecisionFocalHybrid", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionFocalHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionFocalHybrid = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionFocalHybrid").set_name("LLAMARefinedAdaptivePrecisionFocalHybrid", register=True) except Exception as e: print("RefinedAdaptivePrecisionFocalHybrid can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionHybridSearch import ( - RefinedAdaptivePrecisionHybridSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionHybridSearch import RefinedAdaptivePrecisionHybridSearch lama_register["RefinedAdaptivePrecisionHybridSearch"] = RefinedAdaptivePrecisionHybridSearch - LLAMARefinedAdaptivePrecisionHybridSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionHybridSearch" - ).set_name("LLAMARefinedAdaptivePrecisionHybridSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionHybridSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionHybridSearch").set_name("LLAMARefinedAdaptivePrecisionHybridSearch", register=True) except Exception as e: print("RefinedAdaptivePrecisionHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionStrategicOptimizer import ( - RefinedAdaptivePrecisionStrategicOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptivePrecisionStrategicOptimizer import RefinedAdaptivePrecisionStrategicOptimizer lama_register["RefinedAdaptivePrecisionStrategicOptimizer"] = RefinedAdaptivePrecisionStrategicOptimizer - LLAMARefinedAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptivePrecisionStrategicOptimizer" - ).set_name("LLAMARefinedAdaptivePrecisionStrategicOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionStrategicOptimizer").set_name("LLAMARefinedAdaptivePrecisionStrategicOptimizer", register=True) except Exception as e: print("RefinedAdaptivePrecisionStrategicOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumCrossoverStrategyV3 import ( - RefinedAdaptiveQuantumCrossoverStrategyV3, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumCrossoverStrategyV3 import RefinedAdaptiveQuantumCrossoverStrategyV3 lama_register["RefinedAdaptiveQuantumCrossoverStrategyV3"] = RefinedAdaptiveQuantumCrossoverStrategyV3 - LLAMARefinedAdaptiveQuantumCrossoverStrategyV3 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3" - ).set_name("LLAMARefinedAdaptiveQuantumCrossoverStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumCrossoverStrategyV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3").set_name("LLAMARefinedAdaptiveQuantumCrossoverStrategyV3", register=True) except Exception as e: print("RefinedAdaptiveQuantumCrossoverStrategyV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolution import ( - RefinedAdaptiveQuantumDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolution import RefinedAdaptiveQuantumDifferentialEvolution lama_register["RefinedAdaptiveQuantumDifferentialEvolution"] = RefinedAdaptiveQuantumDifferentialEvolution - LLAMARefinedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolution").set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolutionPlus import ( - RefinedAdaptiveQuantumDifferentialEvolutionPlus, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolutionPlus import RefinedAdaptiveQuantumDifferentialEvolutionPlus - lama_register["RefinedAdaptiveQuantumDifferentialEvolutionPlus"] = ( - RefinedAdaptiveQuantumDifferentialEvolutionPlus - ) - LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus" - ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus", register=True) + lama_register["RefinedAdaptiveQuantumDifferentialEvolutionPlus"] = RefinedAdaptiveQuantumDifferentialEvolutionPlus + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus").set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus", register=True) except Exception as e: print("RefinedAdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveQuantumEliteDE import RefinedAdaptiveQuantumEliteDE lama_register["RefinedAdaptiveQuantumEliteDE"] = RefinedAdaptiveQuantumEliteDE - LLAMARefinedAdaptiveQuantumEliteDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumEliteDE" - ).set_name("LLAMARefinedAdaptiveQuantumEliteDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEliteDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumEliteDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEliteDE").set_name("LLAMARefinedAdaptiveQuantumEliteDE", register=True) except Exception as e: print("RefinedAdaptiveQuantumEliteDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveQuantumEntropyDE import RefinedAdaptiveQuantumEntropyDE lama_register["RefinedAdaptiveQuantumEntropyDE"] = RefinedAdaptiveQuantumEntropyDE - LLAMARefinedAdaptiveQuantumEntropyDE = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumEntropyDE" - ).set_name("LLAMARefinedAdaptiveQuantumEntropyDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEntropyDE").set_name("LLAMARefinedAdaptiveQuantumEntropyDE", register=True) except Exception as e: print("RefinedAdaptiveQuantumEntropyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientBoostedMemeticSearch import ( - RefinedAdaptiveQuantumGradientBoostedMemeticSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientBoostedMemeticSearch import RefinedAdaptiveQuantumGradientBoostedMemeticSearch - lama_register["RefinedAdaptiveQuantumGradientBoostedMemeticSearch"] = ( - RefinedAdaptiveQuantumGradientBoostedMemeticSearch - ) - LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch" - ).set_name("LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch", register=True) + lama_register["RefinedAdaptiveQuantumGradientBoostedMemeticSearch"] = RefinedAdaptiveQuantumGradientBoostedMemeticSearch + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch").set_name("LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch", register=True) except Exception as e: print("RefinedAdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientExplorationOptimization import ( - RefinedAdaptiveQuantumGradientExplorationOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientExplorationOptimization import RefinedAdaptiveQuantumGradientExplorationOptimization - lama_register["RefinedAdaptiveQuantumGradientExplorationOptimization"] = ( - RefinedAdaptiveQuantumGradientExplorationOptimization - ) - LLAMARefinedAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization" - ).set_name("LLAMARefinedAdaptiveQuantumGradientExplorationOptimization", register=True) + lama_register["RefinedAdaptiveQuantumGradientExplorationOptimization"] = RefinedAdaptiveQuantumGradientExplorationOptimization + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization").set_name("LLAMARefinedAdaptiveQuantumGradientExplorationOptimization", register=True) except Exception as e: print("RefinedAdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientHybridOptimizer import ( - RefinedAdaptiveQuantumGradientHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientHybridOptimizer import RefinedAdaptiveQuantumGradientHybridOptimizer - lama_register["RefinedAdaptiveQuantumGradientHybridOptimizer"] = ( - RefinedAdaptiveQuantumGradientHybridOptimizer - ) - LLAMARefinedAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer" - ).set_name("LLAMARefinedAdaptiveQuantumGradientHybridOptimizer", register=True) + lama_register["RefinedAdaptiveQuantumGradientHybridOptimizer"] = RefinedAdaptiveQuantumGradientHybridOptimizer + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer").set_name("LLAMARefinedAdaptiveQuantumGradientHybridOptimizer", register=True) except Exception as e: print("RefinedAdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveQuantumPSO import RefinedAdaptiveQuantumPSO lama_register["RefinedAdaptiveQuantumPSO"] = RefinedAdaptiveQuantumPSO - LLAMARefinedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO").set_name( - "LLAMARefinedAdaptiveQuantumPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO").set_name("LLAMARefinedAdaptiveQuantumPSO", register=True) except Exception as e: print("RefinedAdaptiveQuantumPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumSwarmOptimizerV3 import ( - RefinedAdaptiveQuantumSwarmOptimizerV3, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuantumSwarmOptimizerV3 import RefinedAdaptiveQuantumSwarmOptimizerV3 lama_register["RefinedAdaptiveQuantumSwarmOptimizerV3"] = RefinedAdaptiveQuantumSwarmOptimizerV3 - LLAMARefinedAdaptiveQuantumSwarmOptimizerV3 = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3" - ).set_name("LLAMARefinedAdaptiveQuantumSwarmOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuantumSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3").set_name("LLAMARefinedAdaptiveQuantumSwarmOptimizerV3", register=True) except Exception as e: print("RefinedAdaptiveQuantumSwarmOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomDEGradientAnnealing import ( - RefinedAdaptiveQuasiRandomDEGradientAnnealing, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomDEGradientAnnealing import RefinedAdaptiveQuasiRandomDEGradientAnnealing - lama_register["RefinedAdaptiveQuasiRandomDEGradientAnnealing"] = ( - RefinedAdaptiveQuasiRandomDEGradientAnnealing - ) - LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing" - ).set_name("LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing", register=True) + lama_register["RefinedAdaptiveQuasiRandomDEGradientAnnealing"] = RefinedAdaptiveQuasiRandomDEGradientAnnealing + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing").set_name("LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing", register=True) except Exception as e: print("RefinedAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution import ( - RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution import RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution - lama_register["RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( - RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution - ) - LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) + lama_register["RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution"] = RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution").set_name("LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveRefinementPSO import RefinedAdaptiveRefinementPSO lama_register["RefinedAdaptiveRefinementPSO"] = RefinedAdaptiveRefinementPSO - LLAMARefinedAdaptiveRefinementPSO = NonObjectOptimizer( - method="LLAMARefinedAdaptiveRefinementPSO" - ).set_name("LLAMARefinedAdaptiveRefinementPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveRefinementPSO").set_name("LLAMARefinedAdaptiveRefinementPSO", register=True) except Exception as e: print("RefinedAdaptiveRefinementPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveSimulatedAnnealingWithSmartMemory import ( - RefinedAdaptiveSimulatedAnnealingWithSmartMemory, - ) + from nevergrad.optimization.lama.RefinedAdaptiveSimulatedAnnealingWithSmartMemory import RefinedAdaptiveSimulatedAnnealingWithSmartMemory - lama_register["RefinedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( - RefinedAdaptiveSimulatedAnnealingWithSmartMemory - ) - LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory" - ).set_name("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) + lama_register["RefinedAdaptiveSimulatedAnnealingWithSmartMemory"] = RefinedAdaptiveSimulatedAnnealingWithSmartMemory + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) except Exception as e: print("RefinedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveSpatialExplorationOptimizer import ( - RefinedAdaptiveSpatialExplorationOptimizer, - ) + from nevergrad.optimization.lama.RefinedAdaptiveSpatialExplorationOptimizer import RefinedAdaptiveSpatialExplorationOptimizer lama_register["RefinedAdaptiveSpatialExplorationOptimizer"] = RefinedAdaptiveSpatialExplorationOptimizer - LLAMARefinedAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSpatialExplorationOptimizer" - ).set_name("LLAMARefinedAdaptiveSpatialExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialExplorationOptimizer").set_name("LLAMARefinedAdaptiveSpatialExplorationOptimizer", register=True) except Exception as e: print("RefinedAdaptiveSpatialExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveSpatialOptimizer import RefinedAdaptiveSpatialOptimizer lama_register["RefinedAdaptiveSpatialOptimizer"] = RefinedAdaptiveSpatialOptimizer - LLAMARefinedAdaptiveSpatialOptimizer = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSpatialOptimizer" - ).set_name("LLAMARefinedAdaptiveSpatialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSpatialOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialOptimizer").set_name("LLAMARefinedAdaptiveSpatialOptimizer", register=True) except Exception as e: print("RefinedAdaptiveSpatialOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAdaptiveSpectralEvolution import RefinedAdaptiveSpectralEvolution lama_register["RefinedAdaptiveSpectralEvolution"] = RefinedAdaptiveSpectralEvolution - LLAMARefinedAdaptiveSpectralEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSpectralEvolution" - ).set_name("LLAMARefinedAdaptiveSpectralEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpectralEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSpectralEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpectralEvolution").set_name("LLAMARefinedAdaptiveSpectralEvolution", register=True) except Exception as e: print("RefinedAdaptiveSpectralEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveSpiralGradientSearch import ( - RefinedAdaptiveSpiralGradientSearch, - ) + from nevergrad.optimization.lama.RefinedAdaptiveSpiralGradientSearch import RefinedAdaptiveSpiralGradientSearch lama_register["RefinedAdaptiveSpiralGradientSearch"] = RefinedAdaptiveSpiralGradientSearch - LLAMARefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSpiralGradientSearch" - ).set_name("LLAMARefinedAdaptiveSpiralGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpiralGradientSearch").set_name("LLAMARefinedAdaptiveSpiralGradientSearch", register=True) except Exception as e: print("RefinedAdaptiveSpiralGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveStochasticGradientQuorumOptimization import ( - RefinedAdaptiveStochasticGradientQuorumOptimization, - ) + from nevergrad.optimization.lama.RefinedAdaptiveStochasticGradientQuorumOptimization import RefinedAdaptiveStochasticGradientQuorumOptimization - lama_register["RefinedAdaptiveStochasticGradientQuorumOptimization"] = ( - RefinedAdaptiveStochasticGradientQuorumOptimization - ) - LLAMARefinedAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( - method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization" - ).set_name("LLAMARefinedAdaptiveStochasticGradientQuorumOptimization", register=True) + lama_register["RefinedAdaptiveStochasticGradientQuorumOptimization"] = RefinedAdaptiveStochasticGradientQuorumOptimization + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization").set_name("LLAMARefinedAdaptiveStochasticGradientQuorumOptimization", register=True) except Exception as e: print("RefinedAdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveStochasticHybridEvolution import ( - RefinedAdaptiveStochasticHybridEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveStochasticHybridEvolution import RefinedAdaptiveStochasticHybridEvolution lama_register["RefinedAdaptiveStochasticHybridEvolution"] = RefinedAdaptiveStochasticHybridEvolution - LLAMARefinedAdaptiveStochasticHybridEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveStochasticHybridEvolution" - ).set_name("LLAMARefinedAdaptiveStochasticHybridEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveStochasticHybridEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticHybridEvolution").set_name("LLAMARefinedAdaptiveStochasticHybridEvolution", register=True) except Exception as e: print("RefinedAdaptiveStochasticHybridEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdaptiveSwarmDifferentialEvolution import ( - RefinedAdaptiveSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdaptiveSwarmDifferentialEvolution import RefinedAdaptiveSwarmDifferentialEvolution lama_register["RefinedAdaptiveSwarmDifferentialEvolution"] = RefinedAdaptiveSwarmDifferentialEvolution - LLAMARefinedAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdaptiveSwarmDifferentialEvolution" - ).set_name("LLAMARefinedAdaptiveSwarmDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveSwarmDifferentialEvolution").set_name("LLAMARefinedAdaptiveSwarmDifferentialEvolution", register=True) except Exception as e: print("RefinedAdaptiveSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( - RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - lama_register["RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( - RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - ) - LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" - ).set_name("LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) + lama_register["RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( - RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - lama_register["RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( - RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - ) - LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" - ).set_name("LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) + lama_register["RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + res = NonObjectOptimizer(method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( - RefinedArchiveEnhancedAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedArchiveEnhancedAdaptiveDifferentialEvolution import RefinedArchiveEnhancedAdaptiveDifferentialEvolution - lama_register["RefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( - RefinedArchiveEnhancedAdaptiveDifferentialEvolution - ) - LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution" - ).set_name("LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) + lama_register["RefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = RefinedArchiveEnhancedAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution").set_name("LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedAttenuatedAdaptiveEvolver import RefinedAttenuatedAdaptiveEvolver lama_register["RefinedAttenuatedAdaptiveEvolver"] = RefinedAttenuatedAdaptiveEvolver - LLAMARefinedAttenuatedAdaptiveEvolver = NonObjectOptimizer( - method="LLAMARefinedAttenuatedAdaptiveEvolver" - ).set_name("LLAMARefinedAttenuatedAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMARefinedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMARefinedAttenuatedAdaptiveEvolver").set_name("LLAMARefinedAttenuatedAdaptiveEvolver", register=True) except Exception as e: print("RefinedAttenuatedAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedBalancedAdaptiveElitistStrategy import ( - RefinedBalancedAdaptiveElitistStrategy, - ) + from nevergrad.optimization.lama.RefinedBalancedAdaptiveElitistStrategy import RefinedBalancedAdaptiveElitistStrategy lama_register["RefinedBalancedAdaptiveElitistStrategy"] = RefinedBalancedAdaptiveElitistStrategy - LLAMARefinedBalancedAdaptiveElitistStrategy = NonObjectOptimizer( - method="LLAMARefinedBalancedAdaptiveElitistStrategy" - ).set_name("LLAMARefinedBalancedAdaptiveElitistStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedBalancedAdaptiveElitistStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedBalancedAdaptiveElitistStrategy = NonObjectOptimizer(method="LLAMARefinedBalancedAdaptiveElitistStrategy").set_name("LLAMARefinedBalancedAdaptiveElitistStrategy", register=True) except Exception as e: print("RefinedBalancedAdaptiveElitistStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedBalancedExplorationOptimizer import ( - RefinedBalancedExplorationOptimizer, - ) + from nevergrad.optimization.lama.RefinedBalancedExplorationOptimizer import RefinedBalancedExplorationOptimizer lama_register["RefinedBalancedExplorationOptimizer"] = RefinedBalancedExplorationOptimizer - LLAMARefinedBalancedExplorationOptimizer = NonObjectOptimizer( - method="LLAMARefinedBalancedExplorationOptimizer" - ).set_name("LLAMARefinedBalancedExplorationOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedBalancedExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedBalancedExplorationOptimizer").set_name("LLAMARefinedBalancedExplorationOptimizer", register=True) except Exception as e: print("RefinedBalancedExplorationOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedCMADiffEvoPSO import RefinedCMADiffEvoPSO lama_register["RefinedCMADiffEvoPSO"] = RefinedCMADiffEvoPSO - LLAMARefinedCMADiffEvoPSO = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO").set_name( - "LLAMARefinedCMADiffEvoPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedCMADiffEvoPSO = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO").set_name("LLAMARefinedCMADiffEvoPSO", register=True) except Exception as e: print("RefinedCMADiffEvoPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedConcentricDiversityStrategy import ( - RefinedConcentricDiversityStrategy, - ) + from nevergrad.optimization.lama.RefinedConcentricDiversityStrategy import RefinedConcentricDiversityStrategy lama_register["RefinedConcentricDiversityStrategy"] = RefinedConcentricDiversityStrategy - LLAMARefinedConcentricDiversityStrategy = NonObjectOptimizer( - method="LLAMARefinedConcentricDiversityStrategy" - ).set_name("LLAMARefinedConcentricDiversityStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMARefinedConcentricDiversityStrategy").set_name("LLAMARefinedConcentricDiversityStrategy", register=True) except Exception as e: print("RefinedConcentricDiversityStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedConcentricQuantumCrossoverStrategyV5 import ( - RefinedConcentricQuantumCrossoverStrategyV5, - ) + from nevergrad.optimization.lama.RefinedConcentricQuantumCrossoverStrategyV5 import RefinedConcentricQuantumCrossoverStrategyV5 lama_register["RefinedConcentricQuantumCrossoverStrategyV5"] = RefinedConcentricQuantumCrossoverStrategyV5 - LLAMARefinedConcentricQuantumCrossoverStrategyV5 = NonObjectOptimizer( - method="LLAMARefinedConcentricQuantumCrossoverStrategyV5" - ).set_name("LLAMARefinedConcentricQuantumCrossoverStrategyV5", register=True) + res = NonObjectOptimizer(method="LLAMARefinedConcentricQuantumCrossoverStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedConcentricQuantumCrossoverStrategyV5 = NonObjectOptimizer(method="LLAMARefinedConcentricQuantumCrossoverStrategyV5").set_name("LLAMARefinedConcentricQuantumCrossoverStrategyV5", register=True) except Exception as e: print("RefinedConcentricQuantumCrossoverStrategyV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedConvergenceAdaptiveOptimizer import ( - RefinedConvergenceAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.RefinedConvergenceAdaptiveOptimizer import RefinedConvergenceAdaptiveOptimizer lama_register["RefinedConvergenceAdaptiveOptimizer"] = RefinedConvergenceAdaptiveOptimizer - LLAMARefinedConvergenceAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMARefinedConvergenceAdaptiveOptimizer" - ).set_name("LLAMARefinedConvergenceAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedConvergenceAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedConvergenceAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedConvergenceAdaptiveOptimizer").set_name("LLAMARefinedConvergenceAdaptiveOptimizer", register=True) except Exception as e: print("RefinedConvergenceAdaptiveOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedConvergenceDE import RefinedConvergenceDE lama_register["RefinedConvergenceDE"] = RefinedConvergenceDE - LLAMARefinedConvergenceDE = NonObjectOptimizer(method="LLAMARefinedConvergenceDE").set_name( - "LLAMARefinedConvergenceDE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedConvergenceDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedConvergenceDE = NonObjectOptimizer(method="LLAMARefinedConvergenceDE").set_name("LLAMARefinedConvergenceDE", register=True) except Exception as e: print("RefinedConvergenceDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedConvergentAdaptiveEvolutionStrategy import ( - RefinedConvergentAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedConvergentAdaptiveEvolutionStrategy import RefinedConvergentAdaptiveEvolutionStrategy lama_register["RefinedConvergentAdaptiveEvolutionStrategy"] = RefinedConvergentAdaptiveEvolutionStrategy - LLAMARefinedConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedConvergentAdaptiveEvolutionStrategy" - ).set_name("LLAMARefinedConvergentAdaptiveEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedConvergentAdaptiveEvolutionStrategy").set_name("LLAMARefinedConvergentAdaptiveEvolutionStrategy", register=True) except Exception as e: print("RefinedConvergentAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedCooperativeDifferentialEvolution import ( - RefinedCooperativeDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedCooperativeDifferentialEvolution import RefinedCooperativeDifferentialEvolution lama_register["RefinedCooperativeDifferentialEvolution"] = RefinedCooperativeDifferentialEvolution - LLAMARefinedCooperativeDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedCooperativeDifferentialEvolution" - ).set_name("LLAMARefinedCooperativeDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedCooperativeDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedCooperativeDifferentialEvolution").set_name("LLAMARefinedCooperativeDifferentialEvolution", register=True) except Exception as e: print("RefinedCooperativeDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedCosineAdaptiveDifferentialSwarm import ( - RefinedCosineAdaptiveDifferentialSwarm, - ) + from nevergrad.optimization.lama.RefinedCosineAdaptiveDifferentialSwarm import RefinedCosineAdaptiveDifferentialSwarm lama_register["RefinedCosineAdaptiveDifferentialSwarm"] = RefinedCosineAdaptiveDifferentialSwarm - LLAMARefinedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( - method="LLAMARefinedCosineAdaptiveDifferentialSwarm" - ).set_name("LLAMARefinedCosineAdaptiveDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMARefinedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMARefinedCosineAdaptiveDifferentialSwarm").set_name("LLAMARefinedCosineAdaptiveDifferentialSwarm", register=True) except Exception as e: print("RefinedCosineAdaptiveDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDifferentialEvolutionWithAdaptiveLearningRate import ( - RefinedDifferentialEvolutionWithAdaptiveLearningRate, - ) + from nevergrad.optimization.lama.RefinedDifferentialEvolutionWithAdaptiveLearningRate import RefinedDifferentialEvolutionWithAdaptiveLearningRate - lama_register["RefinedDifferentialEvolutionWithAdaptiveLearningRate"] = ( - RefinedDifferentialEvolutionWithAdaptiveLearningRate - ) - LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( - method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate" - ).set_name("LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate", register=True) + lama_register["RefinedDifferentialEvolutionWithAdaptiveLearningRate"] = RefinedDifferentialEvolutionWithAdaptiveLearningRate + res = NonObjectOptimizer(method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer(method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate").set_name("LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate", register=True) except Exception as e: print("RefinedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDifferentialParticleSwarmOptimization import ( - RefinedDifferentialParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.RefinedDifferentialParticleSwarmOptimization import RefinedDifferentialParticleSwarmOptimization - lama_register["RefinedDifferentialParticleSwarmOptimization"] = ( - RefinedDifferentialParticleSwarmOptimization - ) - LLAMARefinedDifferentialParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMARefinedDifferentialParticleSwarmOptimization" - ).set_name("LLAMARefinedDifferentialParticleSwarmOptimization", register=True) + lama_register["RefinedDifferentialParticleSwarmOptimization"] = RefinedDifferentialParticleSwarmOptimization + res = NonObjectOptimizer(method="LLAMARefinedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMARefinedDifferentialParticleSwarmOptimization").set_name("LLAMARefinedDifferentialParticleSwarmOptimization", register=True) except Exception as e: print("RefinedDifferentialParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDimensionalCyclicCrossoverEvolver import ( - RefinedDimensionalCyclicCrossoverEvolver, - ) + from nevergrad.optimization.lama.RefinedDimensionalCyclicCrossoverEvolver import RefinedDimensionalCyclicCrossoverEvolver lama_register["RefinedDimensionalCyclicCrossoverEvolver"] = RefinedDimensionalCyclicCrossoverEvolver - LLAMARefinedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( - method="LLAMARefinedDimensionalCyclicCrossoverEvolver" - ).set_name("LLAMARefinedDimensionalCyclicCrossoverEvolver", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer(method="LLAMARefinedDimensionalCyclicCrossoverEvolver").set_name("LLAMARefinedDimensionalCyclicCrossoverEvolver", register=True) except Exception as e: print("RefinedDimensionalCyclicCrossoverEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV2 import ( - RefinedDimensionalFeedbackEvolverV2, - ) + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV2 import RefinedDimensionalFeedbackEvolverV2 lama_register["RefinedDimensionalFeedbackEvolverV2"] = RefinedDimensionalFeedbackEvolverV2 - LLAMARefinedDimensionalFeedbackEvolverV2 = NonObjectOptimizer( - method="LLAMARefinedDimensionalFeedbackEvolverV2" - ).set_name("LLAMARefinedDimensionalFeedbackEvolverV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDimensionalFeedbackEvolverV2 = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV2").set_name("LLAMARefinedDimensionalFeedbackEvolverV2", register=True) except Exception as e: print("RefinedDimensionalFeedbackEvolverV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV4 import ( - RefinedDimensionalFeedbackEvolverV4, - ) + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV4 import RefinedDimensionalFeedbackEvolverV4 lama_register["RefinedDimensionalFeedbackEvolverV4"] = RefinedDimensionalFeedbackEvolverV4 - LLAMARefinedDimensionalFeedbackEvolverV4 = NonObjectOptimizer( - method="LLAMARefinedDimensionalFeedbackEvolverV4" - ).set_name("LLAMARefinedDimensionalFeedbackEvolverV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDimensionalFeedbackEvolverV4 = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV4").set_name("LLAMARefinedDimensionalFeedbackEvolverV4", register=True) except Exception as e: print("RefinedDimensionalFeedbackEvolverV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDualConvergenceEvolutiveStrategy import ( - RefinedDualConvergenceEvolutiveStrategy, - ) + from nevergrad.optimization.lama.RefinedDualConvergenceEvolutiveStrategy import RefinedDualConvergenceEvolutiveStrategy lama_register["RefinedDualConvergenceEvolutiveStrategy"] = RefinedDualConvergenceEvolutiveStrategy - LLAMARefinedDualConvergenceEvolutiveStrategy = NonObjectOptimizer( - method="LLAMARefinedDualConvergenceEvolutiveStrategy" - ).set_name("LLAMARefinedDualConvergenceEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDualConvergenceEvolutiveStrategy = NonObjectOptimizer(method="LLAMARefinedDualConvergenceEvolutiveStrategy").set_name("LLAMARefinedDualConvergenceEvolutiveStrategy", register=True) except Exception as e: print("RefinedDualConvergenceEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDualPhaseADPSO_DE_V3_Enhanced import ( - RefinedDualPhaseADPSO_DE_V3_Enhanced, - ) + from nevergrad.optimization.lama.RefinedDualPhaseADPSO_DE_V3_Enhanced import RefinedDualPhaseADPSO_DE_V3_Enhanced lama_register["RefinedDualPhaseADPSO_DE_V3_Enhanced"] = RefinedDualPhaseADPSO_DE_V3_Enhanced - LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced = NonObjectOptimizer( - method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced" - ).set_name("LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced = NonObjectOptimizer(method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced").set_name("LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced", register=True) except Exception as e: print("RefinedDualPhaseADPSO_DE_V3_Enhanced can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDualPhaseOptimization import RefinedDualPhaseOptimization lama_register["RefinedDualPhaseOptimization"] = RefinedDualPhaseOptimization - LLAMARefinedDualPhaseOptimization = NonObjectOptimizer( - method="LLAMARefinedDualPhaseOptimization" - ).set_name("LLAMARefinedDualPhaseOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDualPhaseOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDualPhaseOptimization = NonObjectOptimizer(method="LLAMARefinedDualPhaseOptimization").set_name("LLAMARefinedDualPhaseOptimization", register=True) except Exception as e: print("RefinedDualPhaseOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDualStrategyAdaptiveDE import RefinedDualStrategyAdaptiveDE lama_register["RefinedDualStrategyAdaptiveDE"] = RefinedDualStrategyAdaptiveDE - LLAMARefinedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedDualStrategyAdaptiveDE" - ).set_name("LLAMARefinedDualStrategyAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDualStrategyAdaptiveDE").set_name("LLAMARefinedDualStrategyAdaptiveDE", register=True) except Exception as e: print("RefinedDualStrategyAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDynamicAdaptiveDE import RefinedDynamicAdaptiveDE lama_register["RefinedDynamicAdaptiveDE"] = RefinedDynamicAdaptiveDE - LLAMARefinedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE").set_name( - "LLAMARefinedDynamicAdaptiveDE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE").set_name("LLAMARefinedDynamicAdaptiveDE", register=True) except Exception as e: print("RefinedDynamicAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDE import RefinedDynamicAdaptiveHybridDE lama_register["RefinedDynamicAdaptiveHybridDE"] = RefinedDynamicAdaptiveHybridDE - LLAMARefinedDynamicAdaptiveHybridDE = NonObjectOptimizer( - method="LLAMARefinedDynamicAdaptiveHybridDE" - ).set_name("LLAMARefinedDynamicAdaptiveHybridDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDE").set_name("LLAMARefinedDynamicAdaptiveHybridDE", register=True) except Exception as e: print("RefinedDynamicAdaptiveHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( - RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory, - ) + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory import RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory - lama_register["RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( - RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory - ) - LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( - method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory" - ).set_name("LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) + lama_register["RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) except Exception as e: print("RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizer import ( - RefinedDynamicAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizer import RefinedDynamicAdaptiveHybridOptimizer lama_register["RefinedDynamicAdaptiveHybridOptimizer"] = RefinedDynamicAdaptiveHybridOptimizer - LLAMARefinedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedDynamicAdaptiveHybridOptimizer" - ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizer").set_name("LLAMARefinedDynamicAdaptiveHybridOptimizer", register=True) except Exception as e: print("RefinedDynamicAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizerV2 import ( - RefinedDynamicAdaptiveHybridOptimizerV2, - ) + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizerV2 import RefinedDynamicAdaptiveHybridOptimizerV2 lama_register["RefinedDynamicAdaptiveHybridOptimizerV2"] = RefinedDynamicAdaptiveHybridOptimizerV2 - LLAMARefinedDynamicAdaptiveHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2" - ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveHybridOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2").set_name("LLAMARefinedDynamicAdaptiveHybridOptimizerV2", register=True) except Exception as e: print("RefinedDynamicAdaptiveHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveStrategyV23 import ( - RefinedDynamicAdaptiveStrategyV23, - ) + from nevergrad.optimization.lama.RefinedDynamicAdaptiveStrategyV23 import RefinedDynamicAdaptiveStrategyV23 lama_register["RefinedDynamicAdaptiveStrategyV23"] = RefinedDynamicAdaptiveStrategyV23 - LLAMARefinedDynamicAdaptiveStrategyV23 = NonObjectOptimizer( - method="LLAMARefinedDynamicAdaptiveStrategyV23" - ).set_name("LLAMARefinedDynamicAdaptiveStrategyV23", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveStrategyV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicAdaptiveStrategyV23 = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveStrategyV23").set_name("LLAMARefinedDynamicAdaptiveStrategyV23", register=True) except Exception as e: print("RefinedDynamicAdaptiveStrategyV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV3 import ( - RefinedDynamicClusterHybridOptimizationV3, - ) + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV3 import RefinedDynamicClusterHybridOptimizationV3 lama_register["RefinedDynamicClusterHybridOptimizationV3"] = RefinedDynamicClusterHybridOptimizationV3 - LLAMARefinedDynamicClusterHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMARefinedDynamicClusterHybridOptimizationV3" - ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicClusterHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV3").set_name("LLAMARefinedDynamicClusterHybridOptimizationV3", register=True) except Exception as e: print("RefinedDynamicClusterHybridOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV4 import ( - RefinedDynamicClusterHybridOptimizationV4, - ) + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV4 import RefinedDynamicClusterHybridOptimizationV4 lama_register["RefinedDynamicClusterHybridOptimizationV4"] = RefinedDynamicClusterHybridOptimizationV4 - LLAMARefinedDynamicClusterHybridOptimizationV4 = NonObjectOptimizer( - method="LLAMARefinedDynamicClusterHybridOptimizationV4" - ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicClusterHybridOptimizationV4 = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV4").set_name("LLAMARefinedDynamicClusterHybridOptimizationV4", register=True) except Exception as e: print("RefinedDynamicClusterHybridOptimizationV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDynamicClusteringPSO import RefinedDynamicClusteringPSO lama_register["RefinedDynamicClusteringPSO"] = RefinedDynamicClusteringPSO - LLAMARefinedDynamicClusteringPSO = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO").set_name( - "LLAMARefinedDynamicClusteringPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicClusteringPSO = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO").set_name("LLAMARefinedDynamicClusteringPSO", register=True) except Exception as e: print("RefinedDynamicClusteringPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicCrowdingHybridOptimizer import ( - RefinedDynamicCrowdingHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedDynamicCrowdingHybridOptimizer import RefinedDynamicCrowdingHybridOptimizer lama_register["RefinedDynamicCrowdingHybridOptimizer"] = RefinedDynamicCrowdingHybridOptimizer - LLAMARefinedDynamicCrowdingHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedDynamicCrowdingHybridOptimizer" - ).set_name("LLAMARefinedDynamicCrowdingHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicCrowdingHybridOptimizer").set_name("LLAMARefinedDynamicCrowdingHybridOptimizer", register=True) except Exception as e: print("RefinedDynamicCrowdingHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicEliteAdaptiveHybridOptimizer import ( - RefinedDynamicEliteAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedDynamicEliteAdaptiveHybridOptimizer import RefinedDynamicEliteAdaptiveHybridOptimizer lama_register["RefinedDynamicEliteAdaptiveHybridOptimizer"] = RefinedDynamicEliteAdaptiveHybridOptimizer - LLAMARefinedDynamicEliteAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer" - ).set_name("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicEliteAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer").set_name("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer", register=True) except Exception as e: print("RefinedDynamicEliteAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicEnhancedHybridOptimizer import ( - RefinedDynamicEnhancedHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedDynamicEnhancedHybridOptimizer import RefinedDynamicEnhancedHybridOptimizer lama_register["RefinedDynamicEnhancedHybridOptimizer"] = RefinedDynamicEnhancedHybridOptimizer - LLAMARefinedDynamicEnhancedHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedDynamicEnhancedHybridOptimizer" - ).set_name("LLAMARefinedDynamicEnhancedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicEnhancedHybridOptimizer").set_name("LLAMARefinedDynamicEnhancedHybridOptimizer", register=True) except Exception as e: print("RefinedDynamicEnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( - RefinedDynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.RefinedDynamicGradientBoostedMemorySimulatedAnnealing import RefinedDynamicGradientBoostedMemorySimulatedAnnealing - lama_register["RefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( - RefinedDynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["RefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = RefinedDynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("RefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedDynamicHybridDEPSOWithEliteMemoryV2 import ( - RefinedDynamicHybridDEPSOWithEliteMemoryV2, - ) + from nevergrad.optimization.lama.RefinedDynamicHybridDEPSOWithEliteMemoryV2 import RefinedDynamicHybridDEPSOWithEliteMemoryV2 lama_register["RefinedDynamicHybridDEPSOWithEliteMemoryV2"] = RefinedDynamicHybridDEPSOWithEliteMemoryV2 - LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2 = NonObjectOptimizer( - method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2" - ).set_name("LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2 = NonObjectOptimizer(method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2").set_name("LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2", register=True) except Exception as e: print("RefinedDynamicHybridDEPSOWithEliteMemoryV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDynamicHybridOptimizer import RefinedDynamicHybridOptimizer lama_register["RefinedDynamicHybridOptimizer"] = RefinedDynamicHybridOptimizer - LLAMARefinedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedDynamicHybridOptimizer" - ).set_name("LLAMARefinedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicHybridOptimizer").set_name("LLAMARefinedDynamicHybridOptimizer", register=True) except Exception as e: print("RefinedDynamicHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedDynamicQuantumEvolution import RefinedDynamicQuantumEvolution lama_register["RefinedDynamicQuantumEvolution"] = RefinedDynamicQuantumEvolution - LLAMARefinedDynamicQuantumEvolution = NonObjectOptimizer( - method="LLAMARefinedDynamicQuantumEvolution" - ).set_name("LLAMARefinedDynamicQuantumEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedDynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedDynamicQuantumEvolution = NonObjectOptimizer(method="LLAMARefinedDynamicQuantumEvolution").set_name("LLAMARefinedDynamicQuantumEvolution", register=True) except Exception as e: print("RefinedDynamicQuantumEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEliteAdaptiveHybridDEPSO import RefinedEliteAdaptiveHybridDEPSO lama_register["RefinedEliteAdaptiveHybridDEPSO"] = RefinedEliteAdaptiveHybridDEPSO - LLAMARefinedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveHybridDEPSO" - ).set_name("LLAMARefinedEliteAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveHybridDEPSO").set_name("LLAMARefinedEliteAdaptiveHybridDEPSO", register=True) except Exception as e: print("RefinedEliteAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( - RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( - RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - ) - LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" - ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 import ( - RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 import RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 - lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3"] = ( - RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 - ) - LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3" - ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3", register=True) + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3"] = RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3").set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizer import ( - RefinedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizer import RefinedEliteAdaptiveMemoryHybridOptimizer lama_register["RefinedEliteAdaptiveMemoryHybridOptimizer"] = RefinedEliteAdaptiveMemoryHybridOptimizer - LLAMARefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV3 import ( - RefinedEliteAdaptiveMemoryHybridOptimizerV3, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV3 import RefinedEliteAdaptiveMemoryHybridOptimizerV3 lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV3"] = RefinedEliteAdaptiveMemoryHybridOptimizerV3 - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3" - ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV4 import ( - RefinedEliteAdaptiveMemoryHybridOptimizerV4, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV4 import RefinedEliteAdaptiveMemoryHybridOptimizerV4 lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV4"] = RefinedEliteAdaptiveMemoryHybridOptimizerV4 - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4" - ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV5 import ( - RefinedEliteAdaptiveMemoryHybridOptimizerV5, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV5 import RefinedEliteAdaptiveMemoryHybridOptimizerV5 lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV5"] = RefinedEliteAdaptiveMemoryHybridOptimizerV5 - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5" - ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5", register=True) except Exception as e: print("RefinedEliteAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch import ( - RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch, - ) + from nevergrad.optimization.lama.RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch import RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch - lama_register["RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch"] = ( - RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch - ) - LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch = NonObjectOptimizer( - method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch" - ).set_name("LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch", register=True) + lama_register["RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch"] = RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch + res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch").set_name("LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch", register=True) except Exception as e: print("RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteDynamicHybridOptimizer import ( - RefinedEliteDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedEliteDynamicHybridOptimizer import RefinedEliteDynamicHybridOptimizer lama_register["RefinedEliteDynamicHybridOptimizer"] = RefinedEliteDynamicHybridOptimizer - LLAMARefinedEliteDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedEliteDynamicHybridOptimizer" - ).set_name("LLAMARefinedEliteDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteDynamicHybridOptimizer").set_name("LLAMARefinedEliteDynamicHybridOptimizer", register=True) except Exception as e: print("RefinedEliteDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEliteDynamicMemoryHybridOptimizer import ( - RefinedEliteDynamicMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedEliteDynamicMemoryHybridOptimizer import RefinedEliteDynamicMemoryHybridOptimizer lama_register["RefinedEliteDynamicMemoryHybridOptimizer"] = RefinedEliteDynamicMemoryHybridOptimizer - LLAMARefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedEliteDynamicMemoryHybridOptimizer" - ).set_name("LLAMARefinedEliteDynamicMemoryHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteDynamicMemoryHybridOptimizer").set_name("LLAMARefinedEliteDynamicMemoryHybridOptimizer", register=True) except Exception as e: print("RefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEliteGuidedAdaptiveDE import RefinedEliteGuidedAdaptiveDE lama_register["RefinedEliteGuidedAdaptiveDE"] = RefinedEliteGuidedAdaptiveDE - LLAMARefinedEliteGuidedAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedEliteGuidedAdaptiveDE" - ).set_name("LLAMARefinedEliteGuidedAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteGuidedAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedEliteGuidedAdaptiveDE").set_name("LLAMARefinedEliteGuidedAdaptiveDE", register=True) except Exception as e: print("RefinedEliteGuidedAdaptiveDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE import RefinedEliteGuidedMutationDE lama_register["RefinedEliteGuidedMutationDE"] = RefinedEliteGuidedMutationDE - LLAMARefinedEliteGuidedMutationDE = NonObjectOptimizer( - method="LLAMARefinedEliteGuidedMutationDE" - ).set_name("LLAMARefinedEliteGuidedMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE").set_name("LLAMARefinedEliteGuidedMutationDE", register=True) except Exception as e: print("RefinedEliteGuidedMutationDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE_v3 import RefinedEliteGuidedMutationDE_v3 lama_register["RefinedEliteGuidedMutationDE_v3"] = RefinedEliteGuidedMutationDE_v3 - LLAMARefinedEliteGuidedMutationDE_v3 = NonObjectOptimizer( - method="LLAMARefinedEliteGuidedMutationDE_v3" - ).set_name("LLAMARefinedEliteGuidedMutationDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE_v3").set_name("LLAMARefinedEliteGuidedMutationDE_v3", register=True) except Exception as e: print("RefinedEliteGuidedMutationDE_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( - RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, - ) + from nevergrad.optimization.lama.RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - lama_register["RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = ( - RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - ) - LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( - method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" - ).set_name("LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) + lama_register["RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer(method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined").set_name("LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) except Exception as e: print("RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 import ( - RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 import RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 - lama_register["RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5"] = ( - RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 - ) - LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5" - ).set_name("LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5", register=True) + lama_register["RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5"] = RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5").set_name("LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5", register=True) except Exception as e: print("RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost import ( - RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost import RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost - lama_register["RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( - RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost - ) - LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost" - ).set_name("LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) + lama_register["RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost"] = RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) except Exception as e: print("RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDualPhaseStrategyV9 import ( - RefinedEnhancedAdaptiveDualPhaseStrategyV9, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDualPhaseStrategyV9 import RefinedEnhancedAdaptiveDualPhaseStrategyV9 lama_register["RefinedEnhancedAdaptiveDualPhaseStrategyV9"] = RefinedEnhancedAdaptiveDualPhaseStrategyV9 - LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9" - ).set_name("LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9").set_name("LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9", register=True) except Exception as e: print("RefinedEnhancedAdaptiveDualPhaseStrategyV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO import ( - RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO import RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO - lama_register["RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( - RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO - ) - LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO" - ).set_name("LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) + lama_register["RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO"] = RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) except Exception as e: print("RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 import ( - RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 import RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 - lama_register["RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9"] = ( - RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 - ) - LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9" - ).set_name("LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9", register=True) + lama_register["RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9"] = RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9").set_name("LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9", register=True) except Exception as e: print("RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonySearch import ( - RefinedEnhancedAdaptiveHarmonySearch, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonySearch import RefinedEnhancedAdaptiveHarmonySearch lama_register["RefinedEnhancedAdaptiveHarmonySearch"] = RefinedEnhancedAdaptiveHarmonySearch - LLAMARefinedEnhancedAdaptiveHarmonySearch = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveHarmonySearch" - ).set_name("LLAMARefinedEnhancedAdaptiveHarmonySearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonySearch").set_name("LLAMARefinedEnhancedAdaptiveHarmonySearch", register=True) except Exception as e: print("RefinedEnhancedAdaptiveHarmonySearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 import ( - RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 import RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 - lama_register["RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2"] = ( - RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 - ) - LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2" - ).set_name("LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2", register=True) + lama_register["RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2"] = RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2").set_name("LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2", register=True) except Exception as e: print("RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm import ( - RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm import RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm - lama_register["RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm"] = ( - RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm - ) - LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm" - ).set_name("LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm", register=True) + lama_register["RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm"] = RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiOperatorSearch import ( - RefinedEnhancedAdaptiveMultiOperatorSearch, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiOperatorSearch import RefinedEnhancedAdaptiveMultiOperatorSearch lama_register["RefinedEnhancedAdaptiveMultiOperatorSearch"] = RefinedEnhancedAdaptiveMultiOperatorSearch - LLAMARefinedEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch" - ).set_name("LLAMARefinedEnhancedAdaptiveMultiOperatorSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch").set_name("LLAMARefinedEnhancedAdaptiveMultiOperatorSearch", register=True) except Exception as e: print("RefinedEnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiStrategyDE import ( - RefinedEnhancedAdaptiveMultiStrategyDE, - ) + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiStrategyDE import RefinedEnhancedAdaptiveMultiStrategyDE lama_register["RefinedEnhancedAdaptiveMultiStrategyDE"] = RefinedEnhancedAdaptiveMultiStrategyDE - LLAMARefinedEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE" - ).set_name("LLAMARefinedEnhancedAdaptiveMultiStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE").set_name("LLAMARefinedEnhancedAdaptiveMultiStrategyDE", register=True) except Exception as e: print("RefinedEnhancedAdaptiveMultiStrategyDE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v45 import RefinedEnhancedAdaptiveQGSA_v45 lama_register["RefinedEnhancedAdaptiveQGSA_v45"] = RefinedEnhancedAdaptiveQGSA_v45 - LLAMARefinedEnhancedAdaptiveQGSA_v45 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveQGSA_v45" - ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v45", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveQGSA_v45 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v45").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v45", register=True) except Exception as e: print("RefinedEnhancedAdaptiveQGSA_v45 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v46 import RefinedEnhancedAdaptiveQGSA_v46 lama_register["RefinedEnhancedAdaptiveQGSA_v46"] = RefinedEnhancedAdaptiveQGSA_v46 - LLAMARefinedEnhancedAdaptiveQGSA_v46 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveQGSA_v46" - ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v46", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveQGSA_v46 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v46").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v46", register=True) except Exception as e: print("RefinedEnhancedAdaptiveQGSA_v46 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v48 import RefinedEnhancedAdaptiveQGSA_v48 lama_register["RefinedEnhancedAdaptiveQGSA_v48"] = RefinedEnhancedAdaptiveQGSA_v48 - LLAMARefinedEnhancedAdaptiveQGSA_v48 = NonObjectOptimizer( - method="LLAMARefinedEnhancedAdaptiveQGSA_v48" - ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v48", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedAdaptiveQGSA_v48 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v48").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v48", register=True) except Exception as e: print("RefinedEnhancedAdaptiveQGSA_v48 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedBalancedDualStrategyAdaptiveDE import ( - RefinedEnhancedBalancedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.RefinedEnhancedBalancedDualStrategyAdaptiveDE import RefinedEnhancedBalancedDualStrategyAdaptiveDE - lama_register["RefinedEnhancedBalancedDualStrategyAdaptiveDE"] = ( - RefinedEnhancedBalancedDualStrategyAdaptiveDE - ) - LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE" - ).set_name("LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE", register=True) + lama_register["RefinedEnhancedBalancedDualStrategyAdaptiveDE"] = RefinedEnhancedBalancedDualStrategyAdaptiveDE + res = NonObjectOptimizer(method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE").set_name("LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("RefinedEnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedCovarianceMatrixDifferentialEvolution import ( - RefinedEnhancedCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedEnhancedCovarianceMatrixDifferentialEvolution import RefinedEnhancedCovarianceMatrixDifferentialEvolution - lama_register["RefinedEnhancedCovarianceMatrixDifferentialEvolution"] = ( - RefinedEnhancedCovarianceMatrixDifferentialEvolution - ) - LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution", register=True) + lama_register["RefinedEnhancedCovarianceMatrixDifferentialEvolution"] = RefinedEnhancedCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("RefinedEnhancedCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDifferentialEvolutionLocalSearch_v42 import ( - RefinedEnhancedDifferentialEvolutionLocalSearch_v42, - ) + from nevergrad.optimization.lama.RefinedEnhancedDifferentialEvolutionLocalSearch_v42 import RefinedEnhancedDifferentialEvolutionLocalSearch_v42 - lama_register["RefinedEnhancedDifferentialEvolutionLocalSearch_v42"] = ( - RefinedEnhancedDifferentialEvolutionLocalSearch_v42 - ) - LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42" - ).set_name("LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42", register=True) + lama_register["RefinedEnhancedDifferentialEvolutionLocalSearch_v42"] = RefinedEnhancedDifferentialEvolutionLocalSearch_v42 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42 = NonObjectOptimizer(method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42").set_name("LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42", register=True) except Exception as e: print("RefinedEnhancedDifferentialEvolutionLocalSearch_v42 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( - RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 import RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 - lama_register["RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( - RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 - ) - LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3" - ).set_name("LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) + lama_register["RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3"] = RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3").set_name("LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) except Exception as e: print("RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimization import ( - RefinedEnhancedDualPhaseHybridOptimization, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimization import RefinedEnhancedDualPhaseHybridOptimization lama_register["RefinedEnhancedDualPhaseHybridOptimization"] = RefinedEnhancedDualPhaseHybridOptimization - LLAMARefinedEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualPhaseHybridOptimization" - ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualPhaseHybridOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimization").set_name("LLAMARefinedEnhancedDualPhaseHybridOptimization", register=True) except Exception as e: print("RefinedEnhancedDualPhaseHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimizationV3 import ( - RefinedEnhancedDualPhaseHybridOptimizationV3, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimizationV3 import RefinedEnhancedDualPhaseHybridOptimizationV3 - lama_register["RefinedEnhancedDualPhaseHybridOptimizationV3"] = ( - RefinedEnhancedDualPhaseHybridOptimizationV3 - ) - LLAMARefinedEnhancedDualPhaseHybridOptimizationV3 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3" - ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimizationV3", register=True) + lama_register["RefinedEnhancedDualPhaseHybridOptimizationV3"] = RefinedEnhancedDualPhaseHybridOptimizationV3 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualPhaseHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3").set_name("LLAMARefinedEnhancedDualPhaseHybridOptimizationV3", register=True) except Exception as e: print("RefinedEnhancedDualPhaseHybridOptimizationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v2 import ( - RefinedEnhancedDualStrategyAdaptiveDE_v2, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v2 import RefinedEnhancedDualStrategyAdaptiveDE_v2 lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v2"] = RefinedEnhancedDualStrategyAdaptiveDE_v2 - LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2" - ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2").set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2", register=True) except Exception as e: print("RefinedEnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v3 import ( - RefinedEnhancedDualStrategyAdaptiveDE_v3, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v3 import RefinedEnhancedDualStrategyAdaptiveDE_v3 lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v3"] = RefinedEnhancedDualStrategyAdaptiveDE_v3 - LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3" - ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3").set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3", register=True) except Exception as e: print("RefinedEnhancedDualStrategyAdaptiveDE_v3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyDynamicDE import ( - RefinedEnhancedDualStrategyDynamicDE, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyDynamicDE import RefinedEnhancedDualStrategyDynamicDE lama_register["RefinedEnhancedDualStrategyDynamicDE"] = RefinedEnhancedDualStrategyDynamicDE - LLAMARefinedEnhancedDualStrategyDynamicDE = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualStrategyDynamicDE" - ).set_name("LLAMARefinedEnhancedDualStrategyDynamicDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualStrategyDynamicDE = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyDynamicDE").set_name("LLAMARefinedEnhancedDualStrategyDynamicDE", register=True) except Exception as e: print("RefinedEnhancedDualStrategyDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyElitistDE_v2 import ( - RefinedEnhancedDualStrategyElitistDE_v2, - ) + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyElitistDE_v2 import RefinedEnhancedDualStrategyElitistDE_v2 lama_register["RefinedEnhancedDualStrategyElitistDE_v2"] = RefinedEnhancedDualStrategyElitistDE_v2 - LLAMARefinedEnhancedDualStrategyElitistDE_v2 = NonObjectOptimizer( - method="LLAMARefinedEnhancedDualStrategyElitistDE_v2" - ).set_name("LLAMARefinedEnhancedDualStrategyElitistDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyElitistDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDualStrategyElitistDE_v2 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyElitistDE_v2").set_name("LLAMARefinedEnhancedDualStrategyElitistDE_v2", register=True) except Exception as e: print("RefinedEnhancedDualStrategyElitistDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDynamicAdaptiveHybridOptimization import ( - RefinedEnhancedDynamicAdaptiveHybridOptimization, - ) + from nevergrad.optimization.lama.RefinedEnhancedDynamicAdaptiveHybridOptimization import RefinedEnhancedDynamicAdaptiveHybridOptimization - lama_register["RefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( - RefinedEnhancedDynamicAdaptiveHybridOptimization - ) - LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( - method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization" - ).set_name("LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) + lama_register["RefinedEnhancedDynamicAdaptiveHybridOptimization"] = RefinedEnhancedDynamicAdaptiveHybridOptimization + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) except Exception as e: print("RefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedDynamicDualStrategyHybridDE import ( - RefinedEnhancedDynamicDualStrategyHybridDE, - ) + from nevergrad.optimization.lama.RefinedEnhancedDynamicDualStrategyHybridDE import RefinedEnhancedDynamicDualStrategyHybridDE lama_register["RefinedEnhancedDynamicDualStrategyHybridDE"] = RefinedEnhancedDynamicDualStrategyHybridDE - LLAMARefinedEnhancedDynamicDualStrategyHybridDE = NonObjectOptimizer( - method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE" - ).set_name("LLAMARefinedEnhancedDynamicDualStrategyHybridDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedDynamicDualStrategyHybridDE = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE").set_name("LLAMARefinedEnhancedDynamicDualStrategyHybridDE", register=True) except Exception as e: print("RefinedEnhancedDynamicDualStrategyHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedAdaptiveRestartDE import ( - RefinedEnhancedEliteGuidedAdaptiveRestartDE, - ) + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedAdaptiveRestartDE import RefinedEnhancedEliteGuidedAdaptiveRestartDE lama_register["RefinedEnhancedEliteGuidedAdaptiveRestartDE"] = RefinedEnhancedEliteGuidedAdaptiveRestartDE - LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( - method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE" - ).set_name("LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE").set_name("LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE", register=True) except Exception as e: print("RefinedEnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedMassQGSA_v87 import ( - RefinedEnhancedEliteGuidedMassQGSA_v87, - ) + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedMassQGSA_v87 import RefinedEnhancedEliteGuidedMassQGSA_v87 lama_register["RefinedEnhancedEliteGuidedMassQGSA_v87"] = RefinedEnhancedEliteGuidedMassQGSA_v87 - LLAMARefinedEnhancedEliteGuidedMassQGSA_v87 = NonObjectOptimizer( - method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87" - ).set_name("LLAMARefinedEnhancedEliteGuidedMassQGSA_v87", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedEliteGuidedMassQGSA_v87 = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87").set_name("LLAMARefinedEnhancedEliteGuidedMassQGSA_v87", register=True) except Exception as e: print("RefinedEnhancedEliteGuidedMassQGSA_v87 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHybridAdaptiveMultiStageOptimization import ( - RefinedEnhancedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.RefinedEnhancedHybridAdaptiveMultiStageOptimization import RefinedEnhancedHybridAdaptiveMultiStageOptimization - lama_register["RefinedEnhancedHybridAdaptiveMultiStageOptimization"] = ( - RefinedEnhancedHybridAdaptiveMultiStageOptimization - ) - LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization", register=True) + lama_register["RefinedEnhancedHybridAdaptiveMultiStageOptimization"] = RefinedEnhancedHybridAdaptiveMultiStageOptimization + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("RefinedEnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( - RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3, - ) + from nevergrad.optimization.lama.RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 import RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 - lama_register["RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( - RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 - ) - LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( - method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3" - ).set_name("LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) + lama_register["RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3"] = RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) except Exception as e: print("RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 import ( - RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2, - ) + from nevergrad.optimization.lama.RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 import RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 - lama_register["RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2"] = ( - RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 - ) - LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 = NonObjectOptimizer( - method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2" - ).set_name("LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2", register=True) + lama_register["RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2"] = RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2").set_name("LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2", register=True) except Exception as e: print("RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHybridExplorationOptimization import ( - RefinedEnhancedHybridExplorationOptimization, - ) + from nevergrad.optimization.lama.RefinedEnhancedHybridExplorationOptimization import RefinedEnhancedHybridExplorationOptimization - lama_register["RefinedEnhancedHybridExplorationOptimization"] = ( - RefinedEnhancedHybridExplorationOptimization - ) - LLAMARefinedEnhancedHybridExplorationOptimization = NonObjectOptimizer( - method="LLAMARefinedEnhancedHybridExplorationOptimization" - ).set_name("LLAMARefinedEnhancedHybridExplorationOptimization", register=True) + lama_register["RefinedEnhancedHybridExplorationOptimization"] = RefinedEnhancedHybridExplorationOptimization + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridExplorationOptimization").set_name("LLAMARefinedEnhancedHybridExplorationOptimization", register=True) except Exception as e: print("RefinedEnhancedHybridExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHyperAdaptiveHybridDEPSO import ( - RefinedEnhancedHyperAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.RefinedEnhancedHyperAdaptiveHybridDEPSO import RefinedEnhancedHyperAdaptiveHybridDEPSO lama_register["RefinedEnhancedHyperAdaptiveHybridDEPSO"] = RefinedEnhancedHyperAdaptiveHybridDEPSO - LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO" - ).set_name("LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO").set_name("LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO", register=True) except Exception as e: print("RefinedEnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 import ( - RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63, - ) + from nevergrad.optimization.lama.RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 import RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 - lama_register["RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63"] = ( - RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 - ) - LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 = NonObjectOptimizer( - method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63" - ).set_name("LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63", register=True) + lama_register["RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63"] = RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63").set_name("LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63", register=True) except Exception as e: print("RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedHyperStrategicOptimizerV57 import ( - RefinedEnhancedHyperStrategicOptimizerV57, - ) + from nevergrad.optimization.lama.RefinedEnhancedHyperStrategicOptimizerV57 import RefinedEnhancedHyperStrategicOptimizerV57 lama_register["RefinedEnhancedHyperStrategicOptimizerV57"] = RefinedEnhancedHyperStrategicOptimizerV57 - LLAMARefinedEnhancedHyperStrategicOptimizerV57 = NonObjectOptimizer( - method="LLAMARefinedEnhancedHyperStrategicOptimizerV57" - ).set_name("LLAMARefinedEnhancedHyperStrategicOptimizerV57", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperStrategicOptimizerV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedHyperStrategicOptimizerV57 = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperStrategicOptimizerV57").set_name("LLAMARefinedEnhancedHyperStrategicOptimizerV57", register=True) except Exception as e: print("RefinedEnhancedHyperStrategicOptimizerV57 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedMetaNetAQAPSOv7 import RefinedEnhancedMetaNetAQAPSOv7 lama_register["RefinedEnhancedMetaNetAQAPSOv7"] = RefinedEnhancedMetaNetAQAPSOv7 - LLAMARefinedEnhancedMetaNetAQAPSOv7 = NonObjectOptimizer( - method="LLAMARefinedEnhancedMetaNetAQAPSOv7" - ).set_name("LLAMARefinedEnhancedMetaNetAQAPSOv7", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedMetaNetAQAPSOv7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedMetaNetAQAPSOv7 = NonObjectOptimizer(method="LLAMARefinedEnhancedMetaNetAQAPSOv7").set_name("LLAMARefinedEnhancedMetaNetAQAPSOv7", register=True) except Exception as e: print("RefinedEnhancedMetaNetAQAPSOv7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedOptimizedEvolutiveStrategy import ( - RefinedEnhancedOptimizedEvolutiveStrategy, - ) + from nevergrad.optimization.lama.RefinedEnhancedOptimizedEvolutiveStrategy import RefinedEnhancedOptimizedEvolutiveStrategy lama_register["RefinedEnhancedOptimizedEvolutiveStrategy"] = RefinedEnhancedOptimizedEvolutiveStrategy - LLAMARefinedEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( - method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy" - ).set_name("LLAMARefinedEnhancedOptimizedEvolutiveStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy").set_name("LLAMARefinedEnhancedOptimizedEvolutiveStrategy", register=True) except Exception as e: print("RefinedEnhancedOptimizedEvolutiveStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedPrecisionEvolutionaryOptimizerV40 import ( - RefinedEnhancedPrecisionEvolutionaryOptimizerV40, - ) + from nevergrad.optimization.lama.RefinedEnhancedPrecisionEvolutionaryOptimizerV40 import RefinedEnhancedPrecisionEvolutionaryOptimizerV40 - lama_register["RefinedEnhancedPrecisionEvolutionaryOptimizerV40"] = ( - RefinedEnhancedPrecisionEvolutionaryOptimizerV40 - ) - LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40 = NonObjectOptimizer( - method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40" - ).set_name("LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40", register=True) + lama_register["RefinedEnhancedPrecisionEvolutionaryOptimizerV40"] = RefinedEnhancedPrecisionEvolutionaryOptimizerV40 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40 = NonObjectOptimizer(method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40").set_name("LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40", register=True) except Exception as e: print("RefinedEnhancedPrecisionEvolutionaryOptimizerV40 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedQAPSOAIRVCHRLS import RefinedEnhancedQAPSOAIRVCHRLS lama_register["RefinedEnhancedQAPSOAIRVCHRLS"] = RefinedEnhancedQAPSOAIRVCHRLS - LLAMARefinedEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer( - method="LLAMARefinedEnhancedQAPSOAIRVCHRLS" - ).set_name("LLAMARefinedEnhancedQAPSOAIRVCHRLS", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMARefinedEnhancedQAPSOAIRVCHRLS").set_name("LLAMARefinedEnhancedQAPSOAIRVCHRLS", register=True) except Exception as e: print("RefinedEnhancedQAPSOAIRVCHRLS can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 import ( - RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 import RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 - lama_register["RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2"] = ( - RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 - ) - LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2" - ).set_name("LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2", register=True) + lama_register["RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2"] = RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2").set_name("LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2", register=True) except Exception as e: print("RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedRAMEDSProV3 import RefinedEnhancedRAMEDSProV3 lama_register["RefinedEnhancedRAMEDSProV3"] = RefinedEnhancedRAMEDSProV3 - LLAMARefinedEnhancedRAMEDSProV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3").set_name( - "LLAMARefinedEnhancedRAMEDSProV3", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedRAMEDSProV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3").set_name("LLAMARefinedEnhancedRAMEDSProV3", register=True) except Exception as e: print("RefinedEnhancedRAMEDSProV3 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv3 import RefinedEnhancedRAMEDSv3 lama_register["RefinedEnhancedRAMEDSv3"] = RefinedEnhancedRAMEDSv3 - LLAMARefinedEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3").set_name( - "LLAMARefinedEnhancedRAMEDSv3", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3").set_name("LLAMARefinedEnhancedRAMEDSv3", register=True) except Exception as e: print("RefinedEnhancedRAMEDSv3 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv4 import RefinedEnhancedRAMEDSv4 lama_register["RefinedEnhancedRAMEDSv4"] = RefinedEnhancedRAMEDSv4 - LLAMARefinedEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4").set_name( - "LLAMARefinedEnhancedRAMEDSv4", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4").set_name("LLAMARefinedEnhancedRAMEDSv4", register=True) except Exception as e: print("RefinedEnhancedRAMEDSv4 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnhancedStrategyDE import RefinedEnhancedStrategyDE lama_register["RefinedEnhancedStrategyDE"] = RefinedEnhancedStrategyDE - LLAMARefinedEnhancedStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE").set_name( - "LLAMARefinedEnhancedStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE").set_name("LLAMARefinedEnhancedStrategyDE", register=True) except Exception as e: print("RefinedEnhancedStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEnhancedUltraRefinedRAMEDS import ( - RefinedEnhancedUltraRefinedRAMEDS, - ) + from nevergrad.optimization.lama.RefinedEnhancedUltraRefinedRAMEDS import RefinedEnhancedUltraRefinedRAMEDS lama_register["RefinedEnhancedUltraRefinedRAMEDS"] = RefinedEnhancedUltraRefinedRAMEDS - LLAMARefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( - method="LLAMARefinedEnhancedUltraRefinedRAMEDS" - ).set_name("LLAMARefinedEnhancedUltraRefinedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedEnhancedUltraRefinedRAMEDS").set_name("LLAMARefinedEnhancedUltraRefinedRAMEDS", register=True) except Exception as e: print("RefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedEnsembleAdaptiveQuantumDE import RefinedEnsembleAdaptiveQuantumDE lama_register["RefinedEnsembleAdaptiveQuantumDE"] = RefinedEnsembleAdaptiveQuantumDE - LLAMARefinedEnsembleAdaptiveQuantumDE = NonObjectOptimizer( - method="LLAMARefinedEnsembleAdaptiveQuantumDE" - ).set_name("LLAMARefinedEnsembleAdaptiveQuantumDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMARefinedEnsembleAdaptiveQuantumDE").set_name("LLAMARefinedEnsembleAdaptiveQuantumDE", register=True) except Exception as e: print("RefinedEnsembleAdaptiveQuantumDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEvolutionaryGradientHybridOptimizerV3 import ( - RefinedEvolutionaryGradientHybridOptimizerV3, - ) + from nevergrad.optimization.lama.RefinedEvolutionaryGradientHybridOptimizerV3 import RefinedEvolutionaryGradientHybridOptimizerV3 - lama_register["RefinedEvolutionaryGradientHybridOptimizerV3"] = ( - RefinedEvolutionaryGradientHybridOptimizerV3 - ) - LLAMARefinedEvolutionaryGradientHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3" - ).set_name("LLAMARefinedEvolutionaryGradientHybridOptimizerV3", register=True) + lama_register["RefinedEvolutionaryGradientHybridOptimizerV3"] = RefinedEvolutionaryGradientHybridOptimizerV3 + res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEvolutionaryGradientHybridOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3").set_name("LLAMARefinedEvolutionaryGradientHybridOptimizerV3", register=True) except Exception as e: print("RefinedEvolutionaryGradientHybridOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedEvolutionaryTuningStrategy import ( - RefinedEvolutionaryTuningStrategy, - ) + from nevergrad.optimization.lama.RefinedEvolutionaryTuningStrategy import RefinedEvolutionaryTuningStrategy lama_register["RefinedEvolutionaryTuningStrategy"] = RefinedEvolutionaryTuningStrategy - LLAMARefinedEvolutionaryTuningStrategy = NonObjectOptimizer( - method="LLAMARefinedEvolutionaryTuningStrategy" - ).set_name("LLAMARefinedEvolutionaryTuningStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryTuningStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedEvolutionaryTuningStrategy = NonObjectOptimizer(method="LLAMARefinedEvolutionaryTuningStrategy").set_name("LLAMARefinedEvolutionaryTuningStrategy", register=True) except Exception as e: print("RefinedEvolutionaryTuningStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedGlobalClimbingOptimizerV2 import RefinedGlobalClimbingOptimizerV2 lama_register["RefinedGlobalClimbingOptimizerV2"] = RefinedGlobalClimbingOptimizerV2 - LLAMARefinedGlobalClimbingOptimizerV2 = NonObjectOptimizer( - method="LLAMARefinedGlobalClimbingOptimizerV2" - ).set_name("LLAMARefinedGlobalClimbingOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGlobalClimbingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGlobalClimbingOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedGlobalClimbingOptimizerV2").set_name("LLAMARefinedGlobalClimbingOptimizerV2", register=True) except Exception as e: print("RefinedGlobalClimbingOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGlobalLocalBalancingOptimizer import ( - RefinedGlobalLocalBalancingOptimizer, - ) + from nevergrad.optimization.lama.RefinedGlobalLocalBalancingOptimizer import RefinedGlobalLocalBalancingOptimizer lama_register["RefinedGlobalLocalBalancingOptimizer"] = RefinedGlobalLocalBalancingOptimizer - LLAMARefinedGlobalLocalBalancingOptimizer = NonObjectOptimizer( - method="LLAMARefinedGlobalLocalBalancingOptimizer" - ).set_name("LLAMARefinedGlobalLocalBalancingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGlobalLocalBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGlobalLocalBalancingOptimizer = NonObjectOptimizer(method="LLAMARefinedGlobalLocalBalancingOptimizer").set_name("LLAMARefinedGlobalLocalBalancingOptimizer", register=True) except Exception as e: print("RefinedGlobalLocalBalancingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGlobalStructureAdaptiveEvolverV2 import ( - RefinedGlobalStructureAdaptiveEvolverV2, - ) + from nevergrad.optimization.lama.RefinedGlobalStructureAdaptiveEvolverV2 import RefinedGlobalStructureAdaptiveEvolverV2 lama_register["RefinedGlobalStructureAdaptiveEvolverV2"] = RefinedGlobalStructureAdaptiveEvolverV2 - LLAMARefinedGlobalStructureAdaptiveEvolverV2 = NonObjectOptimizer( - method="LLAMARefinedGlobalStructureAdaptiveEvolverV2" - ).set_name("LLAMARefinedGlobalStructureAdaptiveEvolverV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAdaptiveEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGlobalStructureAdaptiveEvolverV2 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAdaptiveEvolverV2").set_name("LLAMARefinedGlobalStructureAdaptiveEvolverV2", register=True) except Exception as e: print("RefinedGlobalStructureAdaptiveEvolverV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV2 import ( - RefinedGlobalStructureAwareOptimizerV2, - ) + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV2 import RefinedGlobalStructureAwareOptimizerV2 lama_register["RefinedGlobalStructureAwareOptimizerV2"] = RefinedGlobalStructureAwareOptimizerV2 - LLAMARefinedGlobalStructureAwareOptimizerV2 = NonObjectOptimizer( - method="LLAMARefinedGlobalStructureAwareOptimizerV2" - ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGlobalStructureAwareOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV2").set_name("LLAMARefinedGlobalStructureAwareOptimizerV2", register=True) except Exception as e: print("RefinedGlobalStructureAwareOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV3 import ( - RefinedGlobalStructureAwareOptimizerV3, - ) + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV3 import RefinedGlobalStructureAwareOptimizerV3 lama_register["RefinedGlobalStructureAwareOptimizerV3"] = RefinedGlobalStructureAwareOptimizerV3 - LLAMARefinedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( - method="LLAMARefinedGlobalStructureAwareOptimizerV3" - ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV3").set_name("LLAMARefinedGlobalStructureAwareOptimizerV3", register=True) except Exception as e: print("RefinedGlobalStructureAwareOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientBalancedExplorationPSO import ( - RefinedGradientBalancedExplorationPSO, - ) + from nevergrad.optimization.lama.RefinedGradientBalancedExplorationPSO import RefinedGradientBalancedExplorationPSO lama_register["RefinedGradientBalancedExplorationPSO"] = RefinedGradientBalancedExplorationPSO - LLAMARefinedGradientBalancedExplorationPSO = NonObjectOptimizer( - method="LLAMARefinedGradientBalancedExplorationPSO" - ).set_name("LLAMARefinedGradientBalancedExplorationPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGradientBalancedExplorationPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBalancedExplorationPSO = NonObjectOptimizer(method="LLAMARefinedGradientBalancedExplorationPSO").set_name("LLAMARefinedGradientBalancedExplorationPSO", register=True) except Exception as e: print("RefinedGradientBalancedExplorationPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration import ( - RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration, - ) + from nevergrad.optimization.lama.RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration import RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration - lama_register["RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration"] = ( - RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration - ) - LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration = NonObjectOptimizer( - method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration" - ).set_name("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration", register=True) + lama_register["RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration"] = RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration + res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration = NonObjectOptimizer(method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration").set_name("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration", register=True) except Exception as e: print("RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemoryAnnealing import ( - RefinedGradientBoostedMemoryAnnealing, - ) + from nevergrad.optimization.lama.RefinedGradientBoostedMemoryAnnealing import RefinedGradientBoostedMemoryAnnealing lama_register["RefinedGradientBoostedMemoryAnnealing"] = RefinedGradientBoostedMemoryAnnealing - LLAMARefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( - method="LLAMARefinedGradientBoostedMemoryAnnealing" - ).set_name("LLAMARefinedGradientBoostedMemoryAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemoryAnnealing").set_name("LLAMARefinedGradientBoostedMemoryAnnealing", register=True) except Exception as e: print("RefinedGradientBoostedMemoryAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealing import ( - RefinedGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealing import RefinedGradientBoostedMemorySimulatedAnnealing - lama_register["RefinedGradientBoostedMemorySimulatedAnnealing"] = ( - RefinedGradientBoostedMemorySimulatedAnnealing - ) - LLAMARefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["RefinedGradientBoostedMemorySimulatedAnnealing"] = RefinedGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("RefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealingPlus import ( - RefinedGradientBoostedMemorySimulatedAnnealingPlus, - ) + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealingPlus import RefinedGradientBoostedMemorySimulatedAnnealingPlus - lama_register["RefinedGradientBoostedMemorySimulatedAnnealingPlus"] = ( - RefinedGradientBoostedMemorySimulatedAnnealingPlus - ) - LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( - method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus" - ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus", register=True) + lama_register["RefinedGradientBoostedMemorySimulatedAnnealingPlus"] = RefinedGradientBoostedMemorySimulatedAnnealingPlus + res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus", register=True) except Exception as e: print("RefinedGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedGradientBoostedOptimizer import RefinedGradientBoostedOptimizer lama_register["RefinedGradientBoostedOptimizer"] = RefinedGradientBoostedOptimizer - LLAMARefinedGradientBoostedOptimizer = NonObjectOptimizer( - method="LLAMARefinedGradientBoostedOptimizer" - ).set_name("LLAMARefinedGradientBoostedOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientBoostedOptimizer = NonObjectOptimizer(method="LLAMARefinedGradientBoostedOptimizer").set_name("LLAMARefinedGradientBoostedOptimizer", register=True) except Exception as e: print("RefinedGradientBoostedOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedGradientGuidedEvolutionStrategy import ( - RefinedGradientGuidedEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedGradientGuidedEvolutionStrategy import RefinedGradientGuidedEvolutionStrategy lama_register["RefinedGradientGuidedEvolutionStrategy"] = RefinedGradientGuidedEvolutionStrategy - LLAMARefinedGradientGuidedEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedGradientGuidedEvolutionStrategy" - ).set_name("LLAMARefinedGradientGuidedEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedGradientGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedGradientGuidedEvolutionStrategy").set_name("LLAMARefinedGradientGuidedEvolutionStrategy", register=True) except Exception as e: print("RefinedGradientGuidedEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution import ( - RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution import RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution - lama_register["RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( - RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution - ) - LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) + lama_register["RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution"] = RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveDifferentialEvolution import ( - RefinedHybridAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedHybridAdaptiveDifferentialEvolution import RefinedHybridAdaptiveDifferentialEvolution lama_register["RefinedHybridAdaptiveDifferentialEvolution"] = RefinedHybridAdaptiveDifferentialEvolution - LLAMARefinedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedHybridAdaptiveDifferentialEvolution" - ).set_name("LLAMARefinedHybridAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveDifferentialEvolution").set_name("LLAMARefinedHybridAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RefinedHybridAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridAdaptiveGradientPSO import RefinedHybridAdaptiveGradientPSO lama_register["RefinedHybridAdaptiveGradientPSO"] = RefinedHybridAdaptiveGradientPSO - LLAMARefinedHybridAdaptiveGradientPSO = NonObjectOptimizer( - method="LLAMARefinedHybridAdaptiveGradientPSO" - ).set_name("LLAMARefinedHybridAdaptiveGradientPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveGradientPSO").set_name("LLAMARefinedHybridAdaptiveGradientPSO", register=True) except Exception as e: print("RefinedHybridAdaptiveGradientPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveMultiStageOptimization import ( - RefinedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.RefinedHybridAdaptiveMultiStageOptimization import RefinedHybridAdaptiveMultiStageOptimization lama_register["RefinedHybridAdaptiveMultiStageOptimization"] = RefinedHybridAdaptiveMultiStageOptimization - LLAMARefinedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMARefinedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMARefinedHybridAdaptiveMultiStageOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("RefinedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridCovarianceMatrixDifferentialEvolution import ( - RefinedHybridCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedHybridCovarianceMatrixDifferentialEvolution import RefinedHybridCovarianceMatrixDifferentialEvolution - lama_register["RefinedHybridCovarianceMatrixDifferentialEvolution"] = ( - RefinedHybridCovarianceMatrixDifferentialEvolution - ) - LLAMARefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMARefinedHybridCovarianceMatrixDifferentialEvolution", register=True) + lama_register["RefinedHybridCovarianceMatrixDifferentialEvolution"] = RefinedHybridCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedHybridCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("RefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridDEPSO import RefinedHybridDEPSO lama_register["RefinedHybridDEPSO"] = RefinedHybridDEPSO - LLAMARefinedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO").set_name( - "LLAMARefinedHybridDEPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO").set_name("LLAMARefinedHybridDEPSO", register=True) except Exception as e: print("RefinedHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridDEPSOWithAdaptiveMemoryV4 import ( - RefinedHybridDEPSOWithAdaptiveMemoryV4, - ) + from nevergrad.optimization.lama.RefinedHybridDEPSOWithAdaptiveMemoryV4 import RefinedHybridDEPSOWithAdaptiveMemoryV4 lama_register["RefinedHybridDEPSOWithAdaptiveMemoryV4"] = RefinedHybridDEPSOWithAdaptiveMemoryV4 - LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4 = NonObjectOptimizer( - method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4" - ).set_name("LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4 = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4").set_name("LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4", register=True) except Exception as e: print("RefinedHybridDEPSOWithAdaptiveMemoryV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridDEPSOWithDynamicAdaptationV3 import ( - RefinedHybridDEPSOWithDynamicAdaptationV3, - ) + from nevergrad.optimization.lama.RefinedHybridDEPSOWithDynamicAdaptationV3 import RefinedHybridDEPSOWithDynamicAdaptationV3 lama_register["RefinedHybridDEPSOWithDynamicAdaptationV3"] = RefinedHybridDEPSOWithDynamicAdaptationV3 - LLAMARefinedHybridDEPSOWithDynamicAdaptationV3 = NonObjectOptimizer( - method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3" - ).set_name("LLAMARefinedHybridDEPSOWithDynamicAdaptationV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridDEPSOWithDynamicAdaptationV3 = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3").set_name("LLAMARefinedHybridDEPSOWithDynamicAdaptationV3", register=True) except Exception as e: print("RefinedHybridDEPSOWithDynamicAdaptationV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( - RefinedHybridDualPhaseParticleSwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedHybridDualPhaseParticleSwarmDifferentialEvolution import RefinedHybridDualPhaseParticleSwarmDifferentialEvolution - lama_register["RefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( - RefinedHybridDualPhaseParticleSwarmDifferentialEvolution - ) - LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution" - ).set_name("LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) + lama_register["RefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = RefinedHybridDualPhaseParticleSwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) except Exception as e: print("RefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridDynamicClusterOptimization import ( - RefinedHybridDynamicClusterOptimization, - ) + from nevergrad.optimization.lama.RefinedHybridDynamicClusterOptimization import RefinedHybridDynamicClusterOptimization lama_register["RefinedHybridDynamicClusterOptimization"] = RefinedHybridDynamicClusterOptimization - LLAMARefinedHybridDynamicClusterOptimization = NonObjectOptimizer( - method="LLAMARefinedHybridDynamicClusterOptimization" - ).set_name("LLAMARefinedHybridDynamicClusterOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridDynamicClusterOptimization = NonObjectOptimizer(method="LLAMARefinedHybridDynamicClusterOptimization").set_name("LLAMARefinedHybridDynamicClusterOptimization", register=True) except Exception as e: print("RefinedHybridDynamicClusterOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE import ( - RefinedHybridEliteGuidedMutationDE, - ) + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE import RefinedHybridEliteGuidedMutationDE lama_register["RefinedHybridEliteGuidedMutationDE"] = RefinedHybridEliteGuidedMutationDE - LLAMARefinedHybridEliteGuidedMutationDE = NonObjectOptimizer( - method="LLAMARefinedHybridEliteGuidedMutationDE" - ).set_name("LLAMARefinedHybridEliteGuidedMutationDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE").set_name("LLAMARefinedHybridEliteGuidedMutationDE", register=True) except Exception as e: print("RefinedHybridEliteGuidedMutationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v2 import ( - RefinedHybridEliteGuidedMutationDE_v2, - ) + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v2 import RefinedHybridEliteGuidedMutationDE_v2 lama_register["RefinedHybridEliteGuidedMutationDE_v2"] = RefinedHybridEliteGuidedMutationDE_v2 - LLAMARefinedHybridEliteGuidedMutationDE_v2 = NonObjectOptimizer( - method="LLAMARefinedHybridEliteGuidedMutationDE_v2" - ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v2").set_name("LLAMARefinedHybridEliteGuidedMutationDE_v2", register=True) except Exception as e: print("RefinedHybridEliteGuidedMutationDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v3 import ( - RefinedHybridEliteGuidedMutationDE_v3, - ) + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v3 import RefinedHybridEliteGuidedMutationDE_v3 lama_register["RefinedHybridEliteGuidedMutationDE_v3"] = RefinedHybridEliteGuidedMutationDE_v3 - LLAMARefinedHybridEliteGuidedMutationDE_v3 = NonObjectOptimizer( - method="LLAMARefinedHybridEliteGuidedMutationDE_v3" - ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v3").set_name("LLAMARefinedHybridEliteGuidedMutationDE_v3", register=True) except Exception as e: print("RefinedHybridEliteGuidedMutationDE_v3 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridEvolutionStrategyV4 import RefinedHybridEvolutionStrategyV4 lama_register["RefinedHybridEvolutionStrategyV4"] = RefinedHybridEvolutionStrategyV4 - LLAMARefinedHybridEvolutionStrategyV4 = NonObjectOptimizer( - method="LLAMARefinedHybridEvolutionStrategyV4" - ).set_name("LLAMARefinedHybridEvolutionStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridEvolutionStrategyV4 = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionStrategyV4").set_name("LLAMARefinedHybridEvolutionStrategyV4", register=True) except Exception as e: print("RefinedHybridEvolutionStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridEvolutionaryAnnealingOptimizer import ( - RefinedHybridEvolutionaryAnnealingOptimizer, - ) + from nevergrad.optimization.lama.RefinedHybridEvolutionaryAnnealingOptimizer import RefinedHybridEvolutionaryAnnealingOptimizer lama_register["RefinedHybridEvolutionaryAnnealingOptimizer"] = RefinedHybridEvolutionaryAnnealingOptimizer - LLAMARefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( - method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer" - ).set_name("LLAMARefinedHybridEvolutionaryAnnealingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMARefinedHybridEvolutionaryAnnealingOptimizer", register=True) except Exception as e: print("RefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridOptimizer import RefinedHybridOptimizer lama_register["RefinedHybridOptimizer"] = RefinedHybridOptimizer - LLAMARefinedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer").set_name( - "LLAMARefinedHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer").set_name("LLAMARefinedHybridOptimizer", register=True) except Exception as e: print("RefinedHybridOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridPSODEOptimizer import RefinedHybridPSODEOptimizer lama_register["RefinedHybridPSODEOptimizer"] = RefinedHybridPSODEOptimizer - LLAMARefinedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer").set_name( - "LLAMARefinedHybridPSODEOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer").set_name("LLAMARefinedHybridPSODEOptimizer", register=True) except Exception as e: print("RefinedHybridPSODEOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridPSODESimulatedAnnealing import ( - RefinedHybridPSODESimulatedAnnealing, - ) + from nevergrad.optimization.lama.RefinedHybridPSODESimulatedAnnealing import RefinedHybridPSODESimulatedAnnealing lama_register["RefinedHybridPSODESimulatedAnnealing"] = RefinedHybridPSODESimulatedAnnealing - LLAMARefinedHybridPSODESimulatedAnnealing = NonObjectOptimizer( - method="LLAMARefinedHybridPSODESimulatedAnnealing" - ).set_name("LLAMARefinedHybridPSODESimulatedAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridPSODESimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedHybridPSODESimulatedAnnealing").set_name("LLAMARefinedHybridPSODESimulatedAnnealing", register=True) except Exception as e: print("RefinedHybridPSODESimulatedAnnealing can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridPSO_DE import RefinedHybridPSO_DE lama_register["RefinedHybridPSO_DE"] = RefinedHybridPSO_DE - LLAMARefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE").set_name( - "LLAMARefinedHybridPSO_DE", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE").set_name("LLAMARefinedHybridPSO_DE", register=True) except Exception as e: print("RefinedHybridPSO_DE can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridPrecisionSearch import RefinedHybridPrecisionSearch lama_register["RefinedHybridPrecisionSearch"] = RefinedHybridPrecisionSearch - LLAMARefinedHybridPrecisionSearch = NonObjectOptimizer( - method="LLAMARefinedHybridPrecisionSearch" - ).set_name("LLAMARefinedHybridPrecisionSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridPrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridPrecisionSearch = NonObjectOptimizer(method="LLAMARefinedHybridPrecisionSearch").set_name("LLAMARefinedHybridPrecisionSearch", register=True) except Exception as e: print("RefinedHybridPrecisionSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHybridQuantumAdaptiveDE import RefinedHybridQuantumAdaptiveDE lama_register["RefinedHybridQuantumAdaptiveDE"] = RefinedHybridQuantumAdaptiveDE - LLAMARefinedHybridQuantumAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedHybridQuantumAdaptiveDE" - ).set_name("LLAMARefinedHybridQuantumAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedHybridQuantumAdaptiveDE").set_name("LLAMARefinedHybridQuantumAdaptiveDE", register=True) except Exception as e: print("RefinedHybridQuantumAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridQuantumLevyAdaptiveSwarm import ( - RefinedHybridQuantumLevyAdaptiveSwarm, - ) + from nevergrad.optimization.lama.RefinedHybridQuantumLevyAdaptiveSwarm import RefinedHybridQuantumLevyAdaptiveSwarm lama_register["RefinedHybridQuantumLevyAdaptiveSwarm"] = RefinedHybridQuantumLevyAdaptiveSwarm - LLAMARefinedHybridQuantumLevyAdaptiveSwarm = NonObjectOptimizer( - method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm" - ).set_name("LLAMARefinedHybridQuantumLevyAdaptiveSwarm", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridQuantumLevyAdaptiveSwarm = NonObjectOptimizer(method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm").set_name("LLAMARefinedHybridQuantumLevyAdaptiveSwarm", register=True) except Exception as e: print("RefinedHybridQuantumLevyAdaptiveSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHybridQuasiRandomDEGradientAnnealing import ( - RefinedHybridQuasiRandomDEGradientAnnealing, - ) + from nevergrad.optimization.lama.RefinedHybridQuasiRandomDEGradientAnnealing import RefinedHybridQuasiRandomDEGradientAnnealing lama_register["RefinedHybridQuasiRandomDEGradientAnnealing"] = RefinedHybridQuasiRandomDEGradientAnnealing - LLAMARefinedHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( - method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing" - ).set_name("LLAMARefinedHybridQuasiRandomDEGradientAnnealing", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing").set_name("LLAMARefinedHybridQuasiRandomDEGradientAnnealing", register=True) except Exception as e: print("RefinedHybridQuasiRandomDEGradientAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 import ( - RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2, - ) + from nevergrad.optimization.lama.RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 import RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 - lama_register["RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2"] = ( - RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 - ) - LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 = NonObjectOptimizer( - method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2" - ).set_name("LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2", register=True) + lama_register["RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2"] = RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 + res = NonObjectOptimizer(method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 = NonObjectOptimizer(method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2").set_name("LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2", register=True) except Exception as e: print("RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedHyperEvolvedDynamicRAMEDS import RefinedHyperEvolvedDynamicRAMEDS lama_register["RefinedHyperEvolvedDynamicRAMEDS"] = RefinedHyperEvolvedDynamicRAMEDS - LLAMARefinedHyperEvolvedDynamicRAMEDS = NonObjectOptimizer( - method="LLAMARefinedHyperEvolvedDynamicRAMEDS" - ).set_name("LLAMARefinedHyperEvolvedDynamicRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMARefinedHyperEvolvedDynamicRAMEDS").set_name("LLAMARefinedHyperEvolvedDynamicRAMEDS", register=True) except Exception as e: print("RefinedHyperEvolvedDynamicRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperOptimizedDynamicPrecisionOptimizer import ( - RefinedHyperOptimizedDynamicPrecisionOptimizer, - ) + from nevergrad.optimization.lama.RefinedHyperOptimizedDynamicPrecisionOptimizer import RefinedHyperOptimizedDynamicPrecisionOptimizer - lama_register["RefinedHyperOptimizedDynamicPrecisionOptimizer"] = ( - RefinedHyperOptimizedDynamicPrecisionOptimizer - ) - LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( - method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer" - ).set_name("LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer", register=True) + lama_register["RefinedHyperOptimizedDynamicPrecisionOptimizer"] = RefinedHyperOptimizedDynamicPrecisionOptimizer + res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer").set_name("LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer", register=True) except Exception as e: print("RefinedHyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperOptimizedThermalEvolutionaryOptimizer import ( - RefinedHyperOptimizedThermalEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.RefinedHyperOptimizedThermalEvolutionaryOptimizer import RefinedHyperOptimizedThermalEvolutionaryOptimizer - lama_register["RefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( - RefinedHyperOptimizedThermalEvolutionaryOptimizer - ) - LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer" - ).set_name("LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) + lama_register["RefinedHyperOptimizedThermalEvolutionaryOptimizer"] = RefinedHyperOptimizedThermalEvolutionaryOptimizer + res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) except Exception as e: print("RefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperRefinedDynamicPrecisionOptimizerV50 import ( - RefinedHyperRefinedDynamicPrecisionOptimizerV50, - ) + from nevergrad.optimization.lama.RefinedHyperRefinedDynamicPrecisionOptimizerV50 import RefinedHyperRefinedDynamicPrecisionOptimizerV50 - lama_register["RefinedHyperRefinedDynamicPrecisionOptimizerV50"] = ( - RefinedHyperRefinedDynamicPrecisionOptimizerV50 - ) - LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50 = NonObjectOptimizer( - method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50" - ).set_name("LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50", register=True) + lama_register["RefinedHyperRefinedDynamicPrecisionOptimizerV50"] = RefinedHyperRefinedDynamicPrecisionOptimizerV50 + res = NonObjectOptimizer(method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50 = NonObjectOptimizer(method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50").set_name("LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50", register=True) except Exception as e: print("RefinedHyperRefinedDynamicPrecisionOptimizerV50 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV52 import ( - RefinedHyperStrategicOptimizerV52, - ) + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV52 import RefinedHyperStrategicOptimizerV52 lama_register["RefinedHyperStrategicOptimizerV52"] = RefinedHyperStrategicOptimizerV52 - LLAMARefinedHyperStrategicOptimizerV52 = NonObjectOptimizer( - method="LLAMARefinedHyperStrategicOptimizerV52" - ).set_name("LLAMARefinedHyperStrategicOptimizerV52", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperStrategicOptimizerV52 = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV52").set_name("LLAMARefinedHyperStrategicOptimizerV52", register=True) except Exception as e: print("RefinedHyperStrategicOptimizerV52 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV55 import ( - RefinedHyperStrategicOptimizerV55, - ) + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV55 import RefinedHyperStrategicOptimizerV55 lama_register["RefinedHyperStrategicOptimizerV55"] = RefinedHyperStrategicOptimizerV55 - LLAMARefinedHyperStrategicOptimizerV55 = NonObjectOptimizer( - method="LLAMARefinedHyperStrategicOptimizerV55" - ).set_name("LLAMARefinedHyperStrategicOptimizerV55", register=True) + res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedHyperStrategicOptimizerV55 = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV55").set_name("LLAMARefinedHyperStrategicOptimizerV55", register=True) except Exception as e: print("RefinedHyperStrategicOptimizerV55 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution import ( - RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution import RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution - lama_register["RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( - RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution - ) - LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution" - ).set_name("LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) + lama_register["RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution"] = RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 import ( - RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2, - ) + from nevergrad.optimization.lama.RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 import RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 - lama_register["RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2"] = ( - RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 - ) - LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 = NonObjectOptimizer( - method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2" - ).set_name("LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2", register=True) + lama_register["RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2"] = RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 + res = NonObjectOptimizer(method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2").set_name("LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2", register=True) except Exception as e: print("RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 import ( - RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4, - ) + from nevergrad.optimization.lama.RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 import RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 - lama_register["RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4"] = ( - RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 - ) - LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 = NonObjectOptimizer( - method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4" - ).set_name("LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4", register=True) + lama_register["RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4"] = RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 + res = NonObjectOptimizer(method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 = NonObjectOptimizer(method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4").set_name("LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4", register=True) except Exception as e: print("RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedInertiaFocalOptimizer import RefinedInertiaFocalOptimizer lama_register["RefinedInertiaFocalOptimizer"] = RefinedInertiaFocalOptimizer - LLAMARefinedInertiaFocalOptimizer = NonObjectOptimizer( - method="LLAMARefinedInertiaFocalOptimizer" - ).set_name("LLAMARefinedInertiaFocalOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedInertiaFocalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedInertiaFocalOptimizer = NonObjectOptimizer(method="LLAMARefinedInertiaFocalOptimizer").set_name("LLAMARefinedInertiaFocalOptimizer", register=True) except Exception as e: print("RefinedInertiaFocalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedIntelligentEvolvingAdaptiveStrategyV35 import ( - RefinedIntelligentEvolvingAdaptiveStrategyV35, - ) + from nevergrad.optimization.lama.RefinedIntelligentEvolvingAdaptiveStrategyV35 import RefinedIntelligentEvolvingAdaptiveStrategyV35 - lama_register["RefinedIntelligentEvolvingAdaptiveStrategyV35"] = ( - RefinedIntelligentEvolvingAdaptiveStrategyV35 - ) - LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35 = NonObjectOptimizer( - method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35" - ).set_name("LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35", register=True) + lama_register["RefinedIntelligentEvolvingAdaptiveStrategyV35"] = RefinedIntelligentEvolvingAdaptiveStrategyV35 + res = NonObjectOptimizer(method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35 = NonObjectOptimizer(method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35").set_name("LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35", register=True) except Exception as e: print("RefinedIntelligentEvolvingAdaptiveStrategyV35 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV10Plus import ( - RefinedIslandEvolutionStrategyV10Plus, - ) + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV10Plus import RefinedIslandEvolutionStrategyV10Plus lama_register["RefinedIslandEvolutionStrategyV10Plus"] = RefinedIslandEvolutionStrategyV10Plus - LLAMARefinedIslandEvolutionStrategyV10Plus = NonObjectOptimizer( - method="LLAMARefinedIslandEvolutionStrategyV10Plus" - ).set_name("LLAMARefinedIslandEvolutionStrategyV10Plus", register=True) + res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV10Plus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedIslandEvolutionStrategyV10Plus = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV10Plus").set_name("LLAMARefinedIslandEvolutionStrategyV10Plus", register=True) except Exception as e: print("RefinedIslandEvolutionStrategyV10Plus can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV2 import RefinedIslandEvolutionStrategyV2 lama_register["RefinedIslandEvolutionStrategyV2"] = RefinedIslandEvolutionStrategyV2 - LLAMARefinedIslandEvolutionStrategyV2 = NonObjectOptimizer( - method="LLAMARefinedIslandEvolutionStrategyV2" - ).set_name("LLAMARefinedIslandEvolutionStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedIslandEvolutionStrategyV2 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV2").set_name("LLAMARefinedIslandEvolutionStrategyV2", register=True) except Exception as e: print("RefinedIslandEvolutionStrategyV2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV6 import RefinedIslandEvolutionStrategyV6 lama_register["RefinedIslandEvolutionStrategyV6"] = RefinedIslandEvolutionStrategyV6 - LLAMARefinedIslandEvolutionStrategyV6 = NonObjectOptimizer( - method="LLAMARefinedIslandEvolutionStrategyV6" - ).set_name("LLAMARefinedIslandEvolutionStrategyV6", register=True) + res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedIslandEvolutionStrategyV6 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV6").set_name("LLAMARefinedIslandEvolutionStrategyV6", register=True) except Exception as e: print("RefinedIslandEvolutionStrategyV6 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV9 import RefinedIslandEvolutionStrategyV9 lama_register["RefinedIslandEvolutionStrategyV9"] = RefinedIslandEvolutionStrategyV9 - LLAMARefinedIslandEvolutionStrategyV9 = NonObjectOptimizer( - method="LLAMARefinedIslandEvolutionStrategyV9" - ).set_name("LLAMARefinedIslandEvolutionStrategyV9", register=True) + res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedIslandEvolutionStrategyV9 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV9").set_name("LLAMARefinedIslandEvolutionStrategyV9", register=True) except Exception as e: print("RefinedIslandEvolutionStrategyV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemeticDifferentialEvolution import ( - RefinedMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedMemeticDifferentialEvolution import RefinedMemeticDifferentialEvolution lama_register["RefinedMemeticDifferentialEvolution"] = RefinedMemeticDifferentialEvolution - LLAMARefinedMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedMemeticDifferentialEvolution" - ).set_name("LLAMARefinedMemeticDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMemeticDifferentialEvolution").set_name("LLAMARefinedMemeticDifferentialEvolution", register=True) except Exception as e: print("RefinedMemeticDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizer import RefinedMemeticDiverseOptimizer lama_register["RefinedMemeticDiverseOptimizer"] = RefinedMemeticDiverseOptimizer - LLAMARefinedMemeticDiverseOptimizer = NonObjectOptimizer( - method="LLAMARefinedMemeticDiverseOptimizer" - ).set_name("LLAMARefinedMemeticDiverseOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizer").set_name("LLAMARefinedMemeticDiverseOptimizer", register=True) except Exception as e: print("RefinedMemeticDiverseOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizerV4 import RefinedMemeticDiverseOptimizerV4 lama_register["RefinedMemeticDiverseOptimizerV4"] = RefinedMemeticDiverseOptimizerV4 - LLAMARefinedMemeticDiverseOptimizerV4 = NonObjectOptimizer( - method="LLAMARefinedMemeticDiverseOptimizerV4" - ).set_name("LLAMARefinedMemeticDiverseOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemeticDiverseOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizerV4").set_name("LLAMARefinedMemeticDiverseOptimizerV4", register=True) except Exception as e: print("RefinedMemeticDiverseOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemeticQuantumDifferentialOptimizer import ( - RefinedMemeticQuantumDifferentialOptimizer, - ) + from nevergrad.optimization.lama.RefinedMemeticQuantumDifferentialOptimizer import RefinedMemeticQuantumDifferentialOptimizer lama_register["RefinedMemeticQuantumDifferentialOptimizer"] = RefinedMemeticQuantumDifferentialOptimizer - LLAMARefinedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( - method="LLAMARefinedMemeticQuantumDifferentialOptimizer" - ).set_name("LLAMARefinedMemeticQuantumDifferentialOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer(method="LLAMARefinedMemeticQuantumDifferentialOptimizer").set_name("LLAMARefinedMemeticQuantumDifferentialOptimizer", register=True) except Exception as e: print("RefinedMemeticQuantumDifferentialOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryAdaptiveDynamicHybridOptimizer import ( - RefinedMemoryAdaptiveDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedMemoryAdaptiveDynamicHybridOptimizer import RefinedMemoryAdaptiveDynamicHybridOptimizer lama_register["RefinedMemoryAdaptiveDynamicHybridOptimizer"] = RefinedMemoryAdaptiveDynamicHybridOptimizer - LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer" - ).set_name("LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer").set_name("LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer", register=True) except Exception as e: print("RefinedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryAdaptiveHybridOptimizer import ( - RefinedMemoryAdaptiveHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedMemoryAdaptiveHybridOptimizer import RefinedMemoryAdaptiveHybridOptimizer lama_register["RefinedMemoryAdaptiveHybridOptimizer"] = RefinedMemoryAdaptiveHybridOptimizer - LLAMARefinedMemoryAdaptiveHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedMemoryAdaptiveHybridOptimizer" - ).set_name("LLAMARefinedMemoryAdaptiveHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveHybridOptimizer").set_name("LLAMARefinedMemoryAdaptiveHybridOptimizer", register=True) except Exception as e: print("RefinedMemoryAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryEnhancedDynamicHybridOptimizer import ( - RefinedMemoryEnhancedDynamicHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedMemoryEnhancedDynamicHybridOptimizer import RefinedMemoryEnhancedDynamicHybridOptimizer lama_register["RefinedMemoryEnhancedDynamicHybridOptimizer"] = RefinedMemoryEnhancedDynamicHybridOptimizer - LLAMARefinedMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer" - ).set_name("LLAMARefinedMemoryEnhancedDynamicHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMARefinedMemoryEnhancedDynamicHybridOptimizer", register=True) except Exception as e: print("RefinedMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryEnhancedHybridOptimizerV2 import ( - RefinedMemoryEnhancedHybridOptimizerV2, - ) + from nevergrad.optimization.lama.RefinedMemoryEnhancedHybridOptimizerV2 import RefinedMemoryEnhancedHybridOptimizerV2 lama_register["RefinedMemoryEnhancedHybridOptimizerV2"] = RefinedMemoryEnhancedHybridOptimizerV2 - LLAMARefinedMemoryEnhancedHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMARefinedMemoryEnhancedHybridOptimizerV2" - ).set_name("LLAMARefinedMemoryEnhancedHybridOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryEnhancedHybridOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedHybridOptimizerV2").set_name("LLAMARefinedMemoryEnhancedHybridOptimizerV2", register=True) except Exception as e: print("RefinedMemoryEnhancedHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 import ( - RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72, - ) + from nevergrad.optimization.lama.RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 import RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 - lama_register["RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72"] = ( - RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 - ) - LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 = NonObjectOptimizer( - method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72" - ).set_name("LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72", register=True) + lama_register["RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72"] = RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 + res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72").set_name("LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72", register=True) except Exception as e: print("RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMemoryGuidedHybridStrategyV63 import ( - RefinedMemoryGuidedHybridStrategyV63, - ) + from nevergrad.optimization.lama.RefinedMemoryGuidedHybridStrategyV63 import RefinedMemoryGuidedHybridStrategyV63 lama_register["RefinedMemoryGuidedHybridStrategyV63"] = RefinedMemoryGuidedHybridStrategyV63 - LLAMARefinedMemoryGuidedHybridStrategyV63 = NonObjectOptimizer( - method="LLAMARefinedMemoryGuidedHybridStrategyV63" - ).set_name("LLAMARefinedMemoryGuidedHybridStrategyV63", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedHybridStrategyV63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMemoryGuidedHybridStrategyV63 = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedHybridStrategyV63").set_name("LLAMARefinedMemoryGuidedHybridStrategyV63", register=True) except Exception as e: print("RefinedMemoryGuidedHybridStrategyV63 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedMetaNetAQAPSO import RefinedMetaNetAQAPSO lama_register["RefinedMetaNetAQAPSO"] = RefinedMetaNetAQAPSO - LLAMARefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO").set_name( - "LLAMARefinedMetaNetAQAPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO").set_name("LLAMARefinedMetaNetAQAPSO", register=True) except Exception as e: print("RefinedMetaNetAQAPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiFocalAdaptiveElitistStrategyV4 import ( - RefinedMultiFocalAdaptiveElitistStrategyV4, - ) + from nevergrad.optimization.lama.RefinedMultiFocalAdaptiveElitistStrategyV4 import RefinedMultiFocalAdaptiveElitistStrategyV4 lama_register["RefinedMultiFocalAdaptiveElitistStrategyV4"] = RefinedMultiFocalAdaptiveElitistStrategyV4 - LLAMARefinedMultiFocalAdaptiveElitistStrategyV4 = NonObjectOptimizer( - method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4" - ).set_name("LLAMARefinedMultiFocalAdaptiveElitistStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiFocalAdaptiveElitistStrategyV4 = NonObjectOptimizer(method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4").set_name("LLAMARefinedMultiFocalAdaptiveElitistStrategyV4", register=True) except Exception as e: print("RefinedMultiFocalAdaptiveElitistStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiOperatorAdaptiveOptimization import ( - RefinedMultiOperatorAdaptiveOptimization, - ) + from nevergrad.optimization.lama.RefinedMultiOperatorAdaptiveOptimization import RefinedMultiOperatorAdaptiveOptimization lama_register["RefinedMultiOperatorAdaptiveOptimization"] = RefinedMultiOperatorAdaptiveOptimization - LLAMARefinedMultiOperatorAdaptiveOptimization = NonObjectOptimizer( - method="LLAMARefinedMultiOperatorAdaptiveOptimization" - ).set_name("LLAMARefinedMultiOperatorAdaptiveOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiOperatorAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiOperatorAdaptiveOptimization = NonObjectOptimizer(method="LLAMARefinedMultiOperatorAdaptiveOptimization").set_name("LLAMARefinedMultiOperatorAdaptiveOptimization", register=True) except Exception as e: print("RefinedMultiOperatorAdaptiveOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiPhaseAdaptiveHybridDEPSO import ( - RefinedMultiPhaseAdaptiveHybridDEPSO, - ) + from nevergrad.optimization.lama.RefinedMultiPhaseAdaptiveHybridDEPSO import RefinedMultiPhaseAdaptiveHybridDEPSO lama_register["RefinedMultiPhaseAdaptiveHybridDEPSO"] = RefinedMultiPhaseAdaptiveHybridDEPSO - LLAMARefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( - method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO" - ).set_name("LLAMARefinedMultiPhaseAdaptiveHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMARefinedMultiPhaseAdaptiveHybridDEPSO", register=True) except Exception as e: print("RefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedMultiStageAdaptiveSearch import RefinedMultiStageAdaptiveSearch lama_register["RefinedMultiStageAdaptiveSearch"] = RefinedMultiStageAdaptiveSearch - LLAMARefinedMultiStageAdaptiveSearch = NonObjectOptimizer( - method="LLAMARefinedMultiStageAdaptiveSearch" - ).set_name("LLAMARefinedMultiStageAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMARefinedMultiStageAdaptiveSearch").set_name("LLAMARefinedMultiStageAdaptiveSearch", register=True) except Exception as e: print("RefinedMultiStageAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiStrategyDifferentialEvolution import ( - RefinedMultiStrategyDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedMultiStrategyDifferentialEvolution import RefinedMultiStrategyDifferentialEvolution lama_register["RefinedMultiStrategyDifferentialEvolution"] = RefinedMultiStrategyDifferentialEvolution - LLAMARefinedMultiStrategyDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedMultiStrategyDifferentialEvolution" - ).set_name("LLAMARefinedMultiStrategyDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMultiStrategyDifferentialEvolution").set_name("LLAMARefinedMultiStrategyDifferentialEvolution", register=True) except Exception as e: print("RefinedMultiStrategyDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiStrategySelfAdaptiveDE import ( - RefinedMultiStrategySelfAdaptiveDE, - ) + from nevergrad.optimization.lama.RefinedMultiStrategySelfAdaptiveDE import RefinedMultiStrategySelfAdaptiveDE lama_register["RefinedMultiStrategySelfAdaptiveDE"] = RefinedMultiStrategySelfAdaptiveDE - LLAMARefinedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedMultiStrategySelfAdaptiveDE" - ).set_name("LLAMARefinedMultiStrategySelfAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedMultiStrategySelfAdaptiveDE").set_name("LLAMARefinedMultiStrategySelfAdaptiveDE", register=True) except Exception as e: print("RefinedMultiStrategySelfAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedMultiStrategySwarmDifferentialEvolution import ( - RefinedMultiStrategySwarmDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedMultiStrategySwarmDifferentialEvolution import RefinedMultiStrategySwarmDifferentialEvolution - lama_register["RefinedMultiStrategySwarmDifferentialEvolution"] = ( - RefinedMultiStrategySwarmDifferentialEvolution - ) - LLAMARefinedMultiStrategySwarmDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedMultiStrategySwarmDifferentialEvolution" - ).set_name("LLAMARefinedMultiStrategySwarmDifferentialEvolution", register=True) + lama_register["RefinedMultiStrategySwarmDifferentialEvolution"] = RefinedMultiStrategySwarmDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedMultiStrategySwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMultiStrategySwarmDifferentialEvolution").set_name("LLAMARefinedMultiStrategySwarmDifferentialEvolution", register=True) except Exception as e: print("RefinedMultiStrategySwarmDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedNicheDifferentialParticleSwarmOptimizer import ( - RefinedNicheDifferentialParticleSwarmOptimizer, - ) + from nevergrad.optimization.lama.RefinedNicheDifferentialParticleSwarmOptimizer import RefinedNicheDifferentialParticleSwarmOptimizer - lama_register["RefinedNicheDifferentialParticleSwarmOptimizer"] = ( - RefinedNicheDifferentialParticleSwarmOptimizer - ) - LLAMARefinedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( - method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer" - ).set_name("LLAMARefinedNicheDifferentialParticleSwarmOptimizer", register=True) + lama_register["RefinedNicheDifferentialParticleSwarmOptimizer"] = RefinedNicheDifferentialParticleSwarmOptimizer + res = NonObjectOptimizer(method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMARefinedNicheDifferentialParticleSwarmOptimizer", register=True) except Exception as e: print("RefinedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimalDynamicPrecisionOptimizerV15 import ( - RefinedOptimalDynamicPrecisionOptimizerV15, - ) + from nevergrad.optimization.lama.RefinedOptimalDynamicPrecisionOptimizerV15 import RefinedOptimalDynamicPrecisionOptimizerV15 lama_register["RefinedOptimalDynamicPrecisionOptimizerV15"] = RefinedOptimalDynamicPrecisionOptimizerV15 - LLAMARefinedOptimalDynamicPrecisionOptimizerV15 = NonObjectOptimizer( - method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15" - ).set_name("LLAMARefinedOptimalDynamicPrecisionOptimizerV15", register=True) + res = NonObjectOptimizer(method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimalDynamicPrecisionOptimizerV15 = NonObjectOptimizer(method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15").set_name("LLAMARefinedOptimalDynamicPrecisionOptimizerV15", register=True) except Exception as e: print("RefinedOptimalDynamicPrecisionOptimizerV15 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedOptimalEnhancedRAMEDS import RefinedOptimalEnhancedRAMEDS lama_register["RefinedOptimalEnhancedRAMEDS"] = RefinedOptimalEnhancedRAMEDS - LLAMARefinedOptimalEnhancedRAMEDS = NonObjectOptimizer( - method="LLAMARefinedOptimalEnhancedRAMEDS" - ).set_name("LLAMARefinedOptimalEnhancedRAMEDS", register=True) + res = NonObjectOptimizer(method="LLAMARefinedOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMARefinedOptimalEnhancedRAMEDS").set_name("LLAMARefinedOptimalEnhancedRAMEDS", register=True) except Exception as e: print("RefinedOptimalEnhancedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimalEvolutionaryGradientOptimizerV12 import ( - RefinedOptimalEvolutionaryGradientOptimizerV12, - ) + from nevergrad.optimization.lama.RefinedOptimalEvolutionaryGradientOptimizerV12 import RefinedOptimalEvolutionaryGradientOptimizerV12 - lama_register["RefinedOptimalEvolutionaryGradientOptimizerV12"] = ( - RefinedOptimalEvolutionaryGradientOptimizerV12 - ) - LLAMARefinedOptimalEvolutionaryGradientOptimizerV12 = NonObjectOptimizer( - method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12" - ).set_name("LLAMARefinedOptimalEvolutionaryGradientOptimizerV12", register=True) + lama_register["RefinedOptimalEvolutionaryGradientOptimizerV12"] = RefinedOptimalEvolutionaryGradientOptimizerV12 + res = NonObjectOptimizer(method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimalEvolutionaryGradientOptimizerV12 = NonObjectOptimizer(method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12").set_name("LLAMARefinedOptimalEvolutionaryGradientOptimizerV12", register=True) except Exception as e: print("RefinedOptimalEvolutionaryGradientOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 import ( - RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5, - ) + from nevergrad.optimization.lama.RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 import RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 - lama_register["RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5"] = ( - RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 - ) - LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 = NonObjectOptimizer( - method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5" - ).set_name("LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5", register=True) + lama_register["RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5"] = RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 + res = NonObjectOptimizer(method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 = NonObjectOptimizer(method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5").set_name("LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5", register=True) except Exception as e: print("RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing import ( - RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing, - ) + from nevergrad.optimization.lama.RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing import RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing - lama_register["RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( - RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing - ) - LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( - method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing" - ).set_name("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) + lama_register["RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing"] = RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing + res = NonObjectOptimizer(method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) except Exception as e: print("RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimizedEnhancedDualStrategyAdaptiveDE import ( - RefinedOptimizedEnhancedDualStrategyAdaptiveDE, - ) + from nevergrad.optimization.lama.RefinedOptimizedEnhancedDualStrategyAdaptiveDE import RefinedOptimizedEnhancedDualStrategyAdaptiveDE - lama_register["RefinedOptimizedEnhancedDualStrategyAdaptiveDE"] = ( - RefinedOptimizedEnhancedDualStrategyAdaptiveDE - ) - LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE" - ).set_name("LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE", register=True) + lama_register["RefinedOptimizedEnhancedDualStrategyAdaptiveDE"] = RefinedOptimizedEnhancedDualStrategyAdaptiveDE + res = NonObjectOptimizer(method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE").set_name("LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE", register=True) except Exception as e: print("RefinedOptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedOptimizedHybridAdaptiveMultiStageOptimization import ( - RefinedOptimizedHybridAdaptiveMultiStageOptimization, - ) + from nevergrad.optimization.lama.RefinedOptimizedHybridAdaptiveMultiStageOptimization import RefinedOptimizedHybridAdaptiveMultiStageOptimization - lama_register["RefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( - RefinedOptimizedHybridAdaptiveMultiStageOptimization - ) - LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( - method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization" - ).set_name("LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) + lama_register["RefinedOptimizedHybridAdaptiveMultiStageOptimization"] = RefinedOptimizedHybridAdaptiveMultiStageOptimization + res = NonObjectOptimizer(method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) except Exception as e: print("RefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedPrecisionAdaptivePSO import RefinedPrecisionAdaptivePSO lama_register["RefinedPrecisionAdaptivePSO"] = RefinedPrecisionAdaptivePSO - LLAMARefinedPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO").set_name( - "LLAMARefinedPrecisionAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO").set_name("LLAMARefinedPrecisionAdaptivePSO", register=True) except Exception as e: print("RefinedPrecisionAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedPrecisionEnhancedDualStrategyOptimizer import ( - RefinedPrecisionEnhancedDualStrategyOptimizer, - ) + from nevergrad.optimization.lama.RefinedPrecisionEnhancedDualStrategyOptimizer import RefinedPrecisionEnhancedDualStrategyOptimizer - lama_register["RefinedPrecisionEnhancedDualStrategyOptimizer"] = ( - RefinedPrecisionEnhancedDualStrategyOptimizer - ) - LLAMARefinedPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( - method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer" - ).set_name("LLAMARefinedPrecisionEnhancedDualStrategyOptimizer", register=True) + lama_register["RefinedPrecisionEnhancedDualStrategyOptimizer"] = RefinedPrecisionEnhancedDualStrategyOptimizer + res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer").set_name("LLAMARefinedPrecisionEnhancedDualStrategyOptimizer", register=True) except Exception as e: print("RefinedPrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedPrecisionEnhancedSpatialAdaptiveEvolver import ( - RefinedPrecisionEnhancedSpatialAdaptiveEvolver, - ) + from nevergrad.optimization.lama.RefinedPrecisionEnhancedSpatialAdaptiveEvolver import RefinedPrecisionEnhancedSpatialAdaptiveEvolver - lama_register["RefinedPrecisionEnhancedSpatialAdaptiveEvolver"] = ( - RefinedPrecisionEnhancedSpatialAdaptiveEvolver - ) - LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( - method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver" - ).set_name("LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver", register=True) + lama_register["RefinedPrecisionEnhancedSpatialAdaptiveEvolver"] = RefinedPrecisionEnhancedSpatialAdaptiveEvolver + res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver").set_name("LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver", register=True) except Exception as e: print("RefinedPrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedPrecisionEvolutionaryThermalOptimizer import ( - RefinedPrecisionEvolutionaryThermalOptimizer, - ) + from nevergrad.optimization.lama.RefinedPrecisionEvolutionaryThermalOptimizer import RefinedPrecisionEvolutionaryThermalOptimizer - lama_register["RefinedPrecisionEvolutionaryThermalOptimizer"] = ( - RefinedPrecisionEvolutionaryThermalOptimizer - ) - LLAMARefinedPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( - method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer" - ).set_name("LLAMARefinedPrecisionEvolutionaryThermalOptimizer", register=True) + lama_register["RefinedPrecisionEvolutionaryThermalOptimizer"] = RefinedPrecisionEvolutionaryThermalOptimizer + res = NonObjectOptimizer(method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer").set_name("LLAMARefinedPrecisionEvolutionaryThermalOptimizer", register=True) except Exception as e: print("RefinedPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedPrecisionTunedCrossoverElitistStrategyV12 import ( - RefinedPrecisionTunedCrossoverElitistStrategyV12, - ) + from nevergrad.optimization.lama.RefinedPrecisionTunedCrossoverElitistStrategyV12 import RefinedPrecisionTunedCrossoverElitistStrategyV12 - lama_register["RefinedPrecisionTunedCrossoverElitistStrategyV12"] = ( - RefinedPrecisionTunedCrossoverElitistStrategyV12 - ) - LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12 = NonObjectOptimizer( - method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12" - ).set_name("LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12", register=True) + lama_register["RefinedPrecisionTunedCrossoverElitistStrategyV12"] = RefinedPrecisionTunedCrossoverElitistStrategyV12 + res = NonObjectOptimizer(method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12 = NonObjectOptimizer(method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12").set_name("LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12", register=True) except Exception as e: print("RefinedPrecisionTunedCrossoverElitistStrategyV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedProgressiveParticleSwarmOptimization import ( - RefinedProgressiveParticleSwarmOptimization, - ) + from nevergrad.optimization.lama.RefinedProgressiveParticleSwarmOptimization import RefinedProgressiveParticleSwarmOptimization lama_register["RefinedProgressiveParticleSwarmOptimization"] = RefinedProgressiveParticleSwarmOptimization - LLAMARefinedProgressiveParticleSwarmOptimization = NonObjectOptimizer( - method="LLAMARefinedProgressiveParticleSwarmOptimization" - ).set_name("LLAMARefinedProgressiveParticleSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedProgressiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMARefinedProgressiveParticleSwarmOptimization").set_name("LLAMARefinedProgressiveParticleSwarmOptimization", register=True) except Exception as e: print("RefinedProgressiveParticleSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedProgressiveQuorumEvolutionStrategy import ( - RefinedProgressiveQuorumEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedProgressiveQuorumEvolutionStrategy import RefinedProgressiveQuorumEvolutionStrategy lama_register["RefinedProgressiveQuorumEvolutionStrategy"] = RefinedProgressiveQuorumEvolutionStrategy - LLAMARefinedProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedProgressiveQuorumEvolutionStrategy" - ).set_name("LLAMARefinedProgressiveQuorumEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedProgressiveQuorumEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedProgressiveQuorumEvolutionStrategy").set_name("LLAMARefinedProgressiveQuorumEvolutionStrategy", register=True) except Exception as e: print("RefinedProgressiveQuorumEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuadraticAdaptiveEvolutionStrategy import ( - RefinedQuadraticAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.RefinedQuadraticAdaptiveEvolutionStrategy import RefinedQuadraticAdaptiveEvolutionStrategy lama_register["RefinedQuadraticAdaptiveEvolutionStrategy"] = RefinedQuadraticAdaptiveEvolutionStrategy - LLAMARefinedQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy" - ).set_name("LLAMARefinedQuadraticAdaptiveEvolutionStrategy", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy").set_name("LLAMARefinedQuadraticAdaptiveEvolutionStrategy", register=True) except Exception as e: print("RefinedQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveExplorationOptimization import ( - RefinedQuantumAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveExplorationOptimization import RefinedQuantumAdaptiveExplorationOptimization - lama_register["RefinedQuantumAdaptiveExplorationOptimization"] = ( - RefinedQuantumAdaptiveExplorationOptimization - ) - LLAMARefinedQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveExplorationOptimization" - ).set_name("LLAMARefinedQuantumAdaptiveExplorationOptimization", register=True) + lama_register["RefinedQuantumAdaptiveExplorationOptimization"] = RefinedQuantumAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveExplorationOptimization").set_name("LLAMARefinedQuantumAdaptiveExplorationOptimization", register=True) except Exception as e: print("RefinedQuantumAdaptiveExplorationOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridOptimizerV4 import ( - RefinedQuantumAdaptiveHybridOptimizerV4, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridOptimizerV4 import RefinedQuantumAdaptiveHybridOptimizerV4 lama_register["RefinedQuantumAdaptiveHybridOptimizerV4"] = RefinedQuantumAdaptiveHybridOptimizerV4 - LLAMARefinedQuantumAdaptiveHybridOptimizerV4 = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4" - ).set_name("LLAMARefinedQuantumAdaptiveHybridOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveHybridOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4").set_name("LLAMARefinedQuantumAdaptiveHybridOptimizerV4", register=True) except Exception as e: print("RefinedQuantumAdaptiveHybridOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridSearchV3 import ( - RefinedQuantumAdaptiveHybridSearchV3, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridSearchV3 import RefinedQuantumAdaptiveHybridSearchV3 lama_register["RefinedQuantumAdaptiveHybridSearchV3"] = RefinedQuantumAdaptiveHybridSearchV3 - LLAMARefinedQuantumAdaptiveHybridSearchV3 = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveHybridSearchV3" - ).set_name("LLAMARefinedQuantumAdaptiveHybridSearchV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveHybridSearchV3 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridSearchV3").set_name("LLAMARefinedQuantumAdaptiveHybridSearchV3", register=True) except Exception as e: print("RefinedQuantumAdaptiveHybridSearchV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveLevySwarmOptimization import ( - RefinedQuantumAdaptiveLevySwarmOptimization, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveLevySwarmOptimization import RefinedQuantumAdaptiveLevySwarmOptimization lama_register["RefinedQuantumAdaptiveLevySwarmOptimization"] = RefinedQuantumAdaptiveLevySwarmOptimization - LLAMARefinedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization" - ).set_name("LLAMARefinedQuantumAdaptiveLevySwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization").set_name("LLAMARefinedQuantumAdaptiveLevySwarmOptimization", register=True) except Exception as e: print("RefinedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveMultiPopulationDE import ( - RefinedQuantumAdaptiveMultiPopulationDE, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveMultiPopulationDE import RefinedQuantumAdaptiveMultiPopulationDE lama_register["RefinedQuantumAdaptiveMultiPopulationDE"] = RefinedQuantumAdaptiveMultiPopulationDE - LLAMARefinedQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveMultiPopulationDE" - ).set_name("LLAMARefinedQuantumAdaptiveMultiPopulationDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveMultiPopulationDE").set_name("LLAMARefinedQuantumAdaptiveMultiPopulationDE", register=True) except Exception as e: print("RefinedQuantumAdaptiveMultiPopulationDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveOptimizerV2 import ( - RefinedQuantumAdaptiveOptimizerV2, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveOptimizerV2 import RefinedQuantumAdaptiveOptimizerV2 lama_register["RefinedQuantumAdaptiveOptimizerV2"] = RefinedQuantumAdaptiveOptimizerV2 - LLAMARefinedQuantumAdaptiveOptimizerV2 = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveOptimizerV2" - ).set_name("LLAMARefinedQuantumAdaptiveOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveOptimizerV2").set_name("LLAMARefinedQuantumAdaptiveOptimizerV2", register=True) except Exception as e: print("RefinedQuantumAdaptiveOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveVelocityOptimizer import ( - RefinedQuantumAdaptiveVelocityOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumAdaptiveVelocityOptimizer import RefinedQuantumAdaptiveVelocityOptimizer lama_register["RefinedQuantumAdaptiveVelocityOptimizer"] = RefinedQuantumAdaptiveVelocityOptimizer - LLAMARefinedQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumAdaptiveVelocityOptimizer" - ).set_name("LLAMARefinedQuantumAdaptiveVelocityOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveVelocityOptimizer").set_name("LLAMARefinedQuantumAdaptiveVelocityOptimizer", register=True) except Exception as e: print("RefinedQuantumAdaptiveVelocityOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumCognitionAdaptiveTuningOptimizerV15 import ( - RefinedQuantumCognitionAdaptiveTuningOptimizerV15, - ) + from nevergrad.optimization.lama.RefinedQuantumCognitionAdaptiveTuningOptimizerV15 import RefinedQuantumCognitionAdaptiveTuningOptimizerV15 - lama_register["RefinedQuantumCognitionAdaptiveTuningOptimizerV15"] = ( - RefinedQuantumCognitionAdaptiveTuningOptimizerV15 - ) - LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15 = NonObjectOptimizer( - method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15" - ).set_name("LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15", register=True) + lama_register["RefinedQuantumCognitionAdaptiveTuningOptimizerV15"] = RefinedQuantumCognitionAdaptiveTuningOptimizerV15 + res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15").set_name("LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15", register=True) except Exception as e: print("RefinedQuantumCognitionAdaptiveTuningOptimizerV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumCognitionHybridOptimizerV22 import ( - RefinedQuantumCognitionHybridOptimizerV22, - ) + from nevergrad.optimization.lama.RefinedQuantumCognitionHybridOptimizerV22 import RefinedQuantumCognitionHybridOptimizerV22 lama_register["RefinedQuantumCognitionHybridOptimizerV22"] = RefinedQuantumCognitionHybridOptimizerV22 - LLAMARefinedQuantumCognitionHybridOptimizerV22 = NonObjectOptimizer( - method="LLAMARefinedQuantumCognitionHybridOptimizerV22" - ).set_name("LLAMARefinedQuantumCognitionHybridOptimizerV22", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionHybridOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumCognitionHybridOptimizerV22 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionHybridOptimizerV22").set_name("LLAMARefinedQuantumCognitionHybridOptimizerV22", register=True) except Exception as e: print("RefinedQuantumCognitionHybridOptimizerV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV13 import ( - RefinedQuantumCognitionOptimizerV13, - ) + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV13 import RefinedQuantumCognitionOptimizerV13 lama_register["RefinedQuantumCognitionOptimizerV13"] = RefinedQuantumCognitionOptimizerV13 - LLAMARefinedQuantumCognitionOptimizerV13 = NonObjectOptimizer( - method="LLAMARefinedQuantumCognitionOptimizerV13" - ).set_name("LLAMARefinedQuantumCognitionOptimizerV13", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumCognitionOptimizerV13 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV13").set_name("LLAMARefinedQuantumCognitionOptimizerV13", register=True) except Exception as e: print("RefinedQuantumCognitionOptimizerV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV4 import ( - RefinedQuantumCognitionOptimizerV4, - ) + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV4 import RefinedQuantumCognitionOptimizerV4 lama_register["RefinedQuantumCognitionOptimizerV4"] = RefinedQuantumCognitionOptimizerV4 - LLAMARefinedQuantumCognitionOptimizerV4 = NonObjectOptimizer( - method="LLAMARefinedQuantumCognitionOptimizerV4" - ).set_name("LLAMARefinedQuantumCognitionOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumCognitionOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV4").set_name("LLAMARefinedQuantumCognitionOptimizerV4", register=True) except Exception as e: print("RefinedQuantumCognitionOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 import ( - RefinedQuantumCovarianceMatrixDifferentialEvolutionV4, - ) + from nevergrad.optimization.lama.RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 import RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 - lama_register["RefinedQuantumCovarianceMatrixDifferentialEvolutionV4"] = ( - RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 - ) - LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( - method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4" - ).set_name("LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4", register=True) + lama_register["RefinedQuantumCovarianceMatrixDifferentialEvolutionV4"] = RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 + res = NonObjectOptimizer(method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4", register=True) except Exception as e: print("RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism import ( - RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism import RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism"] = ( - RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism - ) - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism" - ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism", register=True) + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism"] = RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism", register=True) except Exception as e: print("RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveLearning import ( - RefinedQuantumDifferentialEvolutionWithAdaptiveLearning, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveLearning import RefinedQuantumDifferentialEvolutionWithAdaptiveLearning - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( - RefinedQuantumDifferentialEvolutionWithAdaptiveLearning - ) - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning" - ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveLearning"] = RefinedQuantumDifferentialEvolutionWithAdaptiveLearning + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) except Exception as e: print("RefinedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( - RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( - RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - ) - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" - ).set_name( - "LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True - ) + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True) except Exception as e: - print( - "RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e - ) - + print("RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e) try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism import ( - RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism import RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism"] = ( - RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism - ) - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism" - ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism", register=True) + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism"] = RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism", register=True) except Exception as e: print("RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialMemeticOptimizer import ( - RefinedQuantumDifferentialMemeticOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialMemeticOptimizer import RefinedQuantumDifferentialMemeticOptimizer lama_register["RefinedQuantumDifferentialMemeticOptimizer"] = RefinedQuantumDifferentialMemeticOptimizer - LLAMARefinedQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialMemeticOptimizer" - ).set_name("LLAMARefinedQuantumDifferentialMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialMemeticOptimizer").set_name("LLAMARefinedQuantumDifferentialMemeticOptimizer", register=True) except Exception as e: print("RefinedQuantumDifferentialMemeticOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialParticleOptimizerWithElitism import ( - RefinedQuantumDifferentialParticleOptimizerWithElitism, - ) + from nevergrad.optimization.lama.RefinedQuantumDifferentialParticleOptimizerWithElitism import RefinedQuantumDifferentialParticleOptimizerWithElitism - lama_register["RefinedQuantumDifferentialParticleOptimizerWithElitism"] = ( - RefinedQuantumDifferentialParticleOptimizerWithElitism - ) - LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( - method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism" - ).set_name("LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism", register=True) + lama_register["RefinedQuantumDifferentialParticleOptimizerWithElitism"] = RefinedQuantumDifferentialParticleOptimizerWithElitism + res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism").set_name("LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism", register=True) except Exception as e: print("RefinedQuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE import ( - RefinedQuantumEnhancedAdaptiveMultiPhaseDE, - ) + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE import RefinedQuantumEnhancedAdaptiveMultiPhaseDE lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE"] = RefinedQuantumEnhancedAdaptiveMultiPhaseDE - LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( - method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE" - ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE").set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE", register=True) except Exception as e: print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 import ( - RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2, - ) + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 import RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 - lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2"] = ( - RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 - ) - LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 = NonObjectOptimizer( - method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2" - ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2", register=True) + lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2"] = RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 + res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2").set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2", register=True) except Exception as e: print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 import ( - RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6, - ) + from nevergrad.optimization.lama.RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 import RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 - lama_register["RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6"] = ( - RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 - ) - LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 = NonObjectOptimizer( - method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6" - ).set_name("LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6", register=True) + lama_register["RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6"] = RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 + res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6").set_name("LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6", register=True) except Exception as e: print("RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedHybridDEPSO import ( - RefinedQuantumEnhancedHybridDEPSO, - ) + from nevergrad.optimization.lama.RefinedQuantumEnhancedHybridDEPSO import RefinedQuantumEnhancedHybridDEPSO lama_register["RefinedQuantumEnhancedHybridDEPSO"] = RefinedQuantumEnhancedHybridDEPSO - LLAMARefinedQuantumEnhancedHybridDEPSO = NonObjectOptimizer( - method="LLAMARefinedQuantumEnhancedHybridDEPSO" - ).set_name("LLAMARefinedQuantumEnhancedHybridDEPSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedHybridDEPSO").set_name("LLAMARefinedQuantumEnhancedHybridDEPSO", register=True) except Exception as e: print("RefinedQuantumEnhancedHybridDEPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptation import ( - RefinedQuantumEvolutionaryAdaptation, - ) + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptation import RefinedQuantumEvolutionaryAdaptation lama_register["RefinedQuantumEvolutionaryAdaptation"] = RefinedQuantumEvolutionaryAdaptation - LLAMARefinedQuantumEvolutionaryAdaptation = NonObjectOptimizer( - method="LLAMARefinedQuantumEvolutionaryAdaptation" - ).set_name("LLAMARefinedQuantumEvolutionaryAdaptation", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEvolutionaryAdaptation = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptation").set_name("LLAMARefinedQuantumEvolutionaryAdaptation", register=True) except Exception as e: print("RefinedQuantumEvolutionaryAdaptation can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptiveOptimizer import ( - RefinedQuantumEvolutionaryAdaptiveOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptiveOptimizer import RefinedQuantumEvolutionaryAdaptiveOptimizer lama_register["RefinedQuantumEvolutionaryAdaptiveOptimizer"] = RefinedQuantumEvolutionaryAdaptiveOptimizer - LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer" - ).set_name("LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer").set_name("LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer", register=True) except Exception as e: print("RefinedQuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumFluxDifferentialSwarm import ( - RefinedQuantumFluxDifferentialSwarm, - ) + from nevergrad.optimization.lama.RefinedQuantumFluxDifferentialSwarm import RefinedQuantumFluxDifferentialSwarm lama_register["RefinedQuantumFluxDifferentialSwarm"] = RefinedQuantumFluxDifferentialSwarm - LLAMARefinedQuantumFluxDifferentialSwarm = NonObjectOptimizer( - method="LLAMARefinedQuantumFluxDifferentialSwarm" - ).set_name("LLAMARefinedQuantumFluxDifferentialSwarm", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMARefinedQuantumFluxDifferentialSwarm").set_name("LLAMARefinedQuantumFluxDifferentialSwarm", register=True) except Exception as e: print("RefinedQuantumFluxDifferentialSwarm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumGradientAdaptiveExplorationOptimization import ( - RefinedQuantumGradientAdaptiveExplorationOptimization, - ) + from nevergrad.optimization.lama.RefinedQuantumGradientAdaptiveExplorationOptimization import RefinedQuantumGradientAdaptiveExplorationOptimization - lama_register["RefinedQuantumGradientAdaptiveExplorationOptimization"] = ( - RefinedQuantumGradientAdaptiveExplorationOptimization - ) - LLAMARefinedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( - method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization" - ).set_name("LLAMARefinedQuantumGradientAdaptiveExplorationOptimization", register=True) + lama_register["RefinedQuantumGradientAdaptiveExplorationOptimization"] = RefinedQuantumGradientAdaptiveExplorationOptimization + res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMARefinedQuantumGradientAdaptiveExplorationOptimization", register=True) except Exception as e: print("RefinedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedQuantumGradientSearch import RefinedQuantumGradientSearch lama_register["RefinedQuantumGradientSearch"] = RefinedQuantumGradientSearch - LLAMARefinedQuantumGradientSearch = NonObjectOptimizer( - method="LLAMARefinedQuantumGradientSearch" - ).set_name("LLAMARefinedQuantumGradientSearch", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumGradientSearch = NonObjectOptimizer(method="LLAMARefinedQuantumGradientSearch").set_name("LLAMARefinedQuantumGradientSearch", register=True) except Exception as e: print("RefinedQuantumGradientSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV6 import ( - RefinedQuantumGuidedHybridSearchV6, - ) + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV6 import RefinedQuantumGuidedHybridSearchV6 lama_register["RefinedQuantumGuidedHybridSearchV6"] = RefinedQuantumGuidedHybridSearchV6 - LLAMARefinedQuantumGuidedHybridSearchV6 = NonObjectOptimizer( - method="LLAMARefinedQuantumGuidedHybridSearchV6" - ).set_name("LLAMARefinedQuantumGuidedHybridSearchV6", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumGuidedHybridSearchV6 = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV6").set_name("LLAMARefinedQuantumGuidedHybridSearchV6", register=True) except Exception as e: print("RefinedQuantumGuidedHybridSearchV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV8 import ( - RefinedQuantumGuidedHybridSearchV8, - ) + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV8 import RefinedQuantumGuidedHybridSearchV8 lama_register["RefinedQuantumGuidedHybridSearchV8"] = RefinedQuantumGuidedHybridSearchV8 - LLAMARefinedQuantumGuidedHybridSearchV8 = NonObjectOptimizer( - method="LLAMARefinedQuantumGuidedHybridSearchV8" - ).set_name("LLAMARefinedQuantumGuidedHybridSearchV8", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumGuidedHybridSearchV8 = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV8").set_name("LLAMARefinedQuantumGuidedHybridSearchV8", register=True) except Exception as e: print("RefinedQuantumGuidedHybridSearchV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumHybridAdaptiveStrategyV3 import ( - RefinedQuantumHybridAdaptiveStrategyV3, - ) + from nevergrad.optimization.lama.RefinedQuantumHybridAdaptiveStrategyV3 import RefinedQuantumHybridAdaptiveStrategyV3 lama_register["RefinedQuantumHybridAdaptiveStrategyV3"] = RefinedQuantumHybridAdaptiveStrategyV3 - LLAMARefinedQuantumHybridAdaptiveStrategyV3 = NonObjectOptimizer( - method="LLAMARefinedQuantumHybridAdaptiveStrategyV3" - ).set_name("LLAMARefinedQuantumHybridAdaptiveStrategyV3", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumHybridAdaptiveStrategyV3 = NonObjectOptimizer(method="LLAMARefinedQuantumHybridAdaptiveStrategyV3").set_name("LLAMARefinedQuantumHybridAdaptiveStrategyV3", register=True) except Exception as e: print("RefinedQuantumHybridAdaptiveStrategyV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumHybridDynamicAdaptiveDE import ( - RefinedQuantumHybridDynamicAdaptiveDE, - ) + from nevergrad.optimization.lama.RefinedQuantumHybridDynamicAdaptiveDE import RefinedQuantumHybridDynamicAdaptiveDE lama_register["RefinedQuantumHybridDynamicAdaptiveDE"] = RefinedQuantumHybridDynamicAdaptiveDE - LLAMARefinedQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedQuantumHybridDynamicAdaptiveDE" - ).set_name("LLAMARefinedQuantumHybridDynamicAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedQuantumHybridDynamicAdaptiveDE").set_name("LLAMARefinedQuantumHybridDynamicAdaptiveDE", register=True) except Exception as e: print("RefinedQuantumHybridDynamicAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumHybridEliteAdaptiveDE import ( - RefinedQuantumHybridEliteAdaptiveDE, - ) + from nevergrad.optimization.lama.RefinedQuantumHybridEliteAdaptiveDE import RefinedQuantumHybridEliteAdaptiveDE lama_register["RefinedQuantumHybridEliteAdaptiveDE"] = RefinedQuantumHybridEliteAdaptiveDE - LLAMARefinedQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( - method="LLAMARefinedQuantumHybridEliteAdaptiveDE" - ).set_name("LLAMARefinedQuantumHybridEliteAdaptiveDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumHybridEliteAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedQuantumHybridEliteAdaptiveDE").set_name("LLAMARefinedQuantumHybridEliteAdaptiveDE", register=True) except Exception as e: print("RefinedQuantumHybridEliteAdaptiveDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInfluenceLocalSearchOptimizer import ( - RefinedQuantumInfluenceLocalSearchOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumInfluenceLocalSearchOptimizer import RefinedQuantumInfluenceLocalSearchOptimizer lama_register["RefinedQuantumInfluenceLocalSearchOptimizer"] = RefinedQuantumInfluenceLocalSearchOptimizer - LLAMARefinedQuantumInfluenceLocalSearchOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer" - ).set_name("LLAMARefinedQuantumInfluenceLocalSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInfluenceLocalSearchOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer").set_name("LLAMARefinedQuantumInfluenceLocalSearchOptimizer", register=True) except Exception as e: print("RefinedQuantumInfluenceLocalSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInformedAdaptiveInertiaOptimizer import ( - RefinedQuantumInformedAdaptiveInertiaOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptiveInertiaOptimizer import RefinedQuantumInformedAdaptiveInertiaOptimizer - lama_register["RefinedQuantumInformedAdaptiveInertiaOptimizer"] = ( - RefinedQuantumInformedAdaptiveInertiaOptimizer - ) - LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer" - ).set_name("LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer", register=True) + lama_register["RefinedQuantumInformedAdaptiveInertiaOptimizer"] = RefinedQuantumInformedAdaptiveInertiaOptimizer + res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer").set_name("LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer", register=True) except Exception as e: print("RefinedQuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInformedAdaptivePSO import ( - RefinedQuantumInformedAdaptivePSO, - ) + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptivePSO import RefinedQuantumInformedAdaptivePSO lama_register["RefinedQuantumInformedAdaptivePSO"] = RefinedQuantumInformedAdaptivePSO - LLAMARefinedQuantumInformedAdaptivePSO = NonObjectOptimizer( - method="LLAMARefinedQuantumInformedAdaptivePSO" - ).set_name("LLAMARefinedQuantumInformedAdaptivePSO", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptivePSO").set_name("LLAMARefinedQuantumInformedAdaptivePSO", register=True) except Exception as e: print("RefinedQuantumInformedAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInformedDifferentialStrategyV2 import ( - RefinedQuantumInformedDifferentialStrategyV2, - ) + from nevergrad.optimization.lama.RefinedQuantumInformedDifferentialStrategyV2 import RefinedQuantumInformedDifferentialStrategyV2 - lama_register["RefinedQuantumInformedDifferentialStrategyV2"] = ( - RefinedQuantumInformedDifferentialStrategyV2 - ) - LLAMARefinedQuantumInformedDifferentialStrategyV2 = NonObjectOptimizer( - method="LLAMARefinedQuantumInformedDifferentialStrategyV2" - ).set_name("LLAMARefinedQuantumInformedDifferentialStrategyV2", register=True) + lama_register["RefinedQuantumInformedDifferentialStrategyV2"] = RefinedQuantumInformedDifferentialStrategyV2 + res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedDifferentialStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInformedDifferentialStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumInformedDifferentialStrategyV2").set_name("LLAMARefinedQuantumInformedDifferentialStrategyV2", register=True) except Exception as e: print("RefinedQuantumInformedDifferentialStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInformedGradientOptimizer import ( - RefinedQuantumInformedGradientOptimizer, - ) + from nevergrad.optimization.lama.RefinedQuantumInformedGradientOptimizer import RefinedQuantumInformedGradientOptimizer lama_register["RefinedQuantumInformedGradientOptimizer"] = RefinedQuantumInformedGradientOptimizer - LLAMARefinedQuantumInformedGradientOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumInformedGradientOptimizer" - ).set_name("LLAMARefinedQuantumInformedGradientOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInformedGradientOptimizer").set_name("LLAMARefinedQuantumInformedGradientOptimizer", register=True) except Exception as e: print("RefinedQuantumInformedGradientOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedQuantumInformedPSO import RefinedQuantumInformedPSO lama_register["RefinedQuantumInformedPSO"] = RefinedQuantumInformedPSO - LLAMARefinedQuantumInformedPSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO").set_name( - "LLAMARefinedQuantumInformedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInformedPSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO").set_name("LLAMARefinedQuantumInformedPSO", register=True) except Exception as e: print("RefinedQuantumInformedPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumInfusedAdaptiveStrategyV2 import ( - RefinedQuantumInfusedAdaptiveStrategyV2, - ) + from nevergrad.optimization.lama.RefinedQuantumInfusedAdaptiveStrategyV2 import RefinedQuantumInfusedAdaptiveStrategyV2 lama_register["RefinedQuantumInfusedAdaptiveStrategyV2"] = RefinedQuantumInfusedAdaptiveStrategyV2 - LLAMARefinedQuantumInfusedAdaptiveStrategyV2 = NonObjectOptimizer( - method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2" - ).set_name("LLAMARefinedQuantumInfusedAdaptiveStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumInfusedAdaptiveStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2").set_name("LLAMARefinedQuantumInfusedAdaptiveStrategyV2", register=True) except Exception as e: print("RefinedQuantumInfusedAdaptiveStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumLevyMemeticDifferentialEvolution import ( - RefinedQuantumLevyMemeticDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedQuantumLevyMemeticDifferentialEvolution import RefinedQuantumLevyMemeticDifferentialEvolution - lama_register["RefinedQuantumLevyMemeticDifferentialEvolution"] = ( - RefinedQuantumLevyMemeticDifferentialEvolution - ) - LLAMARefinedQuantumLevyMemeticDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution" - ).set_name("LLAMARefinedQuantumLevyMemeticDifferentialEvolution", register=True) + lama_register["RefinedQuantumLevyMemeticDifferentialEvolution"] = RefinedQuantumLevyMemeticDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumLevyMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution").set_name("LLAMARefinedQuantumLevyMemeticDifferentialEvolution", register=True) except Exception as e: print("RefinedQuantumLevyMemeticDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumMultiStrategyOptimization import ( - RefinedQuantumMultiStrategyOptimization, - ) + from nevergrad.optimization.lama.RefinedQuantumMultiStrategyOptimization import RefinedQuantumMultiStrategyOptimization lama_register["RefinedQuantumMultiStrategyOptimization"] = RefinedQuantumMultiStrategyOptimization - LLAMARefinedQuantumMultiStrategyOptimization = NonObjectOptimizer( - method="LLAMARefinedQuantumMultiStrategyOptimization" - ).set_name("LLAMARefinedQuantumMultiStrategyOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumMultiStrategyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumMultiStrategyOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumMultiStrategyOptimization").set_name("LLAMARefinedQuantumMultiStrategyOptimization", register=True) except Exception as e: print("RefinedQuantumMultiStrategyOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedQuantumNesterovSynergyV2 import RefinedQuantumNesterovSynergyV2 lama_register["RefinedQuantumNesterovSynergyV2"] = RefinedQuantumNesterovSynergyV2 - LLAMARefinedQuantumNesterovSynergyV2 = NonObjectOptimizer( - method="LLAMARefinedQuantumNesterovSynergyV2" - ).set_name("LLAMARefinedQuantumNesterovSynergyV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumNesterovSynergyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumNesterovSynergyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumNesterovSynergyV2").set_name("LLAMARefinedQuantumNesterovSynergyV2", register=True) except Exception as e: print("RefinedQuantumNesterovSynergyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumResilientCrossoverEnhancer import ( - RefinedQuantumResilientCrossoverEnhancer, - ) + from nevergrad.optimization.lama.RefinedQuantumResilientCrossoverEnhancer import RefinedQuantumResilientCrossoverEnhancer lama_register["RefinedQuantumResilientCrossoverEnhancer"] = RefinedQuantumResilientCrossoverEnhancer - LLAMARefinedQuantumResilientCrossoverEnhancer = NonObjectOptimizer( - method="LLAMARefinedQuantumResilientCrossoverEnhancer" - ).set_name("LLAMARefinedQuantumResilientCrossoverEnhancer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumResilientCrossoverEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumResilientCrossoverEnhancer = NonObjectOptimizer(method="LLAMARefinedQuantumResilientCrossoverEnhancer").set_name("LLAMARefinedQuantumResilientCrossoverEnhancer", register=True) except Exception as e: print("RefinedQuantumResilientCrossoverEnhancer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedQuantumSwarmOptimizer import RefinedQuantumSwarmOptimizer lama_register["RefinedQuantumSwarmOptimizer"] = RefinedQuantumSwarmOptimizer - LLAMARefinedQuantumSwarmOptimizer = NonObjectOptimizer( - method="LLAMARefinedQuantumSwarmOptimizer" - ).set_name("LLAMARefinedQuantumSwarmOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumSwarmOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumSwarmOptimizer").set_name("LLAMARefinedQuantumSwarmOptimizer", register=True) except Exception as e: print("RefinedQuantumSwarmOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV2 import ( - RefinedQuantumSymbioticStrategyV2, - ) + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV2 import RefinedQuantumSymbioticStrategyV2 lama_register["RefinedQuantumSymbioticStrategyV2"] = RefinedQuantumSymbioticStrategyV2 - LLAMARefinedQuantumSymbioticStrategyV2 = NonObjectOptimizer( - method="LLAMARefinedQuantumSymbioticStrategyV2" - ).set_name("LLAMARefinedQuantumSymbioticStrategyV2", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumSymbioticStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV2").set_name("LLAMARefinedQuantumSymbioticStrategyV2", register=True) except Exception as e: print("RefinedQuantumSymbioticStrategyV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV4 import ( - RefinedQuantumSymbioticStrategyV4, - ) + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV4 import RefinedQuantumSymbioticStrategyV4 lama_register["RefinedQuantumSymbioticStrategyV4"] = RefinedQuantumSymbioticStrategyV4 - LLAMARefinedQuantumSymbioticStrategyV4 = NonObjectOptimizer( - method="LLAMARefinedQuantumSymbioticStrategyV4" - ).set_name("LLAMARefinedQuantumSymbioticStrategyV4", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumSymbioticStrategyV4 = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV4").set_name("LLAMARefinedQuantumSymbioticStrategyV4", register=True) except Exception as e: print("RefinedQuantumSymbioticStrategyV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedQuantumTunnelingOptimizerV19 import ( - RefinedQuantumTunnelingOptimizerV19, - ) + from nevergrad.optimization.lama.RefinedQuantumTunnelingOptimizerV19 import RefinedQuantumTunnelingOptimizerV19 lama_register["RefinedQuantumTunnelingOptimizerV19"] = RefinedQuantumTunnelingOptimizerV19 - LLAMARefinedQuantumTunnelingOptimizerV19 = NonObjectOptimizer( - method="LLAMARefinedQuantumTunnelingOptimizerV19" - ).set_name("LLAMARefinedQuantumTunnelingOptimizerV19", register=True) + res = NonObjectOptimizer(method="LLAMARefinedQuantumTunnelingOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedQuantumTunnelingOptimizerV19 = NonObjectOptimizer(method="LLAMARefinedQuantumTunnelingOptimizerV19").set_name("LLAMARefinedQuantumTunnelingOptimizerV19", register=True) except Exception as e: print("RefinedQuantumTunnelingOptimizerV19 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedRAMEDSPro import RefinedRAMEDSPro lama_register["RefinedRAMEDSPro"] = RefinedRAMEDSPro - LLAMARefinedRAMEDSPro = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro").set_name( - "LLAMARefinedRAMEDSPro", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedRAMEDSPro = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro").set_name("LLAMARefinedRAMEDSPro", register=True) except Exception as e: print("RefinedRAMEDSPro can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedRAMEDSv2 import RefinedRAMEDSv2 lama_register["RefinedRAMEDSv2"] = RefinedRAMEDSv2 - LLAMARefinedRAMEDSv2 = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2").set_name( - "LLAMARefinedRAMEDSv2", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedRAMEDSv2 = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2").set_name("LLAMARefinedRAMEDSv2", register=True) except Exception as e: print("RefinedRAMEDSv2 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedSpatialAdaptiveOptimizer import RefinedSpatialAdaptiveOptimizer lama_register["RefinedSpatialAdaptiveOptimizer"] = RefinedSpatialAdaptiveOptimizer - LLAMARefinedSpatialAdaptiveOptimizer = NonObjectOptimizer( - method="LLAMARefinedSpatialAdaptiveOptimizer" - ).set_name("LLAMARefinedSpatialAdaptiveOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedSpatialAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedSpatialAdaptiveOptimizer").set_name("LLAMARefinedSpatialAdaptiveOptimizer", register=True) except Exception as e: print("RefinedSpatialAdaptiveOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedSpiralSearchOptimizer import RefinedSpiralSearchOptimizer lama_register["RefinedSpiralSearchOptimizer"] = RefinedSpiralSearchOptimizer - LLAMARefinedSpiralSearchOptimizer = NonObjectOptimizer( - method="LLAMARefinedSpiralSearchOptimizer" - ).set_name("LLAMARefinedSpiralSearchOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedSpiralSearchOptimizer = NonObjectOptimizer(method="LLAMARefinedSpiralSearchOptimizer").set_name("LLAMARefinedSpiralSearchOptimizer", register=True) except Exception as e: print("RefinedSpiralSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedStochasticBalancingOptimizer import ( - RefinedStochasticBalancingOptimizer, - ) + from nevergrad.optimization.lama.RefinedStochasticBalancingOptimizer import RefinedStochasticBalancingOptimizer lama_register["RefinedStochasticBalancingOptimizer"] = RefinedStochasticBalancingOptimizer - LLAMARefinedStochasticBalancingOptimizer = NonObjectOptimizer( - method="LLAMARefinedStochasticBalancingOptimizer" - ).set_name("LLAMARefinedStochasticBalancingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedStochasticBalancingOptimizer = NonObjectOptimizer(method="LLAMARefinedStochasticBalancingOptimizer").set_name("LLAMARefinedStochasticBalancingOptimizer", register=True) except Exception as e: print("RefinedStochasticBalancingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedStrategicAdaptiveDifferentialEvolution import ( - RefinedStrategicAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedStrategicAdaptiveDifferentialEvolution import RefinedStrategicAdaptiveDifferentialEvolution - lama_register["RefinedStrategicAdaptiveDifferentialEvolution"] = ( - RefinedStrategicAdaptiveDifferentialEvolution - ) - LLAMARefinedStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedStrategicAdaptiveDifferentialEvolution" - ).set_name("LLAMARefinedStrategicAdaptiveDifferentialEvolution", register=True) + lama_register["RefinedStrategicAdaptiveDifferentialEvolution"] = RefinedStrategicAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedStrategicAdaptiveDifferentialEvolution").set_name("LLAMARefinedStrategicAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RefinedStrategicAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedStrategicDiminishingEvolver import ( - RefinedStrategicDiminishingEvolver, - ) + from nevergrad.optimization.lama.RefinedStrategicDiminishingEvolver import RefinedStrategicDiminishingEvolver lama_register["RefinedStrategicDiminishingEvolver"] = RefinedStrategicDiminishingEvolver - LLAMARefinedStrategicDiminishingEvolver = NonObjectOptimizer( - method="LLAMARefinedStrategicDiminishingEvolver" - ).set_name("LLAMARefinedStrategicDiminishingEvolver", register=True) + res = NonObjectOptimizer(method="LLAMARefinedStrategicDiminishingEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedStrategicDiminishingEvolver = NonObjectOptimizer(method="LLAMARefinedStrategicDiminishingEvolver").set_name("LLAMARefinedStrategicDiminishingEvolver", register=True) except Exception as e: print("RefinedStrategicDiminishingEvolver can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedStrategicQuorumWithDirectionalBias import ( - RefinedStrategicQuorumWithDirectionalBias, - ) + from nevergrad.optimization.lama.RefinedStrategicQuorumWithDirectionalBias import RefinedStrategicQuorumWithDirectionalBias lama_register["RefinedStrategicQuorumWithDirectionalBias"] = RefinedStrategicQuorumWithDirectionalBias - LLAMARefinedStrategicQuorumWithDirectionalBias = NonObjectOptimizer( - method="LLAMARefinedStrategicQuorumWithDirectionalBias" - ).set_name("LLAMARefinedStrategicQuorumWithDirectionalBias", register=True) + res = NonObjectOptimizer(method="LLAMARefinedStrategicQuorumWithDirectionalBias")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedStrategicQuorumWithDirectionalBias = NonObjectOptimizer(method="LLAMARefinedStrategicQuorumWithDirectionalBias").set_name("LLAMARefinedStrategicQuorumWithDirectionalBias", register=True) except Exception as e: print("RefinedStrategicQuorumWithDirectionalBias can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedSuperiorAdaptiveStrategyDE import ( - RefinedSuperiorAdaptiveStrategyDE, - ) + from nevergrad.optimization.lama.RefinedSuperiorAdaptiveStrategyDE import RefinedSuperiorAdaptiveStrategyDE lama_register["RefinedSuperiorAdaptiveStrategyDE"] = RefinedSuperiorAdaptiveStrategyDE - LLAMARefinedSuperiorAdaptiveStrategyDE = NonObjectOptimizer( - method="LLAMARefinedSuperiorAdaptiveStrategyDE" - ).set_name("LLAMARefinedSuperiorAdaptiveStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMARefinedSuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedSuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMARefinedSuperiorAdaptiveStrategyDE").set_name("LLAMARefinedSuperiorAdaptiveStrategyDE", register=True) except Exception as e: print("RefinedSuperiorAdaptiveStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedTemporalAdaptiveDifferentialEvolution import ( - RefinedTemporalAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RefinedTemporalAdaptiveDifferentialEvolution import RefinedTemporalAdaptiveDifferentialEvolution - lama_register["RefinedTemporalAdaptiveDifferentialEvolution"] = ( - RefinedTemporalAdaptiveDifferentialEvolution - ) - LLAMARefinedTemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARefinedTemporalAdaptiveDifferentialEvolution" - ).set_name("LLAMARefinedTemporalAdaptiveDifferentialEvolution", register=True) + lama_register["RefinedTemporalAdaptiveDifferentialEvolution"] = RefinedTemporalAdaptiveDifferentialEvolution + res = NonObjectOptimizer(method="LLAMARefinedTemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedTemporalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedTemporalAdaptiveDifferentialEvolution").set_name("LLAMARefinedTemporalAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RefinedTemporalAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimateEnhancedGuidedMassQGSA_v71 import ( - RefinedUltimateEnhancedGuidedMassQGSA_v71, - ) + from nevergrad.optimization.lama.RefinedUltimateEnhancedGuidedMassQGSA_v71 import RefinedUltimateEnhancedGuidedMassQGSA_v71 lama_register["RefinedUltimateEnhancedGuidedMassQGSA_v71"] = RefinedUltimateEnhancedGuidedMassQGSA_v71 - LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71 = NonObjectOptimizer( - method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71" - ).set_name("LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71", register=True) + res = NonObjectOptimizer(method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71 = NonObjectOptimizer(method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71").set_name("LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71", register=True) except Exception as e: print("RefinedUltimateEnhancedGuidedMassQGSA_v71 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV16 import ( - RefinedUltimateEvolutionaryGradientOptimizerV16, - ) + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV16 import RefinedUltimateEvolutionaryGradientOptimizerV16 - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV16"] = ( - RefinedUltimateEvolutionaryGradientOptimizerV16 - ) - LLAMARefinedUltimateEvolutionaryGradientOptimizerV16 = NonObjectOptimizer( - method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16" - ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV16", register=True) + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV16"] = RefinedUltimateEvolutionaryGradientOptimizerV16 + res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV16 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV16", register=True) except Exception as e: print("RefinedUltimateEvolutionaryGradientOptimizerV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV17 import ( - RefinedUltimateEvolutionaryGradientOptimizerV17, - ) + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV17 import RefinedUltimateEvolutionaryGradientOptimizerV17 - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV17"] = ( - RefinedUltimateEvolutionaryGradientOptimizerV17 - ) - LLAMARefinedUltimateEvolutionaryGradientOptimizerV17 = NonObjectOptimizer( - method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17" - ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV17", register=True) + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV17"] = RefinedUltimateEvolutionaryGradientOptimizerV17 + res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV17 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV17", register=True) except Exception as e: print("RefinedUltimateEvolutionaryGradientOptimizerV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV34 import ( - RefinedUltimateEvolutionaryGradientOptimizerV34, - ) + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV34 import RefinedUltimateEvolutionaryGradientOptimizerV34 - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV34"] = ( - RefinedUltimateEvolutionaryGradientOptimizerV34 - ) - LLAMARefinedUltimateEvolutionaryGradientOptimizerV34 = NonObjectOptimizer( - method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34" - ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV34", register=True) + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV34"] = RefinedUltimateEvolutionaryGradientOptimizerV34 + res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV34 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV34", register=True) except Exception as e: print("RefinedUltimateEvolutionaryGradientOptimizerV34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryOptimizer import ( - RefinedUltimateEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryOptimizer import RefinedUltimateEvolutionaryOptimizer lama_register["RefinedUltimateEvolutionaryOptimizer"] = RefinedUltimateEvolutionaryOptimizer - LLAMARefinedUltimateEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMARefinedUltimateEvolutionaryOptimizer" - ).set_name("LLAMARefinedUltimateEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimateEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryOptimizer").set_name("LLAMARefinedUltimateEvolutionaryOptimizer", register=True) except Exception as e: print("RefinedUltimateEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltimatePrecisionEvolutionaryOptimizerV42 import ( - RefinedUltimatePrecisionEvolutionaryOptimizerV42, - ) + from nevergrad.optimization.lama.RefinedUltimatePrecisionEvolutionaryOptimizerV42 import RefinedUltimatePrecisionEvolutionaryOptimizerV42 - lama_register["RefinedUltimatePrecisionEvolutionaryOptimizerV42"] = ( - RefinedUltimatePrecisionEvolutionaryOptimizerV42 - ) - LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42 = NonObjectOptimizer( - method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42" - ).set_name("LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42", register=True) + lama_register["RefinedUltimatePrecisionEvolutionaryOptimizerV42"] = RefinedUltimatePrecisionEvolutionaryOptimizerV42 + res = NonObjectOptimizer(method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42 = NonObjectOptimizer(method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42").set_name("LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42", register=True) except Exception as e: print("RefinedUltimatePrecisionEvolutionaryOptimizerV42 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( - RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer import RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer - lama_register["RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( - RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer - ) - LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) + lama_register["RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer + res = NonObjectOptimizer(method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltraEvolutionaryGradientOptimizerV28 import ( - RefinedUltraEvolutionaryGradientOptimizerV28, - ) + from nevergrad.optimization.lama.RefinedUltraEvolutionaryGradientOptimizerV28 import RefinedUltraEvolutionaryGradientOptimizerV28 - lama_register["RefinedUltraEvolutionaryGradientOptimizerV28"] = ( - RefinedUltraEvolutionaryGradientOptimizerV28 - ) - LLAMARefinedUltraEvolutionaryGradientOptimizerV28 = NonObjectOptimizer( - method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28" - ).set_name("LLAMARefinedUltraEvolutionaryGradientOptimizerV28", register=True) + lama_register["RefinedUltraEvolutionaryGradientOptimizerV28"] = RefinedUltraEvolutionaryGradientOptimizerV28 + res = NonObjectOptimizer(method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltraEvolutionaryGradientOptimizerV28 = NonObjectOptimizer(method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28").set_name("LLAMARefinedUltraEvolutionaryGradientOptimizerV28", register=True) except Exception as e: print("RefinedUltraEvolutionaryGradientOptimizerV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltraOptimizedDynamicPrecisionOptimizerV20 import ( - RefinedUltraOptimizedDynamicPrecisionOptimizerV20, - ) + from nevergrad.optimization.lama.RefinedUltraOptimizedDynamicPrecisionOptimizerV20 import RefinedUltraOptimizedDynamicPrecisionOptimizerV20 - lama_register["RefinedUltraOptimizedDynamicPrecisionOptimizerV20"] = ( - RefinedUltraOptimizedDynamicPrecisionOptimizerV20 - ) - LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20 = NonObjectOptimizer( - method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20" - ).set_name("LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20", register=True) + lama_register["RefinedUltraOptimizedDynamicPrecisionOptimizerV20"] = RefinedUltraOptimizedDynamicPrecisionOptimizerV20 + res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20 = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20").set_name("LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20", register=True) except Exception as e: print("RefinedUltraOptimizedDynamicPrecisionOptimizerV20 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 import ( - RefinedUltraOptimizedEvolutionaryGradientOptimizerV31, - ) + from nevergrad.optimization.lama.RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 import RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 - lama_register["RefinedUltraOptimizedEvolutionaryGradientOptimizerV31"] = ( - RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 - ) - LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31 = NonObjectOptimizer( - method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31" - ).set_name("LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31", register=True) + lama_register["RefinedUltraOptimizedEvolutionaryGradientOptimizerV31"] = RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 + res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31 = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31").set_name("LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31", register=True) except Exception as e: print("RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinedUltraRefinedRAMEDS import RefinedUltraRefinedRAMEDS lama_register["RefinedUltraRefinedRAMEDS"] = RefinedUltraRefinedRAMEDS - LLAMARefinedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS").set_name( - "LLAMARefinedUltraRefinedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS").set_name("LLAMARefinedUltraRefinedRAMEDS", register=True) except Exception as e: print("RefinedUltraRefinedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinementEnhancedHybridOptimizer import ( - RefinementEnhancedHybridOptimizer, - ) + from nevergrad.optimization.lama.RefinementEnhancedHybridOptimizer import RefinementEnhancedHybridOptimizer lama_register["RefinementEnhancedHybridOptimizer"] = RefinementEnhancedHybridOptimizer - LLAMARefinementEnhancedHybridOptimizer = NonObjectOptimizer( - method="LLAMARefinementEnhancedHybridOptimizer" - ).set_name("LLAMARefinementEnhancedHybridOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMARefinementEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinementEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinementEnhancedHybridOptimizer").set_name("LLAMARefinementEnhancedHybridOptimizer", register=True) except Exception as e: print("RefinementEnhancedHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.RefinementSelectiveCohortOptimization import ( - RefinementSelectiveCohortOptimization, - ) + from nevergrad.optimization.lama.RefinementSelectiveCohortOptimization import RefinementSelectiveCohortOptimization lama_register["RefinementSelectiveCohortOptimization"] = RefinementSelectiveCohortOptimization - LLAMARefinementSelectiveCohortOptimization = NonObjectOptimizer( - method="LLAMARefinementSelectiveCohortOptimization" - ).set_name("LLAMARefinementSelectiveCohortOptimization", register=True) + res = NonObjectOptimizer(method="LLAMARefinementSelectiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinementSelectiveCohortOptimization = NonObjectOptimizer(method="LLAMARefinementSelectiveCohortOptimization").set_name("LLAMARefinementSelectiveCohortOptimization", register=True) except Exception as e: print("RefinementSelectiveCohortOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.RefinementTunedPSO import RefinementTunedPSO lama_register["RefinementTunedPSO"] = RefinementTunedPSO - LLAMARefinementTunedPSO = NonObjectOptimizer(method="LLAMARefinementTunedPSO").set_name( - "LLAMARefinementTunedPSO", register=True - ) + res = NonObjectOptimizer(method="LLAMARefinementTunedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARefinementTunedPSO = NonObjectOptimizer(method="LLAMARefinementTunedPSO").set_name("LLAMARefinementTunedPSO", register=True) except Exception as e: print("RefinementTunedPSO can not be imported: ", e) - try: from nevergrad.optimization.lama.ResilientAdaptivePSO import ResilientAdaptivePSO lama_register["ResilientAdaptivePSO"] = ResilientAdaptivePSO - LLAMAResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO").set_name( - "LLAMAResilientAdaptivePSO", register=True - ) + res = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO").set_name("LLAMAResilientAdaptivePSO", register=True) except Exception as e: print("ResilientAdaptivePSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.ResponsiveAdaptiveMemoryStrategyV52 import ( - ResponsiveAdaptiveMemoryStrategyV52, - ) + from nevergrad.optimization.lama.ResponsiveAdaptiveMemoryStrategyV52 import ResponsiveAdaptiveMemoryStrategyV52 lama_register["ResponsiveAdaptiveMemoryStrategyV52"] = ResponsiveAdaptiveMemoryStrategyV52 - LLAMAResponsiveAdaptiveMemoryStrategyV52 = NonObjectOptimizer( - method="LLAMAResponsiveAdaptiveMemoryStrategyV52" - ).set_name("LLAMAResponsiveAdaptiveMemoryStrategyV52", register=True) + res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveMemoryStrategyV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAResponsiveAdaptiveMemoryStrategyV52 = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveMemoryStrategyV52").set_name("LLAMAResponsiveAdaptiveMemoryStrategyV52", register=True) except Exception as e: print("ResponsiveAdaptiveMemoryStrategyV52 can not be imported: ", e) - try: from nevergrad.optimization.lama.ResponsiveAdaptiveStrategyV27 import ResponsiveAdaptiveStrategyV27 lama_register["ResponsiveAdaptiveStrategyV27"] = ResponsiveAdaptiveStrategyV27 - LLAMAResponsiveAdaptiveStrategyV27 = NonObjectOptimizer( - method="LLAMAResponsiveAdaptiveStrategyV27" - ).set_name("LLAMAResponsiveAdaptiveStrategyV27", register=True) + res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveStrategyV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAResponsiveAdaptiveStrategyV27 = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveStrategyV27").set_name("LLAMAResponsiveAdaptiveStrategyV27", register=True) except Exception as e: print("ResponsiveAdaptiveStrategyV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RestartAdaptiveDifferentialEvolutionPSO import ( - RestartAdaptiveDifferentialEvolutionPSO, - ) + from nevergrad.optimization.lama.RestartAdaptiveDifferentialEvolutionPSO import RestartAdaptiveDifferentialEvolutionPSO lama_register["RestartAdaptiveDifferentialEvolutionPSO"] = RestartAdaptiveDifferentialEvolutionPSO - LLAMARestartAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( - method="LLAMARestartAdaptiveDifferentialEvolutionPSO" - ).set_name("LLAMARestartAdaptiveDifferentialEvolutionPSO", register=True) + res = NonObjectOptimizer(method="LLAMARestartAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARestartAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMARestartAdaptiveDifferentialEvolutionPSO").set_name("LLAMARestartAdaptiveDifferentialEvolutionPSO", register=True) except Exception as e: print("RestartAdaptiveDifferentialEvolutionPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.RevisedEnhancedDifferentialEvolutionLSRefinement_v20 import ( - RevisedEnhancedDifferentialEvolutionLSRefinement_v20, - ) + from nevergrad.optimization.lama.RevisedEnhancedDifferentialEvolutionLSRefinement_v20 import RevisedEnhancedDifferentialEvolutionLSRefinement_v20 - lama_register["RevisedEnhancedDifferentialEvolutionLSRefinement_v20"] = ( - RevisedEnhancedDifferentialEvolutionLSRefinement_v20 - ) - LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20 = NonObjectOptimizer( - method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20" - ).set_name("LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20", register=True) + lama_register["RevisedEnhancedDifferentialEvolutionLSRefinement_v20"] = RevisedEnhancedDifferentialEvolutionLSRefinement_v20 + res = NonObjectOptimizer(method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20 = NonObjectOptimizer(method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20").set_name("LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20", register=True) except Exception as e: print("RevisedEnhancedDifferentialEvolutionLSRefinement_v20 can not be imported: ", e) - try: from nevergrad.optimization.lama.RevolutionaryFireworkAlgorithm import RevolutionaryFireworkAlgorithm lama_register["RevolutionaryFireworkAlgorithm"] = RevolutionaryFireworkAlgorithm - LLAMARevolutionaryFireworkAlgorithm = NonObjectOptimizer( - method="LLAMARevolutionaryFireworkAlgorithm" - ).set_name("LLAMARevolutionaryFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMARevolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARevolutionaryFireworkAlgorithm = NonObjectOptimizer(method="LLAMARevolutionaryFireworkAlgorithm").set_name("LLAMARevolutionaryFireworkAlgorithm", register=True) except Exception as e: print("RevolutionaryFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.RobustAdaptiveDifferentialEvolution import ( - RobustAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.RobustAdaptiveDifferentialEvolution import RobustAdaptiveDifferentialEvolution lama_register["RobustAdaptiveDifferentialEvolution"] = RobustAdaptiveDifferentialEvolution - LLAMARobustAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMARobustAdaptiveDifferentialEvolution" - ).set_name("LLAMARobustAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMARobustAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARobustAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARobustAdaptiveDifferentialEvolution").set_name("LLAMARobustAdaptiveDifferentialEvolution", register=True) except Exception as e: print("RobustAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.RobustAdaptiveMemoryLeveragedStrategyV43 import ( - RobustAdaptiveMemoryLeveragedStrategyV43, - ) + from nevergrad.optimization.lama.RobustAdaptiveMemoryLeveragedStrategyV43 import RobustAdaptiveMemoryLeveragedStrategyV43 lama_register["RobustAdaptiveMemoryLeveragedStrategyV43"] = RobustAdaptiveMemoryLeveragedStrategyV43 - LLAMARobustAdaptiveMemoryLeveragedStrategyV43 = NonObjectOptimizer( - method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43" - ).set_name("LLAMARobustAdaptiveMemoryLeveragedStrategyV43", register=True) + res = NonObjectOptimizer(method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARobustAdaptiveMemoryLeveragedStrategyV43 = NonObjectOptimizer(method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43").set_name("LLAMARobustAdaptiveMemoryLeveragedStrategyV43", register=True) except Exception as e: print("RobustAdaptiveMemoryLeveragedStrategyV43 can not be imported: ", e) - try: - from nevergrad.optimization.lama.RobustCovarianceMatrixAdaptationMemeticSearch import ( - RobustCovarianceMatrixAdaptationMemeticSearch, - ) + from nevergrad.optimization.lama.RobustCovarianceMatrixAdaptationMemeticSearch import RobustCovarianceMatrixAdaptationMemeticSearch - lama_register["RobustCovarianceMatrixAdaptationMemeticSearch"] = ( - RobustCovarianceMatrixAdaptationMemeticSearch - ) - LLAMARobustCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( - method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch" - ).set_name("LLAMARobustCovarianceMatrixAdaptationMemeticSearch", register=True) + lama_register["RobustCovarianceMatrixAdaptationMemeticSearch"] = RobustCovarianceMatrixAdaptationMemeticSearch + res = NonObjectOptimizer(method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMARobustCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer(method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch").set_name("LLAMARobustCovarianceMatrixAdaptationMemeticSearch", register=True) except Exception as e: print("RobustCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.SADE import SADE lama_register["SADE"] = SADE + res = NonObjectOptimizer(method="LLAMASADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASADE = NonObjectOptimizer(method="LLAMASADE").set_name("LLAMASADE", register=True) except Exception as e: print("SADE can not be imported: ", e) - try: from nevergrad.optimization.lama.SADEEM import SADEEM lama_register["SADEEM"] = SADEEM + res = NonObjectOptimizer(method="LLAMASADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASADEEM = NonObjectOptimizer(method="LLAMASADEEM").set_name("LLAMASADEEM", register=True) except Exception as e: print("SADEEM can not be imported: ", e) - try: from nevergrad.optimization.lama.SADEIOL import SADEIOL lama_register["SADEIOL"] = SADEIOL + res = NonObjectOptimizer(method="LLAMASADEIOL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASADEIOL = NonObjectOptimizer(method="LLAMASADEIOL").set_name("LLAMASADEIOL", register=True) except Exception as e: print("SADEIOL can not be imported: ", e) - try: from nevergrad.optimization.lama.SADEPF import SADEPF lama_register["SADEPF"] = SADEPF + res = NonObjectOptimizer(method="LLAMASADEPF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASADEPF = NonObjectOptimizer(method="LLAMASADEPF").set_name("LLAMASADEPF", register=True) except Exception as e: print("SADEPF can not be imported: ", e) - try: from nevergrad.optimization.lama.SAGEA import SAGEA lama_register["SAGEA"] = SAGEA + res = NonObjectOptimizer(method="LLAMASAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASAGEA = NonObjectOptimizer(method="LLAMASAGEA").set_name("LLAMASAGEA", register=True) except Exception as e: print("SAGEA can not be imported: ", e) - try: from nevergrad.optimization.lama.SGAE import SGAE lama_register["SGAE"] = SGAE + res = NonObjectOptimizer(method="LLAMASGAE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASGAE = NonObjectOptimizer(method="LLAMASGAE").set_name("LLAMASGAE", register=True) except Exception as e: print("SGAE can not be imported: ", e) - try: from nevergrad.optimization.lama.SGE import SGE lama_register["SGE"] = SGE + res = NonObjectOptimizer(method="LLAMASGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASGE = NonObjectOptimizer(method="LLAMASGE").set_name("LLAMASGE", register=True) except Exception as e: print("SGE can not be imported: ", e) - try: from nevergrad.optimization.lama.SORAMED import SORAMED lama_register["SORAMED"] = SORAMED + res = NonObjectOptimizer(method="LLAMASORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMASORAMED = NonObjectOptimizer(method="LLAMASORAMED").set_name("LLAMASORAMED", register=True) except Exception as e: print("SORAMED can not be imported: ", e) - try: - from nevergrad.optimization.lama.ScaledHybridDifferentialEvolution import ( - ScaledHybridDifferentialEvolution, - ) + from nevergrad.optimization.lama.ScaledHybridDifferentialEvolution import ScaledHybridDifferentialEvolution lama_register["ScaledHybridDifferentialEvolution"] = ScaledHybridDifferentialEvolution - LLAMAScaledHybridDifferentialEvolution = NonObjectOptimizer( - method="LLAMAScaledHybridDifferentialEvolution" - ).set_name("LLAMAScaledHybridDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAScaledHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAScaledHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAScaledHybridDifferentialEvolution").set_name("LLAMAScaledHybridDifferentialEvolution", register=True) except Exception as e: print("ScaledHybridDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptingDifferentialEvolutionOptimizer import ( - SelfAdaptingDifferentialEvolutionOptimizer, - ) + from nevergrad.optimization.lama.SelfAdaptingDifferentialEvolutionOptimizer import SelfAdaptingDifferentialEvolutionOptimizer lama_register["SelfAdaptingDifferentialEvolutionOptimizer"] = SelfAdaptingDifferentialEvolutionOptimizer - LLAMASelfAdaptingDifferentialEvolutionOptimizer = NonObjectOptimizer( - method="LLAMASelfAdaptingDifferentialEvolutionOptimizer" - ).set_name("LLAMASelfAdaptingDifferentialEvolutionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptingDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptingDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptingDifferentialEvolutionOptimizer").set_name("LLAMASelfAdaptingDifferentialEvolutionOptimizer", register=True) except Exception as e: print("SelfAdaptingDifferentialEvolutionOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveCovarianceMatrixDifferentialEvolution import ( - SelfAdaptiveCovarianceMatrixDifferentialEvolution, - ) + from nevergrad.optimization.lama.SelfAdaptiveCovarianceMatrixDifferentialEvolution import SelfAdaptiveCovarianceMatrixDifferentialEvolution - lama_register["SelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( - SelfAdaptiveCovarianceMatrixDifferentialEvolution - ) - LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( - method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution" - ).set_name("LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) + lama_register["SelfAdaptiveCovarianceMatrixDifferentialEvolution"] = SelfAdaptiveCovarianceMatrixDifferentialEvolution + res = NonObjectOptimizer(method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) except Exception as e: print("SelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolution import ( - SelfAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolution import SelfAdaptiveDifferentialEvolution lama_register["SelfAdaptiveDifferentialEvolution"] = SelfAdaptiveDifferentialEvolution - LLAMASelfAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMASelfAdaptiveDifferentialEvolution" - ).set_name("LLAMASelfAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolution").set_name("LLAMASelfAdaptiveDifferentialEvolution", register=True) except Exception as e: print("SelfAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithLocalRestart import ( - SelfAdaptiveDifferentialEvolutionWithLocalRestart, - ) + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithLocalRestart import SelfAdaptiveDifferentialEvolutionWithLocalRestart - lama_register["SelfAdaptiveDifferentialEvolutionWithLocalRestart"] = ( - SelfAdaptiveDifferentialEvolutionWithLocalRestart - ) - LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart = NonObjectOptimizer( - method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart" - ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart", register=True) + lama_register["SelfAdaptiveDifferentialEvolutionWithLocalRestart"] = SelfAdaptiveDifferentialEvolutionWithLocalRestart + res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart", register=True) except Exception as e: print("SelfAdaptiveDifferentialEvolutionWithLocalRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithMemeticSearch import ( - SelfAdaptiveDifferentialEvolutionWithMemeticSearch, - ) + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithMemeticSearch import SelfAdaptiveDifferentialEvolutionWithMemeticSearch - lama_register["SelfAdaptiveDifferentialEvolutionWithMemeticSearch"] = ( - SelfAdaptiveDifferentialEvolutionWithMemeticSearch - ) - LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( - method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch" - ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) + lama_register["SelfAdaptiveDifferentialEvolutionWithMemeticSearch"] = SelfAdaptiveDifferentialEvolutionWithMemeticSearch + res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) except Exception as e: print("SelfAdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithRestart import ( - SelfAdaptiveDifferentialEvolutionWithRestart, - ) + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithRestart import SelfAdaptiveDifferentialEvolutionWithRestart - lama_register["SelfAdaptiveDifferentialEvolutionWithRestart"] = ( - SelfAdaptiveDifferentialEvolutionWithRestart - ) - LLAMASelfAdaptiveDifferentialEvolutionWithRestart = NonObjectOptimizer( - method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart" - ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithRestart", register=True) + lama_register["SelfAdaptiveDifferentialEvolutionWithRestart"] = SelfAdaptiveDifferentialEvolutionWithRestart + res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveDifferentialEvolutionWithRestart = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithRestart", register=True) except Exception as e: print("SelfAdaptiveDifferentialEvolutionWithRestart can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialSwarmOptimization import ( - SelfAdaptiveDifferentialSwarmOptimization, - ) + from nevergrad.optimization.lama.SelfAdaptiveDifferentialSwarmOptimization import SelfAdaptiveDifferentialSwarmOptimization lama_register["SelfAdaptiveDifferentialSwarmOptimization"] = SelfAdaptiveDifferentialSwarmOptimization - LLAMASelfAdaptiveDifferentialSwarmOptimization = NonObjectOptimizer( - method="LLAMASelfAdaptiveDifferentialSwarmOptimization" - ).set_name("LLAMASelfAdaptiveDifferentialSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialSwarmOptimization").set_name("LLAMASelfAdaptiveDifferentialSwarmOptimization", register=True) except Exception as e: print("SelfAdaptiveDifferentialSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveEvolutionaryAlgorithm import ( - SelfAdaptiveEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.SelfAdaptiveEvolutionaryAlgorithm import SelfAdaptiveEvolutionaryAlgorithm lama_register["SelfAdaptiveEvolutionaryAlgorithm"] = SelfAdaptiveEvolutionaryAlgorithm - LLAMASelfAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMASelfAdaptiveEvolutionaryAlgorithm" - ).set_name("LLAMASelfAdaptiveEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveEvolutionaryAlgorithm").set_name("LLAMASelfAdaptiveEvolutionaryAlgorithm", register=True) except Exception as e: print("SelfAdaptiveEvolutionaryAlgorithm can not be imported: ", e) - try: from nevergrad.optimization.lama.SelfAdaptiveHybridOptimizer import SelfAdaptiveHybridOptimizer lama_register["SelfAdaptiveHybridOptimizer"] = SelfAdaptiveHybridOptimizer - LLAMASelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer").set_name( - "LLAMASelfAdaptiveHybridOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer").set_name("LLAMASelfAdaptiveHybridOptimizer", register=True) except Exception as e: print("SelfAdaptiveHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveInterleavedOptimization import ( - SelfAdaptiveInterleavedOptimization, - ) + from nevergrad.optimization.lama.SelfAdaptiveInterleavedOptimization import SelfAdaptiveInterleavedOptimization lama_register["SelfAdaptiveInterleavedOptimization"] = SelfAdaptiveInterleavedOptimization - LLAMASelfAdaptiveInterleavedOptimization = NonObjectOptimizer( - method="LLAMASelfAdaptiveInterleavedOptimization" - ).set_name("LLAMASelfAdaptiveInterleavedOptimization", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveInterleavedOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveInterleavedOptimization = NonObjectOptimizer(method="LLAMASelfAdaptiveInterleavedOptimization").set_name("LLAMASelfAdaptiveInterleavedOptimization", register=True) except Exception as e: print("SelfAdaptiveInterleavedOptimization can not be imported: ", e) - try: from nevergrad.optimization.lama.SelfAdaptiveMemeticAlgorithmV2 import SelfAdaptiveMemeticAlgorithmV2 lama_register["SelfAdaptiveMemeticAlgorithmV2"] = SelfAdaptiveMemeticAlgorithmV2 - LLAMASelfAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( - method="LLAMASelfAdaptiveMemeticAlgorithmV2" - ).set_name("LLAMASelfAdaptiveMemeticAlgorithmV2", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticAlgorithmV2").set_name("LLAMASelfAdaptiveMemeticAlgorithmV2", register=True) except Exception as e: print("SelfAdaptiveMemeticAlgorithmV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveMemeticEvolutionaryAlgorithm import ( - SelfAdaptiveMemeticEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.SelfAdaptiveMemeticEvolutionaryAlgorithm import SelfAdaptiveMemeticEvolutionaryAlgorithm lama_register["SelfAdaptiveMemeticEvolutionaryAlgorithm"] = SelfAdaptiveMemeticEvolutionaryAlgorithm - LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm" - ).set_name("LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm", register=True) except Exception as e: print("SelfAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveOppositionBasedHarmonySearchDE import ( - SelfAdaptiveOppositionBasedHarmonySearchDE, - ) + from nevergrad.optimization.lama.SelfAdaptiveOppositionBasedHarmonySearchDE import SelfAdaptiveOppositionBasedHarmonySearchDE lama_register["SelfAdaptiveOppositionBasedHarmonySearchDE"] = SelfAdaptiveOppositionBasedHarmonySearchDE - LLAMASelfAdaptiveOppositionBasedHarmonySearchDE = NonObjectOptimizer( - method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE" - ).set_name("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveOppositionBasedHarmonySearchDE = NonObjectOptimizer(method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE").set_name("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE", register=True) except Exception as e: print("SelfAdaptiveOppositionBasedHarmonySearchDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.SelfAdaptiveQuantumMemeticAlgorithm import ( - SelfAdaptiveQuantumMemeticAlgorithm, - ) + from nevergrad.optimization.lama.SelfAdaptiveQuantumMemeticAlgorithm import SelfAdaptiveQuantumMemeticAlgorithm lama_register["SelfAdaptiveQuantumMemeticAlgorithm"] = SelfAdaptiveQuantumMemeticAlgorithm - LLAMASelfAdaptiveQuantumMemeticAlgorithm = NonObjectOptimizer( - method="LLAMASelfAdaptiveQuantumMemeticAlgorithm" - ).set_name("LLAMASelfAdaptiveQuantumMemeticAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMASelfAdaptiveQuantumMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASelfAdaptiveQuantumMemeticAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveQuantumMemeticAlgorithm").set_name("LLAMASelfAdaptiveQuantumMemeticAlgorithm", register=True) except Exception as e: print("SelfAdaptiveQuantumMemeticAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.SequentialAdaptiveDifferentialEvolution import ( - SequentialAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.SequentialAdaptiveDifferentialEvolution import SequentialAdaptiveDifferentialEvolution lama_register["SequentialAdaptiveDifferentialEvolution"] = SequentialAdaptiveDifferentialEvolution - LLAMASequentialAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMASequentialAdaptiveDifferentialEvolution" - ).set_name("LLAMASequentialAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMASequentialAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASequentialAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMASequentialAdaptiveDifferentialEvolution").set_name("LLAMASequentialAdaptiveDifferentialEvolution", register=True) except Exception as e: print("SequentialAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.SequentialQuadraticAdaptiveEvolutionStrategy import ( - SequentialQuadraticAdaptiveEvolutionStrategy, - ) + from nevergrad.optimization.lama.SequentialQuadraticAdaptiveEvolutionStrategy import SequentialQuadraticAdaptiveEvolutionStrategy - lama_register["SequentialQuadraticAdaptiveEvolutionStrategy"] = ( - SequentialQuadraticAdaptiveEvolutionStrategy - ) - LLAMASequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( - method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy" - ).set_name("LLAMASequentialQuadraticAdaptiveEvolutionStrategy", register=True) + lama_register["SequentialQuadraticAdaptiveEvolutionStrategy"] = SequentialQuadraticAdaptiveEvolutionStrategy + res = NonObjectOptimizer(method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy").set_name("LLAMASequentialQuadraticAdaptiveEvolutionStrategy", register=True) except Exception as e: print("SequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.SequentialQuadraticExploitationSearch import ( - SequentialQuadraticExploitationSearch, - ) + from nevergrad.optimization.lama.SequentialQuadraticExploitationSearch import SequentialQuadraticExploitationSearch lama_register["SequentialQuadraticExploitationSearch"] = SequentialQuadraticExploitationSearch - LLAMASequentialQuadraticExploitationSearch = NonObjectOptimizer( - method="LLAMASequentialQuadraticExploitationSearch" - ).set_name("LLAMASequentialQuadraticExploitationSearch", register=True) + res = NonObjectOptimizer(method="LLAMASequentialQuadraticExploitationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASequentialQuadraticExploitationSearch = NonObjectOptimizer(method="LLAMASequentialQuadraticExploitationSearch").set_name("LLAMASequentialQuadraticExploitationSearch", register=True) except Exception as e: print("SequentialQuadraticExploitationSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.SimpleHybridDE import SimpleHybridDE lama_register["SimpleHybridDE"] = SimpleHybridDE - LLAMASimpleHybridDE = NonObjectOptimizer(method="LLAMASimpleHybridDE").set_name( - "LLAMASimpleHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMASimpleHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASimpleHybridDE = NonObjectOptimizer(method="LLAMASimpleHybridDE").set_name("LLAMASimpleHybridDE", register=True) except Exception as e: print("SimpleHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.SimplifiedAdaptiveDynamicDualPhaseStrategyV18 import ( - SimplifiedAdaptiveDynamicDualPhaseStrategyV18, - ) + from nevergrad.optimization.lama.SimplifiedAdaptiveDynamicDualPhaseStrategyV18 import SimplifiedAdaptiveDynamicDualPhaseStrategyV18 - lama_register["SimplifiedAdaptiveDynamicDualPhaseStrategyV18"] = ( - SimplifiedAdaptiveDynamicDualPhaseStrategyV18 - ) - LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18 = NonObjectOptimizer( - method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18" - ).set_name("LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18", register=True) + lama_register["SimplifiedAdaptiveDynamicDualPhaseStrategyV18"] = SimplifiedAdaptiveDynamicDualPhaseStrategyV18 + res = NonObjectOptimizer(method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18 = NonObjectOptimizer(method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18").set_name("LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18", register=True) except Exception as e: print("SimplifiedAdaptiveDynamicDualPhaseStrategyV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.SimulatedAnnealingOptimizer import SimulatedAnnealingOptimizer lama_register["SimulatedAnnealingOptimizer"] = SimulatedAnnealingOptimizer - LLAMASimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer").set_name( - "LLAMASimulatedAnnealingOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer").set_name("LLAMASimulatedAnnealingOptimizer", register=True) except Exception as e: print("SimulatedAnnealingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.SpiralSearchOptimizer import SpiralSearchOptimizer lama_register["SpiralSearchOptimizer"] = SpiralSearchOptimizer - LLAMASpiralSearchOptimizer = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer").set_name( - "LLAMASpiralSearchOptimizer", register=True - ) + res = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASpiralSearchOptimizer = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer").set_name("LLAMASpiralSearchOptimizer", register=True) except Exception as e: print("SpiralSearchOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.StabilizedQuantumCognitionOptimizerV11 import ( - StabilizedQuantumCognitionOptimizerV11, - ) + from nevergrad.optimization.lama.StabilizedQuantumCognitionOptimizerV11 import StabilizedQuantumCognitionOptimizerV11 lama_register["StabilizedQuantumCognitionOptimizerV11"] = StabilizedQuantumCognitionOptimizerV11 - LLAMAStabilizedQuantumCognitionOptimizerV11 = NonObjectOptimizer( - method="LLAMAStabilizedQuantumCognitionOptimizerV11" - ).set_name("LLAMAStabilizedQuantumCognitionOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAStabilizedQuantumCognitionOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStabilizedQuantumCognitionOptimizerV11 = NonObjectOptimizer(method="LLAMAStabilizedQuantumCognitionOptimizerV11").set_name("LLAMAStabilizedQuantumCognitionOptimizerV11", register=True) except Exception as e: print("StabilizedQuantumCognitionOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.StabilizedQuantumConcentricOptimizer import ( - StabilizedQuantumConcentricOptimizer, - ) + from nevergrad.optimization.lama.StabilizedQuantumConcentricOptimizer import StabilizedQuantumConcentricOptimizer lama_register["StabilizedQuantumConcentricOptimizer"] = StabilizedQuantumConcentricOptimizer - LLAMAStabilizedQuantumConcentricOptimizer = NonObjectOptimizer( - method="LLAMAStabilizedQuantumConcentricOptimizer" - ).set_name("LLAMAStabilizedQuantumConcentricOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAStabilizedQuantumConcentricOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStabilizedQuantumConcentricOptimizer = NonObjectOptimizer(method="LLAMAStabilizedQuantumConcentricOptimizer").set_name("LLAMAStabilizedQuantumConcentricOptimizer", register=True) except Exception as e: print("StabilizedQuantumConcentricOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.StabilizedRefinedEnhancedDynamicBalancingPSO import ( - StabilizedRefinedEnhancedDynamicBalancingPSO, - ) + from nevergrad.optimization.lama.StabilizedRefinedEnhancedDynamicBalancingPSO import StabilizedRefinedEnhancedDynamicBalancingPSO - lama_register["StabilizedRefinedEnhancedDynamicBalancingPSO"] = ( - StabilizedRefinedEnhancedDynamicBalancingPSO - ) - LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO = NonObjectOptimizer( - method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO" - ).set_name("LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO", register=True) + lama_register["StabilizedRefinedEnhancedDynamicBalancingPSO"] = StabilizedRefinedEnhancedDynamicBalancingPSO + res = NonObjectOptimizer(method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO").set_name("LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO", register=True) except Exception as e: print("StabilizedRefinedEnhancedDynamicBalancingPSO can not be imported: ", e) - try: - from nevergrad.optimization.lama.StochasticAdaptiveEvolutionaryOptimizer import ( - StochasticAdaptiveEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.StochasticAdaptiveEvolutionaryOptimizer import StochasticAdaptiveEvolutionaryOptimizer lama_register["StochasticAdaptiveEvolutionaryOptimizer"] = StochasticAdaptiveEvolutionaryOptimizer - LLAMAStochasticAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAStochasticAdaptiveEvolutionaryOptimizer" - ).set_name("LLAMAStochasticAdaptiveEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAStochasticAdaptiveEvolutionaryOptimizer").set_name("LLAMAStochasticAdaptiveEvolutionaryOptimizer", register=True) except Exception as e: print("StochasticAdaptiveEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.StochasticBalancingOptimizer import StochasticBalancingOptimizer lama_register["StochasticBalancingOptimizer"] = StochasticBalancingOptimizer - LLAMAStochasticBalancingOptimizer = NonObjectOptimizer( - method="LLAMAStochasticBalancingOptimizer" - ).set_name("LLAMAStochasticBalancingOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticBalancingOptimizer = NonObjectOptimizer(method="LLAMAStochasticBalancingOptimizer").set_name("LLAMAStochasticBalancingOptimizer", register=True) except Exception as e: print("StochasticBalancingOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.StochasticGradientEnhancedDE import StochasticGradientEnhancedDE lama_register["StochasticGradientEnhancedDE"] = StochasticGradientEnhancedDE - LLAMAStochasticGradientEnhancedDE = NonObjectOptimizer( - method="LLAMAStochasticGradientEnhancedDE" - ).set_name("LLAMAStochasticGradientEnhancedDE", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticGradientEnhancedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticGradientEnhancedDE = NonObjectOptimizer(method="LLAMAStochasticGradientEnhancedDE").set_name("LLAMAStochasticGradientEnhancedDE", register=True) except Exception as e: print("StochasticGradientEnhancedDE can not be imported: ", e) - try: from nevergrad.optimization.lama.StochasticGradientExploration import StochasticGradientExploration lama_register["StochasticGradientExploration"] = StochasticGradientExploration - LLAMAStochasticGradientExploration = NonObjectOptimizer( - method="LLAMAStochasticGradientExploration" - ).set_name("LLAMAStochasticGradientExploration", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticGradientExploration = NonObjectOptimizer(method="LLAMAStochasticGradientExploration").set_name("LLAMAStochasticGradientExploration", register=True) except Exception as e: print("StochasticGradientExploration can not be imported: ", e) - try: - from nevergrad.optimization.lama.StochasticGradientHybridOptimization import ( - StochasticGradientHybridOptimization, - ) + from nevergrad.optimization.lama.StochasticGradientHybridOptimization import StochasticGradientHybridOptimization lama_register["StochasticGradientHybridOptimization"] = StochasticGradientHybridOptimization - LLAMAStochasticGradientHybridOptimization = NonObjectOptimizer( - method="LLAMAStochasticGradientHybridOptimization" - ).set_name("LLAMAStochasticGradientHybridOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticGradientHybridOptimization = NonObjectOptimizer(method="LLAMAStochasticGradientHybridOptimization").set_name("LLAMAStochasticGradientHybridOptimization", register=True) except Exception as e: print("StochasticGradientHybridOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.StochasticGradientQuorumOptimization import ( - StochasticGradientQuorumOptimization, - ) + from nevergrad.optimization.lama.StochasticGradientQuorumOptimization import StochasticGradientQuorumOptimization lama_register["StochasticGradientQuorumOptimization"] = StochasticGradientQuorumOptimization - LLAMAStochasticGradientQuorumOptimization = NonObjectOptimizer( - method="LLAMAStochasticGradientQuorumOptimization" - ).set_name("LLAMAStochasticGradientQuorumOptimization", register=True) + res = NonObjectOptimizer(method="LLAMAStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMAStochasticGradientQuorumOptimization").set_name("LLAMAStochasticGradientQuorumOptimization", register=True) except Exception as e: print("StochasticGradientQuorumOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.StrategicAdaptiveDifferentialEvolution import ( - StrategicAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.StrategicAdaptiveDifferentialEvolution import StrategicAdaptiveDifferentialEvolution lama_register["StrategicAdaptiveDifferentialEvolution"] = StrategicAdaptiveDifferentialEvolution - LLAMAStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMAStrategicAdaptiveDifferentialEvolution" - ).set_name("LLAMAStrategicAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAStrategicAdaptiveDifferentialEvolution").set_name("LLAMAStrategicAdaptiveDifferentialEvolution", register=True) except Exception as e: print("StrategicAdaptiveDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.StrategicDifferentialEvolution import StrategicDifferentialEvolution lama_register["StrategicDifferentialEvolution"] = StrategicDifferentialEvolution - LLAMAStrategicDifferentialEvolution = NonObjectOptimizer( - method="LLAMAStrategicDifferentialEvolution" - ).set_name("LLAMAStrategicDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicDifferentialEvolution = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution").set_name("LLAMAStrategicDifferentialEvolution", register=True) except Exception as e: print("StrategicDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.StrategicDiminishingAdaptiveEvolver import ( - StrategicDiminishingAdaptiveEvolver, - ) + from nevergrad.optimization.lama.StrategicDiminishingAdaptiveEvolver import StrategicDiminishingAdaptiveEvolver lama_register["StrategicDiminishingAdaptiveEvolver"] = StrategicDiminishingAdaptiveEvolver - LLAMAStrategicDiminishingAdaptiveEvolver = NonObjectOptimizer( - method="LLAMAStrategicDiminishingAdaptiveEvolver" - ).set_name("LLAMAStrategicDiminishingAdaptiveEvolver", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicDiminishingAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicDiminishingAdaptiveEvolver = NonObjectOptimizer(method="LLAMAStrategicDiminishingAdaptiveEvolver").set_name("LLAMAStrategicDiminishingAdaptiveEvolver", register=True) except Exception as e: print("StrategicDiminishingAdaptiveEvolver can not be imported: ", e) - try: from nevergrad.optimization.lama.StrategicHybridDE import StrategicHybridDE lama_register["StrategicHybridDE"] = StrategicHybridDE - LLAMAStrategicHybridDE = NonObjectOptimizer(method="LLAMAStrategicHybridDE").set_name( - "LLAMAStrategicHybridDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicHybridDE = NonObjectOptimizer(method="LLAMAStrategicHybridDE").set_name("LLAMAStrategicHybridDE", register=True) except Exception as e: print("StrategicHybridDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.StrategicMultiPhaseEvolutionaryAlgorithm import ( - StrategicMultiPhaseEvolutionaryAlgorithm, - ) + from nevergrad.optimization.lama.StrategicMultiPhaseEvolutionaryAlgorithm import StrategicMultiPhaseEvolutionaryAlgorithm lama_register["StrategicMultiPhaseEvolutionaryAlgorithm"] = StrategicMultiPhaseEvolutionaryAlgorithm - LLAMAStrategicMultiPhaseEvolutionaryAlgorithm = NonObjectOptimizer( - method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm" - ).set_name("LLAMAStrategicMultiPhaseEvolutionaryAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicMultiPhaseEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm").set_name("LLAMAStrategicMultiPhaseEvolutionaryAlgorithm", register=True) except Exception as e: print("StrategicMultiPhaseEvolutionaryAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.StrategicQuorumMutationWithAdaptiveElites import ( - StrategicQuorumMutationWithAdaptiveElites, - ) + from nevergrad.optimization.lama.StrategicQuorumMutationWithAdaptiveElites import StrategicQuorumMutationWithAdaptiveElites lama_register["StrategicQuorumMutationWithAdaptiveElites"] = StrategicQuorumMutationWithAdaptiveElites - LLAMAStrategicQuorumMutationWithAdaptiveElites = NonObjectOptimizer( - method="LLAMAStrategicQuorumMutationWithAdaptiveElites" - ).set_name("LLAMAStrategicQuorumMutationWithAdaptiveElites", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicQuorumMutationWithAdaptiveElites")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicQuorumMutationWithAdaptiveElites = NonObjectOptimizer(method="LLAMAStrategicQuorumMutationWithAdaptiveElites").set_name("LLAMAStrategicQuorumMutationWithAdaptiveElites", register=True) except Exception as e: print("StrategicQuorumMutationWithAdaptiveElites can not be imported: ", e) - try: - from nevergrad.optimization.lama.StrategicResilienceAdaptiveSearch import ( - StrategicResilienceAdaptiveSearch, - ) + from nevergrad.optimization.lama.StrategicResilienceAdaptiveSearch import StrategicResilienceAdaptiveSearch lama_register["StrategicResilienceAdaptiveSearch"] = StrategicResilienceAdaptiveSearch - LLAMAStrategicResilienceAdaptiveSearch = NonObjectOptimizer( - method="LLAMAStrategicResilienceAdaptiveSearch" - ).set_name("LLAMAStrategicResilienceAdaptiveSearch", register=True) + res = NonObjectOptimizer(method="LLAMAStrategicResilienceAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAStrategicResilienceAdaptiveSearch = NonObjectOptimizer(method="LLAMAStrategicResilienceAdaptiveSearch").set_name("LLAMAStrategicResilienceAdaptiveSearch", register=True) except Exception as e: print("StrategicResilienceAdaptiveSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimization import ( - SuperDynamicQuantumSwarmOptimization, - ) + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimization import SuperDynamicQuantumSwarmOptimization lama_register["SuperDynamicQuantumSwarmOptimization"] = SuperDynamicQuantumSwarmOptimization - LLAMASuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( - method="LLAMASuperDynamicQuantumSwarmOptimization" - ).set_name("LLAMASuperDynamicQuantumSwarmOptimization", register=True) + res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimization").set_name("LLAMASuperDynamicQuantumSwarmOptimization", register=True) except Exception as e: print("SuperDynamicQuantumSwarmOptimization can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimizationImproved import ( - SuperDynamicQuantumSwarmOptimizationImproved, - ) + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimizationImproved import SuperDynamicQuantumSwarmOptimizationImproved - lama_register["SuperDynamicQuantumSwarmOptimizationImproved"] = ( - SuperDynamicQuantumSwarmOptimizationImproved - ) - LLAMASuperDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( - method="LLAMASuperDynamicQuantumSwarmOptimizationImproved" - ).set_name("LLAMASuperDynamicQuantumSwarmOptimizationImproved", register=True) + lama_register["SuperDynamicQuantumSwarmOptimizationImproved"] = SuperDynamicQuantumSwarmOptimizationImproved + res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimizationImproved").set_name("LLAMASuperDynamicQuantumSwarmOptimizationImproved", register=True) except Exception as e: print("SuperDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) - try: from nevergrad.optimization.lama.SuperOptimizedRAMEDS import SuperOptimizedRAMEDS lama_register["SuperOptimizedRAMEDS"] = SuperOptimizedRAMEDS - LLAMASuperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS").set_name( - "LLAMASuperOptimizedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS").set_name("LLAMASuperOptimizedRAMEDS", register=True) except Exception as e: print("SuperOptimizedRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.SuperRefinedRAMEDSv5 import SuperRefinedRAMEDSv5 lama_register["SuperRefinedRAMEDSv5"] = SuperRefinedRAMEDSv5 - LLAMASuperRefinedRAMEDSv5 = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5").set_name( - "LLAMASuperRefinedRAMEDSv5", register=True - ) + res = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperRefinedRAMEDSv5 = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5").set_name("LLAMASuperRefinedRAMEDSv5", register=True) except Exception as e: print("SuperRefinedRAMEDSv5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 import ( - SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5, - ) + from nevergrad.optimization.lama.SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 import SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 - lama_register["SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5"] = ( - SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 - ) - LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 = NonObjectOptimizer( - method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5" - ).set_name("LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5", register=True) + lama_register["SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5"] = SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 + res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5").set_name("LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5", register=True) except Exception as e: print("SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 import ( - SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16, - ) + from nevergrad.optimization.lama.SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 import SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 - lama_register["SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16"] = ( - SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 - ) - LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 = NonObjectOptimizer( - method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16" - ).set_name("LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16", register=True) + lama_register["SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16"] = SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 + res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16").set_name("LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16", register=True) except Exception as e: print("SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 can not be imported: ", e) - try: from nevergrad.optimization.lama.SuperiorAdaptiveStrategyDE import SuperiorAdaptiveStrategyDE lama_register["SuperiorAdaptiveStrategyDE"] = SuperiorAdaptiveStrategyDE - LLAMASuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE").set_name( - "LLAMASuperiorAdaptiveStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE").set_name("LLAMASuperiorAdaptiveStrategyDE", register=True) except Exception as e: print("SuperiorAdaptiveStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperiorEnhancedDynamicPrecisionOptimizerV1 import ( - SuperiorEnhancedDynamicPrecisionOptimizerV1, - ) + from nevergrad.optimization.lama.SuperiorEnhancedDynamicPrecisionOptimizerV1 import SuperiorEnhancedDynamicPrecisionOptimizerV1 lama_register["SuperiorEnhancedDynamicPrecisionOptimizerV1"] = SuperiorEnhancedDynamicPrecisionOptimizerV1 - LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1 = NonObjectOptimizer( - method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1" - ).set_name("LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1").set_name("LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1", register=True) except Exception as e: print("SuperiorEnhancedDynamicPrecisionOptimizerV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperiorHybridEvolutionaryAnnealingOptimizer import ( - SuperiorHybridEvolutionaryAnnealingOptimizer, - ) + from nevergrad.optimization.lama.SuperiorHybridEvolutionaryAnnealingOptimizer import SuperiorHybridEvolutionaryAnnealingOptimizer - lama_register["SuperiorHybridEvolutionaryAnnealingOptimizer"] = ( - SuperiorHybridEvolutionaryAnnealingOptimizer - ) - LLAMASuperiorHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( - method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer" - ).set_name("LLAMASuperiorHybridEvolutionaryAnnealingOptimizer", register=True) + lama_register["SuperiorHybridEvolutionaryAnnealingOptimizer"] = SuperiorHybridEvolutionaryAnnealingOptimizer + res = NonObjectOptimizer(method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperiorHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer").set_name("LLAMASuperiorHybridEvolutionaryAnnealingOptimizer", register=True) except Exception as e: print("SuperiorHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperiorOptimalEnhancedStrategyDE import ( - SuperiorOptimalEnhancedStrategyDE, - ) + from nevergrad.optimization.lama.SuperiorOptimalEnhancedStrategyDE import SuperiorOptimalEnhancedStrategyDE lama_register["SuperiorOptimalEnhancedStrategyDE"] = SuperiorOptimalEnhancedStrategyDE - LLAMASuperiorOptimalEnhancedStrategyDE = NonObjectOptimizer( - method="LLAMASuperiorOptimalEnhancedStrategyDE" - ).set_name("LLAMASuperiorOptimalEnhancedStrategyDE", register=True) + res = NonObjectOptimizer(method="LLAMASuperiorOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperiorOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMASuperiorOptimalEnhancedStrategyDE").set_name("LLAMASuperiorOptimalEnhancedStrategyDE", register=True) except Exception as e: print("SuperiorOptimalEnhancedStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.SuperiorRefinedEvolutionaryGradientOptimizerV13 import ( - SuperiorRefinedEvolutionaryGradientOptimizerV13, - ) + from nevergrad.optimization.lama.SuperiorRefinedEvolutionaryGradientOptimizerV13 import SuperiorRefinedEvolutionaryGradientOptimizerV13 - lama_register["SuperiorRefinedEvolutionaryGradientOptimizerV13"] = ( - SuperiorRefinedEvolutionaryGradientOptimizerV13 - ) - LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13 = NonObjectOptimizer( - method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13" - ).set_name("LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13", register=True) + lama_register["SuperiorRefinedEvolutionaryGradientOptimizerV13"] = SuperiorRefinedEvolutionaryGradientOptimizerV13 + res = NonObjectOptimizer(method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13 = NonObjectOptimizer(method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13").set_name("LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13", register=True) except Exception as e: print("SuperiorRefinedEvolutionaryGradientOptimizerV13 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeDynamicAdaptiveOptimizerV5 import ( - SupremeDynamicAdaptiveOptimizerV5, - ) + from nevergrad.optimization.lama.SupremeDynamicAdaptiveOptimizerV5 import SupremeDynamicAdaptiveOptimizerV5 lama_register["SupremeDynamicAdaptiveOptimizerV5"] = SupremeDynamicAdaptiveOptimizerV5 - LLAMASupremeDynamicAdaptiveOptimizerV5 = NonObjectOptimizer( - method="LLAMASupremeDynamicAdaptiveOptimizerV5" - ).set_name("LLAMASupremeDynamicAdaptiveOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMASupremeDynamicAdaptiveOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeDynamicAdaptiveOptimizerV5 = NonObjectOptimizer(method="LLAMASupremeDynamicAdaptiveOptimizerV5").set_name("LLAMASupremeDynamicAdaptiveOptimizerV5", register=True) except Exception as e: print("SupremeDynamicAdaptiveOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV1 import ( - SupremeDynamicPrecisionOptimizerV1, - ) + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV1 import SupremeDynamicPrecisionOptimizerV1 lama_register["SupremeDynamicPrecisionOptimizerV1"] = SupremeDynamicPrecisionOptimizerV1 - LLAMASupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( - method="LLAMASupremeDynamicPrecisionOptimizerV1" - ).set_name("LLAMASupremeDynamicPrecisionOptimizerV1", register=True) + res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV1").set_name("LLAMASupremeDynamicPrecisionOptimizerV1", register=True) except Exception as e: print("SupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV2 import ( - SupremeDynamicPrecisionOptimizerV2, - ) + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV2 import SupremeDynamicPrecisionOptimizerV2 lama_register["SupremeDynamicPrecisionOptimizerV2"] = SupremeDynamicPrecisionOptimizerV2 - LLAMASupremeDynamicPrecisionOptimizerV2 = NonObjectOptimizer( - method="LLAMASupremeDynamicPrecisionOptimizerV2" - ).set_name("LLAMASupremeDynamicPrecisionOptimizerV2", register=True) + res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeDynamicPrecisionOptimizerV2 = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV2").set_name("LLAMASupremeDynamicPrecisionOptimizerV2", register=True) except Exception as e: print("SupremeDynamicPrecisionOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeEvolutionaryGradientHybridOptimizerV6 import ( - SupremeEvolutionaryGradientHybridOptimizerV6, - ) + from nevergrad.optimization.lama.SupremeEvolutionaryGradientHybridOptimizerV6 import SupremeEvolutionaryGradientHybridOptimizerV6 - lama_register["SupremeEvolutionaryGradientHybridOptimizerV6"] = ( - SupremeEvolutionaryGradientHybridOptimizerV6 - ) - LLAMASupremeEvolutionaryGradientHybridOptimizerV6 = NonObjectOptimizer( - method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6" - ).set_name("LLAMASupremeEvolutionaryGradientHybridOptimizerV6", register=True) + lama_register["SupremeEvolutionaryGradientHybridOptimizerV6"] = SupremeEvolutionaryGradientHybridOptimizerV6 + res = NonObjectOptimizer(method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeEvolutionaryGradientHybridOptimizerV6 = NonObjectOptimizer(method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6").set_name("LLAMASupremeEvolutionaryGradientHybridOptimizerV6", register=True) except Exception as e: print("SupremeEvolutionaryGradientHybridOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeOptimalPrecisionEvolutionaryThermalOptimizer import ( - SupremeOptimalPrecisionEvolutionaryThermalOptimizer, - ) + from nevergrad.optimization.lama.SupremeOptimalPrecisionEvolutionaryThermalOptimizer import SupremeOptimalPrecisionEvolutionaryThermalOptimizer - lama_register["SupremeOptimalPrecisionEvolutionaryThermalOptimizer"] = ( - SupremeOptimalPrecisionEvolutionaryThermalOptimizer - ) - LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( - method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer" - ).set_name("LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer", register=True) + lama_register["SupremeOptimalPrecisionEvolutionaryThermalOptimizer"] = SupremeOptimalPrecisionEvolutionaryThermalOptimizer + res = NonObjectOptimizer(method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer", register=True) except Exception as e: print("SupremeOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.SupremeUltraEnhancedEvolutionaryOptimizer import ( - SupremeUltraEnhancedEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.SupremeUltraEnhancedEvolutionaryOptimizer import SupremeUltraEnhancedEvolutionaryOptimizer lama_register["SupremeUltraEnhancedEvolutionaryOptimizer"] = SupremeUltraEnhancedEvolutionaryOptimizer - LLAMASupremeUltraEnhancedEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer" - ).set_name("LLAMASupremeUltraEnhancedEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMASupremeUltraEnhancedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer").set_name("LLAMASupremeUltraEnhancedEvolutionaryOptimizer", register=True) except Exception as e: print("SupremeUltraEnhancedEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.TemporalAdaptiveDifferentialEvolution import ( - TemporalAdaptiveDifferentialEvolution, - ) + from nevergrad.optimization.lama.TemporalAdaptiveDifferentialEvolution import TemporalAdaptiveDifferentialEvolution lama_register["TemporalAdaptiveDifferentialEvolution"] = TemporalAdaptiveDifferentialEvolution - LLAMATemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( - method="LLAMATemporalAdaptiveDifferentialEvolution" - ).set_name("LLAMATemporalAdaptiveDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMATemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMATemporalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMATemporalAdaptiveDifferentialEvolution").set_name("LLAMATemporalAdaptiveDifferentialEvolution", register=True) except Exception as e: print("TemporalAdaptiveDifferentialEvolution can not be imported: ", e) - try: - from nevergrad.optimization.lama.TurbochargedDifferentialEvolution import ( - TurbochargedDifferentialEvolution, - ) + from nevergrad.optimization.lama.TurbochargedDifferentialEvolution import TurbochargedDifferentialEvolution lama_register["TurbochargedDifferentialEvolution"] = TurbochargedDifferentialEvolution - LLAMATurbochargedDifferentialEvolution = NonObjectOptimizer( - method="LLAMATurbochargedDifferentialEvolution" - ).set_name("LLAMATurbochargedDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMATurbochargedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMATurbochargedDifferentialEvolution = NonObjectOptimizer(method="LLAMATurbochargedDifferentialEvolution").set_name("LLAMATurbochargedDifferentialEvolution", register=True) except Exception as e: print("TurbochargedDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithm import UltimateDynamicFireworkAlgorithm lama_register["UltimateDynamicFireworkAlgorithm"] = UltimateDynamicFireworkAlgorithm - LLAMAUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( - method="LLAMAUltimateDynamicFireworkAlgorithm" - ).set_name("LLAMAUltimateDynamicFireworkAlgorithm", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithm").set_name("LLAMAUltimateDynamicFireworkAlgorithm", register=True) except Exception as e: print("UltimateDynamicFireworkAlgorithm can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithmImproved import ( - UltimateDynamicFireworkAlgorithmImproved, - ) + from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithmImproved import UltimateDynamicFireworkAlgorithmImproved lama_register["UltimateDynamicFireworkAlgorithmImproved"] = UltimateDynamicFireworkAlgorithmImproved - LLAMAUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( - method="LLAMAUltimateDynamicFireworkAlgorithmImproved" - ).set_name("LLAMAUltimateDynamicFireworkAlgorithmImproved", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithmImproved").set_name("LLAMAUltimateDynamicFireworkAlgorithmImproved", register=True) except Exception as e: print("UltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 import ( - UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19, - ) + from nevergrad.optimization.lama.UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 import UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 - lama_register["UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19"] = ( - UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 - ) - LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 = NonObjectOptimizer( - method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19" - ).set_name("LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19", register=True) + lama_register["UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19"] = UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 + res = NonObjectOptimizer(method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 = NonObjectOptimizer(method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19").set_name("LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19", register=True) except Exception as e: print("UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV15 import ( - UltimateEvolutionaryGradientOptimizerV15, - ) + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV15 import UltimateEvolutionaryGradientOptimizerV15 lama_register["UltimateEvolutionaryGradientOptimizerV15"] = UltimateEvolutionaryGradientOptimizerV15 - LLAMAUltimateEvolutionaryGradientOptimizerV15 = NonObjectOptimizer( - method="LLAMAUltimateEvolutionaryGradientOptimizerV15" - ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV15", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateEvolutionaryGradientOptimizerV15 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV15").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV15", register=True) except Exception as e: print("UltimateEvolutionaryGradientOptimizerV15 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV26 import ( - UltimateEvolutionaryGradientOptimizerV26, - ) + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV26 import UltimateEvolutionaryGradientOptimizerV26 lama_register["UltimateEvolutionaryGradientOptimizerV26"] = UltimateEvolutionaryGradientOptimizerV26 - LLAMAUltimateEvolutionaryGradientOptimizerV26 = NonObjectOptimizer( - method="LLAMAUltimateEvolutionaryGradientOptimizerV26" - ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV26", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateEvolutionaryGradientOptimizerV26 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV26").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV26", register=True) except Exception as e: print("UltimateEvolutionaryGradientOptimizerV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV33 import ( - UltimateEvolutionaryGradientOptimizerV33, - ) + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV33 import UltimateEvolutionaryGradientOptimizerV33 lama_register["UltimateEvolutionaryGradientOptimizerV33"] = UltimateEvolutionaryGradientOptimizerV33 - LLAMAUltimateEvolutionaryGradientOptimizerV33 = NonObjectOptimizer( - method="LLAMAUltimateEvolutionaryGradientOptimizerV33" - ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV33", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateEvolutionaryGradientOptimizerV33 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV33").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV33", register=True) except Exception as e: print("UltimateEvolutionaryGradientOptimizerV33 can not be imported: ", e) - try: from nevergrad.optimization.lama.UltimateEvolutionaryOptimizer import UltimateEvolutionaryOptimizer lama_register["UltimateEvolutionaryOptimizer"] = UltimateEvolutionaryOptimizer - LLAMAUltimateEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAUltimateEvolutionaryOptimizer" - ).set_name("LLAMAUltimateEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryOptimizer").set_name("LLAMAUltimateEvolutionaryOptimizer", register=True) except Exception as e: print("UltimateEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.UltimateRefinedAQAPSO_LS_DIW_AP import UltimateRefinedAQAPSO_LS_DIW_AP lama_register["UltimateRefinedAQAPSO_LS_DIW_AP"] = UltimateRefinedAQAPSO_LS_DIW_AP - LLAMAUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( - method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP" - ).set_name("LLAMAUltimateRefinedAQAPSO_LS_DIW_AP", register=True) + res = NonObjectOptimizer(method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAUltimateRefinedAQAPSO_LS_DIW_AP", register=True) except Exception as e: print("UltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateRefinedPrecisionEvolutionaryOptimizerV41 import ( - UltimateRefinedPrecisionEvolutionaryOptimizerV41, - ) + from nevergrad.optimization.lama.UltimateRefinedPrecisionEvolutionaryOptimizerV41 import UltimateRefinedPrecisionEvolutionaryOptimizerV41 - lama_register["UltimateRefinedPrecisionEvolutionaryOptimizerV41"] = ( - UltimateRefinedPrecisionEvolutionaryOptimizerV41 - ) - LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41 = NonObjectOptimizer( - method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41" - ).set_name("LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41", register=True) + lama_register["UltimateRefinedPrecisionEvolutionaryOptimizerV41"] = UltimateRefinedPrecisionEvolutionaryOptimizerV41 + res = NonObjectOptimizer(method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41 = NonObjectOptimizer(method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41").set_name("LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41", register=True) except Exception as e: print("UltimateRefinedPrecisionEvolutionaryOptimizerV41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 import ( - UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18, - ) + from nevergrad.optimization.lama.UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 import UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 - lama_register["UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18"] = ( - UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 - ) - LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 = NonObjectOptimizer( - method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18" - ).set_name("LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18", register=True) + lama_register["UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18"] = UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 + res = NonObjectOptimizer(method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 = NonObjectOptimizer(method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18").set_name("LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18", register=True) except Exception as e: print("UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraDynamicAdaptiveRAMEDS import UltraDynamicAdaptiveRAMEDS lama_register["UltraDynamicAdaptiveRAMEDS"] = UltraDynamicAdaptiveRAMEDS - LLAMAUltraDynamicAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS").set_name( - "LLAMAUltraDynamicAdaptiveRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraDynamicAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS").set_name("LLAMAUltraDynamicAdaptiveRAMEDS", register=True) except Exception as e: print("UltraDynamicAdaptiveRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraDynamicDualPhaseOptimizedStrategyV16 import ( - UltraDynamicDualPhaseOptimizedStrategyV16, - ) + from nevergrad.optimization.lama.UltraDynamicDualPhaseOptimizedStrategyV16 import UltraDynamicDualPhaseOptimizedStrategyV16 lama_register["UltraDynamicDualPhaseOptimizedStrategyV16"] = UltraDynamicDualPhaseOptimizedStrategyV16 - LLAMAUltraDynamicDualPhaseOptimizedStrategyV16 = NonObjectOptimizer( - method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16" - ).set_name("LLAMAUltraDynamicDualPhaseOptimizedStrategyV16", register=True) + res = NonObjectOptimizer(method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraDynamicDualPhaseOptimizedStrategyV16 = NonObjectOptimizer(method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16").set_name("LLAMAUltraDynamicDualPhaseOptimizedStrategyV16", register=True) except Exception as e: print("UltraDynamicDualPhaseOptimizedStrategyV16 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV10 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV10, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV10 import UltraEnhancedAdaptiveMemoryHybridOptimizerV10 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV10"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV10 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV10"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV10 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV11 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV11, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV11 import UltraEnhancedAdaptiveMemoryHybridOptimizerV11 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV11"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV11 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV11"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV11 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV12 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV12, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV12 import UltraEnhancedAdaptiveMemoryHybridOptimizerV12 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV12"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV12 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV12"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV12 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV12 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV2 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV2, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV2 import UltraEnhancedAdaptiveMemoryHybridOptimizerV2 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV2"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV2 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV2"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV2 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV3 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV3, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV3 import UltraEnhancedAdaptiveMemoryHybridOptimizerV3 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV3"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV3 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV3"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV3 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV4 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV4, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV4 import UltraEnhancedAdaptiveMemoryHybridOptimizerV4 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV4"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV4 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV4"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV4 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV7 import ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV7, - ) + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV7 import UltraEnhancedAdaptiveMemoryHybridOptimizerV7 - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV7"] = ( - UltraEnhancedAdaptiveMemoryHybridOptimizerV7 - ) - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( - method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7" - ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7", register=True) + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV7"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV7 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7", register=True) except Exception as e: print("UltraEnhancedAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraEnhancedAdaptiveRAMEDS import UltraEnhancedAdaptiveRAMEDS lama_register["UltraEnhancedAdaptiveRAMEDS"] = UltraEnhancedAdaptiveRAMEDS - LLAMAUltraEnhancedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS").set_name( - "LLAMAUltraEnhancedAdaptiveRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS").set_name("LLAMAUltraEnhancedAdaptiveRAMEDS", register=True) except Exception as e: print("UltraEnhancedAdaptiveRAMEDS can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraEnhancedDynamicDE import UltraEnhancedDynamicDE lama_register["UltraEnhancedDynamicDE"] = UltraEnhancedDynamicDE - LLAMAUltraEnhancedDynamicDE = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE").set_name( - "LLAMAUltraEnhancedDynamicDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedDynamicDE = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE").set_name("LLAMAUltraEnhancedDynamicDE", register=True) except Exception as e: print("UltraEnhancedDynamicDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( - UltraEnhancedEliteAdaptiveMemoryHybridOptimizer, - ) + from nevergrad.optimization.lama.UltraEnhancedEliteAdaptiveMemoryHybridOptimizer import UltraEnhancedEliteAdaptiveMemoryHybridOptimizer - lama_register["UltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( - UltraEnhancedEliteAdaptiveMemoryHybridOptimizer - ) - LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( - method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" - ).set_name("LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) + lama_register["UltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = UltraEnhancedEliteAdaptiveMemoryHybridOptimizer + res = NonObjectOptimizer(method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) except Exception as e: print("UltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedEvolutionaryGradientOptimizerV14 import ( - UltraEnhancedEvolutionaryGradientOptimizerV14, - ) + from nevergrad.optimization.lama.UltraEnhancedEvolutionaryGradientOptimizerV14 import UltraEnhancedEvolutionaryGradientOptimizerV14 - lama_register["UltraEnhancedEvolutionaryGradientOptimizerV14"] = ( - UltraEnhancedEvolutionaryGradientOptimizerV14 - ) - LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14 = NonObjectOptimizer( - method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14" - ).set_name("LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14", register=True) + lama_register["UltraEnhancedEvolutionaryGradientOptimizerV14"] = UltraEnhancedEvolutionaryGradientOptimizerV14 + res = NonObjectOptimizer(method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14 = NonObjectOptimizer(method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14").set_name("LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14", register=True) except Exception as e: print("UltraEnhancedEvolutionaryGradientOptimizerV14 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEnhancedPrecisionEvolutionaryOptimizer import ( - UltraEnhancedPrecisionEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.UltraEnhancedPrecisionEvolutionaryOptimizer import UltraEnhancedPrecisionEvolutionaryOptimizer lama_register["UltraEnhancedPrecisionEvolutionaryOptimizer"] = UltraEnhancedPrecisionEvolutionaryOptimizer - LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer" - ).set_name("LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer").set_name("LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer", register=True) except Exception as e: print("UltraEnhancedPrecisionEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraEvolutionaryGradientOptimizerV27 import ( - UltraEvolutionaryGradientOptimizerV27, - ) + from nevergrad.optimization.lama.UltraEvolutionaryGradientOptimizerV27 import UltraEvolutionaryGradientOptimizerV27 lama_register["UltraEvolutionaryGradientOptimizerV27"] = UltraEvolutionaryGradientOptimizerV27 - LLAMAUltraEvolutionaryGradientOptimizerV27 = NonObjectOptimizer( - method="LLAMAUltraEvolutionaryGradientOptimizerV27" - ).set_name("LLAMAUltraEvolutionaryGradientOptimizerV27", register=True) + res = NonObjectOptimizer(method="LLAMAUltraEvolutionaryGradientOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraEvolutionaryGradientOptimizerV27 = NonObjectOptimizer(method="LLAMAUltraEvolutionaryGradientOptimizerV27").set_name("LLAMAUltraEvolutionaryGradientOptimizerV27", register=True) except Exception as e: print("UltraEvolutionaryGradientOptimizerV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraFineSpiralDifferentialOptimizerV7 import ( - UltraFineSpiralDifferentialOptimizerV7, - ) + from nevergrad.optimization.lama.UltraFineSpiralDifferentialOptimizerV7 import UltraFineSpiralDifferentialOptimizerV7 lama_register["UltraFineSpiralDifferentialOptimizerV7"] = UltraFineSpiralDifferentialOptimizerV7 - LLAMAUltraFineSpiralDifferentialOptimizerV7 = NonObjectOptimizer( - method="LLAMAUltraFineSpiralDifferentialOptimizerV7" - ).set_name("LLAMAUltraFineSpiralDifferentialOptimizerV7", register=True) + res = NonObjectOptimizer(method="LLAMAUltraFineSpiralDifferentialOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraFineSpiralDifferentialOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraFineSpiralDifferentialOptimizerV7").set_name("LLAMAUltraFineSpiralDifferentialOptimizerV7", register=True) except Exception as e: print("UltraFineSpiralDifferentialOptimizerV7 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizer import ( - UltraFineTunedEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizer import UltraFineTunedEvolutionaryOptimizer lama_register["UltraFineTunedEvolutionaryOptimizer"] = UltraFineTunedEvolutionaryOptimizer - LLAMAUltraFineTunedEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAUltraFineTunedEvolutionaryOptimizer" - ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraFineTunedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizer").set_name("LLAMAUltraFineTunedEvolutionaryOptimizer", register=True) except Exception as e: print("UltraFineTunedEvolutionaryOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizerV24 import ( - UltraFineTunedEvolutionaryOptimizerV24, - ) + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizerV24 import UltraFineTunedEvolutionaryOptimizerV24 lama_register["UltraFineTunedEvolutionaryOptimizerV24"] = UltraFineTunedEvolutionaryOptimizerV24 - LLAMAUltraFineTunedEvolutionaryOptimizerV24 = NonObjectOptimizer( - method="LLAMAUltraFineTunedEvolutionaryOptimizerV24" - ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizerV24", register=True) + res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraFineTunedEvolutionaryOptimizerV24 = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizerV24").set_name("LLAMAUltraFineTunedEvolutionaryOptimizerV24", register=True) except Exception as e: print("UltraFineTunedEvolutionaryOptimizerV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV18 import ( - UltraOptimizedDynamicPrecisionOptimizerV18, - ) + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV18 import UltraOptimizedDynamicPrecisionOptimizerV18 lama_register["UltraOptimizedDynamicPrecisionOptimizerV18"] = UltraOptimizedDynamicPrecisionOptimizerV18 - LLAMAUltraOptimizedDynamicPrecisionOptimizerV18 = NonObjectOptimizer( - method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18" - ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV18", register=True) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV18 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV18", register=True) except Exception as e: print("UltraOptimizedDynamicPrecisionOptimizerV18 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV19 import ( - UltraOptimizedDynamicPrecisionOptimizerV19, - ) + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV19 import UltraOptimizedDynamicPrecisionOptimizerV19 lama_register["UltraOptimizedDynamicPrecisionOptimizerV19"] = UltraOptimizedDynamicPrecisionOptimizerV19 - LLAMAUltraOptimizedDynamicPrecisionOptimizerV19 = NonObjectOptimizer( - method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19" - ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV19", register=True) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV19 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV19", register=True) except Exception as e: print("UltraOptimizedDynamicPrecisionOptimizerV19 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV52 import ( - UltraOptimizedDynamicPrecisionOptimizerV52, - ) + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV52 import UltraOptimizedDynamicPrecisionOptimizerV52 lama_register["UltraOptimizedDynamicPrecisionOptimizerV52"] = UltraOptimizedDynamicPrecisionOptimizerV52 - LLAMAUltraOptimizedDynamicPrecisionOptimizerV52 = NonObjectOptimizer( - method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52" - ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV52", register=True) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV52 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV52", register=True) except Exception as e: print("UltraOptimizedDynamicPrecisionOptimizerV52 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV53 import ( - UltraOptimizedDynamicPrecisionOptimizerV53, - ) + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV53 import UltraOptimizedDynamicPrecisionOptimizerV53 lama_register["UltraOptimizedDynamicPrecisionOptimizerV53"] = UltraOptimizedDynamicPrecisionOptimizerV53 - LLAMAUltraOptimizedDynamicPrecisionOptimizerV53 = NonObjectOptimizer( - method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53" - ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV53", register=True) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV53 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV53", register=True) except Exception as e: print("UltraOptimizedDynamicPrecisionOptimizerV53 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedEvolutionaryGradientOptimizerV30 import ( - UltraOptimizedEvolutionaryGradientOptimizerV30, - ) + from nevergrad.optimization.lama.UltraOptimizedEvolutionaryGradientOptimizerV30 import UltraOptimizedEvolutionaryGradientOptimizerV30 - lama_register["UltraOptimizedEvolutionaryGradientOptimizerV30"] = ( - UltraOptimizedEvolutionaryGradientOptimizerV30 - ) - LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30 = NonObjectOptimizer( - method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30" - ).set_name("LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30", register=True) + lama_register["UltraOptimizedEvolutionaryGradientOptimizerV30"] = UltraOptimizedEvolutionaryGradientOptimizerV30 + res = NonObjectOptimizer(method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30 = NonObjectOptimizer(method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30").set_name("LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30", register=True) except Exception as e: print("UltraOptimizedEvolutionaryGradientOptimizerV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer import ( - UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer, - ) + from nevergrad.optimization.lama.UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer import UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer - lama_register["UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer"] = ( - UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer - ) - LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( - method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer" - ).set_name("LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer", register=True) + lama_register["UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer"] = UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer + res = NonObjectOptimizer(method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer").set_name("LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer", register=True) except Exception as e: print("UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraOptimizedRAMEDS import UltraOptimizedRAMEDS lama_register["UltraOptimizedRAMEDS"] = UltraOptimizedRAMEDS - LLAMAUltraOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS").set_name( - "LLAMAUltraOptimizedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS").set_name("LLAMAUltraOptimizedRAMEDS", register=True) except Exception as e: print("UltraOptimizedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraOptimizedSpiralDifferentialEvolution import ( - UltraOptimizedSpiralDifferentialEvolution, - ) + from nevergrad.optimization.lama.UltraOptimizedSpiralDifferentialEvolution import UltraOptimizedSpiralDifferentialEvolution lama_register["UltraOptimizedSpiralDifferentialEvolution"] = UltraOptimizedSpiralDifferentialEvolution - LLAMAUltraOptimizedSpiralDifferentialEvolution = NonObjectOptimizer( - method="LLAMAUltraOptimizedSpiralDifferentialEvolution" - ).set_name("LLAMAUltraOptimizedSpiralDifferentialEvolution", register=True) + res = NonObjectOptimizer(method="LLAMAUltraOptimizedSpiralDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraOptimizedSpiralDifferentialEvolution = NonObjectOptimizer(method="LLAMAUltraOptimizedSpiralDifferentialEvolution").set_name("LLAMAUltraOptimizedSpiralDifferentialEvolution", register=True) except Exception as e: print("UltraOptimizedSpiralDifferentialEvolution can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraPreciseDynamicOptimizerV26 import UltraPreciseDynamicOptimizerV26 lama_register["UltraPreciseDynamicOptimizerV26"] = UltraPreciseDynamicOptimizerV26 - LLAMAUltraPreciseDynamicOptimizerV26 = NonObjectOptimizer( - method="LLAMAUltraPreciseDynamicOptimizerV26" - ).set_name("LLAMAUltraPreciseDynamicOptimizerV26", register=True) + res = NonObjectOptimizer(method="LLAMAUltraPreciseDynamicOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraPreciseDynamicOptimizerV26 = NonObjectOptimizer(method="LLAMAUltraPreciseDynamicOptimizerV26").set_name("LLAMAUltraPreciseDynamicOptimizerV26", register=True) except Exception as e: print("UltraPreciseDynamicOptimizerV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraPrecisionSpiralDifferentialOptimizerV9 import ( - UltraPrecisionSpiralDifferentialOptimizerV9, - ) + from nevergrad.optimization.lama.UltraPrecisionSpiralDifferentialOptimizerV9 import UltraPrecisionSpiralDifferentialOptimizerV9 lama_register["UltraPrecisionSpiralDifferentialOptimizerV9"] = UltraPrecisionSpiralDifferentialOptimizerV9 - LLAMAUltraPrecisionSpiralDifferentialOptimizerV9 = NonObjectOptimizer( - method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9" - ).set_name("LLAMAUltraPrecisionSpiralDifferentialOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraPrecisionSpiralDifferentialOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9").set_name("LLAMAUltraPrecisionSpiralDifferentialOptimizerV9", register=True) except Exception as e: print("UltraPrecisionSpiralDifferentialOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraQuantumReactiveHybridStrategy import ( - UltraQuantumReactiveHybridStrategy, - ) + from nevergrad.optimization.lama.UltraQuantumReactiveHybridStrategy import UltraQuantumReactiveHybridStrategy lama_register["UltraQuantumReactiveHybridStrategy"] = UltraQuantumReactiveHybridStrategy - LLAMAUltraQuantumReactiveHybridStrategy = NonObjectOptimizer( - method="LLAMAUltraQuantumReactiveHybridStrategy" - ).set_name("LLAMAUltraQuantumReactiveHybridStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAUltraQuantumReactiveHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraQuantumReactiveHybridStrategy = NonObjectOptimizer(method="LLAMAUltraQuantumReactiveHybridStrategy").set_name("LLAMAUltraQuantumReactiveHybridStrategy", register=True) except Exception as e: print("UltraQuantumReactiveHybridStrategy can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraRAMEDS import UltraRAMEDS lama_register["UltraRAMEDS"] = UltraRAMEDS - LLAMAUltraRAMEDS = NonObjectOptimizer(method="LLAMAUltraRAMEDS").set_name( - "LLAMAUltraRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRAMEDS = NonObjectOptimizer(method="LLAMAUltraRAMEDS").set_name("LLAMAUltraRAMEDS", register=True) except Exception as e: print("UltraRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveConvergenceStrategy import ( - UltraRefinedAdaptiveConvergenceStrategy, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptiveConvergenceStrategy import UltraRefinedAdaptiveConvergenceStrategy lama_register["UltraRefinedAdaptiveConvergenceStrategy"] = UltraRefinedAdaptiveConvergenceStrategy - LLAMAUltraRefinedAdaptiveConvergenceStrategy = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptiveConvergenceStrategy" - ).set_name("LLAMAUltraRefinedAdaptiveConvergenceStrategy", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveConvergenceStrategy = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveConvergenceStrategy").set_name("LLAMAUltraRefinedAdaptiveConvergenceStrategy", register=True) except Exception as e: print("UltraRefinedAdaptiveConvergenceStrategy can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV5 import ( - UltraRefinedAdaptiveMemoryHybridOptimizerV5, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV5 import UltraRefinedAdaptiveMemoryHybridOptimizerV5 lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV5"] = UltraRefinedAdaptiveMemoryHybridOptimizerV5 - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5" - ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5", register=True) except Exception as e: print("UltraRefinedAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV6 import ( - UltraRefinedAdaptiveMemoryHybridOptimizerV6, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV6 import UltraRefinedAdaptiveMemoryHybridOptimizerV6 lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV6"] = UltraRefinedAdaptiveMemoryHybridOptimizerV6 - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6" - ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6", register=True) except Exception as e: print("UltraRefinedAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV8 import ( - UltraRefinedAdaptiveMemoryHybridOptimizerV8, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV8 import UltraRefinedAdaptiveMemoryHybridOptimizerV8 lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV8"] = UltraRefinedAdaptiveMemoryHybridOptimizerV8 - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8 = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8" - ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8", register=True) except Exception as e: print("UltraRefinedAdaptiveMemoryHybridOptimizerV8 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV9 import ( - UltraRefinedAdaptiveMemoryHybridOptimizerV9, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV9 import UltraRefinedAdaptiveMemoryHybridOptimizerV9 lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV9"] = UltraRefinedAdaptiveMemoryHybridOptimizerV9 - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9 = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9" - ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9", register=True) except Exception as e: print("UltraRefinedAdaptiveMemoryHybridOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedAdaptivePrecisionOptimizer import ( - UltraRefinedAdaptivePrecisionOptimizer, - ) + from nevergrad.optimization.lama.UltraRefinedAdaptivePrecisionOptimizer import UltraRefinedAdaptivePrecisionOptimizer lama_register["UltraRefinedAdaptivePrecisionOptimizer"] = UltraRefinedAdaptivePrecisionOptimizer - LLAMAUltraRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( - method="LLAMAUltraRefinedAdaptivePrecisionOptimizer" - ).set_name("LLAMAUltraRefinedAdaptivePrecisionOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptivePrecisionOptimizer").set_name("LLAMAUltraRefinedAdaptivePrecisionOptimizer", register=True) except Exception as e: print("UltraRefinedAdaptivePrecisionOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraRefinedAdaptiveRAMEDS import UltraRefinedAdaptiveRAMEDS lama_register["UltraRefinedAdaptiveRAMEDS"] = UltraRefinedAdaptiveRAMEDS - LLAMAUltraRefinedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS").set_name( - "LLAMAUltraRefinedAdaptiveRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS").set_name("LLAMAUltraRefinedAdaptiveRAMEDS", register=True) except Exception as e: print("UltraRefinedAdaptiveRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedConvergenceSpiralSearch import ( - UltraRefinedConvergenceSpiralSearch, - ) + from nevergrad.optimization.lama.UltraRefinedConvergenceSpiralSearch import UltraRefinedConvergenceSpiralSearch lama_register["UltraRefinedConvergenceSpiralSearch"] = UltraRefinedConvergenceSpiralSearch - LLAMAUltraRefinedConvergenceSpiralSearch = NonObjectOptimizer( - method="LLAMAUltraRefinedConvergenceSpiralSearch" - ).set_name("LLAMAUltraRefinedConvergenceSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedConvergenceSpiralSearch = NonObjectOptimizer(method="LLAMAUltraRefinedConvergenceSpiralSearch").set_name("LLAMAUltraRefinedConvergenceSpiralSearch", register=True) except Exception as e: print("UltraRefinedConvergenceSpiralSearch can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV10 import ( - UltraRefinedDynamicPrecisionOptimizerV10, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV10 import UltraRefinedDynamicPrecisionOptimizerV10 lama_register["UltraRefinedDynamicPrecisionOptimizerV10"] = UltraRefinedDynamicPrecisionOptimizerV10 - LLAMAUltraRefinedDynamicPrecisionOptimizerV10 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV10", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV10", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV11 import ( - UltraRefinedDynamicPrecisionOptimizerV11, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV11 import UltraRefinedDynamicPrecisionOptimizerV11 lama_register["UltraRefinedDynamicPrecisionOptimizerV11"] = UltraRefinedDynamicPrecisionOptimizerV11 - LLAMAUltraRefinedDynamicPrecisionOptimizerV11 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV11", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV11 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV11", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV11 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV17 import ( - UltraRefinedDynamicPrecisionOptimizerV17, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV17 import UltraRefinedDynamicPrecisionOptimizerV17 lama_register["UltraRefinedDynamicPrecisionOptimizerV17"] = UltraRefinedDynamicPrecisionOptimizerV17 - LLAMAUltraRefinedDynamicPrecisionOptimizerV17 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV17", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV17 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV17", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV17 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV22 import ( - UltraRefinedDynamicPrecisionOptimizerV22, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV22 import UltraRefinedDynamicPrecisionOptimizerV22 lama_register["UltraRefinedDynamicPrecisionOptimizerV22"] = UltraRefinedDynamicPrecisionOptimizerV22 - LLAMAUltraRefinedDynamicPrecisionOptimizerV22 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV22", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV22 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV22", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV22 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV23 import ( - UltraRefinedDynamicPrecisionOptimizerV23, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV23 import UltraRefinedDynamicPrecisionOptimizerV23 lama_register["UltraRefinedDynamicPrecisionOptimizerV23"] = UltraRefinedDynamicPrecisionOptimizerV23 - LLAMAUltraRefinedDynamicPrecisionOptimizerV23 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV23", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV23 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV23", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV23 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV24 import ( - UltraRefinedDynamicPrecisionOptimizerV24, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV24 import UltraRefinedDynamicPrecisionOptimizerV24 lama_register["UltraRefinedDynamicPrecisionOptimizerV24"] = UltraRefinedDynamicPrecisionOptimizerV24 - LLAMAUltraRefinedDynamicPrecisionOptimizerV24 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV24", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV24 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV24", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV24 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV25 import ( - UltraRefinedDynamicPrecisionOptimizerV25, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV25 import UltraRefinedDynamicPrecisionOptimizerV25 lama_register["UltraRefinedDynamicPrecisionOptimizerV25"] = UltraRefinedDynamicPrecisionOptimizerV25 - LLAMAUltraRefinedDynamicPrecisionOptimizerV25 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV25", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV25 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV25", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV25 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV26 import ( - UltraRefinedDynamicPrecisionOptimizerV26, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV26 import UltraRefinedDynamicPrecisionOptimizerV26 lama_register["UltraRefinedDynamicPrecisionOptimizerV26"] = UltraRefinedDynamicPrecisionOptimizerV26 - LLAMAUltraRefinedDynamicPrecisionOptimizerV26 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV26", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV26 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV26", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV26 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV27 import ( - UltraRefinedDynamicPrecisionOptimizerV27, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV27 import UltraRefinedDynamicPrecisionOptimizerV27 lama_register["UltraRefinedDynamicPrecisionOptimizerV27"] = UltraRefinedDynamicPrecisionOptimizerV27 - LLAMAUltraRefinedDynamicPrecisionOptimizerV27 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV27", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV27 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV27", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV27 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV28 import ( - UltraRefinedDynamicPrecisionOptimizerV28, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV28 import UltraRefinedDynamicPrecisionOptimizerV28 lama_register["UltraRefinedDynamicPrecisionOptimizerV28"] = UltraRefinedDynamicPrecisionOptimizerV28 - LLAMAUltraRefinedDynamicPrecisionOptimizerV28 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV28", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV28 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV28", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV28 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV29 import ( - UltraRefinedDynamicPrecisionOptimizerV29, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV29 import UltraRefinedDynamicPrecisionOptimizerV29 lama_register["UltraRefinedDynamicPrecisionOptimizerV29"] = UltraRefinedDynamicPrecisionOptimizerV29 - LLAMAUltraRefinedDynamicPrecisionOptimizerV29 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV29", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV29 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV29", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV29 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV30 import ( - UltraRefinedDynamicPrecisionOptimizerV30, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV30 import UltraRefinedDynamicPrecisionOptimizerV30 lama_register["UltraRefinedDynamicPrecisionOptimizerV30"] = UltraRefinedDynamicPrecisionOptimizerV30 - LLAMAUltraRefinedDynamicPrecisionOptimizerV30 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV30", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV30 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV30", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV30 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV31 import ( - UltraRefinedDynamicPrecisionOptimizerV31, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV31 import UltraRefinedDynamicPrecisionOptimizerV31 lama_register["UltraRefinedDynamicPrecisionOptimizerV31"] = UltraRefinedDynamicPrecisionOptimizerV31 - LLAMAUltraRefinedDynamicPrecisionOptimizerV31 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV31", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV31 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV31", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV31 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV32 import ( - UltraRefinedDynamicPrecisionOptimizerV32, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV32 import UltraRefinedDynamicPrecisionOptimizerV32 lama_register["UltraRefinedDynamicPrecisionOptimizerV32"] = UltraRefinedDynamicPrecisionOptimizerV32 - LLAMAUltraRefinedDynamicPrecisionOptimizerV32 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV32", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV32 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV32", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV32 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV33 import ( - UltraRefinedDynamicPrecisionOptimizerV33, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV33 import UltraRefinedDynamicPrecisionOptimizerV33 lama_register["UltraRefinedDynamicPrecisionOptimizerV33"] = UltraRefinedDynamicPrecisionOptimizerV33 - LLAMAUltraRefinedDynamicPrecisionOptimizerV33 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV33", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV33 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV33", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV33 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV34 import ( - UltraRefinedDynamicPrecisionOptimizerV34, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV34 import UltraRefinedDynamicPrecisionOptimizerV34 lama_register["UltraRefinedDynamicPrecisionOptimizerV34"] = UltraRefinedDynamicPrecisionOptimizerV34 - LLAMAUltraRefinedDynamicPrecisionOptimizerV34 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV34", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV34 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV34", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV34 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV35 import ( - UltraRefinedDynamicPrecisionOptimizerV35, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV35 import UltraRefinedDynamicPrecisionOptimizerV35 lama_register["UltraRefinedDynamicPrecisionOptimizerV35"] = UltraRefinedDynamicPrecisionOptimizerV35 - LLAMAUltraRefinedDynamicPrecisionOptimizerV35 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV35", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV35 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV35", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV35 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV36 import ( - UltraRefinedDynamicPrecisionOptimizerV36, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV36 import UltraRefinedDynamicPrecisionOptimizerV36 lama_register["UltraRefinedDynamicPrecisionOptimizerV36"] = UltraRefinedDynamicPrecisionOptimizerV36 - LLAMAUltraRefinedDynamicPrecisionOptimizerV36 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV36", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV36 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV36", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV36 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV37 import ( - UltraRefinedDynamicPrecisionOptimizerV37, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV37 import UltraRefinedDynamicPrecisionOptimizerV37 lama_register["UltraRefinedDynamicPrecisionOptimizerV37"] = UltraRefinedDynamicPrecisionOptimizerV37 - LLAMAUltraRefinedDynamicPrecisionOptimizerV37 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV37", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV37 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV37", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV37 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV38 import ( - UltraRefinedDynamicPrecisionOptimizerV38, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV38 import UltraRefinedDynamicPrecisionOptimizerV38 lama_register["UltraRefinedDynamicPrecisionOptimizerV38"] = UltraRefinedDynamicPrecisionOptimizerV38 - LLAMAUltraRefinedDynamicPrecisionOptimizerV38 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV38", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV38 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV38", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV38 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV39 import ( - UltraRefinedDynamicPrecisionOptimizerV39, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV39 import UltraRefinedDynamicPrecisionOptimizerV39 lama_register["UltraRefinedDynamicPrecisionOptimizerV39"] = UltraRefinedDynamicPrecisionOptimizerV39 - LLAMAUltraRefinedDynamicPrecisionOptimizerV39 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV39", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV39 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV39", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV39 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV4 import ( - UltraRefinedDynamicPrecisionOptimizerV4, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV4 import UltraRefinedDynamicPrecisionOptimizerV4 lama_register["UltraRefinedDynamicPrecisionOptimizerV4"] = UltraRefinedDynamicPrecisionOptimizerV4 - LLAMAUltraRefinedDynamicPrecisionOptimizerV4 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV4", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV4 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV4", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV4 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV40 import ( - UltraRefinedDynamicPrecisionOptimizerV40, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV40 import UltraRefinedDynamicPrecisionOptimizerV40 lama_register["UltraRefinedDynamicPrecisionOptimizerV40"] = UltraRefinedDynamicPrecisionOptimizerV40 - LLAMAUltraRefinedDynamicPrecisionOptimizerV40 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV40", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV40 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV40", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV40 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV41 import ( - UltraRefinedDynamicPrecisionOptimizerV41, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV41 import UltraRefinedDynamicPrecisionOptimizerV41 lama_register["UltraRefinedDynamicPrecisionOptimizerV41"] = UltraRefinedDynamicPrecisionOptimizerV41 - LLAMAUltraRefinedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV41", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV41 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV41", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV41 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV44 import ( - UltraRefinedDynamicPrecisionOptimizerV44, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV44 import UltraRefinedDynamicPrecisionOptimizerV44 lama_register["UltraRefinedDynamicPrecisionOptimizerV44"] = UltraRefinedDynamicPrecisionOptimizerV44 - LLAMAUltraRefinedDynamicPrecisionOptimizerV44 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV44", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV44 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV44", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV44 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV45 import ( - UltraRefinedDynamicPrecisionOptimizerV45, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV45 import UltraRefinedDynamicPrecisionOptimizerV45 lama_register["UltraRefinedDynamicPrecisionOptimizerV45"] = UltraRefinedDynamicPrecisionOptimizerV45 - LLAMAUltraRefinedDynamicPrecisionOptimizerV45 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV45", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV45 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV45", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV45 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV46 import ( - UltraRefinedDynamicPrecisionOptimizerV46, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV46 import UltraRefinedDynamicPrecisionOptimizerV46 lama_register["UltraRefinedDynamicPrecisionOptimizerV46"] = UltraRefinedDynamicPrecisionOptimizerV46 - LLAMAUltraRefinedDynamicPrecisionOptimizerV46 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV46", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV46 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV46", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV46 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV47 import ( - UltraRefinedDynamicPrecisionOptimizerV47, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV47 import UltraRefinedDynamicPrecisionOptimizerV47 lama_register["UltraRefinedDynamicPrecisionOptimizerV47"] = UltraRefinedDynamicPrecisionOptimizerV47 - LLAMAUltraRefinedDynamicPrecisionOptimizerV47 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV47", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV47 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV47", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV47 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV5 import ( - UltraRefinedDynamicPrecisionOptimizerV5, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV5 import UltraRefinedDynamicPrecisionOptimizerV5 lama_register["UltraRefinedDynamicPrecisionOptimizerV5"] = UltraRefinedDynamicPrecisionOptimizerV5 - LLAMAUltraRefinedDynamicPrecisionOptimizerV5 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV5", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV5", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV54 import ( - UltraRefinedDynamicPrecisionOptimizerV54, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV54 import UltraRefinedDynamicPrecisionOptimizerV54 lama_register["UltraRefinedDynamicPrecisionOptimizerV54"] = UltraRefinedDynamicPrecisionOptimizerV54 - LLAMAUltraRefinedDynamicPrecisionOptimizerV54 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV54", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV54 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV54", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV54 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV55 import ( - UltraRefinedDynamicPrecisionOptimizerV55, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV55 import UltraRefinedDynamicPrecisionOptimizerV55 lama_register["UltraRefinedDynamicPrecisionOptimizerV55"] = UltraRefinedDynamicPrecisionOptimizerV55 - LLAMAUltraRefinedDynamicPrecisionOptimizerV55 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV55", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV55 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV55", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV55 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV56 import ( - UltraRefinedDynamicPrecisionOptimizerV56, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV56 import UltraRefinedDynamicPrecisionOptimizerV56 lama_register["UltraRefinedDynamicPrecisionOptimizerV56"] = UltraRefinedDynamicPrecisionOptimizerV56 - LLAMAUltraRefinedDynamicPrecisionOptimizerV56 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV56", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV56 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV56", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV56 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV9 import ( - UltraRefinedDynamicPrecisionOptimizerV9, - ) + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV9 import UltraRefinedDynamicPrecisionOptimizerV9 lama_register["UltraRefinedDynamicPrecisionOptimizerV9"] = UltraRefinedDynamicPrecisionOptimizerV9 - LLAMAUltraRefinedDynamicPrecisionOptimizerV9 = NonObjectOptimizer( - method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9" - ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV9", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV9", register=True) except Exception as e: print("UltraRefinedDynamicPrecisionOptimizerV9 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( - UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, - ) + from nevergrad.optimization.lama.UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - lama_register["UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( - UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - ) - LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( - method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" - ).set_name("LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) + lama_register["UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + res = NonObjectOptimizer(method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) except Exception as e: print("UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientHybridOptimizerV5 import ( - UltraRefinedEvolutionaryGradientHybridOptimizerV5, - ) + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientHybridOptimizerV5 import UltraRefinedEvolutionaryGradientHybridOptimizerV5 - lama_register["UltraRefinedEvolutionaryGradientHybridOptimizerV5"] = ( - UltraRefinedEvolutionaryGradientHybridOptimizerV5 - ) - LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5 = NonObjectOptimizer( - method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5" - ).set_name("LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5", register=True) + lama_register["UltraRefinedEvolutionaryGradientHybridOptimizerV5"] = UltraRefinedEvolutionaryGradientHybridOptimizerV5 + res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5").set_name("LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5", register=True) except Exception as e: print("UltraRefinedEvolutionaryGradientHybridOptimizerV5 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV10 import ( - UltraRefinedEvolutionaryGradientOptimizerV10, - ) + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV10 import UltraRefinedEvolutionaryGradientOptimizerV10 - lama_register["UltraRefinedEvolutionaryGradientOptimizerV10"] = ( - UltraRefinedEvolutionaryGradientOptimizerV10 - ) - LLAMAUltraRefinedEvolutionaryGradientOptimizerV10 = NonObjectOptimizer( - method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10" - ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV10", register=True) + lama_register["UltraRefinedEvolutionaryGradientOptimizerV10"] = UltraRefinedEvolutionaryGradientOptimizerV10 + res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedEvolutionaryGradientOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10").set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV10", register=True) except Exception as e: print("UltraRefinedEvolutionaryGradientOptimizerV10 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV32 import ( - UltraRefinedEvolutionaryGradientOptimizerV32, - ) + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV32 import UltraRefinedEvolutionaryGradientOptimizerV32 - lama_register["UltraRefinedEvolutionaryGradientOptimizerV32"] = ( - UltraRefinedEvolutionaryGradientOptimizerV32 - ) - LLAMAUltraRefinedEvolutionaryGradientOptimizerV32 = NonObjectOptimizer( - method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32" - ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV32", register=True) + lama_register["UltraRefinedEvolutionaryGradientOptimizerV32"] = UltraRefinedEvolutionaryGradientOptimizerV32 + res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedEvolutionaryGradientOptimizerV32 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32").set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV32", register=True) except Exception as e: print("UltraRefinedEvolutionaryGradientOptimizerV32 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedHybridEvolutionaryAnnealingOptimizer import ( - UltraRefinedHybridEvolutionaryAnnealingOptimizer, - ) + from nevergrad.optimization.lama.UltraRefinedHybridEvolutionaryAnnealingOptimizer import UltraRefinedHybridEvolutionaryAnnealingOptimizer - lama_register["UltraRefinedHybridEvolutionaryAnnealingOptimizer"] = ( - UltraRefinedHybridEvolutionaryAnnealingOptimizer - ) - LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( - method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer" - ).set_name("LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer", register=True) + lama_register["UltraRefinedHybridEvolutionaryAnnealingOptimizer"] = UltraRefinedHybridEvolutionaryAnnealingOptimizer + res = NonObjectOptimizer(method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer", register=True) except Exception as e: print("UltraRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV50 import ( - UltraRefinedHyperStrategicOptimizerV50, - ) + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV50 import UltraRefinedHyperStrategicOptimizerV50 lama_register["UltraRefinedHyperStrategicOptimizerV50"] = UltraRefinedHyperStrategicOptimizerV50 - LLAMAUltraRefinedHyperStrategicOptimizerV50 = NonObjectOptimizer( - method="LLAMAUltraRefinedHyperStrategicOptimizerV50" - ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV50", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedHyperStrategicOptimizerV50 = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV50").set_name("LLAMAUltraRefinedHyperStrategicOptimizerV50", register=True) except Exception as e: print("UltraRefinedHyperStrategicOptimizerV50 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV54 import ( - UltraRefinedHyperStrategicOptimizerV54, - ) + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV54 import UltraRefinedHyperStrategicOptimizerV54 lama_register["UltraRefinedHyperStrategicOptimizerV54"] = UltraRefinedHyperStrategicOptimizerV54 - LLAMAUltraRefinedHyperStrategicOptimizerV54 = NonObjectOptimizer( - method="LLAMAUltraRefinedHyperStrategicOptimizerV54" - ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV54", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedHyperStrategicOptimizerV54 = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV54").set_name("LLAMAUltraRefinedHyperStrategicOptimizerV54", register=True) except Exception as e: print("UltraRefinedHyperStrategicOptimizerV54 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedPrecisionEvolutionaryOptimizerV43 import ( - UltraRefinedPrecisionEvolutionaryOptimizerV43, - ) + from nevergrad.optimization.lama.UltraRefinedPrecisionEvolutionaryOptimizerV43 import UltraRefinedPrecisionEvolutionaryOptimizerV43 - lama_register["UltraRefinedPrecisionEvolutionaryOptimizerV43"] = ( - UltraRefinedPrecisionEvolutionaryOptimizerV43 - ) - LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( - method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43" - ).set_name("LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43", register=True) + lama_register["UltraRefinedPrecisionEvolutionaryOptimizerV43"] = UltraRefinedPrecisionEvolutionaryOptimizerV43 + res = NonObjectOptimizer(method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer(method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43").set_name("LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43", register=True) except Exception as e: print("UltraRefinedPrecisionEvolutionaryOptimizerV43 can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraRefinedRAMEDS import UltraRefinedRAMEDS lama_register["UltraRefinedRAMEDS"] = UltraRefinedRAMEDS - LLAMAUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS").set_name( - "LLAMAUltraRefinedRAMEDS", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS").set_name("LLAMAUltraRefinedRAMEDS", register=True) except Exception as e: print("UltraRefinedRAMEDS can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedSpiralDifferentialClimberV3 import ( - UltraRefinedSpiralDifferentialClimberV3, - ) + from nevergrad.optimization.lama.UltraRefinedSpiralDifferentialClimberV3 import UltraRefinedSpiralDifferentialClimberV3 lama_register["UltraRefinedSpiralDifferentialClimberV3"] = UltraRefinedSpiralDifferentialClimberV3 - LLAMAUltraRefinedSpiralDifferentialClimberV3 = NonObjectOptimizer( - method="LLAMAUltraRefinedSpiralDifferentialClimberV3" - ).set_name("LLAMAUltraRefinedSpiralDifferentialClimberV3", register=True) + res = NonObjectOptimizer(method="LLAMAUltraRefinedSpiralDifferentialClimberV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedSpiralDifferentialClimberV3 = NonObjectOptimizer(method="LLAMAUltraRefinedSpiralDifferentialClimberV3").set_name("LLAMAUltraRefinedSpiralDifferentialClimberV3", register=True) except Exception as e: print("UltraRefinedSpiralDifferentialClimberV3 can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraRefinedStrategicEvolutionaryOptimizerV60 import ( - UltraRefinedStrategicEvolutionaryOptimizerV60, - ) + from nevergrad.optimization.lama.UltraRefinedStrategicEvolutionaryOptimizerV60 import UltraRefinedStrategicEvolutionaryOptimizerV60 - lama_register["UltraRefinedStrategicEvolutionaryOptimizerV60"] = ( - UltraRefinedStrategicEvolutionaryOptimizerV60 - ) - LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60 = NonObjectOptimizer( - method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60" - ).set_name("LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60", register=True) + lama_register["UltraRefinedStrategicEvolutionaryOptimizerV60"] = UltraRefinedStrategicEvolutionaryOptimizerV60 + res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60 = NonObjectOptimizer(method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60").set_name("LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60", register=True) except Exception as e: print("UltraRefinedStrategicEvolutionaryOptimizerV60 can not be imported: ", e) - try: from nevergrad.optimization.lama.UltraRefinedStrategyDE import UltraRefinedStrategyDE lama_register["UltraRefinedStrategyDE"] = UltraRefinedStrategyDE - LLAMAUltraRefinedStrategyDE = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE").set_name( - "LLAMAUltraRefinedStrategyDE", register=True - ) + res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraRefinedStrategyDE = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE").set_name("LLAMAUltraRefinedStrategyDE", register=True) except Exception as e: print("UltraRefinedStrategyDE can not be imported: ", e) - try: - from nevergrad.optimization.lama.UltraSupremeEvolutionaryGradientHybridOptimizerV7 import ( - UltraSupremeEvolutionaryGradientHybridOptimizerV7, - ) + from nevergrad.optimization.lama.UltraSupremeEvolutionaryGradientHybridOptimizerV7 import UltraSupremeEvolutionaryGradientHybridOptimizerV7 - lama_register["UltraSupremeEvolutionaryGradientHybridOptimizerV7"] = ( - UltraSupremeEvolutionaryGradientHybridOptimizerV7 - ) - LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7 = NonObjectOptimizer( - method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7" - ).set_name("LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7", register=True) + lama_register["UltraSupremeEvolutionaryGradientHybridOptimizerV7"] = UltraSupremeEvolutionaryGradientHybridOptimizerV7 + res = NonObjectOptimizer(method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7").set_name("LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7", register=True) except Exception as e: print("UltraSupremeEvolutionaryGradientHybridOptimizerV7 can not be imported: ", e) - try: from nevergrad.optimization.lama.UnifiedAdaptiveMemeticOptimizer import UnifiedAdaptiveMemeticOptimizer lama_register["UnifiedAdaptiveMemeticOptimizer"] = UnifiedAdaptiveMemeticOptimizer - LLAMAUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( - method="LLAMAUnifiedAdaptiveMemeticOptimizer" - ).set_name("LLAMAUnifiedAdaptiveMemeticOptimizer", register=True) + res = NonObjectOptimizer(method="LLAMAUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAUnifiedAdaptiveMemeticOptimizer").set_name("LLAMAUnifiedAdaptiveMemeticOptimizer", register=True) except Exception as e: print("UnifiedAdaptiveMemeticOptimizer can not be imported: ", e) - try: from nevergrad.optimization.lama.VectorizedRefinedSpiralSearch import VectorizedRefinedSpiralSearch lama_register["VectorizedRefinedSpiralSearch"] = VectorizedRefinedSpiralSearch - LLAMAVectorizedRefinedSpiralSearch = NonObjectOptimizer( - method="LLAMAVectorizedRefinedSpiralSearch" - ).set_name("LLAMAVectorizedRefinedSpiralSearch", register=True) + res = NonObjectOptimizer(method="LLAMAVectorizedRefinedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + LLAMAVectorizedRefinedSpiralSearch = NonObjectOptimizer(method="LLAMAVectorizedRefinedSpiralSearch").set_name("LLAMAVectorizedRefinedSpiralSearch", register=True) except Exception as e: print("VectorizedRefinedSpiralSearch can not be imported: ", e) - try: from nevergrad.optimization.lama.eQGSA_v2 import eQGSA_v2 lama_register["eQGSA_v2"] = eQGSA_v2 + res = NonObjectOptimizer(method="LLAMAeQGSA_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value LLAMAeQGSA_v2 = NonObjectOptimizer(method="LLAMAeQGSA_v2").set_name("LLAMAeQGSA_v2", register=True) except Exception as e: print("eQGSA_v2 can not be imported: ", e) From 4e7790d45f99b01c2db890cf6c5d2a793df24b87 Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Mon, 24 Jun 2024 17:18:48 +0200 Subject: [PATCH 4/6] po --- nevergrad/optimization/recastlib.py | 53430 ++++++++++++++++---------- 1 file changed, 33935 insertions(+), 19495 deletions(-) diff --git a/nevergrad/optimization/recastlib.py b/nevergrad/optimization/recastlib.py index 5c00f9f9d..5801c7b09 100644 --- a/nevergrad/optimization/recastlib.py +++ b/nevergrad/optimization/recastlib.py @@ -1025,30107 +1025,44547 @@ def _evaluate(self, X, out, *args, **kwargs): ###### LLAMA ####### lama_register = {} -try: +try: # AADCCS from nevergrad.optimization.lama.AADCCS import AADCCS lama_register["AADCCS"] = AADCCS - res = NonObjectOptimizer(method="LLAMAAADCCS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAADCCS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAADCCS = NonObjectOptimizer(method="LLAMAAADCCS").set_name("LLAMAAADCCS", register=True) -except Exception as e: +except Exception as e: # AADCCS print("AADCCS can not be imported: ", e) -try: +try: # AADEHLS from nevergrad.optimization.lama.AADEHLS import AADEHLS lama_register["AADEHLS"] = AADEHLS - res = NonObjectOptimizer(method="LLAMAAADEHLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAADEHLS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAADEHLS = NonObjectOptimizer(method="LLAMAAADEHLS").set_name("LLAMAAADEHLS", register=True) -except Exception as e: +except Exception as e: # AADEHLS print("AADEHLS can not be imported: ", e) -try: +try: # AADMEM from nevergrad.optimization.lama.AADMEM import AADMEM lama_register["AADMEM"] = AADMEM - res = NonObjectOptimizer(method="LLAMAAADMEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAADMEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAADMEM = NonObjectOptimizer(method="LLAMAAADMEM").set_name("LLAMAAADMEM", register=True) -except Exception as e: +except Exception as e: # AADMEM print("AADMEM can not be imported: ", e) -try: +try: # AAES from nevergrad.optimization.lama.AAES import AAES lama_register["AAES"] = AAES - res = NonObjectOptimizer(method="LLAMAAAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAAES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAAES = NonObjectOptimizer(method="LLAMAAAES").set_name("LLAMAAAES", register=True) -except Exception as e: +except Exception as e: # AAES print("AAES can not be imported: ", e) -try: +try: # ACDE from nevergrad.optimization.lama.ACDE import ACDE lama_register["ACDE"] = ACDE - res = NonObjectOptimizer(method="LLAMAACDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAACDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAACDE = NonObjectOptimizer(method="LLAMAACDE").set_name("LLAMAACDE", register=True) -except Exception as e: +except Exception as e: # ACDE print("ACDE can not be imported: ", e) -try: +try: # ACMDEOBD from nevergrad.optimization.lama.ACMDEOBD import ACMDEOBD lama_register["ACMDEOBD"] = ACMDEOBD - res = NonObjectOptimizer(method="LLAMAACMDEOBD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAACMDEOBD")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAACMDEOBD = NonObjectOptimizer(method="LLAMAACMDEOBD").set_name("LLAMAACMDEOBD", register=True) -except Exception as e: +except Exception as e: # ACMDEOBD print("ACMDEOBD can not be imported: ", e) -try: +try: # ADAEDA from nevergrad.optimization.lama.ADAEDA import ADAEDA lama_register["ADAEDA"] = ADAEDA - res = NonObjectOptimizer(method="LLAMAADAEDA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADAEDA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADAEDA = NonObjectOptimizer(method="LLAMAADAEDA").set_name("LLAMAADAEDA", register=True) -except Exception as e: +except Exception as e: # ADAEDA print("ADAEDA can not be imported: ", e) -try: +try: # ADCE from nevergrad.optimization.lama.ADCE import ADCE lama_register["ADCE"] = ADCE - res = NonObjectOptimizer(method="LLAMAADCE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADCE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADCE = NonObjectOptimizer(method="LLAMAADCE").set_name("LLAMAADCE", register=True) -except Exception as e: +except Exception as e: # ADCE print("ADCE can not be imported: ", e) -try: +try: # ADEA from nevergrad.optimization.lama.ADEA import ADEA lama_register["ADEA"] = ADEA - res = NonObjectOptimizer(method="LLAMAADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEA = NonObjectOptimizer(method="LLAMAADEA").set_name("LLAMAADEA", register=True) -except Exception as e: +except Exception as e: # ADEA print("ADEA can not be imported: ", e) -try: +try: # ADEAS from nevergrad.optimization.lama.ADEAS import ADEAS lama_register["ADEAS"] = ADEAS - res = NonObjectOptimizer(method="LLAMAADEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEAS = NonObjectOptimizer(method="LLAMAADEAS").set_name("LLAMAADEAS", register=True) -except Exception as e: +except Exception as e: # ADEAS print("ADEAS can not be imported: ", e) -try: +try: # ADECMS from nevergrad.optimization.lama.ADECMS import ADECMS lama_register["ADECMS"] = ADECMS - res = NonObjectOptimizer(method="LLAMAADECMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADECMS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADECMS = NonObjectOptimizer(method="LLAMAADECMS").set_name("LLAMAADECMS", register=True) -except Exception as e: +except Exception as e: # ADECMS print("ADECMS can not be imported: ", e) -try: +try: # ADEDCA from nevergrad.optimization.lama.ADEDCA import ADEDCA lama_register["ADEDCA"] = ADEDCA - res = NonObjectOptimizer(method="LLAMAADEDCA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEDCA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEDCA = NonObjectOptimizer(method="LLAMAADEDCA").set_name("LLAMAADEDCA", register=True) -except Exception as e: +except Exception as e: # ADEDCA print("ADEDCA can not be imported: ", e) -try: +try: # ADEDE from nevergrad.optimization.lama.ADEDE import ADEDE lama_register["ADEDE"] = ADEDE - res = NonObjectOptimizer(method="LLAMAADEDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEDE = NonObjectOptimizer(method="LLAMAADEDE").set_name("LLAMAADEDE", register=True) -except Exception as e: +except Exception as e: # ADEDE print("ADEDE can not be imported: ", e) -try: +try: # ADEDLR from nevergrad.optimization.lama.ADEDLR import ADEDLR lama_register["ADEDLR"] = ADEDLR - res = NonObjectOptimizer(method="LLAMAADEDLR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEDLR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEDLR = NonObjectOptimizer(method="LLAMAADEDLR").set_name("LLAMAADEDLR", register=True) -except Exception as e: +except Exception as e: # ADEDLR print("ADEDLR can not be imported: ", e) -try: +try: # ADEDM from nevergrad.optimization.lama.ADEDM import ADEDM lama_register["ADEDM"] = ADEDM - res = NonObjectOptimizer(method="LLAMAADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEDM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEDM = NonObjectOptimizer(method="LLAMAADEDM").set_name("LLAMAADEDM", register=True) -except Exception as e: +except Exception as e: # ADEDM print("ADEDM can not be imported: ", e) -try: +try: # ADEEM from nevergrad.optimization.lama.ADEEM import ADEEM lama_register["ADEEM"] = ADEEM - res = NonObjectOptimizer(method="LLAMAADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEEM = NonObjectOptimizer(method="LLAMAADEEM").set_name("LLAMAADEEM", register=True) -except Exception as e: +except Exception as e: # ADEEM print("ADEEM can not be imported: ", e) -try: +try: # ADEGE from nevergrad.optimization.lama.ADEGE import ADEGE lama_register["ADEGE"] = ADEGE - res = NonObjectOptimizer(method="LLAMAADEGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEGE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEGE = NonObjectOptimizer(method="LLAMAADEGE").set_name("LLAMAADEGE", register=True) -except Exception as e: +except Exception as e: # ADEGE print("ADEGE can not be imported: ", e) -try: +try: # ADEGM from nevergrad.optimization.lama.ADEGM import ADEGM lama_register["ADEGM"] = ADEGM - res = NonObjectOptimizer(method="LLAMAADEGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEGM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEGM = NonObjectOptimizer(method="LLAMAADEGM").set_name("LLAMAADEGM", register=True) -except Exception as e: +except Exception as e: # ADEGM print("ADEGM can not be imported: ", e) -try: +try: # ADEGS from nevergrad.optimization.lama.ADEGS import ADEGS lama_register["ADEGS"] = ADEGS - res = NonObjectOptimizer(method="LLAMAADEGS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEGS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEGS = NonObjectOptimizer(method="LLAMAADEGS").set_name("LLAMAADEGS", register=True) -except Exception as e: +except Exception as e: # ADEGS print("ADEGS can not be imported: ", e) -try: +try: # ADEM from nevergrad.optimization.lama.ADEM import ADEM lama_register["ADEM"] = ADEM - res = NonObjectOptimizer(method="LLAMAADEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEM = NonObjectOptimizer(method="LLAMAADEM").set_name("LLAMAADEM", register=True) -except Exception as e: +except Exception as e: # ADEM print("ADEM can not be imported: ", e) -try: +try: # ADEMSC from nevergrad.optimization.lama.ADEMSC import ADEMSC lama_register["ADEMSC"] = ADEMSC - res = NonObjectOptimizer(method="LLAMAADEMSC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEMSC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEMSC = NonObjectOptimizer(method="LLAMAADEMSC").set_name("LLAMAADEMSC", register=True) -except Exception as e: +except Exception as e: # ADEMSC print("ADEMSC can not be imported: ", e) -try: +try: # ADEPF from nevergrad.optimization.lama.ADEPF import ADEPF lama_register["ADEPF"] = ADEPF - res = NonObjectOptimizer(method="LLAMAADEPF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEPF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEPF = NonObjectOptimizer(method="LLAMAADEPF").set_name("LLAMAADEPF", register=True) -except Exception as e: +except Exception as e: # ADEPF print("ADEPF can not be imported: ", e) -try: +try: # ADEPM from nevergrad.optimization.lama.ADEPM import ADEPM lama_register["ADEPM"] = ADEPM - res = NonObjectOptimizer(method="LLAMAADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEPM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEPM = NonObjectOptimizer(method="LLAMAADEPM").set_name("LLAMAADEPM", register=True) -except Exception as e: +except Exception as e: # ADEPM print("ADEPM can not be imported: ", e) -try: +try: # ADEPMC from nevergrad.optimization.lama.ADEPMC import ADEPMC lama_register["ADEPMC"] = ADEPMC - res = NonObjectOptimizer(method="LLAMAADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEPMC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEPMC = NonObjectOptimizer(method="LLAMAADEPMC").set_name("LLAMAADEPMC", register=True) -except Exception as e: +except Exception as e: # ADEPMC print("ADEPMC can not be imported: ", e) -try: +try: # ADEPMI from nevergrad.optimization.lama.ADEPMI import ADEPMI lama_register["ADEPMI"] = ADEPMI - res = NonObjectOptimizer(method="LLAMAADEPMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEPMI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEPMI = NonObjectOptimizer(method="LLAMAADEPMI").set_name("LLAMAADEPMI", register=True) -except Exception as e: +except Exception as e: # ADEPMI print("ADEPMI can not be imported: ", e) -try: +try: # ADEPR from nevergrad.optimization.lama.ADEPR import ADEPR lama_register["ADEPR"] = ADEPR - res = NonObjectOptimizer(method="LLAMAADEPR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADEPR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADEPR = NonObjectOptimizer(method="LLAMAADEPR").set_name("LLAMAADEPR", register=True) -except Exception as e: +except Exception as e: # ADEPR print("ADEPR can not be imported: ", e) -try: +try: # ADES from nevergrad.optimization.lama.ADES import ADES lama_register["ADES"] = ADES - res = NonObjectOptimizer(method="LLAMAADES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADES = NonObjectOptimizer(method="LLAMAADES").set_name("LLAMAADES", register=True) -except Exception as e: +except Exception as e: # ADES print("ADES can not be imported: ", e) -try: +try: # ADESA from nevergrad.optimization.lama.ADESA import ADESA lama_register["ADESA"] = ADESA - res = NonObjectOptimizer(method="LLAMAADESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADESA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADESA = NonObjectOptimizer(method="LLAMAADESA").set_name("LLAMAADESA", register=True) -except Exception as e: +except Exception as e: # ADESA print("ADESA can not be imported: ", e) -try: +try: # ADE_FPC from nevergrad.optimization.lama.ADE_FPC import ADE_FPC lama_register["ADE_FPC"] = ADE_FPC - res = NonObjectOptimizer(method="LLAMAADE_FPC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADE_FPC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADE_FPC = NonObjectOptimizer(method="LLAMAADE_FPC").set_name("LLAMAADE_FPC", register=True) -except Exception as e: +except Exception as e: # ADE_FPC print("ADE_FPC can not be imported: ", e) -try: +try: # ADGD from nevergrad.optimization.lama.ADGD import ADGD lama_register["ADGD"] = ADGD - res = NonObjectOptimizer(method="LLAMAADGD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADGD")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADGD = NonObjectOptimizer(method="LLAMAADGD").set_name("LLAMAADGD", register=True) -except Exception as e: +except Exception as e: # ADGD print("ADGD can not be imported: ", e) -try: +try: # ADGE from nevergrad.optimization.lama.ADGE import ADGE lama_register["ADGE"] = ADGE - res = NonObjectOptimizer(method="LLAMAADGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADGE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADGE = NonObjectOptimizer(method="LLAMAADGE").set_name("LLAMAADGE", register=True) -except Exception as e: +except Exception as e: # ADGE print("ADGE can not be imported: ", e) -try: +try: # ADMDE from nevergrad.optimization.lama.ADMDE import ADMDE lama_register["ADMDE"] = ADMDE - res = NonObjectOptimizer(method="LLAMAADMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADMDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADMDE = NonObjectOptimizer(method="LLAMAADMDE").set_name("LLAMAADMDE", register=True) -except Exception as e: +except Exception as e: # ADMDE print("ADMDE can not be imported: ", e) -try: +try: # ADMEMS from nevergrad.optimization.lama.ADMEMS import ADMEMS lama_register["ADMEMS"] = ADMEMS - res = NonObjectOptimizer(method="LLAMAADMEMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADMEMS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADMEMS = NonObjectOptimizer(method="LLAMAADMEMS").set_name("LLAMAADMEMS", register=True) -except Exception as e: +except Exception as e: # ADMEMS print("ADMEMS can not be imported: ", e) -try: +try: # ADSDiffEvo from nevergrad.optimization.lama.ADSDiffEvo import ADSDiffEvo lama_register["ADSDiffEvo"] = ADSDiffEvo - res = NonObjectOptimizer(method="LLAMAADSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADSDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADSDiffEvo = NonObjectOptimizer(method="LLAMAADSDiffEvo").set_name("LLAMAADSDiffEvo", register=True) -except Exception as e: +except Exception as e: # ADSDiffEvo print("ADSDiffEvo can not be imported: ", e) -try: +try: # ADSEA from nevergrad.optimization.lama.ADSEA import ADSEA lama_register["ADSEA"] = ADSEA - res = NonObjectOptimizer(method="LLAMAADSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADSEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADSEA = NonObjectOptimizer(method="LLAMAADSEA").set_name("LLAMAADSEA", register=True) -except Exception as e: +except Exception as e: # ADSEA print("ADSEA can not be imported: ", e) -try: +try: # ADSEAPlus from nevergrad.optimization.lama.ADSEAPlus import ADSEAPlus lama_register["ADSEAPlus"] = ADSEAPlus - res = NonObjectOptimizer(method="LLAMAADSEAPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAADSEAPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAADSEAPlus = NonObjectOptimizer(method="LLAMAADSEAPlus").set_name("LLAMAADSEAPlus", register=True) -except Exception as e: +except Exception as e: # ADSEAPlus print("ADSEAPlus can not be imported: ", e) -try: +try: # AGBES from nevergrad.optimization.lama.AGBES import AGBES lama_register["AGBES"] = AGBES - res = NonObjectOptimizer(method="LLAMAAGBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGBES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGBES = NonObjectOptimizer(method="LLAMAAGBES").set_name("LLAMAAGBES", register=True) -except Exception as e: +except Exception as e: # AGBES print("AGBES can not be imported: ", e) -try: +try: # AGCES from nevergrad.optimization.lama.AGCES import AGCES lama_register["AGCES"] = AGCES - res = NonObjectOptimizer(method="LLAMAAGCES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGCES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGCES = NonObjectOptimizer(method="LLAMAAGCES").set_name("LLAMAAGCES", register=True) -except Exception as e: +except Exception as e: # AGCES print("AGCES can not be imported: ", e) -try: +try: # AGDE from nevergrad.optimization.lama.AGDE import AGDE lama_register["AGDE"] = AGDE - res = NonObjectOptimizer(method="LLAMAAGDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGDE = NonObjectOptimizer(method="LLAMAAGDE").set_name("LLAMAAGDE", register=True) -except Exception as e: +except Exception as e: # AGDE print("AGDE can not be imported: ", e) -try: +try: # AGDELS from nevergrad.optimization.lama.AGDELS import AGDELS lama_register["AGDELS"] = AGDELS - res = NonObjectOptimizer(method="LLAMAAGDELS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGDELS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGDELS = NonObjectOptimizer(method="LLAMAAGDELS").set_name("LLAMAAGDELS", register=True) -except Exception as e: +except Exception as e: # AGDELS print("AGDELS can not be imported: ", e) -try: +try: # AGDiffEvo from nevergrad.optimization.lama.AGDiffEvo import AGDiffEvo lama_register["AGDiffEvo"] = AGDiffEvo - res = NonObjectOptimizer(method="LLAMAAGDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGDiffEvo = NonObjectOptimizer(method="LLAMAAGDiffEvo").set_name("LLAMAAGDiffEvo", register=True) -except Exception as e: +except Exception as e: # AGDiffEvo print("AGDiffEvo can not be imported: ", e) -try: +try: # AGEA from nevergrad.optimization.lama.AGEA import AGEA lama_register["AGEA"] = AGEA - res = NonObjectOptimizer(method="LLAMAAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGEA = NonObjectOptimizer(method="LLAMAAGEA").set_name("LLAMAAGEA", register=True) -except Exception as e: +except Exception as e: # AGEA print("AGEA can not be imported: ", e) -try: +try: # AGESA from nevergrad.optimization.lama.AGESA import AGESA lama_register["AGESA"] = AGESA - res = NonObjectOptimizer(method="LLAMAAGESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGESA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGESA = NonObjectOptimizer(method="LLAMAAGESA").set_name("LLAMAAGESA", register=True) -except Exception as e: +except Exception as e: # AGESA print("AGESA can not be imported: ", e) -try: +try: # AGGE from nevergrad.optimization.lama.AGGE import AGGE lama_register["AGGE"] = AGGE - res = NonObjectOptimizer(method="LLAMAAGGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGGE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGGE = NonObjectOptimizer(method="LLAMAAGGE").set_name("LLAMAAGGE", register=True) -except Exception as e: +except Exception as e: # AGGE print("AGGE can not be imported: ", e) -try: +try: # AGGES from nevergrad.optimization.lama.AGGES import AGGES lama_register["AGGES"] = AGGES - res = NonObjectOptimizer(method="LLAMAAGGES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGGES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGGES = NonObjectOptimizer(method="LLAMAAGGES").set_name("LLAMAAGGES", register=True) -except Exception as e: +except Exception as e: # AGGES print("AGGES can not be imported: ", e) -try: +try: # AGIDE from nevergrad.optimization.lama.AGIDE import AGIDE lama_register["AGIDE"] = AGIDE - res = NonObjectOptimizer(method="LLAMAAGIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAGIDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAGIDE = NonObjectOptimizer(method="LLAMAAGIDE").set_name("LLAMAAGIDE", register=True) -except Exception as e: +except Exception as e: # AGIDE print("AGIDE can not be imported: ", e) -try: +try: # AHDEMI from nevergrad.optimization.lama.AHDEMI import AHDEMI lama_register["AHDEMI"] = AHDEMI - res = NonObjectOptimizer(method="LLAMAAHDEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAHDEMI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAHDEMI = NonObjectOptimizer(method="LLAMAAHDEMI").set_name("LLAMAAHDEMI", register=True) -except Exception as e: +except Exception as e: # AHDEMI print("AHDEMI can not be imported: ", e) -try: +try: # ALDEEM from nevergrad.optimization.lama.ALDEEM import ALDEEM lama_register["ALDEEM"] = ALDEEM - res = NonObjectOptimizer(method="LLAMAALDEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAALDEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAALDEEM = NonObjectOptimizer(method="LLAMAALDEEM").set_name("LLAMAALDEEM", register=True) -except Exception as e: +except Exception as e: # ALDEEM print("ALDEEM can not be imported: ", e) -try: +try: # ALES from nevergrad.optimization.lama.ALES import ALES lama_register["ALES"] = ALES - res = NonObjectOptimizer(method="LLAMAALES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAALES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAALES = NonObjectOptimizer(method="LLAMAALES").set_name("LLAMAALES", register=True) -except Exception as e: +except Exception as e: # ALES print("ALES can not be imported: ", e) -try: +try: # ALSS from nevergrad.optimization.lama.ALSS import ALSS lama_register["ALSS"] = ALSS - res = NonObjectOptimizer(method="LLAMAALSS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAALSS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAALSS = NonObjectOptimizer(method="LLAMAALSS").set_name("LLAMAALSS", register=True) -except Exception as e: +except Exception as e: # ALSS print("ALSS can not be imported: ", e) -try: +try: # AMDE from nevergrad.optimization.lama.AMDE import AMDE lama_register["AMDE"] = AMDE - res = NonObjectOptimizer(method="LLAMAAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAMDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAMDE = NonObjectOptimizer(method="LLAMAAMDE").set_name("LLAMAAMDE", register=True) -except Exception as e: +except Exception as e: # AMDE print("AMDE can not be imported: ", e) -try: +try: # AMES from nevergrad.optimization.lama.AMES import AMES lama_register["AMES"] = AMES - res = NonObjectOptimizer(method="LLAMAAMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAMES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAMES = NonObjectOptimizer(method="LLAMAAMES").set_name("LLAMAAMES", register=True) -except Exception as e: +except Exception as e: # AMES print("AMES can not be imported: ", e) -try: +try: # AMSDiffEvo from nevergrad.optimization.lama.AMSDiffEvo import AMSDiffEvo lama_register["AMSDiffEvo"] = AMSDiffEvo - res = NonObjectOptimizer(method="LLAMAAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAMSDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAMSDiffEvo = NonObjectOptimizer(method="LLAMAAMSDiffEvo").set_name("LLAMAAMSDiffEvo", register=True) -except Exception as e: +except Exception as e: # AMSDiffEvo print("AMSDiffEvo can not be imported: ", e) -try: +try: # AMSEA from nevergrad.optimization.lama.AMSEA import AMSEA lama_register["AMSEA"] = AMSEA - res = NonObjectOptimizer(method="LLAMAAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAMSEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAMSEA = NonObjectOptimizer(method="LLAMAAMSEA").set_name("LLAMAAMSEA", register=True) -except Exception as e: +except Exception as e: # AMSEA print("AMSEA can not be imported: ", e) -try: +try: # AN_MDEPSO from nevergrad.optimization.lama.AN_MDEPSO import AN_MDEPSO lama_register["AN_MDEPSO"] = AN_MDEPSO - res = NonObjectOptimizer(method="LLAMAAN_MDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAN_MDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAN_MDEPSO = NonObjectOptimizer(method="LLAMAAN_MDEPSO").set_name("LLAMAAN_MDEPSO", register=True) -except Exception as e: +except Exception as e: # AN_MDEPSO print("AN_MDEPSO can not be imported: ", e) -try: +try: # APBES from nevergrad.optimization.lama.APBES import APBES lama_register["APBES"] = APBES - res = NonObjectOptimizer(method="LLAMAAPBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAPBES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAPBES = NonObjectOptimizer(method="LLAMAAPBES").set_name("LLAMAAPBES", register=True) -except Exception as e: +except Exception as e: # APBES print("APBES can not be imported: ", e) -try: +try: # APDE from nevergrad.optimization.lama.APDE import APDE lama_register["APDE"] = APDE - res = NonObjectOptimizer(method="LLAMAAPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAPDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAPDE = NonObjectOptimizer(method="LLAMAAPDE").set_name("LLAMAAPDE", register=True) -except Exception as e: +except Exception as e: # APDE print("APDE can not be imported: ", e) -try: +try: # APDETL from nevergrad.optimization.lama.APDETL import APDETL lama_register["APDETL"] = APDETL - res = NonObjectOptimizer(method="LLAMAAPDETL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAPDETL")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAPDETL = NonObjectOptimizer(method="LLAMAAPDETL").set_name("LLAMAAPDETL", register=True) -except Exception as e: +except Exception as e: # APDETL print("APDETL can not be imported: ", e) -try: +try: # APES from nevergrad.optimization.lama.APES import APES lama_register["APES"] = APES - res = NonObjectOptimizer(method="LLAMAAPES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAPES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAPES = NonObjectOptimizer(method="LLAMAAPES").set_name("LLAMAAPES", register=True) -except Exception as e: +except Exception as e: # APES print("APES can not be imported: ", e) -try: +try: # AQAPSO_LS_DIW from nevergrad.optimization.lama.AQAPSO_LS_DIW import AQAPSO_LS_DIW lama_register["AQAPSO_LS_DIW"] = AQAPSO_LS_DIW - res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAQAPSO_LS_DIW = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW").set_name("LLAMAAQAPSO_LS_DIW", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAQAPSO_LS_DIW = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW").set_name( + "LLAMAAQAPSO_LS_DIW", register=True + ) +except Exception as e: # AQAPSO_LS_DIW print("AQAPSO_LS_DIW can not be imported: ", e) -try: +try: # AQAPSO_LS_DIW_AP from nevergrad.optimization.lama.AQAPSO_LS_DIW_AP import AQAPSO_LS_DIW_AP lama_register["AQAPSO_LS_DIW_AP"] = AQAPSO_LS_DIW_AP - res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP").set_name("LLAMAAQAPSO_LS_DIW_AP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAAQAPSO_LS_DIW_AP").set_name( + "LLAMAAQAPSO_LS_DIW_AP", register=True + ) +except Exception as e: # AQAPSO_LS_DIW_AP print("AQAPSO_LS_DIW_AP can not be imported: ", e) -try: +try: # ARDLS from nevergrad.optimization.lama.ARDLS import ARDLS lama_register["ARDLS"] = ARDLS - res = NonObjectOptimizer(method="LLAMAARDLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAARDLS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAARDLS = NonObjectOptimizer(method="LLAMAARDLS").set_name("LLAMAARDLS", register=True) -except Exception as e: +except Exception as e: # ARDLS print("ARDLS can not be imported: ", e) -try: +try: # ARESM from nevergrad.optimization.lama.ARESM import ARESM lama_register["ARESM"] = ARESM - res = NonObjectOptimizer(method="LLAMAARESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAARESM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAARESM = NonObjectOptimizer(method="LLAMAARESM").set_name("LLAMAARESM", register=True) -except Exception as e: +except Exception as e: # ARESM print("ARESM can not be imported: ", e) -try: +try: # ARISA from nevergrad.optimization.lama.ARISA import ARISA lama_register["ARISA"] = ARISA - res = NonObjectOptimizer(method="LLAMAARISA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAARISA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAARISA = NonObjectOptimizer(method="LLAMAARISA").set_name("LLAMAARISA", register=True) -except Exception as e: +except Exception as e: # ARISA print("ARISA can not be imported: ", e) -try: +try: # ASADEA from nevergrad.optimization.lama.ASADEA import ASADEA lama_register["ASADEA"] = ASADEA - res = NonObjectOptimizer(method="LLAMAASADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAASADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAASADEA = NonObjectOptimizer(method="LLAMAASADEA").set_name("LLAMAASADEA", register=True) -except Exception as e: +except Exception as e: # ASADEA print("ASADEA can not be imported: ", e) -try: +try: # ASO from nevergrad.optimization.lama.ASO import ASO lama_register["ASO"] = ASO - res = NonObjectOptimizer(method="LLAMAASO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAASO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAASO = NonObjectOptimizer(method="LLAMAASO").set_name("LLAMAASO", register=True) -except Exception as e: +except Exception as e: # ASO print("ASO can not be imported: ", e) -try: +try: # AVDE from nevergrad.optimization.lama.AVDE import AVDE lama_register["AVDE"] = AVDE - res = NonObjectOptimizer(method="LLAMAAVDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAAVDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAAVDE = NonObjectOptimizer(method="LLAMAAVDE").set_name("LLAMAAVDE", register=True) -except Exception as e: +except Exception as e: # AVDE print("AVDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AcceleratedAdaptivePrecisionCrossoverEvolution import AcceleratedAdaptivePrecisionCrossoverEvolution - - lama_register["AcceleratedAdaptivePrecisionCrossoverEvolution"] = AcceleratedAdaptivePrecisionCrossoverEvolution - res = NonObjectOptimizer(method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer(method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution").set_name("LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution", register=True) -except Exception as e: +try: # AcceleratedAdaptivePrecisionCrossoverEvolution + from nevergrad.optimization.lama.AcceleratedAdaptivePrecisionCrossoverEvolution import ( + AcceleratedAdaptivePrecisionCrossoverEvolution, + ) + + lama_register["AcceleratedAdaptivePrecisionCrossoverEvolution"] = ( + AcceleratedAdaptivePrecisionCrossoverEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution" + ).set_name("LLAMAAcceleratedAdaptivePrecisionCrossoverEvolution", register=True) +except Exception as e: # AcceleratedAdaptivePrecisionCrossoverEvolution print("AcceleratedAdaptivePrecisionCrossoverEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveAnnealingDifferentialEvolution import AdaptiveAnnealingDifferentialEvolution +try: # AdaptiveAnnealingDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveAnnealingDifferentialEvolution import ( + AdaptiveAnnealingDifferentialEvolution, + ) lama_register["AdaptiveAnnealingDifferentialEvolution"] = AdaptiveAnnealingDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveAnnealingDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveAnnealingDifferentialEvolution").set_name("LLAMAAdaptiveAnnealingDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveAnnealingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveAnnealingDifferentialEvolution" + ).set_name("LLAMAAdaptiveAnnealingDifferentialEvolution", register=True) +except Exception as e: # AdaptiveAnnealingDifferentialEvolution print("AdaptiveAnnealingDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveArchiveDE from nevergrad.optimization.lama.AdaptiveArchiveDE import AdaptiveArchiveDE lama_register["AdaptiveArchiveDE"] = AdaptiveArchiveDE - res = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveArchiveDE = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE").set_name("LLAMAAdaptiveArchiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveArchiveDE = NonObjectOptimizer(method="LLAMAAdaptiveArchiveDE").set_name( + "LLAMAAdaptiveArchiveDE", register=True + ) +except Exception as e: # AdaptiveArchiveDE print("AdaptiveArchiveDE can not be imported: ", e) -try: +try: # AdaptiveCMADiffEvoPSO from nevergrad.optimization.lama.AdaptiveCMADiffEvoPSO import AdaptiveCMADiffEvoPSO lama_register["AdaptiveCMADiffEvoPSO"] = AdaptiveCMADiffEvoPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCMADiffEvoPSO = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO").set_name("LLAMAAdaptiveCMADiffEvoPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCMADiffEvoPSO = NonObjectOptimizer(method="LLAMAAdaptiveCMADiffEvoPSO").set_name( + "LLAMAAdaptiveCMADiffEvoPSO", register=True + ) +except Exception as e: # AdaptiveCMADiffEvoPSO print("AdaptiveCMADiffEvoPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveChaoticFireworksOptimization import AdaptiveChaoticFireworksOptimization +try: # AdaptiveChaoticFireworksOptimization + from nevergrad.optimization.lama.AdaptiveChaoticFireworksOptimization import ( + AdaptiveChaoticFireworksOptimization, + ) lama_register["AdaptiveChaoticFireworksOptimization"] = AdaptiveChaoticFireworksOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveChaoticFireworksOptimization = NonObjectOptimizer(method="LLAMAAdaptiveChaoticFireworksOptimization").set_name("LLAMAAdaptiveChaoticFireworksOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveChaoticFireworksOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveChaoticFireworksOptimization" + ).set_name("LLAMAAdaptiveChaoticFireworksOptimization", register=True) +except Exception as e: # AdaptiveChaoticFireworksOptimization print("AdaptiveChaoticFireworksOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveClusterBasedHybridOptimization import AdaptiveClusterBasedHybridOptimization +try: # AdaptiveClusterBasedHybridOptimization + from nevergrad.optimization.lama.AdaptiveClusterBasedHybridOptimization import ( + AdaptiveClusterBasedHybridOptimization, + ) lama_register["AdaptiveClusterBasedHybridOptimization"] = AdaptiveClusterBasedHybridOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveClusterBasedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveClusterBasedHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveClusterBasedHybridOptimization").set_name("LLAMAAdaptiveClusterBasedHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveClusterBasedHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveClusterBasedHybridOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveClusterBasedHybridOptimization" + ).set_name("LLAMAAdaptiveClusterBasedHybridOptimization", register=True) +except Exception as e: # AdaptiveClusterBasedHybridOptimization print("AdaptiveClusterBasedHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveClusterHybridOptimizationV5 import AdaptiveClusterHybridOptimizationV5 +try: # AdaptiveClusterHybridOptimizationV5 + from nevergrad.optimization.lama.AdaptiveClusterHybridOptimizationV5 import ( + AdaptiveClusterHybridOptimizationV5, + ) lama_register["AdaptiveClusterHybridOptimizationV5"] = AdaptiveClusterHybridOptimizationV5 - res = NonObjectOptimizer(method="LLAMAAdaptiveClusterHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveClusterHybridOptimizationV5 = NonObjectOptimizer(method="LLAMAAdaptiveClusterHybridOptimizationV5").set_name("LLAMAAdaptiveClusterHybridOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveClusterHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveClusterHybridOptimizationV5 = NonObjectOptimizer( + method="LLAMAAdaptiveClusterHybridOptimizationV5" + ).set_name("LLAMAAdaptiveClusterHybridOptimizationV5", register=True) +except Exception as e: # AdaptiveClusterHybridOptimizationV5 print("AdaptiveClusterHybridOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveClusteredDifferentialEvolutionV2 import AdaptiveClusteredDifferentialEvolutionV2 +try: # AdaptiveClusteredDifferentialEvolutionV2 + from nevergrad.optimization.lama.AdaptiveClusteredDifferentialEvolutionV2 import ( + AdaptiveClusteredDifferentialEvolutionV2, + ) lama_register["AdaptiveClusteredDifferentialEvolutionV2"] = AdaptiveClusteredDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveClusteredDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveClusteredDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveClusteredDifferentialEvolutionV2").set_name("LLAMAAdaptiveClusteredDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveClusteredDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveClusteredDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveClusteredDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveClusteredDifferentialEvolutionV2", register=True) +except Exception as e: # AdaptiveClusteredDifferentialEvolutionV2 print("AdaptiveClusteredDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCohortHarmonizationOptimization import AdaptiveCohortHarmonizationOptimization +try: # AdaptiveCohortHarmonizationOptimization + from nevergrad.optimization.lama.AdaptiveCohortHarmonizationOptimization import ( + AdaptiveCohortHarmonizationOptimization, + ) lama_register["AdaptiveCohortHarmonizationOptimization"] = AdaptiveCohortHarmonizationOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveCohortHarmonizationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCohortHarmonizationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveCohortHarmonizationOptimization").set_name("LLAMAAdaptiveCohortHarmonizationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCohortHarmonizationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCohortHarmonizationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveCohortHarmonizationOptimization" + ).set_name("LLAMAAdaptiveCohortHarmonizationOptimization", register=True) +except Exception as e: # AdaptiveCohortHarmonizationOptimization print("AdaptiveCohortHarmonizationOptimization can not be imported: ", e) -try: +try: # AdaptiveCohortMemeticAlgorithm from nevergrad.optimization.lama.AdaptiveCohortMemeticAlgorithm import AdaptiveCohortMemeticAlgorithm lama_register["AdaptiveCohortMemeticAlgorithm"] = AdaptiveCohortMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCohortMemeticAlgorithm").set_name("LLAMAAdaptiveCohortMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCohortMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCohortMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveCohortMemeticAlgorithm print("AdaptiveCohortMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveControlledMemoryAnnealing import AdaptiveControlledMemoryAnnealing +try: # AdaptiveControlledMemoryAnnealing + from nevergrad.optimization.lama.AdaptiveControlledMemoryAnnealing import ( + AdaptiveControlledMemoryAnnealing, + ) lama_register["AdaptiveControlledMemoryAnnealing"] = AdaptiveControlledMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveControlledMemoryAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveControlledMemoryAnnealing").set_name("LLAMAAdaptiveControlledMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveControlledMemoryAnnealing" + ).set_name("LLAMAAdaptiveControlledMemoryAnnealing", register=True) +except Exception as e: # AdaptiveControlledMemoryAnnealing print("AdaptiveControlledMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialEvolution import AdaptiveCooperativeDifferentialEvolution +try: # AdaptiveCooperativeDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialEvolution import ( + AdaptiveCooperativeDifferentialEvolution, + ) lama_register["AdaptiveCooperativeDifferentialEvolution"] = AdaptiveCooperativeDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCooperativeDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialEvolution").set_name("LLAMAAdaptiveCooperativeDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCooperativeDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCooperativeDifferentialEvolution" + ).set_name("LLAMAAdaptiveCooperativeDifferentialEvolution", register=True) +except Exception as e: # AdaptiveCooperativeDifferentialEvolution print("AdaptiveCooperativeDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialMemeticAlgorithm import AdaptiveCooperativeDifferentialMemeticAlgorithm - - lama_register["AdaptiveCooperativeDifferentialMemeticAlgorithm"] = AdaptiveCooperativeDifferentialMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm").set_name("LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm", register=True) -except Exception as e: +try: # AdaptiveCooperativeDifferentialMemeticAlgorithm + from nevergrad.optimization.lama.AdaptiveCooperativeDifferentialMemeticAlgorithm import ( + AdaptiveCooperativeDifferentialMemeticAlgorithm, + ) + + lama_register["AdaptiveCooperativeDifferentialMemeticAlgorithm"] = ( + AdaptiveCooperativeDifferentialMemeticAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCooperativeDifferentialMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveCooperativeDifferentialMemeticAlgorithm print("AdaptiveCooperativeDifferentialMemeticAlgorithm can not be imported: ", e) -try: +try: # AdaptiveCovarianceGradientSearch from nevergrad.optimization.lama.AdaptiveCovarianceGradientSearch import AdaptiveCovarianceGradientSearch lama_register["AdaptiveCovarianceGradientSearch"] = AdaptiveCovarianceGradientSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceGradientSearch").set_name("LLAMAAdaptiveCovarianceGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceGradientSearch" + ).set_name("LLAMAAdaptiveCovarianceGradientSearch", register=True) +except Exception as e: # AdaptiveCovarianceGradientSearch print("AdaptiveCovarianceGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolution import AdaptiveCovarianceMatrixDifferentialEvolution - - lama_register["AdaptiveCovarianceMatrixDifferentialEvolution"] = AdaptiveCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolution import ( + AdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["AdaptiveCovarianceMatrixDifferentialEvolution"] = ( + AdaptiveCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # AdaptiveCovarianceMatrixDifferentialEvolution print("AdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching import AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching - - lama_register["AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching"] = AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching").set_name("LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching", register=True) -except Exception as e: - print("AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolution import AdaptiveCovarianceMatrixEvolution +try: # AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching import ( + AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching, + ) + + lama_register["AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching"] = ( + AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching" + ).set_name( + "LLAMAAdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching", register=True + ) +except Exception as e: # AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching + print( + "AdaptiveCovarianceMatrixDifferentialEvolutionWithDynamicStrategySwitching can not be imported: ", e + ) +try: # AdaptiveCovarianceMatrixEvolution + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolution import ( + AdaptiveCovarianceMatrixEvolution, + ) lama_register["AdaptiveCovarianceMatrixEvolution"] = AdaptiveCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolution").set_name("LLAMAAdaptiveCovarianceMatrixEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: # AdaptiveCovarianceMatrixEvolution print("AdaptiveCovarianceMatrixEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionStrategy import AdaptiveCovarianceMatrixEvolutionStrategy +try: # AdaptiveCovarianceMatrixEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionStrategy import ( + AdaptiveCovarianceMatrixEvolutionStrategy, + ) lama_register["AdaptiveCovarianceMatrixEvolutionStrategy"] = AdaptiveCovarianceMatrixEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy").set_name("LLAMAAdaptiveCovarianceMatrixEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolutionStrategy" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionStrategy", register=True) +except Exception as e: # AdaptiveCovarianceMatrixEvolutionStrategy print("AdaptiveCovarianceMatrixEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation import AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation - - lama_register["AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation"] = AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation").set_name("LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation", register=True) -except Exception as e: +try: # AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation import ( + AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation, + ) + + lama_register["AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation"] = ( + AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation" + ).set_name("LLAMAAdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation", register=True) +except Exception as e: # AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation print("AdaptiveCovarianceMatrixEvolutionWithSelfAdaptiveMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptation import AdaptiveCovarianceMatrixSelfAdaptation +try: # AdaptiveCovarianceMatrixSelfAdaptation + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptation import ( + AdaptiveCovarianceMatrixSelfAdaptation, + ) lama_register["AdaptiveCovarianceMatrixSelfAdaptation"] = AdaptiveCovarianceMatrixSelfAdaptation - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixSelfAdaptation = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation").set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixSelfAdaptation = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixSelfAdaptation" + ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptation", register=True) +except Exception as e: # AdaptiveCovarianceMatrixSelfAdaptation print("AdaptiveCovarianceMatrixSelfAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptationV2 import AdaptiveCovarianceMatrixSelfAdaptationV2 +try: # AdaptiveCovarianceMatrixSelfAdaptationV2 + from nevergrad.optimization.lama.AdaptiveCovarianceMatrixSelfAdaptationV2 import ( + AdaptiveCovarianceMatrixSelfAdaptationV2, + ) lama_register["AdaptiveCovarianceMatrixSelfAdaptationV2"] = AdaptiveCovarianceMatrixSelfAdaptationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2 = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2").set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2" + ).set_name("LLAMAAdaptiveCovarianceMatrixSelfAdaptationV2", register=True) +except Exception as e: # AdaptiveCovarianceMatrixSelfAdaptationV2 print("AdaptiveCovarianceMatrixSelfAdaptationV2 can not be imported: ", e) -try: +try: # AdaptiveCrossoverDEPSO from nevergrad.optimization.lama.AdaptiveCrossoverDEPSO import AdaptiveCrossoverDEPSO lama_register["AdaptiveCrossoverDEPSO"] = AdaptiveCrossoverDEPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCrossoverDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO").set_name("LLAMAAdaptiveCrossoverDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCrossoverDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverDEPSO").set_name( + "LLAMAAdaptiveCrossoverDEPSO", register=True + ) +except Exception as e: # AdaptiveCrossoverDEPSO print("AdaptiveCrossoverDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCrossoverElitistStrategyV6 import AdaptiveCrossoverElitistStrategyV6 +try: # AdaptiveCrossoverElitistStrategyV6 + from nevergrad.optimization.lama.AdaptiveCrossoverElitistStrategyV6 import ( + AdaptiveCrossoverElitistStrategyV6, + ) lama_register["AdaptiveCrossoverElitistStrategyV6"] = AdaptiveCrossoverElitistStrategyV6 - res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverElitistStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCrossoverElitistStrategyV6 = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverElitistStrategyV6").set_name("LLAMAAdaptiveCrossoverElitistStrategyV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverElitistStrategyV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCrossoverElitistStrategyV6 = NonObjectOptimizer( + method="LLAMAAdaptiveCrossoverElitistStrategyV6" + ).set_name("LLAMAAdaptiveCrossoverElitistStrategyV6", register=True) +except Exception as e: # AdaptiveCrossoverElitistStrategyV6 print("AdaptiveCrossoverElitistStrategyV6 can not be imported: ", e) -try: +try: # AdaptiveCrossoverSearch from nevergrad.optimization.lama.AdaptiveCrossoverSearch import AdaptiveCrossoverSearch lama_register["AdaptiveCrossoverSearch"] = AdaptiveCrossoverSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCrossoverSearch = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch").set_name("LLAMAAdaptiveCrossoverSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCrossoverSearch = NonObjectOptimizer(method="LLAMAAdaptiveCrossoverSearch").set_name( + "LLAMAAdaptiveCrossoverSearch", register=True + ) +except Exception as e: # AdaptiveCrossoverSearch print("AdaptiveCrossoverSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalCooperativeSearch import AdaptiveCulturalCooperativeSearch +try: # AdaptiveCulturalCooperativeSearch + from nevergrad.optimization.lama.AdaptiveCulturalCooperativeSearch import ( + AdaptiveCulturalCooperativeSearch, + ) lama_register["AdaptiveCulturalCooperativeSearch"] = AdaptiveCulturalCooperativeSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalCooperativeSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalCooperativeSearch = NonObjectOptimizer(method="LLAMAAdaptiveCulturalCooperativeSearch").set_name("LLAMAAdaptiveCulturalCooperativeSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalCooperativeSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalCooperativeSearch = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalCooperativeSearch" + ).set_name("LLAMAAdaptiveCulturalCooperativeSearch", register=True) +except Exception as e: # AdaptiveCulturalCooperativeSearch print("AdaptiveCulturalCooperativeSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalDifferentialEvolution import AdaptiveCulturalDifferentialEvolution +try: # AdaptiveCulturalDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialEvolution import ( + AdaptiveCulturalDifferentialEvolution, + ) lama_register["AdaptiveCulturalDifferentialEvolution"] = AdaptiveCulturalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialEvolution").set_name("LLAMAAdaptiveCulturalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalDifferentialEvolution" + ).set_name("LLAMAAdaptiveCulturalDifferentialEvolution", register=True) +except Exception as e: # AdaptiveCulturalDifferentialEvolution print("AdaptiveCulturalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalDifferentialMemeticEvolution import AdaptiveCulturalDifferentialMemeticEvolution - - lama_register["AdaptiveCulturalDifferentialMemeticEvolution"] = AdaptiveCulturalDifferentialMemeticEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalDifferentialMemeticEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution").set_name("LLAMAAdaptiveCulturalDifferentialMemeticEvolution", register=True) -except Exception as e: +try: # AdaptiveCulturalDifferentialMemeticEvolution + from nevergrad.optimization.lama.AdaptiveCulturalDifferentialMemeticEvolution import ( + AdaptiveCulturalDifferentialMemeticEvolution, + ) + + lama_register["AdaptiveCulturalDifferentialMemeticEvolution"] = ( + AdaptiveCulturalDifferentialMemeticEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalDifferentialMemeticEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalDifferentialMemeticEvolution" + ).set_name("LLAMAAdaptiveCulturalDifferentialMemeticEvolution", register=True) +except Exception as e: # AdaptiveCulturalDifferentialMemeticEvolution print("AdaptiveCulturalDifferentialMemeticEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalEvolutionStrategy import AdaptiveCulturalEvolutionStrategy +try: # AdaptiveCulturalEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionStrategy import ( + AdaptiveCulturalEvolutionStrategy, + ) lama_register["AdaptiveCulturalEvolutionStrategy"] = AdaptiveCulturalEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionStrategy").set_name("LLAMAAdaptiveCulturalEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalEvolutionStrategy" + ).set_name("LLAMAAdaptiveCulturalEvolutionStrategy", register=True) +except Exception as e: # AdaptiveCulturalEvolutionStrategy print("AdaptiveCulturalEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalEvolutionaryAlgorithm import AdaptiveCulturalEvolutionaryAlgorithm +try: # AdaptiveCulturalEvolutionaryAlgorithm + from nevergrad.optimization.lama.AdaptiveCulturalEvolutionaryAlgorithm import ( + AdaptiveCulturalEvolutionaryAlgorithm, + ) lama_register["AdaptiveCulturalEvolutionaryAlgorithm"] = AdaptiveCulturalEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm").set_name("LLAMAAdaptiveCulturalEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveCulturalEvolutionaryAlgorithm", register=True) +except Exception as e: # AdaptiveCulturalEvolutionaryAlgorithm print("AdaptiveCulturalEvolutionaryAlgorithm can not be imported: ", e) -try: +try: # AdaptiveCulturalMemeticAlgorithm from nevergrad.optimization.lama.AdaptiveCulturalMemeticAlgorithm import AdaptiveCulturalMemeticAlgorithm lama_register["AdaptiveCulturalMemeticAlgorithm"] = AdaptiveCulturalMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticAlgorithm").set_name("LLAMAAdaptiveCulturalMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalMemeticAlgorithm" + ).set_name("LLAMAAdaptiveCulturalMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveCulturalMemeticAlgorithm print("AdaptiveCulturalMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveCulturalMemeticDifferentialEvolution import AdaptiveCulturalMemeticDifferentialEvolution - - lama_register["AdaptiveCulturalMemeticDifferentialEvolution"] = AdaptiveCulturalMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveCulturalMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution").set_name("LLAMAAdaptiveCulturalMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveCulturalMemeticDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveCulturalMemeticDifferentialEvolution import ( + AdaptiveCulturalMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveCulturalMemeticDifferentialEvolution"] = ( + AdaptiveCulturalMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveCulturalMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveCulturalMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveCulturalMemeticDifferentialEvolution", register=True) +except Exception as e: # AdaptiveCulturalMemeticDifferentialEvolution print("AdaptiveCulturalMemeticDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveDEPSOOptimizer from nevergrad.optimization.lama.AdaptiveDEPSOOptimizer import AdaptiveDEPSOOptimizer lama_register["AdaptiveDEPSOOptimizer"] = AdaptiveDEPSOOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer").set_name("LLAMAAdaptiveDEPSOOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDEPSOOptimizer").set_name( + "LLAMAAdaptiveDEPSOOptimizer", register=True + ) +except Exception as e: # AdaptiveDEPSOOptimizer print("AdaptiveDEPSOOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDEWithElitismAndLocalSearch import AdaptiveDEWithElitismAndLocalSearch +try: # AdaptiveDEWithElitismAndLocalSearch + from nevergrad.optimization.lama.AdaptiveDEWithElitismAndLocalSearch import ( + AdaptiveDEWithElitismAndLocalSearch, + ) lama_register["AdaptiveDEWithElitismAndLocalSearch"] = AdaptiveDEWithElitismAndLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDEWithElitismAndLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDEWithElitismAndLocalSearch").set_name("LLAMAAdaptiveDEWithElitismAndLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDEWithElitismAndLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDEWithElitismAndLocalSearch" + ).set_name("LLAMAAdaptiveDEWithElitismAndLocalSearch", register=True) +except Exception as e: # AdaptiveDEWithElitismAndLocalSearch print("AdaptiveDEWithElitismAndLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDEWithOrthogonalCrossover import AdaptiveDEWithOrthogonalCrossover +try: # AdaptiveDEWithOrthogonalCrossover + from nevergrad.optimization.lama.AdaptiveDEWithOrthogonalCrossover import ( + AdaptiveDEWithOrthogonalCrossover, + ) lama_register["AdaptiveDEWithOrthogonalCrossover"] = AdaptiveDEWithOrthogonalCrossover - res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithOrthogonalCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDEWithOrthogonalCrossover = NonObjectOptimizer(method="LLAMAAdaptiveDEWithOrthogonalCrossover").set_name("LLAMAAdaptiveDEWithOrthogonalCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDEWithOrthogonalCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDEWithOrthogonalCrossover = NonObjectOptimizer( + method="LLAMAAdaptiveDEWithOrthogonalCrossover" + ).set_name("LLAMAAdaptiveDEWithOrthogonalCrossover", register=True) +except Exception as e: # AdaptiveDEWithOrthogonalCrossover print("AdaptiveDEWithOrthogonalCrossover can not be imported: ", e) -try: +try: # AdaptiveDecayOptimizer from nevergrad.optimization.lama.AdaptiveDecayOptimizer import AdaptiveDecayOptimizer lama_register["AdaptiveDecayOptimizer"] = AdaptiveDecayOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer").set_name("LLAMAAdaptiveDecayOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDecayOptimizer").set_name( + "LLAMAAdaptiveDecayOptimizer", register=True + ) +except Exception as e: # AdaptiveDecayOptimizer print("AdaptiveDecayOptimizer can not be imported: ", e) -try: +try: # AdaptiveDifferentialCrossover from nevergrad.optimization.lama.AdaptiveDifferentialCrossover import AdaptiveDifferentialCrossover lama_register["AdaptiveDifferentialCrossover"] = AdaptiveDifferentialCrossover - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialCrossover = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialCrossover").set_name("LLAMAAdaptiveDifferentialCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialCrossover = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialCrossover" + ).set_name("LLAMAAdaptiveDifferentialCrossover", register=True) +except Exception as e: # AdaptiveDifferentialCrossover print("AdaptiveDifferentialCrossover can not be imported: ", e) -try: +try: # AdaptiveDifferentialEvolution from nevergrad.optimization.lama.AdaptiveDifferentialEvolution import AdaptiveDifferentialEvolution lama_register["AdaptiveDifferentialEvolution"] = AdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolution").set_name("LLAMAAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolution" + ).set_name("LLAMAAdaptiveDifferentialEvolution", register=True) +except Exception as e: # AdaptiveDifferentialEvolution print("AdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionHarmonySearch import AdaptiveDifferentialEvolutionHarmonySearch +try: # AdaptiveDifferentialEvolutionHarmonySearch + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionHarmonySearch import ( + AdaptiveDifferentialEvolutionHarmonySearch, + ) lama_register["AdaptiveDifferentialEvolutionHarmonySearch"] = AdaptiveDifferentialEvolutionHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch").set_name("LLAMAAdaptiveDifferentialEvolutionHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionHarmonySearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionHarmonySearch", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionHarmonySearch print("AdaptiveDifferentialEvolutionHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionOptimizer import AdaptiveDifferentialEvolutionOptimizer +try: # AdaptiveDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionOptimizer import ( + AdaptiveDifferentialEvolutionOptimizer, + ) lama_register["AdaptiveDifferentialEvolutionOptimizer"] = AdaptiveDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveDifferentialEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveDifferentialEvolutionOptimizer", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionOptimizer print("AdaptiveDifferentialEvolutionOptimizer can not be imported: ", e) -try: +try: # AdaptiveDifferentialEvolutionPSO from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPSO import AdaptiveDifferentialEvolutionPSO lama_register["AdaptiveDifferentialEvolutionPSO"] = AdaptiveDifferentialEvolutionPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPSO").set_name("LLAMAAdaptiveDifferentialEvolutionPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionPSO" + ).set_name("LLAMAAdaptiveDifferentialEvolutionPSO", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionPSO print("AdaptiveDifferentialEvolutionPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPlus import AdaptiveDifferentialEvolutionPlus +try: # AdaptiveDifferentialEvolutionPlus + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionPlus import ( + AdaptiveDifferentialEvolutionPlus, + ) lama_register["AdaptiveDifferentialEvolutionPlus"] = AdaptiveDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPlus").set_name("LLAMAAdaptiveDifferentialEvolutionPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveDifferentialEvolutionPlus", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionPlus print("AdaptiveDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithAdaptivePerturbation import AdaptiveDifferentialEvolutionWithAdaptivePerturbation - - lama_register["AdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = AdaptiveDifferentialEvolutionWithAdaptivePerturbation - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation").set_name("LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithAdaptivePerturbation + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( + AdaptiveDifferentialEvolutionWithAdaptivePerturbation, + ) + + lama_register["AdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( + AdaptiveDifferentialEvolutionWithAdaptivePerturbation + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithAdaptivePerturbation print("AdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithBayesianLocalSearch import AdaptiveDifferentialEvolutionWithBayesianLocalSearch - - lama_register["AdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = AdaptiveDifferentialEvolutionWithBayesianLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithBayesianLocalSearch + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( + AdaptiveDifferentialEvolutionWithBayesianLocalSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( + AdaptiveDifferentialEvolutionWithBayesianLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithBayesianLocalSearch print("AdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation import AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation - - lama_register["AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation"] = AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation").set_name("LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation import ( + AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation, + ) + + lama_register["AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation"] = ( + AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation print("AdaptiveDifferentialEvolutionWithCovarianceMatrixAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithDynamicPopulationV2 import AdaptiveDifferentialEvolutionWithDynamicPopulationV2 - - lama_register["AdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = AdaptiveDifferentialEvolutionWithDynamicPopulationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2").set_name("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithDynamicPopulationV2 + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( + AdaptiveDifferentialEvolutionWithDynamicPopulationV2, + ) + + lama_register["AdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( + AdaptiveDifferentialEvolutionWithDynamicPopulationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithDynamicPopulationV2 print("AdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGradientBoost import AdaptiveDifferentialEvolutionWithGradientBoost - - lama_register["AdaptiveDifferentialEvolutionWithGradientBoost"] = AdaptiveDifferentialEvolutionWithGradientBoost - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMAAdaptiveDifferentialEvolutionWithGradientBoost", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithGradientBoost + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGradientBoost import ( + AdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["AdaptiveDifferentialEvolutionWithGradientBoost"] = ( + AdaptiveDifferentialEvolutionWithGradientBoost + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithGradientBoost print("AdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGuidedSearch import AdaptiveDifferentialEvolutionWithGuidedSearch - - lama_register["AdaptiveDifferentialEvolutionWithGuidedSearch"] = AdaptiveDifferentialEvolutionWithGuidedSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithGuidedSearch + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithGuidedSearch import ( + AdaptiveDifferentialEvolutionWithGuidedSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithGuidedSearch"] = ( + AdaptiveDifferentialEvolutionWithGuidedSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithGuidedSearch", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithGuidedSearch print("AdaptiveDifferentialEvolutionWithGuidedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithLocalSearch import AdaptiveDifferentialEvolutionWithLocalSearch - - lama_register["AdaptiveDifferentialEvolutionWithLocalSearch"] = AdaptiveDifferentialEvolutionWithLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithLocalSearch", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithLocalSearch + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithLocalSearch import ( + AdaptiveDifferentialEvolutionWithLocalSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithLocalSearch"] = ( + AdaptiveDifferentialEvolutionWithLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithLocalSearch print("AdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithMemeticSearch import AdaptiveDifferentialEvolutionWithMemeticSearch - - lama_register["AdaptiveDifferentialEvolutionWithMemeticSearch"] = AdaptiveDifferentialEvolutionWithMemeticSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch").set_name("LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithMemeticSearch + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithMemeticSearch import ( + AdaptiveDifferentialEvolutionWithMemeticSearch, + ) + + lama_register["AdaptiveDifferentialEvolutionWithMemeticSearch"] = ( + AdaptiveDifferentialEvolutionWithMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithMemeticSearch print("AdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithSurrogateAssistance import AdaptiveDifferentialEvolutionWithSurrogateAssistance - - lama_register["AdaptiveDifferentialEvolutionWithSurrogateAssistance"] = AdaptiveDifferentialEvolutionWithSurrogateAssistance - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance").set_name("LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance", register=True) -except Exception as e: +try: # AdaptiveDifferentialEvolutionWithSurrogateAssistance + from nevergrad.optimization.lama.AdaptiveDifferentialEvolutionWithSurrogateAssistance import ( + AdaptiveDifferentialEvolutionWithSurrogateAssistance, + ) + + lama_register["AdaptiveDifferentialEvolutionWithSurrogateAssistance"] = ( + AdaptiveDifferentialEvolutionWithSurrogateAssistance + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance" + ).set_name("LLAMAAdaptiveDifferentialEvolutionWithSurrogateAssistance", register=True) +except Exception as e: # AdaptiveDifferentialEvolutionWithSurrogateAssistance print("AdaptiveDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialHarmonySearch import AdaptiveDifferentialHarmonySearch +try: # AdaptiveDifferentialHarmonySearch + from nevergrad.optimization.lama.AdaptiveDifferentialHarmonySearch import ( + AdaptiveDifferentialHarmonySearch, + ) lama_register["AdaptiveDifferentialHarmonySearch"] = AdaptiveDifferentialHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialHarmonySearch").set_name("LLAMAAdaptiveDifferentialHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialHarmonySearch" + ).set_name("LLAMAAdaptiveDifferentialHarmonySearch", register=True) +except Exception as e: # AdaptiveDifferentialHarmonySearch print("AdaptiveDifferentialHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialMemeticAlgorithm import AdaptiveDifferentialMemeticAlgorithm +try: # AdaptiveDifferentialMemeticAlgorithm + from nevergrad.optimization.lama.AdaptiveDifferentialMemeticAlgorithm import ( + AdaptiveDifferentialMemeticAlgorithm, + ) lama_register["AdaptiveDifferentialMemeticAlgorithm"] = AdaptiveDifferentialMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialMemeticAlgorithm").set_name("LLAMAAdaptiveDifferentialMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialMemeticAlgorithm" + ).set_name("LLAMAAdaptiveDifferentialMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveDifferentialMemeticAlgorithm print("AdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialQuantumEvolution import AdaptiveDifferentialQuantumEvolution +try: # AdaptiveDifferentialQuantumEvolution + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumEvolution import ( + AdaptiveDifferentialQuantumEvolution, + ) lama_register["AdaptiveDifferentialQuantumEvolution"] = AdaptiveDifferentialQuantumEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialQuantumEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumEvolution").set_name("LLAMAAdaptiveDifferentialQuantumEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialQuantumEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialQuantumEvolution" + ).set_name("LLAMAAdaptiveDifferentialQuantumEvolution", register=True) +except Exception as e: # AdaptiveDifferentialQuantumEvolution print("AdaptiveDifferentialQuantumEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDifferentialQuantumMetaheuristic import AdaptiveDifferentialQuantumMetaheuristic +try: # AdaptiveDifferentialQuantumMetaheuristic + from nevergrad.optimization.lama.AdaptiveDifferentialQuantumMetaheuristic import ( + AdaptiveDifferentialQuantumMetaheuristic, + ) lama_register["AdaptiveDifferentialQuantumMetaheuristic"] = AdaptiveDifferentialQuantumMetaheuristic - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialQuantumMetaheuristic = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumMetaheuristic").set_name("LLAMAAdaptiveDifferentialQuantumMetaheuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialQuantumMetaheuristic" + ).set_name("LLAMAAdaptiveDifferentialQuantumMetaheuristic", register=True) +except Exception as e: # AdaptiveDifferentialQuantumMetaheuristic print("AdaptiveDifferentialQuantumMetaheuristic can not be imported: ", e) -try: +try: # AdaptiveDifferentialSpiralSearch from nevergrad.optimization.lama.AdaptiveDifferentialSpiralSearch import AdaptiveDifferentialSpiralSearch lama_register["AdaptiveDifferentialSpiralSearch"] = AdaptiveDifferentialSpiralSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialSpiralSearch").set_name("LLAMAAdaptiveDifferentialSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMAAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: # AdaptiveDifferentialSpiralSearch print("AdaptiveDifferentialSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDimensionalClimbingEvolutionStrategy import AdaptiveDimensionalClimbingEvolutionStrategy - - lama_register["AdaptiveDimensionalClimbingEvolutionStrategy"] = AdaptiveDimensionalClimbingEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDimensionalClimbingEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy").set_name("LLAMAAdaptiveDimensionalClimbingEvolutionStrategy", register=True) -except Exception as e: +try: # AdaptiveDimensionalClimbingEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveDimensionalClimbingEvolutionStrategy import ( + AdaptiveDimensionalClimbingEvolutionStrategy, + ) + + lama_register["AdaptiveDimensionalClimbingEvolutionStrategy"] = ( + AdaptiveDimensionalClimbingEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDimensionalClimbingEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDimensionalClimbingEvolutionStrategy" + ).set_name("LLAMAAdaptiveDimensionalClimbingEvolutionStrategy", register=True) +except Exception as e: # AdaptiveDimensionalClimbingEvolutionStrategy print("AdaptiveDimensionalClimbingEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDimensionalCrossoverEvolver import AdaptiveDimensionalCrossoverEvolver +try: # AdaptiveDimensionalCrossoverEvolver + from nevergrad.optimization.lama.AdaptiveDimensionalCrossoverEvolver import ( + AdaptiveDimensionalCrossoverEvolver, + ) lama_register["AdaptiveDimensionalCrossoverEvolver"] = AdaptiveDimensionalCrossoverEvolver - res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalCrossoverEvolver").set_name("LLAMAAdaptiveDimensionalCrossoverEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( + method="LLAMAAdaptiveDimensionalCrossoverEvolver" + ).set_name("LLAMAAdaptiveDimensionalCrossoverEvolver", register=True) +except Exception as e: # AdaptiveDimensionalCrossoverEvolver print("AdaptiveDimensionalCrossoverEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDirectionalBiasQuorumOptimization import AdaptiveDirectionalBiasQuorumOptimization +try: # AdaptiveDirectionalBiasQuorumOptimization + from nevergrad.optimization.lama.AdaptiveDirectionalBiasQuorumOptimization import ( + AdaptiveDirectionalBiasQuorumOptimization, + ) lama_register["AdaptiveDirectionalBiasQuorumOptimization"] = AdaptiveDirectionalBiasQuorumOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMAAdaptiveDirectionalBiasQuorumOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMAAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: # AdaptiveDirectionalBiasQuorumOptimization print("AdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) -try: +try: # AdaptiveDirectionalSearch from nevergrad.optimization.lama.AdaptiveDirectionalSearch import AdaptiveDirectionalSearch lama_register["AdaptiveDirectionalSearch"] = AdaptiveDirectionalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDirectionalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch").set_name("LLAMAAdaptiveDirectionalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDirectionalSearch = NonObjectOptimizer(method="LLAMAAdaptiveDirectionalSearch").set_name( + "LLAMAAdaptiveDirectionalSearch", register=True + ) +except Exception as e: # AdaptiveDirectionalSearch print("AdaptiveDirectionalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDivergenceClusteringSearch import AdaptiveDivergenceClusteringSearch +try: # AdaptiveDivergenceClusteringSearch + from nevergrad.optimization.lama.AdaptiveDivergenceClusteringSearch import ( + AdaptiveDivergenceClusteringSearch, + ) lama_register["AdaptiveDivergenceClusteringSearch"] = AdaptiveDivergenceClusteringSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDivergenceClusteringSearch = NonObjectOptimizer(method="LLAMAAdaptiveDivergenceClusteringSearch").set_name("LLAMAAdaptiveDivergenceClusteringSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( + method="LLAMAAdaptiveDivergenceClusteringSearch" + ).set_name("LLAMAAdaptiveDivergenceClusteringSearch", register=True) +except Exception as e: # AdaptiveDivergenceClusteringSearch print("AdaptiveDivergenceClusteringSearch can not be imported: ", e) -try: +try: # AdaptiveDiverseHybridOptimizer from nevergrad.optimization.lama.AdaptiveDiverseHybridOptimizer import AdaptiveDiverseHybridOptimizer lama_register["AdaptiveDiverseHybridOptimizer"] = AdaptiveDiverseHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiverseHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDiverseHybridOptimizer").set_name("LLAMAAdaptiveDiverseHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiverseHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDiverseHybridOptimizer" + ).set_name("LLAMAAdaptiveDiverseHybridOptimizer", register=True) +except Exception as e: # AdaptiveDiverseHybridOptimizer print("AdaptiveDiverseHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversifiedEvolutionStrategy import AdaptiveDiversifiedEvolutionStrategy +try: # AdaptiveDiversifiedEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveDiversifiedEvolutionStrategy import ( + AdaptiveDiversifiedEvolutionStrategy, + ) lama_register["AdaptiveDiversifiedEvolutionStrategy"] = AdaptiveDiversifiedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedEvolutionStrategy").set_name("LLAMAAdaptiveDiversifiedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedEvolutionStrategy" + ).set_name("LLAMAAdaptiveDiversifiedEvolutionStrategy", register=True) +except Exception as e: # AdaptiveDiversifiedEvolutionStrategy print("AdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) -try: +try: # AdaptiveDiversifiedHarmonySearch from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearch import AdaptiveDiversifiedHarmonySearch lama_register["AdaptiveDiversifiedHarmonySearch"] = AdaptiveDiversifiedHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearch").set_name("LLAMAAdaptiveDiversifiedHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedHarmonySearch" + ).set_name("LLAMAAdaptiveDiversifiedHarmonySearch", register=True) +except Exception as e: # AdaptiveDiversifiedHarmonySearch print("AdaptiveDiversifiedHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearchOptimizer import AdaptiveDiversifiedHarmonySearchOptimizer +try: # AdaptiveDiversifiedHarmonySearchOptimizer + from nevergrad.optimization.lama.AdaptiveDiversifiedHarmonySearchOptimizer import ( + AdaptiveDiversifiedHarmonySearchOptimizer, + ) lama_register["AdaptiveDiversifiedHarmonySearchOptimizer"] = AdaptiveDiversifiedHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer").set_name("LLAMAAdaptiveDiversifiedHarmonySearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAAdaptiveDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: # AdaptiveDiversifiedHarmonySearchOptimizer print("AdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) -try: +try: # AdaptiveDiversifiedSearch from nevergrad.optimization.lama.AdaptiveDiversifiedSearch import AdaptiveDiversifiedSearch lama_register["AdaptiveDiversifiedSearch"] = AdaptiveDiversifiedSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch").set_name("LLAMAAdaptiveDiversifiedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAAdaptiveDiversifiedSearch").set_name( + "LLAMAAdaptiveDiversifiedSearch", register=True + ) +except Exception as e: # AdaptiveDiversifiedSearch print("AdaptiveDiversifiedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversityDifferentialHybrid import AdaptiveDiversityDifferentialHybrid +try: # AdaptiveDiversityDifferentialHybrid + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialHybrid import ( + AdaptiveDiversityDifferentialHybrid, + ) lama_register["AdaptiveDiversityDifferentialHybrid"] = AdaptiveDiversityDifferentialHybrid - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversityDifferentialHybrid = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialHybrid").set_name("LLAMAAdaptiveDiversityDifferentialHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversityDifferentialHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityDifferentialHybrid" + ).set_name("LLAMAAdaptiveDiversityDifferentialHybrid", register=True) +except Exception as e: # AdaptiveDiversityDifferentialHybrid print("AdaptiveDiversityDifferentialHybrid can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversityDifferentialMemeticHybrid import AdaptiveDiversityDifferentialMemeticHybrid +try: # AdaptiveDiversityDifferentialMemeticHybrid + from nevergrad.optimization.lama.AdaptiveDiversityDifferentialMemeticHybrid import ( + AdaptiveDiversityDifferentialMemeticHybrid, + ) lama_register["AdaptiveDiversityDifferentialMemeticHybrid"] = AdaptiveDiversityDifferentialMemeticHybrid - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversityDifferentialMemeticHybrid = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid").set_name("LLAMAAdaptiveDiversityDifferentialMemeticHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversityDifferentialMemeticHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityDifferentialMemeticHybrid" + ).set_name("LLAMAAdaptiveDiversityDifferentialMemeticHybrid", register=True) +except Exception as e: # AdaptiveDiversityDifferentialMemeticHybrid print("AdaptiveDiversityDifferentialMemeticHybrid can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversityMaintainedDifferentialEvolution import AdaptiveDiversityMaintainedDifferentialEvolution - - lama_register["AdaptiveDiversityMaintainedDifferentialEvolution"] = AdaptiveDiversityMaintainedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversityMaintainedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution").set_name("LLAMAAdaptiveDiversityMaintainedDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveDiversityMaintainedDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveDiversityMaintainedDifferentialEvolution import ( + AdaptiveDiversityMaintainedDifferentialEvolution, + ) + + lama_register["AdaptiveDiversityMaintainedDifferentialEvolution"] = ( + AdaptiveDiversityMaintainedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversityMaintainedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityMaintainedDifferentialEvolution" + ).set_name("LLAMAAdaptiveDiversityMaintainedDifferentialEvolution", register=True) +except Exception as e: # AdaptiveDiversityMaintainedDifferentialEvolution print("AdaptiveDiversityMaintainedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDiversityMaintainingGradientEvolution import AdaptiveDiversityMaintainingGradientEvolution - - lama_register["AdaptiveDiversityMaintainingGradientEvolution"] = AdaptiveDiversityMaintainingGradientEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainingGradientEvolution").set_name("LLAMAAdaptiveDiversityMaintainingGradientEvolution", register=True) -except Exception as e: +try: # AdaptiveDiversityMaintainingGradientEvolution + from nevergrad.optimization.lama.AdaptiveDiversityMaintainingGradientEvolution import ( + AdaptiveDiversityMaintainingGradientEvolution, + ) + + lama_register["AdaptiveDiversityMaintainingGradientEvolution"] = ( + AdaptiveDiversityMaintainingGradientEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDiversityMaintainingGradientEvolution" + ).set_name("LLAMAAdaptiveDiversityMaintainingGradientEvolution", register=True) +except Exception as e: # AdaptiveDiversityMaintainingGradientEvolution print("AdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) -try: +try: # AdaptiveDiversityPSO from nevergrad.optimization.lama.AdaptiveDiversityPSO import AdaptiveDiversityPSO lama_register["AdaptiveDiversityPSO"] = AdaptiveDiversityPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO").set_name("LLAMAAdaptiveDiversityPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMAAdaptiveDiversityPSO").set_name( + "LLAMAAdaptiveDiversityPSO", register=True + ) +except Exception as e: # AdaptiveDiversityPSO print("AdaptiveDiversityPSO can not be imported: ", e) -try: +try: # AdaptiveDolphinPodOptimization from nevergrad.optimization.lama.AdaptiveDolphinPodOptimization import AdaptiveDolphinPodOptimization lama_register["AdaptiveDolphinPodOptimization"] = AdaptiveDolphinPodOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDolphinPodOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDolphinPodOptimization").set_name("LLAMAAdaptiveDolphinPodOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDolphinPodOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDolphinPodOptimization" + ).set_name("LLAMAAdaptiveDolphinPodOptimization", register=True) +except Exception as e: # AdaptiveDolphinPodOptimization print("AdaptiveDolphinPodOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDualPhaseDifferentialEvolution import AdaptiveDualPhaseDifferentialEvolution +try: # AdaptiveDualPhaseDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveDualPhaseDifferentialEvolution import ( + AdaptiveDualPhaseDifferentialEvolution, + ) lama_register["AdaptiveDualPhaseDifferentialEvolution"] = AdaptiveDualPhaseDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseDifferentialEvolution").set_name("LLAMAAdaptiveDualPhaseDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseDifferentialEvolution" + ).set_name("LLAMAAdaptiveDualPhaseDifferentialEvolution", register=True) +except Exception as e: # AdaptiveDualPhaseDifferentialEvolution print("AdaptiveDualPhaseDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDualPhaseEvolutionarySwarmOptimization import AdaptiveDualPhaseEvolutionarySwarmOptimization - - lama_register["AdaptiveDualPhaseEvolutionarySwarmOptimization"] = AdaptiveDualPhaseEvolutionarySwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization").set_name("LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) -except Exception as e: +try: # AdaptiveDualPhaseEvolutionarySwarmOptimization + from nevergrad.optimization.lama.AdaptiveDualPhaseEvolutionarySwarmOptimization import ( + AdaptiveDualPhaseEvolutionarySwarmOptimization, + ) + + lama_register["AdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( + AdaptiveDualPhaseEvolutionarySwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization" + ).set_name("LLAMAAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) +except Exception as e: # AdaptiveDualPhaseEvolutionarySwarmOptimization print("AdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDualPhaseOptimizationWithDynamicParameterControl import AdaptiveDualPhaseOptimizationWithDynamicParameterControl - - lama_register["AdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = AdaptiveDualPhaseOptimizationWithDynamicParameterControl - res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl").set_name("LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) -except Exception as e: +try: # AdaptiveDualPhaseOptimizationWithDynamicParameterControl + from nevergrad.optimization.lama.AdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( + AdaptiveDualPhaseOptimizationWithDynamicParameterControl, + ) + + lama_register["AdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( + AdaptiveDualPhaseOptimizationWithDynamicParameterControl + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( + method="LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl" + ).set_name("LLAMAAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) +except Exception as e: # AdaptiveDualPhaseOptimizationWithDynamicParameterControl print("AdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) -try: +try: # AdaptiveDualPhaseStrategy from nevergrad.optimization.lama.AdaptiveDualPhaseStrategy import AdaptiveDualPhaseStrategy lama_register["AdaptiveDualPhaseStrategy"] = AdaptiveDualPhaseStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy").set_name("LLAMAAdaptiveDualPhaseStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDualPhaseStrategy").set_name( + "LLAMAAdaptiveDualPhaseStrategy", register=True + ) +except Exception as e: # AdaptiveDualPhaseStrategy print("AdaptiveDualPhaseStrategy can not be imported: ", e) -try: +try: # AdaptiveDualPopulationDE_LS from nevergrad.optimization.lama.AdaptiveDualPopulationDE_LS import AdaptiveDualPopulationDE_LS lama_register["AdaptiveDualPopulationDE_LS"] = AdaptiveDualPopulationDE_LS - res = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualPopulationDE_LS = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS").set_name("LLAMAAdaptiveDualPopulationDE_LS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualPopulationDE_LS = NonObjectOptimizer(method="LLAMAAdaptiveDualPopulationDE_LS").set_name( + "LLAMAAdaptiveDualPopulationDE_LS", register=True + ) +except Exception as e: # AdaptiveDualPopulationDE_LS print("AdaptiveDualPopulationDE_LS can not be imported: ", e) -try: +try: # AdaptiveDualStrategyOptimizer from nevergrad.optimization.lama.AdaptiveDualStrategyOptimizer import AdaptiveDualStrategyOptimizer lama_register["AdaptiveDualStrategyOptimizer"] = AdaptiveDualStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDualStrategyOptimizer").set_name("LLAMAAdaptiveDualStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDualStrategyOptimizer" + ).set_name("LLAMAAdaptiveDualStrategyOptimizer", register=True) +except Exception as e: # AdaptiveDualStrategyOptimizer print("AdaptiveDualStrategyOptimizer can not be imported: ", e) -try: +try: # AdaptiveDynamicDE from nevergrad.optimization.lama.AdaptiveDynamicDE import AdaptiveDynamicDE lama_register["AdaptiveDynamicDE"] = AdaptiveDynamicDE - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE").set_name("LLAMAAdaptiveDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDE").set_name( + "LLAMAAdaptiveDynamicDE", register=True + ) +except Exception as e: # AdaptiveDynamicDE print("AdaptiveDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicDifferentialEvolution import AdaptiveDynamicDifferentialEvolution +try: # AdaptiveDynamicDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveDynamicDifferentialEvolution import ( + AdaptiveDynamicDifferentialEvolution, + ) lama_register["AdaptiveDynamicDifferentialEvolution"] = AdaptiveDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDifferentialEvolution").set_name("LLAMAAdaptiveDynamicDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDifferentialEvolution" + ).set_name("LLAMAAdaptiveDynamicDifferentialEvolution", register=True) +except Exception as e: # AdaptiveDynamicDifferentialEvolution print("AdaptiveDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseEnhancedStrategyV20 import AdaptiveDynamicDualPhaseEnhancedStrategyV20 +try: # AdaptiveDynamicDualPhaseEnhancedStrategyV20 + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseEnhancedStrategyV20 import ( + AdaptiveDynamicDualPhaseEnhancedStrategyV20, + ) lama_register["AdaptiveDynamicDualPhaseEnhancedStrategyV20"] = AdaptiveDynamicDualPhaseEnhancedStrategyV20 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20").set_name("LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20" + ).set_name("LLAMAAdaptiveDynamicDualPhaseEnhancedStrategyV20", register=True) +except Exception as e: # AdaptiveDynamicDualPhaseEnhancedStrategyV20 print("AdaptiveDynamicDualPhaseEnhancedStrategyV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseStrategyV11 import AdaptiveDynamicDualPhaseStrategyV11 +try: # AdaptiveDynamicDualPhaseStrategyV11 + from nevergrad.optimization.lama.AdaptiveDynamicDualPhaseStrategyV11 import ( + AdaptiveDynamicDualPhaseStrategyV11, + ) lama_register["AdaptiveDynamicDualPhaseStrategyV11"] = AdaptiveDynamicDualPhaseStrategyV11 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicDualPhaseStrategyV11 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseStrategyV11").set_name("LLAMAAdaptiveDynamicDualPhaseStrategyV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicDualPhaseStrategyV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicDualPhaseStrategyV11 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicDualPhaseStrategyV11" + ).set_name("LLAMAAdaptiveDynamicDualPhaseStrategyV11", register=True) +except Exception as e: # AdaptiveDynamicDualPhaseStrategyV11 print("AdaptiveDynamicDualPhaseStrategyV11 can not be imported: ", e) -try: +try: # AdaptiveDynamicEvolutionStrategy from nevergrad.optimization.lama.AdaptiveDynamicEvolutionStrategy import AdaptiveDynamicEvolutionStrategy lama_register["AdaptiveDynamicEvolutionStrategy"] = AdaptiveDynamicEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveDynamicEvolutionStrategy").set_name("LLAMAAdaptiveDynamicEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicEvolutionStrategy" + ).set_name("LLAMAAdaptiveDynamicEvolutionStrategy", register=True) +except Exception as e: # AdaptiveDynamicEvolutionStrategy print("AdaptiveDynamicEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithm import AdaptiveDynamicExplorationExploitationAlgorithm - - lama_register["AdaptiveDynamicExplorationExploitationAlgorithm"] = AdaptiveDynamicExplorationExploitationAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithm", register=True) -except Exception as e: +try: # AdaptiveDynamicExplorationExploitationAlgorithm + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithm import ( + AdaptiveDynamicExplorationExploitationAlgorithm, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithm"] = ( + AdaptiveDynamicExplorationExploitationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithm" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithm", register=True) +except Exception as e: # AdaptiveDynamicExplorationExploitationAlgorithm print("AdaptiveDynamicExplorationExploitationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV2 import AdaptiveDynamicExplorationExploitationAlgorithmV2 - - lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV2"] = AdaptiveDynamicExplorationExploitationAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2", register=True) -except Exception as e: +try: # AdaptiveDynamicExplorationExploitationAlgorithmV2 + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV2 import ( + AdaptiveDynamicExplorationExploitationAlgorithmV2, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV2"] = ( + AdaptiveDynamicExplorationExploitationAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV2", register=True) +except Exception as e: # AdaptiveDynamicExplorationExploitationAlgorithmV2 print("AdaptiveDynamicExplorationExploitationAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV3 import AdaptiveDynamicExplorationExploitationAlgorithmV3 - - lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV3"] = AdaptiveDynamicExplorationExploitationAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3").set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3", register=True) -except Exception as e: +try: # AdaptiveDynamicExplorationExploitationAlgorithmV3 + from nevergrad.optimization.lama.AdaptiveDynamicExplorationExploitationAlgorithmV3 import ( + AdaptiveDynamicExplorationExploitationAlgorithmV3, + ) + + lama_register["AdaptiveDynamicExplorationExploitationAlgorithmV3"] = ( + AdaptiveDynamicExplorationExploitationAlgorithmV3 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3" + ).set_name("LLAMAAdaptiveDynamicExplorationExploitationAlgorithmV3", register=True) +except Exception as e: # AdaptiveDynamicExplorationExploitationAlgorithmV3 print("AdaptiveDynamicExplorationExploitationAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicExplorationOptimization import AdaptiveDynamicExplorationOptimization +try: # AdaptiveDynamicExplorationOptimization + from nevergrad.optimization.lama.AdaptiveDynamicExplorationOptimization import ( + AdaptiveDynamicExplorationOptimization, + ) lama_register["AdaptiveDynamicExplorationOptimization"] = AdaptiveDynamicExplorationOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationOptimization").set_name("LLAMAAdaptiveDynamicExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: # AdaptiveDynamicExplorationOptimization print("AdaptiveDynamicExplorationOptimization can not be imported: ", e) -try: +try: # AdaptiveDynamicFireworkAlgorithm from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithm import AdaptiveDynamicFireworkAlgorithm lama_register["AdaptiveDynamicFireworkAlgorithm"] = AdaptiveDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithm").set_name("LLAMAAdaptiveDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveDynamicFireworkAlgorithm print("AdaptiveDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithmRedesigned import AdaptiveDynamicFireworkAlgorithmRedesigned +try: # AdaptiveDynamicFireworkAlgorithmRedesigned + from nevergrad.optimization.lama.AdaptiveDynamicFireworkAlgorithmRedesigned import ( + AdaptiveDynamicFireworkAlgorithmRedesigned, + ) lama_register["AdaptiveDynamicFireworkAlgorithmRedesigned"] = AdaptiveDynamicFireworkAlgorithmRedesigned - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned").set_name("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned" + ).set_name("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned", register=True) +except Exception as e: # AdaptiveDynamicFireworkAlgorithmRedesigned print("AdaptiveDynamicFireworkAlgorithmRedesigned can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicFireworkDifferentialEvolutionV4 import AdaptiveDynamicFireworkDifferentialEvolutionV4 - - lama_register["AdaptiveDynamicFireworkDifferentialEvolutionV4"] = AdaptiveDynamicFireworkDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4").set_name("LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4", register=True) -except Exception as e: +try: # AdaptiveDynamicFireworkDifferentialEvolutionV4 + from nevergrad.optimization.lama.AdaptiveDynamicFireworkDifferentialEvolutionV4 import ( + AdaptiveDynamicFireworkDifferentialEvolutionV4, + ) + + lama_register["AdaptiveDynamicFireworkDifferentialEvolutionV4"] = ( + AdaptiveDynamicFireworkDifferentialEvolutionV4 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4" + ).set_name("LLAMAAdaptiveDynamicFireworkDifferentialEvolutionV4", register=True) +except Exception as e: # AdaptiveDynamicFireworkDifferentialEvolutionV4 print("AdaptiveDynamicFireworkDifferentialEvolutionV4 can not be imported: ", e) -try: +try: # AdaptiveDynamicHarmonySearch from nevergrad.optimization.lama.AdaptiveDynamicHarmonySearch import AdaptiveDynamicHarmonySearch lama_register["AdaptiveDynamicHarmonySearch"] = AdaptiveDynamicHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHarmonySearch").set_name("LLAMAAdaptiveDynamicHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHarmonySearch" + ).set_name("LLAMAAdaptiveDynamicHarmonySearch", register=True) +except Exception as e: # AdaptiveDynamicHarmonySearch print("AdaptiveDynamicHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizationV2 import AdaptiveDynamicHybridOptimizationV2 +try: # AdaptiveDynamicHybridOptimizationV2 + from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizationV2 import ( + AdaptiveDynamicHybridOptimizationV2, + ) lama_register["AdaptiveDynamicHybridOptimizationV2"] = AdaptiveDynamicHybridOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizationV2").set_name("LLAMAAdaptiveDynamicHybridOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHybridOptimizationV2" + ).set_name("LLAMAAdaptiveDynamicHybridOptimizationV2", register=True) +except Exception as e: # AdaptiveDynamicHybridOptimizationV2 print("AdaptiveDynamicHybridOptimizationV2 can not be imported: ", e) -try: +try: # AdaptiveDynamicHybridOptimizer from nevergrad.optimization.lama.AdaptiveDynamicHybridOptimizer import AdaptiveDynamicHybridOptimizer lama_register["AdaptiveDynamicHybridOptimizer"] = AdaptiveDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizer").set_name("LLAMAAdaptiveDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMAAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: # AdaptiveDynamicHybridOptimizer print("AdaptiveDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicMemeticEvolutionaryAlgorithm import AdaptiveDynamicMemeticEvolutionaryAlgorithm +try: # AdaptiveDynamicMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.AdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + AdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) lama_register["AdaptiveDynamicMemeticEvolutionaryAlgorithm"] = AdaptiveDynamicMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # AdaptiveDynamicMemeticEvolutionaryAlgorithm print("AdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicMultiStrategyDifferentialEvolution import AdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["AdaptiveDynamicMultiStrategyDifferentialEvolution"] = AdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveDynamicMultiStrategyDifferentialEvolution import ( + AdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["AdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + AdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # AdaptiveDynamicMultiStrategyDifferentialEvolution print("AdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveDynamicQuantumSwarmOptimization import AdaptiveDynamicQuantumSwarmOptimization +try: # AdaptiveDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.AdaptiveDynamicQuantumSwarmOptimization import ( + AdaptiveDynamicQuantumSwarmOptimization, + ) lama_register["AdaptiveDynamicQuantumSwarmOptimization"] = AdaptiveDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveDynamicQuantumSwarmOptimization").set_name("LLAMAAdaptiveDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveDynamicQuantumSwarmOptimization" + ).set_name("LLAMAAdaptiveDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # AdaptiveDynamicQuantumSwarmOptimization print("AdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEliteCovarianceMatrixMemeticSearch import AdaptiveEliteCovarianceMatrixMemeticSearch +try: # AdaptiveEliteCovarianceMatrixMemeticSearch + from nevergrad.optimization.lama.AdaptiveEliteCovarianceMatrixMemeticSearch import ( + AdaptiveEliteCovarianceMatrixMemeticSearch, + ) lama_register["AdaptiveEliteCovarianceMatrixMemeticSearch"] = AdaptiveEliteCovarianceMatrixMemeticSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch").set_name("LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch" + ).set_name("LLAMAAdaptiveEliteCovarianceMatrixMemeticSearch", register=True) +except Exception as e: # AdaptiveEliteCovarianceMatrixMemeticSearch print("AdaptiveEliteCovarianceMatrixMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEliteDifferentialEvolution import AdaptiveEliteDifferentialEvolution +try: # AdaptiveEliteDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveEliteDifferentialEvolution import ( + AdaptiveEliteDifferentialEvolution, + ) lama_register["AdaptiveEliteDifferentialEvolution"] = AdaptiveEliteDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteDifferentialEvolution").set_name("LLAMAAdaptiveEliteDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteDifferentialEvolution", register=True) +except Exception as e: # AdaptiveEliteDifferentialEvolution print("AdaptiveEliteDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEliteDiverseHybridOptimizer import AdaptiveEliteDiverseHybridOptimizer +try: # AdaptiveEliteDiverseHybridOptimizer + from nevergrad.optimization.lama.AdaptiveEliteDiverseHybridOptimizer import ( + AdaptiveEliteDiverseHybridOptimizer, + ) lama_register["AdaptiveEliteDiverseHybridOptimizer"] = AdaptiveEliteDiverseHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteDiverseHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteDiverseHybridOptimizer").set_name("LLAMAAdaptiveEliteDiverseHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteDiverseHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteDiverseHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteDiverseHybridOptimizer" + ).set_name("LLAMAAdaptiveEliteDiverseHybridOptimizer", register=True) +except Exception as e: # AdaptiveEliteDiverseHybridOptimizer print("AdaptiveEliteDiverseHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedDE_LS_v2 from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_LS_v2 import AdaptiveEliteGuidedDE_LS_v2 lama_register["AdaptiveEliteGuidedDE_LS_v2"] = AdaptiveEliteGuidedDE_LS_v2 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedDE_LS_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2").set_name("LLAMAAdaptiveEliteGuidedDE_LS_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedDE_LS_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_LS_v2").set_name( + "LLAMAAdaptiveEliteGuidedDE_LS_v2", register=True + ) +except Exception as e: # AdaptiveEliteGuidedDE_LS_v2 print("AdaptiveEliteGuidedDE_LS_v2 can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedDE_v2 from nevergrad.optimization.lama.AdaptiveEliteGuidedDE_v2 import AdaptiveEliteGuidedDE_v2 lama_register["AdaptiveEliteGuidedDE_v2"] = AdaptiveEliteGuidedDE_v2 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedDE_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2").set_name("LLAMAAdaptiveEliteGuidedDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedDE_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedDE_v2").set_name( + "LLAMAAdaptiveEliteGuidedDE_v2", register=True + ) +except Exception as e: # AdaptiveEliteGuidedDE_v2 print("AdaptiveEliteGuidedDE_v2 can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedMutationDE from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE import AdaptiveEliteGuidedMutationDE lama_register["AdaptiveEliteGuidedMutationDE"] = AdaptiveEliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE").set_name("LLAMAAdaptiveEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE", register=True) +except Exception as e: # AdaptiveEliteGuidedMutationDE print("AdaptiveEliteGuidedMutationDE can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedMutationDE_v3 from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v3 import AdaptiveEliteGuidedMutationDE_v3 lama_register["AdaptiveEliteGuidedMutationDE_v3"] = AdaptiveEliteGuidedMutationDE_v3 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v3").set_name("LLAMAAdaptiveEliteGuidedMutationDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE_v3" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v3", register=True) +except Exception as e: # AdaptiveEliteGuidedMutationDE_v3 print("AdaptiveEliteGuidedMutationDE_v3 can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedMutationDE_v4 from nevergrad.optimization.lama.AdaptiveEliteGuidedMutationDE_v4 import AdaptiveEliteGuidedMutationDE_v4 lama_register["AdaptiveEliteGuidedMutationDE_v4"] = AdaptiveEliteGuidedMutationDE_v4 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedMutationDE_v4 = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v4").set_name("LLAMAAdaptiveEliteGuidedMutationDE_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedMutationDE_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedMutationDE_v4 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedMutationDE_v4" + ).set_name("LLAMAAdaptiveEliteGuidedMutationDE_v4", register=True) +except Exception as e: # AdaptiveEliteGuidedMutationDE_v4 print("AdaptiveEliteGuidedMutationDE_v4 can not be imported: ", e) -try: +try: # AdaptiveEliteGuidedRestartDE from nevergrad.optimization.lama.AdaptiveEliteGuidedRestartDE import AdaptiveEliteGuidedRestartDE lama_register["AdaptiveEliteGuidedRestartDE"] = AdaptiveEliteGuidedRestartDE - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteGuidedRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedRestartDE").set_name("LLAMAAdaptiveEliteGuidedRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( + method="LLAMAAdaptiveEliteGuidedRestartDE" + ).set_name("LLAMAAdaptiveEliteGuidedRestartDE", register=True) +except Exception as e: # AdaptiveEliteGuidedRestartDE print("AdaptiveEliteGuidedRestartDE can not be imported: ", e) -try: +try: # AdaptiveEliteHybridOptimizer from nevergrad.optimization.lama.AdaptiveEliteHybridOptimizer import AdaptiveEliteHybridOptimizer lama_register["AdaptiveEliteHybridOptimizer"] = AdaptiveEliteHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteHybridOptimizer").set_name("LLAMAAdaptiveEliteHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteHybridOptimizer" + ).set_name("LLAMAAdaptiveEliteHybridOptimizer", register=True) +except Exception as e: # AdaptiveEliteHybridOptimizer print("AdaptiveEliteHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEliteMemeticDifferentialEvolution import AdaptiveEliteMemeticDifferentialEvolution +try: # AdaptiveEliteMemeticDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveEliteMemeticDifferentialEvolution import ( + AdaptiveEliteMemeticDifferentialEvolution, + ) lama_register["AdaptiveEliteMemeticDifferentialEvolution"] = AdaptiveEliteMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticDifferentialEvolution").set_name("LLAMAAdaptiveEliteMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteMemeticDifferentialEvolution", register=True) +except Exception as e: # AdaptiveEliteMemeticDifferentialEvolution print("AdaptiveEliteMemeticDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveEliteMemeticOptimizer from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizer import AdaptiveEliteMemeticOptimizer lama_register["AdaptiveEliteMemeticOptimizer"] = AdaptiveEliteMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizer").set_name("LLAMAAdaptiveEliteMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizer" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizer", register=True) +except Exception as e: # AdaptiveEliteMemeticOptimizer print("AdaptiveEliteMemeticOptimizer can not be imported: ", e) -try: +try: # AdaptiveEliteMemeticOptimizerV5 from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV5 import AdaptiveEliteMemeticOptimizerV5 lama_register["AdaptiveEliteMemeticOptimizerV5"] = AdaptiveEliteMemeticOptimizerV5 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteMemeticOptimizerV5 = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV5").set_name("LLAMAAdaptiveEliteMemeticOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteMemeticOptimizerV5 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizerV5" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV5", register=True) +except Exception as e: # AdaptiveEliteMemeticOptimizerV5 print("AdaptiveEliteMemeticOptimizerV5 can not be imported: ", e) -try: +try: # AdaptiveEliteMemeticOptimizerV6 from nevergrad.optimization.lama.AdaptiveEliteMemeticOptimizerV6 import AdaptiveEliteMemeticOptimizerV6 lama_register["AdaptiveEliteMemeticOptimizerV6"] = AdaptiveEliteMemeticOptimizerV6 - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteMemeticOptimizerV6 = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV6").set_name("LLAMAAdaptiveEliteMemeticOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMemeticOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteMemeticOptimizerV6 = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMemeticOptimizerV6" + ).set_name("LLAMAAdaptiveEliteMemeticOptimizerV6", register=True) +except Exception as e: # AdaptiveEliteMemeticOptimizerV6 print("AdaptiveEliteMemeticOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEliteMultiStrategyDifferentialEvolution import AdaptiveEliteMultiStrategyDifferentialEvolution - - lama_register["AdaptiveEliteMultiStrategyDifferentialEvolution"] = AdaptiveEliteMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveEliteMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveEliteMultiStrategyDifferentialEvolution import ( + AdaptiveEliteMultiStrategyDifferentialEvolution, + ) + + lama_register["AdaptiveEliteMultiStrategyDifferentialEvolution"] = ( + AdaptiveEliteMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # AdaptiveEliteMultiStrategyDifferentialEvolution print("AdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveElitistDE from nevergrad.optimization.lama.AdaptiveElitistDE import AdaptiveElitistDE lama_register["AdaptiveElitistDE"] = AdaptiveElitistDE - res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveElitistDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE").set_name("LLAMAAdaptiveElitistDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveElitistDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE").set_name( + "LLAMAAdaptiveElitistDE", register=True + ) +except Exception as e: # AdaptiveElitistDE print("AdaptiveElitistDE can not be imported: ", e) -try: +try: # AdaptiveElitistDE_v3 from nevergrad.optimization.lama.AdaptiveElitistDE_v3 import AdaptiveElitistDE_v3 lama_register["AdaptiveElitistDE_v3"] = AdaptiveElitistDE_v3 - res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveElitistDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3").set_name("LLAMAAdaptiveElitistDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveElitistDE_v3 = NonObjectOptimizer(method="LLAMAAdaptiveElitistDE_v3").set_name( + "LLAMAAdaptiveElitistDE_v3", register=True + ) +except Exception as e: # AdaptiveElitistDE_v3 print("AdaptiveElitistDE_v3 can not be imported: ", e) -try: +try: # AdaptiveElitistMutationDE from nevergrad.optimization.lama.AdaptiveElitistMutationDE import AdaptiveElitistMutationDE lama_register["AdaptiveElitistMutationDE"] = AdaptiveElitistMutationDE - res = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveElitistMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE").set_name("LLAMAAdaptiveElitistMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveElitistMutationDE = NonObjectOptimizer(method="LLAMAAdaptiveElitistMutationDE").set_name( + "LLAMAAdaptiveElitistMutationDE", register=True + ) +except Exception as e: # AdaptiveElitistMutationDE print("AdaptiveElitistMutationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveElitistPopulationStrategy import AdaptiveElitistPopulationStrategy +try: # AdaptiveElitistPopulationStrategy + from nevergrad.optimization.lama.AdaptiveElitistPopulationStrategy import ( + AdaptiveElitistPopulationStrategy, + ) lama_register["AdaptiveElitistPopulationStrategy"] = AdaptiveElitistPopulationStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveElitistPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveElitistPopulationStrategy = NonObjectOptimizer(method="LLAMAAdaptiveElitistPopulationStrategy").set_name("LLAMAAdaptiveElitistPopulationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveElitistPopulationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveElitistPopulationStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveElitistPopulationStrategy" + ).set_name("LLAMAAdaptiveElitistPopulationStrategy", register=True) +except Exception as e: # AdaptiveElitistPopulationStrategy print("AdaptiveElitistPopulationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveElitistQuasiRandomDEGradientAnnealing import AdaptiveElitistQuasiRandomDEGradientAnnealing - - lama_register["AdaptiveElitistQuasiRandomDEGradientAnnealing"] = AdaptiveElitistQuasiRandomDEGradientAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing").set_name("LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing", register=True) -except Exception as e: +try: # AdaptiveElitistQuasiRandomDEGradientAnnealing + from nevergrad.optimization.lama.AdaptiveElitistQuasiRandomDEGradientAnnealing import ( + AdaptiveElitistQuasiRandomDEGradientAnnealing, + ) + + lama_register["AdaptiveElitistQuasiRandomDEGradientAnnealing"] = ( + AdaptiveElitistQuasiRandomDEGradientAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing" + ).set_name("LLAMAAdaptiveElitistQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: # AdaptiveElitistQuasiRandomDEGradientAnnealing print("AdaptiveElitistQuasiRandomDEGradientAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm import AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm - - lama_register["AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm"] = AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) -except Exception as e: +try: # AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm + from nevergrad.optimization.lama.AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm import ( + AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm, + ) + + lama_register["AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm"] = ( + AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm print("AdaptiveEnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch import AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch - - lama_register["AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch"] = AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch").set_name("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) -except Exception as e: +try: # AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch + from nevergrad.optimization.lama.AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch import ( + AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch, + ) + + lama_register["AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( + AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch" + ).set_name("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) +except Exception as e: # AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch print("AdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch import AdaptiveEnhancedEvolutionaryFireworksSearch +try: # AdaptiveEnhancedEvolutionaryFireworksSearch + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch import ( + AdaptiveEnhancedEvolutionaryFireworksSearch, + ) lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch"] = AdaptiveEnhancedEvolutionaryFireworksSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch").set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: # AdaptiveEnhancedEvolutionaryFireworksSearch print("AdaptiveEnhancedEvolutionaryFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch_v2 import AdaptiveEnhancedEvolutionaryFireworksSearch_v2 - - lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch_v2"] = AdaptiveEnhancedEvolutionaryFireworksSearch_v2 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2").set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2", register=True) -except Exception as e: +try: # AdaptiveEnhancedEvolutionaryFireworksSearch_v2 + from nevergrad.optimization.lama.AdaptiveEnhancedEvolutionaryFireworksSearch_v2 import ( + AdaptiveEnhancedEvolutionaryFireworksSearch_v2, + ) + + lama_register["AdaptiveEnhancedEvolutionaryFireworksSearch_v2"] = ( + AdaptiveEnhancedEvolutionaryFireworksSearch_v2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2" + ).set_name("LLAMAAdaptiveEnhancedEvolutionaryFireworksSearch_v2", register=True) +except Exception as e: # AdaptiveEnhancedEvolutionaryFireworksSearch_v2 print("AdaptiveEnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedExplorationGravitationalSwarmOptimization import AdaptiveEnhancedExplorationGravitationalSwarmOptimization - - lama_register["AdaptiveEnhancedExplorationGravitationalSwarmOptimization"] = AdaptiveEnhancedExplorationGravitationalSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization").set_name("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization", register=True) -except Exception as e: +try: # AdaptiveEnhancedExplorationGravitationalSwarmOptimization + from nevergrad.optimization.lama.AdaptiveEnhancedExplorationGravitationalSwarmOptimization import ( + AdaptiveEnhancedExplorationGravitationalSwarmOptimization, + ) + + lama_register["AdaptiveEnhancedExplorationGravitationalSwarmOptimization"] = ( + AdaptiveEnhancedExplorationGravitationalSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization" + ).set_name("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization", register=True) +except Exception as e: # AdaptiveEnhancedExplorationGravitationalSwarmOptimization print("AdaptiveEnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithm import AdaptiveEnhancedFireworkAlgorithm +try: # AdaptiveEnhancedFireworkAlgorithm + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithm import ( + AdaptiveEnhancedFireworkAlgorithm, + ) lama_register["AdaptiveEnhancedFireworkAlgorithm"] = AdaptiveEnhancedFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveEnhancedFireworkAlgorithm print("AdaptiveEnhancedFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithmWithLocalSearch import AdaptiveEnhancedFireworkAlgorithmWithLocalSearch - - lama_register["AdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = AdaptiveEnhancedFireworkAlgorithmWithLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) -except Exception as e: +try: # AdaptiveEnhancedFireworkAlgorithmWithLocalSearch + from nevergrad.optimization.lama.AdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( + AdaptiveEnhancedFireworkAlgorithmWithLocalSearch, + ) + + lama_register["AdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( + AdaptiveEnhancedFireworkAlgorithmWithLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: # AdaptiveEnhancedFireworkAlgorithmWithLocalSearch print("AdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGradientGuidedHybridPSO import AdaptiveEnhancedGradientGuidedHybridPSO +try: # AdaptiveEnhancedGradientGuidedHybridPSO + from nevergrad.optimization.lama.AdaptiveEnhancedGradientGuidedHybridPSO import ( + AdaptiveEnhancedGradientGuidedHybridPSO, + ) lama_register["AdaptiveEnhancedGradientGuidedHybridPSO"] = AdaptiveEnhancedGradientGuidedHybridPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMAAdaptiveEnhancedGradientGuidedHybridPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: # AdaptiveEnhancedGradientGuidedHybridPSO print("AdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligence import AdaptiveEnhancedGravitationalSwarmIntelligence - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligence"] = AdaptiveEnhancedGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligence + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligence import ( + AdaptiveEnhancedGravitationalSwarmIntelligence, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligence"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligence + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligence print("AdaptiveEnhancedGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV18 import AdaptiveEnhancedGravitationalSwarmIntelligenceV18 - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV18"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV18 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligenceV18 + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV18 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV18, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV18"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV18 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligenceV18 print("AdaptiveEnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV2 import AdaptiveEnhancedGravitationalSwarmIntelligenceV2 - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV2"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligenceV2 + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV2 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV2, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV2"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligenceV2 print("AdaptiveEnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV22 import AdaptiveEnhancedGravitationalSwarmIntelligenceV22 - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV22"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV22 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligenceV22 + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV22 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV22, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV22"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV22 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligenceV22 print("AdaptiveEnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV29 import AdaptiveEnhancedGravitationalSwarmIntelligenceV29 - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV29"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV29 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligenceV29 + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV29 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV29, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV29"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV29 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligenceV29 print("AdaptiveEnhancedGravitationalSwarmIntelligenceV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV33 import AdaptiveEnhancedGravitationalSwarmIntelligenceV33 - - lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV33"] = AdaptiveEnhancedGravitationalSwarmIntelligenceV33 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33").set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33", register=True) -except Exception as e: +try: # AdaptiveEnhancedGravitationalSwarmIntelligenceV33 + from nevergrad.optimization.lama.AdaptiveEnhancedGravitationalSwarmIntelligenceV33 import ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV33, + ) + + lama_register["AdaptiveEnhancedGravitationalSwarmIntelligenceV33"] = ( + AdaptiveEnhancedGravitationalSwarmIntelligenceV33 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33" + ).set_name("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33", register=True) +except Exception as e: # AdaptiveEnhancedGravitationalSwarmIntelligenceV33 print("AdaptiveEnhancedGravitationalSwarmIntelligenceV33 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonicFireworkAlgorithm import AdaptiveEnhancedHarmonicFireworkAlgorithm +try: # AdaptiveEnhancedHarmonicFireworkAlgorithm + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonicFireworkAlgorithm import ( + AdaptiveEnhancedHarmonicFireworkAlgorithm, + ) lama_register["AdaptiveEnhancedHarmonicFireworkAlgorithm"] = AdaptiveEnhancedHarmonicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm").set_name("LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedHarmonicFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveEnhancedHarmonicFireworkAlgorithm print("AdaptiveEnhancedHarmonicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch import AdaptiveEnhancedHarmonyFireworksSearch +try: # AdaptiveEnhancedHarmonyFireworksSearch + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch import ( + AdaptiveEnhancedHarmonyFireworksSearch, + ) lama_register["AdaptiveEnhancedHarmonyFireworksSearch"] = AdaptiveEnhancedHarmonyFireworksSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch").set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch" + ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch", register=True) +except Exception as e: # AdaptiveEnhancedHarmonyFireworksSearch print("AdaptiveEnhancedHarmonyFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch_v2 import AdaptiveEnhancedHarmonyFireworksSearch_v2 +try: # AdaptiveEnhancedHarmonyFireworksSearch_v2 + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonyFireworksSearch_v2 import ( + AdaptiveEnhancedHarmonyFireworksSearch_v2, + ) lama_register["AdaptiveEnhancedHarmonyFireworksSearch_v2"] = AdaptiveEnhancedHarmonyFireworksSearch_v2 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2").set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2" + ).set_name("LLAMAAdaptiveEnhancedHarmonyFireworksSearch_v2", register=True) +except Exception as e: # AdaptiveEnhancedHarmonyFireworksSearch_v2 print("AdaptiveEnhancedHarmonyFireworksSearch_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration import AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration - - lama_register["AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration"] = AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration").set_name("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration", register=True) -except Exception as e: +try: # AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration + from nevergrad.optimization.lama.AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration import ( + AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration, + ) + + lama_register["AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration"] = ( + AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration" + ).set_name("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration", register=True) +except Exception as e: # AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration print("AdaptiveEnhancedHarmonySearchWithLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedMemeticDifferentialEvolution import AdaptiveEnhancedMemeticDifferentialEvolution - - lama_register["AdaptiveEnhancedMemeticDifferentialEvolution"] = AdaptiveEnhancedMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution").set_name("LLAMAAdaptiveEnhancedMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveEnhancedMemeticDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticDifferentialEvolution import ( + AdaptiveEnhancedMemeticDifferentialEvolution, + ) + + lama_register["AdaptiveEnhancedMemeticDifferentialEvolution"] = ( + AdaptiveEnhancedMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveEnhancedMemeticDifferentialEvolution", register=True) +except Exception as e: # AdaptiveEnhancedMemeticDifferentialEvolution print("AdaptiveEnhancedMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 import AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 - - lama_register["AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3"] = AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3").set_name("LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3", register=True) -except Exception as e: +try: # AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 + from nevergrad.optimization.lama.AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 import ( + AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3, + ) + + lama_register["AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3"] = ( + AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3" + ).set_name("LLAMAAdaptiveEnhancedMemeticEvolutionaryAlgorithmV3", register=True) +except Exception as e: # AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 print("AdaptiveEnhancedMemeticEvolutionaryAlgorithmV3 can not be imported: ", e) -try: +try: # AdaptiveEnhancedMetaNetAQAPSOv10 from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv10 import AdaptiveEnhancedMetaNetAQAPSOv10 lama_register["AdaptiveEnhancedMetaNetAQAPSOv10"] = AdaptiveEnhancedMetaNetAQAPSOv10 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMetaNetAQAPSOv10 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10").set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMetaNetAQAPSOv10 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv10" + ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv10", register=True) +except Exception as e: # AdaptiveEnhancedMetaNetAQAPSOv10 print("AdaptiveEnhancedMetaNetAQAPSOv10 can not be imported: ", e) -try: +try: # AdaptiveEnhancedMetaNetAQAPSOv11 from nevergrad.optimization.lama.AdaptiveEnhancedMetaNetAQAPSOv11 import AdaptiveEnhancedMetaNetAQAPSOv11 lama_register["AdaptiveEnhancedMetaNetAQAPSOv11"] = AdaptiveEnhancedMetaNetAQAPSOv11 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMetaNetAQAPSOv11 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11").set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMetaNetAQAPSOv11 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMetaNetAQAPSOv11" + ).set_name("LLAMAAdaptiveEnhancedMetaNetAQAPSOv11", register=True) +except Exception as e: # AdaptiveEnhancedMetaNetAQAPSOv11 print("AdaptiveEnhancedMetaNetAQAPSOv11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseDifferentialEvolution import AdaptiveEnhancedMultiPhaseDifferentialEvolution - - lama_register["AdaptiveEnhancedMultiPhaseDifferentialEvolution"] = AdaptiveEnhancedMultiPhaseDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution").set_name("LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveEnhancedMultiPhaseDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseDifferentialEvolution import ( + AdaptiveEnhancedMultiPhaseDifferentialEvolution, + ) + + lama_register["AdaptiveEnhancedMultiPhaseDifferentialEvolution"] = ( + AdaptiveEnhancedMultiPhaseDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution" + ).set_name("LLAMAAdaptiveEnhancedMultiPhaseDifferentialEvolution", register=True) +except Exception as e: # AdaptiveEnhancedMultiPhaseDifferentialEvolution print("AdaptiveEnhancedMultiPhaseDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseOptimizationAlgorithm import AdaptiveEnhancedMultiPhaseOptimizationAlgorithm - - lama_register["AdaptiveEnhancedMultiPhaseOptimizationAlgorithm"] = AdaptiveEnhancedMultiPhaseOptimizationAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm").set_name("LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm", register=True) -except Exception as e: +try: # AdaptiveEnhancedMultiPhaseOptimizationAlgorithm + from nevergrad.optimization.lama.AdaptiveEnhancedMultiPhaseOptimizationAlgorithm import ( + AdaptiveEnhancedMultiPhaseOptimizationAlgorithm, + ) + + lama_register["AdaptiveEnhancedMultiPhaseOptimizationAlgorithm"] = ( + AdaptiveEnhancedMultiPhaseOptimizationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm" + ).set_name("LLAMAAdaptiveEnhancedMultiPhaseOptimizationAlgorithm", register=True) +except Exception as e: # AdaptiveEnhancedMultiPhaseOptimizationAlgorithm print("AdaptiveEnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) -try: +try: # AdaptiveEnhancedQGSA_v7 from nevergrad.optimization.lama.AdaptiveEnhancedQGSA_v7 import AdaptiveEnhancedQGSA_v7 lama_register["AdaptiveEnhancedQGSA_v7"] = AdaptiveEnhancedQGSA_v7 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedQGSA_v7 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7").set_name("LLAMAAdaptiveEnhancedQGSA_v7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedQGSA_v7 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQGSA_v7").set_name( + "LLAMAAdaptiveEnhancedQGSA_v7", register=True + ) +except Exception as e: # AdaptiveEnhancedQGSA_v7 print("AdaptiveEnhancedQGSA_v7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedQuantumHarmonySearch import AdaptiveEnhancedQuantumHarmonySearch +try: # AdaptiveEnhancedQuantumHarmonySearch + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumHarmonySearch import ( + AdaptiveEnhancedQuantumHarmonySearch, + ) lama_register["AdaptiveEnhancedQuantumHarmonySearch"] = AdaptiveEnhancedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumHarmonySearch").set_name("LLAMAAdaptiveEnhancedQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedQuantumHarmonySearch" + ).set_name("LLAMAAdaptiveEnhancedQuantumHarmonySearch", register=True) +except Exception as e: # AdaptiveEnhancedQuantumHarmonySearch print("AdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedQuantumSimulatedAnnealing import AdaptiveEnhancedQuantumSimulatedAnnealing +try: # AdaptiveEnhancedQuantumSimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveEnhancedQuantumSimulatedAnnealing import ( + AdaptiveEnhancedQuantumSimulatedAnnealing, + ) lama_register["AdaptiveEnhancedQuantumSimulatedAnnealing"] = AdaptiveEnhancedQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveEnhancedQuantumSimulatedAnnealing", register=True) +except Exception as e: # AdaptiveEnhancedQuantumSimulatedAnnealing print("AdaptiveEnhancedQuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 - - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11", register=True) -except Exception as e: +try: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11", register=True) +except Exception as e: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 - - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14", register=True) -except Exception as e: +try: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14", register=True) +except Exception as e: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 import AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 - - lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28"] = AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 - res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28").set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28", register=True) -except Exception as e: +try: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 + from nevergrad.optimization.lama.AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 import ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28, + ) + + lama_register["AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28"] = ( + AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 = NonObjectOptimizer( + method="LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28" + ).set_name("LLAMAAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28", register=True) +except Exception as e: # AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 print("AdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V28 can not be imported: ", e) -try: +try: # AdaptiveEnsembleMemeticAlgorithm from nevergrad.optimization.lama.AdaptiveEnsembleMemeticAlgorithm import AdaptiveEnsembleMemeticAlgorithm lama_register["AdaptiveEnsembleMemeticAlgorithm"] = AdaptiveEnsembleMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveEnsembleMemeticAlgorithm").set_name("LLAMAAdaptiveEnsembleMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEnsembleMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveEnsembleMemeticAlgorithm" + ).set_name("LLAMAAdaptiveEnsembleMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveEnsembleMemeticAlgorithm print("AdaptiveEnsembleMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialOptimization import AdaptiveEvolutionaryDifferentialOptimization - - lama_register["AdaptiveEvolutionaryDifferentialOptimization"] = AdaptiveEvolutionaryDifferentialOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEvolutionaryDifferentialOptimization = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialOptimization").set_name("LLAMAAdaptiveEvolutionaryDifferentialOptimization", register=True) -except Exception as e: +try: # AdaptiveEvolutionaryDifferentialOptimization + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialOptimization import ( + AdaptiveEvolutionaryDifferentialOptimization, + ) + + lama_register["AdaptiveEvolutionaryDifferentialOptimization"] = ( + AdaptiveEvolutionaryDifferentialOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEvolutionaryDifferentialOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryDifferentialOptimization" + ).set_name("LLAMAAdaptiveEvolutionaryDifferentialOptimization", register=True) +except Exception as e: # AdaptiveEvolutionaryDifferentialOptimization print("AdaptiveEvolutionaryDifferentialOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialPopulationStrategy import AdaptiveEvolutionaryDifferentialPopulationStrategy - - lama_register["AdaptiveEvolutionaryDifferentialPopulationStrategy"] = AdaptiveEvolutionaryDifferentialPopulationStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy").set_name("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) -except Exception as e: +try: # AdaptiveEvolutionaryDifferentialPopulationStrategy + from nevergrad.optimization.lama.AdaptiveEvolutionaryDifferentialPopulationStrategy import ( + AdaptiveEvolutionaryDifferentialPopulationStrategy, + ) + + lama_register["AdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( + AdaptiveEvolutionaryDifferentialPopulationStrategy + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy" + ).set_name("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) +except Exception as e: # AdaptiveEvolutionaryDifferentialPopulationStrategy print("AdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryFireworksSearch_v1 import AdaptiveEvolutionaryFireworksSearch_v1 +try: # AdaptiveEvolutionaryFireworksSearch_v1 + from nevergrad.optimization.lama.AdaptiveEvolutionaryFireworksSearch_v1 import ( + AdaptiveEvolutionaryFireworksSearch_v1, + ) lama_register["AdaptiveEvolutionaryFireworksSearch_v1"] = AdaptiveEvolutionaryFireworksSearch_v1 - res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEvolutionaryFireworksSearch_v1 = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1").set_name("LLAMAAdaptiveEvolutionaryFireworksSearch_v1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEvolutionaryFireworksSearch_v1 = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryFireworksSearch_v1" + ).set_name("LLAMAAdaptiveEvolutionaryFireworksSearch_v1", register=True) +except Exception as e: # AdaptiveEvolutionaryFireworksSearch_v1 print("AdaptiveEvolutionaryFireworksSearch_v1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveEvolutionaryGradientSearch import AdaptiveEvolutionaryGradientSearch +try: # AdaptiveEvolutionaryGradientSearch + from nevergrad.optimization.lama.AdaptiveEvolutionaryGradientSearch import ( + AdaptiveEvolutionaryGradientSearch, + ) lama_register["AdaptiveEvolutionaryGradientSearch"] = AdaptiveEvolutionaryGradientSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryGradientSearch").set_name("LLAMAAdaptiveEvolutionaryGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveEvolutionaryGradientSearch" + ).set_name("LLAMAAdaptiveEvolutionaryGradientSearch", register=True) +except Exception as e: # AdaptiveEvolutionaryGradientSearch print("AdaptiveEvolutionaryGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveExplorationEvolutionStrategy import AdaptiveExplorationEvolutionStrategy +try: # AdaptiveExplorationEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveExplorationEvolutionStrategy import ( + AdaptiveExplorationEvolutionStrategy, + ) lama_register["AdaptiveExplorationEvolutionStrategy"] = AdaptiveExplorationEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveExplorationEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveExplorationEvolutionStrategy").set_name("LLAMAAdaptiveExplorationEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveExplorationEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationEvolutionStrategy" + ).set_name("LLAMAAdaptiveExplorationEvolutionStrategy", register=True) +except Exception as e: # AdaptiveExplorationEvolutionStrategy print("AdaptiveExplorationEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveExplorationExploitationDifferentialEvolution import AdaptiveExplorationExploitationDifferentialEvolution - - lama_register["AdaptiveExplorationExploitationDifferentialEvolution"] = AdaptiveExplorationExploitationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveExplorationExploitationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution").set_name("LLAMAAdaptiveExplorationExploitationDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveExplorationExploitationDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveExplorationExploitationDifferentialEvolution import ( + AdaptiveExplorationExploitationDifferentialEvolution, + ) + + lama_register["AdaptiveExplorationExploitationDifferentialEvolution"] = ( + AdaptiveExplorationExploitationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveExplorationExploitationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationExploitationDifferentialEvolution" + ).set_name("LLAMAAdaptiveExplorationExploitationDifferentialEvolution", register=True) +except Exception as e: # AdaptiveExplorationExploitationDifferentialEvolution print("AdaptiveExplorationExploitationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveExplorationExploitationHybridAlgorithm import AdaptiveExplorationExploitationHybridAlgorithm - - lama_register["AdaptiveExplorationExploitationHybridAlgorithm"] = AdaptiveExplorationExploitationHybridAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveExplorationExploitationHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm").set_name("LLAMAAdaptiveExplorationExploitationHybridAlgorithm", register=True) -except Exception as e: +try: # AdaptiveExplorationExploitationHybridAlgorithm + from nevergrad.optimization.lama.AdaptiveExplorationExploitationHybridAlgorithm import ( + AdaptiveExplorationExploitationHybridAlgorithm, + ) + + lama_register["AdaptiveExplorationExploitationHybridAlgorithm"] = ( + AdaptiveExplorationExploitationHybridAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveExplorationExploitationHybridAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveExplorationExploitationHybridAlgorithm" + ).set_name("LLAMAAdaptiveExplorationExploitationHybridAlgorithm", register=True) +except Exception as e: # AdaptiveExplorationExploitationHybridAlgorithm print("AdaptiveExplorationExploitationHybridAlgorithm can not be imported: ", e) -try: +try: # AdaptiveExploratoryOptimizer from nevergrad.optimization.lama.AdaptiveExploratoryOptimizer import AdaptiveExploratoryOptimizer lama_register["AdaptiveExploratoryOptimizer"] = AdaptiveExploratoryOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveExploratoryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveExploratoryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveExploratoryOptimizer").set_name("LLAMAAdaptiveExploratoryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveExploratoryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveExploratoryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveExploratoryOptimizer" + ).set_name("LLAMAAdaptiveExploratoryOptimizer", register=True) +except Exception as e: # AdaptiveExploratoryOptimizer print("AdaptiveExploratoryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveFeedbackControlStrategyV61 import AdaptiveFeedbackControlStrategyV61 +try: # AdaptiveFeedbackControlStrategyV61 + from nevergrad.optimization.lama.AdaptiveFeedbackControlStrategyV61 import ( + AdaptiveFeedbackControlStrategyV61, + ) lama_register["AdaptiveFeedbackControlStrategyV61"] = AdaptiveFeedbackControlStrategyV61 - res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackControlStrategyV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFeedbackControlStrategyV61 = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackControlStrategyV61").set_name("LLAMAAdaptiveFeedbackControlStrategyV61", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackControlStrategyV61")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFeedbackControlStrategyV61 = NonObjectOptimizer( + method="LLAMAAdaptiveFeedbackControlStrategyV61" + ).set_name("LLAMAAdaptiveFeedbackControlStrategyV61", register=True) +except Exception as e: # AdaptiveFeedbackControlStrategyV61 print("AdaptiveFeedbackControlStrategyV61 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveFeedbackEnhancedMemoryStrategyV71 import AdaptiveFeedbackEnhancedMemoryStrategyV71 +try: # AdaptiveFeedbackEnhancedMemoryStrategyV71 + from nevergrad.optimization.lama.AdaptiveFeedbackEnhancedMemoryStrategyV71 import ( + AdaptiveFeedbackEnhancedMemoryStrategyV71, + ) lama_register["AdaptiveFeedbackEnhancedMemoryStrategyV71"] = AdaptiveFeedbackEnhancedMemoryStrategyV71 - res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71 = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71").set_name("LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71 = NonObjectOptimizer( + method="LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71" + ).set_name("LLAMAAdaptiveFeedbackEnhancedMemoryStrategyV71", register=True) +except Exception as e: # AdaptiveFeedbackEnhancedMemoryStrategyV71 print("AdaptiveFeedbackEnhancedMemoryStrategyV71 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmEnhanced import AdaptiveFireworkAlgorithmEnhanced +try: # AdaptiveFireworkAlgorithmEnhanced + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmEnhanced import ( + AdaptiveFireworkAlgorithmEnhanced, + ) lama_register["AdaptiveFireworkAlgorithmEnhanced"] = AdaptiveFireworkAlgorithmEnhanced - res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFireworkAlgorithmEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmEnhanced").set_name("LLAMAAdaptiveFireworkAlgorithmEnhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFireworkAlgorithmEnhanced = NonObjectOptimizer( + method="LLAMAAdaptiveFireworkAlgorithmEnhanced" + ).set_name("LLAMAAdaptiveFireworkAlgorithmEnhanced", register=True) +except Exception as e: # AdaptiveFireworkAlgorithmEnhanced print("AdaptiveFireworkAlgorithmEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmOptimization import AdaptiveFireworkAlgorithmOptimization +try: # AdaptiveFireworkAlgorithmOptimization + from nevergrad.optimization.lama.AdaptiveFireworkAlgorithmOptimization import ( + AdaptiveFireworkAlgorithmOptimization, + ) lama_register["AdaptiveFireworkAlgorithmOptimization"] = AdaptiveFireworkAlgorithmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmOptimization").set_name("LLAMAAdaptiveFireworkAlgorithmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveFireworkAlgorithmOptimization" + ).set_name("LLAMAAdaptiveFireworkAlgorithmOptimization", register=True) +except Exception as e: # AdaptiveFireworkAlgorithmOptimization print("AdaptiveFireworkAlgorithmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveFireworksEnhancedHarmonySearch import AdaptiveFireworksEnhancedHarmonySearch +try: # AdaptiveFireworksEnhancedHarmonySearch + from nevergrad.optimization.lama.AdaptiveFireworksEnhancedHarmonySearch import ( + AdaptiveFireworksEnhancedHarmonySearch, + ) lama_register["AdaptiveFireworksEnhancedHarmonySearch"] = AdaptiveFireworksEnhancedHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveFireworksEnhancedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFireworksEnhancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveFireworksEnhancedHarmonySearch").set_name("LLAMAAdaptiveFireworksEnhancedHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFireworksEnhancedHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFireworksEnhancedHarmonySearch = NonObjectOptimizer( + method="LLAMAAdaptiveFireworksEnhancedHarmonySearch" + ).set_name("LLAMAAdaptiveFireworksEnhancedHarmonySearch", register=True) +except Exception as e: # AdaptiveFireworksEnhancedHarmonySearch print("AdaptiveFireworksEnhancedHarmonySearch can not be imported: ", e) -try: +try: # AdaptiveFocusedEvolutionStrategy from nevergrad.optimization.lama.AdaptiveFocusedEvolutionStrategy import AdaptiveFocusedEvolutionStrategy lama_register["AdaptiveFocusedEvolutionStrategy"] = AdaptiveFocusedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveFocusedEvolutionStrategy").set_name("LLAMAAdaptiveFocusedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveFocusedEvolutionStrategy" + ).set_name("LLAMAAdaptiveFocusedEvolutionStrategy", register=True) +except Exception as e: # AdaptiveFocusedEvolutionStrategy print("AdaptiveFocusedEvolutionStrategy can not be imported: ", e) -try: +try: # AdaptiveFuzzyDynamicDE from nevergrad.optimization.lama.AdaptiveFuzzyDynamicDE import AdaptiveFuzzyDynamicDE lama_register["AdaptiveFuzzyDynamicDE"] = AdaptiveFuzzyDynamicDE - res = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveFuzzyDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE").set_name("LLAMAAdaptiveFuzzyDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveFuzzyDynamicDE = NonObjectOptimizer(method="LLAMAAdaptiveFuzzyDynamicDE").set_name( + "LLAMAAdaptiveFuzzyDynamicDE", register=True + ) +except Exception as e: # AdaptiveFuzzyDynamicDE print("AdaptiveFuzzyDynamicDE can not be imported: ", e) -try: +try: # AdaptiveGaussianSearch from nevergrad.optimization.lama.AdaptiveGaussianSearch import AdaptiveGaussianSearch lama_register["AdaptiveGaussianSearch"] = AdaptiveGaussianSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch").set_name("LLAMAAdaptiveGaussianSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAAdaptiveGaussianSearch").set_name( + "LLAMAAdaptiveGaussianSearch", register=True + ) +except Exception as e: # AdaptiveGaussianSearch print("AdaptiveGaussianSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGlobalLocalSearchStrategyV62 import AdaptiveGlobalLocalSearchStrategyV62 +try: # AdaptiveGlobalLocalSearchStrategyV62 + from nevergrad.optimization.lama.AdaptiveGlobalLocalSearchStrategyV62 import ( + AdaptiveGlobalLocalSearchStrategyV62, + ) lama_register["AdaptiveGlobalLocalSearchStrategyV62"] = AdaptiveGlobalLocalSearchStrategyV62 - res = NonObjectOptimizer(method="LLAMAAdaptiveGlobalLocalSearchStrategyV62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGlobalLocalSearchStrategyV62 = NonObjectOptimizer(method="LLAMAAdaptiveGlobalLocalSearchStrategyV62").set_name("LLAMAAdaptiveGlobalLocalSearchStrategyV62", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGlobalLocalSearchStrategyV62")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGlobalLocalSearchStrategyV62 = NonObjectOptimizer( + method="LLAMAAdaptiveGlobalLocalSearchStrategyV62" + ).set_name("LLAMAAdaptiveGlobalLocalSearchStrategyV62", register=True) +except Exception as e: # AdaptiveGlobalLocalSearchStrategyV62 print("AdaptiveGlobalLocalSearchStrategyV62 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientAssistedEvolution import AdaptiveGradientAssistedEvolution +try: # AdaptiveGradientAssistedEvolution + from nevergrad.optimization.lama.AdaptiveGradientAssistedEvolution import ( + AdaptiveGradientAssistedEvolution, + ) lama_register["AdaptiveGradientAssistedEvolution"] = AdaptiveGradientAssistedEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientAssistedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientAssistedEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientAssistedEvolution").set_name("LLAMAAdaptiveGradientAssistedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientAssistedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientAssistedEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientAssistedEvolution" + ).set_name("LLAMAAdaptiveGradientAssistedEvolution", register=True) +except Exception as e: # AdaptiveGradientAssistedEvolution print("AdaptiveGradientAssistedEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBalancedCrossoverPSO import AdaptiveGradientBalancedCrossoverPSO +try: # AdaptiveGradientBalancedCrossoverPSO + from nevergrad.optimization.lama.AdaptiveGradientBalancedCrossoverPSO import ( + AdaptiveGradientBalancedCrossoverPSO, + ) lama_register["AdaptiveGradientBalancedCrossoverPSO"] = AdaptiveGradientBalancedCrossoverPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMAAdaptiveGradientBalancedCrossoverPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMAAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: # AdaptiveGradientBalancedCrossoverPSO print("AdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBalancedEvolutionStrategy import AdaptiveGradientBalancedEvolutionStrategy +try: # AdaptiveGradientBalancedEvolutionStrategy + from nevergrad.optimization.lama.AdaptiveGradientBalancedEvolutionStrategy import ( + AdaptiveGradientBalancedEvolutionStrategy, + ) lama_register["AdaptiveGradientBalancedEvolutionStrategy"] = AdaptiveGradientBalancedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedEvolutionStrategy").set_name("LLAMAAdaptiveGradientBalancedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBalancedEvolutionStrategy" + ).set_name("LLAMAAdaptiveGradientBalancedEvolutionStrategy", register=True) +except Exception as e: # AdaptiveGradientBalancedEvolutionStrategy print("AdaptiveGradientBalancedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingPlus import AdaptiveGradientBoostedMemoryAnnealingPlus +try: # AdaptiveGradientBoostedMemoryAnnealingPlus + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingPlus import ( + AdaptiveGradientBoostedMemoryAnnealingPlus, + ) lama_register["AdaptiveGradientBoostedMemoryAnnealingPlus"] = AdaptiveGradientBoostedMemoryAnnealingPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus").set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus", register=True) +except Exception as e: # AdaptiveGradientBoostedMemoryAnnealingPlus print("AdaptiveGradientBoostedMemoryAnnealingPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl import AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl - - lama_register["AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl"] = AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl").set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl", register=True) -except Exception as e: +try: # AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl import ( + AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl, + ) + + lama_register["AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl"] = ( + AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl", register=True) +except Exception as e: # AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl print("AdaptiveGradientBoostedMemoryAnnealingWithExplorationControl can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryExploration import AdaptiveGradientBoostedMemoryExploration +try: # AdaptiveGradientBoostedMemoryExploration + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemoryExploration import ( + AdaptiveGradientBoostedMemoryExploration, + ) lama_register["AdaptiveGradientBoostedMemoryExploration"] = AdaptiveGradientBoostedMemoryExploration - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryExploration").set_name("LLAMAAdaptiveGradientBoostedMemoryExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemoryExploration" + ).set_name("LLAMAAdaptiveGradientBoostedMemoryExploration", register=True) +except Exception as e: # AdaptiveGradientBoostedMemoryExploration print("AdaptiveGradientBoostedMemoryExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientBoostedMemorySimulatedAnnealing import AdaptiveGradientBoostedMemorySimulatedAnnealing - - lama_register["AdaptiveGradientBoostedMemorySimulatedAnnealing"] = AdaptiveGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # AdaptiveGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveGradientBoostedMemorySimulatedAnnealing import ( + AdaptiveGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( + AdaptiveGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # AdaptiveGradientBoostedMemorySimulatedAnnealing print("AdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientClusteringEvolution import AdaptiveGradientClusteringEvolution +try: # AdaptiveGradientClusteringEvolution + from nevergrad.optimization.lama.AdaptiveGradientClusteringEvolution import ( + AdaptiveGradientClusteringEvolution, + ) lama_register["AdaptiveGradientClusteringEvolution"] = AdaptiveGradientClusteringEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientClusteringEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientClusteringEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientClusteringEvolution").set_name("LLAMAAdaptiveGradientClusteringEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientClusteringEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientClusteringEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientClusteringEvolution" + ).set_name("LLAMAAdaptiveGradientClusteringEvolution", register=True) +except Exception as e: # AdaptiveGradientClusteringEvolution print("AdaptiveGradientClusteringEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientCrossoverOptimizer import AdaptiveGradientCrossoverOptimizer +try: # AdaptiveGradientCrossoverOptimizer + from nevergrad.optimization.lama.AdaptiveGradientCrossoverOptimizer import ( + AdaptiveGradientCrossoverOptimizer, + ) lama_register["AdaptiveGradientCrossoverOptimizer"] = AdaptiveGradientCrossoverOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGradientCrossoverOptimizer").set_name("LLAMAAdaptiveGradientCrossoverOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGradientCrossoverOptimizer" + ).set_name("LLAMAAdaptiveGradientCrossoverOptimizer", register=True) +except Exception as e: # AdaptiveGradientCrossoverOptimizer print("AdaptiveGradientCrossoverOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolution import AdaptiveGradientDifferentialEvolution +try: # AdaptiveGradientDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolution import ( + AdaptiveGradientDifferentialEvolution, + ) lama_register["AdaptiveGradientDifferentialEvolution"] = AdaptiveGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolution").set_name("LLAMAAdaptiveGradientDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: # AdaptiveGradientDifferentialEvolution print("AdaptiveGradientDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionEnhanced import AdaptiveGradientDifferentialEvolutionEnhanced - - lama_register["AdaptiveGradientDifferentialEvolutionEnhanced"] = AdaptiveGradientDifferentialEvolutionEnhanced - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientDifferentialEvolutionEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced").set_name("LLAMAAdaptiveGradientDifferentialEvolutionEnhanced", register=True) -except Exception as e: +try: # AdaptiveGradientDifferentialEvolutionEnhanced + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionEnhanced import ( + AdaptiveGradientDifferentialEvolutionEnhanced, + ) + + lama_register["AdaptiveGradientDifferentialEvolutionEnhanced"] = ( + AdaptiveGradientDifferentialEvolutionEnhanced + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientDifferentialEvolutionEnhanced = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolutionEnhanced" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionEnhanced", register=True) +except Exception as e: # AdaptiveGradientDifferentialEvolutionEnhanced print("AdaptiveGradientDifferentialEvolutionEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionPlus import AdaptiveGradientDifferentialEvolutionPlus +try: # AdaptiveGradientDifferentialEvolutionPlus + from nevergrad.optimization.lama.AdaptiveGradientDifferentialEvolutionPlus import ( + AdaptiveGradientDifferentialEvolutionPlus, + ) lama_register["AdaptiveGradientDifferentialEvolutionPlus"] = AdaptiveGradientDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionPlus").set_name("LLAMAAdaptiveGradientDifferentialEvolutionPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveGradientDifferentialEvolutionPlus", register=True) +except Exception as e: # AdaptiveGradientDifferentialEvolutionPlus print("AdaptiveGradientDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientDifferentialHybrid import AdaptiveGradientDifferentialHybrid +try: # AdaptiveGradientDifferentialHybrid + from nevergrad.optimization.lama.AdaptiveGradientDifferentialHybrid import ( + AdaptiveGradientDifferentialHybrid, + ) lama_register["AdaptiveGradientDifferentialHybrid"] = AdaptiveGradientDifferentialHybrid - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientDifferentialHybrid = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialHybrid").set_name("LLAMAAdaptiveGradientDifferentialHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientDifferentialHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientDifferentialHybrid = NonObjectOptimizer( + method="LLAMAAdaptiveGradientDifferentialHybrid" + ).set_name("LLAMAAdaptiveGradientDifferentialHybrid", register=True) +except Exception as e: # AdaptiveGradientDifferentialHybrid print("AdaptiveGradientDifferentialHybrid can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientEnhancedExplorationPSO import AdaptiveGradientEnhancedExplorationPSO +try: # AdaptiveGradientEnhancedExplorationPSO + from nevergrad.optimization.lama.AdaptiveGradientEnhancedExplorationPSO import ( + AdaptiveGradientEnhancedExplorationPSO, + ) lama_register["AdaptiveGradientEnhancedExplorationPSO"] = AdaptiveGradientEnhancedExplorationPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedExplorationPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientEnhancedExplorationPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedExplorationPSO").set_name("LLAMAAdaptiveGradientEnhancedExplorationPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedExplorationPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientEnhancedExplorationPSO = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedExplorationPSO" + ).set_name("LLAMAAdaptiveGradientEnhancedExplorationPSO", register=True) +except Exception as e: # AdaptiveGradientEnhancedExplorationPSO print("AdaptiveGradientEnhancedExplorationPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGradientEnhancedMultiPhaseAnnealing import AdaptiveGradientEnhancedMultiPhaseAnnealing +try: # AdaptiveGradientEnhancedMultiPhaseAnnealing + from nevergrad.optimization.lama.AdaptiveGradientEnhancedMultiPhaseAnnealing import ( + AdaptiveGradientEnhancedMultiPhaseAnnealing, + ) lama_register["AdaptiveGradientEnhancedMultiPhaseAnnealing"] = AdaptiveGradientEnhancedMultiPhaseAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing").set_name("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing" + ).set_name("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing", register=True) +except Exception as e: # AdaptiveGradientEnhancedMultiPhaseAnnealing print("AdaptiveGradientEnhancedMultiPhaseAnnealing can not be imported: ", e) -try: +try: # AdaptiveGradientEnhancedRAMEDS from nevergrad.optimization.lama.AdaptiveGradientEnhancedRAMEDS import AdaptiveGradientEnhancedRAMEDS lama_register["AdaptiveGradientEnhancedRAMEDS"] = AdaptiveGradientEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedRAMEDS").set_name("LLAMAAdaptiveGradientEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMAAdaptiveGradientEnhancedRAMEDS" + ).set_name("LLAMAAdaptiveGradientEnhancedRAMEDS", register=True) +except Exception as e: # AdaptiveGradientEnhancedRAMEDS print("AdaptiveGradientEnhancedRAMEDS can not be imported: ", e) -try: +try: # AdaptiveGradientEvolution from nevergrad.optimization.lama.AdaptiveGradientEvolution import AdaptiveGradientEvolution lama_register["AdaptiveGradientEvolution"] = AdaptiveGradientEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution").set_name("LLAMAAdaptiveGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientEvolution").set_name( + "LLAMAAdaptiveGradientEvolution", register=True + ) +except Exception as e: # AdaptiveGradientEvolution print("AdaptiveGradientEvolution can not be imported: ", e) -try: +try: # AdaptiveGradientExploration from nevergrad.optimization.lama.AdaptiveGradientExploration import AdaptiveGradientExploration lama_register["AdaptiveGradientExploration"] = AdaptiveGradientExploration - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration").set_name("LLAMAAdaptiveGradientExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientExploration = NonObjectOptimizer(method="LLAMAAdaptiveGradientExploration").set_name( + "LLAMAAdaptiveGradientExploration", register=True + ) +except Exception as e: # AdaptiveGradientExploration print("AdaptiveGradientExploration can not be imported: ", e) -try: +try: # AdaptiveGradientExplorationV2 from nevergrad.optimization.lama.AdaptiveGradientExplorationV2 import AdaptiveGradientExplorationV2 lama_register["AdaptiveGradientExplorationV2"] = AdaptiveGradientExplorationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExplorationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientExplorationV2 = NonObjectOptimizer(method="LLAMAAdaptiveGradientExplorationV2").set_name("LLAMAAdaptiveGradientExplorationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientExplorationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientExplorationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveGradientExplorationV2" + ).set_name("LLAMAAdaptiveGradientExplorationV2", register=True) +except Exception as e: # AdaptiveGradientExplorationV2 print("AdaptiveGradientExplorationV2 can not be imported: ", e) -try: +try: # AdaptiveGradientGuidedEvolution from nevergrad.optimization.lama.AdaptiveGradientGuidedEvolution import AdaptiveGradientGuidedEvolution lama_register["AdaptiveGradientGuidedEvolution"] = AdaptiveGradientGuidedEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientGuidedEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGradientGuidedEvolution").set_name("LLAMAAdaptiveGradientGuidedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGradientGuidedEvolution" + ).set_name("LLAMAAdaptiveGradientGuidedEvolution", register=True) +except Exception as e: # AdaptiveGradientGuidedEvolution print("AdaptiveGradientGuidedEvolution can not be imported: ", e) -try: +try: # AdaptiveGradientInformedPSO from nevergrad.optimization.lama.AdaptiveGradientInformedPSO import AdaptiveGradientInformedPSO lama_register["AdaptiveGradientInformedPSO"] = AdaptiveGradientInformedPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientInformedPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO").set_name("LLAMAAdaptiveGradientInformedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientInformedPSO = NonObjectOptimizer(method="LLAMAAdaptiveGradientInformedPSO").set_name( + "LLAMAAdaptiveGradientInformedPSO", register=True + ) +except Exception as e: # AdaptiveGradientInformedPSO print("AdaptiveGradientInformedPSO can not be imported: ", e) -try: +try: # AdaptiveGradientSampling from nevergrad.optimization.lama.AdaptiveGradientSampling import AdaptiveGradientSampling lama_register["AdaptiveGradientSampling"] = AdaptiveGradientSampling - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientSampling = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling").set_name("LLAMAAdaptiveGradientSampling", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientSampling = NonObjectOptimizer(method="LLAMAAdaptiveGradientSampling").set_name( + "LLAMAAdaptiveGradientSampling", register=True + ) +except Exception as e: # AdaptiveGradientSampling print("AdaptiveGradientSampling can not be imported: ", e) -try: +try: # AdaptiveGradientSearch from nevergrad.optimization.lama.AdaptiveGradientSearch import AdaptiveGradientSearch lama_register["AdaptiveGradientSearch"] = AdaptiveGradientSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch").set_name("LLAMAAdaptiveGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveGradientSearch").set_name( + "LLAMAAdaptiveGradientSearch", register=True + ) +except Exception as e: # AdaptiveGradientSearch print("AdaptiveGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligence import AdaptiveGravitationalSwarmIntelligence +try: # AdaptiveGravitationalSwarmIntelligence + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligence import ( + AdaptiveGravitationalSwarmIntelligence, + ) lama_register["AdaptiveGravitationalSwarmIntelligence"] = AdaptiveGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAAdaptiveGravitationalSwarmIntelligence", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligence print("AdaptiveGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV15 import AdaptiveGravitationalSwarmIntelligenceV15 +try: # AdaptiveGravitationalSwarmIntelligenceV15 + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV15 import ( + AdaptiveGravitationalSwarmIntelligenceV15, + ) lama_register["AdaptiveGravitationalSwarmIntelligenceV15"] = AdaptiveGravitationalSwarmIntelligenceV15 - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligenceV15 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV15" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV15", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligenceV15 print("AdaptiveGravitationalSwarmIntelligenceV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV2 import AdaptiveGravitationalSwarmIntelligenceV2 +try: # AdaptiveGravitationalSwarmIntelligenceV2 + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV2 import ( + AdaptiveGravitationalSwarmIntelligenceV2, + ) lama_register["AdaptiveGravitationalSwarmIntelligenceV2"] = AdaptiveGravitationalSwarmIntelligenceV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligenceV2 print("AdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV26 import AdaptiveGravitationalSwarmIntelligenceV26 +try: # AdaptiveGravitationalSwarmIntelligenceV26 + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV26 import ( + AdaptiveGravitationalSwarmIntelligenceV26, + ) lama_register["AdaptiveGravitationalSwarmIntelligenceV26"] = AdaptiveGravitationalSwarmIntelligenceV26 - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligenceV26 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV26 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV26" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV26", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligenceV26 print("AdaptiveGravitationalSwarmIntelligenceV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV3 import AdaptiveGravitationalSwarmIntelligenceV3 +try: # AdaptiveGravitationalSwarmIntelligenceV3 + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV3 import ( + AdaptiveGravitationalSwarmIntelligenceV3, + ) lama_register["AdaptiveGravitationalSwarmIntelligenceV3"] = AdaptiveGravitationalSwarmIntelligenceV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligenceV3 print("AdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV4 import AdaptiveGravitationalSwarmIntelligenceV4 +try: # AdaptiveGravitationalSwarmIntelligenceV4 + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmIntelligenceV4 import ( + AdaptiveGravitationalSwarmIntelligenceV4, + ) lama_register["AdaptiveGravitationalSwarmIntelligenceV4"] = AdaptiveGravitationalSwarmIntelligenceV4 - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4").set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAAdaptiveGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: # AdaptiveGravitationalSwarmIntelligenceV4 print("AdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - - lama_register["AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation").set_name("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) -except Exception as e: +try: # AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + from nevergrad.optimization.lama.AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( + AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, + ) + + lama_register["AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( + AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( + method="LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" + ).set_name("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) +except Exception as e: # AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation print("AdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", e) -try: +try: # AdaptiveGuidedCulturalSearch from nevergrad.optimization.lama.AdaptiveGuidedCulturalSearch import AdaptiveGuidedCulturalSearch lama_register["AdaptiveGuidedCulturalSearch"] = AdaptiveGuidedCulturalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGuidedCulturalSearch = NonObjectOptimizer(method="LLAMAAdaptiveGuidedCulturalSearch").set_name("LLAMAAdaptiveGuidedCulturalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedCulturalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGuidedCulturalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedCulturalSearch" + ).set_name("LLAMAAdaptiveGuidedCulturalSearch", register=True) +except Exception as e: # AdaptiveGuidedCulturalSearch print("AdaptiveGuidedCulturalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveGuidedDifferentialEvolution import AdaptiveGuidedDifferentialEvolution +try: # AdaptiveGuidedDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveGuidedDifferentialEvolution import ( + AdaptiveGuidedDifferentialEvolution, + ) lama_register["AdaptiveGuidedDifferentialEvolution"] = AdaptiveGuidedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveGuidedDifferentialEvolution").set_name("LLAMAAdaptiveGuidedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedDifferentialEvolution" + ).set_name("LLAMAAdaptiveGuidedDifferentialEvolution", register=True) +except Exception as e: # AdaptiveGuidedDifferentialEvolution print("AdaptiveGuidedDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveGuidedEvolutionStrategy from nevergrad.optimization.lama.AdaptiveGuidedEvolutionStrategy import AdaptiveGuidedEvolutionStrategy lama_register["AdaptiveGuidedEvolutionStrategy"] = AdaptiveGuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveGuidedEvolutionStrategy").set_name("LLAMAAdaptiveGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedEvolutionStrategy" + ).set_name("LLAMAAdaptiveGuidedEvolutionStrategy", register=True) +except Exception as e: # AdaptiveGuidedEvolutionStrategy print("AdaptiveGuidedEvolutionStrategy can not be imported: ", e) -try: +try: # AdaptiveGuidedHybridOptimizer from nevergrad.optimization.lama.AdaptiveGuidedHybridOptimizer import AdaptiveGuidedHybridOptimizer lama_register["AdaptiveGuidedHybridOptimizer"] = AdaptiveGuidedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGuidedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGuidedHybridOptimizer").set_name("LLAMAAdaptiveGuidedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGuidedHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedHybridOptimizer" + ).set_name("LLAMAAdaptiveGuidedHybridOptimizer", register=True) +except Exception as e: # AdaptiveGuidedHybridOptimizer print("AdaptiveGuidedHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveGuidedMutationOptimizer from nevergrad.optimization.lama.AdaptiveGuidedMutationOptimizer import AdaptiveGuidedMutationOptimizer lama_register["AdaptiveGuidedMutationOptimizer"] = AdaptiveGuidedMutationOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveGuidedMutationOptimizer").set_name("LLAMAAdaptiveGuidedMutationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: # AdaptiveGuidedMutationOptimizer print("AdaptiveGuidedMutationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonicFireworkAlgorithm import AdaptiveHarmonicFireworkAlgorithm +try: # AdaptiveHarmonicFireworkAlgorithm + from nevergrad.optimization.lama.AdaptiveHarmonicFireworkAlgorithm import ( + AdaptiveHarmonicFireworkAlgorithm, + ) lama_register["AdaptiveHarmonicFireworkAlgorithm"] = AdaptiveHarmonicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicFireworkAlgorithm").set_name("LLAMAAdaptiveHarmonicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicFireworkAlgorithm" + ).set_name("LLAMAAdaptiveHarmonicFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveHarmonicFireworkAlgorithm print("AdaptiveHarmonicFireworkAlgorithm can not be imported: ", e) -try: +try: # AdaptiveHarmonicSearchOptimizer from nevergrad.optimization.lama.AdaptiveHarmonicSearchOptimizer import AdaptiveHarmonicSearchOptimizer lama_register["AdaptiveHarmonicSearchOptimizer"] = AdaptiveHarmonicSearchOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSearchOptimizer").set_name("LLAMAAdaptiveHarmonicSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSearchOptimizer" + ).set_name("LLAMAAdaptiveHarmonicSearchOptimizer", register=True) +except Exception as e: # AdaptiveHarmonicSearchOptimizer print("AdaptiveHarmonicSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimization import AdaptiveHarmonicSwarmOptimization +try: # AdaptiveHarmonicSwarmOptimization + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimization import ( + AdaptiveHarmonicSwarmOptimization, + ) lama_register["AdaptiveHarmonicSwarmOptimization"] = AdaptiveHarmonicSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimization").set_name("LLAMAAdaptiveHarmonicSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimization" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimization", register=True) +except Exception as e: # AdaptiveHarmonicSwarmOptimization print("AdaptiveHarmonicSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV2 import AdaptiveHarmonicSwarmOptimizationV2 +try: # AdaptiveHarmonicSwarmOptimizationV2 + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV2 import ( + AdaptiveHarmonicSwarmOptimizationV2, + ) lama_register["AdaptiveHarmonicSwarmOptimizationV2"] = AdaptiveHarmonicSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV2").set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV2", register=True) +except Exception as e: # AdaptiveHarmonicSwarmOptimizationV2 print("AdaptiveHarmonicSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV3 import AdaptiveHarmonicSwarmOptimizationV3 +try: # AdaptiveHarmonicSwarmOptimizationV3 + from nevergrad.optimization.lama.AdaptiveHarmonicSwarmOptimizationV3 import ( + AdaptiveHarmonicSwarmOptimizationV3, + ) lama_register["AdaptiveHarmonicSwarmOptimizationV3"] = AdaptiveHarmonicSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV3").set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicSwarmOptimizationV3" + ).set_name("LLAMAAdaptiveHarmonicSwarmOptimizationV3", register=True) +except Exception as e: # AdaptiveHarmonicSwarmOptimizationV3 print("AdaptiveHarmonicSwarmOptimizationV3 can not be imported: ", e) -try: +try: # AdaptiveHarmonicTabuSearchV12 from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV12 import AdaptiveHarmonicTabuSearchV12 lama_register["AdaptiveHarmonicTabuSearchV12"] = AdaptiveHarmonicTabuSearchV12 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicTabuSearchV12 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV12").set_name("LLAMAAdaptiveHarmonicTabuSearchV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicTabuSearchV12 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV12" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV12", register=True) +except Exception as e: # AdaptiveHarmonicTabuSearchV12 print("AdaptiveHarmonicTabuSearchV12 can not be imported: ", e) -try: +try: # AdaptiveHarmonicTabuSearchV17 from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV17 import AdaptiveHarmonicTabuSearchV17 lama_register["AdaptiveHarmonicTabuSearchV17"] = AdaptiveHarmonicTabuSearchV17 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicTabuSearchV17 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV17").set_name("LLAMAAdaptiveHarmonicTabuSearchV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicTabuSearchV17 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV17" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV17", register=True) +except Exception as e: # AdaptiveHarmonicTabuSearchV17 print("AdaptiveHarmonicTabuSearchV17 can not be imported: ", e) -try: +try: # AdaptiveHarmonicTabuSearchV20 from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV20 import AdaptiveHarmonicTabuSearchV20 lama_register["AdaptiveHarmonicTabuSearchV20"] = AdaptiveHarmonicTabuSearchV20 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicTabuSearchV20 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV20").set_name("LLAMAAdaptiveHarmonicTabuSearchV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicTabuSearchV20 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV20" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV20", register=True) +except Exception as e: # AdaptiveHarmonicTabuSearchV20 print("AdaptiveHarmonicTabuSearchV20 can not be imported: ", e) -try: +try: # AdaptiveHarmonicTabuSearchV8 from nevergrad.optimization.lama.AdaptiveHarmonicTabuSearchV8 import AdaptiveHarmonicTabuSearchV8 lama_register["AdaptiveHarmonicTabuSearchV8"] = AdaptiveHarmonicTabuSearchV8 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonicTabuSearchV8 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV8").set_name("LLAMAAdaptiveHarmonicTabuSearchV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonicTabuSearchV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonicTabuSearchV8 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonicTabuSearchV8" + ).set_name("LLAMAAdaptiveHarmonicTabuSearchV8", register=True) +except Exception as e: # AdaptiveHarmonicTabuSearchV8 print("AdaptiveHarmonicTabuSearchV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonyFireworksAlgorithm import AdaptiveHarmonyFireworksAlgorithm +try: # AdaptiveHarmonyFireworksAlgorithm + from nevergrad.optimization.lama.AdaptiveHarmonyFireworksAlgorithm import ( + AdaptiveHarmonyFireworksAlgorithm, + ) lama_register["AdaptiveHarmonyFireworksAlgorithm"] = AdaptiveHarmonyFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyFireworksAlgorithm").set_name("LLAMAAdaptiveHarmonyFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyFireworksAlgorithm" + ).set_name("LLAMAAdaptiveHarmonyFireworksAlgorithm", register=True) +except Exception as e: # AdaptiveHarmonyFireworksAlgorithm print("AdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) -try: +try: # AdaptiveHarmonyMemeticAlgorithm from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithm import AdaptiveHarmonyMemeticAlgorithm lama_register["AdaptiveHarmonyMemeticAlgorithm"] = AdaptiveHarmonyMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithm").set_name("LLAMAAdaptiveHarmonyMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticAlgorithm" + ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveHarmonyMemeticAlgorithm print("AdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithmV15 import AdaptiveHarmonyMemeticAlgorithmV15 +try: # AdaptiveHarmonyMemeticAlgorithmV15 + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticAlgorithmV15 import ( + AdaptiveHarmonyMemeticAlgorithmV15, + ) lama_register["AdaptiveHarmonyMemeticAlgorithmV15"] = AdaptiveHarmonyMemeticAlgorithmV15 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyMemeticAlgorithmV15 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15").set_name("LLAMAAdaptiveHarmonyMemeticAlgorithmV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyMemeticAlgorithmV15 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticAlgorithmV15" + ).set_name("LLAMAAdaptiveHarmonyMemeticAlgorithmV15", register=True) +except Exception as e: # AdaptiveHarmonyMemeticAlgorithmV15 print("AdaptiveHarmonyMemeticAlgorithmV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV2 import AdaptiveHarmonyMemeticOptimizationV2 +try: # AdaptiveHarmonyMemeticOptimizationV2 + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV2 import ( + AdaptiveHarmonyMemeticOptimizationV2, + ) lama_register["AdaptiveHarmonyMemeticOptimizationV2"] = AdaptiveHarmonyMemeticOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyMemeticOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV2").set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyMemeticOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV2", register=True) +except Exception as e: # AdaptiveHarmonyMemeticOptimizationV2 print("AdaptiveHarmonyMemeticOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV27 import AdaptiveHarmonyMemeticOptimizationV27 +try: # AdaptiveHarmonyMemeticOptimizationV27 + from nevergrad.optimization.lama.AdaptiveHarmonyMemeticOptimizationV27 import ( + AdaptiveHarmonyMemeticOptimizationV27, + ) lama_register["AdaptiveHarmonyMemeticOptimizationV27"] = AdaptiveHarmonyMemeticOptimizationV27 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyMemeticOptimizationV27 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV27").set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticOptimizationV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyMemeticOptimizationV27 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticOptimizationV27" + ).set_name("LLAMAAdaptiveHarmonyMemeticOptimizationV27", register=True) +except Exception as e: # AdaptiveHarmonyMemeticOptimizationV27 print("AdaptiveHarmonyMemeticOptimizationV27 can not be imported: ", e) -try: +try: # AdaptiveHarmonyMemeticSearchV2 from nevergrad.optimization.lama.AdaptiveHarmonyMemeticSearchV2 import AdaptiveHarmonyMemeticSearchV2 lama_register["AdaptiveHarmonyMemeticSearchV2"] = AdaptiveHarmonyMemeticSearchV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticSearchV2").set_name("LLAMAAdaptiveHarmonyMemeticSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyMemeticSearchV2" + ).set_name("LLAMAAdaptiveHarmonyMemeticSearchV2", register=True) +except Exception as e: # AdaptiveHarmonyMemeticSearchV2 print("AdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) -try: +try: # AdaptiveHarmonySearchOptimizerV2 from nevergrad.optimization.lama.AdaptiveHarmonySearchOptimizerV2 import AdaptiveHarmonySearchOptimizerV2 lama_register["AdaptiveHarmonySearchOptimizerV2"] = AdaptiveHarmonySearchOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAAdaptiveHarmonySearchOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: # AdaptiveHarmonySearchOptimizerV2 print("AdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithCuckooInspiration import AdaptiveHarmonySearchWithCuckooInspiration +try: # AdaptiveHarmonySearchWithCuckooInspiration + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithCuckooInspiration import ( + AdaptiveHarmonySearchWithCuckooInspiration, + ) lama_register["AdaptiveHarmonySearchWithCuckooInspiration"] = AdaptiveHarmonySearchWithCuckooInspiration - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration").set_name("LLAMAAdaptiveHarmonySearchWithCuckooInspiration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithCuckooInspiration" + ).set_name("LLAMAAdaptiveHarmonySearchWithCuckooInspiration", register=True) +except Exception as e: # AdaptiveHarmonySearchWithCuckooInspiration print("AdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 import AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 - - lama_register["AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2"] = AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2").set_name("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 import ( + AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2, + ) + + lama_register["AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2"] = ( + AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2", register=True) +except Exception as e: # AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 print("AdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlight import AdaptiveHarmonySearchWithImprovedLevyFlight +try: # AdaptiveHarmonySearchWithImprovedLevyFlight + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlight import ( + AdaptiveHarmonySearchWithImprovedLevyFlight, + ) lama_register["AdaptiveHarmonySearchWithImprovedLevyFlight"] = AdaptiveHarmonySearchWithImprovedLevyFlight - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight").set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight" + ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) +except Exception as e: # AdaptiveHarmonySearchWithImprovedLevyFlight print("AdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlightInspiration import AdaptiveHarmonySearchWithImprovedLevyFlightInspiration - - lama_register["AdaptiveHarmonySearchWithImprovedLevyFlightInspiration"] = AdaptiveHarmonySearchWithImprovedLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration").set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration", register=True) -except Exception as e: +try: # AdaptiveHarmonySearchWithImprovedLevyFlightInspiration + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithImprovedLevyFlightInspiration import ( + AdaptiveHarmonySearchWithImprovedLevyFlightInspiration, + ) + + lama_register["AdaptiveHarmonySearchWithImprovedLevyFlightInspiration"] = ( + AdaptiveHarmonySearchWithImprovedLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration" + ).set_name("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration", register=True) +except Exception as e: # AdaptiveHarmonySearchWithImprovedLevyFlightInspiration print("AdaptiveHarmonySearchWithImprovedLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLevyFlightImprovement import AdaptiveHarmonySearchWithLevyFlightImprovement - - lama_register["AdaptiveHarmonySearchWithLevyFlightImprovement"] = AdaptiveHarmonySearchWithLevyFlightImprovement - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement").set_name("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement", register=True) -except Exception as e: +try: # AdaptiveHarmonySearchWithLevyFlightImprovement + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLevyFlightImprovement import ( + AdaptiveHarmonySearchWithLevyFlightImprovement, + ) + + lama_register["AdaptiveHarmonySearchWithLevyFlightImprovement"] = ( + AdaptiveHarmonySearchWithLevyFlightImprovement + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement" + ).set_name("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement", register=True) +except Exception as e: # AdaptiveHarmonySearchWithLevyFlightImprovement print("AdaptiveHarmonySearchWithLevyFlightImprovement can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimization import AdaptiveHarmonySearchWithLocalOptimization +try: # AdaptiveHarmonySearchWithLocalOptimization + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimization import ( + AdaptiveHarmonySearchWithLocalOptimization, + ) lama_register["AdaptiveHarmonySearchWithLocalOptimization"] = AdaptiveHarmonySearchWithLocalOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimization").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimization" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimization", register=True) +except Exception as e: # AdaptiveHarmonySearchWithLocalOptimization print("AdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationImproved import AdaptiveHarmonySearchWithLocalOptimizationImproved - - lama_register["AdaptiveHarmonySearchWithLocalOptimizationImproved"] = AdaptiveHarmonySearchWithLocalOptimizationImproved - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved", register=True) -except Exception as e: +try: # AdaptiveHarmonySearchWithLocalOptimizationImproved + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationImproved import ( + AdaptiveHarmonySearchWithLocalOptimizationImproved, + ) + + lama_register["AdaptiveHarmonySearchWithLocalOptimizationImproved"] = ( + AdaptiveHarmonySearchWithLocalOptimizationImproved + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved", register=True) +except Exception as e: # AdaptiveHarmonySearchWithLocalOptimizationImproved print("AdaptiveHarmonySearchWithLocalOptimizationImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationV2 import AdaptiveHarmonySearchWithLocalOptimizationV2 - - lama_register["AdaptiveHarmonySearchWithLocalOptimizationV2"] = AdaptiveHarmonySearchWithLocalOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2").set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveHarmonySearchWithLocalOptimizationV2 + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithLocalOptimizationV2 import ( + AdaptiveHarmonySearchWithLocalOptimizationV2, + ) + + lama_register["AdaptiveHarmonySearchWithLocalOptimizationV2"] = ( + AdaptiveHarmonySearchWithLocalOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2" + ).set_name("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2", register=True) +except Exception as e: # AdaptiveHarmonySearchWithLocalOptimizationV2 print("AdaptiveHarmonySearchWithLocalOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHarmonySearchWithSimulatedAnnealing import AdaptiveHarmonySearchWithSimulatedAnnealing +try: # AdaptiveHarmonySearchWithSimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveHarmonySearchWithSimulatedAnnealing import ( + AdaptiveHarmonySearchWithSimulatedAnnealing, + ) lama_register["AdaptiveHarmonySearchWithSimulatedAnnealing"] = AdaptiveHarmonySearchWithSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing").set_name("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing" + ).set_name("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) +except Exception as e: # AdaptiveHarmonySearchWithSimulatedAnnealing print("AdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) -try: +try: # AdaptiveHarmonyTabuOptimization from nevergrad.optimization.lama.AdaptiveHarmonyTabuOptimization import AdaptiveHarmonyTabuOptimization lama_register["AdaptiveHarmonyTabuOptimization"] = AdaptiveHarmonyTabuOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyTabuOptimization").set_name("LLAMAAdaptiveHarmonyTabuOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHarmonyTabuOptimization" + ).set_name("LLAMAAdaptiveHarmonyTabuOptimization", register=True) +except Exception as e: # AdaptiveHarmonyTabuOptimization print("AdaptiveHarmonyTabuOptimization can not be imported: ", e) -try: +try: # AdaptiveHybridAlgorithm from nevergrad.optimization.lama.AdaptiveHybridAlgorithm import AdaptiveHybridAlgorithm lama_register["AdaptiveHybridAlgorithm"] = AdaptiveHybridAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm").set_name("LLAMAAdaptiveHybridAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridAlgorithm").set_name( + "LLAMAAdaptiveHybridAlgorithm", register=True + ) +except Exception as e: # AdaptiveHybridAlgorithm print("AdaptiveHybridAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithGradientBoost import AdaptiveHybridAnnealingWithGradientBoost +try: # AdaptiveHybridAnnealingWithGradientBoost + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithGradientBoost import ( + AdaptiveHybridAnnealingWithGradientBoost, + ) lama_register["AdaptiveHybridAnnealingWithGradientBoost"] = AdaptiveHybridAnnealingWithGradientBoost - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridAnnealingWithGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithGradientBoost").set_name("LLAMAAdaptiveHybridAnnealingWithGradientBoost", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridAnnealingWithGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveHybridAnnealingWithGradientBoost" + ).set_name("LLAMAAdaptiveHybridAnnealingWithGradientBoost", register=True) +except Exception as e: # AdaptiveHybridAnnealingWithGradientBoost print("AdaptiveHybridAnnealingWithGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithMemoryRefinement import AdaptiveHybridAnnealingWithMemoryRefinement +try: # AdaptiveHybridAnnealingWithMemoryRefinement + from nevergrad.optimization.lama.AdaptiveHybridAnnealingWithMemoryRefinement import ( + AdaptiveHybridAnnealingWithMemoryRefinement, + ) lama_register["AdaptiveHybridAnnealingWithMemoryRefinement"] = AdaptiveHybridAnnealingWithMemoryRefinement - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridAnnealingWithMemoryRefinement = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement").set_name("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridAnnealingWithMemoryRefinement = NonObjectOptimizer( + method="LLAMAAdaptiveHybridAnnealingWithMemoryRefinement" + ).set_name("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement", register=True) +except Exception as e: # AdaptiveHybridAnnealingWithMemoryRefinement print("AdaptiveHybridAnnealingWithMemoryRefinement can not be imported: ", e) -try: +try: # AdaptiveHybridCMAESDE from nevergrad.optimization.lama.AdaptiveHybridCMAESDE import AdaptiveHybridCMAESDE lama_register["AdaptiveHybridCMAESDE"] = AdaptiveHybridCMAESDE - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridCMAESDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE").set_name("LLAMAAdaptiveHybridCMAESDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridCMAESDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridCMAESDE").set_name( + "LLAMAAdaptiveHybridCMAESDE", register=True + ) +except Exception as e: # AdaptiveHybridCMAESDE print("AdaptiveHybridCMAESDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 import AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 - - lama_register["AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3"] = AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3", register=True) -except Exception as e: +try: # AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 + from nevergrad.optimization.lama.AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 import ( + AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAAdaptiveHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: # AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 print("AdaptiveHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) -try: +try: # AdaptiveHybridCulturalOptimizer from nevergrad.optimization.lama.AdaptiveHybridCulturalOptimizer import AdaptiveHybridCulturalOptimizer lama_register["AdaptiveHybridCulturalOptimizer"] = AdaptiveHybridCulturalOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCulturalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridCulturalOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridCulturalOptimizer").set_name("LLAMAAdaptiveHybridCulturalOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridCulturalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridCulturalOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHybridCulturalOptimizer" + ).set_name("LLAMAAdaptiveHybridCulturalOptimizer", register=True) +except Exception as e: # AdaptiveHybridCulturalOptimizer print("AdaptiveHybridCulturalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridDEPSOWithDynamicRestart import AdaptiveHybridDEPSOWithDynamicRestart +try: # AdaptiveHybridDEPSOWithDynamicRestart + from nevergrad.optimization.lama.AdaptiveHybridDEPSOWithDynamicRestart import ( + AdaptiveHybridDEPSOWithDynamicRestart, + ) lama_register["AdaptiveHybridDEPSOWithDynamicRestart"] = AdaptiveHybridDEPSOWithDynamicRestart - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridDEPSOWithDynamicRestart = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart").set_name("LLAMAAdaptiveHybridDEPSOWithDynamicRestart", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridDEPSOWithDynamicRestart = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDEPSOWithDynamicRestart" + ).set_name("LLAMAAdaptiveHybridDEPSOWithDynamicRestart", register=True) +except Exception as e: # AdaptiveHybridDEPSOWithDynamicRestart print("AdaptiveHybridDEPSOWithDynamicRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridDEWithIntensifiedLocalSearch import AdaptiveHybridDEWithIntensifiedLocalSearch +try: # AdaptiveHybridDEWithIntensifiedLocalSearch + from nevergrad.optimization.lama.AdaptiveHybridDEWithIntensifiedLocalSearch import ( + AdaptiveHybridDEWithIntensifiedLocalSearch, + ) lama_register["AdaptiveHybridDEWithIntensifiedLocalSearch"] = AdaptiveHybridDEWithIntensifiedLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch").set_name("LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch" + ).set_name("LLAMAAdaptiveHybridDEWithIntensifiedLocalSearch", register=True) +except Exception as e: # AdaptiveHybridDEWithIntensifiedLocalSearch print("AdaptiveHybridDEWithIntensifiedLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridDifferentialEvolution import AdaptiveHybridDifferentialEvolution +try: # AdaptiveHybridDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveHybridDifferentialEvolution import ( + AdaptiveHybridDifferentialEvolution, + ) lama_register["AdaptiveHybridDifferentialEvolution"] = AdaptiveHybridDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveHybridDifferentialEvolution").set_name("LLAMAAdaptiveHybridDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveHybridDifferentialEvolution" + ).set_name("LLAMAAdaptiveHybridDifferentialEvolution", register=True) +except Exception as e: # AdaptiveHybridDifferentialEvolution print("AdaptiveHybridDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridEvolutionStrategyV5 import AdaptiveHybridEvolutionStrategyV5 +try: # AdaptiveHybridEvolutionStrategyV5 + from nevergrad.optimization.lama.AdaptiveHybridEvolutionStrategyV5 import ( + AdaptiveHybridEvolutionStrategyV5, + ) lama_register["AdaptiveHybridEvolutionStrategyV5"] = AdaptiveHybridEvolutionStrategyV5 - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridEvolutionStrategyV5 = NonObjectOptimizer(method="LLAMAAdaptiveHybridEvolutionStrategyV5").set_name("LLAMAAdaptiveHybridEvolutionStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridEvolutionStrategyV5 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridEvolutionStrategyV5" + ).set_name("LLAMAAdaptiveHybridEvolutionStrategyV5", register=True) +except Exception as e: # AdaptiveHybridEvolutionStrategyV5 print("AdaptiveHybridEvolutionStrategyV5 can not be imported: ", e) -try: +try: # AdaptiveHybridFireworkAlgorithm from nevergrad.optimization.lama.AdaptiveHybridFireworkAlgorithm import AdaptiveHybridFireworkAlgorithm lama_register["AdaptiveHybridFireworkAlgorithm"] = AdaptiveHybridFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveHybridFireworkAlgorithm").set_name("LLAMAAdaptiveHybridFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveHybridFireworkAlgorithm" + ).set_name("LLAMAAdaptiveHybridFireworkAlgorithm", register=True) +except Exception as e: # AdaptiveHybridFireworkAlgorithm print("AdaptiveHybridFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridGradientAnnealingWithVariableMemory import AdaptiveHybridGradientAnnealingWithVariableMemory - - lama_register["AdaptiveHybridGradientAnnealingWithVariableMemory"] = AdaptiveHybridGradientAnnealingWithVariableMemory - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory = NonObjectOptimizer(method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory").set_name("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory", register=True) -except Exception as e: +try: # AdaptiveHybridGradientAnnealingWithVariableMemory + from nevergrad.optimization.lama.AdaptiveHybridGradientAnnealingWithVariableMemory import ( + AdaptiveHybridGradientAnnealingWithVariableMemory, + ) + + lama_register["AdaptiveHybridGradientAnnealingWithVariableMemory"] = ( + AdaptiveHybridGradientAnnealingWithVariableMemory + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory = NonObjectOptimizer( + method="LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory" + ).set_name("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory", register=True) +except Exception as e: # AdaptiveHybridGradientAnnealingWithVariableMemory print("AdaptiveHybridGradientAnnealingWithVariableMemory can not be imported: ", e) -try: +try: # AdaptiveHybridHarmonySearch from nevergrad.optimization.lama.AdaptiveHybridHarmonySearch import AdaptiveHybridHarmonySearch lama_register["AdaptiveHybridHarmonySearch"] = AdaptiveHybridHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch").set_name("LLAMAAdaptiveHybridHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveHybridHarmonySearch").set_name( + "LLAMAAdaptiveHybridHarmonySearch", register=True + ) +except Exception as e: # AdaptiveHybridHarmonySearch print("AdaptiveHybridHarmonySearch can not be imported: ", e) -try: +try: # AdaptiveHybridMetaOptimizer from nevergrad.optimization.lama.AdaptiveHybridMetaOptimizer import AdaptiveHybridMetaOptimizer lama_register["AdaptiveHybridMetaOptimizer"] = AdaptiveHybridMetaOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer").set_name("LLAMAAdaptiveHybridMetaOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridMetaOptimizer").set_name( + "LLAMAAdaptiveHybridMetaOptimizer", register=True + ) +except Exception as e: # AdaptiveHybridMetaOptimizer print("AdaptiveHybridMetaOptimizer can not be imported: ", e) -try: +try: # AdaptiveHybridOptimization from nevergrad.optimization.lama.AdaptiveHybridOptimization import AdaptiveHybridOptimization lama_register["AdaptiveHybridOptimization"] = AdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization").set_name("LLAMAAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimization").set_name( + "LLAMAAdaptiveHybridOptimization", register=True + ) +except Exception as e: # AdaptiveHybridOptimization print("AdaptiveHybridOptimization can not be imported: ", e) -try: +try: # AdaptiveHybridOptimizationV2 from nevergrad.optimization.lama.AdaptiveHybridOptimizationV2 import AdaptiveHybridOptimizationV2 lama_register["AdaptiveHybridOptimizationV2"] = AdaptiveHybridOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV2").set_name("LLAMAAdaptiveHybridOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridOptimizationV2" + ).set_name("LLAMAAdaptiveHybridOptimizationV2", register=True) +except Exception as e: # AdaptiveHybridOptimizationV2 print("AdaptiveHybridOptimizationV2 can not be imported: ", e) -try: +try: # AdaptiveHybridOptimizationV3 from nevergrad.optimization.lama.AdaptiveHybridOptimizationV3 import AdaptiveHybridOptimizationV3 lama_register["AdaptiveHybridOptimizationV3"] = AdaptiveHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV3").set_name("LLAMAAdaptiveHybridOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptiveHybridOptimizationV3" + ).set_name("LLAMAAdaptiveHybridOptimizationV3", register=True) +except Exception as e: # AdaptiveHybridOptimizationV3 print("AdaptiveHybridOptimizationV3 can not be imported: ", e) -try: +try: # AdaptiveHybridOptimizer from nevergrad.optimization.lama.AdaptiveHybridOptimizer import AdaptiveHybridOptimizer lama_register["AdaptiveHybridOptimizer"] = AdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer").set_name("LLAMAAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridOptimizer").set_name( + "LLAMAAdaptiveHybridOptimizer", register=True + ) +except Exception as e: # AdaptiveHybridOptimizer print("AdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolution import AdaptiveHybridParticleSwarmDifferentialEvolution - - lama_register["AdaptiveHybridParticleSwarmDifferentialEvolution"] = AdaptiveHybridParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveHybridParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolution import ( + AdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + AdaptiveHybridParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # AdaptiveHybridParticleSwarmDifferentialEvolution print("AdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolutionPlus import AdaptiveHybridParticleSwarmDifferentialEvolutionPlus - - lama_register["AdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = AdaptiveHybridParticleSwarmDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus").set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) -except Exception as e: +try: # AdaptiveHybridParticleSwarmDifferentialEvolutionPlus + from nevergrad.optimization.lama.AdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( + AdaptiveHybridParticleSwarmDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( + AdaptiveHybridParticleSwarmDifferentialEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) +except Exception as e: # AdaptiveHybridParticleSwarmDifferentialEvolutionPlus print("AdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridQuasiRandomGradientDE import AdaptiveHybridQuasiRandomGradientDE +try: # AdaptiveHybridQuasiRandomGradientDE + from nevergrad.optimization.lama.AdaptiveHybridQuasiRandomGradientDE import ( + AdaptiveHybridQuasiRandomGradientDE, + ) lama_register["AdaptiveHybridQuasiRandomGradientDE"] = AdaptiveHybridQuasiRandomGradientDE - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMAAdaptiveHybridQuasiRandomGradientDE").set_name("LLAMAAdaptiveHybridQuasiRandomGradientDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMAAdaptiveHybridQuasiRandomGradientDE" + ).set_name("LLAMAAdaptiveHybridQuasiRandomGradientDE", register=True) +except Exception as e: # AdaptiveHybridQuasiRandomGradientDE print("AdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridRecombinativeStrategy import AdaptiveHybridRecombinativeStrategy +try: # AdaptiveHybridRecombinativeStrategy + from nevergrad.optimization.lama.AdaptiveHybridRecombinativeStrategy import ( + AdaptiveHybridRecombinativeStrategy, + ) lama_register["AdaptiveHybridRecombinativeStrategy"] = AdaptiveHybridRecombinativeStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridRecombinativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridRecombinativeStrategy = NonObjectOptimizer(method="LLAMAAdaptiveHybridRecombinativeStrategy").set_name("LLAMAAdaptiveHybridRecombinativeStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridRecombinativeStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridRecombinativeStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveHybridRecombinativeStrategy" + ).set_name("LLAMAAdaptiveHybridRecombinativeStrategy", register=True) +except Exception as e: # AdaptiveHybridRecombinativeStrategy print("AdaptiveHybridRecombinativeStrategy can not be imported: ", e) -try: +try: # AdaptiveHybridSearchOptimizer from nevergrad.optimization.lama.AdaptiveHybridSearchOptimizer import AdaptiveHybridSearchOptimizer lama_register["AdaptiveHybridSearchOptimizer"] = AdaptiveHybridSearchOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveHybridSearchOptimizer").set_name("LLAMAAdaptiveHybridSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveHybridSearchOptimizer" + ).set_name("LLAMAAdaptiveHybridSearchOptimizer", register=True) +except Exception as e: # AdaptiveHybridSearchOptimizer print("AdaptiveHybridSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHybridSwarmEvolutionOptimization import AdaptiveHybridSwarmEvolutionOptimization +try: # AdaptiveHybridSwarmEvolutionOptimization + from nevergrad.optimization.lama.AdaptiveHybridSwarmEvolutionOptimization import ( + AdaptiveHybridSwarmEvolutionOptimization, + ) lama_register["AdaptiveHybridSwarmEvolutionOptimization"] = AdaptiveHybridSwarmEvolutionOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer(method="LLAMAAdaptiveHybridSwarmEvolutionOptimization").set_name("LLAMAAdaptiveHybridSwarmEvolutionOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveHybridSwarmEvolutionOptimization" + ).set_name("LLAMAAdaptiveHybridSwarmEvolutionOptimization", register=True) +except Exception as e: # AdaptiveHybridSwarmEvolutionOptimization print("AdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveHyperQuantumStateCrossoverOptimizationV2 import AdaptiveHyperQuantumStateCrossoverOptimizationV2 - - lama_register["AdaptiveHyperQuantumStateCrossoverOptimizationV2"] = AdaptiveHyperQuantumStateCrossoverOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2").set_name("LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveHyperQuantumStateCrossoverOptimizationV2 + from nevergrad.optimization.lama.AdaptiveHyperQuantumStateCrossoverOptimizationV2 import ( + AdaptiveHyperQuantumStateCrossoverOptimizationV2, + ) + + lama_register["AdaptiveHyperQuantumStateCrossoverOptimizationV2"] = ( + AdaptiveHyperQuantumStateCrossoverOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2" + ).set_name("LLAMAAdaptiveHyperQuantumStateCrossoverOptimizationV2", register=True) +except Exception as e: # AdaptiveHyperQuantumStateCrossoverOptimizationV2 print("AdaptiveHyperQuantumStateCrossoverOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveIncrementalCrossoverEnhancement import AdaptiveIncrementalCrossoverEnhancement +try: # AdaptiveIncrementalCrossoverEnhancement + from nevergrad.optimization.lama.AdaptiveIncrementalCrossoverEnhancement import ( + AdaptiveIncrementalCrossoverEnhancement, + ) lama_register["AdaptiveIncrementalCrossoverEnhancement"] = AdaptiveIncrementalCrossoverEnhancement - res = NonObjectOptimizer(method="LLAMAAdaptiveIncrementalCrossoverEnhancement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveIncrementalCrossoverEnhancement = NonObjectOptimizer(method="LLAMAAdaptiveIncrementalCrossoverEnhancement").set_name("LLAMAAdaptiveIncrementalCrossoverEnhancement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveIncrementalCrossoverEnhancement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveIncrementalCrossoverEnhancement = NonObjectOptimizer( + method="LLAMAAdaptiveIncrementalCrossoverEnhancement" + ).set_name("LLAMAAdaptiveIncrementalCrossoverEnhancement", register=True) +except Exception as e: # AdaptiveIncrementalCrossoverEnhancement print("AdaptiveIncrementalCrossoverEnhancement can not be imported: ", e) -try: +try: # AdaptiveInertiaHybridOptimizer from nevergrad.optimization.lama.AdaptiveInertiaHybridOptimizer import AdaptiveInertiaHybridOptimizer lama_register["AdaptiveInertiaHybridOptimizer"] = AdaptiveInertiaHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveInertiaHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveInertiaHybridOptimizer").set_name("LLAMAAdaptiveInertiaHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaHybridOptimizer" + ).set_name("LLAMAAdaptiveInertiaHybridOptimizer", register=True) +except Exception as e: # AdaptiveInertiaHybridOptimizer print("AdaptiveInertiaHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveInertiaParticleOptimizer from nevergrad.optimization.lama.AdaptiveInertiaParticleOptimizer import AdaptiveInertiaParticleOptimizer lama_register["AdaptiveInertiaParticleOptimizer"] = AdaptiveInertiaParticleOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveInertiaParticleOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleOptimizer").set_name("LLAMAAdaptiveInertiaParticleOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveInertiaParticleOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaParticleOptimizer" + ).set_name("LLAMAAdaptiveInertiaParticleOptimizer", register=True) +except Exception as e: # AdaptiveInertiaParticleOptimizer print("AdaptiveInertiaParticleOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveInertiaParticleSwarmOptimization import AdaptiveInertiaParticleSwarmOptimization +try: # AdaptiveInertiaParticleSwarmOptimization + from nevergrad.optimization.lama.AdaptiveInertiaParticleSwarmOptimization import ( + AdaptiveInertiaParticleSwarmOptimization, + ) lama_register["AdaptiveInertiaParticleSwarmOptimization"] = AdaptiveInertiaParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveInertiaParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleSwarmOptimization").set_name("LLAMAAdaptiveInertiaParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveInertiaParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveInertiaParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveInertiaParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveInertiaParticleSwarmOptimization", register=True) +except Exception as e: # AdaptiveInertiaParticleSwarmOptimization print("AdaptiveInertiaParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveLearningDifferentialEvolutionOptimizer import AdaptiveLearningDifferentialEvolutionOptimizer - - lama_register["AdaptiveLearningDifferentialEvolutionOptimizer"] = AdaptiveLearningDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLearningDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveLearningDifferentialEvolutionOptimizer", register=True) -except Exception as e: +try: # AdaptiveLearningDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.AdaptiveLearningDifferentialEvolutionOptimizer import ( + AdaptiveLearningDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveLearningDifferentialEvolutionOptimizer"] = ( + AdaptiveLearningDifferentialEvolutionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLearningDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveLearningDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveLearningDifferentialEvolutionOptimizer", register=True) +except Exception as e: # AdaptiveLearningDifferentialEvolutionOptimizer print("AdaptiveLearningDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveLevyDiversifiedMetaHeuristicAlgorithm import AdaptiveLevyDiversifiedMetaHeuristicAlgorithm - - lama_register["AdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = AdaptiveLevyDiversifiedMetaHeuristicAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) -except Exception as e: +try: # AdaptiveLevyDiversifiedMetaHeuristicAlgorithm + from nevergrad.optimization.lama.AdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( + AdaptiveLevyDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["AdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( + AdaptiveLevyDiversifiedMetaHeuristicAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: # AdaptiveLevyDiversifiedMetaHeuristicAlgorithm print("AdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) -try: +try: # AdaptiveLevyHarmonySearch from nevergrad.optimization.lama.AdaptiveLevyHarmonySearch import AdaptiveLevyHarmonySearch lama_register["AdaptiveLevyHarmonySearch"] = AdaptiveLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch").set_name("LLAMAAdaptiveLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAAdaptiveLevyHarmonySearch").set_name( + "LLAMAAdaptiveLevyHarmonySearch", register=True + ) +except Exception as e: # AdaptiveLevyHarmonySearch print("AdaptiveLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing import AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing - - lama_register["AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing"] = AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing", register=True) -except Exception as e: +try: # AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing import ( + AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing, + ) + + lama_register["AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing"] = ( + AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveLocalSearchImprovedQuantumSimulatedAnnealing", register=True) +except Exception as e: # AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing print("AdaptiveLocalSearchImprovedQuantumSimulatedAnnealing can not be imported: ", e) -try: +try: # AdaptiveLocalSearchOptimizer from nevergrad.optimization.lama.AdaptiveLocalSearchOptimizer import AdaptiveLocalSearchOptimizer lama_register["AdaptiveLocalSearchOptimizer"] = AdaptiveLocalSearchOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLocalSearchOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchOptimizer").set_name("LLAMAAdaptiveLocalSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchOptimizer" + ).set_name("LLAMAAdaptiveLocalSearchOptimizer", register=True) +except Exception as e: # AdaptiveLocalSearchOptimizer print("AdaptiveLocalSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveLocalSearchQuantumSimulatedAnnealing import AdaptiveLocalSearchQuantumSimulatedAnnealing - - lama_register["AdaptiveLocalSearchQuantumSimulatedAnnealing"] = AdaptiveLocalSearchQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing").set_name("LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) -except Exception as e: +try: # AdaptiveLocalSearchQuantumSimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveLocalSearchQuantumSimulatedAnnealing import ( + AdaptiveLocalSearchQuantumSimulatedAnnealing, + ) + + lama_register["AdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( + AdaptiveLocalSearchQuantumSimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing" + ).set_name("LLAMAAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) +except Exception as e: # AdaptiveLocalSearchQuantumSimulatedAnnealing print("AdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) -try: +try: # AdaptiveMemeticAlgorithm from nevergrad.optimization.lama.AdaptiveMemeticAlgorithm import AdaptiveMemeticAlgorithm lama_register["AdaptiveMemeticAlgorithm"] = AdaptiveMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm").set_name("LLAMAAdaptiveMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticAlgorithm").set_name( + "LLAMAAdaptiveMemeticAlgorithm", register=True + ) +except Exception as e: # AdaptiveMemeticAlgorithm print("AdaptiveMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer import AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer - - lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer"] = AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer", register=True) -except Exception as e: +try: # AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer import ( + AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer"] = ( + AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionOptimizer", register=True) +except Exception as e: # AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer print("AdaptiveMemeticCrossoverDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer import AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer - - lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer"] = AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer").set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer", register=True) -except Exception as e: +try: # AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer + from nevergrad.optimization.lama.AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer import ( + AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer, + ) + + lama_register["AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer"] = ( + AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer" + ).set_name("LLAMAAdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer", register=True) +except Exception as e: # AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer print("AdaptiveMemeticCrossoverDifferentialEvolutionWithElitismOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolution import AdaptiveMemeticDifferentialEvolution +try: # AdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolution import ( + AdaptiveMemeticDifferentialEvolution, + ) lama_register["AdaptiveMemeticDifferentialEvolution"] = AdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolution").set_name("LLAMAAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolution print("AdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionOptimizer import AdaptiveMemeticDifferentialEvolutionOptimizer - - lama_register["AdaptiveMemeticDifferentialEvolutionOptimizer"] = AdaptiveMemeticDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer", register=True) -except Exception as e: +try: # AdaptiveMemeticDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionOptimizer import ( + AdaptiveMemeticDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionOptimizer"] = ( + AdaptiveMemeticDifferentialEvolutionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionOptimizer", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionOptimizer print("AdaptiveMemeticDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV2 import AdaptiveMemeticDifferentialEvolutionV2 +try: # AdaptiveMemeticDifferentialEvolutionV2 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV2 import ( + AdaptiveMemeticDifferentialEvolutionV2, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV2"] = AdaptiveMemeticDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV2").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV2", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV2 print("AdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV3 import AdaptiveMemeticDifferentialEvolutionV3 +try: # AdaptiveMemeticDifferentialEvolutionV3 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV3 import ( + AdaptiveMemeticDifferentialEvolutionV3, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV3"] = AdaptiveMemeticDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV3").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV3" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV3", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV3 print("AdaptiveMemeticDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV4 import AdaptiveMemeticDifferentialEvolutionV4 +try: # AdaptiveMemeticDifferentialEvolutionV4 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV4 import ( + AdaptiveMemeticDifferentialEvolutionV4, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV4"] = AdaptiveMemeticDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV4").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV4" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV4", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV4 print("AdaptiveMemeticDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV5 import AdaptiveMemeticDifferentialEvolutionV5 +try: # AdaptiveMemeticDifferentialEvolutionV5 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV5 import ( + AdaptiveMemeticDifferentialEvolutionV5, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV5"] = AdaptiveMemeticDifferentialEvolutionV5 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV5").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV5" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV5", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV5 print("AdaptiveMemeticDifferentialEvolutionV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV6 import AdaptiveMemeticDifferentialEvolutionV6 +try: # AdaptiveMemeticDifferentialEvolutionV6 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV6 import ( + AdaptiveMemeticDifferentialEvolutionV6, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV6"] = AdaptiveMemeticDifferentialEvolutionV6 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV6").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV6" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV6", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV6 print("AdaptiveMemeticDifferentialEvolutionV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV7 import AdaptiveMemeticDifferentialEvolutionV7 +try: # AdaptiveMemeticDifferentialEvolutionV7 + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionV7 import ( + AdaptiveMemeticDifferentialEvolutionV7, + ) lama_register["AdaptiveMemeticDifferentialEvolutionV7"] = AdaptiveMemeticDifferentialEvolutionV7 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV7").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionV7" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionV7", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionV7 print("AdaptiveMemeticDifferentialEvolutionV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR import AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR - - lama_register["AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR"] = AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR", register=True) -except Exception as e: +try: # AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR import ( + AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR"] = ( + AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR print("AdaptiveMemeticDifferentialEvolutionWithElitismAndDynamicFCR can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance import AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance - - lama_register["AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance"] = AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance").set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance", register=True) -except Exception as e: +try: # AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance import ( + AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance, + ) + + lama_register["AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance"] = ( + AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance" + ).set_name("LLAMAAdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance", register=True) +except Exception as e: # AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance print("AdaptiveMemeticDifferentialEvolutionWithSurrogateAssistance can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialQuantumSearch import AdaptiveMemeticDifferentialQuantumSearch +try: # AdaptiveMemeticDifferentialQuantumSearch + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialQuantumSearch import ( + AdaptiveMemeticDifferentialQuantumSearch, + ) lama_register["AdaptiveMemeticDifferentialQuantumSearch"] = AdaptiveMemeticDifferentialQuantumSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialQuantumSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialQuantumSearch").set_name("LLAMAAdaptiveMemeticDifferentialQuantumSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialQuantumSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialQuantumSearch" + ).set_name("LLAMAAdaptiveMemeticDifferentialQuantumSearch", register=True) +except Exception as e: # AdaptiveMemeticDifferentialQuantumSearch print("AdaptiveMemeticDifferentialQuantumSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticDifferentialSearch import AdaptiveMemeticDifferentialSearch +try: # AdaptiveMemeticDifferentialSearch + from nevergrad.optimization.lama.AdaptiveMemeticDifferentialSearch import ( + AdaptiveMemeticDifferentialSearch, + ) lama_register["AdaptiveMemeticDifferentialSearch"] = AdaptiveMemeticDifferentialSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialSearch").set_name("LLAMAAdaptiveMemeticDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDifferentialSearch" + ).set_name("LLAMAAdaptiveMemeticDifferentialSearch", register=True) +except Exception as e: # AdaptiveMemeticDifferentialSearch print("AdaptiveMemeticDifferentialSearch can not be imported: ", e) -try: +try: # AdaptiveMemeticDiverseOptimizer from nevergrad.optimization.lama.AdaptiveMemeticDiverseOptimizer import AdaptiveMemeticDiverseOptimizer lama_register["AdaptiveMemeticDiverseOptimizer"] = AdaptiveMemeticDiverseOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDiverseOptimizer").set_name("LLAMAAdaptiveMemeticDiverseOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: # AdaptiveMemeticDiverseOptimizer print("AdaptiveMemeticDiverseOptimizer can not be imported: ", e) -try: +try: # AdaptiveMemeticEvolutionStrategy from nevergrad.optimization.lama.AdaptiveMemeticEvolutionStrategy import AdaptiveMemeticEvolutionStrategy lama_register["AdaptiveMemeticEvolutionStrategy"] = AdaptiveMemeticEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionStrategy").set_name("LLAMAAdaptiveMemeticEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionStrategy" + ).set_name("LLAMAAdaptiveMemeticEvolutionStrategy", register=True) +except Exception as e: # AdaptiveMemeticEvolutionStrategy print("AdaptiveMemeticEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryAlgorithm import AdaptiveMemeticEvolutionaryAlgorithm +try: # AdaptiveMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryAlgorithm import ( + AdaptiveMemeticEvolutionaryAlgorithm, + ) lama_register["AdaptiveMemeticEvolutionaryAlgorithm"] = AdaptiveMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMAAdaptiveMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # AdaptiveMemeticEvolutionaryAlgorithm print("AdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryOptimizer import AdaptiveMemeticEvolutionaryOptimizer +try: # AdaptiveMemeticEvolutionaryOptimizer + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionaryOptimizer import ( + AdaptiveMemeticEvolutionaryOptimizer, + ) lama_register["AdaptiveMemeticEvolutionaryOptimizer"] = AdaptiveMemeticEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryOptimizer").set_name("LLAMAAdaptiveMemeticEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveMemeticEvolutionaryOptimizer", register=True) +except Exception as e: # AdaptiveMemeticEvolutionaryOptimizer print("AdaptiveMemeticEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticEvolutionarySearch import AdaptiveMemeticEvolutionarySearch +try: # AdaptiveMemeticEvolutionarySearch + from nevergrad.optimization.lama.AdaptiveMemeticEvolutionarySearch import ( + AdaptiveMemeticEvolutionarySearch, + ) lama_register["AdaptiveMemeticEvolutionarySearch"] = AdaptiveMemeticEvolutionarySearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticEvolutionarySearch = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionarySearch").set_name("LLAMAAdaptiveMemeticEvolutionarySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticEvolutionarySearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticEvolutionarySearch" + ).set_name("LLAMAAdaptiveMemeticEvolutionarySearch", register=True) +except Exception as e: # AdaptiveMemeticEvolutionarySearch print("AdaptiveMemeticEvolutionarySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimization import AdaptiveMemeticHarmonyOptimization +try: # AdaptiveMemeticHarmonyOptimization + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimization import ( + AdaptiveMemeticHarmonyOptimization, + ) lama_register["AdaptiveMemeticHarmonyOptimization"] = AdaptiveMemeticHarmonyOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimization").set_name("LLAMAAdaptiveMemeticHarmonyOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHarmonyOptimization" + ).set_name("LLAMAAdaptiveMemeticHarmonyOptimization", register=True) +except Exception as e: # AdaptiveMemeticHarmonyOptimization print("AdaptiveMemeticHarmonyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimizationV5 import AdaptiveMemeticHarmonyOptimizationV5 +try: # AdaptiveMemeticHarmonyOptimizationV5 + from nevergrad.optimization.lama.AdaptiveMemeticHarmonyOptimizationV5 import ( + AdaptiveMemeticHarmonyOptimizationV5, + ) lama_register["AdaptiveMemeticHarmonyOptimizationV5"] = AdaptiveMemeticHarmonyOptimizationV5 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticHarmonyOptimizationV5 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimizationV5").set_name("LLAMAAdaptiveMemeticHarmonyOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHarmonyOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticHarmonyOptimizationV5 = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHarmonyOptimizationV5" + ).set_name("LLAMAAdaptiveMemeticHarmonyOptimizationV5", register=True) +except Exception as e: # AdaptiveMemeticHarmonyOptimizationV5 print("AdaptiveMemeticHarmonyOptimizationV5 can not be imported: ", e) -try: +try: # AdaptiveMemeticHybridOptimizer from nevergrad.optimization.lama.AdaptiveMemeticHybridOptimizer import AdaptiveMemeticHybridOptimizer lama_register["AdaptiveMemeticHybridOptimizer"] = AdaptiveMemeticHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHybridOptimizer").set_name("LLAMAAdaptiveMemeticHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: # AdaptiveMemeticHybridOptimizer print("AdaptiveMemeticHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveMemeticOptimizer from nevergrad.optimization.lama.AdaptiveMemeticOptimizer import AdaptiveMemeticOptimizer lama_register["AdaptiveMemeticOptimizer"] = AdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer").set_name("LLAMAAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizer").set_name( + "LLAMAAdaptiveMemeticOptimizer", register=True + ) +except Exception as e: # AdaptiveMemeticOptimizer print("AdaptiveMemeticOptimizer can not be imported: ", e) -try: +try: # AdaptiveMemeticOptimizerV2 from nevergrad.optimization.lama.AdaptiveMemeticOptimizerV2 import AdaptiveMemeticOptimizerV2 lama_register["AdaptiveMemeticOptimizerV2"] = AdaptiveMemeticOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2").set_name("LLAMAAdaptiveMemeticOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMemeticOptimizerV2").set_name( + "LLAMAAdaptiveMemeticOptimizerV2", register=True + ) +except Exception as e: # AdaptiveMemeticOptimizerV2 print("AdaptiveMemeticOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemeticParticleSwarmOptimization import AdaptiveMemeticParticleSwarmOptimization +try: # AdaptiveMemeticParticleSwarmOptimization + from nevergrad.optimization.lama.AdaptiveMemeticParticleSwarmOptimization import ( + AdaptiveMemeticParticleSwarmOptimization, + ) lama_register["AdaptiveMemeticParticleSwarmOptimization"] = AdaptiveMemeticParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemeticParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMemeticParticleSwarmOptimization").set_name("LLAMAAdaptiveMemeticParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemeticParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemeticParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMemeticParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveMemeticParticleSwarmOptimization", register=True) +except Exception as e: # AdaptiveMemeticParticleSwarmOptimization print("AdaptiveMemeticParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryAssistedStrategyV41 import AdaptiveMemoryAssistedStrategyV41 +try: # AdaptiveMemoryAssistedStrategyV41 + from nevergrad.optimization.lama.AdaptiveMemoryAssistedStrategyV41 import ( + AdaptiveMemoryAssistedStrategyV41, + ) lama_register["AdaptiveMemoryAssistedStrategyV41"] = AdaptiveMemoryAssistedStrategyV41 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryAssistedStrategyV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryAssistedStrategyV41 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryAssistedStrategyV41").set_name("LLAMAAdaptiveMemoryAssistedStrategyV41", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryAssistedStrategyV41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryAssistedStrategyV41 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryAssistedStrategyV41" + ).set_name("LLAMAAdaptiveMemoryAssistedStrategyV41", register=True) +except Exception as e: # AdaptiveMemoryAssistedStrategyV41 print("AdaptiveMemoryAssistedStrategyV41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryEnhancedDualStrategyV45 import AdaptiveMemoryEnhancedDualStrategyV45 +try: # AdaptiveMemoryEnhancedDualStrategyV45 + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedDualStrategyV45 import ( + AdaptiveMemoryEnhancedDualStrategyV45, + ) lama_register["AdaptiveMemoryEnhancedDualStrategyV45"] = AdaptiveMemoryEnhancedDualStrategyV45 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryEnhancedDualStrategyV45 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45").set_name("LLAMAAdaptiveMemoryEnhancedDualStrategyV45", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryEnhancedDualStrategyV45 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedDualStrategyV45" + ).set_name("LLAMAAdaptiveMemoryEnhancedDualStrategyV45", register=True) +except Exception as e: # AdaptiveMemoryEnhancedDualStrategyV45 print("AdaptiveMemoryEnhancedDualStrategyV45 can not be imported: ", e) -try: +try: # AdaptiveMemoryEnhancedSearch from nevergrad.optimization.lama.AdaptiveMemoryEnhancedSearch import AdaptiveMemoryEnhancedSearch lama_register["AdaptiveMemoryEnhancedSearch"] = AdaptiveMemoryEnhancedSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedSearch").set_name("LLAMAAdaptiveMemoryEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMAAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: # AdaptiveMemoryEnhancedSearch print("AdaptiveMemoryEnhancedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryEnhancedStrategyV42 import AdaptiveMemoryEnhancedStrategyV42 +try: # AdaptiveMemoryEnhancedStrategyV42 + from nevergrad.optimization.lama.AdaptiveMemoryEnhancedStrategyV42 import ( + AdaptiveMemoryEnhancedStrategyV42, + ) lama_register["AdaptiveMemoryEnhancedStrategyV42"] = AdaptiveMemoryEnhancedStrategyV42 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedStrategyV42").set_name("LLAMAAdaptiveMemoryEnhancedStrategyV42", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEnhancedStrategyV42" + ).set_name("LLAMAAdaptiveMemoryEnhancedStrategyV42", register=True) +except Exception as e: # AdaptiveMemoryEnhancedStrategyV42 print("AdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryEvolutionaryOptimizer import AdaptiveMemoryEvolutionaryOptimizer +try: # AdaptiveMemoryEvolutionaryOptimizer + from nevergrad.optimization.lama.AdaptiveMemoryEvolutionaryOptimizer import ( + AdaptiveMemoryEvolutionaryOptimizer, + ) lama_register["AdaptiveMemoryEvolutionaryOptimizer"] = AdaptiveMemoryEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEvolutionaryOptimizer").set_name("LLAMAAdaptiveMemoryEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveMemoryEvolutionaryOptimizer", register=True) +except Exception as e: # AdaptiveMemoryEvolutionaryOptimizer print("AdaptiveMemoryEvolutionaryOptimizer can not be imported: ", e) -try: +try: # AdaptiveMemoryGradientAnnealing from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealing import AdaptiveMemoryGradientAnnealing lama_register["AdaptiveMemoryGradientAnnealing"] = AdaptiveMemoryGradientAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryGradientAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealing").set_name("LLAMAAdaptiveMemoryGradientAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryGradientAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealing" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealing", register=True) +except Exception as e: # AdaptiveMemoryGradientAnnealing print("AdaptiveMemoryGradientAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingPlus import AdaptiveMemoryGradientAnnealingPlus +try: # AdaptiveMemoryGradientAnnealingPlus + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingPlus import ( + AdaptiveMemoryGradientAnnealingPlus, + ) lama_register["AdaptiveMemoryGradientAnnealingPlus"] = AdaptiveMemoryGradientAnnealingPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryGradientAnnealingPlus = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingPlus").set_name("LLAMAAdaptiveMemoryGradientAnnealingPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryGradientAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealingPlus" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealingPlus", register=True) +except Exception as e: # AdaptiveMemoryGradientAnnealingPlus print("AdaptiveMemoryGradientAnnealingPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingWithExplorationBoost import AdaptiveMemoryGradientAnnealingWithExplorationBoost - - lama_register["AdaptiveMemoryGradientAnnealingWithExplorationBoost"] = AdaptiveMemoryGradientAnnealingWithExplorationBoost - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost").set_name("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) -except Exception as e: +try: # AdaptiveMemoryGradientAnnealingWithExplorationBoost + from nevergrad.optimization.lama.AdaptiveMemoryGradientAnnealingWithExplorationBoost import ( + AdaptiveMemoryGradientAnnealingWithExplorationBoost, + ) + + lama_register["AdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( + AdaptiveMemoryGradientAnnealingWithExplorationBoost + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost" + ).set_name("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) +except Exception as e: # AdaptiveMemoryGradientAnnealingWithExplorationBoost print("AdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryGradientSimulatedAnnealing import AdaptiveMemoryGradientSimulatedAnnealing +try: # AdaptiveMemoryGradientSimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveMemoryGradientSimulatedAnnealing import ( + AdaptiveMemoryGradientSimulatedAnnealing, + ) lama_register["AdaptiveMemoryGradientSimulatedAnnealing"] = AdaptiveMemoryGradientSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryGradientSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing").set_name("LLAMAAdaptiveMemoryGradientSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryGradientSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGradientSimulatedAnnealing" + ).set_name("LLAMAAdaptiveMemoryGradientSimulatedAnnealing", register=True) +except Exception as e: # AdaptiveMemoryGradientSimulatedAnnealing print("AdaptiveMemoryGradientSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryGuidedEvolutionStrategyV57 import AdaptiveMemoryGuidedEvolutionStrategyV57 +try: # AdaptiveMemoryGuidedEvolutionStrategyV57 + from nevergrad.optimization.lama.AdaptiveMemoryGuidedEvolutionStrategyV57 import ( + AdaptiveMemoryGuidedEvolutionStrategyV57, + ) lama_register["AdaptiveMemoryGuidedEvolutionStrategyV57"] = AdaptiveMemoryGuidedEvolutionStrategyV57 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57").set_name("LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57" + ).set_name("LLAMAAdaptiveMemoryGuidedEvolutionStrategyV57", register=True) +except Exception as e: # AdaptiveMemoryGuidedEvolutionStrategyV57 print("AdaptiveMemoryGuidedEvolutionStrategyV57 can not be imported: ", e) -try: +try: # AdaptiveMemoryHybridAnnealing from nevergrad.optimization.lama.AdaptiveMemoryHybridAnnealing import AdaptiveMemoryHybridAnnealing lama_register["AdaptiveMemoryHybridAnnealing"] = AdaptiveMemoryHybridAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryHybridAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridAnnealing").set_name("LLAMAAdaptiveMemoryHybridAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryHybridAnnealing" + ).set_name("LLAMAAdaptiveMemoryHybridAnnealing", register=True) +except Exception as e: # AdaptiveMemoryHybridAnnealing print("AdaptiveMemoryHybridAnnealing can not be imported: ", e) -try: +try: # AdaptiveMemoryHybridDEPSO from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO import AdaptiveMemoryHybridDEPSO lama_register["AdaptiveMemoryHybridDEPSO"] = AdaptiveMemoryHybridDEPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO").set_name("LLAMAAdaptiveMemoryHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO").set_name( + "LLAMAAdaptiveMemoryHybridDEPSO", register=True + ) +except Exception as e: # AdaptiveMemoryHybridDEPSO print("AdaptiveMemoryHybridDEPSO can not be imported: ", e) -try: +try: # AdaptiveMemoryHybridDEPSO_V2 from nevergrad.optimization.lama.AdaptiveMemoryHybridDEPSO_V2 import AdaptiveMemoryHybridDEPSO_V2 lama_register["AdaptiveMemoryHybridDEPSO_V2"] = AdaptiveMemoryHybridDEPSO_V2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryHybridDEPSO_V2 = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO_V2").set_name("LLAMAAdaptiveMemoryHybridDEPSO_V2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryHybridDEPSO_V2 = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryHybridDEPSO_V2" + ).set_name("LLAMAAdaptiveMemoryHybridDEPSO_V2", register=True) +except Exception as e: # AdaptiveMemoryHybridDEPSO_V2 print("AdaptiveMemoryHybridDEPSO_V2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemoryParticleDifferentialSearch import AdaptiveMemoryParticleDifferentialSearch +try: # AdaptiveMemoryParticleDifferentialSearch + from nevergrad.optimization.lama.AdaptiveMemoryParticleDifferentialSearch import ( + AdaptiveMemoryParticleDifferentialSearch, + ) lama_register["AdaptiveMemoryParticleDifferentialSearch"] = AdaptiveMemoryParticleDifferentialSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemoryParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveMemoryParticleDifferentialSearch").set_name("LLAMAAdaptiveMemoryParticleDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemoryParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemoryParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveMemoryParticleDifferentialSearch" + ).set_name("LLAMAAdaptiveMemoryParticleDifferentialSearch", register=True) +except Exception as e: # AdaptiveMemoryParticleDifferentialSearch print("AdaptiveMemoryParticleDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMemorySelfTuningStrategyV60 import AdaptiveMemorySelfTuningStrategyV60 +try: # AdaptiveMemorySelfTuningStrategyV60 + from nevergrad.optimization.lama.AdaptiveMemorySelfTuningStrategyV60 import ( + AdaptiveMemorySelfTuningStrategyV60, + ) lama_register["AdaptiveMemorySelfTuningStrategyV60"] = AdaptiveMemorySelfTuningStrategyV60 - res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySelfTuningStrategyV60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemorySelfTuningStrategyV60 = NonObjectOptimizer(method="LLAMAAdaptiveMemorySelfTuningStrategyV60").set_name("LLAMAAdaptiveMemorySelfTuningStrategyV60", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySelfTuningStrategyV60")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemorySelfTuningStrategyV60 = NonObjectOptimizer( + method="LLAMAAdaptiveMemorySelfTuningStrategyV60" + ).set_name("LLAMAAdaptiveMemorySelfTuningStrategyV60", register=True) +except Exception as e: # AdaptiveMemorySelfTuningStrategyV60 print("AdaptiveMemorySelfTuningStrategyV60 can not be imported: ", e) -try: +try: # AdaptiveMemorySimulatedAnnealing from nevergrad.optimization.lama.AdaptiveMemorySimulatedAnnealing import AdaptiveMemorySimulatedAnnealing lama_register["AdaptiveMemorySimulatedAnnealing"] = AdaptiveMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMemorySimulatedAnnealing").set_name("LLAMAAdaptiveMemorySimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveMemorySimulatedAnnealing", register=True) +except Exception as e: # AdaptiveMemorySimulatedAnnealing print("AdaptiveMemorySimulatedAnnealing can not be imported: ", e) -try: +try: # AdaptiveMetaNetAQAPSO from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSO import AdaptiveMetaNetAQAPSO lama_register["AdaptiveMetaNetAQAPSO"] = AdaptiveMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO").set_name("LLAMAAdaptiveMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSO").set_name( + "LLAMAAdaptiveMetaNetAQAPSO", register=True + ) +except Exception as e: # AdaptiveMetaNetAQAPSO print("AdaptiveMetaNetAQAPSO can not be imported: ", e) -try: +try: # AdaptiveMetaNetAQAPSOv13 from nevergrad.optimization.lama.AdaptiveMetaNetAQAPSOv13 import AdaptiveMetaNetAQAPSOv13 lama_register["AdaptiveMetaNetAQAPSOv13"] = AdaptiveMetaNetAQAPSOv13 - res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMetaNetAQAPSOv13 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13").set_name("LLAMAAdaptiveMetaNetAQAPSOv13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMetaNetAQAPSOv13 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetAQAPSOv13").set_name( + "LLAMAAdaptiveMetaNetAQAPSOv13", register=True + ) +except Exception as e: # AdaptiveMetaNetAQAPSOv13 print("AdaptiveMetaNetAQAPSOv13 can not be imported: ", e) -try: +try: # AdaptiveMetaNetPSO_v3 from nevergrad.optimization.lama.AdaptiveMetaNetPSO_v3 import AdaptiveMetaNetPSO_v3 lama_register["AdaptiveMetaNetPSO_v3"] = AdaptiveMetaNetPSO_v3 - res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3").set_name("LLAMAAdaptiveMetaNetPSO_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSO_v3").set_name( + "LLAMAAdaptiveMetaNetPSO_v3", register=True + ) +except Exception as e: # AdaptiveMetaNetPSO_v3 print("AdaptiveMetaNetPSO_v3 can not be imported: ", e) -try: +try: # AdaptiveMetaNetPSOv3 from nevergrad.optimization.lama.AdaptiveMetaNetPSOv3 import AdaptiveMetaNetPSOv3 lama_register["AdaptiveMetaNetPSOv3"] = AdaptiveMetaNetPSOv3 - res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMetaNetPSOv3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3").set_name("LLAMAAdaptiveMetaNetPSOv3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMetaNetPSOv3 = NonObjectOptimizer(method="LLAMAAdaptiveMetaNetPSOv3").set_name( + "LLAMAAdaptiveMetaNetPSOv3", register=True + ) +except Exception as e: # AdaptiveMetaNetPSOv3 print("AdaptiveMetaNetPSOv3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMetaheuristicOptimization import AdaptiveMetaheuristicOptimization +try: # AdaptiveMetaheuristicOptimization + from nevergrad.optimization.lama.AdaptiveMetaheuristicOptimization import ( + AdaptiveMetaheuristicOptimization, + ) lama_register["AdaptiveMetaheuristicOptimization"] = AdaptiveMetaheuristicOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMetaheuristicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMetaheuristicOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMetaheuristicOptimization").set_name("LLAMAAdaptiveMetaheuristicOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMetaheuristicOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMetaheuristicOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMetaheuristicOptimization" + ).set_name("LLAMAAdaptiveMetaheuristicOptimization", register=True) +except Exception as e: # AdaptiveMetaheuristicOptimization print("AdaptiveMetaheuristicOptimization can not be imported: ", e) -try: +try: # AdaptiveMomentumOptimization from nevergrad.optimization.lama.AdaptiveMomentumOptimization import AdaptiveMomentumOptimization lama_register["AdaptiveMomentumOptimization"] = AdaptiveMomentumOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMomentumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMomentumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMomentumOptimization").set_name("LLAMAAdaptiveMomentumOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMomentumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMomentumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMomentumOptimization" + ).set_name("LLAMAAdaptiveMomentumOptimization", register=True) +except Exception as e: # AdaptiveMomentumOptimization print("AdaptiveMomentumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiExplorationAlgorithm import AdaptiveMultiExplorationAlgorithm +try: # AdaptiveMultiExplorationAlgorithm + from nevergrad.optimization.lama.AdaptiveMultiExplorationAlgorithm import ( + AdaptiveMultiExplorationAlgorithm, + ) lama_register["AdaptiveMultiExplorationAlgorithm"] = AdaptiveMultiExplorationAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiExplorationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiExplorationAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveMultiExplorationAlgorithm").set_name("LLAMAAdaptiveMultiExplorationAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiExplorationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiExplorationAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveMultiExplorationAlgorithm" + ).set_name("LLAMAAdaptiveMultiExplorationAlgorithm", register=True) +except Exception as e: # AdaptiveMultiExplorationAlgorithm print("AdaptiveMultiExplorationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiMemorySimulatedAnnealing import AdaptiveMultiMemorySimulatedAnnealing +try: # AdaptiveMultiMemorySimulatedAnnealing + from nevergrad.optimization.lama.AdaptiveMultiMemorySimulatedAnnealing import ( + AdaptiveMultiMemorySimulatedAnnealing, + ) lama_register["AdaptiveMultiMemorySimulatedAnnealing"] = AdaptiveMultiMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiMemorySimulatedAnnealing").set_name("LLAMAAdaptiveMultiMemorySimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveMultiMemorySimulatedAnnealing" + ).set_name("LLAMAAdaptiveMultiMemorySimulatedAnnealing", register=True) +except Exception as e: # AdaptiveMultiMemorySimulatedAnnealing print("AdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiOperatorDifferentialEvolution import AdaptiveMultiOperatorDifferentialEvolution +try: # AdaptiveMultiOperatorDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveMultiOperatorDifferentialEvolution import ( + AdaptiveMultiOperatorDifferentialEvolution, + ) lama_register["AdaptiveMultiOperatorDifferentialEvolution"] = AdaptiveMultiOperatorDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiOperatorDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorDifferentialEvolution").set_name("LLAMAAdaptiveMultiOperatorDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiOperatorDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiOperatorDifferentialEvolution", register=True) +except Exception as e: # AdaptiveMultiOperatorDifferentialEvolution print("AdaptiveMultiOperatorDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveMultiOperatorSearch from nevergrad.optimization.lama.AdaptiveMultiOperatorSearch import AdaptiveMultiOperatorSearch lama_register["AdaptiveMultiOperatorSearch"] = AdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch").set_name("LLAMAAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearch").set_name( + "LLAMAAdaptiveMultiOperatorSearch", register=True + ) +except Exception as e: # AdaptiveMultiOperatorSearch print("AdaptiveMultiOperatorSearch can not be imported: ", e) -try: +try: # AdaptiveMultiOperatorSearchV2 from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV2 import AdaptiveMultiOperatorSearchV2 lama_register["AdaptiveMultiOperatorSearchV2"] = AdaptiveMultiOperatorSearchV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiOperatorSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV2").set_name("LLAMAAdaptiveMultiOperatorSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiOperatorSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorSearchV2" + ).set_name("LLAMAAdaptiveMultiOperatorSearchV2", register=True) +except Exception as e: # AdaptiveMultiOperatorSearchV2 print("AdaptiveMultiOperatorSearchV2 can not be imported: ", e) -try: +try: # AdaptiveMultiOperatorSearchV3 from nevergrad.optimization.lama.AdaptiveMultiOperatorSearchV3 import AdaptiveMultiOperatorSearchV3 lama_register["AdaptiveMultiOperatorSearchV3"] = AdaptiveMultiOperatorSearchV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiOperatorSearchV3 = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV3").set_name("LLAMAAdaptiveMultiOperatorSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiOperatorSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiOperatorSearchV3 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiOperatorSearchV3" + ).set_name("LLAMAAdaptiveMultiOperatorSearchV3", register=True) +except Exception as e: # AdaptiveMultiOperatorSearchV3 print("AdaptiveMultiOperatorSearchV3 can not be imported: ", e) -try: +try: # AdaptiveMultiPhaseAnnealing from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealing import AdaptiveMultiPhaseAnnealing lama_register["AdaptiveMultiPhaseAnnealing"] = AdaptiveMultiPhaseAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing").set_name("LLAMAAdaptiveMultiPhaseAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealing").set_name( + "LLAMAAdaptiveMultiPhaseAnnealing", register=True + ) +except Exception as e: # AdaptiveMultiPhaseAnnealing print("AdaptiveMultiPhaseAnnealing can not be imported: ", e) -try: +try: # AdaptiveMultiPhaseAnnealingV2 from nevergrad.optimization.lama.AdaptiveMultiPhaseAnnealingV2 import AdaptiveMultiPhaseAnnealingV2 lama_register["AdaptiveMultiPhaseAnnealingV2"] = AdaptiveMultiPhaseAnnealingV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiPhaseAnnealingV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealingV2").set_name("LLAMAAdaptiveMultiPhaseAnnealingV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiPhaseAnnealingV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPhaseAnnealingV2" + ).set_name("LLAMAAdaptiveMultiPhaseAnnealingV2", register=True) +except Exception as e: # AdaptiveMultiPhaseAnnealingV2 print("AdaptiveMultiPhaseAnnealingV2 can not be imported: ", e) -try: +try: # AdaptiveMultiPhaseOptimization from nevergrad.optimization.lama.AdaptiveMultiPhaseOptimization import AdaptiveMultiPhaseOptimization lama_register["AdaptiveMultiPhaseOptimization"] = AdaptiveMultiPhaseOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiPhaseOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseOptimization").set_name("LLAMAAdaptiveMultiPhaseOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPhaseOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiPhaseOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPhaseOptimization" + ).set_name("LLAMAAdaptiveMultiPhaseOptimization", register=True) +except Exception as e: # AdaptiveMultiPhaseOptimization print("AdaptiveMultiPhaseOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiPopulationDifferentialEvolution import AdaptiveMultiPopulationDifferentialEvolution - - lama_register["AdaptiveMultiPopulationDifferentialEvolution"] = AdaptiveMultiPopulationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiPopulationDifferentialEvolution").set_name("LLAMAAdaptiveMultiPopulationDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveMultiPopulationDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveMultiPopulationDifferentialEvolution import ( + AdaptiveMultiPopulationDifferentialEvolution, + ) + + lama_register["AdaptiveMultiPopulationDifferentialEvolution"] = ( + AdaptiveMultiPopulationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiPopulationDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiPopulationDifferentialEvolution", register=True) +except Exception as e: # AdaptiveMultiPopulationDifferentialEvolution print("AdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveMultiStageOptimization from nevergrad.optimization.lama.AdaptiveMultiStageOptimization import AdaptiveMultiStageOptimization lama_register["AdaptiveMultiStageOptimization"] = AdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAAdaptiveMultiStageOptimization").set_name("LLAMAAdaptiveMultiStageOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStageOptimization" + ).set_name("LLAMAAdaptiveMultiStageOptimization", register=True) +except Exception as e: # AdaptiveMultiStageOptimization print("AdaptiveMultiStageOptimization can not be imported: ", e) -try: +try: # AdaptiveMultiStrategicOptimizer from nevergrad.optimization.lama.AdaptiveMultiStrategicOptimizer import AdaptiveMultiStrategicOptimizer lama_register["AdaptiveMultiStrategicOptimizer"] = AdaptiveMultiStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategicOptimizer").set_name("LLAMAAdaptiveMultiStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategicOptimizer" + ).set_name("LLAMAAdaptiveMultiStrategicOptimizer", register=True) +except Exception as e: # AdaptiveMultiStrategicOptimizer print("AdaptiveMultiStrategicOptimizer can not be imported: ", e) -try: +try: # AdaptiveMultiStrategyDE from nevergrad.optimization.lama.AdaptiveMultiStrategyDE import AdaptiveMultiStrategyDE lama_register["AdaptiveMultiStrategyDE"] = AdaptiveMultiStrategyDE - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE").set_name("LLAMAAdaptiveMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDE").set_name( + "LLAMAAdaptiveMultiStrategyDE", register=True + ) +except Exception as e: # AdaptiveMultiStrategyDE print("AdaptiveMultiStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDEWithMemory import AdaptiveMultiStrategyDEWithMemory +try: # AdaptiveMultiStrategyDEWithMemory + from nevergrad.optimization.lama.AdaptiveMultiStrategyDEWithMemory import ( + AdaptiveMultiStrategyDEWithMemory, + ) lama_register["AdaptiveMultiStrategyDEWithMemory"] = AdaptiveMultiStrategyDEWithMemory - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDEWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyDEWithMemory = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDEWithMemory").set_name("LLAMAAdaptiveMultiStrategyDEWithMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDEWithMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyDEWithMemory = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDEWithMemory" + ).set_name("LLAMAAdaptiveMultiStrategyDEWithMemory", register=True) +except Exception as e: # AdaptiveMultiStrategyDEWithMemory print("AdaptiveMultiStrategyDEWithMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolution import AdaptiveMultiStrategyDifferentialEvolution +try: # AdaptiveMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolution import ( + AdaptiveMultiStrategyDifferentialEvolution, + ) lama_register["AdaptiveMultiStrategyDifferentialEvolution"] = AdaptiveMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # AdaptiveMultiStrategyDifferentialEvolution print("AdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolutionPlus import AdaptiveMultiStrategyDifferentialEvolutionPlus - - lama_register["AdaptiveMultiStrategyDifferentialEvolutionPlus"] = AdaptiveMultiStrategyDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus").set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus", register=True) -except Exception as e: +try: # AdaptiveMultiStrategyDifferentialEvolutionPlus + from nevergrad.optimization.lama.AdaptiveMultiStrategyDifferentialEvolutionPlus import ( + AdaptiveMultiStrategyDifferentialEvolutionPlus, + ) + + lama_register["AdaptiveMultiStrategyDifferentialEvolutionPlus"] = ( + AdaptiveMultiStrategyDifferentialEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveMultiStrategyDifferentialEvolutionPlus", register=True) +except Exception as e: # AdaptiveMultiStrategyDifferentialEvolutionPlus print("AdaptiveMultiStrategyDifferentialEvolutionPlus can not be imported: ", e) -try: +try: # AdaptiveMultiStrategyOptimizer from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizer import AdaptiveMultiStrategyOptimizer lama_register["AdaptiveMultiStrategyOptimizer"] = AdaptiveMultiStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizer").set_name("LLAMAAdaptiveMultiStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: # AdaptiveMultiStrategyOptimizer print("AdaptiveMultiStrategyOptimizer can not be imported: ", e) -try: +try: # AdaptiveMultiStrategyOptimizerV2 from nevergrad.optimization.lama.AdaptiveMultiStrategyOptimizerV2 import AdaptiveMultiStrategyOptimizerV2 lama_register["AdaptiveMultiStrategyOptimizerV2"] = AdaptiveMultiStrategyOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveMultiStrategyOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizerV2").set_name("LLAMAAdaptiveMultiStrategyOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveMultiStrategyOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveMultiStrategyOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveMultiStrategyOptimizerV2" + ).set_name("LLAMAAdaptiveMultiStrategyOptimizerV2", register=True) +except Exception as e: # AdaptiveMultiStrategyOptimizerV2 print("AdaptiveMultiStrategyOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveNicheDifferentialParticleSwarmOptimizer import AdaptiveNicheDifferentialParticleSwarmOptimizer - - lama_register["AdaptiveNicheDifferentialParticleSwarmOptimizer"] = AdaptiveNicheDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # AdaptiveNicheDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.AdaptiveNicheDifferentialParticleSwarmOptimizer import ( + AdaptiveNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["AdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( + AdaptiveNicheDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # AdaptiveNicheDifferentialParticleSwarmOptimizer print("AdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: +try: # AdaptiveNichingDE_PSO from nevergrad.optimization.lama.AdaptiveNichingDE_PSO import AdaptiveNichingDE_PSO lama_register["AdaptiveNichingDE_PSO"] = AdaptiveNichingDE_PSO - res = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveNichingDE_PSO = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO").set_name("LLAMAAdaptiveNichingDE_PSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveNichingDE_PSO = NonObjectOptimizer(method="LLAMAAdaptiveNichingDE_PSO").set_name( + "LLAMAAdaptiveNichingDE_PSO", register=True + ) +except Exception as e: # AdaptiveNichingDE_PSO print("AdaptiveNichingDE_PSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolution import AdaptiveOppositionBasedDifferentialEvolution - - lama_register["AdaptiveOppositionBasedDifferentialEvolution"] = AdaptiveOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolution import ( + AdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["AdaptiveOppositionBasedDifferentialEvolution"] = ( + AdaptiveOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # AdaptiveOppositionBasedDifferentialEvolution print("AdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolutionImproved import AdaptiveOppositionBasedDifferentialEvolutionImproved - - lama_register["AdaptiveOppositionBasedDifferentialEvolutionImproved"] = AdaptiveOppositionBasedDifferentialEvolutionImproved - res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved").set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved", register=True) -except Exception as e: +try: # AdaptiveOppositionBasedDifferentialEvolutionImproved + from nevergrad.optimization.lama.AdaptiveOppositionBasedDifferentialEvolutionImproved import ( + AdaptiveOppositionBasedDifferentialEvolutionImproved, + ) + + lama_register["AdaptiveOppositionBasedDifferentialEvolutionImproved"] = ( + AdaptiveOppositionBasedDifferentialEvolutionImproved + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved" + ).set_name("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved", register=True) +except Exception as e: # AdaptiveOppositionBasedDifferentialEvolutionImproved print("AdaptiveOppositionBasedDifferentialEvolutionImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE import AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE - - lama_register["AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE"] = AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE - res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE").set_name("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE", register=True) -except Exception as e: +try: # AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE + from nevergrad.optimization.lama.AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE import ( + AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE, + ) + + lama_register["AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE"] = ( + AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE = NonObjectOptimizer( + method="LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE" + ).set_name("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE", register=True) +except Exception as e: # AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE print("AdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveOrthogonalDifferentialEvolution import AdaptiveOrthogonalDifferentialEvolution +try: # AdaptiveOrthogonalDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveOrthogonalDifferentialEvolution import ( + AdaptiveOrthogonalDifferentialEvolution, + ) lama_register["AdaptiveOrthogonalDifferentialEvolution"] = AdaptiveOrthogonalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAAdaptiveOrthogonalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: # AdaptiveOrthogonalDifferentialEvolution print("AdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveOscillatoryCrossoverDifferentialEvolution import AdaptiveOscillatoryCrossoverDifferentialEvolution - - lama_register["AdaptiveOscillatoryCrossoverDifferentialEvolution"] = AdaptiveOscillatoryCrossoverDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution").set_name("LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveOscillatoryCrossoverDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveOscillatoryCrossoverDifferentialEvolution import ( + AdaptiveOscillatoryCrossoverDifferentialEvolution, + ) + + lama_register["AdaptiveOscillatoryCrossoverDifferentialEvolution"] = ( + AdaptiveOscillatoryCrossoverDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution" + ).set_name("LLAMAAdaptiveOscillatoryCrossoverDifferentialEvolution", register=True) +except Exception as e: # AdaptiveOscillatoryCrossoverDifferentialEvolution print("AdaptiveOscillatoryCrossoverDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveParticleDifferentialSearch import AdaptiveParticleDifferentialSearch +try: # AdaptiveParticleDifferentialSearch + from nevergrad.optimization.lama.AdaptiveParticleDifferentialSearch import ( + AdaptiveParticleDifferentialSearch, + ) lama_register["AdaptiveParticleDifferentialSearch"] = AdaptiveParticleDifferentialSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAAdaptiveParticleDifferentialSearch").set_name("LLAMAAdaptiveParticleDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAAdaptiveParticleDifferentialSearch" + ).set_name("LLAMAAdaptiveParticleDifferentialSearch", register=True) +except Exception as e: # AdaptiveParticleDifferentialSearch print("AdaptiveParticleDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveParticleSwarmOptimization import AdaptiveParticleSwarmOptimization +try: # AdaptiveParticleSwarmOptimization + from nevergrad.optimization.lama.AdaptiveParticleSwarmOptimization import ( + AdaptiveParticleSwarmOptimization, + ) lama_register["AdaptiveParticleSwarmOptimization"] = AdaptiveParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveParticleSwarmOptimization").set_name("LLAMAAdaptiveParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveParticleSwarmOptimization", register=True) +except Exception as e: # AdaptiveParticleSwarmOptimization print("AdaptiveParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePerturbationDifferentialEvolution import AdaptivePerturbationDifferentialEvolution +try: # AdaptivePerturbationDifferentialEvolution + from nevergrad.optimization.lama.AdaptivePerturbationDifferentialEvolution import ( + AdaptivePerturbationDifferentialEvolution, + ) lama_register["AdaptivePerturbationDifferentialEvolution"] = AdaptivePerturbationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptivePerturbationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePerturbationDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePerturbationDifferentialEvolution").set_name("LLAMAAdaptivePerturbationDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePerturbationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePerturbationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePerturbationDifferentialEvolution" + ).set_name("LLAMAAdaptivePerturbationDifferentialEvolution", register=True) +except Exception as e: # AdaptivePerturbationDifferentialEvolution print("AdaptivePerturbationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePopulationDifferentialEvolutionOptimizer import AdaptivePopulationDifferentialEvolutionOptimizer - - lama_register["AdaptivePopulationDifferentialEvolutionOptimizer"] = AdaptivePopulationDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePopulationDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer").set_name("LLAMAAdaptivePopulationDifferentialEvolutionOptimizer", register=True) -except Exception as e: +try: # AdaptivePopulationDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.AdaptivePopulationDifferentialEvolutionOptimizer import ( + AdaptivePopulationDifferentialEvolutionOptimizer, + ) + + lama_register["AdaptivePopulationDifferentialEvolutionOptimizer"] = ( + AdaptivePopulationDifferentialEvolutionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePopulationDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationDifferentialEvolutionOptimizer" + ).set_name("LLAMAAdaptivePopulationDifferentialEvolutionOptimizer", register=True) +except Exception as e: # AdaptivePopulationDifferentialEvolutionOptimizer print("AdaptivePopulationDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - - lama_register["AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - res = NonObjectOptimizer(method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) -except Exception as e: +try: # AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + from nevergrad.optimization.lama.AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( + AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( + AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: # AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch print("AdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePopulationMemeticOptimizer import AdaptivePopulationMemeticOptimizer +try: # AdaptivePopulationMemeticOptimizer + from nevergrad.optimization.lama.AdaptivePopulationMemeticOptimizer import ( + AdaptivePopulationMemeticOptimizer, + ) lama_register["AdaptivePopulationMemeticOptimizer"] = AdaptivePopulationMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePopulationMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationMemeticOptimizer").set_name("LLAMAAdaptivePopulationMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationMemeticOptimizer" + ).set_name("LLAMAAdaptivePopulationMemeticOptimizer", register=True) +except Exception as e: # AdaptivePopulationMemeticOptimizer print("AdaptivePopulationMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePopulationResizingOptimizer import AdaptivePopulationResizingOptimizer +try: # AdaptivePopulationResizingOptimizer + from nevergrad.optimization.lama.AdaptivePopulationResizingOptimizer import ( + AdaptivePopulationResizingOptimizer, + ) lama_register["AdaptivePopulationResizingOptimizer"] = AdaptivePopulationResizingOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptivePopulationResizingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePopulationResizingOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePopulationResizingOptimizer").set_name("LLAMAAdaptivePopulationResizingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePopulationResizingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePopulationResizingOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePopulationResizingOptimizer" + ).set_name("LLAMAAdaptivePopulationResizingOptimizer", register=True) +except Exception as e: # AdaptivePopulationResizingOptimizer print("AdaptivePopulationResizingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionCohortOptimizationV3 import AdaptivePrecisionCohortOptimizationV3 +try: # AdaptivePrecisionCohortOptimizationV3 + from nevergrad.optimization.lama.AdaptivePrecisionCohortOptimizationV3 import ( + AdaptivePrecisionCohortOptimizationV3, + ) lama_register["AdaptivePrecisionCohortOptimizationV3"] = AdaptivePrecisionCohortOptimizationV3 - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCohortOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionCohortOptimizationV3 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCohortOptimizationV3").set_name("LLAMAAdaptivePrecisionCohortOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCohortOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionCohortOptimizationV3 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionCohortOptimizationV3" + ).set_name("LLAMAAdaptivePrecisionCohortOptimizationV3", register=True) +except Exception as e: # AdaptivePrecisionCohortOptimizationV3 print("AdaptivePrecisionCohortOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionControlDifferentialEvolution import AdaptivePrecisionControlDifferentialEvolution - - lama_register["AdaptivePrecisionControlDifferentialEvolution"] = AdaptivePrecisionControlDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionControlDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionControlDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionControlDifferentialEvolution").set_name("LLAMAAdaptivePrecisionControlDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptivePrecisionControlDifferentialEvolution + from nevergrad.optimization.lama.AdaptivePrecisionControlDifferentialEvolution import ( + AdaptivePrecisionControlDifferentialEvolution, + ) + + lama_register["AdaptivePrecisionControlDifferentialEvolution"] = ( + AdaptivePrecisionControlDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionControlDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionControlDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionControlDifferentialEvolution" + ).set_name("LLAMAAdaptivePrecisionControlDifferentialEvolution", register=True) +except Exception as e: # AdaptivePrecisionControlDifferentialEvolution print("AdaptivePrecisionControlDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionCrossoverEvolution import AdaptivePrecisionCrossoverEvolution +try: # AdaptivePrecisionCrossoverEvolution + from nevergrad.optimization.lama.AdaptivePrecisionCrossoverEvolution import ( + AdaptivePrecisionCrossoverEvolution, + ) lama_register["AdaptivePrecisionCrossoverEvolution"] = AdaptivePrecisionCrossoverEvolution - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCrossoverEvolution").set_name("LLAMAAdaptivePrecisionCrossoverEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionCrossoverEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionCrossoverEvolution" + ).set_name("LLAMAAdaptivePrecisionCrossoverEvolution", register=True) +except Exception as e: # AdaptivePrecisionCrossoverEvolution print("AdaptivePrecisionCrossoverEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionDifferentialEvolution import AdaptivePrecisionDifferentialEvolution +try: # AdaptivePrecisionDifferentialEvolution + from nevergrad.optimization.lama.AdaptivePrecisionDifferentialEvolution import ( + AdaptivePrecisionDifferentialEvolution, + ) lama_register["AdaptivePrecisionDifferentialEvolution"] = AdaptivePrecisionDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDifferentialEvolution").set_name("LLAMAAdaptivePrecisionDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDifferentialEvolution" + ).set_name("LLAMAAdaptivePrecisionDifferentialEvolution", register=True) +except Exception as e: # AdaptivePrecisionDifferentialEvolution print("AdaptivePrecisionDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptivePrecisionDivideSearch from nevergrad.optimization.lama.AdaptivePrecisionDivideSearch import AdaptivePrecisionDivideSearch lama_register["AdaptivePrecisionDivideSearch"] = AdaptivePrecisionDivideSearch - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionDivideSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDivideSearch").set_name("LLAMAAdaptivePrecisionDivideSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionDivideSearch = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDivideSearch" + ).set_name("LLAMAAdaptivePrecisionDivideSearch", register=True) +except Exception as e: # AdaptivePrecisionDivideSearch print("AdaptivePrecisionDivideSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionDynamicMemoryStrategyV48 import AdaptivePrecisionDynamicMemoryStrategyV48 +try: # AdaptivePrecisionDynamicMemoryStrategyV48 + from nevergrad.optimization.lama.AdaptivePrecisionDynamicMemoryStrategyV48 import ( + AdaptivePrecisionDynamicMemoryStrategyV48, + ) lama_register["AdaptivePrecisionDynamicMemoryStrategyV48"] = AdaptivePrecisionDynamicMemoryStrategyV48 - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionDynamicMemoryStrategyV48 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48").set_name("LLAMAAdaptivePrecisionDynamicMemoryStrategyV48", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionDynamicMemoryStrategyV48 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionDynamicMemoryStrategyV48" + ).set_name("LLAMAAdaptivePrecisionDynamicMemoryStrategyV48", register=True) +except Exception as e: # AdaptivePrecisionDynamicMemoryStrategyV48 print("AdaptivePrecisionDynamicMemoryStrategyV48 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionEvolutionStrategy import AdaptivePrecisionEvolutionStrategy +try: # AdaptivePrecisionEvolutionStrategy + from nevergrad.optimization.lama.AdaptivePrecisionEvolutionStrategy import ( + AdaptivePrecisionEvolutionStrategy, + ) lama_register["AdaptivePrecisionEvolutionStrategy"] = AdaptivePrecisionEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptivePrecisionEvolutionStrategy").set_name("LLAMAAdaptivePrecisionEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionEvolutionStrategy" + ).set_name("LLAMAAdaptivePrecisionEvolutionStrategy", register=True) +except Exception as e: # AdaptivePrecisionEvolutionStrategy print("AdaptivePrecisionEvolutionStrategy can not be imported: ", e) -try: +try: # AdaptivePrecisionFocalStrategy from nevergrad.optimization.lama.AdaptivePrecisionFocalStrategy import AdaptivePrecisionFocalStrategy lama_register["AdaptivePrecisionFocalStrategy"] = AdaptivePrecisionFocalStrategy - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionFocalStrategy = NonObjectOptimizer(method="LLAMAAdaptivePrecisionFocalStrategy").set_name("LLAMAAdaptivePrecisionFocalStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionFocalStrategy = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionFocalStrategy" + ).set_name("LLAMAAdaptivePrecisionFocalStrategy", register=True) +except Exception as e: # AdaptivePrecisionFocalStrategy print("AdaptivePrecisionFocalStrategy can not be imported: ", e) -try: +try: # AdaptivePrecisionHybridSearch from nevergrad.optimization.lama.AdaptivePrecisionHybridSearch import AdaptivePrecisionHybridSearch lama_register["AdaptivePrecisionHybridSearch"] = AdaptivePrecisionHybridSearch - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionHybridSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionHybridSearch").set_name("LLAMAAdaptivePrecisionHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionHybridSearch" + ).set_name("LLAMAAdaptivePrecisionHybridSearch", register=True) +except Exception as e: # AdaptivePrecisionHybridSearch print("AdaptivePrecisionHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionMemoryStrategyV47 import AdaptivePrecisionMemoryStrategyV47 +try: # AdaptivePrecisionMemoryStrategyV47 + from nevergrad.optimization.lama.AdaptivePrecisionMemoryStrategyV47 import ( + AdaptivePrecisionMemoryStrategyV47, + ) lama_register["AdaptivePrecisionMemoryStrategyV47"] = AdaptivePrecisionMemoryStrategyV47 - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionMemoryStrategyV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionMemoryStrategyV47 = NonObjectOptimizer(method="LLAMAAdaptivePrecisionMemoryStrategyV47").set_name("LLAMAAdaptivePrecisionMemoryStrategyV47", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionMemoryStrategyV47")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionMemoryStrategyV47 = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionMemoryStrategyV47" + ).set_name("LLAMAAdaptivePrecisionMemoryStrategyV47", register=True) +except Exception as e: # AdaptivePrecisionMemoryStrategyV47 print("AdaptivePrecisionMemoryStrategyV47 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionRotationalClimbOptimizer import AdaptivePrecisionRotationalClimbOptimizer +try: # AdaptivePrecisionRotationalClimbOptimizer + from nevergrad.optimization.lama.AdaptivePrecisionRotationalClimbOptimizer import ( + AdaptivePrecisionRotationalClimbOptimizer, + ) lama_register["AdaptivePrecisionRotationalClimbOptimizer"] = AdaptivePrecisionRotationalClimbOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePrecisionRotationalClimbOptimizer").set_name("LLAMAAdaptivePrecisionRotationalClimbOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionRotationalClimbOptimizer" + ).set_name("LLAMAAdaptivePrecisionRotationalClimbOptimizer", register=True) +except Exception as e: # AdaptivePrecisionRotationalClimbOptimizer print("AdaptivePrecisionRotationalClimbOptimizer can not be imported: ", e) -try: +try: # AdaptivePrecisionSearch from nevergrad.optimization.lama.AdaptivePrecisionSearch import AdaptivePrecisionSearch lama_register["AdaptivePrecisionSearch"] = AdaptivePrecisionSearch - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch").set_name("LLAMAAdaptivePrecisionSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAAdaptivePrecisionSearch").set_name( + "LLAMAAdaptivePrecisionSearch", register=True + ) +except Exception as e: # AdaptivePrecisionSearch print("AdaptivePrecisionSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptivePrecisionStrategicOptimizer import AdaptivePrecisionStrategicOptimizer +try: # AdaptivePrecisionStrategicOptimizer + from nevergrad.optimization.lama.AdaptivePrecisionStrategicOptimizer import ( + AdaptivePrecisionStrategicOptimizer, + ) lama_register["AdaptivePrecisionStrategicOptimizer"] = AdaptivePrecisionStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptivePrecisionStrategicOptimizer").set_name("LLAMAAdaptivePrecisionStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptivePrecisionStrategicOptimizer" + ).set_name("LLAMAAdaptivePrecisionStrategicOptimizer", register=True) +except Exception as e: # AdaptivePrecisionStrategicOptimizer print("AdaptivePrecisionStrategicOptimizer can not be imported: ", e) -try: +try: # AdaptiveQGSA from nevergrad.optimization.lama.AdaptiveQGSA import AdaptiveQGSA lama_register["AdaptiveQGSA"] = AdaptiveQGSA - res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQGSA = NonObjectOptimizer(method="LLAMAAdaptiveQGSA").set_name("LLAMAAdaptiveQGSA", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQGSA = NonObjectOptimizer(method="LLAMAAdaptiveQGSA").set_name( + "LLAMAAdaptiveQGSA", register=True + ) +except Exception as e: # AdaptiveQGSA print("AdaptiveQGSA can not be imported: ", e) -try: +try: # AdaptiveQGSA_EC from nevergrad.optimization.lama.AdaptiveQGSA_EC import AdaptiveQGSA_EC lama_register["AdaptiveQGSA_EC"] = AdaptiveQGSA_EC - res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQGSA_EC = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC").set_name("LLAMAAdaptiveQGSA_EC", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQGSA_EC = NonObjectOptimizer(method="LLAMAAdaptiveQGSA_EC").set_name( + "LLAMAAdaptiveQGSA_EC", register=True + ) +except Exception as e: # AdaptiveQGSA_EC print("AdaptiveQGSA_EC can not be imported: ", e) -try: +try: # AdaptiveQuantumAnnealingDE from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDE import AdaptiveQuantumAnnealingDE lama_register["AdaptiveQuantumAnnealingDE"] = AdaptiveQuantumAnnealingDE - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumAnnealingDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE").set_name("LLAMAAdaptiveQuantumAnnealingDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumAnnealingDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDE").set_name( + "LLAMAAdaptiveQuantumAnnealingDE", register=True + ) +except Exception as e: # AdaptiveQuantumAnnealingDE print("AdaptiveQuantumAnnealingDE can not be imported: ", e) -try: +try: # AdaptiveQuantumAnnealingDEv2 from nevergrad.optimization.lama.AdaptiveQuantumAnnealingDEv2 import AdaptiveQuantumAnnealingDEv2 lama_register["AdaptiveQuantumAnnealingDEv2"] = AdaptiveQuantumAnnealingDEv2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDEv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumAnnealingDEv2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDEv2").set_name("LLAMAAdaptiveQuantumAnnealingDEv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumAnnealingDEv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumAnnealingDEv2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumAnnealingDEv2" + ).set_name("LLAMAAdaptiveQuantumAnnealingDEv2", register=True) +except Exception as e: # AdaptiveQuantumAnnealingDEv2 print("AdaptiveQuantumAnnealingDEv2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumCognitionOptimizerV3 import AdaptiveQuantumCognitionOptimizerV3 +try: # AdaptiveQuantumCognitionOptimizerV3 + from nevergrad.optimization.lama.AdaptiveQuantumCognitionOptimizerV3 import ( + AdaptiveQuantumCognitionOptimizerV3, + ) lama_register["AdaptiveQuantumCognitionOptimizerV3"] = AdaptiveQuantumCognitionOptimizerV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCognitionOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumCognitionOptimizerV3 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCognitionOptimizerV3").set_name("LLAMAAdaptiveQuantumCognitionOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCognitionOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumCognitionOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumCognitionOptimizerV3" + ).set_name("LLAMAAdaptiveQuantumCognitionOptimizerV3", register=True) +except Exception as e: # AdaptiveQuantumCognitionOptimizerV3 print("AdaptiveQuantumCognitionOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumCrossoverOptimizer import AdaptiveQuantumCrossoverOptimizer +try: # AdaptiveQuantumCrossoverOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumCrossoverOptimizer import ( + AdaptiveQuantumCrossoverOptimizer, + ) lama_register["AdaptiveQuantumCrossoverOptimizer"] = AdaptiveQuantumCrossoverOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCrossoverOptimizer").set_name("LLAMAAdaptiveQuantumCrossoverOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumCrossoverOptimizer" + ).set_name("LLAMAAdaptiveQuantumCrossoverOptimizer", register=True) +except Exception as e: # AdaptiveQuantumCrossoverOptimizer print("AdaptiveQuantumCrossoverOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolution import AdaptiveQuantumDifferentialEvolution +try: # AdaptiveQuantumDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolution import ( + AdaptiveQuantumDifferentialEvolution, + ) lama_register["AdaptiveQuantumDifferentialEvolution"] = AdaptiveQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolution").set_name("LLAMAAdaptiveQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolution print("AdaptiveQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionPlus import AdaptiveQuantumDifferentialEvolutionPlus +try: # AdaptiveQuantumDifferentialEvolutionPlus + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionPlus import ( + AdaptiveQuantumDifferentialEvolutionPlus, + ) lama_register["AdaptiveQuantumDifferentialEvolutionPlus"] = AdaptiveQuantumDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionPlus" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionPlus", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionPlus print("AdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionV2 import AdaptiveQuantumDifferentialEvolutionV2 +try: # AdaptiveQuantumDifferentialEvolutionV2 + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionV2 import ( + AdaptiveQuantumDifferentialEvolutionV2, + ) lama_register["AdaptiveQuantumDifferentialEvolutionV2"] = AdaptiveQuantumDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionV2").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionV2" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionV2", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionV2 print("AdaptiveQuantumDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - - lama_register["AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True) -except Exception as e: - print("AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - - lama_register["AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) -except Exception as e: +try: # AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( + AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" + ).set_name( + "LLAMAAdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True + ) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + print( + "AdaptiveQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e + ) +try: # AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( + AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch print("AdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch import AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch - - lama_register["AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch"] = AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch", register=True) -except Exception as e: +try: # AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch import ( + AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch print("AdaptiveQuantumDifferentialEvolutionWithEliteGuidedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory import AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory - - lama_register["AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory"] = AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory", register=True) -except Exception as e: +try: # AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory import ( + AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory"] = ( + AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory print("AdaptiveQuantumDifferentialEvolutionWithElitistLearningAndMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement import AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement - - lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement"] = AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement", register=True) -except Exception as e: - print("AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch import AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch - - lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch"] = AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch").set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch", register=True) -except Exception as e: +try: # AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement import ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement"] = ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement" + ).set_name( + "LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement", register=True + ) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement + print( + "AdaptiveQuantumDifferentialEvolutionWithEnhancedElitismAndMemoryRefinement can not be imported: ", e + ) +try: # AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch + from nevergrad.optimization.lama.AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch import ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch, + ) + + lama_register["AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch"] = ( + AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch" + ).set_name("LLAMAAdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch", register=True) +except Exception as e: # AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch print("AdaptiveQuantumDifferentialEvolutionWithEnhancedLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDiversityEnhancerV7 import AdaptiveQuantumDiversityEnhancerV7 +try: # AdaptiveQuantumDiversityEnhancerV7 + from nevergrad.optimization.lama.AdaptiveQuantumDiversityEnhancerV7 import ( + AdaptiveQuantumDiversityEnhancerV7, + ) lama_register["AdaptiveQuantumDiversityEnhancerV7"] = AdaptiveQuantumDiversityEnhancerV7 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDiversityEnhancerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDiversityEnhancerV7 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDiversityEnhancerV7").set_name("LLAMAAdaptiveQuantumDiversityEnhancerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDiversityEnhancerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDiversityEnhancerV7 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDiversityEnhancerV7" + ).set_name("LLAMAAdaptiveQuantumDiversityEnhancerV7", register=True) +except Exception as e: # AdaptiveQuantumDiversityEnhancerV7 print("AdaptiveQuantumDiversityEnhancerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumDynamicTuningOptimizer import AdaptiveQuantumDynamicTuningOptimizer +try: # AdaptiveQuantumDynamicTuningOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumDynamicTuningOptimizer import ( + AdaptiveQuantumDynamicTuningOptimizer, + ) lama_register["AdaptiveQuantumDynamicTuningOptimizer"] = AdaptiveQuantumDynamicTuningOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDynamicTuningOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumDynamicTuningOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDynamicTuningOptimizer").set_name("LLAMAAdaptiveQuantumDynamicTuningOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumDynamicTuningOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumDynamicTuningOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumDynamicTuningOptimizer" + ).set_name("LLAMAAdaptiveQuantumDynamicTuningOptimizer", register=True) +except Exception as e: # AdaptiveQuantumDynamicTuningOptimizer print("AdaptiveQuantumDynamicTuningOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumEliteDifferentialEvolution import AdaptiveQuantumEliteDifferentialEvolution +try: # AdaptiveQuantumEliteDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveQuantumEliteDifferentialEvolution import ( + AdaptiveQuantumEliteDifferentialEvolution, + ) lama_register["AdaptiveQuantumEliteDifferentialEvolution"] = AdaptiveQuantumEliteDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteDifferentialEvolution").set_name("LLAMAAdaptiveQuantumEliteDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEliteDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuantumEliteDifferentialEvolution", register=True) +except Exception as e: # AdaptiveQuantumEliteDifferentialEvolution print("AdaptiveQuantumEliteDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumEliteMemeticOptimizer import AdaptiveQuantumEliteMemeticOptimizer +try: # AdaptiveQuantumEliteMemeticOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumEliteMemeticOptimizer import ( + AdaptiveQuantumEliteMemeticOptimizer, + ) lama_register["AdaptiveQuantumEliteMemeticOptimizer"] = AdaptiveQuantumEliteMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteMemeticOptimizer").set_name("LLAMAAdaptiveQuantumEliteMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEliteMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumEliteMemeticOptimizer", register=True) +except Exception as e: # AdaptiveQuantumEliteMemeticOptimizer print("AdaptiveQuantumEliteMemeticOptimizer can not be imported: ", e) -try: +try: # AdaptiveQuantumEntropyDE from nevergrad.optimization.lama.AdaptiveQuantumEntropyDE import AdaptiveQuantumEntropyDE lama_register["AdaptiveQuantumEntropyDE"] = AdaptiveQuantumEntropyDE - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE").set_name("LLAMAAdaptiveQuantumEntropyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEntropyDE").set_name( + "LLAMAAdaptiveQuantumEntropyDE", register=True + ) +except Exception as e: # AdaptiveQuantumEntropyDE print("AdaptiveQuantumEntropyDE can not be imported: ", e) -try: +try: # AdaptiveQuantumEvolutionStrategy from nevergrad.optimization.lama.AdaptiveQuantumEvolutionStrategy import AdaptiveQuantumEvolutionStrategy lama_register["AdaptiveQuantumEvolutionStrategy"] = AdaptiveQuantumEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolutionStrategy").set_name("LLAMAAdaptiveQuantumEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEvolutionStrategy" + ).set_name("LLAMAAdaptiveQuantumEvolutionStrategy", register=True) +except Exception as e: # AdaptiveQuantumEvolutionStrategy print("AdaptiveQuantumEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumEvolvedDiversityExplorerV15 import AdaptiveQuantumEvolvedDiversityExplorerV15 +try: # AdaptiveQuantumEvolvedDiversityExplorerV15 + from nevergrad.optimization.lama.AdaptiveQuantumEvolvedDiversityExplorerV15 import ( + AdaptiveQuantumEvolvedDiversityExplorerV15, + ) lama_register["AdaptiveQuantumEvolvedDiversityExplorerV15"] = AdaptiveQuantumEvolvedDiversityExplorerV15 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15").set_name("LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15" + ).set_name("LLAMAAdaptiveQuantumEvolvedDiversityExplorerV15", register=True) +except Exception as e: # AdaptiveQuantumEvolvedDiversityExplorerV15 print("AdaptiveQuantumEvolvedDiversityExplorerV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch import AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch - - lama_register["AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch"] = AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch").set_name("LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch", register=True) -except Exception as e: +try: # AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch import ( + AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch, + ) + + lama_register["AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch"] = ( + AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch" + ).set_name("LLAMAAdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch", register=True) +except Exception as e: # AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch print("AdaptiveQuantumGradientBoostedEvolutionaryMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedMemeticSearch import AdaptiveQuantumGradientBoostedMemeticSearch +try: # AdaptiveQuantumGradientBoostedMemeticSearch + from nevergrad.optimization.lama.AdaptiveQuantumGradientBoostedMemeticSearch import ( + AdaptiveQuantumGradientBoostedMemeticSearch, + ) lama_register["AdaptiveQuantumGradientBoostedMemeticSearch"] = AdaptiveQuantumGradientBoostedMemeticSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch").set_name("LLAMAAdaptiveQuantumGradientBoostedMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMAAdaptiveQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: # AdaptiveQuantumGradientBoostedMemeticSearch print("AdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientEnhancedOptimizer import AdaptiveQuantumGradientEnhancedOptimizer +try: # AdaptiveQuantumGradientEnhancedOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumGradientEnhancedOptimizer import ( + AdaptiveQuantumGradientEnhancedOptimizer, + ) lama_register["AdaptiveQuantumGradientEnhancedOptimizer"] = AdaptiveQuantumGradientEnhancedOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientEnhancedOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer").set_name("LLAMAAdaptiveQuantumGradientEnhancedOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientEnhancedOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientEnhancedOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientEnhancedOptimizer", register=True) +except Exception as e: # AdaptiveQuantumGradientEnhancedOptimizer print("AdaptiveQuantumGradientEnhancedOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimization import AdaptiveQuantumGradientExplorationOptimization - - lama_register["AdaptiveQuantumGradientExplorationOptimization"] = AdaptiveQuantumGradientExplorationOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimization").set_name("LLAMAAdaptiveQuantumGradientExplorationOptimization", register=True) -except Exception as e: +try: # AdaptiveQuantumGradientExplorationOptimization + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimization import ( + AdaptiveQuantumGradientExplorationOptimization, + ) + + lama_register["AdaptiveQuantumGradientExplorationOptimization"] = ( + AdaptiveQuantumGradientExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientExplorationOptimization" + ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimization", register=True) +except Exception as e: # AdaptiveQuantumGradientExplorationOptimization print("AdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimizationV2 import AdaptiveQuantumGradientExplorationOptimizationV2 - - lama_register["AdaptiveQuantumGradientExplorationOptimizationV2"] = AdaptiveQuantumGradientExplorationOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2").set_name("LLAMAAdaptiveQuantumGradientExplorationOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveQuantumGradientExplorationOptimizationV2 + from nevergrad.optimization.lama.AdaptiveQuantumGradientExplorationOptimizationV2 import ( + AdaptiveQuantumGradientExplorationOptimizationV2, + ) + + lama_register["AdaptiveQuantumGradientExplorationOptimizationV2"] = ( + AdaptiveQuantumGradientExplorationOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientExplorationOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumGradientExplorationOptimizationV2", register=True) +except Exception as e: # AdaptiveQuantumGradientExplorationOptimizationV2 print("AdaptiveQuantumGradientExplorationOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumGradientHybridOptimizer import AdaptiveQuantumGradientHybridOptimizer +try: # AdaptiveQuantumGradientHybridOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumGradientHybridOptimizer import ( + AdaptiveQuantumGradientHybridOptimizer, + ) lama_register["AdaptiveQuantumGradientHybridOptimizer"] = AdaptiveQuantumGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientHybridOptimizer").set_name("LLAMAAdaptiveQuantumGradientHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientHybridOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientHybridOptimizer", register=True) +except Exception as e: # AdaptiveQuantumGradientHybridOptimizer print("AdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveQuantumGradientOptimizer from nevergrad.optimization.lama.AdaptiveQuantumGradientOptimizer import AdaptiveQuantumGradientOptimizer lama_register["AdaptiveQuantumGradientOptimizer"] = AdaptiveQuantumGradientOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumGradientOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientOptimizer").set_name("LLAMAAdaptiveQuantumGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumGradientOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumGradientOptimizer" + ).set_name("LLAMAAdaptiveQuantumGradientOptimizer", register=True) +except Exception as e: # AdaptiveQuantumGradientOptimizer print("AdaptiveQuantumGradientOptimizer can not be imported: ", e) -try: +try: # AdaptiveQuantumHarmonizedPSO from nevergrad.optimization.lama.AdaptiveQuantumHarmonizedPSO import AdaptiveQuantumHarmonizedPSO lama_register["AdaptiveQuantumHarmonizedPSO"] = AdaptiveQuantumHarmonizedPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHarmonizedPSO").set_name("LLAMAAdaptiveQuantumHarmonizedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumHarmonizedPSO = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHarmonizedPSO" + ).set_name("LLAMAAdaptiveQuantumHarmonizedPSO", register=True) +except Exception as e: # AdaptiveQuantumHarmonizedPSO print("AdaptiveQuantumHarmonizedPSO can not be imported: ", e) -try: +try: # AdaptiveQuantumHybridOptimizer from nevergrad.optimization.lama.AdaptiveQuantumHybridOptimizer import AdaptiveQuantumHybridOptimizer lama_register["AdaptiveQuantumHybridOptimizer"] = AdaptiveQuantumHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridOptimizer").set_name("LLAMAAdaptiveQuantumHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHybridOptimizer" + ).set_name("LLAMAAdaptiveQuantumHybridOptimizer", register=True) +except Exception as e: # AdaptiveQuantumHybridOptimizer print("AdaptiveQuantumHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveQuantumHybridSearchV2 from nevergrad.optimization.lama.AdaptiveQuantumHybridSearchV2 import AdaptiveQuantumHybridSearchV2 lama_register["AdaptiveQuantumHybridSearchV2"] = AdaptiveQuantumHybridSearchV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumHybridSearchV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridSearchV2").set_name("LLAMAAdaptiveQuantumHybridSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumHybridSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumHybridSearchV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumHybridSearchV2" + ).set_name("LLAMAAdaptiveQuantumHybridSearchV2", register=True) +except Exception as e: # AdaptiveQuantumHybridSearchV2 print("AdaptiveQuantumHybridSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumInfluencedMemeticAlgorithm import AdaptiveQuantumInfluencedMemeticAlgorithm +try: # AdaptiveQuantumInfluencedMemeticAlgorithm + from nevergrad.optimization.lama.AdaptiveQuantumInfluencedMemeticAlgorithm import ( + AdaptiveQuantumInfluencedMemeticAlgorithm, + ) lama_register["AdaptiveQuantumInfluencedMemeticAlgorithm"] = AdaptiveQuantumInfluencedMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm").set_name("LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm" + ).set_name("LLAMAAdaptiveQuantumInfluencedMemeticAlgorithm", register=True) +except Exception as e: # AdaptiveQuantumInfluencedMemeticAlgorithm print("AdaptiveQuantumInfluencedMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumInformedDifferentialStrategy import AdaptiveQuantumInformedDifferentialStrategy +try: # AdaptiveQuantumInformedDifferentialStrategy + from nevergrad.optimization.lama.AdaptiveQuantumInformedDifferentialStrategy import ( + AdaptiveQuantumInformedDifferentialStrategy, + ) lama_register["AdaptiveQuantumInformedDifferentialStrategy"] = AdaptiveQuantumInformedDifferentialStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumInformedDifferentialStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedDifferentialStrategy").set_name("LLAMAAdaptiveQuantumInformedDifferentialStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumInformedDifferentialStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInformedDifferentialStrategy" + ).set_name("LLAMAAdaptiveQuantumInformedDifferentialStrategy", register=True) +except Exception as e: # AdaptiveQuantumInformedDifferentialStrategy print("AdaptiveQuantumInformedDifferentialStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumInformedGradientEnhancer import AdaptiveQuantumInformedGradientEnhancer +try: # AdaptiveQuantumInformedGradientEnhancer + from nevergrad.optimization.lama.AdaptiveQuantumInformedGradientEnhancer import ( + AdaptiveQuantumInformedGradientEnhancer, + ) lama_register["AdaptiveQuantumInformedGradientEnhancer"] = AdaptiveQuantumInformedGradientEnhancer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedGradientEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumInformedGradientEnhancer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedGradientEnhancer").set_name("LLAMAAdaptiveQuantumInformedGradientEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumInformedGradientEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumInformedGradientEnhancer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumInformedGradientEnhancer" + ).set_name("LLAMAAdaptiveQuantumInformedGradientEnhancer", register=True) +except Exception as e: # AdaptiveQuantumInformedGradientEnhancer print("AdaptiveQuantumInformedGradientEnhancer can not be imported: ", e) -try: +try: # AdaptiveQuantumLeapOptimizer from nevergrad.optimization.lama.AdaptiveQuantumLeapOptimizer import AdaptiveQuantumLeapOptimizer lama_register["AdaptiveQuantumLeapOptimizer"] = AdaptiveQuantumLeapOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLeapOptimizer").set_name("LLAMAAdaptiveQuantumLeapOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLeapOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLeapOptimizer" + ).set_name("LLAMAAdaptiveQuantumLeapOptimizer", register=True) +except Exception as e: # AdaptiveQuantumLeapOptimizer print("AdaptiveQuantumLeapOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialEnhancedOptimizer import AdaptiveQuantumLevyDifferentialEnhancedOptimizer - - lama_register["AdaptiveQuantumLevyDifferentialEnhancedOptimizer"] = AdaptiveQuantumLevyDifferentialEnhancedOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer").set_name("LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer", register=True) -except Exception as e: +try: # AdaptiveQuantumLevyDifferentialEnhancedOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialEnhancedOptimizer import ( + AdaptiveQuantumLevyDifferentialEnhancedOptimizer, + ) + + lama_register["AdaptiveQuantumLevyDifferentialEnhancedOptimizer"] = ( + AdaptiveQuantumLevyDifferentialEnhancedOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialEnhancedOptimizer", register=True) +except Exception as e: # AdaptiveQuantumLevyDifferentialEnhancedOptimizer print("AdaptiveQuantumLevyDifferentialEnhancedOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizer import AdaptiveQuantumLevyDifferentialOptimizer +try: # AdaptiveQuantumLevyDifferentialOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizer import ( + AdaptiveQuantumLevyDifferentialOptimizer, + ) lama_register["AdaptiveQuantumLevyDifferentialOptimizer"] = AdaptiveQuantumLevyDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer").set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizer", register=True) +except Exception as e: # AdaptiveQuantumLevyDifferentialOptimizer print("AdaptiveQuantumLevyDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizerV2 import AdaptiveQuantumLevyDifferentialOptimizerV2 +try: # AdaptiveQuantumLevyDifferentialOptimizerV2 + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialOptimizerV2 import ( + AdaptiveQuantumLevyDifferentialOptimizerV2, + ) lama_register["AdaptiveQuantumLevyDifferentialOptimizerV2"] = AdaptiveQuantumLevyDifferentialOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2").set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialOptimizerV2", register=True) +except Exception as e: # AdaptiveQuantumLevyDifferentialOptimizerV2 print("AdaptiveQuantumLevyDifferentialOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 import AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 - - lama_register["AdaptiveQuantumLevyDifferentialSwarmOptimizationV2"] = AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 + from nevergrad.optimization.lama.AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 import ( + AdaptiveQuantumLevyDifferentialSwarmOptimizationV2, + ) + + lama_register["AdaptiveQuantumLevyDifferentialSwarmOptimizationV2"] = ( + AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumLevyDifferentialSwarmOptimizationV2", register=True) +except Exception as e: # AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 print("AdaptiveQuantumLevyDifferentialSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicOptimization import AdaptiveQuantumLevyDynamicOptimization +try: # AdaptiveQuantumLevyDynamicOptimization + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicOptimization import ( + AdaptiveQuantumLevyDynamicOptimization, + ) lama_register["AdaptiveQuantumLevyDynamicOptimization"] = AdaptiveQuantumLevyDynamicOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDynamicOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicOptimization").set_name("LLAMAAdaptiveQuantumLevyDynamicOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDynamicOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicOptimization", register=True) +except Exception as e: # AdaptiveQuantumLevyDynamicOptimization print("AdaptiveQuantumLevyDynamicOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimization import AdaptiveQuantumLevyDynamicSwarmOptimization +try: # AdaptiveQuantumLevyDynamicSwarmOptimization + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimization import ( + AdaptiveQuantumLevyDynamicSwarmOptimization, + ) lama_register["AdaptiveQuantumLevyDynamicSwarmOptimization"] = AdaptiveQuantumLevyDynamicSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization").set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimization", register=True) +except Exception as e: # AdaptiveQuantumLevyDynamicSwarmOptimization print("AdaptiveQuantumLevyDynamicSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimizationV2 import AdaptiveQuantumLevyDynamicSwarmOptimizationV2 - - lama_register["AdaptiveQuantumLevyDynamicSwarmOptimizationV2"] = AdaptiveQuantumLevyDynamicSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2", register=True) -except Exception as e: +try: # AdaptiveQuantumLevyDynamicSwarmOptimizationV2 + from nevergrad.optimization.lama.AdaptiveQuantumLevyDynamicSwarmOptimizationV2 import ( + AdaptiveQuantumLevyDynamicSwarmOptimizationV2, + ) + + lama_register["AdaptiveQuantumLevyDynamicSwarmOptimizationV2"] = ( + AdaptiveQuantumLevyDynamicSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumLevyDynamicSwarmOptimizationV2", register=True) +except Exception as e: # AdaptiveQuantumLevyDynamicSwarmOptimizationV2 print("AdaptiveQuantumLevyDynamicSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyEnhancedDifferentialOptimizer import AdaptiveQuantumLevyEnhancedDifferentialOptimizer - - lama_register["AdaptiveQuantumLevyEnhancedDifferentialOptimizer"] = AdaptiveQuantumLevyEnhancedDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer").set_name("LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer", register=True) -except Exception as e: +try: # AdaptiveQuantumLevyEnhancedDifferentialOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumLevyEnhancedDifferentialOptimizer import ( + AdaptiveQuantumLevyEnhancedDifferentialOptimizer, + ) + + lama_register["AdaptiveQuantumLevyEnhancedDifferentialOptimizer"] = ( + AdaptiveQuantumLevyEnhancedDifferentialOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyEnhancedDifferentialOptimizer", register=True) +except Exception as e: # AdaptiveQuantumLevyEnhancedDifferentialOptimizer print("AdaptiveQuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizer import AdaptiveQuantumLevyMemeticOptimizer +try: # AdaptiveQuantumLevyMemeticOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizer import ( + AdaptiveQuantumLevyMemeticOptimizer, + ) lama_register["AdaptiveQuantumLevyMemeticOptimizer"] = AdaptiveQuantumLevyMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizer").set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizer", register=True) +except Exception as e: # AdaptiveQuantumLevyMemeticOptimizer print("AdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizerV2 import AdaptiveQuantumLevyMemeticOptimizerV2 +try: # AdaptiveQuantumLevyMemeticOptimizerV2 + from nevergrad.optimization.lama.AdaptiveQuantumLevyMemeticOptimizerV2 import ( + AdaptiveQuantumLevyMemeticOptimizerV2, + ) lama_register["AdaptiveQuantumLevyMemeticOptimizerV2"] = AdaptiveQuantumLevyMemeticOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2").set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyMemeticOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumLevyMemeticOptimizerV2", register=True) +except Exception as e: # AdaptiveQuantumLevyMemeticOptimizerV2 print("AdaptiveQuantumLevyMemeticOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevySwarmOptimization import AdaptiveQuantumLevySwarmOptimization +try: # AdaptiveQuantumLevySwarmOptimization + from nevergrad.optimization.lama.AdaptiveQuantumLevySwarmOptimization import ( + AdaptiveQuantumLevySwarmOptimization, + ) lama_register["AdaptiveQuantumLevySwarmOptimization"] = AdaptiveQuantumLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevySwarmOptimization").set_name("LLAMAAdaptiveQuantumLevySwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevySwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumLevySwarmOptimization", register=True) +except Exception as e: # AdaptiveQuantumLevySwarmOptimization print("AdaptiveQuantumLevySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumLevyTreeOptimization import AdaptiveQuantumLevyTreeOptimization +try: # AdaptiveQuantumLevyTreeOptimization + from nevergrad.optimization.lama.AdaptiveQuantumLevyTreeOptimization import ( + AdaptiveQuantumLevyTreeOptimization, + ) lama_register["AdaptiveQuantumLevyTreeOptimization"] = AdaptiveQuantumLevyTreeOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyTreeOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLevyTreeOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyTreeOptimization").set_name("LLAMAAdaptiveQuantumLevyTreeOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLevyTreeOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLevyTreeOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumLevyTreeOptimization" + ).set_name("LLAMAAdaptiveQuantumLevyTreeOptimization", register=True) +except Exception as e: # AdaptiveQuantumLevyTreeOptimization print("AdaptiveQuantumLevyTreeOptimization can not be imported: ", e) -try: +try: # AdaptiveQuantumLocalSearch from nevergrad.optimization.lama.AdaptiveQuantumLocalSearch import AdaptiveQuantumLocalSearch lama_register["AdaptiveQuantumLocalSearch"] = AdaptiveQuantumLocalSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch").set_name("LLAMAAdaptiveQuantumLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAAdaptiveQuantumLocalSearch").set_name( + "LLAMAAdaptiveQuantumLocalSearch", register=True + ) +except Exception as e: # AdaptiveQuantumLocalSearch print("AdaptiveQuantumLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticEvolutionaryOptimizer import AdaptiveQuantumMemeticEvolutionaryOptimizer +try: # AdaptiveQuantumMemeticEvolutionaryOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumMemeticEvolutionaryOptimizer import ( + AdaptiveQuantumMemeticEvolutionaryOptimizer, + ) lama_register["AdaptiveQuantumMemeticEvolutionaryOptimizer"] = AdaptiveQuantumMemeticEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer").set_name("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer" + ).set_name("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer", register=True) +except Exception as e: # AdaptiveQuantumMemeticEvolutionaryOptimizer print("AdaptiveQuantumMemeticEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticGradientBoost import AdaptiveQuantumMemeticGradientBoost +try: # AdaptiveQuantumMemeticGradientBoost + from nevergrad.optimization.lama.AdaptiveQuantumMemeticGradientBoost import ( + AdaptiveQuantumMemeticGradientBoost, + ) lama_register["AdaptiveQuantumMemeticGradientBoost"] = AdaptiveQuantumMemeticGradientBoost - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticGradientBoost = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticGradientBoost").set_name("LLAMAAdaptiveQuantumMemeticGradientBoost", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticGradientBoost = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticGradientBoost" + ).set_name("LLAMAAdaptiveQuantumMemeticGradientBoost", register=True) +except Exception as e: # AdaptiveQuantumMemeticGradientBoost print("AdaptiveQuantumMemeticGradientBoost can not be imported: ", e) -try: +try: # AdaptiveQuantumMemeticOptimizer from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizer import AdaptiveQuantumMemeticOptimizer lama_register["AdaptiveQuantumMemeticOptimizer"] = AdaptiveQuantumMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizer").set_name("LLAMAAdaptiveQuantumMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizer" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizer", register=True) +except Exception as e: # AdaptiveQuantumMemeticOptimizer print("AdaptiveQuantumMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerPlus import AdaptiveQuantumMemeticOptimizerPlus +try: # AdaptiveQuantumMemeticOptimizerPlus + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerPlus import ( + AdaptiveQuantumMemeticOptimizerPlus, + ) lama_register["AdaptiveQuantumMemeticOptimizerPlus"] = AdaptiveQuantumMemeticOptimizerPlus - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticOptimizerPlus = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerPlus").set_name("LLAMAAdaptiveQuantumMemeticOptimizerPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticOptimizerPlus = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerPlus" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerPlus", register=True) +except Exception as e: # AdaptiveQuantumMemeticOptimizerPlus print("AdaptiveQuantumMemeticOptimizerPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV2 import AdaptiveQuantumMemeticOptimizerV2 +try: # AdaptiveQuantumMemeticOptimizerV2 + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV2 import ( + AdaptiveQuantumMemeticOptimizerV2, + ) lama_register["AdaptiveQuantumMemeticOptimizerV2"] = AdaptiveQuantumMemeticOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV2").set_name("LLAMAAdaptiveQuantumMemeticOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV2", register=True) +except Exception as e: # AdaptiveQuantumMemeticOptimizerV2 print("AdaptiveQuantumMemeticOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV3 import AdaptiveQuantumMemeticOptimizerV3 +try: # AdaptiveQuantumMemeticOptimizerV3 + from nevergrad.optimization.lama.AdaptiveQuantumMemeticOptimizerV3 import ( + AdaptiveQuantumMemeticOptimizerV3, + ) lama_register["AdaptiveQuantumMemeticOptimizerV3"] = AdaptiveQuantumMemeticOptimizerV3 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMemeticOptimizerV3 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV3").set_name("LLAMAAdaptiveQuantumMemeticOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMemeticOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMemeticOptimizerV3" + ).set_name("LLAMAAdaptiveQuantumMemeticOptimizerV3", register=True) +except Exception as e: # AdaptiveQuantumMemeticOptimizerV3 print("AdaptiveQuantumMemeticOptimizerV3 can not be imported: ", e) -try: +try: # AdaptiveQuantumMetaheuristic from nevergrad.optimization.lama.AdaptiveQuantumMetaheuristic import AdaptiveQuantumMetaheuristic lama_register["AdaptiveQuantumMetaheuristic"] = AdaptiveQuantumMetaheuristic - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumMetaheuristic = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMetaheuristic").set_name("LLAMAAdaptiveQuantumMetaheuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumMetaheuristic" + ).set_name("LLAMAAdaptiveQuantumMetaheuristic", register=True) +except Exception as e: # AdaptiveQuantumMetaheuristic print("AdaptiveQuantumMetaheuristic can not be imported: ", e) -try: +try: # AdaptiveQuantumPSO from nevergrad.optimization.lama.AdaptiveQuantumPSO import AdaptiveQuantumPSO lama_register["AdaptiveQuantumPSO"] = AdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO").set_name("LLAMAAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSO").set_name( + "LLAMAAdaptiveQuantumPSO", register=True + ) +except Exception as e: # AdaptiveQuantumPSO print("AdaptiveQuantumPSO can not be imported: ", e) -try: +try: # AdaptiveQuantumPSOEnhanced from nevergrad.optimization.lama.AdaptiveQuantumPSOEnhanced import AdaptiveQuantumPSOEnhanced lama_register["AdaptiveQuantumPSOEnhanced"] = AdaptiveQuantumPSOEnhanced - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumPSOEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced").set_name("LLAMAAdaptiveQuantumPSOEnhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumPSOEnhanced = NonObjectOptimizer(method="LLAMAAdaptiveQuantumPSOEnhanced").set_name( + "LLAMAAdaptiveQuantumPSOEnhanced", register=True + ) +except Exception as e: # AdaptiveQuantumPSOEnhanced print("AdaptiveQuantumPSOEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumParticleDifferentialSwarm import AdaptiveQuantumParticleDifferentialSwarm +try: # AdaptiveQuantumParticleDifferentialSwarm + from nevergrad.optimization.lama.AdaptiveQuantumParticleDifferentialSwarm import ( + AdaptiveQuantumParticleDifferentialSwarm, + ) lama_register["AdaptiveQuantumParticleDifferentialSwarm"] = AdaptiveQuantumParticleDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumParticleDifferentialSwarm = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleDifferentialSwarm").set_name("LLAMAAdaptiveQuantumParticleDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumParticleDifferentialSwarm = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumParticleDifferentialSwarm" + ).set_name("LLAMAAdaptiveQuantumParticleDifferentialSwarm", register=True) +except Exception as e: # AdaptiveQuantumParticleDifferentialSwarm print("AdaptiveQuantumParticleDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumParticleSwarmOptimization import AdaptiveQuantumParticleSwarmOptimization +try: # AdaptiveQuantumParticleSwarmOptimization + from nevergrad.optimization.lama.AdaptiveQuantumParticleSwarmOptimization import ( + AdaptiveQuantumParticleSwarmOptimization, + ) lama_register["AdaptiveQuantumParticleSwarmOptimization"] = AdaptiveQuantumParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleSwarmOptimization").set_name("LLAMAAdaptiveQuantumParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumParticleSwarmOptimization" + ).set_name("LLAMAAdaptiveQuantumParticleSwarmOptimization", register=True) +except Exception as e: # AdaptiveQuantumParticleSwarmOptimization print("AdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumResonanceOptimizer import AdaptiveQuantumResonanceOptimizer +try: # AdaptiveQuantumResonanceOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumResonanceOptimizer import ( + AdaptiveQuantumResonanceOptimizer, + ) lama_register["AdaptiveQuantumResonanceOptimizer"] = AdaptiveQuantumResonanceOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumResonanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumResonanceOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumResonanceOptimizer").set_name("LLAMAAdaptiveQuantumResonanceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumResonanceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumResonanceOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumResonanceOptimizer" + ).set_name("LLAMAAdaptiveQuantumResonanceOptimizer", register=True) +except Exception as e: # AdaptiveQuantumResonanceOptimizer print("AdaptiveQuantumResonanceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumStrategicOptimizer import AdaptiveQuantumStrategicOptimizer +try: # AdaptiveQuantumStrategicOptimizer + from nevergrad.optimization.lama.AdaptiveQuantumStrategicOptimizer import ( + AdaptiveQuantumStrategicOptimizer, + ) lama_register["AdaptiveQuantumStrategicOptimizer"] = AdaptiveQuantumStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumStrategicOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveQuantumStrategicOptimizer").set_name("LLAMAAdaptiveQuantumStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumStrategicOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumStrategicOptimizer" + ).set_name("LLAMAAdaptiveQuantumStrategicOptimizer", register=True) +except Exception as e: # AdaptiveQuantumStrategicOptimizer print("AdaptiveQuantumStrategicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizationV2 import AdaptiveQuantumSwarmOptimizationV2 +try: # AdaptiveQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizationV2 import ( + AdaptiveQuantumSwarmOptimizationV2, + ) lama_register["AdaptiveQuantumSwarmOptimizationV2"] = AdaptiveQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAAdaptiveQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: # AdaptiveQuantumSwarmOptimizationV2 print("AdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) -try: +try: # AdaptiveQuantumSwarmOptimizerV2 from nevergrad.optimization.lama.AdaptiveQuantumSwarmOptimizerV2 import AdaptiveQuantumSwarmOptimizerV2 lama_register["AdaptiveQuantumSwarmOptimizerV2"] = AdaptiveQuantumSwarmOptimizerV2 - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizerV2").set_name("LLAMAAdaptiveQuantumSwarmOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSwarmOptimizerV2" + ).set_name("LLAMAAdaptiveQuantumSwarmOptimizerV2", register=True) +except Exception as e: # AdaptiveQuantumSwarmOptimizerV2 print("AdaptiveQuantumSwarmOptimizerV2 can not be imported: ", e) -try: +try: # AdaptiveQuantumSymbioticStrategy from nevergrad.optimization.lama.AdaptiveQuantumSymbioticStrategy import AdaptiveQuantumSymbioticStrategy lama_register["AdaptiveQuantumSymbioticStrategy"] = AdaptiveQuantumSymbioticStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSymbioticStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuantumSymbioticStrategy = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSymbioticStrategy").set_name("LLAMAAdaptiveQuantumSymbioticStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuantumSymbioticStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuantumSymbioticStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveQuantumSymbioticStrategy" + ).set_name("LLAMAAdaptiveQuantumSymbioticStrategy", register=True) +except Exception as e: # AdaptiveQuantumSymbioticStrategy print("AdaptiveQuantumSymbioticStrategy can not be imported: ", e) -try: +try: # AdaptiveQuasiGradientEvolution from nevergrad.optimization.lama.AdaptiveQuasiGradientEvolution import AdaptiveQuasiGradientEvolution lama_register["AdaptiveQuasiGradientEvolution"] = AdaptiveQuasiGradientEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuasiGradientEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuasiGradientEvolution").set_name("LLAMAAdaptiveQuasiGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuasiGradientEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiGradientEvolution" + ).set_name("LLAMAAdaptiveQuasiGradientEvolution", register=True) +except Exception as e: # AdaptiveQuasiGradientEvolution print("AdaptiveQuasiGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuasiRandomEnhancedDifferentialEvolution import AdaptiveQuasiRandomEnhancedDifferentialEvolution - - lama_register["AdaptiveQuasiRandomEnhancedDifferentialEvolution"] = AdaptiveQuasiRandomEnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution").set_name("LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) -except Exception as e: +try: # AdaptiveQuasiRandomEnhancedDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveQuasiRandomEnhancedDifferentialEvolution import ( + AdaptiveQuasiRandomEnhancedDifferentialEvolution, + ) + + lama_register["AdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( + AdaptiveQuasiRandomEnhancedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution" + ).set_name("LLAMAAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) +except Exception as e: # AdaptiveQuasiRandomEnhancedDifferentialEvolution print("AdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) -try: +try: # AdaptiveQuasiRandomGradientDE from nevergrad.optimization.lama.AdaptiveQuasiRandomGradientDE import AdaptiveQuasiRandomGradientDE lama_register["AdaptiveQuasiRandomGradientDE"] = AdaptiveQuasiRandomGradientDE - res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomGradientDE").set_name("LLAMAAdaptiveQuasiRandomGradientDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMAAdaptiveQuasiRandomGradientDE" + ).set_name("LLAMAAdaptiveQuasiRandomGradientDE", register=True) +except Exception as e: # AdaptiveQuasiRandomGradientDE print("AdaptiveQuasiRandomGradientDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveQuorumWithStrategicMutation import AdaptiveQuorumWithStrategicMutation +try: # AdaptiveQuorumWithStrategicMutation + from nevergrad.optimization.lama.AdaptiveQuorumWithStrategicMutation import ( + AdaptiveQuorumWithStrategicMutation, + ) lama_register["AdaptiveQuorumWithStrategicMutation"] = AdaptiveQuorumWithStrategicMutation - res = NonObjectOptimizer(method="LLAMAAdaptiveQuorumWithStrategicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveQuorumWithStrategicMutation = NonObjectOptimizer(method="LLAMAAdaptiveQuorumWithStrategicMutation").set_name("LLAMAAdaptiveQuorumWithStrategicMutation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveQuorumWithStrategicMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveQuorumWithStrategicMutation = NonObjectOptimizer( + method="LLAMAAdaptiveQuorumWithStrategicMutation" + ).set_name("LLAMAAdaptiveQuorumWithStrategicMutation", register=True) +except Exception as e: # AdaptiveQuorumWithStrategicMutation print("AdaptiveQuorumWithStrategicMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveRefinedGradientBoostedAnnealing import AdaptiveRefinedGradientBoostedAnnealing +try: # AdaptiveRefinedGradientBoostedAnnealing + from nevergrad.optimization.lama.AdaptiveRefinedGradientBoostedAnnealing import ( + AdaptiveRefinedGradientBoostedAnnealing, + ) lama_register["AdaptiveRefinedGradientBoostedAnnealing"] = AdaptiveRefinedGradientBoostedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRefinedGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveRefinedGradientBoostedAnnealing").set_name("LLAMAAdaptiveRefinedGradientBoostedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRefinedGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAAdaptiveRefinedGradientBoostedAnnealing" + ).set_name("LLAMAAdaptiveRefinedGradientBoostedAnnealing", register=True) +except Exception as e: # AdaptiveRefinedGradientBoostedAnnealing print("AdaptiveRefinedGradientBoostedAnnealing can not be imported: ", e) -try: +try: # AdaptiveRefinedHybridPSO_DE from nevergrad.optimization.lama.AdaptiveRefinedHybridPSO_DE import AdaptiveRefinedHybridPSO_DE lama_register["AdaptiveRefinedHybridPSO_DE"] = AdaptiveRefinedHybridPSO_DE - res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE").set_name("LLAMAAdaptiveRefinedHybridPSO_DE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMAAdaptiveRefinedHybridPSO_DE").set_name( + "LLAMAAdaptiveRefinedHybridPSO_DE", register=True + ) +except Exception as e: # AdaptiveRefinedHybridPSO_DE print("AdaptiveRefinedHybridPSO_DE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveRefinementEvolutiveStrategy import AdaptiveRefinementEvolutiveStrategy +try: # AdaptiveRefinementEvolutiveStrategy + from nevergrad.optimization.lama.AdaptiveRefinementEvolutiveStrategy import ( + AdaptiveRefinementEvolutiveStrategy, + ) lama_register["AdaptiveRefinementEvolutiveStrategy"] = AdaptiveRefinementEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRefinementEvolutiveStrategy = NonObjectOptimizer(method="LLAMAAdaptiveRefinementEvolutiveStrategy").set_name("LLAMAAdaptiveRefinementEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRefinementEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveRefinementEvolutiveStrategy" + ).set_name("LLAMAAdaptiveRefinementEvolutiveStrategy", register=True) +except Exception as e: # AdaptiveRefinementEvolutiveStrategy print("AdaptiveRefinementEvolutiveStrategy can not be imported: ", e) -try: +try: # AdaptiveRefinementPSO from nevergrad.optimization.lama.AdaptiveRefinementPSO import AdaptiveRefinementPSO lama_register["AdaptiveRefinementPSO"] = AdaptiveRefinementPSO - res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO").set_name("LLAMAAdaptiveRefinementPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAAdaptiveRefinementPSO").set_name( + "LLAMAAdaptiveRefinementPSO", register=True + ) +except Exception as e: # AdaptiveRefinementPSO print("AdaptiveRefinementPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveRefinementSearchStrategyV30 import AdaptiveRefinementSearchStrategyV30 +try: # AdaptiveRefinementSearchStrategyV30 + from nevergrad.optimization.lama.AdaptiveRefinementSearchStrategyV30 import ( + AdaptiveRefinementSearchStrategyV30, + ) lama_register["AdaptiveRefinementSearchStrategyV30"] = AdaptiveRefinementSearchStrategyV30 - res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementSearchStrategyV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRefinementSearchStrategyV30 = NonObjectOptimizer(method="LLAMAAdaptiveRefinementSearchStrategyV30").set_name("LLAMAAdaptiveRefinementSearchStrategyV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRefinementSearchStrategyV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRefinementSearchStrategyV30 = NonObjectOptimizer( + method="LLAMAAdaptiveRefinementSearchStrategyV30" + ).set_name("LLAMAAdaptiveRefinementSearchStrategyV30", register=True) +except Exception as e: # AdaptiveRefinementSearchStrategyV30 print("AdaptiveRefinementSearchStrategyV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveResilientQuantumCrossoverStrategy import AdaptiveResilientQuantumCrossoverStrategy +try: # AdaptiveResilientQuantumCrossoverStrategy + from nevergrad.optimization.lama.AdaptiveResilientQuantumCrossoverStrategy import ( + AdaptiveResilientQuantumCrossoverStrategy, + ) lama_register["AdaptiveResilientQuantumCrossoverStrategy"] = AdaptiveResilientQuantumCrossoverStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveResilientQuantumCrossoverStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveResilientQuantumCrossoverStrategy = NonObjectOptimizer(method="LLAMAAdaptiveResilientQuantumCrossoverStrategy").set_name("LLAMAAdaptiveResilientQuantumCrossoverStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveResilientQuantumCrossoverStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveResilientQuantumCrossoverStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveResilientQuantumCrossoverStrategy" + ).set_name("LLAMAAdaptiveResilientQuantumCrossoverStrategy", register=True) +except Exception as e: # AdaptiveResilientQuantumCrossoverStrategy print("AdaptiveResilientQuantumCrossoverStrategy can not be imported: ", e) -try: +try: # AdaptiveRestartDE from nevergrad.optimization.lama.AdaptiveRestartDE import AdaptiveRestartDE lama_register["AdaptiveRestartDE"] = AdaptiveRestartDE - res = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE").set_name("LLAMAAdaptiveRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAAdaptiveRestartDE").set_name( + "LLAMAAdaptiveRestartDE", register=True + ) +except Exception as e: # AdaptiveRestartDE print("AdaptiveRestartDE can not be imported: ", e) -try: +try: # AdaptiveRestartHybridOptimizer from nevergrad.optimization.lama.AdaptiveRestartHybridOptimizer import AdaptiveRestartHybridOptimizer lama_register["AdaptiveRestartHybridOptimizer"] = AdaptiveRestartHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveRestartHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRestartHybridOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveRestartHybridOptimizer").set_name("LLAMAAdaptiveRestartHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRestartHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRestartHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveRestartHybridOptimizer" + ).set_name("LLAMAAdaptiveRestartHybridOptimizer", register=True) +except Exception as e: # AdaptiveRestartHybridOptimizer print("AdaptiveRestartHybridOptimizer can not be imported: ", e) -try: +try: # AdaptiveRotationalClimbOptimizer from nevergrad.optimization.lama.AdaptiveRotationalClimbOptimizer import AdaptiveRotationalClimbOptimizer lama_register["AdaptiveRotationalClimbOptimizer"] = AdaptiveRotationalClimbOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveRotationalClimbOptimizer").set_name("LLAMAAdaptiveRotationalClimbOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveRotationalClimbOptimizer" + ).set_name("LLAMAAdaptiveRotationalClimbOptimizer", register=True) +except Exception as e: # AdaptiveRotationalClimbOptimizer print("AdaptiveRotationalClimbOptimizer can not be imported: ", e) -try: +try: # AdaptiveSigmaCrossoverEvolution from nevergrad.optimization.lama.AdaptiveSigmaCrossoverEvolution import AdaptiveSigmaCrossoverEvolution lama_register["AdaptiveSigmaCrossoverEvolution"] = AdaptiveSigmaCrossoverEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveSigmaCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSigmaCrossoverEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSigmaCrossoverEvolution").set_name("LLAMAAdaptiveSigmaCrossoverEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSigmaCrossoverEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSigmaCrossoverEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSigmaCrossoverEvolution" + ).set_name("LLAMAAdaptiveSigmaCrossoverEvolution", register=True) +except Exception as e: # AdaptiveSigmaCrossoverEvolution print("AdaptiveSigmaCrossoverEvolution can not be imported: ", e) -try: +try: # AdaptiveSimulatedAnnealing from nevergrad.optimization.lama.AdaptiveSimulatedAnnealing import AdaptiveSimulatedAnnealing lama_register["AdaptiveSimulatedAnnealing"] = AdaptiveSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing").set_name("LLAMAAdaptiveSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealing").set_name( + "LLAMAAdaptiveSimulatedAnnealing", register=True + ) +except Exception as e: # AdaptiveSimulatedAnnealing print("AdaptiveSimulatedAnnealing can not be imported: ", e) -try: +try: # AdaptiveSimulatedAnnealingSearch from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingSearch import AdaptiveSimulatedAnnealingSearch lama_register["AdaptiveSimulatedAnnealingSearch"] = AdaptiveSimulatedAnnealingSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSimulatedAnnealingSearch = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingSearch").set_name("LLAMAAdaptiveSimulatedAnnealingSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSimulatedAnnealingSearch = NonObjectOptimizer( + method="LLAMAAdaptiveSimulatedAnnealingSearch" + ).set_name("LLAMAAdaptiveSimulatedAnnealingSearch", register=True) +except Exception as e: # AdaptiveSimulatedAnnealingSearch print("AdaptiveSimulatedAnnealingSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingWithSmartMemory import AdaptiveSimulatedAnnealingWithSmartMemory +try: # AdaptiveSimulatedAnnealingWithSmartMemory + from nevergrad.optimization.lama.AdaptiveSimulatedAnnealingWithSmartMemory import ( + AdaptiveSimulatedAnnealingWithSmartMemory, + ) lama_register["AdaptiveSimulatedAnnealingWithSmartMemory"] = AdaptiveSimulatedAnnealingWithSmartMemory - res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMAAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: # AdaptiveSimulatedAnnealingWithSmartMemory print("AdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSineCosineDifferentialEvolution import AdaptiveSineCosineDifferentialEvolution +try: # AdaptiveSineCosineDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveSineCosineDifferentialEvolution import ( + AdaptiveSineCosineDifferentialEvolution, + ) lama_register["AdaptiveSineCosineDifferentialEvolution"] = AdaptiveSineCosineDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveSineCosineDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSineCosineDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSineCosineDifferentialEvolution").set_name("LLAMAAdaptiveSineCosineDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSineCosineDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSineCosineDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSineCosineDifferentialEvolution" + ).set_name("LLAMAAdaptiveSineCosineDifferentialEvolution", register=True) +except Exception as e: # AdaptiveSineCosineDifferentialEvolution print("AdaptiveSineCosineDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSinusoidalDifferentialSwarm import AdaptiveSinusoidalDifferentialSwarm +try: # AdaptiveSinusoidalDifferentialSwarm + from nevergrad.optimization.lama.AdaptiveSinusoidalDifferentialSwarm import ( + AdaptiveSinusoidalDifferentialSwarm, + ) lama_register["AdaptiveSinusoidalDifferentialSwarm"] = AdaptiveSinusoidalDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAAdaptiveSinusoidalDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: # AdaptiveSinusoidalDifferentialSwarm print("AdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSpatialExplorationOptimizer import AdaptiveSpatialExplorationOptimizer +try: # AdaptiveSpatialExplorationOptimizer + from nevergrad.optimization.lama.AdaptiveSpatialExplorationOptimizer import ( + AdaptiveSpatialExplorationOptimizer, + ) lama_register["AdaptiveSpatialExplorationOptimizer"] = AdaptiveSpatialExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveSpatialExplorationOptimizer").set_name("LLAMAAdaptiveSpatialExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveSpatialExplorationOptimizer" + ).set_name("LLAMAAdaptiveSpatialExplorationOptimizer", register=True) +except Exception as e: # AdaptiveSpatialExplorationOptimizer print("AdaptiveSpatialExplorationOptimizer can not be imported: ", e) -try: +try: # AdaptiveSpiralGradientSearch from nevergrad.optimization.lama.AdaptiveSpiralGradientSearch import AdaptiveSpiralGradientSearch lama_register["AdaptiveSpiralGradientSearch"] = AdaptiveSpiralGradientSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMAAdaptiveSpiralGradientSearch").set_name("LLAMAAdaptiveSpiralGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMAAdaptiveSpiralGradientSearch" + ).set_name("LLAMAAdaptiveSpiralGradientSearch", register=True) +except Exception as e: # AdaptiveSpiralGradientSearch print("AdaptiveSpiralGradientSearch can not be imported: ", e) -try: +try: # AdaptiveStepSearch from nevergrad.optimization.lama.AdaptiveStepSearch import AdaptiveStepSearch lama_register["AdaptiveStepSearch"] = AdaptiveStepSearch - res = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveStepSearch = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch").set_name("LLAMAAdaptiveStepSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveStepSearch = NonObjectOptimizer(method="LLAMAAdaptiveStepSearch").set_name( + "LLAMAAdaptiveStepSearch", register=True + ) +except Exception as e: # AdaptiveStepSearch print("AdaptiveStepSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveStochasticGradientQuorumOptimization import AdaptiveStochasticGradientQuorumOptimization - - lama_register["AdaptiveStochasticGradientQuorumOptimization"] = AdaptiveStochasticGradientQuorumOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMAAdaptiveStochasticGradientQuorumOptimization").set_name("LLAMAAdaptiveStochasticGradientQuorumOptimization", register=True) -except Exception as e: +try: # AdaptiveStochasticGradientQuorumOptimization + from nevergrad.optimization.lama.AdaptiveStochasticGradientQuorumOptimization import ( + AdaptiveStochasticGradientQuorumOptimization, + ) + + lama_register["AdaptiveStochasticGradientQuorumOptimization"] = ( + AdaptiveStochasticGradientQuorumOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveStochasticGradientQuorumOptimization" + ).set_name("LLAMAAdaptiveStochasticGradientQuorumOptimization", register=True) +except Exception as e: # AdaptiveStochasticGradientQuorumOptimization print("AdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveStochasticHybridEvolution import AdaptiveStochasticHybridEvolution +try: # AdaptiveStochasticHybridEvolution + from nevergrad.optimization.lama.AdaptiveStochasticHybridEvolution import ( + AdaptiveStochasticHybridEvolution, + ) lama_register["AdaptiveStochasticHybridEvolution"] = AdaptiveStochasticHybridEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveStochasticHybridEvolution = NonObjectOptimizer(method="LLAMAAdaptiveStochasticHybridEvolution").set_name("LLAMAAdaptiveStochasticHybridEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveStochasticHybridEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveStochasticHybridEvolution" + ).set_name("LLAMAAdaptiveStochasticHybridEvolution", register=True) +except Exception as e: # AdaptiveStochasticHybridEvolution print("AdaptiveStochasticHybridEvolution can not be imported: ", e) -try: +try: # AdaptiveStochasticTunneling from nevergrad.optimization.lama.AdaptiveStochasticTunneling import AdaptiveStochasticTunneling lama_register["AdaptiveStochasticTunneling"] = AdaptiveStochasticTunneling - res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveStochasticTunneling = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling").set_name("LLAMAAdaptiveStochasticTunneling", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveStochasticTunneling = NonObjectOptimizer(method="LLAMAAdaptiveStochasticTunneling").set_name( + "LLAMAAdaptiveStochasticTunneling", register=True + ) +except Exception as e: # AdaptiveStochasticTunneling print("AdaptiveStochasticTunneling can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveStrategicExplorationOptimizer import AdaptiveStrategicExplorationOptimizer +try: # AdaptiveStrategicExplorationOptimizer + from nevergrad.optimization.lama.AdaptiveStrategicExplorationOptimizer import ( + AdaptiveStrategicExplorationOptimizer, + ) lama_register["AdaptiveStrategicExplorationOptimizer"] = AdaptiveStrategicExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAAdaptiveStrategicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveStrategicExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdaptiveStrategicExplorationOptimizer").set_name("LLAMAAdaptiveStrategicExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveStrategicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveStrategicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdaptiveStrategicExplorationOptimizer" + ).set_name("LLAMAAdaptiveStrategicExplorationOptimizer", register=True) +except Exception as e: # AdaptiveStrategicExplorationOptimizer print("AdaptiveStrategicExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSwarmDifferentialEvolution import AdaptiveSwarmDifferentialEvolution +try: # AdaptiveSwarmDifferentialEvolution + from nevergrad.optimization.lama.AdaptiveSwarmDifferentialEvolution import ( + AdaptiveSwarmDifferentialEvolution, + ) lama_register["AdaptiveSwarmDifferentialEvolution"] = AdaptiveSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdaptiveSwarmDifferentialEvolution").set_name("LLAMAAdaptiveSwarmDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: # AdaptiveSwarmDifferentialEvolution print("AdaptiveSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSwarmGradientOptimization import AdaptiveSwarmGradientOptimization +try: # AdaptiveSwarmGradientOptimization + from nevergrad.optimization.lama.AdaptiveSwarmGradientOptimization import ( + AdaptiveSwarmGradientOptimization, + ) lama_register["AdaptiveSwarmGradientOptimization"] = AdaptiveSwarmGradientOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmGradientOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSwarmGradientOptimization = NonObjectOptimizer(method="LLAMAAdaptiveSwarmGradientOptimization").set_name("LLAMAAdaptiveSwarmGradientOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmGradientOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSwarmGradientOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmGradientOptimization" + ).set_name("LLAMAAdaptiveSwarmGradientOptimization", register=True) +except Exception as e: # AdaptiveSwarmGradientOptimization print("AdaptiveSwarmGradientOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveSwarmHarmonicOptimizationV4 import AdaptiveSwarmHarmonicOptimizationV4 +try: # AdaptiveSwarmHarmonicOptimizationV4 + from nevergrad.optimization.lama.AdaptiveSwarmHarmonicOptimizationV4 import ( + AdaptiveSwarmHarmonicOptimizationV4, + ) lama_register["AdaptiveSwarmHarmonicOptimizationV4"] = AdaptiveSwarmHarmonicOptimizationV4 - res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHarmonicOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSwarmHarmonicOptimizationV4 = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHarmonicOptimizationV4").set_name("LLAMAAdaptiveSwarmHarmonicOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHarmonicOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSwarmHarmonicOptimizationV4 = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmHarmonicOptimizationV4" + ).set_name("LLAMAAdaptiveSwarmHarmonicOptimizationV4", register=True) +except Exception as e: # AdaptiveSwarmHarmonicOptimizationV4 print("AdaptiveSwarmHarmonicOptimizationV4 can not be imported: ", e) -try: +try: # AdaptiveSwarmHybridOptimization from nevergrad.optimization.lama.AdaptiveSwarmHybridOptimization import AdaptiveSwarmHybridOptimization lama_register["AdaptiveSwarmHybridOptimization"] = AdaptiveSwarmHybridOptimization - res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveSwarmHybridOptimization = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHybridOptimization").set_name("LLAMAAdaptiveSwarmHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveSwarmHybridOptimization = NonObjectOptimizer( + method="LLAMAAdaptiveSwarmHybridOptimization" + ).set_name("LLAMAAdaptiveSwarmHybridOptimization", register=True) +except Exception as e: # AdaptiveSwarmHybridOptimization print("AdaptiveSwarmHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdaptiveThresholdDifferentialStrategy import AdaptiveThresholdDifferentialStrategy +try: # AdaptiveThresholdDifferentialStrategy + from nevergrad.optimization.lama.AdaptiveThresholdDifferentialStrategy import ( + AdaptiveThresholdDifferentialStrategy, + ) lama_register["AdaptiveThresholdDifferentialStrategy"] = AdaptiveThresholdDifferentialStrategy - res = NonObjectOptimizer(method="LLAMAAdaptiveThresholdDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdaptiveThresholdDifferentialStrategy = NonObjectOptimizer(method="LLAMAAdaptiveThresholdDifferentialStrategy").set_name("LLAMAAdaptiveThresholdDifferentialStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdaptiveThresholdDifferentialStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdaptiveThresholdDifferentialStrategy = NonObjectOptimizer( + method="LLAMAAdaptiveThresholdDifferentialStrategy" + ).set_name("LLAMAAdaptiveThresholdDifferentialStrategy", register=True) +except Exception as e: # AdaptiveThresholdDifferentialStrategy print("AdaptiveThresholdDifferentialStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveDifferentialEvolution import AdvancedAdaptiveDifferentialEvolution +try: # AdvancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.AdvancedAdaptiveDifferentialEvolution import ( + AdvancedAdaptiveDifferentialEvolution, + ) lama_register["AdvancedAdaptiveDifferentialEvolution"] = AdvancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDifferentialEvolution").set_name("LLAMAAdvancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAAdvancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # AdvancedAdaptiveDifferentialEvolution print("AdvancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveDualPhaseStrategy import AdvancedAdaptiveDualPhaseStrategy +try: # AdvancedAdaptiveDualPhaseStrategy + from nevergrad.optimization.lama.AdvancedAdaptiveDualPhaseStrategy import ( + AdvancedAdaptiveDualPhaseStrategy, + ) lama_register["AdvancedAdaptiveDualPhaseStrategy"] = AdvancedAdaptiveDualPhaseStrategy - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDualPhaseStrategy").set_name("LLAMAAdvancedAdaptiveDualPhaseStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDualPhaseStrategy" + ).set_name("LLAMAAdvancedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: # AdvancedAdaptiveDualPhaseStrategy print("AdvancedAdaptiveDualPhaseStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMemoryStrategyV64 import AdvancedAdaptiveDynamicMemoryStrategyV64 +try: # AdvancedAdaptiveDynamicMemoryStrategyV64 + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMemoryStrategyV64 import ( + AdvancedAdaptiveDynamicMemoryStrategyV64, + ) lama_register["AdvancedAdaptiveDynamicMemoryStrategyV64"] = AdvancedAdaptiveDynamicMemoryStrategyV64 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64").set_name("LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64" + ).set_name("LLAMAAdvancedAdaptiveDynamicMemoryStrategyV64", register=True) +except Exception as e: # AdvancedAdaptiveDynamicMemoryStrategyV64 print("AdvancedAdaptiveDynamicMemoryStrategyV64 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution print("AdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveExplorationExploitationAlgorithm import AdvancedAdaptiveExplorationExploitationAlgorithm - - lama_register["AdvancedAdaptiveExplorationExploitationAlgorithm"] = AdvancedAdaptiveExplorationExploitationAlgorithm - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm", register=True) -except Exception as e: +try: # AdvancedAdaptiveExplorationExploitationAlgorithm + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationExploitationAlgorithm import ( + AdvancedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["AdvancedAdaptiveExplorationExploitationAlgorithm"] = ( + AdvancedAdaptiveExplorationExploitationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: # AdvancedAdaptiveExplorationExploitationAlgorithm print("AdvancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveExplorationOptimizationAlgorithm import AdvancedAdaptiveExplorationOptimizationAlgorithm - - lama_register["AdvancedAdaptiveExplorationOptimizationAlgorithm"] = AdvancedAdaptiveExplorationOptimizationAlgorithm - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm").set_name("LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm", register=True) -except Exception as e: +try: # AdvancedAdaptiveExplorationOptimizationAlgorithm + from nevergrad.optimization.lama.AdvancedAdaptiveExplorationOptimizationAlgorithm import ( + AdvancedAdaptiveExplorationOptimizationAlgorithm, + ) + + lama_register["AdvancedAdaptiveExplorationOptimizationAlgorithm"] = ( + AdvancedAdaptiveExplorationOptimizationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveExplorationOptimizationAlgorithm", register=True) +except Exception as e: # AdvancedAdaptiveExplorationOptimizationAlgorithm print("AdvancedAdaptiveExplorationOptimizationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveFireworkAlgorithm import AdvancedAdaptiveFireworkAlgorithm +try: # AdvancedAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.AdvancedAdaptiveFireworkAlgorithm import ( + AdvancedAdaptiveFireworkAlgorithm, + ) lama_register["AdvancedAdaptiveFireworkAlgorithm"] = AdvancedAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveFireworkAlgorithm").set_name("LLAMAAdvancedAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAAdvancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # AdvancedAdaptiveFireworkAlgorithm print("AdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveGlobalClimbingOptimizerV6 import AdvancedAdaptiveGlobalClimbingOptimizerV6 +try: # AdvancedAdaptiveGlobalClimbingOptimizerV6 + from nevergrad.optimization.lama.AdvancedAdaptiveGlobalClimbingOptimizerV6 import ( + AdvancedAdaptiveGlobalClimbingOptimizerV6, + ) lama_register["AdvancedAdaptiveGlobalClimbingOptimizerV6"] = AdvancedAdaptiveGlobalClimbingOptimizerV6 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6").set_name("LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6" + ).set_name("LLAMAAdvancedAdaptiveGlobalClimbingOptimizerV6", register=True) +except Exception as e: # AdvancedAdaptiveGlobalClimbingOptimizerV6 print("AdvancedAdaptiveGlobalClimbingOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveGradientBoostedMemoryExploration import AdvancedAdaptiveGradientBoostedMemoryExploration - - lama_register["AdvancedAdaptiveGradientBoostedMemoryExploration"] = AdvancedAdaptiveGradientBoostedMemoryExploration - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration").set_name("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration", register=True) -except Exception as e: +try: # AdvancedAdaptiveGradientBoostedMemoryExploration + from nevergrad.optimization.lama.AdvancedAdaptiveGradientBoostedMemoryExploration import ( + AdvancedAdaptiveGradientBoostedMemoryExploration, + ) + + lama_register["AdvancedAdaptiveGradientBoostedMemoryExploration"] = ( + AdvancedAdaptiveGradientBoostedMemoryExploration + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration" + ).set_name("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration", register=True) +except Exception as e: # AdvancedAdaptiveGradientBoostedMemoryExploration print("AdvancedAdaptiveGradientBoostedMemoryExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveGradientHybridOptimizer import AdvancedAdaptiveGradientHybridOptimizer +try: # AdvancedAdaptiveGradientHybridOptimizer + from nevergrad.optimization.lama.AdvancedAdaptiveGradientHybridOptimizer import ( + AdvancedAdaptiveGradientHybridOptimizer, + ) lama_register["AdvancedAdaptiveGradientHybridOptimizer"] = AdvancedAdaptiveGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientHybridOptimizer").set_name("LLAMAAdvancedAdaptiveGradientHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveGradientHybridOptimizer" + ).set_name("LLAMAAdvancedAdaptiveGradientHybridOptimizer", register=True) +except Exception as e: # AdvancedAdaptiveGradientHybridOptimizer print("AdvancedAdaptiveGradientHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV56 import AdvancedAdaptiveMemoryEnhancedStrategyV56 +try: # AdvancedAdaptiveMemoryEnhancedStrategyV56 + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV56 import ( + AdvancedAdaptiveMemoryEnhancedStrategyV56, + ) lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV56"] = AdvancedAdaptiveMemoryEnhancedStrategyV56 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56").set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56" + ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV56", register=True) +except Exception as e: # AdvancedAdaptiveMemoryEnhancedStrategyV56 print("AdvancedAdaptiveMemoryEnhancedStrategyV56 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV73 import AdvancedAdaptiveMemoryEnhancedStrategyV73 +try: # AdvancedAdaptiveMemoryEnhancedStrategyV73 + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryEnhancedStrategyV73 import ( + AdvancedAdaptiveMemoryEnhancedStrategyV73, + ) lama_register["AdvancedAdaptiveMemoryEnhancedStrategyV73"] = AdvancedAdaptiveMemoryEnhancedStrategyV73 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73").set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73" + ).set_name("LLAMAAdvancedAdaptiveMemoryEnhancedStrategyV73", register=True) +except Exception as e: # AdvancedAdaptiveMemoryEnhancedStrategyV73 print("AdvancedAdaptiveMemoryEnhancedStrategyV73 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemoryGuidedStrategyV77 import AdvancedAdaptiveMemoryGuidedStrategyV77 +try: # AdvancedAdaptiveMemoryGuidedStrategyV77 + from nevergrad.optimization.lama.AdvancedAdaptiveMemoryGuidedStrategyV77 import ( + AdvancedAdaptiveMemoryGuidedStrategyV77, + ) lama_register["AdvancedAdaptiveMemoryGuidedStrategyV77"] = AdvancedAdaptiveMemoryGuidedStrategyV77 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77").set_name("LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77" + ).set_name("LLAMAAdvancedAdaptiveMemoryGuidedStrategyV77", register=True) +except Exception as e: # AdvancedAdaptiveMemoryGuidedStrategyV77 print("AdvancedAdaptiveMemoryGuidedStrategyV77 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveMemorySimulatedAnnealing import AdvancedAdaptiveMemorySimulatedAnnealing +try: # AdvancedAdaptiveMemorySimulatedAnnealing + from nevergrad.optimization.lama.AdvancedAdaptiveMemorySimulatedAnnealing import ( + AdvancedAdaptiveMemorySimulatedAnnealing, + ) lama_register["AdvancedAdaptiveMemorySimulatedAnnealing"] = AdvancedAdaptiveMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing").set_name("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing", register=True) +except Exception as e: # AdvancedAdaptiveMemorySimulatedAnnealing print("AdvancedAdaptiveMemorySimulatedAnnealing can not be imported: ", e) -try: +try: # AdvancedAdaptivePSO from nevergrad.optimization.lama.AdvancedAdaptivePSO import AdvancedAdaptivePSO lama_register["AdvancedAdaptivePSO"] = AdvancedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptivePSO = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO").set_name("LLAMAAdvancedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptivePSO = NonObjectOptimizer(method="LLAMAAdvancedAdaptivePSO").set_name( + "LLAMAAdvancedAdaptivePSO", register=True + ) +except Exception as e: # AdvancedAdaptivePSO print("AdvancedAdaptivePSO can not be imported: ", e) -try: +try: # AdvancedAdaptiveQuantumEntropyDE from nevergrad.optimization.lama.AdvancedAdaptiveQuantumEntropyDE import AdvancedAdaptiveQuantumEntropyDE lama_register["AdvancedAdaptiveQuantumEntropyDE"] = AdvancedAdaptiveQuantumEntropyDE - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumEntropyDE").set_name("LLAMAAdvancedAdaptiveQuantumEntropyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumEntropyDE" + ).set_name("LLAMAAdvancedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: # AdvancedAdaptiveQuantumEntropyDE print("AdvancedAdaptiveQuantumEntropyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumLevyOptimizer import AdvancedAdaptiveQuantumLevyOptimizer +try: # AdvancedAdaptiveQuantumLevyOptimizer + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumLevyOptimizer import ( + AdvancedAdaptiveQuantumLevyOptimizer, + ) lama_register["AdvancedAdaptiveQuantumLevyOptimizer"] = AdvancedAdaptiveQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer").set_name("LLAMAAdvancedAdaptiveQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAAdvancedAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: # AdvancedAdaptiveQuantumLevyOptimizer print("AdvancedAdaptiveQuantumLevyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV1 import AdvancedAdaptiveQuantumSwarmOptimizationV1 +try: # AdvancedAdaptiveQuantumSwarmOptimizationV1 + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV1 import ( + AdvancedAdaptiveQuantumSwarmOptimizationV1, + ) lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV1"] = AdvancedAdaptiveQuantumSwarmOptimizationV1 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1").set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1" + ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV1", register=True) +except Exception as e: # AdvancedAdaptiveQuantumSwarmOptimizationV1 print("AdvancedAdaptiveQuantumSwarmOptimizationV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV2 import AdvancedAdaptiveQuantumSwarmOptimizationV2 +try: # AdvancedAdaptiveQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.AdvancedAdaptiveQuantumSwarmOptimizationV2 import ( + AdvancedAdaptiveQuantumSwarmOptimizationV2, + ) lama_register["AdvancedAdaptiveQuantumSwarmOptimizationV2"] = AdvancedAdaptiveQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAAdvancedAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: # AdvancedAdaptiveQuantumSwarmOptimizationV2 print("AdvancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAdaptiveStrategyOptimizer import AdvancedAdaptiveStrategyOptimizer +try: # AdvancedAdaptiveStrategyOptimizer + from nevergrad.optimization.lama.AdvancedAdaptiveStrategyOptimizer import ( + AdvancedAdaptiveStrategyOptimizer, + ) lama_register["AdvancedAdaptiveStrategyOptimizer"] = AdvancedAdaptiveStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAdaptiveStrategyOptimizer = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveStrategyOptimizer").set_name("LLAMAAdvancedAdaptiveStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAdaptiveStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAdaptiveStrategyOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedAdaptiveStrategyOptimizer" + ).set_name("LLAMAAdvancedAdaptiveStrategyOptimizer", register=True) +except Exception as e: # AdvancedAdaptiveStrategyOptimizer print("AdvancedAdaptiveStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedAttenuatedAdaptiveEvolver import AdvancedAttenuatedAdaptiveEvolver +try: # AdvancedAttenuatedAdaptiveEvolver + from nevergrad.optimization.lama.AdvancedAttenuatedAdaptiveEvolver import ( + AdvancedAttenuatedAdaptiveEvolver, + ) lama_register["AdvancedAttenuatedAdaptiveEvolver"] = AdvancedAttenuatedAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAAdvancedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAdvancedAttenuatedAdaptiveEvolver").set_name("LLAMAAdvancedAttenuatedAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedAttenuatedAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAAdvancedAttenuatedAdaptiveEvolver" + ).set_name("LLAMAAdvancedAttenuatedAdaptiveEvolver", register=True) +except Exception as e: # AdvancedAttenuatedAdaptiveEvolver print("AdvancedAttenuatedAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedBalancedAdaptiveElitistStrategyV2 import AdvancedBalancedAdaptiveElitistStrategyV2 +try: # AdvancedBalancedAdaptiveElitistStrategyV2 + from nevergrad.optimization.lama.AdvancedBalancedAdaptiveElitistStrategyV2 import ( + AdvancedBalancedAdaptiveElitistStrategyV2, + ) lama_register["AdvancedBalancedAdaptiveElitistStrategyV2"] = AdvancedBalancedAdaptiveElitistStrategyV2 - res = NonObjectOptimizer(method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedBalancedAdaptiveElitistStrategyV2 = NonObjectOptimizer(method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2").set_name("LLAMAAdvancedBalancedAdaptiveElitistStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedBalancedAdaptiveElitistStrategyV2 = NonObjectOptimizer( + method="LLAMAAdvancedBalancedAdaptiveElitistStrategyV2" + ).set_name("LLAMAAdvancedBalancedAdaptiveElitistStrategyV2", register=True) +except Exception as e: # AdvancedBalancedAdaptiveElitistStrategyV2 print("AdvancedBalancedAdaptiveElitistStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedBalancedExplorationOptimizer import AdvancedBalancedExplorationOptimizer +try: # AdvancedBalancedExplorationOptimizer + from nevergrad.optimization.lama.AdvancedBalancedExplorationOptimizer import ( + AdvancedBalancedExplorationOptimizer, + ) lama_register["AdvancedBalancedExplorationOptimizer"] = AdvancedBalancedExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedBalancedExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedBalancedExplorationOptimizer").set_name("LLAMAAdvancedBalancedExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedBalancedExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedBalancedExplorationOptimizer" + ).set_name("LLAMAAdvancedBalancedExplorationOptimizer", register=True) +except Exception as e: # AdvancedBalancedExplorationOptimizer print("AdvancedBalancedExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRate import AdvancedDifferentialEvolutionWithAdaptiveLearningRate - - lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRate"] = AdvancedDifferentialEvolutionWithAdaptiveLearningRate - res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate").set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate", register=True) -except Exception as e: +try: # AdvancedDifferentialEvolutionWithAdaptiveLearningRate + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRate import ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRate, + ) + + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRate"] = ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRate + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate" + ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRate", register=True) +except Exception as e: # AdvancedDifferentialEvolutionWithAdaptiveLearningRate print("AdvancedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 import AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 - - lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2"] = AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 - res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2").set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2", register=True) -except Exception as e: +try: # AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 + from nevergrad.optimization.lama.AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 import ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2, + ) + + lama_register["AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2"] = ( + AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2" + ).set_name("LLAMAAdvancedDifferentialEvolutionWithAdaptiveLearningRateV2", register=True) +except Exception as e: # AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 print("AdvancedDifferentialEvolutionWithAdaptiveLearningRateV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDifferentialParticleSwarmOptimization import AdvancedDifferentialParticleSwarmOptimization - - lama_register["AdvancedDifferentialParticleSwarmOptimization"] = AdvancedDifferentialParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAAdvancedDifferentialParticleSwarmOptimization").set_name("LLAMAAdvancedDifferentialParticleSwarmOptimization", register=True) -except Exception as e: +try: # AdvancedDifferentialParticleSwarmOptimization + from nevergrad.optimization.lama.AdvancedDifferentialParticleSwarmOptimization import ( + AdvancedDifferentialParticleSwarmOptimization, + ) + + lama_register["AdvancedDifferentialParticleSwarmOptimization"] = ( + AdvancedDifferentialParticleSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdvancedDifferentialParticleSwarmOptimization" + ).set_name("LLAMAAdvancedDifferentialParticleSwarmOptimization", register=True) +except Exception as e: # AdvancedDifferentialParticleSwarmOptimization print("AdvancedDifferentialParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDimensionalCyclicCrossoverEvolver import AdvancedDimensionalCyclicCrossoverEvolver +try: # AdvancedDimensionalCyclicCrossoverEvolver + from nevergrad.optimization.lama.AdvancedDimensionalCyclicCrossoverEvolver import ( + AdvancedDimensionalCyclicCrossoverEvolver, + ) lama_register["AdvancedDimensionalCyclicCrossoverEvolver"] = AdvancedDimensionalCyclicCrossoverEvolver - res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer(method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver").set_name("LLAMAAdvancedDimensionalCyclicCrossoverEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( + method="LLAMAAdvancedDimensionalCyclicCrossoverEvolver" + ).set_name("LLAMAAdvancedDimensionalCyclicCrossoverEvolver", register=True) +except Exception as e: # AdvancedDimensionalCyclicCrossoverEvolver print("AdvancedDimensionalCyclicCrossoverEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDimensionalFeedbackEvolver import AdvancedDimensionalFeedbackEvolver +try: # AdvancedDimensionalFeedbackEvolver + from nevergrad.optimization.lama.AdvancedDimensionalFeedbackEvolver import ( + AdvancedDimensionalFeedbackEvolver, + ) lama_register["AdvancedDimensionalFeedbackEvolver"] = AdvancedDimensionalFeedbackEvolver - res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalFeedbackEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDimensionalFeedbackEvolver = NonObjectOptimizer(method="LLAMAAdvancedDimensionalFeedbackEvolver").set_name("LLAMAAdvancedDimensionalFeedbackEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDimensionalFeedbackEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDimensionalFeedbackEvolver = NonObjectOptimizer( + method="LLAMAAdvancedDimensionalFeedbackEvolver" + ).set_name("LLAMAAdvancedDimensionalFeedbackEvolver", register=True) +except Exception as e: # AdvancedDimensionalFeedbackEvolver print("AdvancedDimensionalFeedbackEvolver can not be imported: ", e) -try: +try: # AdvancedDiversityAdaptiveDE from nevergrad.optimization.lama.AdvancedDiversityAdaptiveDE import AdvancedDiversityAdaptiveDE lama_register["AdvancedDiversityAdaptiveDE"] = AdvancedDiversityAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE").set_name("LLAMAAdvancedDiversityAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityAdaptiveDE").set_name( + "LLAMAAdvancedDiversityAdaptiveDE", register=True + ) +except Exception as e: # AdvancedDiversityAdaptiveDE print("AdvancedDiversityAdaptiveDE can not be imported: ", e) -try: +try: # AdvancedDiversityDE from nevergrad.optimization.lama.AdvancedDiversityDE import AdvancedDiversityDE lama_register["AdvancedDiversityDE"] = AdvancedDiversityDE - res = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDiversityDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE").set_name("LLAMAAdvancedDiversityDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDiversityDE = NonObjectOptimizer(method="LLAMAAdvancedDiversityDE").set_name( + "LLAMAAdvancedDiversityDE", register=True + ) +except Exception as e: # AdvancedDiversityDE print("AdvancedDiversityDE can not be imported: ", e) -try: +try: # AdvancedDualStrategyAdaptiveDE from nevergrad.optimization.lama.AdvancedDualStrategyAdaptiveDE import AdvancedDualStrategyAdaptiveDE lama_register["AdvancedDualStrategyAdaptiveDE"] = AdvancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyAdaptiveDE").set_name("LLAMAAdvancedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedDualStrategyAdaptiveDE" + ).set_name("LLAMAAdvancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # AdvancedDualStrategyAdaptiveDE print("AdvancedDualStrategyAdaptiveDE can not be imported: ", e) -try: +try: # AdvancedDualStrategyHybridDE from nevergrad.optimization.lama.AdvancedDualStrategyHybridDE import AdvancedDualStrategyHybridDE lama_register["AdvancedDualStrategyHybridDE"] = AdvancedDualStrategyHybridDE - res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDualStrategyHybridDE = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyHybridDE").set_name("LLAMAAdvancedDualStrategyHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDualStrategyHybridDE = NonObjectOptimizer( + method="LLAMAAdvancedDualStrategyHybridDE" + ).set_name("LLAMAAdvancedDualStrategyHybridDE", register=True) +except Exception as e: # AdvancedDualStrategyHybridDE print("AdvancedDualStrategyHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - - lama_register["AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) -except Exception as e: +try: # AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMAAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory print("AdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridOptimizer import AdvancedDynamicAdaptiveHybridOptimizer +try: # AdvancedDynamicAdaptiveHybridOptimizer + from nevergrad.optimization.lama.AdvancedDynamicAdaptiveHybridOptimizer import ( + AdvancedDynamicAdaptiveHybridOptimizer, + ) lama_register["AdvancedDynamicAdaptiveHybridOptimizer"] = AdvancedDynamicAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer").set_name("LLAMAAdvancedDynamicAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAAdvancedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: # AdvancedDynamicAdaptiveHybridOptimizer print("AdvancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) -try: +try: # AdvancedDynamicCrowdedDE from nevergrad.optimization.lama.AdvancedDynamicCrowdedDE import AdvancedDynamicCrowdedDE lama_register["AdvancedDynamicCrowdedDE"] = AdvancedDynamicCrowdedDE - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicCrowdedDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE").set_name("LLAMAAdvancedDynamicCrowdedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicCrowdedDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicCrowdedDE").set_name( + "LLAMAAdvancedDynamicCrowdedDE", register=True + ) +except Exception as e: # AdvancedDynamicCrowdedDE print("AdvancedDynamicCrowdedDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicDualPhaseStrategyV37 import AdvancedDynamicDualPhaseStrategyV37 +try: # AdvancedDynamicDualPhaseStrategyV37 + from nevergrad.optimization.lama.AdvancedDynamicDualPhaseStrategyV37 import ( + AdvancedDynamicDualPhaseStrategyV37, + ) lama_register["AdvancedDynamicDualPhaseStrategyV37"] = AdvancedDynamicDualPhaseStrategyV37 - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicDualPhaseStrategyV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicDualPhaseStrategyV37 = NonObjectOptimizer(method="LLAMAAdvancedDynamicDualPhaseStrategyV37").set_name("LLAMAAdvancedDynamicDualPhaseStrategyV37", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicDualPhaseStrategyV37")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicDualPhaseStrategyV37 = NonObjectOptimizer( + method="LLAMAAdvancedDynamicDualPhaseStrategyV37" + ).set_name("LLAMAAdvancedDynamicDualPhaseStrategyV37", register=True) +except Exception as e: # AdvancedDynamicDualPhaseStrategyV37 print("AdvancedDynamicDualPhaseStrategyV37 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicExplorationOptimizer import AdvancedDynamicExplorationOptimizer +try: # AdvancedDynamicExplorationOptimizer + from nevergrad.optimization.lama.AdvancedDynamicExplorationOptimizer import ( + AdvancedDynamicExplorationOptimizer, + ) lama_register["AdvancedDynamicExplorationOptimizer"] = AdvancedDynamicExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicExplorationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicExplorationOptimizer").set_name("LLAMAAdvancedDynamicExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicExplorationOptimizer" + ).set_name("LLAMAAdvancedDynamicExplorationOptimizer", register=True) +except Exception as e: # AdvancedDynamicExplorationOptimizer print("AdvancedDynamicExplorationOptimizer can not be imported: ", e) -try: +try: # AdvancedDynamicFireworkAlgorithm from nevergrad.optimization.lama.AdvancedDynamicFireworkAlgorithm import AdvancedDynamicFireworkAlgorithm lama_register["AdvancedDynamicFireworkAlgorithm"] = AdvancedDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedDynamicFireworkAlgorithm").set_name("LLAMAAdvancedDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedDynamicFireworkAlgorithm" + ).set_name("LLAMAAdvancedDynamicFireworkAlgorithm", register=True) +except Exception as e: # AdvancedDynamicFireworkAlgorithm print("AdvancedDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicGradientBoostedMemorySimulatedAnnealing import AdvancedDynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["AdvancedDynamicGradientBoostedMemorySimulatedAnnealing"] = AdvancedDynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # AdvancedDynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.AdvancedDynamicGradientBoostedMemorySimulatedAnnealing import ( + AdvancedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdvancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + AdvancedDynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # AdvancedDynamicGradientBoostedMemorySimulatedAnnealing print("AdvancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicHybridOptimization import AdvancedDynamicHybridOptimization +try: # AdvancedDynamicHybridOptimization + from nevergrad.optimization.lama.AdvancedDynamicHybridOptimization import ( + AdvancedDynamicHybridOptimization, + ) lama_register["AdvancedDynamicHybridOptimization"] = AdvancedDynamicHybridOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimization").set_name("LLAMAAdvancedDynamicHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicHybridOptimization = NonObjectOptimizer( + method="LLAMAAdvancedDynamicHybridOptimization" + ).set_name("LLAMAAdvancedDynamicHybridOptimization", register=True) +except Exception as e: # AdvancedDynamicHybridOptimization print("AdvancedDynamicHybridOptimization can not be imported: ", e) -try: +try: # AdvancedDynamicHybridOptimizer from nevergrad.optimization.lama.AdvancedDynamicHybridOptimizer import AdvancedDynamicHybridOptimizer lama_register["AdvancedDynamicHybridOptimizer"] = AdvancedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimizer").set_name("LLAMAAdvancedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedDynamicHybridOptimizer" + ).set_name("LLAMAAdvancedDynamicHybridOptimizer", register=True) +except Exception as e: # AdvancedDynamicHybridOptimizer print("AdvancedDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicMultimodalSimulatedAnnealing import AdvancedDynamicMultimodalSimulatedAnnealing +try: # AdvancedDynamicMultimodalSimulatedAnnealing + from nevergrad.optimization.lama.AdvancedDynamicMultimodalSimulatedAnnealing import ( + AdvancedDynamicMultimodalSimulatedAnnealing, + ) lama_register["AdvancedDynamicMultimodalSimulatedAnnealing"] = AdvancedDynamicMultimodalSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicMultimodalSimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing").set_name("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicMultimodalSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedDynamicMultimodalSimulatedAnnealing" + ).set_name("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing", register=True) +except Exception as e: # AdvancedDynamicMultimodalSimulatedAnnealing print("AdvancedDynamicMultimodalSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedDynamicStrategyAdaptiveDE import AdvancedDynamicStrategyAdaptiveDE +try: # AdvancedDynamicStrategyAdaptiveDE + from nevergrad.optimization.lama.AdvancedDynamicStrategyAdaptiveDE import ( + AdvancedDynamicStrategyAdaptiveDE, + ) lama_register["AdvancedDynamicStrategyAdaptiveDE"] = AdvancedDynamicStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedDynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedDynamicStrategyAdaptiveDE").set_name("LLAMAAdvancedDynamicStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedDynamicStrategyAdaptiveDE" + ).set_name("LLAMAAdvancedDynamicStrategyAdaptiveDE", register=True) +except Exception as e: # AdvancedDynamicStrategyAdaptiveDE print("AdvancedDynamicStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEliteAdaptiveCrowdingHybridOptimizer import AdvancedEliteAdaptiveCrowdingHybridOptimizer - - lama_register["AdvancedEliteAdaptiveCrowdingHybridOptimizer"] = AdvancedEliteAdaptiveCrowdingHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer").set_name("LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer", register=True) -except Exception as e: +try: # AdvancedEliteAdaptiveCrowdingHybridOptimizer + from nevergrad.optimization.lama.AdvancedEliteAdaptiveCrowdingHybridOptimizer import ( + AdvancedEliteAdaptiveCrowdingHybridOptimizer, + ) + + lama_register["AdvancedEliteAdaptiveCrowdingHybridOptimizer"] = ( + AdvancedEliteAdaptiveCrowdingHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer" + ).set_name("LLAMAAdvancedEliteAdaptiveCrowdingHybridOptimizer", register=True) +except Exception as e: # AdvancedEliteAdaptiveCrowdingHybridOptimizer print("AdvancedEliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEliteDynamicHybridOptimizer import AdvancedEliteDynamicHybridOptimizer +try: # AdvancedEliteDynamicHybridOptimizer + from nevergrad.optimization.lama.AdvancedEliteDynamicHybridOptimizer import ( + AdvancedEliteDynamicHybridOptimizer, + ) lama_register["AdvancedEliteDynamicHybridOptimizer"] = AdvancedEliteDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEliteDynamicHybridOptimizer").set_name("LLAMAAdvancedEliteDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEliteDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEliteDynamicHybridOptimizer" + ).set_name("LLAMAAdvancedEliteDynamicHybridOptimizer", register=True) +except Exception as e: # AdvancedEliteDynamicHybridOptimizer print("AdvancedEliteDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveFireworkAlgorithm import AdvancedEnhancedAdaptiveFireworkAlgorithm +try: # AdvancedEnhancedAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveFireworkAlgorithm import ( + AdvancedEnhancedAdaptiveFireworkAlgorithm, + ) lama_register["AdvancedEnhancedAdaptiveFireworkAlgorithm"] = AdvancedEnhancedAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm").set_name("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # AdvancedEnhancedAdaptiveFireworkAlgorithm print("AdvancedEnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveMetaNetAQAPSO import AdvancedEnhancedAdaptiveMetaNetAQAPSO +try: # AdvancedEnhancedAdaptiveMetaNetAQAPSO + from nevergrad.optimization.lama.AdvancedEnhancedAdaptiveMetaNetAQAPSO import ( + AdvancedEnhancedAdaptiveMetaNetAQAPSO, + ) lama_register["AdvancedEnhancedAdaptiveMetaNetAQAPSO"] = AdvancedEnhancedAdaptiveMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAAdvancedEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: # AdvancedEnhancedAdaptiveMetaNetAQAPSO print("AdvancedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 import AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 - - lama_register["AdvancedEnhancedDifferentialEvolutionLocalSearch_v55"] = AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55").set_name("LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55", register=True) -except Exception as e: +try: # AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 + from nevergrad.optimization.lama.AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 import ( + AdvancedEnhancedDifferentialEvolutionLocalSearch_v55, + ) + + lama_register["AdvancedEnhancedDifferentialEvolutionLocalSearch_v55"] = ( + AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55" + ).set_name("LLAMAAdvancedEnhancedDifferentialEvolutionLocalSearch_v55", register=True) +except Exception as e: # AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 print("AdvancedEnhancedDifferentialEvolutionLocalSearch_v55 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedEnhancedGuidedMassQGSA_v69 import AdvancedEnhancedEnhancedGuidedMassQGSA_v69 +try: # AdvancedEnhancedEnhancedGuidedMassQGSA_v69 + from nevergrad.optimization.lama.AdvancedEnhancedEnhancedGuidedMassQGSA_v69 import ( + AdvancedEnhancedEnhancedGuidedMassQGSA_v69, + ) lama_register["AdvancedEnhancedEnhancedGuidedMassQGSA_v69"] = AdvancedEnhancedEnhancedGuidedMassQGSA_v69 - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69").set_name("LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69" + ).set_name("LLAMAAdvancedEnhancedEnhancedGuidedMassQGSA_v69", register=True) +except Exception as e: # AdvancedEnhancedEnhancedGuidedMassQGSA_v69 print("AdvancedEnhancedEnhancedGuidedMassQGSA_v69 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedGuidedMassQGSA_v65 import AdvancedEnhancedGuidedMassQGSA_v65 +try: # AdvancedEnhancedGuidedMassQGSA_v65 + from nevergrad.optimization.lama.AdvancedEnhancedGuidedMassQGSA_v65 import ( + AdvancedEnhancedGuidedMassQGSA_v65, + ) lama_register["AdvancedEnhancedGuidedMassQGSA_v65"] = AdvancedEnhancedGuidedMassQGSA_v65 - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedGuidedMassQGSA_v65 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65").set_name("LLAMAAdvancedEnhancedGuidedMassQGSA_v65", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedGuidedMassQGSA_v65 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedGuidedMassQGSA_v65" + ).set_name("LLAMAAdvancedEnhancedGuidedMassQGSA_v65", register=True) +except Exception as e: # AdvancedEnhancedGuidedMassQGSA_v65 print("AdvancedEnhancedGuidedMassQGSA_v65 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizer import AdvancedEnhancedHybridMetaHeuristicOptimizer - - lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizer"] = AdvancedEnhancedHybridMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer", register=True) -except Exception as e: +try: # AdvancedEnhancedHybridMetaHeuristicOptimizer + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizer import ( + AdvancedEnhancedHybridMetaHeuristicOptimizer, + ) + + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizer"] = ( + AdvancedEnhancedHybridMetaHeuristicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: # AdvancedEnhancedHybridMetaHeuristicOptimizer print("AdvancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizerV16 import AdvancedEnhancedHybridMetaHeuristicOptimizerV16 - - lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizerV16"] = AdvancedEnhancedHybridMetaHeuristicOptimizerV16 - res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16 = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16").set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16", register=True) -except Exception as e: +try: # AdvancedEnhancedHybridMetaHeuristicOptimizerV16 + from nevergrad.optimization.lama.AdvancedEnhancedHybridMetaHeuristicOptimizerV16 import ( + AdvancedEnhancedHybridMetaHeuristicOptimizerV16, + ) + + lama_register["AdvancedEnhancedHybridMetaHeuristicOptimizerV16"] = ( + AdvancedEnhancedHybridMetaHeuristicOptimizerV16 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16 = NonObjectOptimizer( + method="LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16" + ).set_name("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16", register=True) +except Exception as e: # AdvancedEnhancedHybridMetaHeuristicOptimizerV16 print("AdvancedEnhancedHybridMetaHeuristicOptimizerV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedExplorativeConvergenceEnhancer import AdvancedExplorativeConvergenceEnhancer +try: # AdvancedExplorativeConvergenceEnhancer + from nevergrad.optimization.lama.AdvancedExplorativeConvergenceEnhancer import ( + AdvancedExplorativeConvergenceEnhancer, + ) lama_register["AdvancedExplorativeConvergenceEnhancer"] = AdvancedExplorativeConvergenceEnhancer - res = NonObjectOptimizer(method="LLAMAAdvancedExplorativeConvergenceEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedExplorativeConvergenceEnhancer = NonObjectOptimizer(method="LLAMAAdvancedExplorativeConvergenceEnhancer").set_name("LLAMAAdvancedExplorativeConvergenceEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedExplorativeConvergenceEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedExplorativeConvergenceEnhancer = NonObjectOptimizer( + method="LLAMAAdvancedExplorativeConvergenceEnhancer" + ).set_name("LLAMAAdvancedExplorativeConvergenceEnhancer", register=True) +except Exception as e: # AdvancedExplorativeConvergenceEnhancer print("AdvancedExplorativeConvergenceEnhancer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedFireworkAlgorithmWithAdaptiveMutation import AdvancedFireworkAlgorithmWithAdaptiveMutation - - lama_register["AdvancedFireworkAlgorithmWithAdaptiveMutation"] = AdvancedFireworkAlgorithmWithAdaptiveMutation - res = NonObjectOptimizer(method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer(method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation").set_name("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation", register=True) -except Exception as e: +try: # AdvancedFireworkAlgorithmWithAdaptiveMutation + from nevergrad.optimization.lama.AdvancedFireworkAlgorithmWithAdaptiveMutation import ( + AdvancedFireworkAlgorithmWithAdaptiveMutation, + ) + + lama_register["AdvancedFireworkAlgorithmWithAdaptiveMutation"] = ( + AdvancedFireworkAlgorithmWithAdaptiveMutation + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( + method="LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation" + ).set_name("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation", register=True) +except Exception as e: # AdvancedFireworkAlgorithmWithAdaptiveMutation print("AdvancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) -try: +try: # AdvancedFocusedAdaptiveOptimizer from nevergrad.optimization.lama.AdvancedFocusedAdaptiveOptimizer import AdvancedFocusedAdaptiveOptimizer lama_register["AdvancedFocusedAdaptiveOptimizer"] = AdvancedFocusedAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedFocusedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAAdvancedFocusedAdaptiveOptimizer").set_name("LLAMAAdvancedFocusedAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedFocusedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedFocusedAdaptiveOptimizer" + ).set_name("LLAMAAdvancedFocusedAdaptiveOptimizer", register=True) +except Exception as e: # AdvancedFocusedAdaptiveOptimizer print("AdvancedFocusedAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedGlobalClimbingOptimizerV4 import AdvancedGlobalClimbingOptimizerV4 +try: # AdvancedGlobalClimbingOptimizerV4 + from nevergrad.optimization.lama.AdvancedGlobalClimbingOptimizerV4 import ( + AdvancedGlobalClimbingOptimizerV4, + ) lama_register["AdvancedGlobalClimbingOptimizerV4"] = AdvancedGlobalClimbingOptimizerV4 - res = NonObjectOptimizer(method="LLAMAAdvancedGlobalClimbingOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedGlobalClimbingOptimizerV4 = NonObjectOptimizer(method="LLAMAAdvancedGlobalClimbingOptimizerV4").set_name("LLAMAAdvancedGlobalClimbingOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedGlobalClimbingOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedGlobalClimbingOptimizerV4 = NonObjectOptimizer( + method="LLAMAAdvancedGlobalClimbingOptimizerV4" + ).set_name("LLAMAAdvancedGlobalClimbingOptimizerV4", register=True) +except Exception as e: # AdvancedGlobalClimbingOptimizerV4 print("AdvancedGlobalClimbingOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedGlobalStructureAwareOptimizerV3 import AdvancedGlobalStructureAwareOptimizerV3 +try: # AdvancedGlobalStructureAwareOptimizerV3 + from nevergrad.optimization.lama.AdvancedGlobalStructureAwareOptimizerV3 import ( + AdvancedGlobalStructureAwareOptimizerV3, + ) lama_register["AdvancedGlobalStructureAwareOptimizerV3"] = AdvancedGlobalStructureAwareOptimizerV3 - res = NonObjectOptimizer(method="LLAMAAdvancedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer(method="LLAMAAdvancedGlobalStructureAwareOptimizerV3").set_name("LLAMAAdvancedGlobalStructureAwareOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( + method="LLAMAAdvancedGlobalStructureAwareOptimizerV3" + ).set_name("LLAMAAdvancedGlobalStructureAwareOptimizerV3", register=True) +except Exception as e: # AdvancedGlobalStructureAwareOptimizerV3 print("AdvancedGlobalStructureAwareOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration import AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration - - lama_register["AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration"] = AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration - res = NonObjectOptimizer(method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration = NonObjectOptimizer(method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration").set_name("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration", register=True) -except Exception as e: +try: # AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration + from nevergrad.optimization.lama.AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration import ( + AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration, + ) + + lama_register["AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration"] = ( + AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration = NonObjectOptimizer( + method="LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration" + ).set_name("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration", register=True) +except Exception as e: # AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration print("AdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategy import AdvancedGradientEvolutionStrategy +try: # AdvancedGradientEvolutionStrategy + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategy import ( + AdvancedGradientEvolutionStrategy, + ) lama_register["AdvancedGradientEvolutionStrategy"] = AdvancedGradientEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedGradientEvolutionStrategy = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategy").set_name("LLAMAAdvancedGradientEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedGradientEvolutionStrategy = NonObjectOptimizer( + method="LLAMAAdvancedGradientEvolutionStrategy" + ).set_name("LLAMAAdvancedGradientEvolutionStrategy", register=True) +except Exception as e: # AdvancedGradientEvolutionStrategy print("AdvancedGradientEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategyV2 import AdvancedGradientEvolutionStrategyV2 +try: # AdvancedGradientEvolutionStrategyV2 + from nevergrad.optimization.lama.AdvancedGradientEvolutionStrategyV2 import ( + AdvancedGradientEvolutionStrategyV2, + ) lama_register["AdvancedGradientEvolutionStrategyV2"] = AdvancedGradientEvolutionStrategyV2 - res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedGradientEvolutionStrategyV2 = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategyV2").set_name("LLAMAAdvancedGradientEvolutionStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedGradientEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedGradientEvolutionStrategyV2 = NonObjectOptimizer( + method="LLAMAAdvancedGradientEvolutionStrategyV2" + ).set_name("LLAMAAdvancedGradientEvolutionStrategyV2", register=True) +except Exception as e: # AdvancedGradientEvolutionStrategyV2 print("AdvancedGradientEvolutionStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHarmonyMemeticOptimization import AdvancedHarmonyMemeticOptimization +try: # AdvancedHarmonyMemeticOptimization + from nevergrad.optimization.lama.AdvancedHarmonyMemeticOptimization import ( + AdvancedHarmonyMemeticOptimization, + ) lama_register["AdvancedHarmonyMemeticOptimization"] = AdvancedHarmonyMemeticOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedHarmonyMemeticOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHarmonyMemeticOptimization = NonObjectOptimizer(method="LLAMAAdvancedHarmonyMemeticOptimization").set_name("LLAMAAdvancedHarmonyMemeticOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHarmonyMemeticOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHarmonyMemeticOptimization = NonObjectOptimizer( + method="LLAMAAdvancedHarmonyMemeticOptimization" + ).set_name("LLAMAAdvancedHarmonyMemeticOptimization", register=True) +except Exception as e: # AdvancedHarmonyMemeticOptimization print("AdvancedHarmonyMemeticOptimization can not be imported: ", e) -try: +try: # AdvancedHarmonySearch from nevergrad.optimization.lama.AdvancedHarmonySearch import AdvancedHarmonySearch lama_register["AdvancedHarmonySearch"] = AdvancedHarmonySearch - res = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch").set_name("LLAMAAdvancedHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHarmonySearch = NonObjectOptimizer(method="LLAMAAdvancedHarmonySearch").set_name( + "LLAMAAdvancedHarmonySearch", register=True + ) +except Exception as e: # AdvancedHarmonySearch print("AdvancedHarmonySearch can not be imported: ", e) -try: +try: # AdvancedHybridAdaptiveDE from nevergrad.optimization.lama.AdvancedHybridAdaptiveDE import AdvancedHybridAdaptiveDE lama_register["AdvancedHybridAdaptiveDE"] = AdvancedHybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE").set_name("LLAMAAdvancedHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveDE").set_name( + "LLAMAAdvancedHybridAdaptiveDE", register=True + ) +except Exception as e: # AdvancedHybridAdaptiveDE print("AdvancedHybridAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridAdaptiveOptimization import AdvancedHybridAdaptiveOptimization +try: # AdvancedHybridAdaptiveOptimization + from nevergrad.optimization.lama.AdvancedHybridAdaptiveOptimization import ( + AdvancedHybridAdaptiveOptimization, + ) lama_register["AdvancedHybridAdaptiveOptimization"] = AdvancedHybridAdaptiveOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveOptimization").set_name("LLAMAAdvancedHybridAdaptiveOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAAdvancedHybridAdaptiveOptimization" + ).set_name("LLAMAAdvancedHybridAdaptiveOptimization", register=True) +except Exception as e: # AdvancedHybridAdaptiveOptimization print("AdvancedHybridAdaptiveOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 import AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 - - lama_register["AdvancedHybridCovarianceMatrixDifferentialEvolutionV3"] = AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) -except Exception as e: +try: # AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 + from nevergrad.optimization.lama.AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( + AdvancedHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["AdvancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAAdvancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: # AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 print("AdvancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridDEPSOWithAdaptiveRestarts import AdvancedHybridDEPSOWithAdaptiveRestarts +try: # AdvancedHybridDEPSOWithAdaptiveRestarts + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithAdaptiveRestarts import ( + AdvancedHybridDEPSOWithAdaptiveRestarts, + ) lama_register["AdvancedHybridDEPSOWithAdaptiveRestarts"] = AdvancedHybridDEPSOWithAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts").set_name("LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts" + ).set_name("LLAMAAdvancedHybridDEPSOWithAdaptiveRestarts", register=True) +except Exception as e: # AdvancedHybridDEPSOWithAdaptiveRestarts print("AdvancedHybridDEPSOWithAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridDEPSOWithDynamicAdaptationAndRestart import AdvancedHybridDEPSOWithDynamicAdaptationAndRestart - - lama_register["AdvancedHybridDEPSOWithDynamicAdaptationAndRestart"] = AdvancedHybridDEPSOWithDynamicAdaptationAndRestart - res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart").set_name("LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart", register=True) -except Exception as e: +try: # AdvancedHybridDEPSOWithDynamicAdaptationAndRestart + from nevergrad.optimization.lama.AdvancedHybridDEPSOWithDynamicAdaptationAndRestart import ( + AdvancedHybridDEPSOWithDynamicAdaptationAndRestart, + ) + + lama_register["AdvancedHybridDEPSOWithDynamicAdaptationAndRestart"] = ( + AdvancedHybridDEPSOWithDynamicAdaptationAndRestart + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart = NonObjectOptimizer( + method="LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart" + ).set_name("LLAMAAdvancedHybridDEPSOWithDynamicAdaptationAndRestart", register=True) +except Exception as e: # AdvancedHybridDEPSOWithDynamicAdaptationAndRestart print("AdvancedHybridDEPSOWithDynamicAdaptationAndRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridExplorationExploitationOptimizer import AdvancedHybridExplorationExploitationOptimizer - - lama_register["AdvancedHybridExplorationExploitationOptimizer"] = AdvancedHybridExplorationExploitationOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedHybridExplorationExploitationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridExplorationExploitationOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridExplorationExploitationOptimizer").set_name("LLAMAAdvancedHybridExplorationExploitationOptimizer", register=True) -except Exception as e: +try: # AdvancedHybridExplorationExploitationOptimizer + from nevergrad.optimization.lama.AdvancedHybridExplorationExploitationOptimizer import ( + AdvancedHybridExplorationExploitationOptimizer, + ) + + lama_register["AdvancedHybridExplorationExploitationOptimizer"] = ( + AdvancedHybridExplorationExploitationOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridExplorationExploitationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridExplorationExploitationOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedHybridExplorationExploitationOptimizer" + ).set_name("LLAMAAdvancedHybridExplorationExploitationOptimizer", register=True) +except Exception as e: # AdvancedHybridExplorationExploitationOptimizer print("AdvancedHybridExplorationExploitationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridLocalOptimizationDE import AdvancedHybridLocalOptimizationDE +try: # AdvancedHybridLocalOptimizationDE + from nevergrad.optimization.lama.AdvancedHybridLocalOptimizationDE import ( + AdvancedHybridLocalOptimizationDE, + ) lama_register["AdvancedHybridLocalOptimizationDE"] = AdvancedHybridLocalOptimizationDE - res = NonObjectOptimizer(method="LLAMAAdvancedHybridLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridLocalOptimizationDE = NonObjectOptimizer(method="LLAMAAdvancedHybridLocalOptimizationDE").set_name("LLAMAAdvancedHybridLocalOptimizationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridLocalOptimizationDE = NonObjectOptimizer( + method="LLAMAAdvancedHybridLocalOptimizationDE" + ).set_name("LLAMAAdvancedHybridLocalOptimizationDE", register=True) +except Exception as e: # AdvancedHybridLocalOptimizationDE print("AdvancedHybridLocalOptimizationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridMetaHeuristicOptimizer import AdvancedHybridMetaHeuristicOptimizer +try: # AdvancedHybridMetaHeuristicOptimizer + from nevergrad.optimization.lama.AdvancedHybridMetaHeuristicOptimizer import ( + AdvancedHybridMetaHeuristicOptimizer, + ) lama_register["AdvancedHybridMetaHeuristicOptimizer"] = AdvancedHybridMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaHeuristicOptimizer").set_name("LLAMAAdvancedHybridMetaHeuristicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: # AdvancedHybridMetaHeuristicOptimizer print("AdvancedHybridMetaHeuristicOptimizer can not be imported: ", e) -try: +try: # AdvancedHybridMetaheuristic from nevergrad.optimization.lama.AdvancedHybridMetaheuristic import AdvancedHybridMetaheuristic lama_register["AdvancedHybridMetaheuristic"] = AdvancedHybridMetaheuristic - res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridMetaheuristic = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic").set_name("LLAMAAdvancedHybridMetaheuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridMetaheuristic = NonObjectOptimizer(method="LLAMAAdvancedHybridMetaheuristic").set_name( + "LLAMAAdvancedHybridMetaheuristic", register=True + ) +except Exception as e: # AdvancedHybridMetaheuristic print("AdvancedHybridMetaheuristic can not be imported: ", e) -try: +try: # AdvancedHybridOptimization from nevergrad.optimization.lama.AdvancedHybridOptimization import AdvancedHybridOptimization lama_register["AdvancedHybridOptimization"] = AdvancedHybridOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization").set_name("LLAMAAdvancedHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridOptimization = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimization").set_name( + "LLAMAAdvancedHybridOptimization", register=True + ) +except Exception as e: # AdvancedHybridOptimization print("AdvancedHybridOptimization can not be imported: ", e) -try: +try: # AdvancedHybridOptimizer from nevergrad.optimization.lama.AdvancedHybridOptimizer import AdvancedHybridOptimizer lama_register["AdvancedHybridOptimizer"] = AdvancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer").set_name("LLAMAAdvancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedHybridOptimizer").set_name( + "LLAMAAdvancedHybridOptimizer", register=True + ) +except Exception as e: # AdvancedHybridOptimizer print("AdvancedHybridOptimizer can not be imported: ", e) -try: +try: # AdvancedHybridQuantumAdaptiveDE from nevergrad.optimization.lama.AdvancedHybridQuantumAdaptiveDE import AdvancedHybridQuantumAdaptiveDE lama_register["AdvancedHybridQuantumAdaptiveDE"] = AdvancedHybridQuantumAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedHybridQuantumAdaptiveDE").set_name("LLAMAAdvancedHybridQuantumAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedHybridQuantumAdaptiveDE" + ).set_name("LLAMAAdvancedHybridQuantumAdaptiveDE", register=True) +except Exception as e: # AdvancedHybridQuantumAdaptiveDE print("AdvancedHybridQuantumAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithAdaptiveMemory import AdvancedHybridSimulatedAnnealingWithAdaptiveMemory - - lama_register["AdvancedHybridSimulatedAnnealingWithAdaptiveMemory"] = AdvancedHybridSimulatedAnnealingWithAdaptiveMemory - res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory").set_name("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory", register=True) -except Exception as e: +try: # AdvancedHybridSimulatedAnnealingWithAdaptiveMemory + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithAdaptiveMemory import ( + AdvancedHybridSimulatedAnnealingWithAdaptiveMemory, + ) + + lama_register["AdvancedHybridSimulatedAnnealingWithAdaptiveMemory"] = ( + AdvancedHybridSimulatedAnnealingWithAdaptiveMemory + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory = NonObjectOptimizer( + method="LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory" + ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory", register=True) +except Exception as e: # AdvancedHybridSimulatedAnnealingWithAdaptiveMemory print("AdvancedHybridSimulatedAnnealingWithAdaptiveMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithGuidedExploration import AdvancedHybridSimulatedAnnealingWithGuidedExploration - - lama_register["AdvancedHybridSimulatedAnnealingWithGuidedExploration"] = AdvancedHybridSimulatedAnnealingWithGuidedExploration - res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration").set_name("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration", register=True) -except Exception as e: +try: # AdvancedHybridSimulatedAnnealingWithGuidedExploration + from nevergrad.optimization.lama.AdvancedHybridSimulatedAnnealingWithGuidedExploration import ( + AdvancedHybridSimulatedAnnealingWithGuidedExploration, + ) + + lama_register["AdvancedHybridSimulatedAnnealingWithGuidedExploration"] = ( + AdvancedHybridSimulatedAnnealingWithGuidedExploration + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration = NonObjectOptimizer( + method="LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration" + ).set_name("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration", register=True) +except Exception as e: # AdvancedHybridSimulatedAnnealingWithGuidedExploration print("AdvancedHybridSimulatedAnnealingWithGuidedExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedImprovedMetaHeuristicOptimizer import AdvancedImprovedMetaHeuristicOptimizer +try: # AdvancedImprovedMetaHeuristicOptimizer + from nevergrad.optimization.lama.AdvancedImprovedMetaHeuristicOptimizer import ( + AdvancedImprovedMetaHeuristicOptimizer, + ) lama_register["AdvancedImprovedMetaHeuristicOptimizer"] = AdvancedImprovedMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedImprovedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedImprovedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAAdvancedImprovedMetaHeuristicOptimizer").set_name("LLAMAAdvancedImprovedMetaHeuristicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedImprovedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedImprovedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedImprovedMetaHeuristicOptimizer" + ).set_name("LLAMAAdvancedImprovedMetaHeuristicOptimizer", register=True) +except Exception as e: # AdvancedImprovedMetaHeuristicOptimizer print("AdvancedImprovedMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV5 import AdvancedIslandEvolutionStrategyV5 +try: # AdvancedIslandEvolutionStrategyV5 + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV5 import ( + AdvancedIslandEvolutionStrategyV5, + ) lama_register["AdvancedIslandEvolutionStrategyV5"] = AdvancedIslandEvolutionStrategyV5 - res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedIslandEvolutionStrategyV5 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV5").set_name("LLAMAAdvancedIslandEvolutionStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedIslandEvolutionStrategyV5 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV5" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV5", register=True) +except Exception as e: # AdvancedIslandEvolutionStrategyV5 print("AdvancedIslandEvolutionStrategyV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV8 import AdvancedIslandEvolutionStrategyV8 +try: # AdvancedIslandEvolutionStrategyV8 + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV8 import ( + AdvancedIslandEvolutionStrategyV8, + ) lama_register["AdvancedIslandEvolutionStrategyV8"] = AdvancedIslandEvolutionStrategyV8 - res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedIslandEvolutionStrategyV8 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV8").set_name("LLAMAAdvancedIslandEvolutionStrategyV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedIslandEvolutionStrategyV8 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV8" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV8", register=True) +except Exception as e: # AdvancedIslandEvolutionStrategyV8 print("AdvancedIslandEvolutionStrategyV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV9 import AdvancedIslandEvolutionStrategyV9 +try: # AdvancedIslandEvolutionStrategyV9 + from nevergrad.optimization.lama.AdvancedIslandEvolutionStrategyV9 import ( + AdvancedIslandEvolutionStrategyV9, + ) lama_register["AdvancedIslandEvolutionStrategyV9"] = AdvancedIslandEvolutionStrategyV9 - res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedIslandEvolutionStrategyV9 = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV9").set_name("LLAMAAdvancedIslandEvolutionStrategyV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedIslandEvolutionStrategyV9 = NonObjectOptimizer( + method="LLAMAAdvancedIslandEvolutionStrategyV9" + ).set_name("LLAMAAdvancedIslandEvolutionStrategyV9", register=True) +except Exception as e: # AdvancedIslandEvolutionStrategyV9 print("AdvancedIslandEvolutionStrategyV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMemeticQuantumDifferentialOptimizer import AdvancedMemeticQuantumDifferentialOptimizer +try: # AdvancedMemeticQuantumDifferentialOptimizer + from nevergrad.optimization.lama.AdvancedMemeticQuantumDifferentialOptimizer import ( + AdvancedMemeticQuantumDifferentialOptimizer, + ) lama_register["AdvancedMemeticQuantumDifferentialOptimizer"] = AdvancedMemeticQuantumDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer").set_name("LLAMAAdvancedMemeticQuantumDifferentialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMemeticQuantumDifferentialOptimizer" + ).set_name("LLAMAAdvancedMemeticQuantumDifferentialOptimizer", register=True) +except Exception as e: # AdvancedMemeticQuantumDifferentialOptimizer print("AdvancedMemeticQuantumDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMemoryAdaptiveStrategyV50 import AdvancedMemoryAdaptiveStrategyV50 +try: # AdvancedMemoryAdaptiveStrategyV50 + from nevergrad.optimization.lama.AdvancedMemoryAdaptiveStrategyV50 import ( + AdvancedMemoryAdaptiveStrategyV50, + ) lama_register["AdvancedMemoryAdaptiveStrategyV50"] = AdvancedMemoryAdaptiveStrategyV50 - res = NonObjectOptimizer(method="LLAMAAdvancedMemoryAdaptiveStrategyV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMemoryAdaptiveStrategyV50 = NonObjectOptimizer(method="LLAMAAdvancedMemoryAdaptiveStrategyV50").set_name("LLAMAAdvancedMemoryAdaptiveStrategyV50", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMemoryAdaptiveStrategyV50")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMemoryAdaptiveStrategyV50 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryAdaptiveStrategyV50" + ).set_name("LLAMAAdvancedMemoryAdaptiveStrategyV50", register=True) +except Exception as e: # AdvancedMemoryAdaptiveStrategyV50 print("AdvancedMemoryAdaptiveStrategyV50 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMemoryEnhancedHybridOptimizer import AdvancedMemoryEnhancedHybridOptimizer +try: # AdvancedMemoryEnhancedHybridOptimizer + from nevergrad.optimization.lama.AdvancedMemoryEnhancedHybridOptimizer import ( + AdvancedMemoryEnhancedHybridOptimizer, + ) lama_register["AdvancedMemoryEnhancedHybridOptimizer"] = AdvancedMemoryEnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMemoryEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMemoryEnhancedHybridOptimizer").set_name("LLAMAAdvancedMemoryEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMemoryEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMemoryEnhancedHybridOptimizer" + ).set_name("LLAMAAdvancedMemoryEnhancedHybridOptimizer", register=True) +except Exception as e: # AdvancedMemoryEnhancedHybridOptimizer print("AdvancedMemoryEnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMemoryGuidedAdaptiveStrategyV68 import AdvancedMemoryGuidedAdaptiveStrategyV68 +try: # AdvancedMemoryGuidedAdaptiveStrategyV68 + from nevergrad.optimization.lama.AdvancedMemoryGuidedAdaptiveStrategyV68 import ( + AdvancedMemoryGuidedAdaptiveStrategyV68, + ) lama_register["AdvancedMemoryGuidedAdaptiveStrategyV68"] = AdvancedMemoryGuidedAdaptiveStrategyV68 - res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68 = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68").set_name("LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68" + ).set_name("LLAMAAdvancedMemoryGuidedAdaptiveStrategyV68", register=True) +except Exception as e: # AdvancedMemoryGuidedAdaptiveStrategyV68 print("AdvancedMemoryGuidedAdaptiveStrategyV68 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMemoryGuidedDualStrategyV80 import AdvancedMemoryGuidedDualStrategyV80 +try: # AdvancedMemoryGuidedDualStrategyV80 + from nevergrad.optimization.lama.AdvancedMemoryGuidedDualStrategyV80 import ( + AdvancedMemoryGuidedDualStrategyV80, + ) lama_register["AdvancedMemoryGuidedDualStrategyV80"] = AdvancedMemoryGuidedDualStrategyV80 - res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedDualStrategyV80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMemoryGuidedDualStrategyV80 = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedDualStrategyV80").set_name("LLAMAAdvancedMemoryGuidedDualStrategyV80", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMemoryGuidedDualStrategyV80")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMemoryGuidedDualStrategyV80 = NonObjectOptimizer( + method="LLAMAAdvancedMemoryGuidedDualStrategyV80" + ).set_name("LLAMAAdvancedMemoryGuidedDualStrategyV80", register=True) +except Exception as e: # AdvancedMemoryGuidedDualStrategyV80 print("AdvancedMemoryGuidedDualStrategyV80 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMultiModalAdaptiveOptimizer import AdvancedMultiModalAdaptiveOptimizer +try: # AdvancedMultiModalAdaptiveOptimizer + from nevergrad.optimization.lama.AdvancedMultiModalAdaptiveOptimizer import ( + AdvancedMultiModalAdaptiveOptimizer, + ) lama_register["AdvancedMultiModalAdaptiveOptimizer"] = AdvancedMultiModalAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMultiModalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAAdvancedMultiModalAdaptiveOptimizer").set_name("LLAMAAdvancedMultiModalAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedMultiModalAdaptiveOptimizer" + ).set_name("LLAMAAdvancedMultiModalAdaptiveOptimizer", register=True) +except Exception as e: # AdvancedMultiModalAdaptiveOptimizer print("AdvancedMultiModalAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedMultiStrategySelfAdaptiveDE import AdvancedMultiStrategySelfAdaptiveDE +try: # AdvancedMultiStrategySelfAdaptiveDE + from nevergrad.optimization.lama.AdvancedMultiStrategySelfAdaptiveDE import ( + AdvancedMultiStrategySelfAdaptiveDE, + ) lama_register["AdvancedMultiStrategySelfAdaptiveDE"] = AdvancedMultiStrategySelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMAAdvancedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAAdvancedMultiStrategySelfAdaptiveDE").set_name("LLAMAAdvancedMultiStrategySelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAAdvancedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAAdvancedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: # AdvancedMultiStrategySelfAdaptiveDE print("AdvancedMultiStrategySelfAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedNicheDifferentialParticleSwarmOptimizer import AdvancedNicheDifferentialParticleSwarmOptimizer - - lama_register["AdvancedNicheDifferentialParticleSwarmOptimizer"] = AdvancedNicheDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # AdvancedNicheDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.AdvancedNicheDifferentialParticleSwarmOptimizer import ( + AdvancedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["AdvancedNicheDifferentialParticleSwarmOptimizer"] = ( + AdvancedNicheDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAAdvancedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # AdvancedNicheDifferentialParticleSwarmOptimizer print("AdvancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE import AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE - - lama_register["AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE - res = NonObjectOptimizer(method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) -except Exception as e: +try: # AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE + from nevergrad.optimization.lama.AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: # AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE print("AdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedOptimalHybridDifferentialAnnealingOptimizer import AdvancedOptimalHybridDifferentialAnnealingOptimizer - - lama_register["AdvancedOptimalHybridDifferentialAnnealingOptimizer"] = AdvancedOptimalHybridDifferentialAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer").set_name("LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer", register=True) -except Exception as e: +try: # AdvancedOptimalHybridDifferentialAnnealingOptimizer + from nevergrad.optimization.lama.AdvancedOptimalHybridDifferentialAnnealingOptimizer import ( + AdvancedOptimalHybridDifferentialAnnealingOptimizer, + ) + + lama_register["AdvancedOptimalHybridDifferentialAnnealingOptimizer"] = ( + AdvancedOptimalHybridDifferentialAnnealingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer" + ).set_name("LLAMAAdvancedOptimalHybridDifferentialAnnealingOptimizer", register=True) +except Exception as e: # AdvancedOptimalHybridDifferentialAnnealingOptimizer print("AdvancedOptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedParallelDifferentialEvolution import AdvancedParallelDifferentialEvolution +try: # AdvancedParallelDifferentialEvolution + from nevergrad.optimization.lama.AdvancedParallelDifferentialEvolution import ( + AdvancedParallelDifferentialEvolution, + ) lama_register["AdvancedParallelDifferentialEvolution"] = AdvancedParallelDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdvancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedParallelDifferentialEvolution").set_name("LLAMAAdvancedParallelDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedParallelDifferentialEvolution" + ).set_name("LLAMAAdvancedParallelDifferentialEvolution", register=True) +except Exception as e: # AdvancedParallelDifferentialEvolution print("AdvancedParallelDifferentialEvolution can not be imported: ", e) -try: +try: # AdvancedPrecisionEvolver from nevergrad.optimization.lama.AdvancedPrecisionEvolver import AdvancedPrecisionEvolver lama_register["AdvancedPrecisionEvolver"] = AdvancedPrecisionEvolver - res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedPrecisionEvolver = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver").set_name("LLAMAAdvancedPrecisionEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedPrecisionEvolver = NonObjectOptimizer(method="LLAMAAdvancedPrecisionEvolver").set_name( + "LLAMAAdvancedPrecisionEvolver", register=True + ) +except Exception as e: # AdvancedPrecisionEvolver print("AdvancedPrecisionEvolver can not be imported: ", e) -try: +try: # AdvancedPrecisionGuidedStrategy from nevergrad.optimization.lama.AdvancedPrecisionGuidedStrategy import AdvancedPrecisionGuidedStrategy lama_register["AdvancedPrecisionGuidedStrategy"] = AdvancedPrecisionGuidedStrategy - res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionGuidedStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedPrecisionGuidedStrategy = NonObjectOptimizer(method="LLAMAAdvancedPrecisionGuidedStrategy").set_name("LLAMAAdvancedPrecisionGuidedStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedPrecisionGuidedStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedPrecisionGuidedStrategy = NonObjectOptimizer( + method="LLAMAAdvancedPrecisionGuidedStrategy" + ).set_name("LLAMAAdvancedPrecisionGuidedStrategy", register=True) +except Exception as e: # AdvancedPrecisionGuidedStrategy print("AdvancedPrecisionGuidedStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumCognitionTrajectoryOptimizerV29 import AdvancedQuantumCognitionTrajectoryOptimizerV29 - - lama_register["AdvancedQuantumCognitionTrajectoryOptimizerV29"] = AdvancedQuantumCognitionTrajectoryOptimizerV29 - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29 = NonObjectOptimizer(method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29").set_name("LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29", register=True) -except Exception as e: +try: # AdvancedQuantumCognitionTrajectoryOptimizerV29 + from nevergrad.optimization.lama.AdvancedQuantumCognitionTrajectoryOptimizerV29 import ( + AdvancedQuantumCognitionTrajectoryOptimizerV29, + ) + + lama_register["AdvancedQuantumCognitionTrajectoryOptimizerV29"] = ( + AdvancedQuantumCognitionTrajectoryOptimizerV29 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29 = NonObjectOptimizer( + method="LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29" + ).set_name("LLAMAAdvancedQuantumCognitionTrajectoryOptimizerV29", register=True) +except Exception as e: # AdvancedQuantumCognitionTrajectoryOptimizerV29 print("AdvancedQuantumCognitionTrajectoryOptimizerV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumControlledDiversityStrategy import AdvancedQuantumControlledDiversityStrategy +try: # AdvancedQuantumControlledDiversityStrategy + from nevergrad.optimization.lama.AdvancedQuantumControlledDiversityStrategy import ( + AdvancedQuantumControlledDiversityStrategy, + ) lama_register["AdvancedQuantumControlledDiversityStrategy"] = AdvancedQuantumControlledDiversityStrategy - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumControlledDiversityStrategy = NonObjectOptimizer(method="LLAMAAdvancedQuantumControlledDiversityStrategy").set_name("LLAMAAdvancedQuantumControlledDiversityStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumControlledDiversityStrategy = NonObjectOptimizer( + method="LLAMAAdvancedQuantumControlledDiversityStrategy" + ).set_name("LLAMAAdvancedQuantumControlledDiversityStrategy", register=True) +except Exception as e: # AdvancedQuantumControlledDiversityStrategy print("AdvancedQuantumControlledDiversityStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumCrossoverOptimizer import AdvancedQuantumCrossoverOptimizer +try: # AdvancedQuantumCrossoverOptimizer + from nevergrad.optimization.lama.AdvancedQuantumCrossoverOptimizer import ( + AdvancedQuantumCrossoverOptimizer, + ) lama_register["AdvancedQuantumCrossoverOptimizer"] = AdvancedQuantumCrossoverOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumCrossoverOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumCrossoverOptimizer").set_name("LLAMAAdvancedQuantumCrossoverOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumCrossoverOptimizer" + ).set_name("LLAMAAdvancedQuantumCrossoverOptimizer", register=True) +except Exception as e: # AdvancedQuantumCrossoverOptimizer print("AdvancedQuantumCrossoverOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart import AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart - - lama_register["AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart"] = AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart = NonObjectOptimizer(method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart").set_name("LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart", register=True) -except Exception as e: - print("AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart can not be imported: ", e) -try: +try: # AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart + from nevergrad.optimization.lama.AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart import ( + AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart, + ) + + lama_register["AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart"] = ( + AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart = NonObjectOptimizer( + method="LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart" + ).set_name( + "LLAMAAdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart", register=True + ) +except Exception as e: # AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart + print( + "AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart can not be imported: ", e + ) +try: # AdvancedQuantumGradientDescent from nevergrad.optimization.lama.AdvancedQuantumGradientDescent import AdvancedQuantumGradientDescent lama_register["AdvancedQuantumGradientDescent"] = AdvancedQuantumGradientDescent - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientDescent")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumGradientDescent = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientDescent").set_name("LLAMAAdvancedQuantumGradientDescent", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientDescent")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumGradientDescent = NonObjectOptimizer( + method="LLAMAAdvancedQuantumGradientDescent" + ).set_name("LLAMAAdvancedQuantumGradientDescent", register=True) +except Exception as e: # AdvancedQuantumGradientDescent print("AdvancedQuantumGradientDescent can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumGradientExplorationOptimization import AdvancedQuantumGradientExplorationOptimization - - lama_register["AdvancedQuantumGradientExplorationOptimization"] = AdvancedQuantumGradientExplorationOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientExplorationOptimization").set_name("LLAMAAdvancedQuantumGradientExplorationOptimization", register=True) -except Exception as e: +try: # AdvancedQuantumGradientExplorationOptimization + from nevergrad.optimization.lama.AdvancedQuantumGradientExplorationOptimization import ( + AdvancedQuantumGradientExplorationOptimization, + ) + + lama_register["AdvancedQuantumGradientExplorationOptimization"] = ( + AdvancedQuantumGradientExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumGradientExplorationOptimization" + ).set_name("LLAMAAdvancedQuantumGradientExplorationOptimization", register=True) +except Exception as e: # AdvancedQuantumGradientExplorationOptimization print("AdvancedQuantumGradientExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumHarmonicFeedbackOptimizer import AdvancedQuantumHarmonicFeedbackOptimizer +try: # AdvancedQuantumHarmonicFeedbackOptimizer + from nevergrad.optimization.lama.AdvancedQuantumHarmonicFeedbackOptimizer import ( + AdvancedQuantumHarmonicFeedbackOptimizer, + ) lama_register["AdvancedQuantumHarmonicFeedbackOptimizer"] = AdvancedQuantumHarmonicFeedbackOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer").set_name("LLAMAAdvancedQuantumHarmonicFeedbackOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAAdvancedQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: # AdvancedQuantumHarmonicFeedbackOptimizer print("AdvancedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumInfusedAdaptiveStrategyV3 import AdvancedQuantumInfusedAdaptiveStrategyV3 +try: # AdvancedQuantumInfusedAdaptiveStrategyV3 + from nevergrad.optimization.lama.AdvancedQuantumInfusedAdaptiveStrategyV3 import ( + AdvancedQuantumInfusedAdaptiveStrategyV3, + ) lama_register["AdvancedQuantumInfusedAdaptiveStrategyV3"] = AdvancedQuantumInfusedAdaptiveStrategyV3 - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3 = NonObjectOptimizer(method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3").set_name("LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3 = NonObjectOptimizer( + method="LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3" + ).set_name("LLAMAAdvancedQuantumInfusedAdaptiveStrategyV3", register=True) +except Exception as e: # AdvancedQuantumInfusedAdaptiveStrategyV3 print("AdvancedQuantumInfusedAdaptiveStrategyV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumMemeticDifferentialEvolution import AdvancedQuantumMemeticDifferentialEvolution +try: # AdvancedQuantumMemeticDifferentialEvolution + from nevergrad.optimization.lama.AdvancedQuantumMemeticDifferentialEvolution import ( + AdvancedQuantumMemeticDifferentialEvolution, + ) lama_register["AdvancedQuantumMemeticDifferentialEvolution"] = AdvancedQuantumMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAAdvancedQuantumMemeticDifferentialEvolution").set_name("LLAMAAdvancedQuantumMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAAdvancedQuantumMemeticDifferentialEvolution" + ).set_name("LLAMAAdvancedQuantumMemeticDifferentialEvolution", register=True) +except Exception as e: # AdvancedQuantumMemeticDifferentialEvolution print("AdvancedQuantumMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedQuantumStateCrossoverOptimization import AdvancedQuantumStateCrossoverOptimization +try: # AdvancedQuantumStateCrossoverOptimization + from nevergrad.optimization.lama.AdvancedQuantumStateCrossoverOptimization import ( + AdvancedQuantumStateCrossoverOptimization, + ) lama_register["AdvancedQuantumStateCrossoverOptimization"] = AdvancedQuantumStateCrossoverOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumStateCrossoverOptimization").set_name("LLAMAAdvancedQuantumStateCrossoverOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumStateCrossoverOptimization" + ).set_name("LLAMAAdvancedQuantumStateCrossoverOptimization", register=True) +except Exception as e: # AdvancedQuantumStateCrossoverOptimization print("AdvancedQuantumStateCrossoverOptimization can not be imported: ", e) -try: +try: # AdvancedQuantumSwarmOptimization from nevergrad.optimization.lama.AdvancedQuantumSwarmOptimization import AdvancedQuantumSwarmOptimization lama_register["AdvancedQuantumSwarmOptimization"] = AdvancedQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAAdvancedQuantumSwarmOptimization").set_name("LLAMAAdvancedQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAAdvancedQuantumSwarmOptimization" + ).set_name("LLAMAAdvancedQuantumSwarmOptimization", register=True) +except Exception as e: # AdvancedQuantumSwarmOptimization print("AdvancedQuantumSwarmOptimization can not be imported: ", e) -try: +try: # AdvancedQuantumVelocityOptimizer from nevergrad.optimization.lama.AdvancedQuantumVelocityOptimizer import AdvancedQuantumVelocityOptimizer lama_register["AdvancedQuantumVelocityOptimizer"] = AdvancedQuantumVelocityOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedQuantumVelocityOptimizer = NonObjectOptimizer(method="LLAMAAdvancedQuantumVelocityOptimizer").set_name("LLAMAAdvancedQuantumVelocityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedQuantumVelocityOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedQuantumVelocityOptimizer" + ).set_name("LLAMAAdvancedQuantumVelocityOptimizer", register=True) +except Exception as e: # AdvancedQuantumVelocityOptimizer print("AdvancedQuantumVelocityOptimizer can not be imported: ", e) -try: +try: # AdvancedRAMEDSv6 from nevergrad.optimization.lama.AdvancedRAMEDSv6 import AdvancedRAMEDSv6 lama_register["AdvancedRAMEDSv6"] = AdvancedRAMEDSv6 - res = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRAMEDSv6 = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6").set_name("LLAMAAdvancedRAMEDSv6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRAMEDSv6 = NonObjectOptimizer(method="LLAMAAdvancedRAMEDSv6").set_name( + "LLAMAAdvancedRAMEDSv6", register=True + ) +except Exception as e: # AdvancedRAMEDSv6 print("AdvancedRAMEDSv6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedAdaptiveMemoryEnhancedSearch import AdvancedRefinedAdaptiveMemoryEnhancedSearch +try: # AdvancedRefinedAdaptiveMemoryEnhancedSearch + from nevergrad.optimization.lama.AdvancedRefinedAdaptiveMemoryEnhancedSearch import ( + AdvancedRefinedAdaptiveMemoryEnhancedSearch, + ) lama_register["AdvancedRefinedAdaptiveMemoryEnhancedSearch"] = AdvancedRefinedAdaptiveMemoryEnhancedSearch - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch").set_name("LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMAAdvancedRefinedAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: # AdvancedRefinedAdaptiveMemoryEnhancedSearch print("AdvancedRefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus import AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus - - lama_register["AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) -except Exception as e: +try: # AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus + from nevergrad.optimization.lama.AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: # AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus print("AdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - - lama_register["AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) -except Exception as e: +try: # AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: # AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer print("AdvancedRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer import AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer - - lama_register["AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: +try: # AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( + AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( + AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAAdvancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer print("AdvancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedAnnealing import AdvancedRefinedGradientBoostedAnnealing +try: # AdvancedRefinedGradientBoostedAnnealing + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedAnnealing import ( + AdvancedRefinedGradientBoostedAnnealing, + ) lama_register["AdvancedRefinedGradientBoostedAnnealing"] = AdvancedRefinedGradientBoostedAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedAnnealing", register=True) +except Exception as e: # AdvancedRefinedGradientBoostedAnnealing print("AdvancedRefinedGradientBoostedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemoryAnnealing import AdvancedRefinedGradientBoostedMemoryAnnealing - - lama_register["AdvancedRefinedGradientBoostedMemoryAnnealing"] = AdvancedRefinedGradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing", register=True) -except Exception as e: +try: # AdvancedRefinedGradientBoostedMemoryAnnealing + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemoryAnnealing import ( + AdvancedRefinedGradientBoostedMemoryAnnealing, + ) + + lama_register["AdvancedRefinedGradientBoostedMemoryAnnealing"] = ( + AdvancedRefinedGradientBoostedMemoryAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # AdvancedRefinedGradientBoostedMemoryAnnealing print("AdvancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemorySimulatedAnnealing import AdvancedRefinedGradientBoostedMemorySimulatedAnnealing - - lama_register["AdvancedRefinedGradientBoostedMemorySimulatedAnnealing"] = AdvancedRefinedGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # AdvancedRefinedGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.AdvancedRefinedGradientBoostedMemorySimulatedAnnealing import ( + AdvancedRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["AdvancedRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + AdvancedRefinedGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # AdvancedRefinedGradientBoostedMemorySimulatedAnnealing print("AdvancedRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedHybridEvolutionaryAnnealingOptimizer import AdvancedRefinedHybridEvolutionaryAnnealingOptimizer - - lama_register["AdvancedRefinedHybridEvolutionaryAnnealingOptimizer"] = AdvancedRefinedHybridEvolutionaryAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer", register=True) -except Exception as e: +try: # AdvancedRefinedHybridEvolutionaryAnnealingOptimizer + from nevergrad.optimization.lama.AdvancedRefinedHybridEvolutionaryAnnealingOptimizer import ( + AdvancedRefinedHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["AdvancedRefinedHybridEvolutionaryAnnealingOptimizer"] = ( + AdvancedRefinedHybridEvolutionaryAnnealingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAAdvancedRefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: # AdvancedRefinedHybridEvolutionaryAnnealingOptimizer print("AdvancedRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 import AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 - - lama_register["AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51"] = AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 = NonObjectOptimizer(method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51").set_name("LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51", register=True) -except Exception as e: +try: # AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 + from nevergrad.optimization.lama.AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 import ( + AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51, + ) + + lama_register["AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51"] = ( + AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 = NonObjectOptimizer( + method="LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51" + ).set_name("LLAMAAdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51", register=True) +except Exception as e: # AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 print("AdvancedRefinedHyperRefinedDynamicPrecisionOptimizerV51 can not be imported: ", e) -try: +try: # AdvancedRefinedRAMEDSPro from nevergrad.optimization.lama.AdvancedRefinedRAMEDSPro import AdvancedRefinedRAMEDSPro lama_register["AdvancedRefinedRAMEDSPro"] = AdvancedRefinedRAMEDSPro - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedRAMEDSPro = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro").set_name("LLAMAAdvancedRefinedRAMEDSPro", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedRAMEDSPro = NonObjectOptimizer(method="LLAMAAdvancedRefinedRAMEDSPro").set_name( + "LLAMAAdvancedRefinedRAMEDSPro", register=True + ) +except Exception as e: # AdvancedRefinedRAMEDSPro print("AdvancedRefinedRAMEDSPro can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedSpiralSearchOptimizer import AdvancedRefinedSpiralSearchOptimizer +try: # AdvancedRefinedSpiralSearchOptimizer + from nevergrad.optimization.lama.AdvancedRefinedSpiralSearchOptimizer import ( + AdvancedRefinedSpiralSearchOptimizer, + ) lama_register["AdvancedRefinedSpiralSearchOptimizer"] = AdvancedRefinedSpiralSearchOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedSpiralSearchOptimizer = NonObjectOptimizer(method="LLAMAAdvancedRefinedSpiralSearchOptimizer").set_name("LLAMAAdvancedRefinedSpiralSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedSpiralSearchOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedRefinedSpiralSearchOptimizer" + ).set_name("LLAMAAdvancedRefinedSpiralSearchOptimizer", register=True) +except Exception as e: # AdvancedRefinedSpiralSearchOptimizer print("AdvancedRefinedSpiralSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 import AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 - - lama_register["AdvancedRefinedUltraEvolutionaryGradientOptimizerV29"] = AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 - res = NonObjectOptimizer(method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29 = NonObjectOptimizer(method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29").set_name("LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29", register=True) -except Exception as e: +try: # AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 + from nevergrad.optimization.lama.AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 import ( + AdvancedRefinedUltraEvolutionaryGradientOptimizerV29, + ) + + lama_register["AdvancedRefinedUltraEvolutionaryGradientOptimizerV29"] = ( + AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 + ) + # res = NonObjectOptimizer(method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29 = NonObjectOptimizer( + method="LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29" + ).set_name("LLAMAAdvancedRefinedUltraEvolutionaryGradientOptimizerV29", register=True) +except Exception as e: # AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 print("AdvancedRefinedUltraEvolutionaryGradientOptimizerV29 can not be imported: ", e) -try: +try: # AdvancedSelfAdaptiveDE_v2 from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v2 import AdvancedSelfAdaptiveDE_v2 lama_register["AdvancedSelfAdaptiveDE_v2"] = AdvancedSelfAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedSelfAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2").set_name("LLAMAAdvancedSelfAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedSelfAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v2").set_name( + "LLAMAAdvancedSelfAdaptiveDE_v2", register=True + ) +except Exception as e: # AdvancedSelfAdaptiveDE_v2 print("AdvancedSelfAdaptiveDE_v2 can not be imported: ", e) -try: +try: # AdvancedSelfAdaptiveDE_v3 from nevergrad.optimization.lama.AdvancedSelfAdaptiveDE_v3 import AdvancedSelfAdaptiveDE_v3 lama_register["AdvancedSelfAdaptiveDE_v3"] = AdvancedSelfAdaptiveDE_v3 - res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedSelfAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3").set_name("LLAMAAdvancedSelfAdaptiveDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedSelfAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAAdvancedSelfAdaptiveDE_v3").set_name( + "LLAMAAdvancedSelfAdaptiveDE_v3", register=True + ) +except Exception as e: # AdvancedSelfAdaptiveDE_v3 print("AdvancedSelfAdaptiveDE_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.AdvancedSpatialAdaptiveConvergenceOptimizer import AdvancedSpatialAdaptiveConvergenceOptimizer +try: # AdvancedSpatialAdaptiveConvergenceOptimizer + from nevergrad.optimization.lama.AdvancedSpatialAdaptiveConvergenceOptimizer import ( + AdvancedSpatialAdaptiveConvergenceOptimizer, + ) lama_register["AdvancedSpatialAdaptiveConvergenceOptimizer"] = AdvancedSpatialAdaptiveConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer = NonObjectOptimizer(method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer").set_name("LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer" + ).set_name("LLAMAAdvancedSpatialAdaptiveConvergenceOptimizer", register=True) +except Exception as e: # AdvancedSpatialAdaptiveConvergenceOptimizer print("AdvancedSpatialAdaptiveConvergenceOptimizer can not be imported: ", e) -try: +try: # AdvancedSpatialGradientOptimizer from nevergrad.optimization.lama.AdvancedSpatialGradientOptimizer import AdvancedSpatialGradientOptimizer lama_register["AdvancedSpatialGradientOptimizer"] = AdvancedSpatialGradientOptimizer - res = NonObjectOptimizer(method="LLAMAAdvancedSpatialGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedSpatialGradientOptimizer = NonObjectOptimizer(method="LLAMAAdvancedSpatialGradientOptimizer").set_name("LLAMAAdvancedSpatialGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedSpatialGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedSpatialGradientOptimizer = NonObjectOptimizer( + method="LLAMAAdvancedSpatialGradientOptimizer" + ).set_name("LLAMAAdvancedSpatialGradientOptimizer", register=True) +except Exception as e: # AdvancedSpatialGradientOptimizer print("AdvancedSpatialGradientOptimizer can not be imported: ", e) -try: +try: # AdvancedStrategicHybridDE from nevergrad.optimization.lama.AdvancedStrategicHybridDE import AdvancedStrategicHybridDE lama_register["AdvancedStrategicHybridDE"] = AdvancedStrategicHybridDE - res = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAdvancedStrategicHybridDE = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE").set_name("LLAMAAdvancedStrategicHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAdvancedStrategicHybridDE = NonObjectOptimizer(method="LLAMAAdvancedStrategicHybridDE").set_name( + "LLAMAAdvancedStrategicHybridDE", register=True + ) +except Exception as e: # AdvancedStrategicHybridDE print("AdvancedStrategicHybridDE can not be imported: ", e) -try: +try: # ArchiveEnhancedAdaptiveDE from nevergrad.optimization.lama.ArchiveEnhancedAdaptiveDE import ArchiveEnhancedAdaptiveDE lama_register["ArchiveEnhancedAdaptiveDE"] = ArchiveEnhancedAdaptiveDE - res = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAArchiveEnhancedAdaptiveDE = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE").set_name("LLAMAArchiveEnhancedAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAArchiveEnhancedAdaptiveDE = NonObjectOptimizer(method="LLAMAArchiveEnhancedAdaptiveDE").set_name( + "LLAMAArchiveEnhancedAdaptiveDE", register=True + ) +except Exception as e: # ArchiveEnhancedAdaptiveDE print("ArchiveEnhancedAdaptiveDE can not be imported: ", e) -try: +try: # AttenuatedAdaptiveEvolver from nevergrad.optimization.lama.AttenuatedAdaptiveEvolver import AttenuatedAdaptiveEvolver lama_register["AttenuatedAdaptiveEvolver"] = AttenuatedAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver").set_name("LLAMAAttenuatedAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMAAttenuatedAdaptiveEvolver").set_name( + "LLAMAAttenuatedAdaptiveEvolver", register=True + ) +except Exception as e: # AttenuatedAdaptiveEvolver print("AttenuatedAdaptiveEvolver can not be imported: ", e) -try: +try: # BalancedAdaptiveMemeticDE from nevergrad.optimization.lama.BalancedAdaptiveMemeticDE import BalancedAdaptiveMemeticDE lama_register["BalancedAdaptiveMemeticDE"] = BalancedAdaptiveMemeticDE - res = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedAdaptiveMemeticDE = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE").set_name("LLAMABalancedAdaptiveMemeticDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedAdaptiveMemeticDE = NonObjectOptimizer(method="LLAMABalancedAdaptiveMemeticDE").set_name( + "LLAMABalancedAdaptiveMemeticDE", register=True + ) +except Exception as e: # BalancedAdaptiveMemeticDE print("BalancedAdaptiveMemeticDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.BalancedCulturalDifferentialEvolution import BalancedCulturalDifferentialEvolution +try: # BalancedCulturalDifferentialEvolution + from nevergrad.optimization.lama.BalancedCulturalDifferentialEvolution import ( + BalancedCulturalDifferentialEvolution, + ) lama_register["BalancedCulturalDifferentialEvolution"] = BalancedCulturalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMABalancedCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMABalancedCulturalDifferentialEvolution").set_name("LLAMABalancedCulturalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMABalancedCulturalDifferentialEvolution" + ).set_name("LLAMABalancedCulturalDifferentialEvolution", register=True) +except Exception as e: # BalancedCulturalDifferentialEvolution print("BalancedCulturalDifferentialEvolution can not be imported: ", e) -try: +try: # BalancedDualStrategyAdaptiveDE from nevergrad.optimization.lama.BalancedDualStrategyAdaptiveDE import BalancedDualStrategyAdaptiveDE lama_register["BalancedDualStrategyAdaptiveDE"] = BalancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMABalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMABalancedDualStrategyAdaptiveDE").set_name("LLAMABalancedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMABalancedDualStrategyAdaptiveDE" + ).set_name("LLAMABalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # BalancedDualStrategyAdaptiveDE print("BalancedDualStrategyAdaptiveDE can not be imported: ", e) -try: +try: # BalancedDynamicQuantumLevySwarm from nevergrad.optimization.lama.BalancedDynamicQuantumLevySwarm import BalancedDynamicQuantumLevySwarm lama_register["BalancedDynamicQuantumLevySwarm"] = BalancedDynamicQuantumLevySwarm - res = NonObjectOptimizer(method="LLAMABalancedDynamicQuantumLevySwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedDynamicQuantumLevySwarm = NonObjectOptimizer(method="LLAMABalancedDynamicQuantumLevySwarm").set_name("LLAMABalancedDynamicQuantumLevySwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedDynamicQuantumLevySwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedDynamicQuantumLevySwarm = NonObjectOptimizer( + method="LLAMABalancedDynamicQuantumLevySwarm" + ).set_name("LLAMABalancedDynamicQuantumLevySwarm", register=True) +except Exception as e: # BalancedDynamicQuantumLevySwarm print("BalancedDynamicQuantumLevySwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.BalancedQuantumLevyDifferentialSearch import BalancedQuantumLevyDifferentialSearch +try: # BalancedQuantumLevyDifferentialSearch + from nevergrad.optimization.lama.BalancedQuantumLevyDifferentialSearch import ( + BalancedQuantumLevyDifferentialSearch, + ) lama_register["BalancedQuantumLevyDifferentialSearch"] = BalancedQuantumLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMABalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMABalancedQuantumLevyDifferentialSearch").set_name("LLAMABalancedQuantumLevyDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMABalancedQuantumLevyDifferentialSearch" + ).set_name("LLAMABalancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: # BalancedQuantumLevyDifferentialSearch print("BalancedQuantumLevyDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.BalancedQuantumLevySwarmOptimization import BalancedQuantumLevySwarmOptimization +try: # BalancedQuantumLevySwarmOptimization + from nevergrad.optimization.lama.BalancedQuantumLevySwarmOptimization import ( + BalancedQuantumLevySwarmOptimization, + ) lama_register["BalancedQuantumLevySwarmOptimization"] = BalancedQuantumLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMABalancedQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABalancedQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMABalancedQuantumLevySwarmOptimization").set_name("LLAMABalancedQuantumLevySwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABalancedQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABalancedQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMABalancedQuantumLevySwarmOptimization" + ).set_name("LLAMABalancedQuantumLevySwarmOptimization", register=True) +except Exception as e: # BalancedQuantumLevySwarmOptimization print("BalancedQuantumLevySwarmOptimization can not be imported: ", e) -try: +try: # BayesianAdaptiveMemeticSearch from nevergrad.optimization.lama.BayesianAdaptiveMemeticSearch import BayesianAdaptiveMemeticSearch lama_register["BayesianAdaptiveMemeticSearch"] = BayesianAdaptiveMemeticSearch - res = NonObjectOptimizer(method="LLAMABayesianAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMABayesianAdaptiveMemeticSearch = NonObjectOptimizer(method="LLAMABayesianAdaptiveMemeticSearch").set_name("LLAMABayesianAdaptiveMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMABayesianAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMABayesianAdaptiveMemeticSearch = NonObjectOptimizer( + method="LLAMABayesianAdaptiveMemeticSearch" + ).set_name("LLAMABayesianAdaptiveMemeticSearch", register=True) +except Exception as e: # BayesianAdaptiveMemeticSearch print("BayesianAdaptiveMemeticSearch can not be imported: ", e) -try: +try: # CAMSQSOB from nevergrad.optimization.lama.CAMSQSOB import CAMSQSOB lama_register["CAMSQSOB"] = CAMSQSOB - res = NonObjectOptimizer(method="LLAMACAMSQSOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMACAMSQSOB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMACAMSQSOB = NonObjectOptimizer(method="LLAMACAMSQSOB").set_name("LLAMACAMSQSOB", register=True) -except Exception as e: +except Exception as e: # CAMSQSOB print("CAMSQSOB can not be imported: ", e) -try: +try: # CGES from nevergrad.optimization.lama.CGES import CGES lama_register["CGES"] = CGES - res = NonObjectOptimizer(method="LLAMACGES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMACGES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMACGES = NonObjectOptimizer(method="LLAMACGES").set_name("LLAMACGES", register=True) -except Exception as e: +except Exception as e: # CGES print("CGES can not be imported: ", e) -try: +try: # CMADifferentialEvolutionPSO from nevergrad.optimization.lama.CMADifferentialEvolutionPSO import CMADifferentialEvolutionPSO lama_register["CMADifferentialEvolutionPSO"] = CMADifferentialEvolutionPSO - res = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACMADifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO").set_name("LLAMACMADifferentialEvolutionPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACMADifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMACMADifferentialEvolutionPSO").set_name( + "LLAMACMADifferentialEvolutionPSO", register=True + ) +except Exception as e: # CMADifferentialEvolutionPSO print("CMADifferentialEvolutionPSO can not be imported: ", e) -try: +try: # CMDEALX from nevergrad.optimization.lama.CMDEALX import CMDEALX lama_register["CMDEALX"] = CMDEALX - res = NonObjectOptimizer(method="LLAMACMDEALX")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMACMDEALX")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMACMDEALX = NonObjectOptimizer(method="LLAMACMDEALX").set_name("LLAMACMDEALX", register=True) -except Exception as e: +except Exception as e: # CMDEALX print("CMDEALX can not be imported: ", e) -try: - from nevergrad.optimization.lama.ClusterAdaptiveQuantumLevyOptimizer import ClusterAdaptiveQuantumLevyOptimizer +try: # ClusterAdaptiveQuantumLevyOptimizer + from nevergrad.optimization.lama.ClusterAdaptiveQuantumLevyOptimizer import ( + ClusterAdaptiveQuantumLevyOptimizer, + ) lama_register["ClusterAdaptiveQuantumLevyOptimizer"] = ClusterAdaptiveQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMAClusterAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAClusterAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAClusterAdaptiveQuantumLevyOptimizer").set_name("LLAMAClusterAdaptiveQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAClusterAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAClusterAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAClusterAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAClusterAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: # ClusterAdaptiveQuantumLevyOptimizer print("ClusterAdaptiveQuantumLevyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ClusterBasedAdaptiveDifferentialEvolution import ClusterBasedAdaptiveDifferentialEvolution +try: # ClusterBasedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ClusterBasedAdaptiveDifferentialEvolution import ( + ClusterBasedAdaptiveDifferentialEvolution, + ) lama_register["ClusterBasedAdaptiveDifferentialEvolution"] = ClusterBasedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAClusterBasedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAClusterBasedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAClusterBasedAdaptiveDifferentialEvolution").set_name("LLAMAClusterBasedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAClusterBasedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAClusterBasedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAClusterBasedAdaptiveDifferentialEvolution" + ).set_name("LLAMAClusterBasedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ClusterBasedAdaptiveDifferentialEvolution print("ClusterBasedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ClusteredAdaptiveHybridPSODESimulatedAnnealing import ClusteredAdaptiveHybridPSODESimulatedAnnealing - - lama_register["ClusteredAdaptiveHybridPSODESimulatedAnnealing"] = ClusteredAdaptiveHybridPSODESimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing = NonObjectOptimizer(method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing").set_name("LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing", register=True) -except Exception as e: +try: # ClusteredAdaptiveHybridPSODESimulatedAnnealing + from nevergrad.optimization.lama.ClusteredAdaptiveHybridPSODESimulatedAnnealing import ( + ClusteredAdaptiveHybridPSODESimulatedAnnealing, + ) + + lama_register["ClusteredAdaptiveHybridPSODESimulatedAnnealing"] = ( + ClusteredAdaptiveHybridPSODESimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing = NonObjectOptimizer( + method="LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing" + ).set_name("LLAMAClusteredAdaptiveHybridPSODESimulatedAnnealing", register=True) +except Exception as e: # ClusteredAdaptiveHybridPSODESimulatedAnnealing print("ClusteredAdaptiveHybridPSODESimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.ClusteredDifferentialEvolutionWithLocalSearch import ClusteredDifferentialEvolutionWithLocalSearch - - lama_register["ClusteredDifferentialEvolutionWithLocalSearch"] = ClusteredDifferentialEvolutionWithLocalSearch - res = NonObjectOptimizer(method="LLAMAClusteredDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAClusteredDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAClusteredDifferentialEvolutionWithLocalSearch").set_name("LLAMAClusteredDifferentialEvolutionWithLocalSearch", register=True) -except Exception as e: +try: # ClusteredDifferentialEvolutionWithLocalSearch + from nevergrad.optimization.lama.ClusteredDifferentialEvolutionWithLocalSearch import ( + ClusteredDifferentialEvolutionWithLocalSearch, + ) + + lama_register["ClusteredDifferentialEvolutionWithLocalSearch"] = ( + ClusteredDifferentialEvolutionWithLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAClusteredDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAClusteredDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAClusteredDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAClusteredDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: # ClusteredDifferentialEvolutionWithLocalSearch print("ClusteredDifferentialEvolutionWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CoevolutionaryDualPopulationSearch import CoevolutionaryDualPopulationSearch +try: # CoevolutionaryDualPopulationSearch + from nevergrad.optimization.lama.CoevolutionaryDualPopulationSearch import ( + CoevolutionaryDualPopulationSearch, + ) lama_register["CoevolutionaryDualPopulationSearch"] = CoevolutionaryDualPopulationSearch - res = NonObjectOptimizer(method="LLAMACoevolutionaryDualPopulationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACoevolutionaryDualPopulationSearch = NonObjectOptimizer(method="LLAMACoevolutionaryDualPopulationSearch").set_name("LLAMACoevolutionaryDualPopulationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACoevolutionaryDualPopulationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACoevolutionaryDualPopulationSearch = NonObjectOptimizer( + method="LLAMACoevolutionaryDualPopulationSearch" + ).set_name("LLAMACoevolutionaryDualPopulationSearch", register=True) +except Exception as e: # CoevolutionaryDualPopulationSearch print("CoevolutionaryDualPopulationSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CohortDiversityDrivenOptimization import CohortDiversityDrivenOptimization +try: # CohortDiversityDrivenOptimization + from nevergrad.optimization.lama.CohortDiversityDrivenOptimization import ( + CohortDiversityDrivenOptimization, + ) lama_register["CohortDiversityDrivenOptimization"] = CohortDiversityDrivenOptimization - res = NonObjectOptimizer(method="LLAMACohortDiversityDrivenOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACohortDiversityDrivenOptimization = NonObjectOptimizer(method="LLAMACohortDiversityDrivenOptimization").set_name("LLAMACohortDiversityDrivenOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACohortDiversityDrivenOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACohortDiversityDrivenOptimization = NonObjectOptimizer( + method="LLAMACohortDiversityDrivenOptimization" + ).set_name("LLAMACohortDiversityDrivenOptimization", register=True) +except Exception as e: # CohortDiversityDrivenOptimization print("CohortDiversityDrivenOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.CohortEvolutionWithDynamicSelection import CohortEvolutionWithDynamicSelection +try: # CohortEvolutionWithDynamicSelection + from nevergrad.optimization.lama.CohortEvolutionWithDynamicSelection import ( + CohortEvolutionWithDynamicSelection, + ) lama_register["CohortEvolutionWithDynamicSelection"] = CohortEvolutionWithDynamicSelection - res = NonObjectOptimizer(method="LLAMACohortEvolutionWithDynamicSelection")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACohortEvolutionWithDynamicSelection = NonObjectOptimizer(method="LLAMACohortEvolutionWithDynamicSelection").set_name("LLAMACohortEvolutionWithDynamicSelection", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACohortEvolutionWithDynamicSelection")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACohortEvolutionWithDynamicSelection = NonObjectOptimizer( + method="LLAMACohortEvolutionWithDynamicSelection" + ).set_name("LLAMACohortEvolutionWithDynamicSelection", register=True) +except Exception as e: # CohortEvolutionWithDynamicSelection print("CohortEvolutionWithDynamicSelection can not be imported: ", e) -try: +try: # ConcentricConvergenceOptimizer from nevergrad.optimization.lama.ConcentricConvergenceOptimizer import ConcentricConvergenceOptimizer lama_register["ConcentricConvergenceOptimizer"] = ConcentricConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAConcentricConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConcentricConvergenceOptimizer = NonObjectOptimizer(method="LLAMAConcentricConvergenceOptimizer").set_name("LLAMAConcentricConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConcentricConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConcentricConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAConcentricConvergenceOptimizer" + ).set_name("LLAMAConcentricConvergenceOptimizer", register=True) +except Exception as e: # ConcentricConvergenceOptimizer print("ConcentricConvergenceOptimizer can not be imported: ", e) -try: +try: # ConcentricDiversityStrategy from nevergrad.optimization.lama.ConcentricDiversityStrategy import ConcentricDiversityStrategy lama_register["ConcentricDiversityStrategy"] = ConcentricDiversityStrategy - res = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy").set_name("LLAMAConcentricDiversityStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMAConcentricDiversityStrategy").set_name( + "LLAMAConcentricDiversityStrategy", register=True + ) +except Exception as e: # ConcentricDiversityStrategy print("ConcentricDiversityStrategy can not be imported: ", e) -try: +try: # ConcentricGradientDescentEvolver from nevergrad.optimization.lama.ConcentricGradientDescentEvolver import ConcentricGradientDescentEvolver lama_register["ConcentricGradientDescentEvolver"] = ConcentricGradientDescentEvolver - res = NonObjectOptimizer(method="LLAMAConcentricGradientDescentEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConcentricGradientDescentEvolver = NonObjectOptimizer(method="LLAMAConcentricGradientDescentEvolver").set_name("LLAMAConcentricGradientDescentEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConcentricGradientDescentEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConcentricGradientDescentEvolver = NonObjectOptimizer( + method="LLAMAConcentricGradientDescentEvolver" + ).set_name("LLAMAConcentricGradientDescentEvolver", register=True) +except Exception as e: # ConcentricGradientDescentEvolver print("ConcentricGradientDescentEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.ConcentricGradientEnhancedEvolver import ConcentricGradientEnhancedEvolver +try: # ConcentricGradientEnhancedEvolver + from nevergrad.optimization.lama.ConcentricGradientEnhancedEvolver import ( + ConcentricGradientEnhancedEvolver, + ) lama_register["ConcentricGradientEnhancedEvolver"] = ConcentricGradientEnhancedEvolver - res = NonObjectOptimizer(method="LLAMAConcentricGradientEnhancedEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConcentricGradientEnhancedEvolver = NonObjectOptimizer(method="LLAMAConcentricGradientEnhancedEvolver").set_name("LLAMAConcentricGradientEnhancedEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConcentricGradientEnhancedEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConcentricGradientEnhancedEvolver = NonObjectOptimizer( + method="LLAMAConcentricGradientEnhancedEvolver" + ).set_name("LLAMAConcentricGradientEnhancedEvolver", register=True) +except Exception as e: # ConcentricGradientEnhancedEvolver print("ConcentricGradientEnhancedEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.ConcentricQuantumCrossoverStrategyV4 import ConcentricQuantumCrossoverStrategyV4 +try: # ConcentricQuantumCrossoverStrategyV4 + from nevergrad.optimization.lama.ConcentricQuantumCrossoverStrategyV4 import ( + ConcentricQuantumCrossoverStrategyV4, + ) lama_register["ConcentricQuantumCrossoverStrategyV4"] = ConcentricQuantumCrossoverStrategyV4 - res = NonObjectOptimizer(method="LLAMAConcentricQuantumCrossoverStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConcentricQuantumCrossoverStrategyV4 = NonObjectOptimizer(method="LLAMAConcentricQuantumCrossoverStrategyV4").set_name("LLAMAConcentricQuantumCrossoverStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConcentricQuantumCrossoverStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConcentricQuantumCrossoverStrategyV4 = NonObjectOptimizer( + method="LLAMAConcentricQuantumCrossoverStrategyV4" + ).set_name("LLAMAConcentricQuantumCrossoverStrategyV4", register=True) +except Exception as e: # ConcentricQuantumCrossoverStrategyV4 print("ConcentricQuantumCrossoverStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ConvergenceAcceleratedSpiralSearch import ConvergenceAcceleratedSpiralSearch +try: # ConvergenceAcceleratedSpiralSearch + from nevergrad.optimization.lama.ConvergenceAcceleratedSpiralSearch import ( + ConvergenceAcceleratedSpiralSearch, + ) lama_register["ConvergenceAcceleratedSpiralSearch"] = ConvergenceAcceleratedSpiralSearch - res = NonObjectOptimizer(method="LLAMAConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConvergenceAcceleratedSpiralSearch = NonObjectOptimizer(method="LLAMAConvergenceAcceleratedSpiralSearch").set_name("LLAMAConvergenceAcceleratedSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( + method="LLAMAConvergenceAcceleratedSpiralSearch" + ).set_name("LLAMAConvergenceAcceleratedSpiralSearch", register=True) +except Exception as e: # ConvergenceAcceleratedSpiralSearch print("ConvergenceAcceleratedSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ConvergentAdaptiveEvolutionStrategy import ConvergentAdaptiveEvolutionStrategy +try: # ConvergentAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutionStrategy import ( + ConvergentAdaptiveEvolutionStrategy, + ) lama_register["ConvergentAdaptiveEvolutionStrategy"] = ConvergentAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutionStrategy").set_name("LLAMAConvergentAdaptiveEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMAConvergentAdaptiveEvolutionStrategy" + ).set_name("LLAMAConvergentAdaptiveEvolutionStrategy", register=True) +except Exception as e: # ConvergentAdaptiveEvolutionStrategy print("ConvergentAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.ConvergentAdaptiveEvolutiveStrategy import ConvergentAdaptiveEvolutiveStrategy +try: # ConvergentAdaptiveEvolutiveStrategy + from nevergrad.optimization.lama.ConvergentAdaptiveEvolutiveStrategy import ( + ConvergentAdaptiveEvolutiveStrategy, + ) lama_register["ConvergentAdaptiveEvolutiveStrategy"] = ConvergentAdaptiveEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAConvergentAdaptiveEvolutiveStrategy = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutiveStrategy").set_name("LLAMAConvergentAdaptiveEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAConvergentAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAConvergentAdaptiveEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAConvergentAdaptiveEvolutiveStrategy" + ).set_name("LLAMAConvergentAdaptiveEvolutiveStrategy", register=True) +except Exception as e: # ConvergentAdaptiveEvolutiveStrategy print("ConvergentAdaptiveEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeAdaptiveCulturalSearch import CooperativeAdaptiveCulturalSearch +try: # CooperativeAdaptiveCulturalSearch + from nevergrad.optimization.lama.CooperativeAdaptiveCulturalSearch import ( + CooperativeAdaptiveCulturalSearch, + ) lama_register["CooperativeAdaptiveCulturalSearch"] = CooperativeAdaptiveCulturalSearch - res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeAdaptiveCulturalSearch = NonObjectOptimizer(method="LLAMACooperativeAdaptiveCulturalSearch").set_name("LLAMACooperativeAdaptiveCulturalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveCulturalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeAdaptiveCulturalSearch = NonObjectOptimizer( + method="LLAMACooperativeAdaptiveCulturalSearch" + ).set_name("LLAMACooperativeAdaptiveCulturalSearch", register=True) +except Exception as e: # CooperativeAdaptiveCulturalSearch print("CooperativeAdaptiveCulturalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeAdaptiveEvolutionaryOptimizer import CooperativeAdaptiveEvolutionaryOptimizer +try: # CooperativeAdaptiveEvolutionaryOptimizer + from nevergrad.optimization.lama.CooperativeAdaptiveEvolutionaryOptimizer import ( + CooperativeAdaptiveEvolutionaryOptimizer, + ) lama_register["CooperativeAdaptiveEvolutionaryOptimizer"] = CooperativeAdaptiveEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMACooperativeAdaptiveEvolutionaryOptimizer").set_name("LLAMACooperativeAdaptiveEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMACooperativeAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMACooperativeAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: # CooperativeAdaptiveEvolutionaryOptimizer print("CooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeCulturalAdaptiveSearch import CooperativeCulturalAdaptiveSearch +try: # CooperativeCulturalAdaptiveSearch + from nevergrad.optimization.lama.CooperativeCulturalAdaptiveSearch import ( + CooperativeCulturalAdaptiveSearch, + ) lama_register["CooperativeCulturalAdaptiveSearch"] = CooperativeCulturalAdaptiveSearch - res = NonObjectOptimizer(method="LLAMACooperativeCulturalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeCulturalAdaptiveSearch = NonObjectOptimizer(method="LLAMACooperativeCulturalAdaptiveSearch").set_name("LLAMACooperativeCulturalAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeCulturalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeCulturalAdaptiveSearch = NonObjectOptimizer( + method="LLAMACooperativeCulturalAdaptiveSearch" + ).set_name("LLAMACooperativeCulturalAdaptiveSearch", register=True) +except Exception as e: # CooperativeCulturalAdaptiveSearch print("CooperativeCulturalAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeCulturalDifferentialSearch import CooperativeCulturalDifferentialSearch +try: # CooperativeCulturalDifferentialSearch + from nevergrad.optimization.lama.CooperativeCulturalDifferentialSearch import ( + CooperativeCulturalDifferentialSearch, + ) lama_register["CooperativeCulturalDifferentialSearch"] = CooperativeCulturalDifferentialSearch - res = NonObjectOptimizer(method="LLAMACooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeCulturalDifferentialSearch = NonObjectOptimizer(method="LLAMACooperativeCulturalDifferentialSearch").set_name("LLAMACooperativeCulturalDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeCulturalDifferentialSearch = NonObjectOptimizer( + method="LLAMACooperativeCulturalDifferentialSearch" + ).set_name("LLAMACooperativeCulturalDifferentialSearch", register=True) +except Exception as e: # CooperativeCulturalDifferentialSearch print("CooperativeCulturalDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeCulturalEvolutionStrategy import CooperativeCulturalEvolutionStrategy +try: # CooperativeCulturalEvolutionStrategy + from nevergrad.optimization.lama.CooperativeCulturalEvolutionStrategy import ( + CooperativeCulturalEvolutionStrategy, + ) lama_register["CooperativeCulturalEvolutionStrategy"] = CooperativeCulturalEvolutionStrategy - res = NonObjectOptimizer(method="LLAMACooperativeCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeCulturalEvolutionStrategy = NonObjectOptimizer(method="LLAMACooperativeCulturalEvolutionStrategy").set_name("LLAMACooperativeCulturalEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeCulturalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeCulturalEvolutionStrategy = NonObjectOptimizer( + method="LLAMACooperativeCulturalEvolutionStrategy" + ).set_name("LLAMACooperativeCulturalEvolutionStrategy", register=True) +except Exception as e: # CooperativeCulturalEvolutionStrategy print("CooperativeCulturalEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeEvolutionaryGradientSearch import CooperativeEvolutionaryGradientSearch +try: # CooperativeEvolutionaryGradientSearch + from nevergrad.optimization.lama.CooperativeEvolutionaryGradientSearch import ( + CooperativeEvolutionaryGradientSearch, + ) lama_register["CooperativeEvolutionaryGradientSearch"] = CooperativeEvolutionaryGradientSearch - res = NonObjectOptimizer(method="LLAMACooperativeEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMACooperativeEvolutionaryGradientSearch").set_name("LLAMACooperativeEvolutionaryGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMACooperativeEvolutionaryGradientSearch" + ).set_name("LLAMACooperativeEvolutionaryGradientSearch", register=True) +except Exception as e: # CooperativeEvolutionaryGradientSearch print("CooperativeEvolutionaryGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.CooperativeParticleSwarmOptimization import CooperativeParticleSwarmOptimization +try: # CooperativeParticleSwarmOptimization + from nevergrad.optimization.lama.CooperativeParticleSwarmOptimization import ( + CooperativeParticleSwarmOptimization, + ) lama_register["CooperativeParticleSwarmOptimization"] = CooperativeParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMACooperativeParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACooperativeParticleSwarmOptimization = NonObjectOptimizer(method="LLAMACooperativeParticleSwarmOptimization").set_name("LLAMACooperativeParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACooperativeParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACooperativeParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMACooperativeParticleSwarmOptimization" + ).set_name("LLAMACooperativeParticleSwarmOptimization", register=True) +except Exception as e: # CooperativeParticleSwarmOptimization print("CooperativeParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.CoordinatedAdaptiveHybridOptimizer import CoordinatedAdaptiveHybridOptimizer +try: # CoordinatedAdaptiveHybridOptimizer + from nevergrad.optimization.lama.CoordinatedAdaptiveHybridOptimizer import ( + CoordinatedAdaptiveHybridOptimizer, + ) lama_register["CoordinatedAdaptiveHybridOptimizer"] = CoordinatedAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMACoordinatedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACoordinatedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMACoordinatedAdaptiveHybridOptimizer").set_name("LLAMACoordinatedAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACoordinatedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACoordinatedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMACoordinatedAdaptiveHybridOptimizer" + ).set_name("LLAMACoordinatedAdaptiveHybridOptimizer", register=True) +except Exception as e: # CoordinatedAdaptiveHybridOptimizer print("CoordinatedAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.CovarianceMatrixAdaptationDifferentialEvolution import CovarianceMatrixAdaptationDifferentialEvolution - - lama_register["CovarianceMatrixAdaptationDifferentialEvolution"] = CovarianceMatrixAdaptationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMACovarianceMatrixAdaptationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACovarianceMatrixAdaptationDifferentialEvolution = NonObjectOptimizer(method="LLAMACovarianceMatrixAdaptationDifferentialEvolution").set_name("LLAMACovarianceMatrixAdaptationDifferentialEvolution", register=True) -except Exception as e: +try: # CovarianceMatrixAdaptationDifferentialEvolution + from nevergrad.optimization.lama.CovarianceMatrixAdaptationDifferentialEvolution import ( + CovarianceMatrixAdaptationDifferentialEvolution, + ) + + lama_register["CovarianceMatrixAdaptationDifferentialEvolution"] = ( + CovarianceMatrixAdaptationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMACovarianceMatrixAdaptationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACovarianceMatrixAdaptationDifferentialEvolution = NonObjectOptimizer( + method="LLAMACovarianceMatrixAdaptationDifferentialEvolution" + ).set_name("LLAMACovarianceMatrixAdaptationDifferentialEvolution", register=True) +except Exception as e: # CovarianceMatrixAdaptationDifferentialEvolution print("CovarianceMatrixAdaptationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.CulturalAdaptiveDifferentialEvolution import CulturalAdaptiveDifferentialEvolution +try: # CulturalAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.CulturalAdaptiveDifferentialEvolution import ( + CulturalAdaptiveDifferentialEvolution, + ) lama_register["CulturalAdaptiveDifferentialEvolution"] = CulturalAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMACulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACulturalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMACulturalAdaptiveDifferentialEvolution").set_name("LLAMACulturalAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMACulturalAdaptiveDifferentialEvolution" + ).set_name("LLAMACulturalAdaptiveDifferentialEvolution", register=True) +except Exception as e: # CulturalAdaptiveDifferentialEvolution print("CulturalAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.CulturalGuidedDifferentialEvolution import CulturalGuidedDifferentialEvolution +try: # CulturalGuidedDifferentialEvolution + from nevergrad.optimization.lama.CulturalGuidedDifferentialEvolution import ( + CulturalGuidedDifferentialEvolution, + ) lama_register["CulturalGuidedDifferentialEvolution"] = CulturalGuidedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMACulturalGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMACulturalGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMACulturalGuidedDifferentialEvolution").set_name("LLAMACulturalGuidedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMACulturalGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMACulturalGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMACulturalGuidedDifferentialEvolution" + ).set_name("LLAMACulturalGuidedDifferentialEvolution", register=True) +except Exception as e: # CulturalGuidedDifferentialEvolution print("CulturalGuidedDifferentialEvolution can not be imported: ", e) -try: +try: # DADERC from nevergrad.optimization.lama.DADERC import DADERC lama_register["DADERC"] = DADERC - res = NonObjectOptimizer(method="LLAMADADERC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADADERC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADADERC = NonObjectOptimizer(method="LLAMADADERC").set_name("LLAMADADERC", register=True) -except Exception as e: +except Exception as e: # DADERC print("DADERC can not be imported: ", e) -try: +try: # DADESM from nevergrad.optimization.lama.DADESM import DADESM lama_register["DADESM"] = DADESM - res = NonObjectOptimizer(method="LLAMADADESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADADESM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADADESM = NonObjectOptimizer(method="LLAMADADESM").set_name("LLAMADADESM", register=True) -except Exception as e: +except Exception as e: # DADESM print("DADESM can not be imported: ", e) -try: +try: # DADe from nevergrad.optimization.lama.DADe import DADe lama_register["DADe"] = DADe - res = NonObjectOptimizer(method="LLAMADADe")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADADe")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADADe = NonObjectOptimizer(method="LLAMADADe").set_name("LLAMADADe", register=True) -except Exception as e: +except Exception as e: # DADe print("DADe can not be imported: ", e) -try: +try: # DAEA from nevergrad.optimization.lama.DAEA import DAEA lama_register["DAEA"] = DAEA - res = NonObjectOptimizer(method="LLAMADAEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADAEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADAEA = NonObjectOptimizer(method="LLAMADAEA").set_name("LLAMADAEA", register=True) -except Exception as e: +except Exception as e: # DAEA print("DAEA can not be imported: ", e) -try: +try: # DAES from nevergrad.optimization.lama.DAES import DAES lama_register["DAES"] = DAES - res = NonObjectOptimizer(method="LLAMADAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADAES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADAES = NonObjectOptimizer(method="LLAMADAES").set_name("LLAMADAES", register=True) -except Exception as e: +except Exception as e: # DAES print("DAES can not be imported: ", e) -try: +try: # DAESF from nevergrad.optimization.lama.DAESF import DAESF lama_register["DAESF"] = DAESF - res = NonObjectOptimizer(method="LLAMADAESF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADAESF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADAESF = NonObjectOptimizer(method="LLAMADAESF").set_name("LLAMADAESF", register=True) -except Exception as e: +except Exception as e: # DAESF print("DAESF can not be imported: ", e) -try: +try: # DASES from nevergrad.optimization.lama.DASES import DASES lama_register["DASES"] = DASES - res = NonObjectOptimizer(method="LLAMADASES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADASES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADASES = NonObjectOptimizer(method="LLAMADASES").set_name("LLAMADASES", register=True) -except Exception as e: +except Exception as e: # DASES print("DASES can not be imported: ", e) -try: +try: # DASOGG from nevergrad.optimization.lama.DASOGG import DASOGG lama_register["DASOGG"] = DASOGG - res = NonObjectOptimizer(method="LLAMADASOGG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADASOGG")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADASOGG = NonObjectOptimizer(method="LLAMADASOGG").set_name("LLAMADASOGG", register=True) -except Exception as e: +except Exception as e: # DASOGG print("DASOGG can not be imported: ", e) -try: +try: # DDCEA from nevergrad.optimization.lama.DDCEA import DDCEA lama_register["DDCEA"] = DDCEA - res = NonObjectOptimizer(method="LLAMADDCEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADDCEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADDCEA = NonObjectOptimizer(method="LLAMADDCEA").set_name("LLAMADDCEA", register=True) -except Exception as e: +except Exception as e: # DDCEA print("DDCEA can not be imported: ", e) -try: +try: # DDPO from nevergrad.optimization.lama.DDPO import DDPO lama_register["DDPO"] = DDPO - res = NonObjectOptimizer(method="LLAMADDPO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADDPO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADDPO = NonObjectOptimizer(method="LLAMADDPO").set_name("LLAMADDPO", register=True) -except Exception as e: +except Exception as e: # DDPO print("DDPO can not be imported: ", e) -try: +try: # DEAMC from nevergrad.optimization.lama.DEAMC import DEAMC lama_register["DEAMC"] = DEAMC - res = NonObjectOptimizer(method="LLAMADEAMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADEAMC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADEAMC = NonObjectOptimizer(method="LLAMADEAMC").set_name("LLAMADEAMC", register=True) -except Exception as e: +except Exception as e: # DEAMC print("DEAMC can not be imported: ", e) -try: +try: # DEAMC_DSR from nevergrad.optimization.lama.DEAMC_DSR import DEAMC_DSR lama_register["DEAMC_DSR"] = DEAMC_DSR - res = NonObjectOptimizer(method="LLAMADEAMC_DSR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADEAMC_DSR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADEAMC_DSR = NonObjectOptimizer(method="LLAMADEAMC_DSR").set_name("LLAMADEAMC_DSR", register=True) -except Exception as e: +except Exception as e: # DEAMC_DSR print("DEAMC_DSR can not be imported: ", e) -try: +try: # DEAMC_LSI from nevergrad.optimization.lama.DEAMC_LSI import DEAMC_LSI lama_register["DEAMC_LSI"] = DEAMC_LSI - res = NonObjectOptimizer(method="LLAMADEAMC_LSI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADEAMC_LSI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADEAMC_LSI = NonObjectOptimizer(method="LLAMADEAMC_LSI").set_name("LLAMADEAMC_LSI", register=True) -except Exception as e: +except Exception as e: # DEAMC_LSI print("DEAMC_LSI can not be imported: ", e) -try: +try: # DEWithNelderMead from nevergrad.optimization.lama.DEWithNelderMead import DEWithNelderMead lama_register["DEWithNelderMead"] = DEWithNelderMead - res = NonObjectOptimizer(method="LLAMADEWithNelderMead")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADEWithNelderMead = NonObjectOptimizer(method="LLAMADEWithNelderMead").set_name("LLAMADEWithNelderMead", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADEWithNelderMead")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADEWithNelderMead = NonObjectOptimizer(method="LLAMADEWithNelderMead").set_name( + "LLAMADEWithNelderMead", register=True + ) +except Exception as e: # DEWithNelderMead print("DEWithNelderMead can not be imported: ", e) -try: +try: # DHDGE from nevergrad.optimization.lama.DHDGE import DHDGE lama_register["DHDGE"] = DHDGE - res = NonObjectOptimizer(method="LLAMADHDGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADHDGE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADHDGE = NonObjectOptimizer(method="LLAMADHDGE").set_name("LLAMADHDGE", register=True) -except Exception as e: +except Exception as e: # DHDGE print("DHDGE can not be imported: ", e) -try: +try: # DLASS from nevergrad.optimization.lama.DLASS import DLASS lama_register["DLASS"] = DLASS - res = NonObjectOptimizer(method="LLAMADLASS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADLASS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADLASS = NonObjectOptimizer(method="LLAMADLASS").set_name("LLAMADLASS", register=True) -except Exception as e: +except Exception as e: # DLASS print("DLASS can not be imported: ", e) -try: +try: # DMDE from nevergrad.optimization.lama.DMDE import DMDE lama_register["DMDE"] = DMDE - res = NonObjectOptimizer(method="LLAMADMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADMDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADMDE = NonObjectOptimizer(method="LLAMADMDE").set_name("LLAMADMDE", register=True) -except Exception as e: +except Exception as e: # DMDE print("DMDE can not be imported: ", e) -try: +try: # DMDESM from nevergrad.optimization.lama.DMDESM import DMDESM lama_register["DMDESM"] = DMDESM - res = NonObjectOptimizer(method="LLAMADMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADMDESM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADMDESM = NonObjectOptimizer(method="LLAMADMDESM").set_name("LLAMADMDESM", register=True) -except Exception as e: +except Exception as e: # DMDESM print("DMDESM can not be imported: ", e) -try: +try: # DMES from nevergrad.optimization.lama.DMES import DMES lama_register["DMES"] = DMES - res = NonObjectOptimizer(method="LLAMADMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADMES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADMES = NonObjectOptimizer(method="LLAMADMES").set_name("LLAMADMES", register=True) -except Exception as e: +except Exception as e: # DMES print("DMES can not be imported: ", e) -try: +try: # DNAS from nevergrad.optimization.lama.DNAS import DNAS lama_register["DNAS"] = DNAS - res = NonObjectOptimizer(method="LLAMADNAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADNAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADNAS = NonObjectOptimizer(method="LLAMADNAS").set_name("LLAMADNAS", register=True) -except Exception as e: +except Exception as e: # DNAS print("DNAS can not be imported: ", e) -try: +try: # DPADE from nevergrad.optimization.lama.DPADE import DPADE lama_register["DPADE"] = DPADE - res = NonObjectOptimizer(method="LLAMADPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADPADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADPADE = NonObjectOptimizer(method="LLAMADPADE").set_name("LLAMADPADE", register=True) -except Exception as e: +except Exception as e: # DPADE print("DPADE can not be imported: ", e) -try: +try: # DPES from nevergrad.optimization.lama.DPES import DPES lama_register["DPES"] = DPES - res = NonObjectOptimizer(method="LLAMADPES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADPES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADPES = NonObjectOptimizer(method="LLAMADPES").set_name("LLAMADPES", register=True) -except Exception as e: +except Exception as e: # DPES print("DPES can not be imported: ", e) -try: +try: # DSDE from nevergrad.optimization.lama.DSDE import DSDE lama_register["DSDE"] = DSDE - res = NonObjectOptimizer(method="LLAMADSDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADSDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADSDE = NonObjectOptimizer(method="LLAMADSDE").set_name("LLAMADSDE", register=True) -except Exception as e: +except Exception as e: # DSDE print("DSDE can not be imported: ", e) -try: +try: # DSEDES from nevergrad.optimization.lama.DSEDES import DSEDES lama_register["DSEDES"] = DSEDES - res = NonObjectOptimizer(method="LLAMADSEDES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMADSEDES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMADSEDES = NonObjectOptimizer(method="LLAMADSEDES").set_name("LLAMADSEDES", register=True) -except Exception as e: +except Exception as e: # DSEDES print("DSEDES can not be imported: ", e) -try: - from nevergrad.optimization.lama.DifferentialEvolutionAdaptiveCrossover import DifferentialEvolutionAdaptiveCrossover +try: # DifferentialEvolutionAdaptiveCrossover + from nevergrad.optimization.lama.DifferentialEvolutionAdaptiveCrossover import ( + DifferentialEvolutionAdaptiveCrossover, + ) lama_register["DifferentialEvolutionAdaptiveCrossover"] = DifferentialEvolutionAdaptiveCrossover - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionAdaptiveCrossover = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptiveCrossover").set_name("LLAMADifferentialEvolutionAdaptiveCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionAdaptiveCrossover = NonObjectOptimizer( + method="LLAMADifferentialEvolutionAdaptiveCrossover" + ).set_name("LLAMADifferentialEvolutionAdaptiveCrossover", register=True) +except Exception as e: # DifferentialEvolutionAdaptiveCrossover print("DifferentialEvolutionAdaptiveCrossover can not be imported: ", e) -try: +try: # DifferentialEvolutionAdaptivePSO from nevergrad.optimization.lama.DifferentialEvolutionAdaptivePSO import DifferentialEvolutionAdaptivePSO lama_register["DifferentialEvolutionAdaptivePSO"] = DifferentialEvolutionAdaptivePSO - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionAdaptivePSO = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptivePSO").set_name("LLAMADifferentialEvolutionAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionAdaptivePSO = NonObjectOptimizer( + method="LLAMADifferentialEvolutionAdaptivePSO" + ).set_name("LLAMADifferentialEvolutionAdaptivePSO", register=True) +except Exception as e: # DifferentialEvolutionAdaptivePSO print("DifferentialEvolutionAdaptivePSO can not be imported: ", e) -try: +try: # DifferentialEvolutionHybrid from nevergrad.optimization.lama.DifferentialEvolutionHybrid import DifferentialEvolutionHybrid lama_register["DifferentialEvolutionHybrid"] = DifferentialEvolutionHybrid - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid").set_name("LLAMADifferentialEvolutionHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionHybrid").set_name( + "LLAMADifferentialEvolutionHybrid", register=True + ) +except Exception as e: # DifferentialEvolutionHybrid print("DifferentialEvolutionHybrid can not be imported: ", e) -try: +try: # DifferentialEvolutionOptimizer from nevergrad.optimization.lama.DifferentialEvolutionOptimizer import DifferentialEvolutionOptimizer lama_register["DifferentialEvolutionOptimizer"] = DifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMADifferentialEvolutionOptimizer").set_name("LLAMADifferentialEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMADifferentialEvolutionOptimizer" + ).set_name("LLAMADifferentialEvolutionOptimizer", register=True) +except Exception as e: # DifferentialEvolutionOptimizer print("DifferentialEvolutionOptimizer can not be imported: ", e) -try: +try: # DifferentialEvolutionPSOHybrid from nevergrad.optimization.lama.DifferentialEvolutionPSOHybrid import DifferentialEvolutionPSOHybrid lama_register["DifferentialEvolutionPSOHybrid"] = DifferentialEvolutionPSOHybrid - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionPSOHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionPSOHybrid = NonObjectOptimizer(method="LLAMADifferentialEvolutionPSOHybrid").set_name("LLAMADifferentialEvolutionPSOHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionPSOHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionPSOHybrid = NonObjectOptimizer( + method="LLAMADifferentialEvolutionPSOHybrid" + ).set_name("LLAMADifferentialEvolutionPSOHybrid", register=True) +except Exception as e: # DifferentialEvolutionPSOHybrid print("DifferentialEvolutionPSOHybrid can not be imported: ", e) -try: +try: # DifferentialEvolutionSearch from nevergrad.optimization.lama.DifferentialEvolutionSearch import DifferentialEvolutionSearch lama_register["DifferentialEvolutionSearch"] = DifferentialEvolutionSearch - res = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialEvolutionSearch = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch").set_name("LLAMADifferentialEvolutionSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialEvolutionSearch = NonObjectOptimizer(method="LLAMADifferentialEvolutionSearch").set_name( + "LLAMADifferentialEvolutionSearch", register=True + ) +except Exception as e: # DifferentialEvolutionSearch print("DifferentialEvolutionSearch can not be imported: ", e) -try: +try: # DifferentialFireworkAlgorithm from nevergrad.optimization.lama.DifferentialFireworkAlgorithm import DifferentialFireworkAlgorithm lama_register["DifferentialFireworkAlgorithm"] = DifferentialFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMADifferentialFireworkAlgorithm").set_name("LLAMADifferentialFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADifferentialFireworkAlgorithm" + ).set_name("LLAMADifferentialFireworkAlgorithm", register=True) +except Exception as e: # DifferentialFireworkAlgorithm print("DifferentialFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.DifferentialGradientEvolutionStrategy import DifferentialGradientEvolutionStrategy +try: # DifferentialGradientEvolutionStrategy + from nevergrad.optimization.lama.DifferentialGradientEvolutionStrategy import ( + DifferentialGradientEvolutionStrategy, + ) lama_register["DifferentialGradientEvolutionStrategy"] = DifferentialGradientEvolutionStrategy - res = NonObjectOptimizer(method="LLAMADifferentialGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialGradientEvolutionStrategy = NonObjectOptimizer(method="LLAMADifferentialGradientEvolutionStrategy").set_name("LLAMADifferentialGradientEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialGradientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialGradientEvolutionStrategy = NonObjectOptimizer( + method="LLAMADifferentialGradientEvolutionStrategy" + ).set_name("LLAMADifferentialGradientEvolutionStrategy", register=True) +except Exception as e: # DifferentialGradientEvolutionStrategy print("DifferentialGradientEvolutionStrategy can not be imported: ", e) -try: +try: # DifferentialHarmonySearch from nevergrad.optimization.lama.DifferentialHarmonySearch import DifferentialHarmonySearch lama_register["DifferentialHarmonySearch"] = DifferentialHarmonySearch - res = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialHarmonySearch = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch").set_name("LLAMADifferentialHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialHarmonySearch = NonObjectOptimizer(method="LLAMADifferentialHarmonySearch").set_name( + "LLAMADifferentialHarmonySearch", register=True + ) +except Exception as e: # DifferentialHarmonySearch print("DifferentialHarmonySearch can not be imported: ", e) -try: +try: # DifferentialMemeticAlgorithm from nevergrad.optimization.lama.DifferentialMemeticAlgorithm import DifferentialMemeticAlgorithm lama_register["DifferentialMemeticAlgorithm"] = DifferentialMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMADifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMADifferentialMemeticAlgorithm").set_name("LLAMADifferentialMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADifferentialMemeticAlgorithm" + ).set_name("LLAMADifferentialMemeticAlgorithm", register=True) +except Exception as e: # DifferentialMemeticAlgorithm print("DifferentialMemeticAlgorithm can not be imported: ", e) -try: +try: # DifferentialQuantumMetaheuristic from nevergrad.optimization.lama.DifferentialQuantumMetaheuristic import DifferentialQuantumMetaheuristic lama_register["DifferentialQuantumMetaheuristic"] = DifferentialQuantumMetaheuristic - res = NonObjectOptimizer(method="LLAMADifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialQuantumMetaheuristic = NonObjectOptimizer(method="LLAMADifferentialQuantumMetaheuristic").set_name("LLAMADifferentialQuantumMetaheuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialQuantumMetaheuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialQuantumMetaheuristic = NonObjectOptimizer( + method="LLAMADifferentialQuantumMetaheuristic" + ).set_name("LLAMADifferentialQuantumMetaheuristic", register=True) +except Exception as e: # DifferentialQuantumMetaheuristic print("DifferentialQuantumMetaheuristic can not be imported: ", e) -try: - from nevergrad.optimization.lama.DifferentialSimulatedAnnealingOptimizer import DifferentialSimulatedAnnealingOptimizer +try: # DifferentialSimulatedAnnealingOptimizer + from nevergrad.optimization.lama.DifferentialSimulatedAnnealingOptimizer import ( + DifferentialSimulatedAnnealingOptimizer, + ) lama_register["DifferentialSimulatedAnnealingOptimizer"] = DifferentialSimulatedAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMADifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMADifferentialSimulatedAnnealingOptimizer").set_name("LLAMADifferentialSimulatedAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( + method="LLAMADifferentialSimulatedAnnealingOptimizer" + ).set_name("LLAMADifferentialSimulatedAnnealingOptimizer", register=True) +except Exception as e: # DifferentialSimulatedAnnealingOptimizer print("DifferentialSimulatedAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolution import DiversityEnhancedAdaptiveGradientEvolution +try: # DiversityEnhancedAdaptiveGradientEvolution + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolution import ( + DiversityEnhancedAdaptiveGradientEvolution, + ) lama_register["DiversityEnhancedAdaptiveGradientEvolution"] = DiversityEnhancedAdaptiveGradientEvolution - res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADiversityEnhancedAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolution").set_name("LLAMADiversityEnhancedAdaptiveGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADiversityEnhancedAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADiversityEnhancedAdaptiveGradientEvolution" + ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolution", register=True) +except Exception as e: # DiversityEnhancedAdaptiveGradientEvolution print("DiversityEnhancedAdaptiveGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolutionV2 import DiversityEnhancedAdaptiveGradientEvolutionV2 - - lama_register["DiversityEnhancedAdaptiveGradientEvolutionV2"] = DiversityEnhancedAdaptiveGradientEvolutionV2 - res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADiversityEnhancedAdaptiveGradientEvolutionV2 = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2").set_name("LLAMADiversityEnhancedAdaptiveGradientEvolutionV2", register=True) -except Exception as e: +try: # DiversityEnhancedAdaptiveGradientEvolutionV2 + from nevergrad.optimization.lama.DiversityEnhancedAdaptiveGradientEvolutionV2 import ( + DiversityEnhancedAdaptiveGradientEvolutionV2, + ) + + lama_register["DiversityEnhancedAdaptiveGradientEvolutionV2"] = ( + DiversityEnhancedAdaptiveGradientEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADiversityEnhancedAdaptiveGradientEvolutionV2 = NonObjectOptimizer( + method="LLAMADiversityEnhancedAdaptiveGradientEvolutionV2" + ).set_name("LLAMADiversityEnhancedAdaptiveGradientEvolutionV2", register=True) +except Exception as e: # DiversityEnhancedAdaptiveGradientEvolutionV2 print("DiversityEnhancedAdaptiveGradientEvolutionV2 can not be imported: ", e) -try: +try: # DolphinPodOptimization from nevergrad.optimization.lama.DolphinPodOptimization import DolphinPodOptimization lama_register["DolphinPodOptimization"] = DolphinPodOptimization - res = NonObjectOptimizer(method="LLAMADolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADolphinPodOptimization = NonObjectOptimizer(method="LLAMADolphinPodOptimization").set_name("LLAMADolphinPodOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADolphinPodOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADolphinPodOptimization = NonObjectOptimizer(method="LLAMADolphinPodOptimization").set_name( + "LLAMADolphinPodOptimization", register=True + ) +except Exception as e: # DolphinPodOptimization print("DolphinPodOptimization can not be imported: ", e) -try: +try: # DualAdaptiveRestartDE from nevergrad.optimization.lama.DualAdaptiveRestartDE import DualAdaptiveRestartDE lama_register["DualAdaptiveRestartDE"] = DualAdaptiveRestartDE - res = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualAdaptiveRestartDE = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE").set_name("LLAMADualAdaptiveRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualAdaptiveRestartDE = NonObjectOptimizer(method="LLAMADualAdaptiveRestartDE").set_name( + "LLAMADualAdaptiveRestartDE", register=True + ) +except Exception as e: # DualAdaptiveRestartDE print("DualAdaptiveRestartDE can not be imported: ", e) -try: +try: # DualAdaptiveSearch from nevergrad.optimization.lama.DualAdaptiveSearch import DualAdaptiveSearch lama_register["DualAdaptiveSearch"] = DualAdaptiveSearch - res = NonObjectOptimizer(method="LLAMADualAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualAdaptiveSearch = NonObjectOptimizer(method="LLAMADualAdaptiveSearch").set_name("LLAMADualAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualAdaptiveSearch = NonObjectOptimizer(method="LLAMADualAdaptiveSearch").set_name( + "LLAMADualAdaptiveSearch", register=True + ) +except Exception as e: # DualAdaptiveSearch print("DualAdaptiveSearch can not be imported: ", e) -try: +try: # DualConvergenceEvolutiveStrategy from nevergrad.optimization.lama.DualConvergenceEvolutiveStrategy import DualConvergenceEvolutiveStrategy lama_register["DualConvergenceEvolutiveStrategy"] = DualConvergenceEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMADualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualConvergenceEvolutiveStrategy = NonObjectOptimizer(method="LLAMADualConvergenceEvolutiveStrategy").set_name("LLAMADualConvergenceEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualConvergenceEvolutiveStrategy = NonObjectOptimizer( + method="LLAMADualConvergenceEvolutiveStrategy" + ).set_name("LLAMADualConvergenceEvolutiveStrategy", register=True) +except Exception as e: # DualConvergenceEvolutiveStrategy print("DualConvergenceEvolutiveStrategy can not be imported: ", e) -try: +try: # DualModeOptimization from nevergrad.optimization.lama.DualModeOptimization import DualModeOptimization lama_register["DualModeOptimization"] = DualModeOptimization - res = NonObjectOptimizer(method="LLAMADualModeOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualModeOptimization = NonObjectOptimizer(method="LLAMADualModeOptimization").set_name("LLAMADualModeOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualModeOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualModeOptimization = NonObjectOptimizer(method="LLAMADualModeOptimization").set_name( + "LLAMADualModeOptimization", register=True + ) +except Exception as e: # DualModeOptimization print("DualModeOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseAdaptiveGradientEvolution import DualPhaseAdaptiveGradientEvolution +try: # DualPhaseAdaptiveGradientEvolution + from nevergrad.optimization.lama.DualPhaseAdaptiveGradientEvolution import ( + DualPhaseAdaptiveGradientEvolution, + ) lama_register["DualPhaseAdaptiveGradientEvolution"] = DualPhaseAdaptiveGradientEvolution - res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveGradientEvolution").set_name("LLAMADualPhaseAdaptiveGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveGradientEvolution" + ).set_name("LLAMADualPhaseAdaptiveGradientEvolution", register=True) +except Exception as e: # DualPhaseAdaptiveGradientEvolution print("DualPhaseAdaptiveGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseAdaptiveHybridOptimizerV3 import DualPhaseAdaptiveHybridOptimizerV3 +try: # DualPhaseAdaptiveHybridOptimizerV3 + from nevergrad.optimization.lama.DualPhaseAdaptiveHybridOptimizerV3 import ( + DualPhaseAdaptiveHybridOptimizerV3, + ) lama_register["DualPhaseAdaptiveHybridOptimizerV3"] = DualPhaseAdaptiveHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveHybridOptimizerV3").set_name("LLAMADualPhaseAdaptiveHybridOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveHybridOptimizerV3" + ).set_name("LLAMADualPhaseAdaptiveHybridOptimizerV3", register=True) +except Exception as e: # DualPhaseAdaptiveHybridOptimizerV3 print("DualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolution import DualPhaseAdaptiveMemeticDifferentialEvolution - - lama_register["DualPhaseAdaptiveMemeticDifferentialEvolution"] = DualPhaseAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution").set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # DualPhaseAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolution import ( + DualPhaseAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolution"] = ( + DualPhaseAdaptiveMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # DualPhaseAdaptiveMemeticDifferentialEvolution print("DualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolutionV2 import DualPhaseAdaptiveMemeticDifferentialEvolutionV2 - - lama_register["DualPhaseAdaptiveMemeticDifferentialEvolutionV2"] = DualPhaseAdaptiveMemeticDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2").set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2", register=True) -except Exception as e: +try: # DualPhaseAdaptiveMemeticDifferentialEvolutionV2 + from nevergrad.optimization.lama.DualPhaseAdaptiveMemeticDifferentialEvolutionV2 import ( + DualPhaseAdaptiveMemeticDifferentialEvolutionV2, + ) + + lama_register["DualPhaseAdaptiveMemeticDifferentialEvolutionV2"] = ( + DualPhaseAdaptiveMemeticDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2" + ).set_name("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2", register=True) +except Exception as e: # DualPhaseAdaptiveMemeticDifferentialEvolutionV2 print("DualPhaseAdaptiveMemeticDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced import DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced - - lama_register["DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced"] = DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced - res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced").set_name("LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced", register=True) -except Exception as e: +try: # DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced + from nevergrad.optimization.lama.DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced import ( + DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced, + ) + + lama_register["DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced"] = ( + DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced + ) + # res = NonObjectOptimizer(method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced = NonObjectOptimizer( + method="LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced" + ).set_name("LLAMADualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced", register=True) +except Exception as e: # DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced print("DualPhaseAdaptiveParticleSwarmDifferentialEvolutionV3_Enhanced can not be imported: ", e) -try: +try: # DualPhaseDifferentialEvolution from nevergrad.optimization.lama.DualPhaseDifferentialEvolution import DualPhaseDifferentialEvolution lama_register["DualPhaseDifferentialEvolution"] = DualPhaseDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMADualPhaseDifferentialEvolution").set_name("LLAMADualPhaseDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualPhaseDifferentialEvolution" + ).set_name("LLAMADualPhaseDifferentialEvolution", register=True) +except Exception as e: # DualPhaseDifferentialEvolution print("DualPhaseDifferentialEvolution can not be imported: ", e) -try: +try: # DualPhaseOptimizationStrategy from nevergrad.optimization.lama.DualPhaseOptimizationStrategy import DualPhaseOptimizationStrategy lama_register["DualPhaseOptimizationStrategy"] = DualPhaseOptimizationStrategy - res = NonObjectOptimizer(method="LLAMADualPhaseOptimizationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseOptimizationStrategy = NonObjectOptimizer(method="LLAMADualPhaseOptimizationStrategy").set_name("LLAMADualPhaseOptimizationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseOptimizationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseOptimizationStrategy = NonObjectOptimizer( + method="LLAMADualPhaseOptimizationStrategy" + ).set_name("LLAMADualPhaseOptimizationStrategy", register=True) +except Exception as e: # DualPhaseOptimizationStrategy print("DualPhaseOptimizationStrategy can not be imported: ", e) -try: +try: # DualPhaseQuantumMemeticSearch from nevergrad.optimization.lama.DualPhaseQuantumMemeticSearch import DualPhaseQuantumMemeticSearch lama_register["DualPhaseQuantumMemeticSearch"] = DualPhaseQuantumMemeticSearch - res = NonObjectOptimizer(method="LLAMADualPhaseQuantumMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseQuantumMemeticSearch = NonObjectOptimizer(method="LLAMADualPhaseQuantumMemeticSearch").set_name("LLAMADualPhaseQuantumMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseQuantumMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseQuantumMemeticSearch = NonObjectOptimizer( + method="LLAMADualPhaseQuantumMemeticSearch" + ).set_name("LLAMADualPhaseQuantumMemeticSearch", register=True) +except Exception as e: # DualPhaseQuantumMemeticSearch print("DualPhaseQuantumMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPhaseRefinedQuantumLocalSearchOptimizer import DualPhaseRefinedQuantumLocalSearchOptimizer +try: # DualPhaseRefinedQuantumLocalSearchOptimizer + from nevergrad.optimization.lama.DualPhaseRefinedQuantumLocalSearchOptimizer import ( + DualPhaseRefinedQuantumLocalSearchOptimizer, + ) lama_register["DualPhaseRefinedQuantumLocalSearchOptimizer"] = DualPhaseRefinedQuantumLocalSearchOptimizer - res = NonObjectOptimizer(method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPhaseRefinedQuantumLocalSearchOptimizer = NonObjectOptimizer(method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer").set_name("LLAMADualPhaseRefinedQuantumLocalSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPhaseRefinedQuantumLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMADualPhaseRefinedQuantumLocalSearchOptimizer" + ).set_name("LLAMADualPhaseRefinedQuantumLocalSearchOptimizer", register=True) +except Exception as e: # DualPhaseRefinedQuantumLocalSearchOptimizer print("DualPhaseRefinedQuantumLocalSearchOptimizer can not be imported: ", e) -try: +try: # DualPopulationADE from nevergrad.optimization.lama.DualPopulationADE import DualPopulationADE lama_register["DualPopulationADE"] = DualPopulationADE - res = NonObjectOptimizer(method="LLAMADualPopulationADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPopulationADE = NonObjectOptimizer(method="LLAMADualPopulationADE").set_name("LLAMADualPopulationADE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPopulationADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPopulationADE = NonObjectOptimizer(method="LLAMADualPopulationADE").set_name( + "LLAMADualPopulationADE", register=True + ) +except Exception as e: # DualPopulationADE print("DualPopulationADE can not be imported: ", e) -try: +try: # DualPopulationAdaptiveSearch from nevergrad.optimization.lama.DualPopulationAdaptiveSearch import DualPopulationAdaptiveSearch lama_register["DualPopulationAdaptiveSearch"] = DualPopulationAdaptiveSearch - res = NonObjectOptimizer(method="LLAMADualPopulationAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPopulationAdaptiveSearch = NonObjectOptimizer(method="LLAMADualPopulationAdaptiveSearch").set_name("LLAMADualPopulationAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPopulationAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPopulationAdaptiveSearch = NonObjectOptimizer( + method="LLAMADualPopulationAdaptiveSearch" + ).set_name("LLAMADualPopulationAdaptiveSearch", register=True) +except Exception as e: # DualPopulationAdaptiveSearch print("DualPopulationAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualPopulationCovarianceMatrixGradientSearch import DualPopulationCovarianceMatrixGradientSearch - - lama_register["DualPopulationCovarianceMatrixGradientSearch"] = DualPopulationCovarianceMatrixGradientSearch - res = NonObjectOptimizer(method="LLAMADualPopulationCovarianceMatrixGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPopulationCovarianceMatrixGradientSearch = NonObjectOptimizer(method="LLAMADualPopulationCovarianceMatrixGradientSearch").set_name("LLAMADualPopulationCovarianceMatrixGradientSearch", register=True) -except Exception as e: +try: # DualPopulationCovarianceMatrixGradientSearch + from nevergrad.optimization.lama.DualPopulationCovarianceMatrixGradientSearch import ( + DualPopulationCovarianceMatrixGradientSearch, + ) + + lama_register["DualPopulationCovarianceMatrixGradientSearch"] = ( + DualPopulationCovarianceMatrixGradientSearch + ) + # res = NonObjectOptimizer(method="LLAMADualPopulationCovarianceMatrixGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPopulationCovarianceMatrixGradientSearch = NonObjectOptimizer( + method="LLAMADualPopulationCovarianceMatrixGradientSearch" + ).set_name("LLAMADualPopulationCovarianceMatrixGradientSearch", register=True) +except Exception as e: # DualPopulationCovarianceMatrixGradientSearch print("DualPopulationCovarianceMatrixGradientSearch can not be imported: ", e) -try: +try: # DualPopulationEnhancedSearch from nevergrad.optimization.lama.DualPopulationEnhancedSearch import DualPopulationEnhancedSearch lama_register["DualPopulationEnhancedSearch"] = DualPopulationEnhancedSearch - res = NonObjectOptimizer(method="LLAMADualPopulationEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualPopulationEnhancedSearch = NonObjectOptimizer(method="LLAMADualPopulationEnhancedSearch").set_name("LLAMADualPopulationEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualPopulationEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualPopulationEnhancedSearch = NonObjectOptimizer( + method="LLAMADualPopulationEnhancedSearch" + ).set_name("LLAMADualPopulationEnhancedSearch", register=True) +except Exception as e: # DualPopulationEnhancedSearch print("DualPopulationEnhancedSearch can not be imported: ", e) -try: +try: # DualStrategyAdaptiveDE from nevergrad.optimization.lama.DualStrategyAdaptiveDE import DualStrategyAdaptiveDE lama_register["DualStrategyAdaptiveDE"] = DualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE").set_name("LLAMADualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADualStrategyAdaptiveDE").set_name( + "LLAMADualStrategyAdaptiveDE", register=True + ) +except Exception as e: # DualStrategyAdaptiveDE print("DualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualStrategyDifferentialEvolution import DualStrategyDifferentialEvolution +try: # DualStrategyDifferentialEvolution + from nevergrad.optimization.lama.DualStrategyDifferentialEvolution import ( + DualStrategyDifferentialEvolution, + ) lama_register["DualStrategyDifferentialEvolution"] = DualStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADualStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMADualStrategyDifferentialEvolution").set_name("LLAMADualStrategyDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMADualStrategyDifferentialEvolution" + ).set_name("LLAMADualStrategyDifferentialEvolution", register=True) +except Exception as e: # DualStrategyDifferentialEvolution print("DualStrategyDifferentialEvolution can not be imported: ", e) -try: +try: # DualStrategyOptimizer from nevergrad.optimization.lama.DualStrategyOptimizer import DualStrategyOptimizer lama_register["DualStrategyOptimizer"] = DualStrategyOptimizer - res = NonObjectOptimizer(method="LLAMADualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualStrategyOptimizer = NonObjectOptimizer(method="LLAMADualStrategyOptimizer").set_name("LLAMADualStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualStrategyOptimizer = NonObjectOptimizer(method="LLAMADualStrategyOptimizer").set_name( + "LLAMADualStrategyOptimizer", register=True + ) +except Exception as e: # DualStrategyOptimizer print("DualStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DualStrategyQuantumEvolutionOptimizer import DualStrategyQuantumEvolutionOptimizer +try: # DualStrategyQuantumEvolutionOptimizer + from nevergrad.optimization.lama.DualStrategyQuantumEvolutionOptimizer import ( + DualStrategyQuantumEvolutionOptimizer, + ) lama_register["DualStrategyQuantumEvolutionOptimizer"] = DualStrategyQuantumEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMADualStrategyQuantumEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADualStrategyQuantumEvolutionOptimizer = NonObjectOptimizer(method="LLAMADualStrategyQuantumEvolutionOptimizer").set_name("LLAMADualStrategyQuantumEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADualStrategyQuantumEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADualStrategyQuantumEvolutionOptimizer = NonObjectOptimizer( + method="LLAMADualStrategyQuantumEvolutionOptimizer" + ).set_name("LLAMADualStrategyQuantumEvolutionOptimizer", register=True) +except Exception as e: # DualStrategyQuantumEvolutionOptimizer print("DualStrategyQuantumEvolutionOptimizer can not be imported: ", e) -try: +try: # DynamicAdaptiveClimbingStrategy from nevergrad.optimization.lama.DynamicAdaptiveClimbingStrategy import DynamicAdaptiveClimbingStrategy lama_register["DynamicAdaptiveClimbingStrategy"] = DynamicAdaptiveClimbingStrategy - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveClimbingStrategy = NonObjectOptimizer(method="LLAMADynamicAdaptiveClimbingStrategy").set_name("LLAMADynamicAdaptiveClimbingStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveClimbingStrategy = NonObjectOptimizer( + method="LLAMADynamicAdaptiveClimbingStrategy" + ).set_name("LLAMADynamicAdaptiveClimbingStrategy", register=True) +except Exception as e: # DynamicAdaptiveClimbingStrategy print("DynamicAdaptiveClimbingStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveCohortOptimization import DynamicAdaptiveCohortOptimization +try: # DynamicAdaptiveCohortOptimization + from nevergrad.optimization.lama.DynamicAdaptiveCohortOptimization import ( + DynamicAdaptiveCohortOptimization, + ) lama_register["DynamicAdaptiveCohortOptimization"] = DynamicAdaptiveCohortOptimization - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveCohortOptimization").set_name("LLAMADynamicAdaptiveCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveCohortOptimization" + ).set_name("LLAMADynamicAdaptiveCohortOptimization", register=True) +except Exception as e: # DynamicAdaptiveCohortOptimization print("DynamicAdaptiveCohortOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveEliteHybridOptimizer import DynamicAdaptiveEliteHybridOptimizer +try: # DynamicAdaptiveEliteHybridOptimizer + from nevergrad.optimization.lama.DynamicAdaptiveEliteHybridOptimizer import ( + DynamicAdaptiveEliteHybridOptimizer, + ) lama_register["DynamicAdaptiveEliteHybridOptimizer"] = DynamicAdaptiveEliteHybridOptimizer - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveEliteHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveEliteHybridOptimizer").set_name("LLAMADynamicAdaptiveEliteHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveEliteHybridOptimizer" + ).set_name("LLAMADynamicAdaptiveEliteHybridOptimizer", register=True) +except Exception as e: # DynamicAdaptiveEliteHybridOptimizer print("DynamicAdaptiveEliteHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveEnhancedDifferentialEvolution import DynamicAdaptiveEnhancedDifferentialEvolution - - lama_register["DynamicAdaptiveEnhancedDifferentialEvolution"] = DynamicAdaptiveEnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution").set_name("LLAMADynamicAdaptiveEnhancedDifferentialEvolution", register=True) -except Exception as e: +try: # DynamicAdaptiveEnhancedDifferentialEvolution + from nevergrad.optimization.lama.DynamicAdaptiveEnhancedDifferentialEvolution import ( + DynamicAdaptiveEnhancedDifferentialEvolution, + ) + + lama_register["DynamicAdaptiveEnhancedDifferentialEvolution"] = ( + DynamicAdaptiveEnhancedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveEnhancedDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveEnhancedDifferentialEvolution", register=True) +except Exception as e: # DynamicAdaptiveEnhancedDifferentialEvolution print("DynamicAdaptiveEnhancedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimization import DynamicAdaptiveExplorationOptimization +try: # DynamicAdaptiveExplorationOptimization + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimization import ( + DynamicAdaptiveExplorationOptimization, + ) lama_register["DynamicAdaptiveExplorationOptimization"] = DynamicAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimization").set_name("LLAMADynamicAdaptiveExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveExplorationOptimization" + ).set_name("LLAMADynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: # DynamicAdaptiveExplorationOptimization print("DynamicAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimizer import DynamicAdaptiveExplorationOptimizer +try: # DynamicAdaptiveExplorationOptimizer + from nevergrad.optimization.lama.DynamicAdaptiveExplorationOptimizer import ( + DynamicAdaptiveExplorationOptimizer, + ) lama_register["DynamicAdaptiveExplorationOptimizer"] = DynamicAdaptiveExplorationOptimizer - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimizer").set_name("LLAMADynamicAdaptiveExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveExplorationOptimizer" + ).set_name("LLAMADynamicAdaptiveExplorationOptimizer", register=True) +except Exception as e: # DynamicAdaptiveExplorationOptimizer print("DynamicAdaptiveExplorationOptimizer can not be imported: ", e) -try: +try: # DynamicAdaptiveFireworkAlgorithm from nevergrad.optimization.lama.DynamicAdaptiveFireworkAlgorithm import DynamicAdaptiveFireworkAlgorithm lama_register["DynamicAdaptiveFireworkAlgorithm"] = DynamicAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicAdaptiveFireworkAlgorithm").set_name("LLAMADynamicAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicAdaptiveFireworkAlgorithm" + ).set_name("LLAMADynamicAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # DynamicAdaptiveFireworkAlgorithm print("DynamicAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveGradientDifferentialEvolution import DynamicAdaptiveGradientDifferentialEvolution - - lama_register["DynamicAdaptiveGradientDifferentialEvolution"] = DynamicAdaptiveGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveGradientDifferentialEvolution").set_name("LLAMADynamicAdaptiveGradientDifferentialEvolution", register=True) -except Exception as e: +try: # DynamicAdaptiveGradientDifferentialEvolution + from nevergrad.optimization.lama.DynamicAdaptiveGradientDifferentialEvolution import ( + DynamicAdaptiveGradientDifferentialEvolution, + ) + + lama_register["DynamicAdaptiveGradientDifferentialEvolution"] = ( + DynamicAdaptiveGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: # DynamicAdaptiveGradientDifferentialEvolution print("DynamicAdaptiveGradientDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligence import DynamicAdaptiveGravitationalSwarmIntelligence - - lama_register["DynamicAdaptiveGravitationalSwarmIntelligence"] = DynamicAdaptiveGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligence", register=True) -except Exception as e: +try: # DynamicAdaptiveGravitationalSwarmIntelligence + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligence import ( + DynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["DynamicAdaptiveGravitationalSwarmIntelligence"] = ( + DynamicAdaptiveGravitationalSwarmIntelligence + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: # DynamicAdaptiveGravitationalSwarmIntelligence print("DynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligenceV2 import DynamicAdaptiveGravitationalSwarmIntelligenceV2 - - lama_register["DynamicAdaptiveGravitationalSwarmIntelligenceV2"] = DynamicAdaptiveGravitationalSwarmIntelligenceV2 - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) -except Exception as e: +try: # DynamicAdaptiveGravitationalSwarmIntelligenceV2 + from nevergrad.optimization.lama.DynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( + DynamicAdaptiveGravitationalSwarmIntelligenceV2, + ) + + lama_register["DynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( + DynamicAdaptiveGravitationalSwarmIntelligenceV2 + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: # DynamicAdaptiveGravitationalSwarmIntelligenceV2 print("DynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) -try: +try: # DynamicAdaptiveHybridAlgorithm from nevergrad.optimization.lama.DynamicAdaptiveHybridAlgorithm import DynamicAdaptiveHybridAlgorithm lama_register["DynamicAdaptiveHybridAlgorithm"] = DynamicAdaptiveHybridAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveHybridAlgorithm = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridAlgorithm").set_name("LLAMADynamicAdaptiveHybridAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveHybridAlgorithm = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridAlgorithm" + ).set_name("LLAMADynamicAdaptiveHybridAlgorithm", register=True) +except Exception as e: # DynamicAdaptiveHybridAlgorithm print("DynamicAdaptiveHybridAlgorithm can not be imported: ", e) -try: +try: # DynamicAdaptiveHybridDE from nevergrad.optimization.lama.DynamicAdaptiveHybridDE import DynamicAdaptiveHybridDE lama_register["DynamicAdaptiveHybridDE"] = DynamicAdaptiveHybridDE - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE").set_name("LLAMADynamicAdaptiveHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDE").set_name( + "LLAMADynamicAdaptiveHybridDE", register=True + ) +except Exception as e: # DynamicAdaptiveHybridDE print("DynamicAdaptiveHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveHybridDEPSOWithEliteMemory import DynamicAdaptiveHybridDEPSOWithEliteMemory +try: # DynamicAdaptiveHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.DynamicAdaptiveHybridDEPSOWithEliteMemory import ( + DynamicAdaptiveHybridDEPSOWithEliteMemory, + ) lama_register["DynamicAdaptiveHybridDEPSOWithEliteMemory"] = DynamicAdaptiveHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMADynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # DynamicAdaptiveHybridDEPSOWithEliteMemory print("DynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimization import DynamicAdaptiveHybridOptimization +try: # DynamicAdaptiveHybridOptimization + from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimization import ( + DynamicAdaptiveHybridOptimization, + ) lama_register["DynamicAdaptiveHybridOptimization"] = DynamicAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimization").set_name("LLAMADynamicAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridOptimization" + ).set_name("LLAMADynamicAdaptiveHybridOptimization", register=True) +except Exception as e: # DynamicAdaptiveHybridOptimization print("DynamicAdaptiveHybridOptimization can not be imported: ", e) -try: +try: # DynamicAdaptiveHybridOptimizer from nevergrad.optimization.lama.DynamicAdaptiveHybridOptimizer import DynamicAdaptiveHybridOptimizer lama_register["DynamicAdaptiveHybridOptimizer"] = DynamicAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimizer").set_name("LLAMADynamicAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveHybridOptimizer" + ).set_name("LLAMADynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: # DynamicAdaptiveHybridOptimizer print("DynamicAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch import DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch - - lama_register["DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch"] = DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch").set_name("LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch", register=True) -except Exception as e: +try: # DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch + from nevergrad.optimization.lama.DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch import ( + DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch, + ) + + lama_register["DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch"] = ( + DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch = NonObjectOptimizer( + method="LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch" + ).set_name("LLAMADynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch", register=True) +except Exception as e: # DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch print("DynamicAdaptiveMemeticDifferentialEvolutionWithSmartLocalSearch can not be imported: ", e) -try: +try: # DynamicAdaptiveMemeticOptimizer from nevergrad.optimization.lama.DynamicAdaptiveMemeticOptimizer import DynamicAdaptiveMemeticOptimizer lama_register["DynamicAdaptiveMemeticOptimizer"] = DynamicAdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticOptimizer").set_name("LLAMADynamicAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveMemeticOptimizer" + ).set_name("LLAMADynamicAdaptiveMemeticOptimizer", register=True) +except Exception as e: # DynamicAdaptiveMemeticOptimizer print("DynamicAdaptiveMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptivePopulationDifferentialEvolution import DynamicAdaptivePopulationDifferentialEvolution - - lama_register["DynamicAdaptivePopulationDifferentialEvolution"] = DynamicAdaptivePopulationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptivePopulationDifferentialEvolution").set_name("LLAMADynamicAdaptivePopulationDifferentialEvolution", register=True) -except Exception as e: +try: # DynamicAdaptivePopulationDifferentialEvolution + from nevergrad.optimization.lama.DynamicAdaptivePopulationDifferentialEvolution import ( + DynamicAdaptivePopulationDifferentialEvolution, + ) + + lama_register["DynamicAdaptivePopulationDifferentialEvolution"] = ( + DynamicAdaptivePopulationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptivePopulationDifferentialEvolution" + ).set_name("LLAMADynamicAdaptivePopulationDifferentialEvolution", register=True) +except Exception as e: # DynamicAdaptivePopulationDifferentialEvolution print("DynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveQuantumDifferentialEvolution import DynamicAdaptiveQuantumDifferentialEvolution +try: # DynamicAdaptiveQuantumDifferentialEvolution + from nevergrad.optimization.lama.DynamicAdaptiveQuantumDifferentialEvolution import ( + DynamicAdaptiveQuantumDifferentialEvolution, + ) lama_register["DynamicAdaptiveQuantumDifferentialEvolution"] = DynamicAdaptiveQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumDifferentialEvolution").set_name("LLAMADynamicAdaptiveQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMADynamicAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: # DynamicAdaptiveQuantumDifferentialEvolution print("DynamicAdaptiveQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveQuantumLevyOptimizer import DynamicAdaptiveQuantumLevyOptimizer +try: # DynamicAdaptiveQuantumLevyOptimizer + from nevergrad.optimization.lama.DynamicAdaptiveQuantumLevyOptimizer import ( + DynamicAdaptiveQuantumLevyOptimizer, + ) lama_register["DynamicAdaptiveQuantumLevyOptimizer"] = DynamicAdaptiveQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumLevyOptimizer").set_name("LLAMADynamicAdaptiveQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMADynamicAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: # DynamicAdaptiveQuantumLevyOptimizer print("DynamicAdaptiveQuantumLevyOptimizer can not be imported: ", e) -try: +try: # DynamicAdaptiveQuantumPSO from nevergrad.optimization.lama.DynamicAdaptiveQuantumPSO import DynamicAdaptiveQuantumPSO lama_register["DynamicAdaptiveQuantumPSO"] = DynamicAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO").set_name("LLAMADynamicAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuantumPSO").set_name( + "LLAMADynamicAdaptiveQuantumPSO", register=True + ) +except Exception as e: # DynamicAdaptiveQuantumPSO print("DynamicAdaptiveQuantumPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicAdaptiveQuasiRandomDEGradientAnnealing import DynamicAdaptiveQuasiRandomDEGradientAnnealing - - lama_register["DynamicAdaptiveQuasiRandomDEGradientAnnealing"] = DynamicAdaptiveQuasiRandomDEGradientAnnealing - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing").set_name("LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing", register=True) -except Exception as e: +try: # DynamicAdaptiveQuasiRandomDEGradientAnnealing + from nevergrad.optimization.lama.DynamicAdaptiveQuasiRandomDEGradientAnnealing import ( + DynamicAdaptiveQuasiRandomDEGradientAnnealing, + ) + + lama_register["DynamicAdaptiveQuasiRandomDEGradientAnnealing"] = ( + DynamicAdaptiveQuasiRandomDEGradientAnnealing + ) + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing" + ).set_name("LLAMADynamicAdaptiveQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: # DynamicAdaptiveQuasiRandomDEGradientAnnealing print("DynamicAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) -try: +try: # DynamicAdaptiveSwarmOptimization from nevergrad.optimization.lama.DynamicAdaptiveSwarmOptimization import DynamicAdaptiveSwarmOptimization lama_register["DynamicAdaptiveSwarmOptimization"] = DynamicAdaptiveSwarmOptimization - res = NonObjectOptimizer(method="LLAMADynamicAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicAdaptiveSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicAdaptiveSwarmOptimization").set_name("LLAMADynamicAdaptiveSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicAdaptiveSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicAdaptiveSwarmOptimization" + ).set_name("LLAMADynamicAdaptiveSwarmOptimization", register=True) +except Exception as e: # DynamicAdaptiveSwarmOptimization print("DynamicAdaptiveSwarmOptimization can not be imported: ", e) -try: +try: # DynamicBalancingPSO from nevergrad.optimization.lama.DynamicBalancingPSO import DynamicBalancingPSO lama_register["DynamicBalancingPSO"] = DynamicBalancingPSO - res = NonObjectOptimizer(method="LLAMADynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicBalancingPSO = NonObjectOptimizer(method="LLAMADynamicBalancingPSO").set_name("LLAMADynamicBalancingPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicBalancingPSO = NonObjectOptimizer(method="LLAMADynamicBalancingPSO").set_name( + "LLAMADynamicBalancingPSO", register=True + ) +except Exception as e: # DynamicBalancingPSO print("DynamicBalancingPSO can not be imported: ", e) -try: +try: # DynamicClusterHybridOptimization from nevergrad.optimization.lama.DynamicClusterHybridOptimization import DynamicClusterHybridOptimization lama_register["DynamicClusterHybridOptimization"] = DynamicClusterHybridOptimization - res = NonObjectOptimizer(method="LLAMADynamicClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicClusterHybridOptimization = NonObjectOptimizer(method="LLAMADynamicClusterHybridOptimization").set_name("LLAMADynamicClusterHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicClusterHybridOptimization = NonObjectOptimizer( + method="LLAMADynamicClusterHybridOptimization" + ).set_name("LLAMADynamicClusterHybridOptimization", register=True) +except Exception as e: # DynamicClusterHybridOptimization print("DynamicClusterHybridOptimization can not be imported: ", e) -try: +try: # DynamicCohortAdaptiveEvolution from nevergrad.optimization.lama.DynamicCohortAdaptiveEvolution import DynamicCohortAdaptiveEvolution lama_register["DynamicCohortAdaptiveEvolution"] = DynamicCohortAdaptiveEvolution - res = NonObjectOptimizer(method="LLAMADynamicCohortAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicCohortAdaptiveEvolution = NonObjectOptimizer(method="LLAMADynamicCohortAdaptiveEvolution").set_name("LLAMADynamicCohortAdaptiveEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicCohortAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicCohortAdaptiveEvolution = NonObjectOptimizer( + method="LLAMADynamicCohortAdaptiveEvolution" + ).set_name("LLAMADynamicCohortAdaptiveEvolution", register=True) +except Exception as e: # DynamicCohortAdaptiveEvolution print("DynamicCohortAdaptiveEvolution can not be imported: ", e) -try: +try: # DynamicCohortMemeticAlgorithm from nevergrad.optimization.lama.DynamicCohortMemeticAlgorithm import DynamicCohortMemeticAlgorithm lama_register["DynamicCohortMemeticAlgorithm"] = DynamicCohortMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMADynamicCohortMemeticAlgorithm").set_name("LLAMADynamicCohortMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADynamicCohortMemeticAlgorithm" + ).set_name("LLAMADynamicCohortMemeticAlgorithm", register=True) +except Exception as e: # DynamicCohortMemeticAlgorithm print("DynamicCohortMemeticAlgorithm can not be imported: ", e) -try: +try: # DynamicCohortOptimization from nevergrad.optimization.lama.DynamicCohortOptimization import DynamicCohortOptimization lama_register["DynamicCohortOptimization"] = DynamicCohortOptimization - res = NonObjectOptimizer(method="LLAMADynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicCohortOptimization = NonObjectOptimizer(method="LLAMADynamicCohortOptimization").set_name("LLAMADynamicCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicCohortOptimization = NonObjectOptimizer(method="LLAMADynamicCohortOptimization").set_name( + "LLAMADynamicCohortOptimization", register=True + ) +except Exception as e: # DynamicCohortOptimization print("DynamicCohortOptimization can not be imported: ", e) -try: +try: # DynamicCrowdedDE from nevergrad.optimization.lama.DynamicCrowdedDE import DynamicCrowdedDE lama_register["DynamicCrowdedDE"] = DynamicCrowdedDE - res = NonObjectOptimizer(method="LLAMADynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicCrowdedDE = NonObjectOptimizer(method="LLAMADynamicCrowdedDE").set_name("LLAMADynamicCrowdedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicCrowdedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicCrowdedDE = NonObjectOptimizer(method="LLAMADynamicCrowdedDE").set_name( + "LLAMADynamicCrowdedDE", register=True + ) +except Exception as e: # DynamicCrowdedDE print("DynamicCrowdedDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicCulturalDifferentialEvolution import DynamicCulturalDifferentialEvolution +try: # DynamicCulturalDifferentialEvolution + from nevergrad.optimization.lama.DynamicCulturalDifferentialEvolution import ( + DynamicCulturalDifferentialEvolution, + ) lama_register["DynamicCulturalDifferentialEvolution"] = DynamicCulturalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicCulturalDifferentialEvolution").set_name("LLAMADynamicCulturalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicCulturalDifferentialEvolution" + ).set_name("LLAMADynamicCulturalDifferentialEvolution", register=True) +except Exception as e: # DynamicCulturalDifferentialEvolution print("DynamicCulturalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicEliteAdaptiveHybridOptimizerV2 import DynamicEliteAdaptiveHybridOptimizerV2 +try: # DynamicEliteAdaptiveHybridOptimizerV2 + from nevergrad.optimization.lama.DynamicEliteAdaptiveHybridOptimizerV2 import ( + DynamicEliteAdaptiveHybridOptimizerV2, + ) lama_register["DynamicEliteAdaptiveHybridOptimizerV2"] = DynamicEliteAdaptiveHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMADynamicEliteAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEliteAdaptiveHybridOptimizerV2 = NonObjectOptimizer(method="LLAMADynamicEliteAdaptiveHybridOptimizerV2").set_name("LLAMADynamicEliteAdaptiveHybridOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicEliteAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEliteAdaptiveHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMADynamicEliteAdaptiveHybridOptimizerV2" + ).set_name("LLAMADynamicEliteAdaptiveHybridOptimizerV2", register=True) +except Exception as e: # DynamicEliteAdaptiveHybridOptimizerV2 print("DynamicEliteAdaptiveHybridOptimizerV2 can not be imported: ", e) -try: +try: # DynamicEliteAnnealingDE from nevergrad.optimization.lama.DynamicEliteAnnealingDE import DynamicEliteAnnealingDE lama_register["DynamicEliteAnnealingDE"] = DynamicEliteAnnealingDE - res = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE").set_name("LLAMADynamicEliteAnnealingDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMADynamicEliteAnnealingDE").set_name( + "LLAMADynamicEliteAnnealingDE", register=True + ) +except Exception as e: # DynamicEliteAnnealingDE print("DynamicEliteAnnealingDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicEliteCovarianceMemeticSearch import DynamicEliteCovarianceMemeticSearch +try: # DynamicEliteCovarianceMemeticSearch + from nevergrad.optimization.lama.DynamicEliteCovarianceMemeticSearch import ( + DynamicEliteCovarianceMemeticSearch, + ) lama_register["DynamicEliteCovarianceMemeticSearch"] = DynamicEliteCovarianceMemeticSearch - res = NonObjectOptimizer(method="LLAMADynamicEliteCovarianceMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEliteCovarianceMemeticSearch = NonObjectOptimizer(method="LLAMADynamicEliteCovarianceMemeticSearch").set_name("LLAMADynamicEliteCovarianceMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicEliteCovarianceMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEliteCovarianceMemeticSearch = NonObjectOptimizer( + method="LLAMADynamicEliteCovarianceMemeticSearch" + ).set_name("LLAMADynamicEliteCovarianceMemeticSearch", register=True) +except Exception as e: # DynamicEliteCovarianceMemeticSearch print("DynamicEliteCovarianceMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicEliteEnhancedDifferentialEvolution import DynamicEliteEnhancedDifferentialEvolution +try: # DynamicEliteEnhancedDifferentialEvolution + from nevergrad.optimization.lama.DynamicEliteEnhancedDifferentialEvolution import ( + DynamicEliteEnhancedDifferentialEvolution, + ) lama_register["DynamicEliteEnhancedDifferentialEvolution"] = DynamicEliteEnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicEliteEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEliteEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicEliteEnhancedDifferentialEvolution").set_name("LLAMADynamicEliteEnhancedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicEliteEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEliteEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicEliteEnhancedDifferentialEvolution" + ).set_name("LLAMADynamicEliteEnhancedDifferentialEvolution", register=True) +except Exception as e: # DynamicEliteEnhancedDifferentialEvolution print("DynamicEliteEnhancedDifferentialEvolution can not be imported: ", e) -try: +try: # DynamicElitistHybridOptimizer from nevergrad.optimization.lama.DynamicElitistHybridOptimizer import DynamicElitistHybridOptimizer lama_register["DynamicElitistHybridOptimizer"] = DynamicElitistHybridOptimizer - res = NonObjectOptimizer(method="LLAMADynamicElitistHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicElitistHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicElitistHybridOptimizer").set_name("LLAMADynamicElitistHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicElitistHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicElitistHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicElitistHybridOptimizer" + ).set_name("LLAMADynamicElitistHybridOptimizer", register=True) +except Exception as e: # DynamicElitistHybridOptimizer print("DynamicElitistHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicEnhancedDifferentialFireworkAlgorithm import DynamicEnhancedDifferentialFireworkAlgorithm - - lama_register["DynamicEnhancedDifferentialFireworkAlgorithm"] = DynamicEnhancedDifferentialFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm").set_name("LLAMADynamicEnhancedDifferentialFireworkAlgorithm", register=True) -except Exception as e: +try: # DynamicEnhancedDifferentialFireworkAlgorithm + from nevergrad.optimization.lama.DynamicEnhancedDifferentialFireworkAlgorithm import ( + DynamicEnhancedDifferentialFireworkAlgorithm, + ) + + lama_register["DynamicEnhancedDifferentialFireworkAlgorithm"] = ( + DynamicEnhancedDifferentialFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicEnhancedDifferentialFireworkAlgorithm" + ).set_name("LLAMADynamicEnhancedDifferentialFireworkAlgorithm", register=True) +except Exception as e: # DynamicEnhancedDifferentialFireworkAlgorithm print("DynamicEnhancedDifferentialFireworkAlgorithm can not be imported: ", e) -try: +try: # DynamicEnhancedHybridOptimizer from nevergrad.optimization.lama.DynamicEnhancedHybridOptimizer import DynamicEnhancedHybridOptimizer lama_register["DynamicEnhancedHybridOptimizer"] = DynamicEnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMADynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicEnhancedHybridOptimizer").set_name("LLAMADynamicEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMADynamicEnhancedHybridOptimizer" + ).set_name("LLAMADynamicEnhancedHybridOptimizer", register=True) +except Exception as e: # DynamicEnhancedHybridOptimizer print("DynamicEnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicExplorationExploitationAlgorithm import DynamicExplorationExploitationAlgorithm +try: # DynamicExplorationExploitationAlgorithm + from nevergrad.optimization.lama.DynamicExplorationExploitationAlgorithm import ( + DynamicExplorationExploitationAlgorithm, + ) lama_register["DynamicExplorationExploitationAlgorithm"] = DynamicExplorationExploitationAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationAlgorithm").set_name("LLAMADynamicExplorationExploitationAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationAlgorithm" + ).set_name("LLAMADynamicExplorationExploitationAlgorithm", register=True) +except Exception as e: # DynamicExplorationExploitationAlgorithm print("DynamicExplorationExploitationAlgorithm can not be imported: ", e) -try: +try: # DynamicExplorationExploitationDE from nevergrad.optimization.lama.DynamicExplorationExploitationDE import DynamicExplorationExploitationDE lama_register["DynamicExplorationExploitationDE"] = DynamicExplorationExploitationDE - res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicExplorationExploitationDE = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationDE").set_name("LLAMADynamicExplorationExploitationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicExplorationExploitationDE = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationDE" + ).set_name("LLAMADynamicExplorationExploitationDE", register=True) +except Exception as e: # DynamicExplorationExploitationDE print("DynamicExplorationExploitationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicExplorationExploitationMemeticAlgorithm import DynamicExplorationExploitationMemeticAlgorithm - - lama_register["DynamicExplorationExploitationMemeticAlgorithm"] = DynamicExplorationExploitationMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicExplorationExploitationMemeticAlgorithm = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationMemeticAlgorithm").set_name("LLAMADynamicExplorationExploitationMemeticAlgorithm", register=True) -except Exception as e: +try: # DynamicExplorationExploitationMemeticAlgorithm + from nevergrad.optimization.lama.DynamicExplorationExploitationMemeticAlgorithm import ( + DynamicExplorationExploitationMemeticAlgorithm, + ) + + lama_register["DynamicExplorationExploitationMemeticAlgorithm"] = ( + DynamicExplorationExploitationMemeticAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMADynamicExplorationExploitationMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicExplorationExploitationMemeticAlgorithm = NonObjectOptimizer( + method="LLAMADynamicExplorationExploitationMemeticAlgorithm" + ).set_name("LLAMADynamicExplorationExploitationMemeticAlgorithm", register=True) +except Exception as e: # DynamicExplorationExploitationMemeticAlgorithm print("DynamicExplorationExploitationMemeticAlgorithm can not be imported: ", e) -try: +try: # DynamicExplorationOptimization from nevergrad.optimization.lama.DynamicExplorationOptimization import DynamicExplorationOptimization lama_register["DynamicExplorationOptimization"] = DynamicExplorationOptimization - res = NonObjectOptimizer(method="LLAMADynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicExplorationOptimization = NonObjectOptimizer(method="LLAMADynamicExplorationOptimization").set_name("LLAMADynamicExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMADynamicExplorationOptimization" + ).set_name("LLAMADynamicExplorationOptimization", register=True) +except Exception as e: # DynamicExplorationOptimization print("DynamicExplorationOptimization can not be imported: ", e) -try: +try: # DynamicFireworkAlgorithm from nevergrad.optimization.lama.DynamicFireworkAlgorithm import DynamicFireworkAlgorithm lama_register["DynamicFireworkAlgorithm"] = DynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm").set_name("LLAMADynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicFireworkAlgorithm").set_name( + "LLAMADynamicFireworkAlgorithm", register=True + ) +except Exception as e: # DynamicFireworkAlgorithm print("DynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicFireworksSwarmOptimization import DynamicFireworksSwarmOptimization +try: # DynamicFireworksSwarmOptimization + from nevergrad.optimization.lama.DynamicFireworksSwarmOptimization import ( + DynamicFireworksSwarmOptimization, + ) lama_register["DynamicFireworksSwarmOptimization"] = DynamicFireworksSwarmOptimization - res = NonObjectOptimizer(method="LLAMADynamicFireworksSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicFireworksSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicFireworksSwarmOptimization").set_name("LLAMADynamicFireworksSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicFireworksSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicFireworksSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicFireworksSwarmOptimization" + ).set_name("LLAMADynamicFireworksSwarmOptimization", register=True) +except Exception as e: # DynamicFireworksSwarmOptimization print("DynamicFireworksSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicFractionalClusterOptimization import DynamicFractionalClusterOptimization +try: # DynamicFractionalClusterOptimization + from nevergrad.optimization.lama.DynamicFractionalClusterOptimization import ( + DynamicFractionalClusterOptimization, + ) lama_register["DynamicFractionalClusterOptimization"] = DynamicFractionalClusterOptimization - res = NonObjectOptimizer(method="LLAMADynamicFractionalClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicFractionalClusterOptimization = NonObjectOptimizer(method="LLAMADynamicFractionalClusterOptimization").set_name("LLAMADynamicFractionalClusterOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicFractionalClusterOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicFractionalClusterOptimization = NonObjectOptimizer( + method="LLAMADynamicFractionalClusterOptimization" + ).set_name("LLAMADynamicFractionalClusterOptimization", register=True) +except Exception as e: # DynamicFractionalClusterOptimization print("DynamicFractionalClusterOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealing import DynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["DynamicGradientBoostedMemorySimulatedAnnealing"] = DynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # DynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealing import ( + DynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["DynamicGradientBoostedMemorySimulatedAnnealing"] = ( + DynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # DynamicGradientBoostedMemorySimulatedAnnealing print("DynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealingV2 import DynamicGradientBoostedMemorySimulatedAnnealingV2 - - lama_register["DynamicGradientBoostedMemorySimulatedAnnealingV2"] = DynamicGradientBoostedMemorySimulatedAnnealingV2 - res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2").set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2", register=True) -except Exception as e: +try: # DynamicGradientBoostedMemorySimulatedAnnealingV2 + from nevergrad.optimization.lama.DynamicGradientBoostedMemorySimulatedAnnealingV2 import ( + DynamicGradientBoostedMemorySimulatedAnnealingV2, + ) + + lama_register["DynamicGradientBoostedMemorySimulatedAnnealingV2"] = ( + DynamicGradientBoostedMemorySimulatedAnnealingV2 + ) + # res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2" + ).set_name("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2", register=True) +except Exception as e: # DynamicGradientBoostedMemorySimulatedAnnealingV2 print("DynamicGradientBoostedMemorySimulatedAnnealingV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicGradientBoostedRefinementAnnealing import DynamicGradientBoostedRefinementAnnealing +try: # DynamicGradientBoostedRefinementAnnealing + from nevergrad.optimization.lama.DynamicGradientBoostedRefinementAnnealing import ( + DynamicGradientBoostedRefinementAnnealing, + ) lama_register["DynamicGradientBoostedRefinementAnnealing"] = DynamicGradientBoostedRefinementAnnealing - res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedRefinementAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicGradientBoostedRefinementAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientBoostedRefinementAnnealing").set_name("LLAMADynamicGradientBoostedRefinementAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicGradientBoostedRefinementAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicGradientBoostedRefinementAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientBoostedRefinementAnnealing" + ).set_name("LLAMADynamicGradientBoostedRefinementAnnealing", register=True) +except Exception as e: # DynamicGradientBoostedRefinementAnnealing print("DynamicGradientBoostedRefinementAnnealing can not be imported: ", e) -try: +try: # DynamicGradientEnhancedAnnealing from nevergrad.optimization.lama.DynamicGradientEnhancedAnnealing import DynamicGradientEnhancedAnnealing lama_register["DynamicGradientEnhancedAnnealing"] = DynamicGradientEnhancedAnnealing - res = NonObjectOptimizer(method="LLAMADynamicGradientEnhancedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicGradientEnhancedAnnealing = NonObjectOptimizer(method="LLAMADynamicGradientEnhancedAnnealing").set_name("LLAMADynamicGradientEnhancedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicGradientEnhancedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicGradientEnhancedAnnealing = NonObjectOptimizer( + method="LLAMADynamicGradientEnhancedAnnealing" + ).set_name("LLAMADynamicGradientEnhancedAnnealing", register=True) +except Exception as e: # DynamicGradientEnhancedAnnealing print("DynamicGradientEnhancedAnnealing can not be imported: ", e) -try: +try: # DynamicHybridAnnealing from nevergrad.optimization.lama.DynamicHybridAnnealing import DynamicHybridAnnealing lama_register["DynamicHybridAnnealing"] = DynamicHybridAnnealing - res = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicHybridAnnealing = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing").set_name("LLAMADynamicHybridAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicHybridAnnealing = NonObjectOptimizer(method="LLAMADynamicHybridAnnealing").set_name( + "LLAMADynamicHybridAnnealing", register=True + ) +except Exception as e: # DynamicHybridAnnealing print("DynamicHybridAnnealing can not be imported: ", e) -try: +try: # DynamicHybridOptimizer from nevergrad.optimization.lama.DynamicHybridOptimizer import DynamicHybridOptimizer lama_register["DynamicHybridOptimizer"] = DynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer").set_name("LLAMADynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicHybridOptimizer = NonObjectOptimizer(method="LLAMADynamicHybridOptimizer").set_name( + "LLAMADynamicHybridOptimizer", register=True + ) +except Exception as e: # DynamicHybridOptimizer print("DynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicHybridQuantumDifferentialEvolution import DynamicHybridQuantumDifferentialEvolution +try: # DynamicHybridQuantumDifferentialEvolution + from nevergrad.optimization.lama.DynamicHybridQuantumDifferentialEvolution import ( + DynamicHybridQuantumDifferentialEvolution, + ) lama_register["DynamicHybridQuantumDifferentialEvolution"] = DynamicHybridQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicHybridQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicHybridQuantumDifferentialEvolution").set_name("LLAMADynamicHybridQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicHybridQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicHybridQuantumDifferentialEvolution" + ).set_name("LLAMADynamicHybridQuantumDifferentialEvolution", register=True) +except Exception as e: # DynamicHybridQuantumDifferentialEvolution print("DynamicHybridQuantumDifferentialEvolution can not be imported: ", e) -try: +try: # DynamicHybridSelfAdaptiveDE from nevergrad.optimization.lama.DynamicHybridSelfAdaptiveDE import DynamicHybridSelfAdaptiveDE lama_register["DynamicHybridSelfAdaptiveDE"] = DynamicHybridSelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicHybridSelfAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE").set_name("LLAMADynamicHybridSelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicHybridSelfAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicHybridSelfAdaptiveDE").set_name( + "LLAMADynamicHybridSelfAdaptiveDE", register=True + ) +except Exception as e: # DynamicHybridSelfAdaptiveDE print("DynamicHybridSelfAdaptiveDE can not be imported: ", e) -try: +try: # DynamicLevyHarmonySearch from nevergrad.optimization.lama.DynamicLevyHarmonySearch import DynamicLevyHarmonySearch lama_register["DynamicLevyHarmonySearch"] = DynamicLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch").set_name("LLAMADynamicLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMADynamicLevyHarmonySearch").set_name( + "LLAMADynamicLevyHarmonySearch", register=True + ) +except Exception as e: # DynamicLevyHarmonySearch print("DynamicLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicLocalSearchFireworkAlgorithm import DynamicLocalSearchFireworkAlgorithm +try: # DynamicLocalSearchFireworkAlgorithm + from nevergrad.optimization.lama.DynamicLocalSearchFireworkAlgorithm import ( + DynamicLocalSearchFireworkAlgorithm, + ) lama_register["DynamicLocalSearchFireworkAlgorithm"] = DynamicLocalSearchFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicLocalSearchFireworkAlgorithm").set_name("LLAMADynamicLocalSearchFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMADynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: # DynamicLocalSearchFireworkAlgorithm print("DynamicLocalSearchFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicMemeticDifferentialEvolutionWithAdaptiveElitism import DynamicMemeticDifferentialEvolutionWithAdaptiveElitism - - lama_register["DynamicMemeticDifferentialEvolutionWithAdaptiveElitism"] = DynamicMemeticDifferentialEvolutionWithAdaptiveElitism - res = NonObjectOptimizer(method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism = NonObjectOptimizer(method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism").set_name("LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism", register=True) -except Exception as e: +try: # DynamicMemeticDifferentialEvolutionWithAdaptiveElitism + from nevergrad.optimization.lama.DynamicMemeticDifferentialEvolutionWithAdaptiveElitism import ( + DynamicMemeticDifferentialEvolutionWithAdaptiveElitism, + ) + + lama_register["DynamicMemeticDifferentialEvolutionWithAdaptiveElitism"] = ( + DynamicMemeticDifferentialEvolutionWithAdaptiveElitism + ) + # res = NonObjectOptimizer(method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism = NonObjectOptimizer( + method="LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism" + ).set_name("LLAMADynamicMemeticDifferentialEvolutionWithAdaptiveElitism", register=True) +except Exception as e: # DynamicMemeticDifferentialEvolutionWithAdaptiveElitism print("DynamicMemeticDifferentialEvolutionWithAdaptiveElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicMemoryAdaptiveConvergenceStrategyV76 import DynamicMemoryAdaptiveConvergenceStrategyV76 +try: # DynamicMemoryAdaptiveConvergenceStrategyV76 + from nevergrad.optimization.lama.DynamicMemoryAdaptiveConvergenceStrategyV76 import ( + DynamicMemoryAdaptiveConvergenceStrategyV76, + ) lama_register["DynamicMemoryAdaptiveConvergenceStrategyV76"] = DynamicMemoryAdaptiveConvergenceStrategyV76 - res = NonObjectOptimizer(method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMemoryAdaptiveConvergenceStrategyV76 = NonObjectOptimizer(method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76").set_name("LLAMADynamicMemoryAdaptiveConvergenceStrategyV76", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMemoryAdaptiveConvergenceStrategyV76 = NonObjectOptimizer( + method="LLAMADynamicMemoryAdaptiveConvergenceStrategyV76" + ).set_name("LLAMADynamicMemoryAdaptiveConvergenceStrategyV76", register=True) +except Exception as e: # DynamicMemoryAdaptiveConvergenceStrategyV76 print("DynamicMemoryAdaptiveConvergenceStrategyV76 can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicMemoryEnhancedDualPhaseStrategyV66 import DynamicMemoryEnhancedDualPhaseStrategyV66 +try: # DynamicMemoryEnhancedDualPhaseStrategyV66 + from nevergrad.optimization.lama.DynamicMemoryEnhancedDualPhaseStrategyV66 import ( + DynamicMemoryEnhancedDualPhaseStrategyV66, + ) lama_register["DynamicMemoryEnhancedDualPhaseStrategyV66"] = DynamicMemoryEnhancedDualPhaseStrategyV66 - res = NonObjectOptimizer(method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMemoryEnhancedDualPhaseStrategyV66 = NonObjectOptimizer(method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66").set_name("LLAMADynamicMemoryEnhancedDualPhaseStrategyV66", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMemoryEnhancedDualPhaseStrategyV66 = NonObjectOptimizer( + method="LLAMADynamicMemoryEnhancedDualPhaseStrategyV66" + ).set_name("LLAMADynamicMemoryEnhancedDualPhaseStrategyV66", register=True) +except Exception as e: # DynamicMemoryEnhancedDualPhaseStrategyV66 print("DynamicMemoryEnhancedDualPhaseStrategyV66 can not be imported: ", e) -try: +try: # DynamicMemoryHybridSearch from nevergrad.optimization.lama.DynamicMemoryHybridSearch import DynamicMemoryHybridSearch lama_register["DynamicMemoryHybridSearch"] = DynamicMemoryHybridSearch - res = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMemoryHybridSearch = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch").set_name("LLAMADynamicMemoryHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMemoryHybridSearch = NonObjectOptimizer(method="LLAMADynamicMemoryHybridSearch").set_name( + "LLAMADynamicMemoryHybridSearch", register=True + ) +except Exception as e: # DynamicMemoryHybridSearch print("DynamicMemoryHybridSearch can not be imported: ", e) -try: +try: # DynamicMultiPhaseAnnealingPlus from nevergrad.optimization.lama.DynamicMultiPhaseAnnealingPlus import DynamicMultiPhaseAnnealingPlus lama_register["DynamicMultiPhaseAnnealingPlus"] = DynamicMultiPhaseAnnealingPlus - res = NonObjectOptimizer(method="LLAMADynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMultiPhaseAnnealingPlus = NonObjectOptimizer(method="LLAMADynamicMultiPhaseAnnealingPlus").set_name("LLAMADynamicMultiPhaseAnnealingPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( + method="LLAMADynamicMultiPhaseAnnealingPlus" + ).set_name("LLAMADynamicMultiPhaseAnnealingPlus", register=True) +except Exception as e: # DynamicMultiPhaseAnnealingPlus print("DynamicMultiPhaseAnnealingPlus can not be imported: ", e) -try: +try: # DynamicMultiStrategyOptimizer from nevergrad.optimization.lama.DynamicMultiStrategyOptimizer import DynamicMultiStrategyOptimizer lama_register["DynamicMultiStrategyOptimizer"] = DynamicMultiStrategyOptimizer - res = NonObjectOptimizer(method="LLAMADynamicMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMADynamicMultiStrategyOptimizer").set_name("LLAMADynamicMultiStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMADynamicMultiStrategyOptimizer" + ).set_name("LLAMADynamicMultiStrategyOptimizer", register=True) +except Exception as e: # DynamicMultiStrategyOptimizer print("DynamicMultiStrategyOptimizer can not be imported: ", e) -try: +try: # DynamicNichePSO_DE_LS from nevergrad.optimization.lama.DynamicNichePSO_DE_LS import DynamicNichePSO_DE_LS lama_register["DynamicNichePSO_DE_LS"] = DynamicNichePSO_DE_LS - res = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS").set_name("LLAMADynamicNichePSO_DE_LS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMADynamicNichePSO_DE_LS").set_name( + "LLAMADynamicNichePSO_DE_LS", register=True + ) +except Exception as e: # DynamicNichePSO_DE_LS print("DynamicNichePSO_DE_LS can not be imported: ", e) -try: +try: # DynamicNichingDEPSOWithRestart from nevergrad.optimization.lama.DynamicNichingDEPSOWithRestart import DynamicNichingDEPSOWithRestart lama_register["DynamicNichingDEPSOWithRestart"] = DynamicNichingDEPSOWithRestart - res = NonObjectOptimizer(method="LLAMADynamicNichingDEPSOWithRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicNichingDEPSOWithRestart = NonObjectOptimizer(method="LLAMADynamicNichingDEPSOWithRestart").set_name("LLAMADynamicNichingDEPSOWithRestart", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicNichingDEPSOWithRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicNichingDEPSOWithRestart = NonObjectOptimizer( + method="LLAMADynamicNichingDEPSOWithRestart" + ).set_name("LLAMADynamicNichingDEPSOWithRestart", register=True) +except Exception as e: # DynamicNichingDEPSOWithRestart print("DynamicNichingDEPSOWithRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicPopulationAdaptiveGradientEvolution import DynamicPopulationAdaptiveGradientEvolution +try: # DynamicPopulationAdaptiveGradientEvolution + from nevergrad.optimization.lama.DynamicPopulationAdaptiveGradientEvolution import ( + DynamicPopulationAdaptiveGradientEvolution, + ) lama_register["DynamicPopulationAdaptiveGradientEvolution"] = DynamicPopulationAdaptiveGradientEvolution - res = NonObjectOptimizer(method="LLAMADynamicPopulationAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPopulationAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMADynamicPopulationAdaptiveGradientEvolution").set_name("LLAMADynamicPopulationAdaptiveGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicPopulationAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPopulationAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMADynamicPopulationAdaptiveGradientEvolution" + ).set_name("LLAMADynamicPopulationAdaptiveGradientEvolution", register=True) +except Exception as e: # DynamicPopulationAdaptiveGradientEvolution print("DynamicPopulationAdaptiveGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicPopulationMemeticDifferentialEvolution import DynamicPopulationMemeticDifferentialEvolution - - lama_register["DynamicPopulationMemeticDifferentialEvolution"] = DynamicPopulationMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicPopulationMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPopulationMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicPopulationMemeticDifferentialEvolution").set_name("LLAMADynamicPopulationMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # DynamicPopulationMemeticDifferentialEvolution + from nevergrad.optimization.lama.DynamicPopulationMemeticDifferentialEvolution import ( + DynamicPopulationMemeticDifferentialEvolution, + ) + + lama_register["DynamicPopulationMemeticDifferentialEvolution"] = ( + DynamicPopulationMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADynamicPopulationMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPopulationMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicPopulationMemeticDifferentialEvolution" + ).set_name("LLAMADynamicPopulationMemeticDifferentialEvolution", register=True) +except Exception as e: # DynamicPopulationMemeticDifferentialEvolution print("DynamicPopulationMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicPrecisionBalancedEvolution import DynamicPrecisionBalancedEvolution +try: # DynamicPrecisionBalancedEvolution + from nevergrad.optimization.lama.DynamicPrecisionBalancedEvolution import ( + DynamicPrecisionBalancedEvolution, + ) lama_register["DynamicPrecisionBalancedEvolution"] = DynamicPrecisionBalancedEvolution - res = NonObjectOptimizer(method="LLAMADynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPrecisionBalancedEvolution = NonObjectOptimizer(method="LLAMADynamicPrecisionBalancedEvolution").set_name("LLAMADynamicPrecisionBalancedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPrecisionBalancedEvolution = NonObjectOptimizer( + method="LLAMADynamicPrecisionBalancedEvolution" + ).set_name("LLAMADynamicPrecisionBalancedEvolution", register=True) +except Exception as e: # DynamicPrecisionBalancedEvolution print("DynamicPrecisionBalancedEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicPrecisionCosineDifferentialSwarm import DynamicPrecisionCosineDifferentialSwarm +try: # DynamicPrecisionCosineDifferentialSwarm + from nevergrad.optimization.lama.DynamicPrecisionCosineDifferentialSwarm import ( + DynamicPrecisionCosineDifferentialSwarm, + ) lama_register["DynamicPrecisionCosineDifferentialSwarm"] = DynamicPrecisionCosineDifferentialSwarm - res = NonObjectOptimizer(method="LLAMADynamicPrecisionCosineDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPrecisionCosineDifferentialSwarm = NonObjectOptimizer(method="LLAMADynamicPrecisionCosineDifferentialSwarm").set_name("LLAMADynamicPrecisionCosineDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicPrecisionCosineDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPrecisionCosineDifferentialSwarm = NonObjectOptimizer( + method="LLAMADynamicPrecisionCosineDifferentialSwarm" + ).set_name("LLAMADynamicPrecisionCosineDifferentialSwarm", register=True) +except Exception as e: # DynamicPrecisionCosineDifferentialSwarm print("DynamicPrecisionCosineDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicPrecisionExplorationOptimizer import DynamicPrecisionExplorationOptimizer +try: # DynamicPrecisionExplorationOptimizer + from nevergrad.optimization.lama.DynamicPrecisionExplorationOptimizer import ( + DynamicPrecisionExplorationOptimizer, + ) lama_register["DynamicPrecisionExplorationOptimizer"] = DynamicPrecisionExplorationOptimizer - res = NonObjectOptimizer(method="LLAMADynamicPrecisionExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPrecisionExplorationOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionExplorationOptimizer").set_name("LLAMADynamicPrecisionExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicPrecisionExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPrecisionExplorationOptimizer = NonObjectOptimizer( + method="LLAMADynamicPrecisionExplorationOptimizer" + ).set_name("LLAMADynamicPrecisionExplorationOptimizer", register=True) +except Exception as e: # DynamicPrecisionExplorationOptimizer print("DynamicPrecisionExplorationOptimizer can not be imported: ", e) -try: +try: # DynamicPrecisionOptimizer from nevergrad.optimization.lama.DynamicPrecisionOptimizer import DynamicPrecisionOptimizer lama_register["DynamicPrecisionOptimizer"] = DynamicPrecisionOptimizer - res = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer").set_name("LLAMADynamicPrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMADynamicPrecisionOptimizer").set_name( + "LLAMADynamicPrecisionOptimizer", register=True + ) +except Exception as e: # DynamicPrecisionOptimizer print("DynamicPrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumAdaptiveEvolutionStrategy import DynamicQuantumAdaptiveEvolutionStrategy +try: # DynamicQuantumAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.DynamicQuantumAdaptiveEvolutionStrategy import ( + DynamicQuantumAdaptiveEvolutionStrategy, + ) lama_register["DynamicQuantumAdaptiveEvolutionStrategy"] = DynamicQuantumAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMADynamicQuantumAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMADynamicQuantumAdaptiveEvolutionStrategy").set_name("LLAMADynamicQuantumAdaptiveEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMADynamicQuantumAdaptiveEvolutionStrategy" + ).set_name("LLAMADynamicQuantumAdaptiveEvolutionStrategy", register=True) +except Exception as e: # DynamicQuantumAdaptiveEvolutionStrategy print("DynamicQuantumAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolution import DynamicQuantumDifferentialEvolution +try: # DynamicQuantumDifferentialEvolution + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolution import ( + DynamicQuantumDifferentialEvolution, + ) lama_register["DynamicQuantumDifferentialEvolution"] = DynamicQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolution").set_name("LLAMADynamicQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolution" + ).set_name("LLAMADynamicQuantumDifferentialEvolution", register=True) +except Exception as e: # DynamicQuantumDifferentialEvolution print("DynamicQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch import DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch - - lama_register["DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch"] = DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch - res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch").set_name("LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch", register=True) -except Exception as e: +try: # DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch import ( + DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch, + ) + + lama_register["DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch"] = ( + DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch" + ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch", register=True) +except Exception as e: # DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch print("DynamicQuantumDifferentialEvolutionWithElitistMemoryAndHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart import DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart - - lama_register["DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart"] = DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart - res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart").set_name("LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart", register=True) -except Exception as e: +try: # DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart + from nevergrad.optimization.lama.DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart import ( + DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart, + ) + + lama_register["DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart"] = ( + DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart + ) + # res = NonObjectOptimizer(method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart = NonObjectOptimizer( + method="LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart" + ).set_name("LLAMADynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart", register=True) +except Exception as e: # DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart print("DynamicQuantumDifferentialEvolutionWithLocalSearchAndRestart can not be imported: ", e) -try: +try: # DynamicQuantumEvolution from nevergrad.optimization.lama.DynamicQuantumEvolution import DynamicQuantumEvolution lama_register["DynamicQuantumEvolution"] = DynamicQuantumEvolution - res = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution").set_name("LLAMADynamicQuantumEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumEvolution = NonObjectOptimizer(method="LLAMADynamicQuantumEvolution").set_name( + "LLAMADynamicQuantumEvolution", register=True + ) +except Exception as e: # DynamicQuantumEvolution print("DynamicQuantumEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumGuidedHybridSearchV7 import DynamicQuantumGuidedHybridSearchV7 +try: # DynamicQuantumGuidedHybridSearchV7 + from nevergrad.optimization.lama.DynamicQuantumGuidedHybridSearchV7 import ( + DynamicQuantumGuidedHybridSearchV7, + ) lama_register["DynamicQuantumGuidedHybridSearchV7"] = DynamicQuantumGuidedHybridSearchV7 - res = NonObjectOptimizer(method="LLAMADynamicQuantumGuidedHybridSearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumGuidedHybridSearchV7 = NonObjectOptimizer(method="LLAMADynamicQuantumGuidedHybridSearchV7").set_name("LLAMADynamicQuantumGuidedHybridSearchV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumGuidedHybridSearchV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumGuidedHybridSearchV7 = NonObjectOptimizer( + method="LLAMADynamicQuantumGuidedHybridSearchV7" + ).set_name("LLAMADynamicQuantumGuidedHybridSearchV7", register=True) +except Exception as e: # DynamicQuantumGuidedHybridSearchV7 print("DynamicQuantumGuidedHybridSearchV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialHybridSearch import DynamicQuantumLevyDifferentialHybridSearch +try: # DynamicQuantumLevyDifferentialHybridSearch + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialHybridSearch import ( + DynamicQuantumLevyDifferentialHybridSearch, + ) lama_register["DynamicQuantumLevyDifferentialHybridSearch"] = DynamicQuantumLevyDifferentialHybridSearch - res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumLevyDifferentialHybridSearch = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialHybridSearch").set_name("LLAMADynamicQuantumLevyDifferentialHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( + method="LLAMADynamicQuantumLevyDifferentialHybridSearch" + ).set_name("LLAMADynamicQuantumLevyDifferentialHybridSearch", register=True) +except Exception as e: # DynamicQuantumLevyDifferentialHybridSearch print("DynamicQuantumLevyDifferentialHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialSwarmOptimization import DynamicQuantumLevyDifferentialSwarmOptimization - - lama_register["DynamicQuantumLevyDifferentialSwarmOptimization"] = DynamicQuantumLevyDifferentialSwarmOptimization - res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumLevyDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization").set_name("LLAMADynamicQuantumLevyDifferentialSwarmOptimization", register=True) -except Exception as e: +try: # DynamicQuantumLevyDifferentialSwarmOptimization + from nevergrad.optimization.lama.DynamicQuantumLevyDifferentialSwarmOptimization import ( + DynamicQuantumLevyDifferentialSwarmOptimization, + ) + + lama_register["DynamicQuantumLevyDifferentialSwarmOptimization"] = ( + DynamicQuantumLevyDifferentialSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumLevyDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumLevyDifferentialSwarmOptimization" + ).set_name("LLAMADynamicQuantumLevyDifferentialSwarmOptimization", register=True) +except Exception as e: # DynamicQuantumLevyDifferentialSwarmOptimization print("DynamicQuantumLevyDifferentialSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumLevySwarmOptimization import DynamicQuantumLevySwarmOptimization +try: # DynamicQuantumLevySwarmOptimization + from nevergrad.optimization.lama.DynamicQuantumLevySwarmOptimization import ( + DynamicQuantumLevySwarmOptimization, + ) lama_register["DynamicQuantumLevySwarmOptimization"] = DynamicQuantumLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMADynamicQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumLevySwarmOptimization").set_name("LLAMADynamicQuantumLevySwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumLevySwarmOptimization" + ).set_name("LLAMADynamicQuantumLevySwarmOptimization", register=True) +except Exception as e: # DynamicQuantumLevySwarmOptimization print("DynamicQuantumLevySwarmOptimization can not be imported: ", e) -try: +try: # DynamicQuantumMemeticOptimizer from nevergrad.optimization.lama.DynamicQuantumMemeticOptimizer import DynamicQuantumMemeticOptimizer lama_register["DynamicQuantumMemeticOptimizer"] = DynamicQuantumMemeticOptimizer - res = NonObjectOptimizer(method="LLAMADynamicQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMADynamicQuantumMemeticOptimizer").set_name("LLAMADynamicQuantumMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMADynamicQuantumMemeticOptimizer" + ).set_name("LLAMADynamicQuantumMemeticOptimizer", register=True) +except Exception as e: # DynamicQuantumMemeticOptimizer print("DynamicQuantumMemeticOptimizer can not be imported: ", e) -try: +try: # DynamicQuantumSwarmOptimization from nevergrad.optimization.lama.DynamicQuantumSwarmOptimization import DynamicQuantumSwarmOptimization lama_register["DynamicQuantumSwarmOptimization"] = DynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimization").set_name("LLAMADynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMADynamicQuantumSwarmOptimization" + ).set_name("LLAMADynamicQuantumSwarmOptimization", register=True) +except Exception as e: # DynamicQuantumSwarmOptimization print("DynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuantumSwarmOptimizationRefined import DynamicQuantumSwarmOptimizationRefined +try: # DynamicQuantumSwarmOptimizationRefined + from nevergrad.optimization.lama.DynamicQuantumSwarmOptimizationRefined import ( + DynamicQuantumSwarmOptimizationRefined, + ) lama_register["DynamicQuantumSwarmOptimizationRefined"] = DynamicQuantumSwarmOptimizationRefined - res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuantumSwarmOptimizationRefined = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimizationRefined").set_name("LLAMADynamicQuantumSwarmOptimizationRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuantumSwarmOptimizationRefined = NonObjectOptimizer( + method="LLAMADynamicQuantumSwarmOptimizationRefined" + ).set_name("LLAMADynamicQuantumSwarmOptimizationRefined", register=True) +except Exception as e: # DynamicQuantumSwarmOptimizationRefined print("DynamicQuantumSwarmOptimizationRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicQuasiRandomAdaptiveDifferentialEvolution import DynamicQuasiRandomAdaptiveDifferentialEvolution - - lama_register["DynamicQuasiRandomAdaptiveDifferentialEvolution"] = DynamicQuasiRandomAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution").set_name("LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # DynamicQuasiRandomAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.DynamicQuasiRandomAdaptiveDifferentialEvolution import ( + DynamicQuasiRandomAdaptiveDifferentialEvolution, + ) + + lama_register["DynamicQuasiRandomAdaptiveDifferentialEvolution"] = ( + DynamicQuasiRandomAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution" + ).set_name("LLAMADynamicQuasiRandomAdaptiveDifferentialEvolution", register=True) +except Exception as e: # DynamicQuasiRandomAdaptiveDifferentialEvolution print("DynamicQuasiRandomAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicRefinedGradientBoostedMemorySimulatedAnnealing import DynamicRefinedGradientBoostedMemorySimulatedAnnealing - - lama_register["DynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = DynamicRefinedGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # DynamicRefinedGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.DynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( + DynamicRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["DynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + DynamicRefinedGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # DynamicRefinedGradientBoostedMemorySimulatedAnnealing print("DynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicRefinementGradientBoostedMemoryAnnealing import DynamicRefinementGradientBoostedMemoryAnnealing - - lama_register["DynamicRefinementGradientBoostedMemoryAnnealing"] = DynamicRefinementGradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing").set_name("LLAMADynamicRefinementGradientBoostedMemoryAnnealing", register=True) -except Exception as e: +try: # DynamicRefinementGradientBoostedMemoryAnnealing + from nevergrad.optimization.lama.DynamicRefinementGradientBoostedMemoryAnnealing import ( + DynamicRefinementGradientBoostedMemoryAnnealing, + ) + + lama_register["DynamicRefinementGradientBoostedMemoryAnnealing"] = ( + DynamicRefinementGradientBoostedMemoryAnnealing + ) + # res = NonObjectOptimizer(method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMADynamicRefinementGradientBoostedMemoryAnnealing" + ).set_name("LLAMADynamicRefinementGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # DynamicRefinementGradientBoostedMemoryAnnealing print("DynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) -try: +try: # DynamicScaleSearch from nevergrad.optimization.lama.DynamicScaleSearch import DynamicScaleSearch lama_register["DynamicScaleSearch"] = DynamicScaleSearch - res = NonObjectOptimizer(method="LLAMADynamicScaleSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicScaleSearch = NonObjectOptimizer(method="LLAMADynamicScaleSearch").set_name("LLAMADynamicScaleSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicScaleSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicScaleSearch = NonObjectOptimizer(method="LLAMADynamicScaleSearch").set_name( + "LLAMADynamicScaleSearch", register=True + ) +except Exception as e: # DynamicScaleSearch print("DynamicScaleSearch can not be imported: ", e) -try: +try: # DynamicSelfAdaptiveOptimizer from nevergrad.optimization.lama.DynamicSelfAdaptiveOptimizer import DynamicSelfAdaptiveOptimizer lama_register["DynamicSelfAdaptiveOptimizer"] = DynamicSelfAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMADynamicSelfAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicSelfAdaptiveOptimizer = NonObjectOptimizer(method="LLAMADynamicSelfAdaptiveOptimizer").set_name("LLAMADynamicSelfAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicSelfAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicSelfAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMADynamicSelfAdaptiveOptimizer" + ).set_name("LLAMADynamicSelfAdaptiveOptimizer", register=True) +except Exception as e: # DynamicSelfAdaptiveOptimizer print("DynamicSelfAdaptiveOptimizer can not be imported: ", e) -try: +try: # DynamicStrategyAdaptiveDE from nevergrad.optimization.lama.DynamicStrategyAdaptiveDE import DynamicStrategyAdaptiveDE lama_register["DynamicStrategyAdaptiveDE"] = DynamicStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE").set_name("LLAMADynamicStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMADynamicStrategyAdaptiveDE").set_name( + "LLAMADynamicStrategyAdaptiveDE", register=True + ) +except Exception as e: # DynamicStrategyAdaptiveDE print("DynamicStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.DynamicallyAdaptiveFireworkAlgorithm import DynamicallyAdaptiveFireworkAlgorithm +try: # DynamicallyAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.DynamicallyAdaptiveFireworkAlgorithm import ( + DynamicallyAdaptiveFireworkAlgorithm, + ) lama_register["DynamicallyAdaptiveFireworkAlgorithm"] = DynamicallyAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMADynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMADynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMADynamicallyAdaptiveFireworkAlgorithm").set_name("LLAMADynamicallyAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMADynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMADynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMADynamicallyAdaptiveFireworkAlgorithm" + ).set_name("LLAMADynamicallyAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # DynamicallyAdaptiveFireworkAlgorithm print("DynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) -try: +try: # EACDE from nevergrad.optimization.lama.EACDE import EACDE lama_register["EACDE"] = EACDE - res = NonObjectOptimizer(method="LLAMAEACDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEACDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEACDE = NonObjectOptimizer(method="LLAMAEACDE").set_name("LLAMAEACDE", register=True) -except Exception as e: +except Exception as e: # EACDE print("EACDE can not be imported: ", e) -try: +try: # EADE from nevergrad.optimization.lama.EADE import EADE lama_register["EADE"] = EADE - res = NonObjectOptimizer(method="LLAMAEADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADE = NonObjectOptimizer(method="LLAMAEADE").set_name("LLAMAEADE", register=True) -except Exception as e: +except Exception as e: # EADE print("EADE can not be imported: ", e) -try: +try: # EADEA from nevergrad.optimization.lama.EADEA import EADEA lama_register["EADEA"] = EADEA - res = NonObjectOptimizer(method="LLAMAEADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEA = NonObjectOptimizer(method="LLAMAEADEA").set_name("LLAMAEADEA", register=True) -except Exception as e: +except Exception as e: # EADEA print("EADEA can not be imported: ", e) -try: +try: # EADEDM from nevergrad.optimization.lama.EADEDM import EADEDM lama_register["EADEDM"] = EADEDM - res = NonObjectOptimizer(method="LLAMAEADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEDM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEDM = NonObjectOptimizer(method="LLAMAEADEDM").set_name("LLAMAEADEDM", register=True) -except Exception as e: +except Exception as e: # EADEDM print("EADEDM can not be imported: ", e) -try: +try: # EADEDMGM from nevergrad.optimization.lama.EADEDMGM import EADEDMGM lama_register["EADEDMGM"] = EADEDMGM - res = NonObjectOptimizer(method="LLAMAEADEDMGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEDMGM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEDMGM = NonObjectOptimizer(method="LLAMAEADEDMGM").set_name("LLAMAEADEDMGM", register=True) -except Exception as e: +except Exception as e: # EADEDMGM print("EADEDMGM can not be imported: ", e) -try: +try: # EADEPC from nevergrad.optimization.lama.EADEPC import EADEPC lama_register["EADEPC"] = EADEPC - res = NonObjectOptimizer(method="LLAMAEADEPC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEPC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEPC = NonObjectOptimizer(method="LLAMAEADEPC").set_name("LLAMAEADEPC", register=True) -except Exception as e: +except Exception as e: # EADEPC print("EADEPC can not be imported: ", e) -try: +try: # EADEPM from nevergrad.optimization.lama.EADEPM import EADEPM lama_register["EADEPM"] = EADEPM - res = NonObjectOptimizer(method="LLAMAEADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEPM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEPM = NonObjectOptimizer(method="LLAMAEADEPM").set_name("LLAMAEADEPM", register=True) -except Exception as e: +except Exception as e: # EADEPM print("EADEPM can not be imported: ", e) -try: +try: # EADEPMC from nevergrad.optimization.lama.EADEPMC import EADEPMC lama_register["EADEPMC"] = EADEPMC - res = NonObjectOptimizer(method="LLAMAEADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEPMC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEPMC = NonObjectOptimizer(method="LLAMAEADEPMC").set_name("LLAMAEADEPMC", register=True) -except Exception as e: +except Exception as e: # EADEPMC print("EADEPMC can not be imported: ", e) -try: +try: # EADES from nevergrad.optimization.lama.EADES import EADES lama_register["EADES"] = EADES - res = NonObjectOptimizer(method="LLAMAEADES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADES = NonObjectOptimizer(method="LLAMAEADES").set_name("LLAMAEADES", register=True) -except Exception as e: +except Exception as e: # EADES print("EADES can not be imported: ", e) -try: +try: # EADESC from nevergrad.optimization.lama.EADESC import EADESC lama_register["EADESC"] = EADESC - res = NonObjectOptimizer(method="LLAMAEADESC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADESC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADESC = NonObjectOptimizer(method="LLAMAEADESC").set_name("LLAMAEADESC", register=True) -except Exception as e: +except Exception as e: # EADESC print("EADESC can not be imported: ", e) -try: +try: # EADEWM from nevergrad.optimization.lama.EADEWM import EADEWM lama_register["EADEWM"] = EADEWM - res = NonObjectOptimizer(method="LLAMAEADEWM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADEWM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADEWM = NonObjectOptimizer(method="LLAMAEADEWM").set_name("LLAMAEADEWM", register=True) -except Exception as e: +except Exception as e: # EADEWM print("EADEWM can not be imported: ", e) -try: +try: # EADE_FIDM from nevergrad.optimization.lama.EADE_FIDM import EADE_FIDM lama_register["EADE_FIDM"] = EADE_FIDM - res = NonObjectOptimizer(method="LLAMAEADE_FIDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADE_FIDM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADE_FIDM = NonObjectOptimizer(method="LLAMAEADE_FIDM").set_name("LLAMAEADE_FIDM", register=True) -except Exception as e: +except Exception as e: # EADE_FIDM print("EADE_FIDM can not be imported: ", e) -try: +try: # EADGM from nevergrad.optimization.lama.EADGM import EADGM lama_register["EADGM"] = EADGM - res = NonObjectOptimizer(method="LLAMAEADGM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADGM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADGM = NonObjectOptimizer(method="LLAMAEADGM").set_name("LLAMAEADGM", register=True) -except Exception as e: +except Exception as e: # EADGM print("EADGM can not be imported: ", e) -try: +try: # EADMMMS from nevergrad.optimization.lama.EADMMMS import EADMMMS lama_register["EADMMMS"] = EADMMMS - res = NonObjectOptimizer(method="LLAMAEADMMMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADMMMS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADMMMS = NonObjectOptimizer(method="LLAMAEADMMMS").set_name("LLAMAEADMMMS", register=True) -except Exception as e: +except Exception as e: # EADMMMS print("EADMMMS can not be imported: ", e) -try: +try: # EADSEA from nevergrad.optimization.lama.EADSEA import EADSEA lama_register["EADSEA"] = EADSEA - res = NonObjectOptimizer(method="LLAMAEADSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADSEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADSEA = NonObjectOptimizer(method="LLAMAEADSEA").set_name("LLAMAEADSEA", register=True) -except Exception as e: +except Exception as e: # EADSEA print("EADSEA can not be imported: ", e) -try: +try: # EADSM from nevergrad.optimization.lama.EADSM import EADSM lama_register["EADSM"] = EADSM - res = NonObjectOptimizer(method="LLAMAEADSM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEADSM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEADSM = NonObjectOptimizer(method="LLAMAEADSM").set_name("LLAMAEADSM", register=True) -except Exception as e: +except Exception as e: # EADSM print("EADSM can not be imported: ", e) -try: +try: # EAMDE from nevergrad.optimization.lama.EAMDE import EAMDE lama_register["EAMDE"] = EAMDE - res = NonObjectOptimizer(method="LLAMAEAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEAMDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEAMDE = NonObjectOptimizer(method="LLAMAEAMDE").set_name("LLAMAEAMDE", register=True) -except Exception as e: +except Exception as e: # EAMDE print("EAMDE can not be imported: ", e) -try: +try: # EAMES from nevergrad.optimization.lama.EAMES import EAMES lama_register["EAMES"] = EAMES - res = NonObjectOptimizer(method="LLAMAEAMES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEAMES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEAMES = NonObjectOptimizer(method="LLAMAEAMES").set_name("LLAMAEAMES", register=True) -except Exception as e: +except Exception as e: # EAMES print("EAMES can not be imported: ", e) -try: +try: # EAMSDiffEvo from nevergrad.optimization.lama.EAMSDiffEvo import EAMSDiffEvo lama_register["EAMSDiffEvo"] = EAMSDiffEvo - res = NonObjectOptimizer(method="LLAMAEAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEAMSDiffEvo = NonObjectOptimizer(method="LLAMAEAMSDiffEvo").set_name("LLAMAEAMSDiffEvo", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEAMSDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEAMSDiffEvo = NonObjectOptimizer(method="LLAMAEAMSDiffEvo").set_name( + "LLAMAEAMSDiffEvo", register=True + ) +except Exception as e: # EAMSDiffEvo print("EAMSDiffEvo can not be imported: ", e) -try: +try: # EAMSEA from nevergrad.optimization.lama.EAMSEA import EAMSEA lama_register["EAMSEA"] = EAMSEA - res = NonObjectOptimizer(method="LLAMAEAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEAMSEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEAMSEA = NonObjectOptimizer(method="LLAMAEAMSEA").set_name("LLAMAEAMSEA", register=True) -except Exception as e: +except Exception as e: # EAMSEA print("EAMSEA can not be imported: ", e) -try: +try: # EAPBES from nevergrad.optimization.lama.EAPBES import EAPBES lama_register["EAPBES"] = EAPBES - res = NonObjectOptimizer(method="LLAMAEAPBES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEAPBES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEAPBES = NonObjectOptimizer(method="LLAMAEAPBES").set_name("LLAMAEAPBES", register=True) -except Exception as e: +except Exception as e: # EAPBES print("EAPBES can not be imported: ", e) -try: +try: # EAPDELS from nevergrad.optimization.lama.EAPDELS import EAPDELS lama_register["EAPDELS"] = EAPDELS - res = NonObjectOptimizer(method="LLAMAEAPDELS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEAPDELS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEAPDELS = NonObjectOptimizer(method="LLAMAEAPDELS").set_name("LLAMAEAPDELS", register=True) -except Exception as e: +except Exception as e: # EAPDELS print("EAPDELS can not be imported: ", e) -try: +try: # EARESDM from nevergrad.optimization.lama.EARESDM import EARESDM lama_register["EARESDM"] = EARESDM - res = NonObjectOptimizer(method="LLAMAEARESDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEARESDM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEARESDM = NonObjectOptimizer(method="LLAMAEARESDM").set_name("LLAMAEARESDM", register=True) -except Exception as e: +except Exception as e: # EARESDM print("EARESDM can not be imported: ", e) -try: +try: # EASO from nevergrad.optimization.lama.EASO import EASO lama_register["EASO"] = EASO - res = NonObjectOptimizer(method="LLAMAEASO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEASO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEASO = NonObjectOptimizer(method="LLAMAEASO").set_name("LLAMAEASO", register=True) -except Exception as e: +except Exception as e: # EASO print("EASO can not be imported: ", e) -try: +try: # EDAEA from nevergrad.optimization.lama.EDAEA import EDAEA lama_register["EDAEA"] = EDAEA - res = NonObjectOptimizer(method="LLAMAEDAEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDAEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDAEA = NonObjectOptimizer(method="LLAMAEDAEA").set_name("LLAMAEDAEA", register=True) -except Exception as e: +except Exception as e: # EDAEA print("EDAEA can not be imported: ", e) -try: +try: # EDAG from nevergrad.optimization.lama.EDAG import EDAG lama_register["EDAG"] = EDAG - res = NonObjectOptimizer(method="LLAMAEDAG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDAG")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDAG = NonObjectOptimizer(method="LLAMAEDAG").set_name("LLAMAEDAG", register=True) -except Exception as e: +except Exception as e: # EDAG print("EDAG can not be imported: ", e) -try: +try: # EDASOGG from nevergrad.optimization.lama.EDASOGG import EDASOGG lama_register["EDASOGG"] = EDASOGG - res = NonObjectOptimizer(method="LLAMAEDASOGG")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDASOGG")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDASOGG = NonObjectOptimizer(method="LLAMAEDASOGG").set_name("LLAMAEDASOGG", register=True) -except Exception as e: +except Exception as e: # EDASOGG print("EDASOGG can not be imported: ", e) -try: +try: # EDDCEA from nevergrad.optimization.lama.EDDCEA import EDDCEA lama_register["EDDCEA"] = EDDCEA - res = NonObjectOptimizer(method="LLAMAEDDCEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDDCEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDDCEA = NonObjectOptimizer(method="LLAMAEDDCEA").set_name("LLAMAEDDCEA", register=True) -except Exception as e: +except Exception as e: # EDDCEA print("EDDCEA can not be imported: ", e) -try: +try: # EDEAS from nevergrad.optimization.lama.EDEAS import EDEAS lama_register["EDEAS"] = EDEAS - res = NonObjectOptimizer(method="LLAMAEDEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDEAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDEAS = NonObjectOptimizer(method="LLAMAEDEAS").set_name("LLAMAEDEAS", register=True) -except Exception as e: +except Exception as e: # EDEAS print("EDEAS can not be imported: ", e) -try: +try: # EDEPM from nevergrad.optimization.lama.EDEPM import EDEPM lama_register["EDEPM"] = EDEPM - res = NonObjectOptimizer(method="LLAMAEDEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDEPM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDEPM = NonObjectOptimizer(method="LLAMAEDEPM").set_name("LLAMAEDEPM", register=True) -except Exception as e: +except Exception as e: # EDEPM print("EDEPM can not be imported: ", e) -try: +try: # EDGB from nevergrad.optimization.lama.EDGB import EDGB lama_register["EDGB"] = EDGB - res = NonObjectOptimizer(method="LLAMAEDGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDGB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDGB = NonObjectOptimizer(method="LLAMAEDGB").set_name("LLAMAEDGB", register=True) -except Exception as e: +except Exception as e: # EDGB print("EDGB can not be imported: ", e) -try: +try: # EDMDESM from nevergrad.optimization.lama.EDMDESM import EDMDESM lama_register["EDMDESM"] = EDMDESM - res = NonObjectOptimizer(method="LLAMAEDMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDMDESM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDMDESM = NonObjectOptimizer(method="LLAMAEDMDESM").set_name("LLAMAEDMDESM", register=True) -except Exception as e: +except Exception as e: # EDMDESM print("EDMDESM can not be imported: ", e) -try: +try: # EDMRL from nevergrad.optimization.lama.EDMRL import EDMRL lama_register["EDMRL"] = EDMRL - res = NonObjectOptimizer(method="LLAMAEDMRL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDMRL")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDMRL = NonObjectOptimizer(method="LLAMAEDMRL").set_name("LLAMAEDMRL", register=True) -except Exception as e: +except Exception as e: # EDMRL print("EDMRL can not be imported: ", e) -try: +try: # EDMS from nevergrad.optimization.lama.EDMS import EDMS lama_register["EDMS"] = EDMS - res = NonObjectOptimizer(method="LLAMAEDMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDMS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDMS = NonObjectOptimizer(method="LLAMAEDMS").set_name("LLAMAEDMS", register=True) -except Exception as e: +except Exception as e: # EDMS print("EDMS can not be imported: ", e) -try: +try: # EDNAS from nevergrad.optimization.lama.EDNAS import EDNAS lama_register["EDNAS"] = EDNAS - res = NonObjectOptimizer(method="LLAMAEDNAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEDNAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEDNAS = NonObjectOptimizer(method="LLAMAEDNAS").set_name("LLAMAEDNAS", register=True) -except Exception as e: +except Exception as e: # EDNAS print("EDNAS can not be imported: ", e) -try: +try: # EDNAS_SAMRA from nevergrad.optimization.lama.EDNAS_SAMRA import EDNAS_SAMRA lama_register["EDNAS_SAMRA"] = EDNAS_SAMRA - res = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEDNAS_SAMRA = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA").set_name("LLAMAEDNAS_SAMRA", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEDNAS_SAMRA = NonObjectOptimizer(method="LLAMAEDNAS_SAMRA").set_name( + "LLAMAEDNAS_SAMRA", register=True + ) +except Exception as e: # EDNAS_SAMRA print("EDNAS_SAMRA can not be imported: ", e) -try: +try: # EDSDiffEvoM from nevergrad.optimization.lama.EDSDiffEvoM import EDSDiffEvoM lama_register["EDSDiffEvoM"] = EDSDiffEvoM - res = NonObjectOptimizer(method="LLAMAEDSDiffEvoM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEDSDiffEvoM = NonObjectOptimizer(method="LLAMAEDSDiffEvoM").set_name("LLAMAEDSDiffEvoM", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEDSDiffEvoM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEDSDiffEvoM = NonObjectOptimizer(method="LLAMAEDSDiffEvoM").set_name( + "LLAMAEDSDiffEvoM", register=True + ) +except Exception as e: # EDSDiffEvoM print("EDSDiffEvoM can not be imported: ", e) -try: +try: # EGBDE from nevergrad.optimization.lama.EGBDE import EGBDE lama_register["EGBDE"] = EGBDE - res = NonObjectOptimizer(method="LLAMAEGBDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEGBDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEGBDE = NonObjectOptimizer(method="LLAMAEGBDE").set_name("LLAMAEGBDE", register=True) -except Exception as e: +except Exception as e: # EGBDE print("EGBDE can not be imported: ", e) -try: +try: # EGGEO from nevergrad.optimization.lama.EGGEO import EGGEO lama_register["EGGEO"] = EGGEO - res = NonObjectOptimizer(method="LLAMAEGGEO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEGGEO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEGGEO = NonObjectOptimizer(method="LLAMAEGGEO").set_name("LLAMAEGGEO", register=True) -except Exception as e: +except Exception as e: # EGGEO print("EGGEO can not be imported: ", e) -try: +try: # EHADEEM from nevergrad.optimization.lama.EHADEEM import EHADEEM lama_register["EHADEEM"] = EHADEEM - res = NonObjectOptimizer(method="LLAMAEHADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEHADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEHADEEM = NonObjectOptimizer(method="LLAMAEHADEEM").set_name("LLAMAEHADEEM", register=True) -except Exception as e: +except Exception as e: # EHADEEM print("EHADEEM can not be imported: ", e) -try: +try: # EHADEMI from nevergrad.optimization.lama.EHADEMI import EHADEMI lama_register["EHADEMI"] = EHADEMI - res = NonObjectOptimizer(method="LLAMAEHADEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEHADEMI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEHADEMI = NonObjectOptimizer(method="LLAMAEHADEMI").set_name("LLAMAEHADEMI", register=True) -except Exception as e: +except Exception as e: # EHADEMI print("EHADEMI can not be imported: ", e) -try: +try: # EHDAM from nevergrad.optimization.lama.EHDAM import EHDAM lama_register["EHDAM"] = EHDAM - res = NonObjectOptimizer(method="LLAMAEHDAM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEHDAM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEHDAM = NonObjectOptimizer(method="LLAMAEHDAM").set_name("LLAMAEHDAM", register=True) -except Exception as e: +except Exception as e: # EHDAM print("EHDAM can not be imported: ", e) -try: +try: # EHDE from nevergrad.optimization.lama.EHDE import EHDE lama_register["EHDE"] = EHDE - res = NonObjectOptimizer(method="LLAMAEHDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEHDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEHDE = NonObjectOptimizer(method="LLAMAEHDE").set_name("LLAMAEHDE", register=True) -except Exception as e: +except Exception as e: # EHDE print("EHDE can not be imported: ", e) -try: +try: # EIADEA from nevergrad.optimization.lama.EIADEA import EIADEA lama_register["EIADEA"] = EIADEA - res = NonObjectOptimizer(method="LLAMAEIADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEIADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEIADEA = NonObjectOptimizer(method="LLAMAEIADEA").set_name("LLAMAEIADEA", register=True) -except Exception as e: +except Exception as e: # EIADEA print("EIADEA can not be imported: ", e) -try: +try: # EMIDE from nevergrad.optimization.lama.EMIDE import EMIDE lama_register["EMIDE"] = EMIDE - res = NonObjectOptimizer(method="LLAMAEMIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEMIDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEMIDE = NonObjectOptimizer(method="LLAMAEMIDE").set_name("LLAMAEMIDE", register=True) -except Exception as e: +except Exception as e: # EMIDE print("EMIDE can not be imported: ", e) -try: +try: # EMSADE from nevergrad.optimization.lama.EMSADE import EMSADE lama_register["EMSADE"] = EMSADE - res = NonObjectOptimizer(method="LLAMAEMSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEMSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEMSADE = NonObjectOptimizer(method="LLAMAEMSADE").set_name("LLAMAEMSADE", register=True) -except Exception as e: +except Exception as e: # EMSADE print("EMSADE can not be imported: ", e) -try: +try: # EMSEAS from nevergrad.optimization.lama.EMSEAS import EMSEAS lama_register["EMSEAS"] = EMSEAS - res = NonObjectOptimizer(method="LLAMAEMSEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEMSEAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEMSEAS = NonObjectOptimizer(method="LLAMAEMSEAS").set_name("LLAMAEMSEAS", register=True) -except Exception as e: +except Exception as e: # EMSEAS print("EMSEAS can not be imported: ", e) -try: +try: # EORAMED from nevergrad.optimization.lama.EORAMED import EORAMED lama_register["EORAMED"] = EORAMED - res = NonObjectOptimizer(method="LLAMAEORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEORAMED")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEORAMED = NonObjectOptimizer(method="LLAMAEORAMED").set_name("LLAMAEORAMED", register=True) -except Exception as e: +except Exception as e: # EORAMED print("EORAMED can not be imported: ", e) -try: +try: # EPADE from nevergrad.optimization.lama.EPADE import EPADE lama_register["EPADE"] = EPADE - res = NonObjectOptimizer(method="LLAMAEPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEPADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEPADE = NonObjectOptimizer(method="LLAMAEPADE").set_name("LLAMAEPADE", register=True) -except Exception as e: +except Exception as e: # EPADE print("EPADE can not be imported: ", e) -try: +try: # EPDE from nevergrad.optimization.lama.EPDE import EPDE lama_register["EPDE"] = EPDE - res = NonObjectOptimizer(method="LLAMAEPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEPDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEPDE = NonObjectOptimizer(method="LLAMAEPDE").set_name("LLAMAEPDE", register=True) -except Exception as e: +except Exception as e: # EPDE print("EPDE can not be imported: ", e) -try: +try: # EPWDEM from nevergrad.optimization.lama.EPWDEM import EPWDEM lama_register["EPWDEM"] = EPWDEM - res = NonObjectOptimizer(method="LLAMAEPWDEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEPWDEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEPWDEM = NonObjectOptimizer(method="LLAMAEPWDEM").set_name("LLAMAEPWDEM", register=True) -except Exception as e: +except Exception as e: # EPWDEM print("EPWDEM can not be imported: ", e) -try: +try: # ERADE from nevergrad.optimization.lama.ERADE import ERADE lama_register["ERADE"] = ERADE - res = NonObjectOptimizer(method="LLAMAERADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAERADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAERADE = NonObjectOptimizer(method="LLAMAERADE").set_name("LLAMAERADE", register=True) -except Exception as e: +except Exception as e: # ERADE print("ERADE can not be imported: ", e) -try: +try: # ERADS from nevergrad.optimization.lama.ERADS import ERADS lama_register["ERADS"] = ERADS - res = NonObjectOptimizer(method="LLAMAERADS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAERADS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAERADS = NonObjectOptimizer(method="LLAMAERADS").set_name("LLAMAERADS", register=True) -except Exception as e: +except Exception as e: # ERADS print("ERADS can not be imported: ", e) -try: +try: # ERADS_AdaptiveDynamic from nevergrad.optimization.lama.ERADS_AdaptiveDynamic import ERADS_AdaptiveDynamic lama_register["ERADS_AdaptiveDynamic"] = ERADS_AdaptiveDynamic - res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptiveDynamic = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic").set_name("LLAMAERADS_AdaptiveDynamic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptiveDynamic = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamic").set_name( + "LLAMAERADS_AdaptiveDynamic", register=True + ) +except Exception as e: # ERADS_AdaptiveDynamic print("ERADS_AdaptiveDynamic can not be imported: ", e) -try: +try: # ERADS_AdaptiveDynamicPlus from nevergrad.optimization.lama.ERADS_AdaptiveDynamicPlus import ERADS_AdaptiveDynamicPlus lama_register["ERADS_AdaptiveDynamicPlus"] = ERADS_AdaptiveDynamicPlus - res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptiveDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus").set_name("LLAMAERADS_AdaptiveDynamicPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptiveDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_AdaptiveDynamicPlus").set_name( + "LLAMAERADS_AdaptiveDynamicPlus", register=True + ) +except Exception as e: # ERADS_AdaptiveDynamicPlus print("ERADS_AdaptiveDynamicPlus can not be imported: ", e) -try: +try: # ERADS_AdaptiveHybrid from nevergrad.optimization.lama.ERADS_AdaptiveHybrid import ERADS_AdaptiveHybrid lama_register["ERADS_AdaptiveHybrid"] = ERADS_AdaptiveHybrid - res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptiveHybrid = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid").set_name("LLAMAERADS_AdaptiveHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptiveHybrid = NonObjectOptimizer(method="LLAMAERADS_AdaptiveHybrid").set_name( + "LLAMAERADS_AdaptiveHybrid", register=True + ) +except Exception as e: # ERADS_AdaptiveHybrid print("ERADS_AdaptiveHybrid can not be imported: ", e) -try: +try: # ERADS_AdaptivePlus from nevergrad.optimization.lama.ERADS_AdaptivePlus import ERADS_AdaptivePlus lama_register["ERADS_AdaptivePlus"] = ERADS_AdaptivePlus - res = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus").set_name("LLAMAERADS_AdaptivePlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_AdaptivePlus").set_name( + "LLAMAERADS_AdaptivePlus", register=True + ) +except Exception as e: # ERADS_AdaptivePlus print("ERADS_AdaptivePlus can not be imported: ", e) -try: +try: # ERADS_AdaptiveProgressive from nevergrad.optimization.lama.ERADS_AdaptiveProgressive import ERADS_AdaptiveProgressive lama_register["ERADS_AdaptiveProgressive"] = ERADS_AdaptiveProgressive - res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptiveProgressive = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive").set_name("LLAMAERADS_AdaptiveProgressive", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptiveProgressive = NonObjectOptimizer(method="LLAMAERADS_AdaptiveProgressive").set_name( + "LLAMAERADS_AdaptiveProgressive", register=True + ) +except Exception as e: # ERADS_AdaptiveProgressive print("ERADS_AdaptiveProgressive can not be imported: ", e) -try: +try: # ERADS_AdaptiveRefinement from nevergrad.optimization.lama.ERADS_AdaptiveRefinement import ERADS_AdaptiveRefinement lama_register["ERADS_AdaptiveRefinement"] = ERADS_AdaptiveRefinement - res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdaptiveRefinement = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement").set_name("LLAMAERADS_AdaptiveRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdaptiveRefinement = NonObjectOptimizer(method="LLAMAERADS_AdaptiveRefinement").set_name( + "LLAMAERADS_AdaptiveRefinement", register=True + ) +except Exception as e: # ERADS_AdaptiveRefinement print("ERADS_AdaptiveRefinement can not be imported: ", e) -try: +try: # ERADS_Advanced from nevergrad.optimization.lama.ERADS_Advanced import ERADS_Advanced lama_register["ERADS_Advanced"] = ERADS_Advanced - res = NonObjectOptimizer(method="LLAMAERADS_Advanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Advanced = NonObjectOptimizer(method="LLAMAERADS_Advanced").set_name("LLAMAERADS_Advanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Advanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Advanced = NonObjectOptimizer(method="LLAMAERADS_Advanced").set_name( + "LLAMAERADS_Advanced", register=True + ) +except Exception as e: # ERADS_Advanced print("ERADS_Advanced can not be imported: ", e) -try: +try: # ERADS_AdvancedDynamic from nevergrad.optimization.lama.ERADS_AdvancedDynamic import ERADS_AdvancedDynamic lama_register["ERADS_AdvancedDynamic"] = ERADS_AdvancedDynamic - res = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdvancedDynamic = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic").set_name("LLAMAERADS_AdvancedDynamic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdvancedDynamic = NonObjectOptimizer(method="LLAMAERADS_AdvancedDynamic").set_name( + "LLAMAERADS_AdvancedDynamic", register=True + ) +except Exception as e: # ERADS_AdvancedDynamic print("ERADS_AdvancedDynamic can not be imported: ", e) -try: +try: # ERADS_AdvancedRefined from nevergrad.optimization.lama.ERADS_AdvancedRefined import ERADS_AdvancedRefined lama_register["ERADS_AdvancedRefined"] = ERADS_AdvancedRefined - res = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_AdvancedRefined = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined").set_name("LLAMAERADS_AdvancedRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_AdvancedRefined = NonObjectOptimizer(method="LLAMAERADS_AdvancedRefined").set_name( + "LLAMAERADS_AdvancedRefined", register=True + ) +except Exception as e: # ERADS_AdvancedRefined print("ERADS_AdvancedRefined can not be imported: ", e) -try: +try: # ERADS_DynamicPrecision from nevergrad.optimization.lama.ERADS_DynamicPrecision import ERADS_DynamicPrecision lama_register["ERADS_DynamicPrecision"] = ERADS_DynamicPrecision - res = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_DynamicPrecision = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision").set_name("LLAMAERADS_DynamicPrecision", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_DynamicPrecision = NonObjectOptimizer(method="LLAMAERADS_DynamicPrecision").set_name( + "LLAMAERADS_DynamicPrecision", register=True + ) +except Exception as e: # ERADS_DynamicPrecision print("ERADS_DynamicPrecision can not be imported: ", e) -try: +try: # ERADS_Enhanced from nevergrad.optimization.lama.ERADS_Enhanced import ERADS_Enhanced lama_register["ERADS_Enhanced"] = ERADS_Enhanced - res = NonObjectOptimizer(method="LLAMAERADS_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Enhanced = NonObjectOptimizer(method="LLAMAERADS_Enhanced").set_name("LLAMAERADS_Enhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Enhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Enhanced = NonObjectOptimizer(method="LLAMAERADS_Enhanced").set_name( + "LLAMAERADS_Enhanced", register=True + ) +except Exception as e: # ERADS_Enhanced print("ERADS_Enhanced can not be imported: ", e) -try: +try: # ERADS_EnhancedPrecision from nevergrad.optimization.lama.ERADS_EnhancedPrecision import ERADS_EnhancedPrecision lama_register["ERADS_EnhancedPrecision"] = ERADS_EnhancedPrecision - res = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_EnhancedPrecision = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision").set_name("LLAMAERADS_EnhancedPrecision", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_EnhancedPrecision = NonObjectOptimizer(method="LLAMAERADS_EnhancedPrecision").set_name( + "LLAMAERADS_EnhancedPrecision", register=True + ) +except Exception as e: # ERADS_EnhancedPrecision print("ERADS_EnhancedPrecision can not be imported: ", e) -try: +try: # ERADS_HyperOptimized from nevergrad.optimization.lama.ERADS_HyperOptimized import ERADS_HyperOptimized lama_register["ERADS_HyperOptimized"] = ERADS_HyperOptimized - res = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_HyperOptimized = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized").set_name("LLAMAERADS_HyperOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_HyperOptimized = NonObjectOptimizer(method="LLAMAERADS_HyperOptimized").set_name( + "LLAMAERADS_HyperOptimized", register=True + ) +except Exception as e: # ERADS_HyperOptimized print("ERADS_HyperOptimized can not be imported: ", e) -try: +try: # ERADS_NextGen from nevergrad.optimization.lama.ERADS_NextGen import ERADS_NextGen lama_register["ERADS_NextGen"] = ERADS_NextGen - res = NonObjectOptimizer(method="LLAMAERADS_NextGen")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_NextGen = NonObjectOptimizer(method="LLAMAERADS_NextGen").set_name("LLAMAERADS_NextGen", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_NextGen")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_NextGen = NonObjectOptimizer(method="LLAMAERADS_NextGen").set_name( + "LLAMAERADS_NextGen", register=True + ) +except Exception as e: # ERADS_NextGen print("ERADS_NextGen can not be imported: ", e) -try: +try: # ERADS_Optimized from nevergrad.optimization.lama.ERADS_Optimized import ERADS_Optimized lama_register["ERADS_Optimized"] = ERADS_Optimized - res = NonObjectOptimizer(method="LLAMAERADS_Optimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Optimized = NonObjectOptimizer(method="LLAMAERADS_Optimized").set_name("LLAMAERADS_Optimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Optimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Optimized = NonObjectOptimizer(method="LLAMAERADS_Optimized").set_name( + "LLAMAERADS_Optimized", register=True + ) +except Exception as e: # ERADS_Optimized print("ERADS_Optimized can not be imported: ", e) -try: +try: # ERADS_Precision from nevergrad.optimization.lama.ERADS_Precision import ERADS_Precision lama_register["ERADS_Precision"] = ERADS_Precision - res = NonObjectOptimizer(method="LLAMAERADS_Precision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Precision = NonObjectOptimizer(method="LLAMAERADS_Precision").set_name("LLAMAERADS_Precision", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Precision")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Precision = NonObjectOptimizer(method="LLAMAERADS_Precision").set_name( + "LLAMAERADS_Precision", register=True + ) +except Exception as e: # ERADS_Precision print("ERADS_Precision can not be imported: ", e) -try: +try: # ERADS_ProgressiveAdaptive from nevergrad.optimization.lama.ERADS_ProgressiveAdaptive import ERADS_ProgressiveAdaptive lama_register["ERADS_ProgressiveAdaptive"] = ERADS_ProgressiveAdaptive - res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressiveAdaptive = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive").set_name("LLAMAERADS_ProgressiveAdaptive", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressiveAdaptive = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptive").set_name( + "LLAMAERADS_ProgressiveAdaptive", register=True + ) +except Exception as e: # ERADS_ProgressiveAdaptive print("ERADS_ProgressiveAdaptive can not be imported: ", e) -try: +try: # ERADS_ProgressiveAdaptivePlus from nevergrad.optimization.lama.ERADS_ProgressiveAdaptivePlus import ERADS_ProgressiveAdaptivePlus lama_register["ERADS_ProgressiveAdaptivePlus"] = ERADS_ProgressiveAdaptivePlus - res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptivePlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressiveAdaptivePlus = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptivePlus").set_name("LLAMAERADS_ProgressiveAdaptivePlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveAdaptivePlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressiveAdaptivePlus = NonObjectOptimizer( + method="LLAMAERADS_ProgressiveAdaptivePlus" + ).set_name("LLAMAERADS_ProgressiveAdaptivePlus", register=True) +except Exception as e: # ERADS_ProgressiveAdaptivePlus print("ERADS_ProgressiveAdaptivePlus can not be imported: ", e) -try: +try: # ERADS_ProgressiveDynamic from nevergrad.optimization.lama.ERADS_ProgressiveDynamic import ERADS_ProgressiveDynamic lama_register["ERADS_ProgressiveDynamic"] = ERADS_ProgressiveDynamic - res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressiveDynamic = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic").set_name("LLAMAERADS_ProgressiveDynamic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressiveDynamic = NonObjectOptimizer(method="LLAMAERADS_ProgressiveDynamic").set_name( + "LLAMAERADS_ProgressiveDynamic", register=True + ) +except Exception as e: # ERADS_ProgressiveDynamic print("ERADS_ProgressiveDynamic can not be imported: ", e) -try: +try: # ERADS_ProgressiveOptimized from nevergrad.optimization.lama.ERADS_ProgressiveOptimized import ERADS_ProgressiveOptimized lama_register["ERADS_ProgressiveOptimized"] = ERADS_ProgressiveOptimized - res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressiveOptimized = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized").set_name("LLAMAERADS_ProgressiveOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressiveOptimized = NonObjectOptimizer(method="LLAMAERADS_ProgressiveOptimized").set_name( + "LLAMAERADS_ProgressiveOptimized", register=True + ) +except Exception as e: # ERADS_ProgressiveOptimized print("ERADS_ProgressiveOptimized can not be imported: ", e) -try: +try: # ERADS_ProgressivePrecision from nevergrad.optimization.lama.ERADS_ProgressivePrecision import ERADS_ProgressivePrecision lama_register["ERADS_ProgressivePrecision"] = ERADS_ProgressivePrecision - res = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressivePrecision = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision").set_name("LLAMAERADS_ProgressivePrecision", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressivePrecision = NonObjectOptimizer(method="LLAMAERADS_ProgressivePrecision").set_name( + "LLAMAERADS_ProgressivePrecision", register=True + ) +except Exception as e: # ERADS_ProgressivePrecision print("ERADS_ProgressivePrecision can not be imported: ", e) -try: +try: # ERADS_ProgressiveRefinement from nevergrad.optimization.lama.ERADS_ProgressiveRefinement import ERADS_ProgressiveRefinement lama_register["ERADS_ProgressiveRefinement"] = ERADS_ProgressiveRefinement - res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_ProgressiveRefinement = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement").set_name("LLAMAERADS_ProgressiveRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_ProgressiveRefinement = NonObjectOptimizer(method="LLAMAERADS_ProgressiveRefinement").set_name( + "LLAMAERADS_ProgressiveRefinement", register=True + ) +except Exception as e: # ERADS_ProgressiveRefinement print("ERADS_ProgressiveRefinement can not be imported: ", e) -try: +try: # ERADS_QuantumFlux from nevergrad.optimization.lama.ERADS_QuantumFlux import ERADS_QuantumFlux lama_register["ERADS_QuantumFlux"] = ERADS_QuantumFlux - res = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumFlux = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux").set_name("LLAMAERADS_QuantumFlux", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumFlux = NonObjectOptimizer(method="LLAMAERADS_QuantumFlux").set_name( + "LLAMAERADS_QuantumFlux", register=True + ) +except Exception as e: # ERADS_QuantumFlux print("ERADS_QuantumFlux can not be imported: ", e) -try: +try: # ERADS_QuantumFluxPro from nevergrad.optimization.lama.ERADS_QuantumFluxPro import ERADS_QuantumFluxPro lama_register["ERADS_QuantumFluxPro"] = ERADS_QuantumFluxPro - res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumFluxPro = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro").set_name("LLAMAERADS_QuantumFluxPro", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumFluxPro = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxPro").set_name( + "LLAMAERADS_QuantumFluxPro", register=True + ) +except Exception as e: # ERADS_QuantumFluxPro print("ERADS_QuantumFluxPro can not be imported: ", e) -try: +try: # ERADS_QuantumFluxUltra from nevergrad.optimization.lama.ERADS_QuantumFluxUltra import ERADS_QuantumFluxUltra lama_register["ERADS_QuantumFluxUltra"] = ERADS_QuantumFluxUltra - res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumFluxUltra = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra").set_name("LLAMAERADS_QuantumFluxUltra", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumFluxUltra = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltra").set_name( + "LLAMAERADS_QuantumFluxUltra", register=True + ) +except Exception as e: # ERADS_QuantumFluxUltra print("ERADS_QuantumFluxUltra can not be imported: ", e) -try: +try: # ERADS_QuantumFluxUltraRefined from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefined import ERADS_QuantumFluxUltraRefined lama_register["ERADS_QuantumFluxUltraRefined"] = ERADS_QuantumFluxUltraRefined - res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumFluxUltraRefined = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefined").set_name("LLAMAERADS_QuantumFluxUltraRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumFluxUltraRefined = NonObjectOptimizer( + method="LLAMAERADS_QuantumFluxUltraRefined" + ).set_name("LLAMAERADS_QuantumFluxUltraRefined", register=True) +except Exception as e: # ERADS_QuantumFluxUltraRefined print("ERADS_QuantumFluxUltraRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefinedPlus import ERADS_QuantumFluxUltraRefinedPlus +try: # ERADS_QuantumFluxUltraRefinedPlus + from nevergrad.optimization.lama.ERADS_QuantumFluxUltraRefinedPlus import ( + ERADS_QuantumFluxUltraRefinedPlus, + ) lama_register["ERADS_QuantumFluxUltraRefinedPlus"] = ERADS_QuantumFluxUltraRefinedPlus - res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumFluxUltraRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefinedPlus").set_name("LLAMAERADS_QuantumFluxUltraRefinedPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumFluxUltraRefinedPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumFluxUltraRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_QuantumFluxUltraRefinedPlus" + ).set_name("LLAMAERADS_QuantumFluxUltraRefinedPlus", register=True) +except Exception as e: # ERADS_QuantumFluxUltraRefinedPlus print("ERADS_QuantumFluxUltraRefinedPlus can not be imported: ", e) -try: +try: # ERADS_QuantumLeap from nevergrad.optimization.lama.ERADS_QuantumLeap import ERADS_QuantumLeap lama_register["ERADS_QuantumLeap"] = ERADS_QuantumLeap - res = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_QuantumLeap = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap").set_name("LLAMAERADS_QuantumLeap", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_QuantumLeap = NonObjectOptimizer(method="LLAMAERADS_QuantumLeap").set_name( + "LLAMAERADS_QuantumLeap", register=True + ) +except Exception as e: # ERADS_QuantumLeap print("ERADS_QuantumLeap can not be imported: ", e) -try: +try: # ERADS_Refined from nevergrad.optimization.lama.ERADS_Refined import ERADS_Refined lama_register["ERADS_Refined"] = ERADS_Refined - res = NonObjectOptimizer(method="LLAMAERADS_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Refined = NonObjectOptimizer(method="LLAMAERADS_Refined").set_name("LLAMAERADS_Refined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Refined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Refined = NonObjectOptimizer(method="LLAMAERADS_Refined").set_name( + "LLAMAERADS_Refined", register=True + ) +except Exception as e: # ERADS_Refined print("ERADS_Refined can not be imported: ", e) -try: +try: # ERADS_Superior from nevergrad.optimization.lama.ERADS_Superior import ERADS_Superior lama_register["ERADS_Superior"] = ERADS_Superior - res = NonObjectOptimizer(method="LLAMAERADS_Superior")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Superior = NonObjectOptimizer(method="LLAMAERADS_Superior").set_name("LLAMAERADS_Superior", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Superior")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Superior = NonObjectOptimizer(method="LLAMAERADS_Superior").set_name( + "LLAMAERADS_Superior", register=True + ) +except Exception as e: # ERADS_Superior print("ERADS_Superior can not be imported: ", e) -try: +try: # ERADS_Ultra from nevergrad.optimization.lama.ERADS_Ultra import ERADS_Ultra lama_register["ERADS_Ultra"] = ERADS_Ultra - res = NonObjectOptimizer(method="LLAMAERADS_Ultra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_Ultra = NonObjectOptimizer(method="LLAMAERADS_Ultra").set_name("LLAMAERADS_Ultra", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_Ultra")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_Ultra = NonObjectOptimizer(method="LLAMAERADS_Ultra").set_name( + "LLAMAERADS_Ultra", register=True + ) +except Exception as e: # ERADS_Ultra print("ERADS_Ultra can not be imported: ", e) -try: +try: # ERADS_UltraDynamic from nevergrad.optimization.lama.ERADS_UltraDynamic import ERADS_UltraDynamic lama_register["ERADS_UltraDynamic"] = ERADS_UltraDynamic - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamic = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic").set_name("LLAMAERADS_UltraDynamic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamic = NonObjectOptimizer(method="LLAMAERADS_UltraDynamic").set_name( + "LLAMAERADS_UltraDynamic", register=True + ) +except Exception as e: # ERADS_UltraDynamic print("ERADS_UltraDynamic can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMax from nevergrad.optimization.lama.ERADS_UltraDynamicMax import ERADS_UltraDynamicMax lama_register["ERADS_UltraDynamicMax"] = ERADS_UltraDynamicMax - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMax = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax").set_name("LLAMAERADS_UltraDynamicMax", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMax = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMax").set_name( + "LLAMAERADS_UltraDynamicMax", register=True + ) +except Exception as e: # ERADS_UltraDynamicMax print("ERADS_UltraDynamicMax can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxEnhanced from nevergrad.optimization.lama.ERADS_UltraDynamicMaxEnhanced import ERADS_UltraDynamicMaxEnhanced lama_register["ERADS_UltraDynamicMaxEnhanced"] = ERADS_UltraDynamicMaxEnhanced - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxEnhanced").set_name("LLAMAERADS_UltraDynamicMaxEnhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxEnhanced = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxEnhanced" + ).set_name("LLAMAERADS_UltraDynamicMaxEnhanced", register=True) +except Exception as e: # ERADS_UltraDynamicMaxEnhanced print("ERADS_UltraDynamicMaxEnhanced can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxHybrid from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHybrid import ERADS_UltraDynamicMaxHybrid lama_register["ERADS_UltraDynamicMaxHybrid"] = ERADS_UltraDynamicMaxHybrid - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHybrid = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid").set_name("LLAMAERADS_UltraDynamicMaxHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHybrid = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHybrid").set_name( + "LLAMAERADS_UltraDynamicMaxHybrid", register=True + ) +except Exception as e: # ERADS_UltraDynamicMaxHybrid print("ERADS_UltraDynamicMaxHybrid can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxHyper from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyper import ERADS_UltraDynamicMaxHyper lama_register["ERADS_UltraDynamicMaxHyper"] = ERADS_UltraDynamicMaxHyper - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyper = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper").set_name("LLAMAERADS_UltraDynamicMaxHyper", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyper = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyper").set_name( + "LLAMAERADS_UltraDynamicMaxHyper", register=True + ) +except Exception as e: # ERADS_UltraDynamicMaxHyper print("ERADS_UltraDynamicMaxHyper can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimized import ERADS_UltraDynamicMaxHyperOptimized +try: # ERADS_UltraDynamicMaxHyperOptimized + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimized import ( + ERADS_UltraDynamicMaxHyperOptimized, + ) lama_register["ERADS_UltraDynamicMaxHyperOptimized"] = ERADS_UltraDynamicMaxHyperOptimized - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimized").set_name("LLAMAERADS_UltraDynamicMaxHyperOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimized", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperOptimized print("ERADS_UltraDynamicMaxHyperOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimizedV4 import ERADS_UltraDynamicMaxHyperOptimizedV4 +try: # ERADS_UltraDynamicMaxHyperOptimizedV4 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperOptimizedV4 import ( + ERADS_UltraDynamicMaxHyperOptimizedV4, + ) lama_register["ERADS_UltraDynamicMaxHyperOptimizedV4"] = ERADS_UltraDynamicMaxHyperOptimizedV4 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperOptimizedV4 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4").set_name("LLAMAERADS_UltraDynamicMaxHyperOptimizedV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperOptimizedV4 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperOptimizedV4" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperOptimizedV4", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperOptimizedV4 print("ERADS_UltraDynamicMaxHyperOptimizedV4 can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxHyperPlus from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperPlus import ERADS_UltraDynamicMaxHyperPlus lama_register["ERADS_UltraDynamicMaxHyperPlus"] = ERADS_UltraDynamicMaxHyperPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperPlus").set_name("LLAMAERADS_UltraDynamicMaxHyperPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperPlus", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperPlus print("ERADS_UltraDynamicMaxHyperPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefined import ERADS_UltraDynamicMaxHyperRefined +try: # ERADS_UltraDynamicMaxHyperRefined + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefined import ( + ERADS_UltraDynamicMaxHyperRefined, + ) lama_register["ERADS_UltraDynamicMaxHyperRefined"] = ERADS_UltraDynamicMaxHyperRefined - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefined").set_name("LLAMAERADS_UltraDynamicMaxHyperRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefined", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperRefined print("ERADS_UltraDynamicMaxHyperRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimized import ERADS_UltraDynamicMaxHyperRefinedOptimized +try: # ERADS_UltraDynamicMaxHyperRefinedOptimized + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimized import ( + ERADS_UltraDynamicMaxHyperRefinedOptimized, + ) lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimized"] = ERADS_UltraDynamicMaxHyperRefinedOptimized - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimized", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperRefinedOptimized print("ERADS_UltraDynamicMaxHyperRefinedOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 import ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 - - lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV2"] = ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2", register=True) -except Exception as e: +try: # ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 import ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV2, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV2"] = ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 + ) + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV2", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 import ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 - - lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV3"] = ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3", register=True) -except Exception as e: +try: # ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 import ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV3, + ) + + lama_register["ERADS_UltraDynamicMaxHyperRefinedOptimizedV3"] = ( + ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 + ) + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedOptimizedV3", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 print("ERADS_UltraDynamicMaxHyperRefinedOptimizedV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedPlus import ERADS_UltraDynamicMaxHyperRefinedPlus +try: # ERADS_UltraDynamicMaxHyperRefinedPlus + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxHyperRefinedPlus import ( + ERADS_UltraDynamicMaxHyperRefinedPlus, + ) lama_register["ERADS_UltraDynamicMaxHyperRefinedPlus"] = ERADS_UltraDynamicMaxHyperRefinedPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxHyperRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus").set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxHyperRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxHyperRefinedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxHyperRefinedPlus", register=True) +except Exception as e: # ERADS_UltraDynamicMaxHyperRefinedPlus print("ERADS_UltraDynamicMaxHyperRefinedPlus can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxOptimal from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimal import ERADS_UltraDynamicMaxOptimal lama_register["ERADS_UltraDynamicMaxOptimal"] = ERADS_UltraDynamicMaxOptimal - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxOptimal = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimal").set_name("LLAMAERADS_UltraDynamicMaxOptimal", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxOptimal = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimal" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimal", register=True) +except Exception as e: # ERADS_UltraDynamicMaxOptimal print("ERADS_UltraDynamicMaxOptimal can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxOptimized from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimized import ERADS_UltraDynamicMaxOptimized lama_register["ERADS_UltraDynamicMaxOptimized"] = ERADS_UltraDynamicMaxOptimized - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimized").set_name("LLAMAERADS_UltraDynamicMaxOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimized" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimized", register=True) +except Exception as e: # ERADS_UltraDynamicMaxOptimized print("ERADS_UltraDynamicMaxOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimizedPlus import ERADS_UltraDynamicMaxOptimizedPlus +try: # ERADS_UltraDynamicMaxOptimizedPlus + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxOptimizedPlus import ( + ERADS_UltraDynamicMaxOptimizedPlus, + ) lama_register["ERADS_UltraDynamicMaxOptimizedPlus"] = ERADS_UltraDynamicMaxOptimizedPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimizedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxOptimizedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimizedPlus").set_name("LLAMAERADS_UltraDynamicMaxOptimizedPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxOptimizedPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxOptimizedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxOptimizedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxOptimizedPlus", register=True) +except Exception as e: # ERADS_UltraDynamicMaxOptimizedPlus print("ERADS_UltraDynamicMaxOptimizedPlus can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxPlus from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPlus import ERADS_UltraDynamicMaxPlus lama_register["ERADS_UltraDynamicMaxPlus"] = ERADS_UltraDynamicMaxPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus").set_name("LLAMAERADS_UltraDynamicMaxPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPlus").set_name( + "LLAMAERADS_UltraDynamicMaxPlus", register=True + ) +except Exception as e: # ERADS_UltraDynamicMaxPlus print("ERADS_UltraDynamicMaxPlus can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxPrecision from nevergrad.optimization.lama.ERADS_UltraDynamicMaxPrecision import ERADS_UltraDynamicMaxPrecision lama_register["ERADS_UltraDynamicMaxPrecision"] = ERADS_UltraDynamicMaxPrecision - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPrecision")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxPrecision = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPrecision").set_name("LLAMAERADS_UltraDynamicMaxPrecision", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxPrecision")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxPrecision = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxPrecision" + ).set_name("LLAMAERADS_UltraDynamicMaxPrecision", register=True) +except Exception as e: # ERADS_UltraDynamicMaxPrecision print("ERADS_UltraDynamicMaxPrecision can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxRefined from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefined import ERADS_UltraDynamicMaxRefined lama_register["ERADS_UltraDynamicMaxRefined"] = ERADS_UltraDynamicMaxRefined - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefined").set_name("LLAMAERADS_UltraDynamicMaxRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxRefined", register=True) +except Exception as e: # ERADS_UltraDynamicMaxRefined print("ERADS_UltraDynamicMaxRefined can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxRefinedPlus from nevergrad.optimization.lama.ERADS_UltraDynamicMaxRefinedPlus import ERADS_UltraDynamicMaxRefinedPlus lama_register["ERADS_UltraDynamicMaxRefinedPlus"] = ERADS_UltraDynamicMaxRefinedPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefinedPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxRefinedPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefinedPlus").set_name("LLAMAERADS_UltraDynamicMaxRefinedPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxRefinedPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxRefinedPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxRefinedPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxRefinedPlus", register=True) +except Exception as e: # ERADS_UltraDynamicMaxRefinedPlus print("ERADS_UltraDynamicMaxRefinedPlus can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxSupreme from nevergrad.optimization.lama.ERADS_UltraDynamicMaxSupreme import ERADS_UltraDynamicMaxSupreme lama_register["ERADS_UltraDynamicMaxSupreme"] = ERADS_UltraDynamicMaxSupreme - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxSupreme")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxSupreme = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxSupreme").set_name("LLAMAERADS_UltraDynamicMaxSupreme", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxSupreme")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxSupreme = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxSupreme" + ).set_name("LLAMAERADS_UltraDynamicMaxSupreme", register=True) +except Exception as e: # ERADS_UltraDynamicMaxSupreme print("ERADS_UltraDynamicMaxSupreme can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxUltra from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltra import ERADS_UltraDynamicMaxUltra lama_register["ERADS_UltraDynamicMaxUltra"] = ERADS_UltraDynamicMaxUltra - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltra = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra").set_name("LLAMAERADS_UltraDynamicMaxUltra", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltra = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltra").set_name( + "LLAMAERADS_UltraDynamicMaxUltra", register=True + ) +except Exception as e: # ERADS_UltraDynamicMaxUltra print("ERADS_UltraDynamicMaxUltra can not be imported: ", e) -try: +try: # ERADS_UltraDynamicMaxUltraPlus from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraPlus import ERADS_UltraDynamicMaxUltraPlus lama_register["ERADS_UltraDynamicMaxUltraPlus"] = ERADS_UltraDynamicMaxUltraPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraPlus").set_name("LLAMAERADS_UltraDynamicMaxUltraPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraPlus = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraPlus" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraPlus", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraPlus print("ERADS_UltraDynamicMaxUltraPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefined import ERADS_UltraDynamicMaxUltraRefined +try: # ERADS_UltraDynamicMaxUltraRefined + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefined import ( + ERADS_UltraDynamicMaxUltraRefined, + ) lama_register["ERADS_UltraDynamicMaxUltraRefined"] = ERADS_UltraDynamicMaxUltraRefined - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefined").set_name("LLAMAERADS_UltraDynamicMaxUltraRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefined = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefined" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefined", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefined print("ERADS_UltraDynamicMaxUltraRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV2 import ERADS_UltraDynamicMaxUltraRefinedV2 +try: # ERADS_UltraDynamicMaxUltraRefinedV2 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV2 import ( + ERADS_UltraDynamicMaxUltraRefinedV2, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV2"] = ERADS_UltraDynamicMaxUltraRefinedV2 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV2 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV2 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV2" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV2", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV2 print("ERADS_UltraDynamicMaxUltraRefinedV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV3 import ERADS_UltraDynamicMaxUltraRefinedV3 +try: # ERADS_UltraDynamicMaxUltraRefinedV3 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV3 import ( + ERADS_UltraDynamicMaxUltraRefinedV3, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV3"] = ERADS_UltraDynamicMaxUltraRefinedV3 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV3 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV3 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV3" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV3", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV3 print("ERADS_UltraDynamicMaxUltraRefinedV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV4 import ERADS_UltraDynamicMaxUltraRefinedV4 +try: # ERADS_UltraDynamicMaxUltraRefinedV4 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV4 import ( + ERADS_UltraDynamicMaxUltraRefinedV4, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV4"] = ERADS_UltraDynamicMaxUltraRefinedV4 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV4 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV4 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV4" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV4", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV4 print("ERADS_UltraDynamicMaxUltraRefinedV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV5 import ERADS_UltraDynamicMaxUltraRefinedV5 +try: # ERADS_UltraDynamicMaxUltraRefinedV5 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV5 import ( + ERADS_UltraDynamicMaxUltraRefinedV5, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV5"] = ERADS_UltraDynamicMaxUltraRefinedV5 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV5 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV5 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV5" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV5", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV5 print("ERADS_UltraDynamicMaxUltraRefinedV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV6 import ERADS_UltraDynamicMaxUltraRefinedV6 +try: # ERADS_UltraDynamicMaxUltraRefinedV6 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV6 import ( + ERADS_UltraDynamicMaxUltraRefinedV6, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV6"] = ERADS_UltraDynamicMaxUltraRefinedV6 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV6 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV6 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV6" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV6", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV6 print("ERADS_UltraDynamicMaxUltraRefinedV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV7 import ERADS_UltraDynamicMaxUltraRefinedV7 +try: # ERADS_UltraDynamicMaxUltraRefinedV7 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV7 import ( + ERADS_UltraDynamicMaxUltraRefinedV7, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV7"] = ERADS_UltraDynamicMaxUltraRefinedV7 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV7 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV7 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV7" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV7", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV7 print("ERADS_UltraDynamicMaxUltraRefinedV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV8 import ERADS_UltraDynamicMaxUltraRefinedV8 +try: # ERADS_UltraDynamicMaxUltraRefinedV8 + from nevergrad.optimization.lama.ERADS_UltraDynamicMaxUltraRefinedV8 import ( + ERADS_UltraDynamicMaxUltraRefinedV8, + ) lama_register["ERADS_UltraDynamicMaxUltraRefinedV8"] = ERADS_UltraDynamicMaxUltraRefinedV8 - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicMaxUltraRefinedV8 = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8").set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicMaxUltraRefinedV8 = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicMaxUltraRefinedV8" + ).set_name("LLAMAERADS_UltraDynamicMaxUltraRefinedV8", register=True) +except Exception as e: # ERADS_UltraDynamicMaxUltraRefinedV8 print("ERADS_UltraDynamicMaxUltraRefinedV8 can not be imported: ", e) -try: +try: # ERADS_UltraDynamicPlus from nevergrad.optimization.lama.ERADS_UltraDynamicPlus import ERADS_UltraDynamicPlus lama_register["ERADS_UltraDynamicPlus"] = ERADS_UltraDynamicPlus - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus").set_name("LLAMAERADS_UltraDynamicPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicPlus = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPlus").set_name( + "LLAMAERADS_UltraDynamicPlus", register=True + ) +except Exception as e: # ERADS_UltraDynamicPlus print("ERADS_UltraDynamicPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionEnhanced import ERADS_UltraDynamicPrecisionEnhanced +try: # ERADS_UltraDynamicPrecisionEnhanced + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionEnhanced import ( + ERADS_UltraDynamicPrecisionEnhanced, + ) lama_register["ERADS_UltraDynamicPrecisionEnhanced"] = ERADS_UltraDynamicPrecisionEnhanced - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicPrecisionEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionEnhanced").set_name("LLAMAERADS_UltraDynamicPrecisionEnhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicPrecisionEnhanced = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicPrecisionEnhanced" + ).set_name("LLAMAERADS_UltraDynamicPrecisionEnhanced", register=True) +except Exception as e: # ERADS_UltraDynamicPrecisionEnhanced print("ERADS_UltraDynamicPrecisionEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionOptimized import ERADS_UltraDynamicPrecisionOptimized +try: # ERADS_UltraDynamicPrecisionOptimized + from nevergrad.optimization.lama.ERADS_UltraDynamicPrecisionOptimized import ( + ERADS_UltraDynamicPrecisionOptimized, + ) lama_register["ERADS_UltraDynamicPrecisionOptimized"] = ERADS_UltraDynamicPrecisionOptimized - res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraDynamicPrecisionOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionOptimized").set_name("LLAMAERADS_UltraDynamicPrecisionOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraDynamicPrecisionOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraDynamicPrecisionOptimized = NonObjectOptimizer( + method="LLAMAERADS_UltraDynamicPrecisionOptimized" + ).set_name("LLAMAERADS_UltraDynamicPrecisionOptimized", register=True) +except Exception as e: # ERADS_UltraDynamicPrecisionOptimized print("ERADS_UltraDynamicPrecisionOptimized can not be imported: ", e) -try: +try: # ERADS_UltraEnhanced from nevergrad.optimization.lama.ERADS_UltraEnhanced import ERADS_UltraEnhanced lama_register["ERADS_UltraEnhanced"] = ERADS_UltraEnhanced - res = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced").set_name("LLAMAERADS_UltraEnhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraEnhanced = NonObjectOptimizer(method="LLAMAERADS_UltraEnhanced").set_name( + "LLAMAERADS_UltraEnhanced", register=True + ) +except Exception as e: # ERADS_UltraEnhanced print("ERADS_UltraEnhanced can not be imported: ", e) -try: +try: # ERADS_UltraMax from nevergrad.optimization.lama.ERADS_UltraMax import ERADS_UltraMax lama_register["ERADS_UltraMax"] = ERADS_UltraMax - res = NonObjectOptimizer(method="LLAMAERADS_UltraMax")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraMax = NonObjectOptimizer(method="LLAMAERADS_UltraMax").set_name("LLAMAERADS_UltraMax", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraMax")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraMax = NonObjectOptimizer(method="LLAMAERADS_UltraMax").set_name( + "LLAMAERADS_UltraMax", register=True + ) +except Exception as e: # ERADS_UltraMax print("ERADS_UltraMax can not be imported: ", e) -try: +try: # ERADS_UltraOptimized from nevergrad.optimization.lama.ERADS_UltraOptimized import ERADS_UltraOptimized lama_register["ERADS_UltraOptimized"] = ERADS_UltraOptimized - res = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized").set_name("LLAMAERADS_UltraOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraOptimized = NonObjectOptimizer(method="LLAMAERADS_UltraOptimized").set_name( + "LLAMAERADS_UltraOptimized", register=True + ) +except Exception as e: # ERADS_UltraOptimized print("ERADS_UltraOptimized can not be imported: ", e) -try: +try: # ERADS_UltraPrecise from nevergrad.optimization.lama.ERADS_UltraPrecise import ERADS_UltraPrecise lama_register["ERADS_UltraPrecise"] = ERADS_UltraPrecise - res = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraPrecise = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise").set_name("LLAMAERADS_UltraPrecise", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraPrecise = NonObjectOptimizer(method="LLAMAERADS_UltraPrecise").set_name( + "LLAMAERADS_UltraPrecise", register=True + ) +except Exception as e: # ERADS_UltraPrecise print("ERADS_UltraPrecise can not be imported: ", e) -try: +try: # ERADS_UltraRefined from nevergrad.optimization.lama.ERADS_UltraRefined import ERADS_UltraRefined lama_register["ERADS_UltraRefined"] = ERADS_UltraRefined - res = NonObjectOptimizer(method="LLAMAERADS_UltraRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAERADS_UltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraRefined").set_name("LLAMAERADS_UltraRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAERADS_UltraRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAERADS_UltraRefined = NonObjectOptimizer(method="LLAMAERADS_UltraRefined").set_name( + "LLAMAERADS_UltraRefined", register=True + ) +except Exception as e: # ERADS_UltraRefined print("ERADS_UltraRefined can not be imported: ", e) -try: +try: # ERAMEDS from nevergrad.optimization.lama.ERAMEDS import ERAMEDS lama_register["ERAMEDS"] = ERAMEDS - res = NonObjectOptimizer(method="LLAMAERAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAERAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAERAMEDS = NonObjectOptimizer(method="LLAMAERAMEDS").set_name("LLAMAERAMEDS", register=True) -except Exception as e: +except Exception as e: # ERAMEDS print("ERAMEDS can not be imported: ", e) -try: +try: # ESADE from nevergrad.optimization.lama.ESADE import ESADE lama_register["ESADE"] = ESADE - res = NonObjectOptimizer(method="LLAMAESADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAESADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAESADE = NonObjectOptimizer(method="LLAMAESADE").set_name("LLAMAESADE", register=True) -except Exception as e: +except Exception as e: # ESADE print("ESADE can not be imported: ", e) -try: +try: # ESADEPFLLP from nevergrad.optimization.lama.ESADEPFLLP import ESADEPFLLP lama_register["ESADEPFLLP"] = ESADEPFLLP - res = NonObjectOptimizer(method="LLAMAESADEPFLLP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAESADEPFLLP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAESADEPFLLP = NonObjectOptimizer(method="LLAMAESADEPFLLP").set_name("LLAMAESADEPFLLP", register=True) -except Exception as e: +except Exception as e: # ESADEPFLLP print("ESADEPFLLP can not be imported: ", e) -try: +try: # ESBASM from nevergrad.optimization.lama.ESBASM import ESBASM lama_register["ESBASM"] = ESBASM - res = NonObjectOptimizer(method="LLAMAESBASM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAESBASM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAESBASM = NonObjectOptimizer(method="LLAMAESBASM").set_name("LLAMAESBASM", register=True) -except Exception as e: +except Exception as e: # ESBASM print("ESBASM can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteAdaptiveCrowdingHybridOptimizer import EliteAdaptiveCrowdingHybridOptimizer +try: # EliteAdaptiveCrowdingHybridOptimizer + from nevergrad.optimization.lama.EliteAdaptiveCrowdingHybridOptimizer import ( + EliteAdaptiveCrowdingHybridOptimizer, + ) lama_register["EliteAdaptiveCrowdingHybridOptimizer"] = EliteAdaptiveCrowdingHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteAdaptiveCrowdingHybridOptimizer").set_name("LLAMAEliteAdaptiveCrowdingHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteAdaptiveCrowdingHybridOptimizer" + ).set_name("LLAMAEliteAdaptiveCrowdingHybridOptimizer", register=True) +except Exception as e: # EliteAdaptiveCrowdingHybridOptimizer print("EliteAdaptiveCrowdingHybridOptimizer can not be imported: ", e) -try: +try: # EliteAdaptiveHybridDEPSO from nevergrad.optimization.lama.EliteAdaptiveHybridDEPSO import EliteAdaptiveHybridDEPSO lama_register["EliteAdaptiveHybridDEPSO"] = EliteAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO").set_name("LLAMAEliteAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteAdaptiveHybridDEPSO").set_name( + "LLAMAEliteAdaptiveHybridDEPSO", register=True + ) +except Exception as e: # EliteAdaptiveHybridDEPSO print("EliteAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteAdaptiveMemeticDifferentialEvolution import EliteAdaptiveMemeticDifferentialEvolution +try: # EliteAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.EliteAdaptiveMemeticDifferentialEvolution import ( + EliteAdaptiveMemeticDifferentialEvolution, + ) lama_register["EliteAdaptiveMemeticDifferentialEvolution"] = EliteAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEliteAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEliteAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # EliteAdaptiveMemeticDifferentialEvolution print("EliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 import EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 - - lama_register["EliteAdaptiveMemoryDynamicCrowdingOptimizerV2"] = EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2 = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2").set_name("LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2", register=True) -except Exception as e: +try: # EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 + from nevergrad.optimization.lama.EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 import ( + EliteAdaptiveMemoryDynamicCrowdingOptimizerV2, + ) + + lama_register["EliteAdaptiveMemoryDynamicCrowdingOptimizerV2"] = ( + EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2 = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2" + ).set_name("LLAMAEliteAdaptiveMemoryDynamicCrowdingOptimizerV2", register=True) +except Exception as e: # EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 print("EliteAdaptiveMemoryDynamicCrowdingOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteAdaptiveMemoryHybridOptimizer import EliteAdaptiveMemoryHybridOptimizer +try: # EliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.EliteAdaptiveMemoryHybridOptimizer import ( + EliteAdaptiveMemoryHybridOptimizer, + ) lama_register["EliteAdaptiveMemoryHybridOptimizer"] = EliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # EliteAdaptiveMemoryHybridOptimizer print("EliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch import EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch - - lama_register["EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch"] = EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch - res = NonObjectOptimizer(method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch").set_name("LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch", register=True) -except Exception as e: +try: # EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch + from nevergrad.optimization.lama.EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch import ( + EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch, + ) + + lama_register["EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch"] = ( + EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch" + ).set_name("LLAMAEliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch", register=True) +except Exception as e: # EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch print("EliteAdaptiveQuantumDEWithAdaptiveMemoryAndHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteCovarianceMatrixAdaptationMemeticSearch import EliteCovarianceMatrixAdaptationMemeticSearch - - lama_register["EliteCovarianceMatrixAdaptationMemeticSearch"] = EliteCovarianceMatrixAdaptationMemeticSearch - res = NonObjectOptimizer(method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer(method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch").set_name("LLAMAEliteCovarianceMatrixAdaptationMemeticSearch", register=True) -except Exception as e: +try: # EliteCovarianceMatrixAdaptationMemeticSearch + from nevergrad.optimization.lama.EliteCovarianceMatrixAdaptationMemeticSearch import ( + EliteCovarianceMatrixAdaptationMemeticSearch, + ) + + lama_register["EliteCovarianceMatrixAdaptationMemeticSearch"] = ( + EliteCovarianceMatrixAdaptationMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( + method="LLAMAEliteCovarianceMatrixAdaptationMemeticSearch" + ).set_name("LLAMAEliteCovarianceMatrixAdaptationMemeticSearch", register=True) +except Exception as e: # EliteCovarianceMatrixAdaptationMemeticSearch print("EliteCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) -try: +try: # EliteDynamicHybridOptimizer from nevergrad.optimization.lama.EliteDynamicHybridOptimizer import EliteDynamicHybridOptimizer lama_register["EliteDynamicHybridOptimizer"] = EliteDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer").set_name("LLAMAEliteDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicHybridOptimizer").set_name( + "LLAMAEliteDynamicHybridOptimizer", register=True + ) +except Exception as e: # EliteDynamicHybridOptimizer print("EliteDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteDynamicMemoryHybridOptimizer import EliteDynamicMemoryHybridOptimizer +try: # EliteDynamicMemoryHybridOptimizer + from nevergrad.optimization.lama.EliteDynamicMemoryHybridOptimizer import ( + EliteDynamicMemoryHybridOptimizer, + ) lama_register["EliteDynamicMemoryHybridOptimizer"] = EliteDynamicMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteDynamicMemoryHybridOptimizer").set_name("LLAMAEliteDynamicMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMAEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: # EliteDynamicMemoryHybridOptimizer print("EliteDynamicMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteDynamicMultiStrategyHybridDEPSO import EliteDynamicMultiStrategyHybridDEPSO +try: # EliteDynamicMultiStrategyHybridDEPSO + from nevergrad.optimization.lama.EliteDynamicMultiStrategyHybridDEPSO import ( + EliteDynamicMultiStrategyHybridDEPSO, + ) lama_register["EliteDynamicMultiStrategyHybridDEPSO"] = EliteDynamicMultiStrategyHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEliteDynamicMultiStrategyHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteDynamicMultiStrategyHybridDEPSO = NonObjectOptimizer(method="LLAMAEliteDynamicMultiStrategyHybridDEPSO").set_name("LLAMAEliteDynamicMultiStrategyHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteDynamicMultiStrategyHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteDynamicMultiStrategyHybridDEPSO = NonObjectOptimizer( + method="LLAMAEliteDynamicMultiStrategyHybridDEPSO" + ).set_name("LLAMAEliteDynamicMultiStrategyHybridDEPSO", register=True) +except Exception as e: # EliteDynamicMultiStrategyHybridDEPSO print("EliteDynamicMultiStrategyHybridDEPSO can not be imported: ", e) -try: +try: # EliteGuidedAdaptiveRestartDE from nevergrad.optimization.lama.EliteGuidedAdaptiveRestartDE import EliteGuidedAdaptiveRestartDE lama_register["EliteGuidedAdaptiveRestartDE"] = EliteGuidedAdaptiveRestartDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAEliteGuidedAdaptiveRestartDE").set_name("LLAMAEliteGuidedAdaptiveRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMAEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMAEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: # EliteGuidedAdaptiveRestartDE print("EliteGuidedAdaptiveRestartDE can not be imported: ", e) -try: +try: # EliteGuidedDualStrategyDE from nevergrad.optimization.lama.EliteGuidedDualStrategyDE import EliteGuidedDualStrategyDE lama_register["EliteGuidedDualStrategyDE"] = EliteGuidedDualStrategyDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedDualStrategyDE = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE").set_name("LLAMAEliteGuidedDualStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedDualStrategyDE = NonObjectOptimizer(method="LLAMAEliteGuidedDualStrategyDE").set_name( + "LLAMAEliteGuidedDualStrategyDE", register=True + ) +except Exception as e: # EliteGuidedDualStrategyDE print("EliteGuidedDualStrategyDE can not be imported: ", e) -try: +try: # EliteGuidedHybridAdaptiveDE from nevergrad.optimization.lama.EliteGuidedHybridAdaptiveDE import EliteGuidedHybridAdaptiveDE lama_register["EliteGuidedHybridAdaptiveDE"] = EliteGuidedHybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE").set_name("LLAMAEliteGuidedHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridAdaptiveDE").set_name( + "LLAMAEliteGuidedHybridAdaptiveDE", register=True + ) +except Exception as e: # EliteGuidedHybridAdaptiveDE print("EliteGuidedHybridAdaptiveDE can not be imported: ", e) -try: +try: # EliteGuidedHybridDE from nevergrad.optimization.lama.EliteGuidedHybridDE import EliteGuidedHybridDE lama_register["EliteGuidedHybridDE"] = EliteGuidedHybridDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedHybridDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE").set_name("LLAMAEliteGuidedHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedHybridDE = NonObjectOptimizer(method="LLAMAEliteGuidedHybridDE").set_name( + "LLAMAEliteGuidedHybridDE", register=True + ) +except Exception as e: # EliteGuidedHybridDE print("EliteGuidedHybridDE can not be imported: ", e) -try: +try: # EliteGuidedMutationDE from nevergrad.optimization.lama.EliteGuidedMutationDE import EliteGuidedMutationDE lama_register["EliteGuidedMutationDE"] = EliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE").set_name("LLAMAEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE").set_name( + "LLAMAEliteGuidedMutationDE", register=True + ) +except Exception as e: # EliteGuidedMutationDE print("EliteGuidedMutationDE can not be imported: ", e) -try: +try: # EliteGuidedMutationDE_v2 from nevergrad.optimization.lama.EliteGuidedMutationDE_v2 import EliteGuidedMutationDE_v2 lama_register["EliteGuidedMutationDE_v2"] = EliteGuidedMutationDE_v2 - res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2").set_name("LLAMAEliteGuidedMutationDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEliteGuidedMutationDE_v2").set_name( + "LLAMAEliteGuidedMutationDE_v2", register=True + ) +except Exception as e: # EliteGuidedMutationDE_v2 print("EliteGuidedMutationDE_v2 can not be imported: ", e) -try: +try: # EliteGuidedQuantumAdaptiveDE from nevergrad.optimization.lama.EliteGuidedQuantumAdaptiveDE import EliteGuidedQuantumAdaptiveDE lama_register["EliteGuidedQuantumAdaptiveDE"] = EliteGuidedQuantumAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEliteGuidedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteGuidedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteGuidedQuantumAdaptiveDE").set_name("LLAMAEliteGuidedQuantumAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteGuidedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteGuidedQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMAEliteGuidedQuantumAdaptiveDE" + ).set_name("LLAMAEliteGuidedQuantumAdaptiveDE", register=True) +except Exception as e: # EliteGuidedQuantumAdaptiveDE print("EliteGuidedQuantumAdaptiveDE can not be imported: ", e) -try: +try: # EliteHybridAdaptiveOptimizer from nevergrad.optimization.lama.EliteHybridAdaptiveOptimizer import EliteHybridAdaptiveOptimizer lama_register["EliteHybridAdaptiveOptimizer"] = EliteHybridAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEliteHybridAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteHybridAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEliteHybridAdaptiveOptimizer").set_name("LLAMAEliteHybridAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteHybridAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteHybridAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEliteHybridAdaptiveOptimizer" + ).set_name("LLAMAEliteHybridAdaptiveOptimizer", register=True) +except Exception as e: # EliteHybridAdaptiveOptimizer print("EliteHybridAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteMemoryEnhancedDynamicHybridOptimizer import EliteMemoryEnhancedDynamicHybridOptimizer +try: # EliteMemoryEnhancedDynamicHybridOptimizer + from nevergrad.optimization.lama.EliteMemoryEnhancedDynamicHybridOptimizer import ( + EliteMemoryEnhancedDynamicHybridOptimizer, + ) lama_register["EliteMemoryEnhancedDynamicHybridOptimizer"] = EliteMemoryEnhancedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMAEliteMemoryEnhancedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEliteMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAEliteMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: # EliteMemoryEnhancedDynamicHybridOptimizer print("EliteMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) -try: +try: # EliteMultiStrategySelfAdaptiveDE from nevergrad.optimization.lama.EliteMultiStrategySelfAdaptiveDE import EliteMultiStrategySelfAdaptiveDE lama_register["EliteMultiStrategySelfAdaptiveDE"] = EliteMultiStrategySelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEliteMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEliteMultiStrategySelfAdaptiveDE").set_name("LLAMAEliteMultiStrategySelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAEliteMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAEliteMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: # EliteMultiStrategySelfAdaptiveDE print("EliteMultiStrategySelfAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.ElitePreservingDifferentialEvolution import ElitePreservingDifferentialEvolution +try: # ElitePreservingDifferentialEvolution + from nevergrad.optimization.lama.ElitePreservingDifferentialEvolution import ( + ElitePreservingDifferentialEvolution, + ) lama_register["ElitePreservingDifferentialEvolution"] = ElitePreservingDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAElitePreservingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAElitePreservingDifferentialEvolution = NonObjectOptimizer(method="LLAMAElitePreservingDifferentialEvolution").set_name("LLAMAElitePreservingDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAElitePreservingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAElitePreservingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAElitePreservingDifferentialEvolution" + ).set_name("LLAMAElitePreservingDifferentialEvolution", register=True) +except Exception as e: # ElitePreservingDifferentialEvolution print("ElitePreservingDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteQuantumAdaptiveExplorationOptimization import EliteQuantumAdaptiveExplorationOptimization +try: # EliteQuantumAdaptiveExplorationOptimization + from nevergrad.optimization.lama.EliteQuantumAdaptiveExplorationOptimization import ( + EliteQuantumAdaptiveExplorationOptimization, + ) lama_register["EliteQuantumAdaptiveExplorationOptimization"] = EliteQuantumAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEliteQuantumAdaptiveExplorationOptimization").set_name("LLAMAEliteQuantumAdaptiveExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEliteQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMAEliteQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: # EliteQuantumAdaptiveExplorationOptimization print("EliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteQuantumDifferentialMemeticOptimizer import EliteQuantumDifferentialMemeticOptimizer +try: # EliteQuantumDifferentialMemeticOptimizer + from nevergrad.optimization.lama.EliteQuantumDifferentialMemeticOptimizer import ( + EliteQuantumDifferentialMemeticOptimizer, + ) lama_register["EliteQuantumDifferentialMemeticOptimizer"] = EliteQuantumDifferentialMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMAEliteQuantumDifferentialMemeticOptimizer").set_name("LLAMAEliteQuantumDifferentialMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEliteQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMAEliteQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: # EliteQuantumDifferentialMemeticOptimizer print("EliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteRefinedAdaptivePrecisionOptimizer import EliteRefinedAdaptivePrecisionOptimizer +try: # EliteRefinedAdaptivePrecisionOptimizer + from nevergrad.optimization.lama.EliteRefinedAdaptivePrecisionOptimizer import ( + EliteRefinedAdaptivePrecisionOptimizer, + ) lama_register["EliteRefinedAdaptivePrecisionOptimizer"] = EliteRefinedAdaptivePrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAEliteRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAEliteRefinedAdaptivePrecisionOptimizer").set_name("LLAMAEliteRefinedAdaptivePrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAEliteRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAEliteRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: # EliteRefinedAdaptivePrecisionOptimizer print("EliteRefinedAdaptivePrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EliteTranscendentalEvolutionaryOptimizer import EliteTranscendentalEvolutionaryOptimizer +try: # EliteTranscendentalEvolutionaryOptimizer + from nevergrad.optimization.lama.EliteTranscendentalEvolutionaryOptimizer import ( + EliteTranscendentalEvolutionaryOptimizer, + ) lama_register["EliteTranscendentalEvolutionaryOptimizer"] = EliteTranscendentalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAEliteTranscendentalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEliteTranscendentalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEliteTranscendentalEvolutionaryOptimizer").set_name("LLAMAEliteTranscendentalEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEliteTranscendentalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEliteTranscendentalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEliteTranscendentalEvolutionaryOptimizer" + ).set_name("LLAMAEliteTranscendentalEvolutionaryOptimizer", register=True) +except Exception as e: # EliteTranscendentalEvolutionaryOptimizer print("EliteTranscendentalEvolutionaryOptimizer can not be imported: ", e) -try: +try: # ElitistAdaptiveDE from nevergrad.optimization.lama.ElitistAdaptiveDE import ElitistAdaptiveDE lama_register["ElitistAdaptiveDE"] = ElitistAdaptiveDE - res = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAElitistAdaptiveDE = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE").set_name("LLAMAElitistAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAElitistAdaptiveDE = NonObjectOptimizer(method="LLAMAElitistAdaptiveDE").set_name( + "LLAMAElitistAdaptiveDE", register=True + ) +except Exception as e: # ElitistAdaptiveDE print("ElitistAdaptiveDE can not be imported: ", e) -try: +try: # EnhancedAQAPSOHR_LSDIW from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW import EnhancedAQAPSOHR_LSDIW lama_register["EnhancedAQAPSOHR_LSDIW"] = EnhancedAQAPSOHR_LSDIW - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSOHR_LSDIW = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW").set_name("LLAMAEnhancedAQAPSOHR_LSDIW", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSOHR_LSDIW = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW").set_name( + "LLAMAEnhancedAQAPSOHR_LSDIW", register=True + ) +except Exception as e: # EnhancedAQAPSOHR_LSDIW print("EnhancedAQAPSOHR_LSDIW can not be imported: ", e) -try: +try: # EnhancedAQAPSOHR_LSDIW_AP from nevergrad.optimization.lama.EnhancedAQAPSOHR_LSDIW_AP import EnhancedAQAPSOHR_LSDIW_AP lama_register["EnhancedAQAPSOHR_LSDIW_AP"] = EnhancedAQAPSOHR_LSDIW_AP - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSOHR_LSDIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP").set_name("LLAMAEnhancedAQAPSOHR_LSDIW_AP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSOHR_LSDIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSOHR_LSDIW_AP").set_name( + "LLAMAEnhancedAQAPSOHR_LSDIW_AP", register=True + ) +except Exception as e: # EnhancedAQAPSOHR_LSDIW_AP print("EnhancedAQAPSOHR_LSDIW_AP can not be imported: ", e) -try: +try: # EnhancedAQAPSO_LS_DIW_AP from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP import EnhancedAQAPSO_LS_DIW_AP lama_register["EnhancedAQAPSO_LS_DIW_AP"] = EnhancedAQAPSO_LS_DIW_AP - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP").set_name( + "LLAMAEnhancedAQAPSO_LS_DIW_AP", register=True + ) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP print("EnhancedAQAPSO_LS_DIW_AP can not be imported: ", e) -try: +try: # EnhancedAQAPSO_LS_DIW_AP_Final from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Final import EnhancedAQAPSO_LS_DIW_AP_Final lama_register["EnhancedAQAPSO_LS_DIW_AP_Final"] = EnhancedAQAPSO_LS_DIW_AP_Final - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Final = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Final", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Final = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Final" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Final", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Final print("EnhancedAQAPSO_LS_DIW_AP_Final can not be imported: ", e) -try: +try: # EnhancedAQAPSO_LS_DIW_AP_Refined from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined import EnhancedAQAPSO_LS_DIW_AP_Refined lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Refined - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Refined print("EnhancedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined_Final import EnhancedAQAPSO_LS_DIW_AP_Refined_Final +try: # EnhancedAQAPSO_LS_DIW_AP_Refined_Final + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Refined_Final import ( + EnhancedAQAPSO_LS_DIW_AP_Refined_Final, + ) lama_register["EnhancedAQAPSO_LS_DIW_AP_Refined_Final"] = EnhancedAQAPSO_LS_DIW_AP_Refined_Final - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Refined_Final", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Refined_Final print("EnhancedAQAPSO_LS_DIW_AP_Refined_Final can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate import EnhancedAQAPSO_LS_DIW_AP_Ultimate +try: # EnhancedAQAPSO_LS_DIW_AP_Ultimate + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate, + ) lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Ultimate print("EnhancedAQAPSO_LS_DIW_AP_Ultimate can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined +try: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, + ) lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined - - lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined", register=True) -except Exception as e: +try: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined"] = ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined_Refined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined +try: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined, + ) lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined import EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined - - lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined"] = EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined - res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined").set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined", register=True) -except Exception as e: +try: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined + from nevergrad.optimization.lama.EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined import ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined, + ) + + lama_register["EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined"] = ( + EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined = NonObjectOptimizer( + method="LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined" + ).set_name("LLAMAEnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined", register=True) +except Exception as e: # EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined print("EnhancedAQAPSO_LS_DIW_AP_Ultimate_Refined_Redefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v2 import EnhancedAdaptiveChaoticFireworksOptimization_v2 - - lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v2"] = EnhancedAdaptiveChaoticFireworksOptimization_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2").set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2", register=True) -except Exception as e: +try: # EnhancedAdaptiveChaoticFireworksOptimization_v2 + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v2 import ( + EnhancedAdaptiveChaoticFireworksOptimization_v2, + ) + + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v2"] = ( + EnhancedAdaptiveChaoticFireworksOptimization_v2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2" + ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v2", register=True) +except Exception as e: # EnhancedAdaptiveChaoticFireworksOptimization_v2 print("EnhancedAdaptiveChaoticFireworksOptimization_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v3 import EnhancedAdaptiveChaoticFireworksOptimization_v3 - - lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v3"] = EnhancedAdaptiveChaoticFireworksOptimization_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3").set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3", register=True) -except Exception as e: +try: # EnhancedAdaptiveChaoticFireworksOptimization_v3 + from nevergrad.optimization.lama.EnhancedAdaptiveChaoticFireworksOptimization_v3 import ( + EnhancedAdaptiveChaoticFireworksOptimization_v3, + ) + + lama_register["EnhancedAdaptiveChaoticFireworksOptimization_v3"] = ( + EnhancedAdaptiveChaoticFireworksOptimization_v3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3" + ).set_name("LLAMAEnhancedAdaptiveChaoticFireworksOptimization_v3", register=True) +except Exception as e: # EnhancedAdaptiveChaoticFireworksOptimization_v3 print("EnhancedAdaptiveChaoticFireworksOptimization_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveCohortMemeticAlgorithm import EnhancedAdaptiveCohortMemeticAlgorithm +try: # EnhancedAdaptiveCohortMemeticAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveCohortMemeticAlgorithm import ( + EnhancedAdaptiveCohortMemeticAlgorithm, + ) lama_register["EnhancedAdaptiveCohortMemeticAlgorithm"] = EnhancedAdaptiveCohortMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveCohortMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveCohortMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCohortMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveCohortMemeticAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveCohortMemeticAlgorithm print("EnhancedAdaptiveCohortMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveControlledMemoryAnnealing import EnhancedAdaptiveControlledMemoryAnnealing +try: # EnhancedAdaptiveControlledMemoryAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveControlledMemoryAnnealing import ( + EnhancedAdaptiveControlledMemoryAnnealing, + ) lama_register["EnhancedAdaptiveControlledMemoryAnnealing"] = EnhancedAdaptiveControlledMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveControlledMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing").set_name("LLAMAEnhancedAdaptiveControlledMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveControlledMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveControlledMemoryAnnealing" + ).set_name("LLAMAEnhancedAdaptiveControlledMemoryAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveControlledMemoryAnnealing print("EnhancedAdaptiveControlledMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 import EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 - - lama_register["EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4"] = EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 import ( + EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4"] = ( + EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: # EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 print("EnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixEvolution import EnhancedAdaptiveCovarianceMatrixEvolution +try: # EnhancedAdaptiveCovarianceMatrixEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveCovarianceMatrixEvolution import ( + EnhancedAdaptiveCovarianceMatrixEvolution, + ) lama_register["EnhancedAdaptiveCovarianceMatrixEvolution"] = EnhancedAdaptiveCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution").set_name("LLAMAEnhancedAdaptiveCovarianceMatrixEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: # EnhancedAdaptiveCovarianceMatrixEvolution print("EnhancedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) -try: +try: # EnhancedAdaptiveDEPSOOptimizer from nevergrad.optimization.lama.EnhancedAdaptiveDEPSOOptimizer import EnhancedAdaptiveDEPSOOptimizer lama_register["EnhancedAdaptiveDEPSOOptimizer"] = EnhancedAdaptiveDEPSOOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDEPSOOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDEPSOOptimizer").set_name("LLAMAEnhancedAdaptiveDEPSOOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDEPSOOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDEPSOOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDEPSOOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDEPSOOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveDEPSOOptimizer print("EnhancedAdaptiveDEPSOOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiffEvolutionGradientDescent import EnhancedAdaptiveDiffEvolutionGradientDescent - - lama_register["EnhancedAdaptiveDiffEvolutionGradientDescent"] = EnhancedAdaptiveDiffEvolutionGradientDescent - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent").set_name("LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiffEvolutionGradientDescent + from nevergrad.optimization.lama.EnhancedAdaptiveDiffEvolutionGradientDescent import ( + EnhancedAdaptiveDiffEvolutionGradientDescent, + ) + + lama_register["EnhancedAdaptiveDiffEvolutionGradientDescent"] = ( + EnhancedAdaptiveDiffEvolutionGradientDescent + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent" + ).set_name("LLAMAEnhancedAdaptiveDiffEvolutionGradientDescent", register=True) +except Exception as e: # EnhancedAdaptiveDiffEvolutionGradientDescent print("EnhancedAdaptiveDiffEvolutionGradientDescent can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolution import EnhancedAdaptiveDifferentialEvolution +try: # EnhancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolution import ( + EnhancedAdaptiveDifferentialEvolution, + ) lama_register["EnhancedAdaptiveDifferentialEvolution"] = EnhancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolution print("EnhancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamic import EnhancedAdaptiveDifferentialEvolutionDynamic - - lama_register["EnhancedAdaptiveDifferentialEvolutionDynamic"] = EnhancedAdaptiveDifferentialEvolutionDynamic - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionDynamic + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamic import ( + EnhancedAdaptiveDifferentialEvolutionDynamic, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamic"] = ( + EnhancedAdaptiveDifferentialEvolutionDynamic + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionDynamic print("EnhancedAdaptiveDifferentialEvolutionDynamic can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamicImproved import EnhancedAdaptiveDifferentialEvolutionDynamicImproved - - lama_register["EnhancedAdaptiveDifferentialEvolutionDynamicImproved"] = EnhancedAdaptiveDifferentialEvolutionDynamicImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionDynamicImproved + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionDynamicImproved import ( + EnhancedAdaptiveDifferentialEvolutionDynamicImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionDynamicImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionDynamicImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionDynamicImproved print("EnhancedAdaptiveDifferentialEvolutionDynamicImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionEnhanced import EnhancedAdaptiveDifferentialEvolutionEnhanced - - lama_register["EnhancedAdaptiveDifferentialEvolutionEnhanced"] = EnhancedAdaptiveDifferentialEvolutionEnhanced - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionEnhanced + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionEnhanced import ( + EnhancedAdaptiveDifferentialEvolutionEnhanced, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionEnhanced"] = ( + EnhancedAdaptiveDifferentialEvolutionEnhanced + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionEnhanced print("EnhancedAdaptiveDifferentialEvolutionEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefined import EnhancedAdaptiveDifferentialEvolutionRefined - - lama_register["EnhancedAdaptiveDifferentialEvolutionRefined"] = EnhancedAdaptiveDifferentialEvolutionRefined - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionRefined + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefined import ( + EnhancedAdaptiveDifferentialEvolutionRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionRefined print("EnhancedAdaptiveDifferentialEvolutionRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedImproved import EnhancedAdaptiveDifferentialEvolutionRefinedImproved - - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedImproved"] = EnhancedAdaptiveDifferentialEvolutionRefinedImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionRefinedImproved + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedImproved import ( + EnhancedAdaptiveDifferentialEvolutionRefinedImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionRefinedImproved print("EnhancedAdaptiveDifferentialEvolutionRefinedImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV2 import EnhancedAdaptiveDifferentialEvolutionRefinedV2 - - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV2"] = EnhancedAdaptiveDifferentialEvolutionRefinedV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionRefinedV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV2 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV2"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionRefinedV2 print("EnhancedAdaptiveDifferentialEvolutionRefinedV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV3 import EnhancedAdaptiveDifferentialEvolutionRefinedV3 - - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV3"] = EnhancedAdaptiveDifferentialEvolutionRefinedV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionRefinedV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV3 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV3, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV3"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionRefinedV3 print("EnhancedAdaptiveDifferentialEvolutionRefinedV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV4 import EnhancedAdaptiveDifferentialEvolutionRefinedV4 - - lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV4"] = EnhancedAdaptiveDifferentialEvolutionRefinedV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionRefinedV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionRefinedV4 import ( + EnhancedAdaptiveDifferentialEvolutionRefinedV4, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionRefinedV4"] = ( + EnhancedAdaptiveDifferentialEvolutionRefinedV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionRefinedV4 print("EnhancedAdaptiveDifferentialEvolutionRefinedV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV10 import EnhancedAdaptiveDifferentialEvolutionV10 +try: # EnhancedAdaptiveDifferentialEvolutionV10 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV10 import ( + EnhancedAdaptiveDifferentialEvolutionV10, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV10"] = EnhancedAdaptiveDifferentialEvolutionV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV10" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV10", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV10 print("EnhancedAdaptiveDifferentialEvolutionV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV11 import EnhancedAdaptiveDifferentialEvolutionV11 +try: # EnhancedAdaptiveDifferentialEvolutionV11 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV11 import ( + EnhancedAdaptiveDifferentialEvolutionV11, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV11"] = EnhancedAdaptiveDifferentialEvolutionV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV11" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV11", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV11 print("EnhancedAdaptiveDifferentialEvolutionV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV12 import EnhancedAdaptiveDifferentialEvolutionV12 +try: # EnhancedAdaptiveDifferentialEvolutionV12 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV12 import ( + EnhancedAdaptiveDifferentialEvolutionV12, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV12"] = EnhancedAdaptiveDifferentialEvolutionV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV12" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV12", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV12 print("EnhancedAdaptiveDifferentialEvolutionV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV13 import EnhancedAdaptiveDifferentialEvolutionV13 +try: # EnhancedAdaptiveDifferentialEvolutionV13 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV13 import ( + EnhancedAdaptiveDifferentialEvolutionV13, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV13"] = EnhancedAdaptiveDifferentialEvolutionV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV13" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV13", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV13 print("EnhancedAdaptiveDifferentialEvolutionV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV14 import EnhancedAdaptiveDifferentialEvolutionV14 +try: # EnhancedAdaptiveDifferentialEvolutionV14 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV14 import ( + EnhancedAdaptiveDifferentialEvolutionV14, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV14"] = EnhancedAdaptiveDifferentialEvolutionV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV14" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV14", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV14 print("EnhancedAdaptiveDifferentialEvolutionV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV15 import EnhancedAdaptiveDifferentialEvolutionV15 +try: # EnhancedAdaptiveDifferentialEvolutionV15 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV15 import ( + EnhancedAdaptiveDifferentialEvolutionV15, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV15"] = EnhancedAdaptiveDifferentialEvolutionV15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV15" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV15", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV15 print("EnhancedAdaptiveDifferentialEvolutionV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV16 import EnhancedAdaptiveDifferentialEvolutionV16 +try: # EnhancedAdaptiveDifferentialEvolutionV16 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV16 import ( + EnhancedAdaptiveDifferentialEvolutionV16, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV16"] = EnhancedAdaptiveDifferentialEvolutionV16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV16" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV16", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV16 print("EnhancedAdaptiveDifferentialEvolutionV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV17 import EnhancedAdaptiveDifferentialEvolutionV17 +try: # EnhancedAdaptiveDifferentialEvolutionV17 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV17 import ( + EnhancedAdaptiveDifferentialEvolutionV17, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV17"] = EnhancedAdaptiveDifferentialEvolutionV17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV17" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV17", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV17 print("EnhancedAdaptiveDifferentialEvolutionV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV18 import EnhancedAdaptiveDifferentialEvolutionV18 +try: # EnhancedAdaptiveDifferentialEvolutionV18 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV18 import ( + EnhancedAdaptiveDifferentialEvolutionV18, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV18"] = EnhancedAdaptiveDifferentialEvolutionV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV18" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV18", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV18 print("EnhancedAdaptiveDifferentialEvolutionV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV19 import EnhancedAdaptiveDifferentialEvolutionV19 +try: # EnhancedAdaptiveDifferentialEvolutionV19 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV19 import ( + EnhancedAdaptiveDifferentialEvolutionV19, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV19"] = EnhancedAdaptiveDifferentialEvolutionV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV19" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV19", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV19 print("EnhancedAdaptiveDifferentialEvolutionV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV20 import EnhancedAdaptiveDifferentialEvolutionV20 +try: # EnhancedAdaptiveDifferentialEvolutionV20 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV20 import ( + EnhancedAdaptiveDifferentialEvolutionV20, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV20"] = EnhancedAdaptiveDifferentialEvolutionV20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV20" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV20", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV20 print("EnhancedAdaptiveDifferentialEvolutionV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV21 import EnhancedAdaptiveDifferentialEvolutionV21 +try: # EnhancedAdaptiveDifferentialEvolutionV21 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV21 import ( + EnhancedAdaptiveDifferentialEvolutionV21, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV21"] = EnhancedAdaptiveDifferentialEvolutionV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV21" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV21", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV21 print("EnhancedAdaptiveDifferentialEvolutionV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV22 import EnhancedAdaptiveDifferentialEvolutionV22 +try: # EnhancedAdaptiveDifferentialEvolutionV22 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV22 import ( + EnhancedAdaptiveDifferentialEvolutionV22, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV22"] = EnhancedAdaptiveDifferentialEvolutionV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV22" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV22", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV22 print("EnhancedAdaptiveDifferentialEvolutionV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV23 import EnhancedAdaptiveDifferentialEvolutionV23 +try: # EnhancedAdaptiveDifferentialEvolutionV23 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV23 import ( + EnhancedAdaptiveDifferentialEvolutionV23, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV23"] = EnhancedAdaptiveDifferentialEvolutionV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV23" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV23", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV23 print("EnhancedAdaptiveDifferentialEvolutionV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV24 import EnhancedAdaptiveDifferentialEvolutionV24 +try: # EnhancedAdaptiveDifferentialEvolutionV24 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV24 import ( + EnhancedAdaptiveDifferentialEvolutionV24, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV24"] = EnhancedAdaptiveDifferentialEvolutionV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV24" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV24", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV24 print("EnhancedAdaptiveDifferentialEvolutionV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV25 import EnhancedAdaptiveDifferentialEvolutionV25 +try: # EnhancedAdaptiveDifferentialEvolutionV25 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV25 import ( + EnhancedAdaptiveDifferentialEvolutionV25, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV25"] = EnhancedAdaptiveDifferentialEvolutionV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV25" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV25", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV25 print("EnhancedAdaptiveDifferentialEvolutionV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV26 import EnhancedAdaptiveDifferentialEvolutionV26 +try: # EnhancedAdaptiveDifferentialEvolutionV26 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV26 import ( + EnhancedAdaptiveDifferentialEvolutionV26, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV26"] = EnhancedAdaptiveDifferentialEvolutionV26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV26" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV26", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV26 print("EnhancedAdaptiveDifferentialEvolutionV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV27 import EnhancedAdaptiveDifferentialEvolutionV27 +try: # EnhancedAdaptiveDifferentialEvolutionV27 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV27 import ( + EnhancedAdaptiveDifferentialEvolutionV27, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV27"] = EnhancedAdaptiveDifferentialEvolutionV27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV27" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV27", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV27 print("EnhancedAdaptiveDifferentialEvolutionV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV28 import EnhancedAdaptiveDifferentialEvolutionV28 +try: # EnhancedAdaptiveDifferentialEvolutionV28 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV28 import ( + EnhancedAdaptiveDifferentialEvolutionV28, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV28"] = EnhancedAdaptiveDifferentialEvolutionV28 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV28" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV28", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV28 print("EnhancedAdaptiveDifferentialEvolutionV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV4 import EnhancedAdaptiveDifferentialEvolutionV4 +try: # EnhancedAdaptiveDifferentialEvolutionV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV4 import ( + EnhancedAdaptiveDifferentialEvolutionV4, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV4"] = EnhancedAdaptiveDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV4", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV4 print("EnhancedAdaptiveDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV5 import EnhancedAdaptiveDifferentialEvolutionV5 +try: # EnhancedAdaptiveDifferentialEvolutionV5 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV5 import ( + EnhancedAdaptiveDifferentialEvolutionV5, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV5"] = EnhancedAdaptiveDifferentialEvolutionV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV5" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV5", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV5 print("EnhancedAdaptiveDifferentialEvolutionV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV6 import EnhancedAdaptiveDifferentialEvolutionV6 +try: # EnhancedAdaptiveDifferentialEvolutionV6 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV6 import ( + EnhancedAdaptiveDifferentialEvolutionV6, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV6"] = EnhancedAdaptiveDifferentialEvolutionV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV6" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV6", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV6 print("EnhancedAdaptiveDifferentialEvolutionV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV7 import EnhancedAdaptiveDifferentialEvolutionV7 +try: # EnhancedAdaptiveDifferentialEvolutionV7 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV7 import ( + EnhancedAdaptiveDifferentialEvolutionV7, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV7"] = EnhancedAdaptiveDifferentialEvolutionV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV7" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV7", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV7 print("EnhancedAdaptiveDifferentialEvolutionV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV8 import EnhancedAdaptiveDifferentialEvolutionV8 +try: # EnhancedAdaptiveDifferentialEvolutionV8 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV8 import ( + EnhancedAdaptiveDifferentialEvolutionV8, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV8"] = EnhancedAdaptiveDifferentialEvolutionV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV8" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV8", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV8 print("EnhancedAdaptiveDifferentialEvolutionV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV9 import EnhancedAdaptiveDifferentialEvolutionV9 +try: # EnhancedAdaptiveDifferentialEvolutionV9 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionV9 import ( + EnhancedAdaptiveDifferentialEvolutionV9, + ) lama_register["EnhancedAdaptiveDifferentialEvolutionV9"] = EnhancedAdaptiveDifferentialEvolutionV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionV9" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionV9", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionV9 print("EnhancedAdaptiveDifferentialEvolutionV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch import EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch import ( + EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch"] = ( + EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch print("EnhancedAdaptiveDifferentialEvolutionWithBayesianLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation import EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation print("EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved import EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved", register=True) -except Exception as e: - print("EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters import EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved" + ).set_name( + "LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved", register=True + ) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved + print( + "EnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved can not be imported: ", + e, + ) +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParameters can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 import EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 print("EnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 import EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 print("EnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize import EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined import EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined"] = EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined import ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined"] = ( + EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined print("EnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters import EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters - - lama_register["EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters"] = EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters").set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters import ( + EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters, + ) + + lama_register["EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters"] = ( + EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters" + ).set_name("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters print("EnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialMemeticAlgorithm import EnhancedAdaptiveDifferentialMemeticAlgorithm - - lama_register["EnhancedAdaptiveDifferentialMemeticAlgorithm"] = EnhancedAdaptiveDifferentialMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm", register=True) -except Exception as e: +try: # EnhancedAdaptiveDifferentialMemeticAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveDifferentialMemeticAlgorithm import ( + EnhancedAdaptiveDifferentialMemeticAlgorithm, + ) + + lama_register["EnhancedAdaptiveDifferentialMemeticAlgorithm"] = ( + EnhancedAdaptiveDifferentialMemeticAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDifferentialMemeticAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveDifferentialMemeticAlgorithm print("EnhancedAdaptiveDifferentialMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDirectionalBiasQuorumOptimization import EnhancedAdaptiveDirectionalBiasQuorumOptimization - - lama_register["EnhancedAdaptiveDirectionalBiasQuorumOptimization"] = EnhancedAdaptiveDirectionalBiasQuorumOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveDirectionalBiasQuorumOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveDirectionalBiasQuorumOptimization import ( + EnhancedAdaptiveDirectionalBiasQuorumOptimization, + ) + + lama_register["EnhancedAdaptiveDirectionalBiasQuorumOptimization"] = ( + EnhancedAdaptiveDirectionalBiasQuorumOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMAEnhancedAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: # EnhancedAdaptiveDirectionalBiasQuorumOptimization print("EnhancedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedEvolutionStrategy import EnhancedAdaptiveDiversifiedEvolutionStrategy - - lama_register["EnhancedAdaptiveDiversifiedEvolutionStrategy"] = EnhancedAdaptiveDiversifiedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy").set_name("LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedEvolutionStrategy + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedEvolutionStrategy import ( + EnhancedAdaptiveDiversifiedEvolutionStrategy, + ) + + lama_register["EnhancedAdaptiveDiversifiedEvolutionStrategy"] = ( + EnhancedAdaptiveDiversifiedEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedEvolutionStrategy", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedEvolutionStrategy print("EnhancedAdaptiveDiversifiedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization - - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 - - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 - - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 import EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 - - lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4"] = EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 import ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4"] = ( + EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 print("EnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearch import EnhancedAdaptiveDiversifiedHarmonySearch +try: # EnhancedAdaptiveDiversifiedHarmonySearch + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearch import ( + EnhancedAdaptiveDiversifiedHarmonySearch, + ) lama_register["EnhancedAdaptiveDiversifiedHarmonySearch"] = EnhancedAdaptiveDiversifiedHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearch print("EnhancedAdaptiveDiversifiedHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizer import EnhancedAdaptiveDiversifiedHarmonySearchOptimizer - - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizer"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizer import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizer, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizer"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizer print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 - - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 - - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 - - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 import EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 - - lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5"] = EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 import ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5, + ) + + lama_register["EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5"] = ( + EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 print("EnhancedAdaptiveDiversifiedHarmonySearchOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV2 import EnhancedAdaptiveDiversifiedHarmonySearchV2 +try: # EnhancedAdaptiveDiversifiedHarmonySearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV2 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV2, + ) lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV2"] = EnhancedAdaptiveDiversifiedHarmonySearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchV2 print("EnhancedAdaptiveDiversifiedHarmonySearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV3 import EnhancedAdaptiveDiversifiedHarmonySearchV3 +try: # EnhancedAdaptiveDiversifiedHarmonySearchV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV3 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV3, + ) lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV3"] = EnhancedAdaptiveDiversifiedHarmonySearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchV3 print("EnhancedAdaptiveDiversifiedHarmonySearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV4 import EnhancedAdaptiveDiversifiedHarmonySearchV4 +try: # EnhancedAdaptiveDiversifiedHarmonySearchV4 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedHarmonySearchV4 import ( + EnhancedAdaptiveDiversifiedHarmonySearchV4, + ) lama_register["EnhancedAdaptiveDiversifiedHarmonySearchV4"] = EnhancedAdaptiveDiversifiedHarmonySearchV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4").set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedHarmonySearchV4 print("EnhancedAdaptiveDiversifiedHarmonySearchV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm import EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm - - lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm"] = EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm import ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm"] = ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 import EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 - - lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2"] = EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 import ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2"] = ( + EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 print("EnhancedAdaptiveDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedSearch import EnhancedAdaptiveDiversifiedSearch +try: # EnhancedAdaptiveDiversifiedSearch + from nevergrad.optimization.lama.EnhancedAdaptiveDiversifiedSearch import ( + EnhancedAdaptiveDiversifiedSearch, + ) lama_register["EnhancedAdaptiveDiversifiedSearch"] = EnhancedAdaptiveDiversifiedSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDiversifiedSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedSearch").set_name("LLAMAEnhancedAdaptiveDiversifiedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDiversifiedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDiversifiedSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDiversifiedSearch" + ).set_name("LLAMAEnhancedAdaptiveDiversifiedSearch", register=True) +except Exception as e: # EnhancedAdaptiveDiversifiedSearch print("EnhancedAdaptiveDiversifiedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDolphinPodOptimization import EnhancedAdaptiveDolphinPodOptimization +try: # EnhancedAdaptiveDolphinPodOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveDolphinPodOptimization import ( + EnhancedAdaptiveDolphinPodOptimization, + ) lama_register["EnhancedAdaptiveDolphinPodOptimization"] = EnhancedAdaptiveDolphinPodOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDolphinPodOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDolphinPodOptimization").set_name("LLAMAEnhancedAdaptiveDolphinPodOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDolphinPodOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDolphinPodOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDolphinPodOptimization" + ).set_name("LLAMAEnhancedAdaptiveDolphinPodOptimization", register=True) +except Exception as e: # EnhancedAdaptiveDolphinPodOptimization print("EnhancedAdaptiveDolphinPodOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization import EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization - - lama_register["EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization"] = EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization").set_name("LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization import ( + EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization"] = ( + EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization print("EnhancedAdaptiveDualPhaseEvolutionarySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl import EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl - - lama_register["EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl").set_name("LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) -except Exception as e: +try: # EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl import ( + EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl, + ) + + lama_register["EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl"] = ( + EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl", register=True) +except Exception as e: # EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl print("EnhancedAdaptiveDualPhaseOptimizationWithDynamicParameterControl can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV2 import EnhancedAdaptiveDualPhaseStrategyV2 +try: # EnhancedAdaptiveDualPhaseStrategyV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV2 import ( + EnhancedAdaptiveDualPhaseStrategyV2, + ) lama_register["EnhancedAdaptiveDualPhaseStrategyV2"] = EnhancedAdaptiveDualPhaseStrategyV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDualPhaseStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2").set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDualPhaseStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseStrategyV2" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV2", register=True) +except Exception as e: # EnhancedAdaptiveDualPhaseStrategyV2 print("EnhancedAdaptiveDualPhaseStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV5 import EnhancedAdaptiveDualPhaseStrategyV5 +try: # EnhancedAdaptiveDualPhaseStrategyV5 + from nevergrad.optimization.lama.EnhancedAdaptiveDualPhaseStrategyV5 import ( + EnhancedAdaptiveDualPhaseStrategyV5, + ) lama_register["EnhancedAdaptiveDualPhaseStrategyV5"] = EnhancedAdaptiveDualPhaseStrategyV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDualPhaseStrategyV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5").set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDualPhaseStrategyV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualPhaseStrategyV5" + ).set_name("LLAMAEnhancedAdaptiveDualPhaseStrategyV5", register=True) +except Exception as e: # EnhancedAdaptiveDualPhaseStrategyV5 print("EnhancedAdaptiveDualPhaseStrategyV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDualStrategyOptimizer import EnhancedAdaptiveDualStrategyOptimizer +try: # EnhancedAdaptiveDualStrategyOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveDualStrategyOptimizer import ( + EnhancedAdaptiveDualStrategyOptimizer, + ) lama_register["EnhancedAdaptiveDualStrategyOptimizer"] = EnhancedAdaptiveDualStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualStrategyOptimizer").set_name("LLAMAEnhancedAdaptiveDualStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDualStrategyOptimizer" + ).set_name("LLAMAEnhancedAdaptiveDualStrategyOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveDualStrategyOptimizer print("EnhancedAdaptiveDualStrategyOptimizer can not be imported: ", e) -try: +try: # EnhancedAdaptiveDynamicDE from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDE import EnhancedAdaptiveDynamicDE lama_register["EnhancedAdaptiveDynamicDE"] = EnhancedAdaptiveDynamicDE - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE").set_name("LLAMAEnhancedAdaptiveDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDE").set_name( + "LLAMAEnhancedAdaptiveDynamicDE", register=True + ) +except Exception as e: # EnhancedAdaptiveDynamicDE print("EnhancedAdaptiveDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDifferentialEvolution import EnhancedAdaptiveDynamicDifferentialEvolution - - lama_register["EnhancedAdaptiveDynamicDifferentialEvolution"] = EnhancedAdaptiveDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDifferentialEvolution import ( + EnhancedAdaptiveDynamicDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveDynamicDifferentialEvolution"] = ( + EnhancedAdaptiveDynamicDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveDynamicDifferentialEvolution print("EnhancedAdaptiveDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV19 import EnhancedAdaptiveDynamicDualPhaseStrategyV19 +try: # EnhancedAdaptiveDynamicDualPhaseStrategyV19 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV19 import ( + EnhancedAdaptiveDynamicDualPhaseStrategyV19, + ) lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV19"] = EnhancedAdaptiveDynamicDualPhaseStrategyV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19").set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19" + ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV19", register=True) +except Exception as e: # EnhancedAdaptiveDynamicDualPhaseStrategyV19 print("EnhancedAdaptiveDynamicDualPhaseStrategyV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV22 import EnhancedAdaptiveDynamicDualPhaseStrategyV22 +try: # EnhancedAdaptiveDynamicDualPhaseStrategyV22 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicDualPhaseStrategyV22 import ( + EnhancedAdaptiveDynamicDualPhaseStrategyV22, + ) lama_register["EnhancedAdaptiveDynamicDualPhaseStrategyV22"] = EnhancedAdaptiveDynamicDualPhaseStrategyV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22").set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22" + ).set_name("LLAMAEnhancedAdaptiveDynamicDualPhaseStrategyV22", register=True) +except Exception as e: # EnhancedAdaptiveDynamicDualPhaseStrategyV22 print("EnhancedAdaptiveDynamicDualPhaseStrategyV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithm import EnhancedAdaptiveDynamicFireworkAlgorithm +try: # EnhancedAdaptiveDynamicFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithm import ( + EnhancedAdaptiveDynamicFireworkAlgorithm, + ) lama_register["EnhancedAdaptiveDynamicFireworkAlgorithm"] = EnhancedAdaptiveDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkAlgorithm print("EnhancedAdaptiveDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced import EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced - - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced"] = EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced import ( + EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced print("EnhancedAdaptiveDynamicFireworkAlgorithmEnhanced can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmImproved import EnhancedAdaptiveDynamicFireworkAlgorithmImproved - - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmImproved"] = EnhancedAdaptiveDynamicFireworkAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkAlgorithmImproved + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmImproved import ( + EnhancedAdaptiveDynamicFireworkAlgorithmImproved, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmImproved"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkAlgorithmImproved print("EnhancedAdaptiveDynamicFireworkAlgorithmImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmRefined import EnhancedAdaptiveDynamicFireworkAlgorithmRefined - - lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmRefined"] = EnhancedAdaptiveDynamicFireworkAlgorithmRefined - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined").set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkAlgorithmRefined + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkAlgorithmRefined import ( + EnhancedAdaptiveDynamicFireworkAlgorithmRefined, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkAlgorithmRefined"] = ( + EnhancedAdaptiveDynamicFireworkAlgorithmRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkAlgorithmRefined print("EnhancedAdaptiveDynamicFireworkAlgorithmRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 - - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 - - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 import EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 - - lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7"] = EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7").set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 import ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7, + ) + + lama_register["EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7"] = ( + EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7" + ).set_name("LLAMAEnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7", register=True) +except Exception as e: # EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 print("EnhancedAdaptiveDynamicFireworkDifferentialEvolutionV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearch import EnhancedAdaptiveDynamicHarmonySearch +try: # EnhancedAdaptiveDynamicHarmonySearch + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearch import ( + EnhancedAdaptiveDynamicHarmonySearch, + ) lama_register["EnhancedAdaptiveDynamicHarmonySearch"] = EnhancedAdaptiveDynamicHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearch").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearch", register=True) +except Exception as e: # EnhancedAdaptiveDynamicHarmonySearch print("EnhancedAdaptiveDynamicHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV2 import EnhancedAdaptiveDynamicHarmonySearchV2 +try: # EnhancedAdaptiveDynamicHarmonySearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV2 import ( + EnhancedAdaptiveDynamicHarmonySearchV2, + ) lama_register["EnhancedAdaptiveDynamicHarmonySearchV2"] = EnhancedAdaptiveDynamicHarmonySearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2", register=True) +except Exception as e: # EnhancedAdaptiveDynamicHarmonySearchV2 print("EnhancedAdaptiveDynamicHarmonySearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV3 import EnhancedAdaptiveDynamicHarmonySearchV3 +try: # EnhancedAdaptiveDynamicHarmonySearchV3 + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicHarmonySearchV3 import ( + EnhancedAdaptiveDynamicHarmonySearchV3, + ) lama_register["EnhancedAdaptiveDynamicHarmonySearchV3"] = EnhancedAdaptiveDynamicHarmonySearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3", register=True) +except Exception as e: # EnhancedAdaptiveDynamicHarmonySearchV3 print("EnhancedAdaptiveDynamicHarmonySearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm import EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm - - lama_register["EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) + + lama_register["EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( + EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm print("EnhancedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution import EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution print("EnhancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveDynamicQuantumSwarmOptimization import EnhancedAdaptiveDynamicQuantumSwarmOptimization - - lama_register["EnhancedAdaptiveDynamicQuantumSwarmOptimization"] = EnhancedAdaptiveDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveDynamicQuantumSwarmOptimization import ( + EnhancedAdaptiveDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveDynamicQuantumSwarmOptimization"] = ( + EnhancedAdaptiveDynamicQuantumSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveDynamicQuantumSwarmOptimization print("EnhancedAdaptiveDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteDifferentialEvolution import EnhancedAdaptiveEliteDifferentialEvolution +try: # EnhancedAdaptiveEliteDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveEliteDifferentialEvolution import ( + EnhancedAdaptiveEliteDifferentialEvolution, + ) lama_register["EnhancedAdaptiveEliteDifferentialEvolution"] = EnhancedAdaptiveEliteDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveEliteDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveEliteDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveEliteDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveEliteDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveEliteDifferentialEvolution print("EnhancedAdaptiveEliteDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteGuidedMutationDE_v2 import EnhancedAdaptiveEliteGuidedMutationDE_v2 +try: # EnhancedAdaptiveEliteGuidedMutationDE_v2 + from nevergrad.optimization.lama.EnhancedAdaptiveEliteGuidedMutationDE_v2 import ( + EnhancedAdaptiveEliteGuidedMutationDE_v2, + ) lama_register["EnhancedAdaptiveEliteGuidedMutationDE_v2"] = EnhancedAdaptiveEliteGuidedMutationDE_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2").set_name("LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2" + ).set_name("LLAMAEnhancedAdaptiveEliteGuidedMutationDE_v2", register=True) +except Exception as e: # EnhancedAdaptiveEliteGuidedMutationDE_v2 print("EnhancedAdaptiveEliteGuidedMutationDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution import EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution - - lama_register["EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveEliteMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution print("EnhancedAdaptiveEliteMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveEnvironmentalStrategyV24 import EnhancedAdaptiveEnvironmentalStrategyV24 +try: # EnhancedAdaptiveEnvironmentalStrategyV24 + from nevergrad.optimization.lama.EnhancedAdaptiveEnvironmentalStrategyV24 import ( + EnhancedAdaptiveEnvironmentalStrategyV24, + ) lama_register["EnhancedAdaptiveEnvironmentalStrategyV24"] = EnhancedAdaptiveEnvironmentalStrategyV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveEnvironmentalStrategyV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24").set_name("LLAMAEnhancedAdaptiveEnvironmentalStrategyV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveEnvironmentalStrategyV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEnvironmentalStrategyV24" + ).set_name("LLAMAEnhancedAdaptiveEnvironmentalStrategyV24", register=True) +except Exception as e: # EnhancedAdaptiveEnvironmentalStrategyV24 print("EnhancedAdaptiveEnvironmentalStrategyV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy import EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy - - lama_register["EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy"] = EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy").set_name("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) -except Exception as e: +try: # EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy + from nevergrad.optimization.lama.EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy import ( + EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy, + ) + + lama_register["EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy"] = ( + EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy" + ).set_name("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy", register=True) +except Exception as e: # EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy print("EnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveExplorationExploitationAlgorithm import EnhancedAdaptiveExplorationExploitationAlgorithm - - lama_register["EnhancedAdaptiveExplorationExploitationAlgorithm"] = EnhancedAdaptiveExplorationExploitationAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm", register=True) -except Exception as e: +try: # EnhancedAdaptiveExplorationExploitationAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationExploitationAlgorithm import ( + EnhancedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["EnhancedAdaptiveExplorationExploitationAlgorithm"] = ( + EnhancedAdaptiveExplorationExploitationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveExplorationExploitationAlgorithm print("EnhancedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveExplorationOptimizer import EnhancedAdaptiveExplorationOptimizer +try: # EnhancedAdaptiveExplorationOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveExplorationOptimizer import ( + EnhancedAdaptiveExplorationOptimizer, + ) lama_register["EnhancedAdaptiveExplorationOptimizer"] = EnhancedAdaptiveExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationOptimizer").set_name("LLAMAEnhancedAdaptiveExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveExplorationOptimizer" + ).set_name("LLAMAEnhancedAdaptiveExplorationOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveExplorationOptimizer print("EnhancedAdaptiveExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveFireworkAlgorithm import EnhancedAdaptiveFireworkAlgorithm +try: # EnhancedAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveFireworkAlgorithm import ( + EnhancedAdaptiveFireworkAlgorithm, + ) lama_register["EnhancedAdaptiveFireworkAlgorithm"] = EnhancedAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveFireworkAlgorithm print("EnhancedAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveFireworksAlgorithm import EnhancedAdaptiveFireworksAlgorithm +try: # EnhancedAdaptiveFireworksAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveFireworksAlgorithm import ( + EnhancedAdaptiveFireworksAlgorithm, + ) lama_register["EnhancedAdaptiveFireworksAlgorithm"] = EnhancedAdaptiveFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworksAlgorithm").set_name("LLAMAEnhancedAdaptiveFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveFireworksAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveFireworksAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveFireworksAlgorithm print("EnhancedAdaptiveFireworksAlgorithm can not be imported: ", e) -try: +try: # EnhancedAdaptiveGaussianSearch from nevergrad.optimization.lama.EnhancedAdaptiveGaussianSearch import EnhancedAdaptiveGaussianSearch lama_register["EnhancedAdaptiveGaussianSearch"] = EnhancedAdaptiveGaussianSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGaussianSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGaussianSearch").set_name("LLAMAEnhancedAdaptiveGaussianSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGaussianSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGaussianSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGaussianSearch" + ).set_name("LLAMAEnhancedAdaptiveGaussianSearch", register=True) +except Exception as e: # EnhancedAdaptiveGaussianSearch print("EnhancedAdaptiveGaussianSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGradientBalancedCrossoverPSO import EnhancedAdaptiveGradientBalancedCrossoverPSO - - lama_register["EnhancedAdaptiveGradientBalancedCrossoverPSO"] = EnhancedAdaptiveGradientBalancedCrossoverPSO - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) -except Exception as e: +try: # EnhancedAdaptiveGradientBalancedCrossoverPSO + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBalancedCrossoverPSO import ( + EnhancedAdaptiveGradientBalancedCrossoverPSO, + ) + + lama_register["EnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( + EnhancedAdaptiveGradientBalancedCrossoverPSO + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMAEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: # EnhancedAdaptiveGradientBalancedCrossoverPSO print("EnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing import EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing - - lama_register["EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing"] = EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing import ( + EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing print("EnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGranularStrategyV26 import EnhancedAdaptiveGranularStrategyV26 +try: # EnhancedAdaptiveGranularStrategyV26 + from nevergrad.optimization.lama.EnhancedAdaptiveGranularStrategyV26 import ( + EnhancedAdaptiveGranularStrategyV26, + ) lama_register["EnhancedAdaptiveGranularStrategyV26"] = EnhancedAdaptiveGranularStrategyV26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGranularStrategyV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGranularStrategyV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGranularStrategyV26").set_name("LLAMAEnhancedAdaptiveGranularStrategyV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGranularStrategyV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGranularStrategyV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGranularStrategyV26" + ).set_name("LLAMAEnhancedAdaptiveGranularStrategyV26", register=True) +except Exception as e: # EnhancedAdaptiveGranularStrategyV26 print("EnhancedAdaptiveGranularStrategyV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV10 import EnhancedAdaptiveGravitationalSwarmIntelligenceV10 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV10"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV10 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV10 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV10, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV10"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV10 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV11 import EnhancedAdaptiveGravitationalSwarmIntelligenceV11 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV11"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV11 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV11 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV11, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV11"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV11 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV11 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV12 import EnhancedAdaptiveGravitationalSwarmIntelligenceV12 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV12"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV12 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV12 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV12, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV12"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV12 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV19 import EnhancedAdaptiveGravitationalSwarmIntelligenceV19 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV19"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV19 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV19 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV19, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV19"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV19 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV19 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV20 import EnhancedAdaptiveGravitationalSwarmIntelligenceV20 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV20"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV20 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV20 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV20, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV20"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV20 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV20 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV21 import EnhancedAdaptiveGravitationalSwarmIntelligenceV21 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV21"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV21 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV21 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV21, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV21"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV21 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV21 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV27 import EnhancedAdaptiveGravitationalSwarmIntelligenceV27 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV27"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV27 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV27 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV27, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV27"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV27 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV27 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV28 import EnhancedAdaptiveGravitationalSwarmIntelligenceV28 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV28"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV28 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV28 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV28 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV28, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV28"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV28 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV28 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV3 import EnhancedAdaptiveGravitationalSwarmIntelligenceV3 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV3"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV3 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV3 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV3, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV3"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV3 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV4 import EnhancedAdaptiveGravitationalSwarmIntelligenceV4 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV4"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV4 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV4 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV4, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV4"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV4 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV5 import EnhancedAdaptiveGravitationalSwarmIntelligenceV5 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV5"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV5 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV5 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV5, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV5"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV5 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV6 import EnhancedAdaptiveGravitationalSwarmIntelligenceV6 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV6"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV6 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV6 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV6, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV6"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV6 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV7 import EnhancedAdaptiveGravitationalSwarmIntelligenceV7 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV7"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV7 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV7 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV7, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV7"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV7 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV8 import EnhancedAdaptiveGravitationalSwarmIntelligenceV8 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV8"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV8 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV8 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV8, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV8"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV8 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV9 import EnhancedAdaptiveGravitationalSwarmIntelligenceV9 - - lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV9"] = EnhancedAdaptiveGravitationalSwarmIntelligenceV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9", register=True) -except Exception as e: +try: # EnhancedAdaptiveGravitationalSwarmIntelligenceV9 + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmIntelligenceV9 import ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV9, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmIntelligenceV9"] = ( + EnhancedAdaptiveGravitationalSwarmIntelligenceV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9" + ).set_name("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9", register=True) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmIntelligenceV9 print("EnhancedAdaptiveGravitationalSwarmIntelligenceV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - - lama_register["EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation").set_name("LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True) -except Exception as e: - print("EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGuidedDifferentialEvolution import EnhancedAdaptiveGuidedDifferentialEvolution +try: # EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + from nevergrad.optimization.lama.EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation import ( + EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation, + ) + + lama_register["EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation"] = ( + EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation" + ).set_name( + "LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation", register=True + ) +except Exception as e: # EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation + print( + "EnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation can not be imported: ", + e, + ) +try: # EnhancedAdaptiveGuidedDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedDifferentialEvolution import ( + EnhancedAdaptiveGuidedDifferentialEvolution, + ) lama_register["EnhancedAdaptiveGuidedDifferentialEvolution"] = EnhancedAdaptiveGuidedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveGuidedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGuidedDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveGuidedDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveGuidedDifferentialEvolution print("EnhancedAdaptiveGuidedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveGuidedMutationOptimizer import EnhancedAdaptiveGuidedMutationOptimizer +try: # EnhancedAdaptiveGuidedMutationOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveGuidedMutationOptimizer import ( + EnhancedAdaptiveGuidedMutationOptimizer, + ) lama_register["EnhancedAdaptiveGuidedMutationOptimizer"] = EnhancedAdaptiveGuidedMutationOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer").set_name("LLAMAEnhancedAdaptiveGuidedMutationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAEnhancedAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveGuidedMutationOptimizer print("EnhancedAdaptiveGuidedMutationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearch import EnhancedAdaptiveHarmonicFireworksTabuSearch +try: # EnhancedAdaptiveHarmonicFireworksTabuSearch + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearch import ( + EnhancedAdaptiveHarmonicFireworksTabuSearch, + ) lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearch"] = EnhancedAdaptiveHarmonicFireworksTabuSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicFireworksTabuSearch print("EnhancedAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearchV2 import EnhancedAdaptiveHarmonicFireworksTabuSearchV2 - - lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearchV2"] = EnhancedAdaptiveHarmonicFireworksTabuSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonicFireworksTabuSearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicFireworksTabuSearchV2 import ( + EnhancedAdaptiveHarmonicFireworksTabuSearchV2, + ) + + lama_register["EnhancedAdaptiveHarmonicFireworksTabuSearchV2"] = ( + EnhancedAdaptiveHarmonicFireworksTabuSearchV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicFireworksTabuSearchV2 print("EnhancedAdaptiveHarmonicFireworksTabuSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicOptimizationV2 import EnhancedAdaptiveHarmonicOptimizationV2 +try: # EnhancedAdaptiveHarmonicOptimizationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicOptimizationV2 import ( + EnhancedAdaptiveHarmonicOptimizationV2, + ) lama_register["EnhancedAdaptiveHarmonicOptimizationV2"] = EnhancedAdaptiveHarmonicOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2").set_name("LLAMAEnhancedAdaptiveHarmonicOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonicOptimizationV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicOptimizationV2 print("EnhancedAdaptiveHarmonicOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV10 import EnhancedAdaptiveHarmonicTabuSearchV10 +try: # EnhancedAdaptiveHarmonicTabuSearchV10 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV10 import ( + EnhancedAdaptiveHarmonicTabuSearchV10, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV10"] = EnhancedAdaptiveHarmonicTabuSearchV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV10 print("EnhancedAdaptiveHarmonicTabuSearchV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV18 import EnhancedAdaptiveHarmonicTabuSearchV18 +try: # EnhancedAdaptiveHarmonicTabuSearchV18 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV18 import ( + EnhancedAdaptiveHarmonicTabuSearchV18, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV18"] = EnhancedAdaptiveHarmonicTabuSearchV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV18 print("EnhancedAdaptiveHarmonicTabuSearchV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV21 import EnhancedAdaptiveHarmonicTabuSearchV21 +try: # EnhancedAdaptiveHarmonicTabuSearchV21 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV21 import ( + EnhancedAdaptiveHarmonicTabuSearchV21, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV21"] = EnhancedAdaptiveHarmonicTabuSearchV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV21 print("EnhancedAdaptiveHarmonicTabuSearchV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV22 import EnhancedAdaptiveHarmonicTabuSearchV22 +try: # EnhancedAdaptiveHarmonicTabuSearchV22 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV22 import ( + EnhancedAdaptiveHarmonicTabuSearchV22, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV22"] = EnhancedAdaptiveHarmonicTabuSearchV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV22 print("EnhancedAdaptiveHarmonicTabuSearchV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV23 import EnhancedAdaptiveHarmonicTabuSearchV23 +try: # EnhancedAdaptiveHarmonicTabuSearchV23 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV23 import ( + EnhancedAdaptiveHarmonicTabuSearchV23, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV23"] = EnhancedAdaptiveHarmonicTabuSearchV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV23 print("EnhancedAdaptiveHarmonicTabuSearchV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV25 import EnhancedAdaptiveHarmonicTabuSearchV25 +try: # EnhancedAdaptiveHarmonicTabuSearchV25 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV25 import ( + EnhancedAdaptiveHarmonicTabuSearchV25, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV25"] = EnhancedAdaptiveHarmonicTabuSearchV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV25 print("EnhancedAdaptiveHarmonicTabuSearchV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV26 import EnhancedAdaptiveHarmonicTabuSearchV26 +try: # EnhancedAdaptiveHarmonicTabuSearchV26 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV26 import ( + EnhancedAdaptiveHarmonicTabuSearchV26, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV26"] = EnhancedAdaptiveHarmonicTabuSearchV26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV26" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV26 print("EnhancedAdaptiveHarmonicTabuSearchV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV27 import EnhancedAdaptiveHarmonicTabuSearchV27 +try: # EnhancedAdaptiveHarmonicTabuSearchV27 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV27 import ( + EnhancedAdaptiveHarmonicTabuSearchV27, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV27"] = EnhancedAdaptiveHarmonicTabuSearchV27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV27" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV27 print("EnhancedAdaptiveHarmonicTabuSearchV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV29 import EnhancedAdaptiveHarmonicTabuSearchV29 +try: # EnhancedAdaptiveHarmonicTabuSearchV29 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV29 import ( + EnhancedAdaptiveHarmonicTabuSearchV29, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV29"] = EnhancedAdaptiveHarmonicTabuSearchV29 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV29" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV29 print("EnhancedAdaptiveHarmonicTabuSearchV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV30 import EnhancedAdaptiveHarmonicTabuSearchV30 +try: # EnhancedAdaptiveHarmonicTabuSearchV30 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV30 import ( + EnhancedAdaptiveHarmonicTabuSearchV30, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV30"] = EnhancedAdaptiveHarmonicTabuSearchV30 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV30" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV30 print("EnhancedAdaptiveHarmonicTabuSearchV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV31 import EnhancedAdaptiveHarmonicTabuSearchV31 +try: # EnhancedAdaptiveHarmonicTabuSearchV31 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV31 import ( + EnhancedAdaptiveHarmonicTabuSearchV31, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV31"] = EnhancedAdaptiveHarmonicTabuSearchV31 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV31" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV31 print("EnhancedAdaptiveHarmonicTabuSearchV31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV9 import EnhancedAdaptiveHarmonicTabuSearchV9 +try: # EnhancedAdaptiveHarmonicTabuSearchV9 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonicTabuSearchV9 import ( + EnhancedAdaptiveHarmonicTabuSearchV9, + ) lama_register["EnhancedAdaptiveHarmonicTabuSearchV9"] = EnhancedAdaptiveHarmonicTabuSearchV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonicTabuSearchV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9").set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonicTabuSearchV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonicTabuSearchV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonicTabuSearchV9 print("EnhancedAdaptiveHarmonicTabuSearchV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyFireworksAlgorithm import EnhancedAdaptiveHarmonyFireworksAlgorithm +try: # EnhancedAdaptiveHarmonyFireworksAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyFireworksAlgorithm import ( + EnhancedAdaptiveHarmonyFireworksAlgorithm, + ) lama_register["EnhancedAdaptiveHarmonyFireworksAlgorithm"] = EnhancedAdaptiveHarmonyFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm").set_name("LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveHarmonyFireworksAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyFireworksAlgorithm print("EnhancedAdaptiveHarmonyFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithm import EnhancedAdaptiveHarmonyMemeticAlgorithm +try: # EnhancedAdaptiveHarmonyMemeticAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithm import ( + EnhancedAdaptiveHarmonyMemeticAlgorithm, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithm"] = EnhancedAdaptiveHarmonyMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithm print("EnhancedAdaptiveHarmonyMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV10 import EnhancedAdaptiveHarmonyMemeticAlgorithmV10 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV10 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV10 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV10, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV10"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV10 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV11 import EnhancedAdaptiveHarmonyMemeticAlgorithmV11 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV11 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV11 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV11, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV11"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV11", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV11 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV12 import EnhancedAdaptiveHarmonyMemeticAlgorithmV12 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV12 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV12 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV12, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV12"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV12", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV12 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV13 import EnhancedAdaptiveHarmonyMemeticAlgorithmV13 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV13 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV13 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV13, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV13"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV13", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV13 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV14 import EnhancedAdaptiveHarmonyMemeticAlgorithmV14 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV14 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV14 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV14, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV14"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV14", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV14 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV16 import EnhancedAdaptiveHarmonyMemeticAlgorithmV16 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV16 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV16 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV16, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV16"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV16", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV16 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV18 import EnhancedAdaptiveHarmonyMemeticAlgorithmV18 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV18 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV18 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV18, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV18"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV18", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV18 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV19 import EnhancedAdaptiveHarmonyMemeticAlgorithmV19 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV19 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV19 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV19, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV19"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV19", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV19 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV2 import EnhancedAdaptiveHarmonyMemeticAlgorithmV2 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV2 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV2, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV2"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV2 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV20 import EnhancedAdaptiveHarmonyMemeticAlgorithmV20 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV20 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV20 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV20, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV20"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV20", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV20 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV21 import EnhancedAdaptiveHarmonyMemeticAlgorithmV21 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV21 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV21 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV21, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV21"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV21", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV21 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV22 import EnhancedAdaptiveHarmonyMemeticAlgorithmV22 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV22 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV22 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV22, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV22"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV22", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV22 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV23 import EnhancedAdaptiveHarmonyMemeticAlgorithmV23 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV23 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV23 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV23, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV23"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV23", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV23 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV24 import EnhancedAdaptiveHarmonyMemeticAlgorithmV24 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV24 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV24 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV24, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV24"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV24", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV24 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV25 import EnhancedAdaptiveHarmonyMemeticAlgorithmV25 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV25 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV25 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV25, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV25"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV25", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV25 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV3 import EnhancedAdaptiveHarmonyMemeticAlgorithmV3 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV3 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV3, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV3"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV3 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV4 import EnhancedAdaptiveHarmonyMemeticAlgorithmV4 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV4 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV4, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV4"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV4 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV5 import EnhancedAdaptiveHarmonyMemeticAlgorithmV5 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV5 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV5, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV5"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV5 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV6 import EnhancedAdaptiveHarmonyMemeticAlgorithmV6 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV6 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV6, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV6"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV6 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV7 import EnhancedAdaptiveHarmonyMemeticAlgorithmV7 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV7 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV7, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV7"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV7 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV8 import EnhancedAdaptiveHarmonyMemeticAlgorithmV8 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV8 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV8, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV8"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV8 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV9 import EnhancedAdaptiveHarmonyMemeticAlgorithmV9 +try: # EnhancedAdaptiveHarmonyMemeticAlgorithmV9 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticAlgorithmV9 import ( + EnhancedAdaptiveHarmonyMemeticAlgorithmV9, + ) lama_register["EnhancedAdaptiveHarmonyMemeticAlgorithmV9"] = EnhancedAdaptiveHarmonyMemeticAlgorithmV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticAlgorithmV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticAlgorithmV9 print("EnhancedAdaptiveHarmonyMemeticAlgorithmV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV28 import EnhancedAdaptiveHarmonyMemeticOptimizationV28 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV28"] = EnhancedAdaptiveHarmonyMemeticOptimizationV28 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV28 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV28 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV28, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV28"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV28 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV28", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV28 print("EnhancedAdaptiveHarmonyMemeticOptimizationV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV29 import EnhancedAdaptiveHarmonyMemeticOptimizationV29 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV29"] = EnhancedAdaptiveHarmonyMemeticOptimizationV29 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV29 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV29 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV29, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV29"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV29 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV29", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV29 print("EnhancedAdaptiveHarmonyMemeticOptimizationV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV3 import EnhancedAdaptiveHarmonyMemeticOptimizationV3 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV3"] = EnhancedAdaptiveHarmonyMemeticOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV3 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV3"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV3 print("EnhancedAdaptiveHarmonyMemeticOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV30 import EnhancedAdaptiveHarmonyMemeticOptimizationV30 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV30"] = EnhancedAdaptiveHarmonyMemeticOptimizationV30 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV30 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV30 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV30, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV30"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV30 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV30", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV30 print("EnhancedAdaptiveHarmonyMemeticOptimizationV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV31 import EnhancedAdaptiveHarmonyMemeticOptimizationV31 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV31"] = EnhancedAdaptiveHarmonyMemeticOptimizationV31 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV31 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV31 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV31, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV31"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV31 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV31", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV31 print("EnhancedAdaptiveHarmonyMemeticOptimizationV31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV32 import EnhancedAdaptiveHarmonyMemeticOptimizationV32 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV32"] = EnhancedAdaptiveHarmonyMemeticOptimizationV32 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV32 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV32 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV32, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV32"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV32 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV32", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV32 print("EnhancedAdaptiveHarmonyMemeticOptimizationV32 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV33 import EnhancedAdaptiveHarmonyMemeticOptimizationV33 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV33"] = EnhancedAdaptiveHarmonyMemeticOptimizationV33 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV33 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV33 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV33, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV33"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV33 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV33", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV33 print("EnhancedAdaptiveHarmonyMemeticOptimizationV33 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV4 import EnhancedAdaptiveHarmonyMemeticOptimizationV4 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV4"] = EnhancedAdaptiveHarmonyMemeticOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV4 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV4"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV4 print("EnhancedAdaptiveHarmonyMemeticOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV5 import EnhancedAdaptiveHarmonyMemeticOptimizationV5 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV5"] = EnhancedAdaptiveHarmonyMemeticOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV5 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV5"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV5 print("EnhancedAdaptiveHarmonyMemeticOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV6 import EnhancedAdaptiveHarmonyMemeticOptimizationV6 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV6"] = EnhancedAdaptiveHarmonyMemeticOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV6 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV6"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV6 print("EnhancedAdaptiveHarmonyMemeticOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV7 import EnhancedAdaptiveHarmonyMemeticOptimizationV7 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV7"] = EnhancedAdaptiveHarmonyMemeticOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV7 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV7"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV7 print("EnhancedAdaptiveHarmonyMemeticOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV8 import EnhancedAdaptiveHarmonyMemeticOptimizationV8 - - lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV8"] = EnhancedAdaptiveHarmonyMemeticOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonyMemeticOptimizationV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticOptimizationV8 import ( + EnhancedAdaptiveHarmonyMemeticOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonyMemeticOptimizationV8"] = ( + EnhancedAdaptiveHarmonyMemeticOptimizationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticOptimizationV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticOptimizationV8 print("EnhancedAdaptiveHarmonyMemeticOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearch import EnhancedAdaptiveHarmonyMemeticSearch +try: # EnhancedAdaptiveHarmonyMemeticSearch + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearch import ( + EnhancedAdaptiveHarmonyMemeticSearch, + ) lama_register["EnhancedAdaptiveHarmonyMemeticSearch"] = EnhancedAdaptiveHarmonyMemeticSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticSearch" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearch", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticSearch print("EnhancedAdaptiveHarmonyMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearchV2 import EnhancedAdaptiveHarmonyMemeticSearchV2 +try: # EnhancedAdaptiveHarmonyMemeticSearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyMemeticSearchV2 import ( + EnhancedAdaptiveHarmonyMemeticSearchV2, + ) lama_register["EnhancedAdaptiveHarmonyMemeticSearchV2"] = EnhancedAdaptiveHarmonyMemeticSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyMemeticSearchV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyMemeticSearchV2 print("EnhancedAdaptiveHarmonyMemeticSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization import EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization - - lama_register["EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization print("EnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization import EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization - - lama_register["EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization print("EnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizer import EnhancedAdaptiveHarmonySearchOptimizer +try: # EnhancedAdaptiveHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizer import ( + EnhancedAdaptiveHarmonySearchOptimizer, + ) lama_register["EnhancedAdaptiveHarmonySearchOptimizer"] = EnhancedAdaptiveHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchOptimizer print("EnhancedAdaptiveHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizerV2 import EnhancedAdaptiveHarmonySearchOptimizerV2 +try: # EnhancedAdaptiveHarmonySearchOptimizerV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchOptimizerV2 import ( + EnhancedAdaptiveHarmonySearchOptimizerV2, + ) lama_register["EnhancedAdaptiveHarmonySearchOptimizerV2"] = EnhancedAdaptiveHarmonySearchOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchOptimizerV2 print("EnhancedAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV10 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV10 import EnhancedAdaptiveHarmonySearchV10 lama_register["EnhancedAdaptiveHarmonySearchV10"] = EnhancedAdaptiveHarmonySearchV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV10 print("EnhancedAdaptiveHarmonySearchV10 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV11 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV11 import EnhancedAdaptiveHarmonySearchV11 lama_register["EnhancedAdaptiveHarmonySearchV11"] = EnhancedAdaptiveHarmonySearchV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV11", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV11 print("EnhancedAdaptiveHarmonySearchV11 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV12 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV12 import EnhancedAdaptiveHarmonySearchV12 lama_register["EnhancedAdaptiveHarmonySearchV12"] = EnhancedAdaptiveHarmonySearchV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV12", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV12 print("EnhancedAdaptiveHarmonySearchV12 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV13 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV13 import EnhancedAdaptiveHarmonySearchV13 lama_register["EnhancedAdaptiveHarmonySearchV13"] = EnhancedAdaptiveHarmonySearchV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV13", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV13 print("EnhancedAdaptiveHarmonySearchV13 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV14 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV14 import EnhancedAdaptiveHarmonySearchV14 lama_register["EnhancedAdaptiveHarmonySearchV14"] = EnhancedAdaptiveHarmonySearchV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV14").set_name("LLAMAEnhancedAdaptiveHarmonySearchV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV14", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV14 print("EnhancedAdaptiveHarmonySearchV14 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV15 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV15 import EnhancedAdaptiveHarmonySearchV15 lama_register["EnhancedAdaptiveHarmonySearchV15"] = EnhancedAdaptiveHarmonySearchV15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV15").set_name("LLAMAEnhancedAdaptiveHarmonySearchV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV15" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV15", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV15 print("EnhancedAdaptiveHarmonySearchV15 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV16 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV16 import EnhancedAdaptiveHarmonySearchV16 lama_register["EnhancedAdaptiveHarmonySearchV16"] = EnhancedAdaptiveHarmonySearchV16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV16").set_name("LLAMAEnhancedAdaptiveHarmonySearchV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV16", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV16 print("EnhancedAdaptiveHarmonySearchV16 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV17 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV17 import EnhancedAdaptiveHarmonySearchV17 lama_register["EnhancedAdaptiveHarmonySearchV17"] = EnhancedAdaptiveHarmonySearchV17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV17").set_name("LLAMAEnhancedAdaptiveHarmonySearchV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV17" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV17", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV17 print("EnhancedAdaptiveHarmonySearchV17 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV18 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV18 import EnhancedAdaptiveHarmonySearchV18 lama_register["EnhancedAdaptiveHarmonySearchV18"] = EnhancedAdaptiveHarmonySearchV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV18").set_name("LLAMAEnhancedAdaptiveHarmonySearchV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV18", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV18 print("EnhancedAdaptiveHarmonySearchV18 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV19 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV19 import EnhancedAdaptiveHarmonySearchV19 lama_register["EnhancedAdaptiveHarmonySearchV19"] = EnhancedAdaptiveHarmonySearchV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV19").set_name("LLAMAEnhancedAdaptiveHarmonySearchV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV19" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV19", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV19 print("EnhancedAdaptiveHarmonySearchV19 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV20 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV20 import EnhancedAdaptiveHarmonySearchV20 lama_register["EnhancedAdaptiveHarmonySearchV20"] = EnhancedAdaptiveHarmonySearchV20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV20").set_name("LLAMAEnhancedAdaptiveHarmonySearchV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV20" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV20", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV20 print("EnhancedAdaptiveHarmonySearchV20 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV21 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV21 import EnhancedAdaptiveHarmonySearchV21 lama_register["EnhancedAdaptiveHarmonySearchV21"] = EnhancedAdaptiveHarmonySearchV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV21").set_name("LLAMAEnhancedAdaptiveHarmonySearchV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV21" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV21", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV21 print("EnhancedAdaptiveHarmonySearchV21 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV22 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV22 import EnhancedAdaptiveHarmonySearchV22 lama_register["EnhancedAdaptiveHarmonySearchV22"] = EnhancedAdaptiveHarmonySearchV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV22").set_name("LLAMAEnhancedAdaptiveHarmonySearchV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV22" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV22", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV22 print("EnhancedAdaptiveHarmonySearchV22 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV23 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV23 import EnhancedAdaptiveHarmonySearchV23 lama_register["EnhancedAdaptiveHarmonySearchV23"] = EnhancedAdaptiveHarmonySearchV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV23").set_name("LLAMAEnhancedAdaptiveHarmonySearchV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV23" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV23", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV23 print("EnhancedAdaptiveHarmonySearchV23 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV24 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV24 import EnhancedAdaptiveHarmonySearchV24 lama_register["EnhancedAdaptiveHarmonySearchV24"] = EnhancedAdaptiveHarmonySearchV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV24").set_name("LLAMAEnhancedAdaptiveHarmonySearchV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV24" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV24", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV24 print("EnhancedAdaptiveHarmonySearchV24 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV25 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV25 import EnhancedAdaptiveHarmonySearchV25 lama_register["EnhancedAdaptiveHarmonySearchV25"] = EnhancedAdaptiveHarmonySearchV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV25").set_name("LLAMAEnhancedAdaptiveHarmonySearchV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV25" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV25", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV25 print("EnhancedAdaptiveHarmonySearchV25 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV3 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV3 import EnhancedAdaptiveHarmonySearchV3 lama_register["EnhancedAdaptiveHarmonySearchV3"] = EnhancedAdaptiveHarmonySearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV3 print("EnhancedAdaptiveHarmonySearchV3 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV4 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV4 import EnhancedAdaptiveHarmonySearchV4 lama_register["EnhancedAdaptiveHarmonySearchV4"] = EnhancedAdaptiveHarmonySearchV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV4 print("EnhancedAdaptiveHarmonySearchV4 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV5 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV5 import EnhancedAdaptiveHarmonySearchV5 lama_register["EnhancedAdaptiveHarmonySearchV5"] = EnhancedAdaptiveHarmonySearchV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV5 print("EnhancedAdaptiveHarmonySearchV5 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV6 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV6 import EnhancedAdaptiveHarmonySearchV6 lama_register["EnhancedAdaptiveHarmonySearchV6"] = EnhancedAdaptiveHarmonySearchV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV6 print("EnhancedAdaptiveHarmonySearchV6 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV7 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV7 import EnhancedAdaptiveHarmonySearchV7 lama_register["EnhancedAdaptiveHarmonySearchV7"] = EnhancedAdaptiveHarmonySearchV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV7 print("EnhancedAdaptiveHarmonySearchV7 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV8 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV8 import EnhancedAdaptiveHarmonySearchV8 lama_register["EnhancedAdaptiveHarmonySearchV8"] = EnhancedAdaptiveHarmonySearchV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV8 print("EnhancedAdaptiveHarmonySearchV8 can not be imported: ", e) -try: +try: # EnhancedAdaptiveHarmonySearchV9 from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchV9 import EnhancedAdaptiveHarmonySearchV9 lama_register["EnhancedAdaptiveHarmonySearchV9"] = EnhancedAdaptiveHarmonySearchV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchV9 print("EnhancedAdaptiveHarmonySearchV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration import EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration - - lama_register["EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration"] = EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration import ( + EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration print("EnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 import EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 - - lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9"] = EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 import ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9"] = ( + EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 print("EnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 import EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 - - lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9"] = EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 import ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9"] = ( + EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 print("EnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 import EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17"] = EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 import EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18"] = EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 print("EnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 import EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13"] = EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 print("EnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 import EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2"] = EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2", register=True) -except Exception as e: - print("EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 import EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3"] = EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3", register=True) -except Exception as e: - print("EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 import EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 - - lama_register["EnhancedAdaptiveHarmonySearchWithHybridInspirationV16"] = EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2" + ).set_name( + "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2", register=True + ) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 + print( + "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2 can not be imported: ", + e, + ) +try: # EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 import ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3"] = ( + EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3" + ).set_name( + "LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3", register=True + ) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 + print( + "EnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3 can not be imported: ", + e, + ) +try: # EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 import ( + EnhancedAdaptiveHarmonySearchWithHybridInspirationV16, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithHybridInspirationV16"] = ( + EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 print("EnhancedAdaptiveHarmonySearchWithHybridInspirationV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlight can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 import EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 - - lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9"] = EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 import ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9"] = ( + EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 print("EnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlight import EnhancedAdaptiveHarmonySearchWithLevyFlight +try: # EnhancedAdaptiveHarmonySearchWithLevyFlight + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithLevyFlight, + ) lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlight"] = EnhancedAdaptiveHarmonySearchWithLevyFlight - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLevyFlight print("EnhancedAdaptiveHarmonySearchWithLevyFlight can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 import EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 - - lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2"] = EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 import ( + EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2"] = ( + EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 print("EnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimization import EnhancedAdaptiveHarmonySearchWithLocalOptimization - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimization"] = EnhancedAdaptiveHarmonySearchWithLocalOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimization import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimization, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimization"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimization print("EnhancedAdaptiveHarmonySearchWithLocalOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 import EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 - - lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8"] = EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 import ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8"] = ( + EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 print("EnhancedAdaptiveHarmonySearchWithLocalOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight import EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight - - lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight"] = EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight import ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight"] = ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlight can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration import EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration - - lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration"] = EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration import ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration"] = ( + EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration print("EnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 import EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 - - lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6"] = EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6").set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 import ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6, + ) + + lama_register["EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6"] = ( + EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6" + ).set_name("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6", register=True) +except Exception as e: # EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 print("EnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuOptimization import EnhancedAdaptiveHarmonyTabuOptimization +try: # EnhancedAdaptiveHarmonyTabuOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuOptimization import ( + EnhancedAdaptiveHarmonyTabuOptimization, + ) lama_register["EnhancedAdaptiveHarmonyTabuOptimization"] = EnhancedAdaptiveHarmonyTabuOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization").set_name("LLAMAEnhancedAdaptiveHarmonyTabuOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuOptimization" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuOptimization", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyTabuOptimization print("EnhancedAdaptiveHarmonyTabuOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV2 import EnhancedAdaptiveHarmonyTabuSearchV2 +try: # EnhancedAdaptiveHarmonyTabuSearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV2 import ( + EnhancedAdaptiveHarmonyTabuSearchV2, + ) lama_register["EnhancedAdaptiveHarmonyTabuSearchV2"] = EnhancedAdaptiveHarmonyTabuSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV2" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyTabuSearchV2 print("EnhancedAdaptiveHarmonyTabuSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV3 import EnhancedAdaptiveHarmonyTabuSearchV3 +try: # EnhancedAdaptiveHarmonyTabuSearchV3 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV3 import ( + EnhancedAdaptiveHarmonyTabuSearchV3, + ) lama_register["EnhancedAdaptiveHarmonyTabuSearchV3"] = EnhancedAdaptiveHarmonyTabuSearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV3" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyTabuSearchV3 print("EnhancedAdaptiveHarmonyTabuSearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV4 import EnhancedAdaptiveHarmonyTabuSearchV4 +try: # EnhancedAdaptiveHarmonyTabuSearchV4 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV4 import ( + EnhancedAdaptiveHarmonyTabuSearchV4, + ) lama_register["EnhancedAdaptiveHarmonyTabuSearchV4"] = EnhancedAdaptiveHarmonyTabuSearchV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV4" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyTabuSearchV4 print("EnhancedAdaptiveHarmonyTabuSearchV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV5 import EnhancedAdaptiveHarmonyTabuSearchV5 +try: # EnhancedAdaptiveHarmonyTabuSearchV5 + from nevergrad.optimization.lama.EnhancedAdaptiveHarmonyTabuSearchV5 import ( + EnhancedAdaptiveHarmonyTabuSearchV5, + ) lama_register["EnhancedAdaptiveHarmonyTabuSearchV5"] = EnhancedAdaptiveHarmonyTabuSearchV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHarmonyTabuSearchV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5").set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHarmonyTabuSearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHarmonyTabuSearchV5" + ).set_name("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5", register=True) +except Exception as e: # EnhancedAdaptiveHarmonyTabuSearchV5 print("EnhancedAdaptiveHarmonyTabuSearchV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory import EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory - - lama_register["EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory"] = EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory").set_name("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory", register=True) -except Exception as e: +try: # EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory + from nevergrad.optimization.lama.EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory import ( + EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory, + ) + + lama_register["EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory"] = ( + EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory" + ).set_name("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory", register=True) +except Exception as e: # EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory print("EnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV22 import EnhancedAdaptiveHybridHarmonySearchV22 +try: # EnhancedAdaptiveHybridHarmonySearchV22 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV22 import ( + EnhancedAdaptiveHybridHarmonySearchV22, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV22"] = EnhancedAdaptiveHybridHarmonySearchV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV22" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV22", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV22 print("EnhancedAdaptiveHybridHarmonySearchV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV23 import EnhancedAdaptiveHybridHarmonySearchV23 +try: # EnhancedAdaptiveHybridHarmonySearchV23 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV23 import ( + EnhancedAdaptiveHybridHarmonySearchV23, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV23"] = EnhancedAdaptiveHybridHarmonySearchV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV23" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV23", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV23 print("EnhancedAdaptiveHybridHarmonySearchV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV24 import EnhancedAdaptiveHybridHarmonySearchV24 +try: # EnhancedAdaptiveHybridHarmonySearchV24 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV24 import ( + EnhancedAdaptiveHybridHarmonySearchV24, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV24"] = EnhancedAdaptiveHybridHarmonySearchV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV24" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV24", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV24 print("EnhancedAdaptiveHybridHarmonySearchV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV25 import EnhancedAdaptiveHybridHarmonySearchV25 +try: # EnhancedAdaptiveHybridHarmonySearchV25 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV25 import ( + EnhancedAdaptiveHybridHarmonySearchV25, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV25"] = EnhancedAdaptiveHybridHarmonySearchV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV25" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV25", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV25 print("EnhancedAdaptiveHybridHarmonySearchV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV26 import EnhancedAdaptiveHybridHarmonySearchV26 +try: # EnhancedAdaptiveHybridHarmonySearchV26 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV26 import ( + EnhancedAdaptiveHybridHarmonySearchV26, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV26"] = EnhancedAdaptiveHybridHarmonySearchV26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV26" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV26", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV26 print("EnhancedAdaptiveHybridHarmonySearchV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV27 import EnhancedAdaptiveHybridHarmonySearchV27 +try: # EnhancedAdaptiveHybridHarmonySearchV27 + from nevergrad.optimization.lama.EnhancedAdaptiveHybridHarmonySearchV27 import ( + EnhancedAdaptiveHybridHarmonySearchV27, + ) lama_register["EnhancedAdaptiveHybridHarmonySearchV27"] = EnhancedAdaptiveHybridHarmonySearchV27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridHarmonySearchV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27").set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridHarmonySearchV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridHarmonySearchV27" + ).set_name("LLAMAEnhancedAdaptiveHybridHarmonySearchV27", register=True) +except Exception as e: # EnhancedAdaptiveHybridHarmonySearchV27 print("EnhancedAdaptiveHybridHarmonySearchV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridMetaOptimizer import EnhancedAdaptiveHybridMetaOptimizer +try: # EnhancedAdaptiveHybridMetaOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveHybridMetaOptimizer import ( + EnhancedAdaptiveHybridMetaOptimizer, + ) lama_register["EnhancedAdaptiveHybridMetaOptimizer"] = EnhancedAdaptiveHybridMetaOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridMetaOptimizer").set_name("LLAMAEnhancedAdaptiveHybridMetaOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridMetaOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHybridMetaOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveHybridMetaOptimizer print("EnhancedAdaptiveHybridMetaOptimizer can not be imported: ", e) -try: +try: # EnhancedAdaptiveHybridOptimizer from nevergrad.optimization.lama.EnhancedAdaptiveHybridOptimizer import EnhancedAdaptiveHybridOptimizer lama_register["EnhancedAdaptiveHybridOptimizer"] = EnhancedAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridOptimizer").set_name("LLAMAEnhancedAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveHybridOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveHybridOptimizer print("EnhancedAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution import EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution - - lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution"] = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution import ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus import EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus - - lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus").set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) -except Exception as e: +try: # EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus + from nevergrad.optimization.lama.EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus import ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus, + ) + + lama_register["EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus"] = ( + EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus" + ).set_name("LLAMAEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus", register=True) +except Exception as e: # EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus print("EnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveInertiaHybridOptimizer import EnhancedAdaptiveInertiaHybridOptimizer +try: # EnhancedAdaptiveInertiaHybridOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveInertiaHybridOptimizer import ( + EnhancedAdaptiveInertiaHybridOptimizer, + ) lama_register["EnhancedAdaptiveInertiaHybridOptimizer"] = EnhancedAdaptiveInertiaHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveInertiaHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer").set_name("LLAMAEnhancedAdaptiveInertiaHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveInertiaHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveInertiaHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveInertiaHybridOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveInertiaHybridOptimizer print("EnhancedAdaptiveInertiaHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm - - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) -except Exception as e: +try: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm", register=True) +except Exception as e: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 - - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 - - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3", register=True) +except Exception as e: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 import EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 - - lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4"] = EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4").set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 + from nevergrad.optimization.lama.EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 import ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4, + ) + + lama_register["EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4"] = ( + EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4" + ).set_name("LLAMAEnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4", register=True) +except Exception as e: # EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 print("EnhancedAdaptiveLevyDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearch import EnhancedAdaptiveLevyHarmonySearch +try: # EnhancedAdaptiveLevyHarmonySearch + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearch import ( + EnhancedAdaptiveLevyHarmonySearch, + ) lama_register["EnhancedAdaptiveLevyHarmonySearch"] = EnhancedAdaptiveLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearch").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearch", register=True) +except Exception as e: # EnhancedAdaptiveLevyHarmonySearch print("EnhancedAdaptiveLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV2 import EnhancedAdaptiveLevyHarmonySearchV2 +try: # EnhancedAdaptiveLevyHarmonySearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV2 import ( + EnhancedAdaptiveLevyHarmonySearchV2, + ) lama_register["EnhancedAdaptiveLevyHarmonySearchV2"] = EnhancedAdaptiveLevyHarmonySearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV2", register=True) +except Exception as e: # EnhancedAdaptiveLevyHarmonySearchV2 print("EnhancedAdaptiveLevyHarmonySearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV3 import EnhancedAdaptiveLevyHarmonySearchV3 +try: # EnhancedAdaptiveLevyHarmonySearchV3 + from nevergrad.optimization.lama.EnhancedAdaptiveLevyHarmonySearchV3 import ( + EnhancedAdaptiveLevyHarmonySearchV3, + ) lama_register["EnhancedAdaptiveLevyHarmonySearchV3"] = EnhancedAdaptiveLevyHarmonySearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLevyHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3").set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLevyHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLevyHarmonySearchV3" + ).set_name("LLAMAEnhancedAdaptiveLevyHarmonySearchV3", register=True) +except Exception as e: # EnhancedAdaptiveLevyHarmonySearchV3 print("EnhancedAdaptiveLevyHarmonySearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing - - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 - - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2", register=True) +except Exception as e: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 - - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3", register=True) +except Exception as e: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 - - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4", register=True) +except Exception as e: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 import EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 - - lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5"] = EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5").set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5", register=True) -except Exception as e: +try: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 + from nevergrad.optimization.lama.EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 import ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5, + ) + + lama_register["EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5"] = ( + EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5" + ).set_name("LLAMAEnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5", register=True) +except Exception as e: # EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 print("EnhancedAdaptiveLocalSearchQuantumSimulatedAnnealingV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDifferentialEvolution import EnhancedAdaptiveMemeticDifferentialEvolution - - lama_register["EnhancedAdaptiveMemeticDifferentialEvolution"] = EnhancedAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDifferentialEvolution import ( + EnhancedAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMemeticDifferentialEvolution"] = ( + EnhancedAdaptiveMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveMemeticDifferentialEvolution print("EnhancedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizer import EnhancedAdaptiveMemeticDiverseOptimizer +try: # EnhancedAdaptiveMemeticDiverseOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizer import ( + EnhancedAdaptiveMemeticDiverseOptimizer, + ) lama_register["EnhancedAdaptiveMemeticDiverseOptimizer"] = EnhancedAdaptiveMemeticDiverseOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveMemeticDiverseOptimizer print("EnhancedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV2 import EnhancedAdaptiveMemeticDiverseOptimizerV2 +try: # EnhancedAdaptiveMemeticDiverseOptimizerV2 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV2 import ( + EnhancedAdaptiveMemeticDiverseOptimizerV2, + ) lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV2"] = EnhancedAdaptiveMemeticDiverseOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV2", register=True) +except Exception as e: # EnhancedAdaptiveMemeticDiverseOptimizerV2 print("EnhancedAdaptiveMemeticDiverseOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV3 import EnhancedAdaptiveMemeticDiverseOptimizerV3 +try: # EnhancedAdaptiveMemeticDiverseOptimizerV3 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticDiverseOptimizerV3 import ( + EnhancedAdaptiveMemeticDiverseOptimizerV3, + ) lama_register["EnhancedAdaptiveMemeticDiverseOptimizerV3"] = EnhancedAdaptiveMemeticDiverseOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3").set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3" + ).set_name("LLAMAEnhancedAdaptiveMemeticDiverseOptimizerV3", register=True) +except Exception as e: # EnhancedAdaptiveMemeticDiverseOptimizerV3 print("EnhancedAdaptiveMemeticDiverseOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 import EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 - - lama_register["EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2"] = EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2").set_name("LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 import ( + EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2, + ) + + lama_register["EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2"] = ( + EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticEvolutionaryAlgorithmV2", register=True) +except Exception as e: # EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 print("EnhancedAdaptiveMemeticEvolutionaryAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimization import EnhancedAdaptiveMemeticHarmonyOptimization +try: # EnhancedAdaptiveMemeticHarmonyOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimization import ( + EnhancedAdaptiveMemeticHarmonyOptimization, + ) lama_register["EnhancedAdaptiveMemeticHarmonyOptimization"] = EnhancedAdaptiveMemeticHarmonyOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimization" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimization", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHarmonyOptimization print("EnhancedAdaptiveMemeticHarmonyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV2 import EnhancedAdaptiveMemeticHarmonyOptimizationV2 - - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV2"] = EnhancedAdaptiveMemeticHarmonyOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticHarmonyOptimizationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV2 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV2, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV2"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV2", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHarmonyOptimizationV2 print("EnhancedAdaptiveMemeticHarmonyOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV3 import EnhancedAdaptiveMemeticHarmonyOptimizationV3 - - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV3"] = EnhancedAdaptiveMemeticHarmonyOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticHarmonyOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV3 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV3, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV3"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHarmonyOptimizationV3 print("EnhancedAdaptiveMemeticHarmonyOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV4 import EnhancedAdaptiveMemeticHarmonyOptimizationV4 - - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV4"] = EnhancedAdaptiveMemeticHarmonyOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticHarmonyOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV4 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV4, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV4"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHarmonyOptimizationV4 print("EnhancedAdaptiveMemeticHarmonyOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV6 import EnhancedAdaptiveMemeticHarmonyOptimizationV6 - - lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV6"] = EnhancedAdaptiveMemeticHarmonyOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6").set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemeticHarmonyOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHarmonyOptimizationV6 import ( + EnhancedAdaptiveMemeticHarmonyOptimizationV6, + ) + + lama_register["EnhancedAdaptiveMemeticHarmonyOptimizationV6"] = ( + EnhancedAdaptiveMemeticHarmonyOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveMemeticHarmonyOptimizationV6", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHarmonyOptimizationV6 print("EnhancedAdaptiveMemeticHarmonyOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHybridOptimizer import EnhancedAdaptiveMemeticHybridOptimizer +try: # EnhancedAdaptiveMemeticHybridOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticHybridOptimizer import ( + EnhancedAdaptiveMemeticHybridOptimizer, + ) lama_register["EnhancedAdaptiveMemeticHybridOptimizer"] = EnhancedAdaptiveMemeticHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer").set_name("LLAMAEnhancedAdaptiveMemeticHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveMemeticHybridOptimizer print("EnhancedAdaptiveMemeticHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemeticOptimizerV7 import EnhancedAdaptiveMemeticOptimizerV7 +try: # EnhancedAdaptiveMemeticOptimizerV7 + from nevergrad.optimization.lama.EnhancedAdaptiveMemeticOptimizerV7 import ( + EnhancedAdaptiveMemeticOptimizerV7, + ) lama_register["EnhancedAdaptiveMemeticOptimizerV7"] = EnhancedAdaptiveMemeticOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemeticOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticOptimizerV7").set_name("LLAMAEnhancedAdaptiveMemeticOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemeticOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemeticOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemeticOptimizerV7" + ).set_name("LLAMAEnhancedAdaptiveMemeticOptimizerV7", register=True) +except Exception as e: # EnhancedAdaptiveMemeticOptimizerV7 print("EnhancedAdaptiveMemeticOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryControlStrategyV49 import EnhancedAdaptiveMemoryControlStrategyV49 +try: # EnhancedAdaptiveMemoryControlStrategyV49 + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryControlStrategyV49 import ( + EnhancedAdaptiveMemoryControlStrategyV49, + ) lama_register["EnhancedAdaptiveMemoryControlStrategyV49"] = EnhancedAdaptiveMemoryControlStrategyV49 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryControlStrategyV49 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49").set_name("LLAMAEnhancedAdaptiveMemoryControlStrategyV49", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryControlStrategyV49 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryControlStrategyV49" + ).set_name("LLAMAEnhancedAdaptiveMemoryControlStrategyV49", register=True) +except Exception as e: # EnhancedAdaptiveMemoryControlStrategyV49 print("EnhancedAdaptiveMemoryControlStrategyV49 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryDualPhaseStrategyV46 import EnhancedAdaptiveMemoryDualPhaseStrategyV46 +try: # EnhancedAdaptiveMemoryDualPhaseStrategyV46 + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryDualPhaseStrategyV46 import ( + EnhancedAdaptiveMemoryDualPhaseStrategyV46, + ) lama_register["EnhancedAdaptiveMemoryDualPhaseStrategyV46"] = EnhancedAdaptiveMemoryDualPhaseStrategyV46 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46").set_name("LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46" + ).set_name("LLAMAEnhancedAdaptiveMemoryDualPhaseStrategyV46", register=True) +except Exception as e: # EnhancedAdaptiveMemoryDualPhaseStrategyV46 print("EnhancedAdaptiveMemoryDualPhaseStrategyV46 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost import EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost - - lama_register["EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost"] = EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost").set_name("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) -except Exception as e: +try: # EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost import ( + EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost, + ) + + lama_register["EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost"] = ( + EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost" + ).set_name("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost", register=True) +except Exception as e: # EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost print("EnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridAnnealing import EnhancedAdaptiveMemoryHybridAnnealing +try: # EnhancedAdaptiveMemoryHybridAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridAnnealing import ( + EnhancedAdaptiveMemoryHybridAnnealing, + ) lama_register["EnhancedAdaptiveMemoryHybridAnnealing"] = EnhancedAdaptiveMemoryHybridAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryHybridAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing").set_name("LLAMAEnhancedAdaptiveMemoryHybridAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryHybridAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryHybridAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMemoryHybridAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveMemoryHybridAnnealing print("EnhancedAdaptiveMemoryHybridAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridDEPSO import EnhancedAdaptiveMemoryHybridDEPSO +try: # EnhancedAdaptiveMemoryHybridDEPSO + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryHybridDEPSO import ( + EnhancedAdaptiveMemoryHybridDEPSO, + ) lama_register["EnhancedAdaptiveMemoryHybridDEPSO"] = EnhancedAdaptiveMemoryHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO").set_name("LLAMAEnhancedAdaptiveMemoryHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryHybridDEPSO" + ).set_name("LLAMAEnhancedAdaptiveMemoryHybridDEPSO", register=True) +except Exception as e: # EnhancedAdaptiveMemoryHybridDEPSO print("EnhancedAdaptiveMemoryHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV54 import EnhancedAdaptiveMemoryStrategyV54 +try: # EnhancedAdaptiveMemoryStrategyV54 + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV54 import ( + EnhancedAdaptiveMemoryStrategyV54, + ) lama_register["EnhancedAdaptiveMemoryStrategyV54"] = EnhancedAdaptiveMemoryStrategyV54 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryStrategyV54 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV54").set_name("LLAMAEnhancedAdaptiveMemoryStrategyV54", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV54")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryStrategyV54 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryStrategyV54" + ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV54", register=True) +except Exception as e: # EnhancedAdaptiveMemoryStrategyV54 print("EnhancedAdaptiveMemoryStrategyV54 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV79 import EnhancedAdaptiveMemoryStrategyV79 +try: # EnhancedAdaptiveMemoryStrategyV79 + from nevergrad.optimization.lama.EnhancedAdaptiveMemoryStrategyV79 import ( + EnhancedAdaptiveMemoryStrategyV79, + ) lama_register["EnhancedAdaptiveMemoryStrategyV79"] = EnhancedAdaptiveMemoryStrategyV79 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMemoryStrategyV79 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV79").set_name("LLAMAEnhancedAdaptiveMemoryStrategyV79", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMemoryStrategyV79")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMemoryStrategyV79 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMemoryStrategyV79" + ).set_name("LLAMAEnhancedAdaptiveMemoryStrategyV79", register=True) +except Exception as e: # EnhancedAdaptiveMemoryStrategyV79 print("EnhancedAdaptiveMemoryStrategyV79 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSO from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSO import EnhancedAdaptiveMetaNetAQAPSO lama_register["EnhancedAdaptiveMetaNetAQAPSO"] = EnhancedAdaptiveMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSO print("EnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv12 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv12 import EnhancedAdaptiveMetaNetAQAPSOv12 lama_register["EnhancedAdaptiveMetaNetAQAPSOv12"] = EnhancedAdaptiveMetaNetAQAPSOv12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv12" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv12", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv12 print("EnhancedAdaptiveMetaNetAQAPSOv12 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv14 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv14 import EnhancedAdaptiveMetaNetAQAPSOv14 lama_register["EnhancedAdaptiveMetaNetAQAPSOv14"] = EnhancedAdaptiveMetaNetAQAPSOv14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv14" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv14", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv14 print("EnhancedAdaptiveMetaNetAQAPSOv14 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv15 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv15 import EnhancedAdaptiveMetaNetAQAPSOv15 lama_register["EnhancedAdaptiveMetaNetAQAPSOv15"] = EnhancedAdaptiveMetaNetAQAPSOv15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv15" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv15", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv15 print("EnhancedAdaptiveMetaNetAQAPSOv15 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv16 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv16 import EnhancedAdaptiveMetaNetAQAPSOv16 lama_register["EnhancedAdaptiveMetaNetAQAPSOv16"] = EnhancedAdaptiveMetaNetAQAPSOv16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv16" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv16", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv16 print("EnhancedAdaptiveMetaNetAQAPSOv16 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv2 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv2 import EnhancedAdaptiveMetaNetAQAPSOv2 lama_register["EnhancedAdaptiveMetaNetAQAPSOv2"] = EnhancedAdaptiveMetaNetAQAPSOv2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv2" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv2", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv2 print("EnhancedAdaptiveMetaNetAQAPSOv2 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetAQAPSOv3 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetAQAPSOv3 import EnhancedAdaptiveMetaNetAQAPSOv3 lama_register["EnhancedAdaptiveMetaNetAQAPSOv3"] = EnhancedAdaptiveMetaNetAQAPSOv3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3").set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetAQAPSOv3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetAQAPSOv3" + ).set_name("LLAMAEnhancedAdaptiveMetaNetAQAPSOv3", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetAQAPSOv3 print("EnhancedAdaptiveMetaNetAQAPSOv3 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetPSO from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO import EnhancedAdaptiveMetaNetPSO lama_register["EnhancedAdaptiveMetaNetPSO"] = EnhancedAdaptiveMetaNetPSO - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO").set_name("LLAMAEnhancedAdaptiveMetaNetPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO").set_name( + "LLAMAEnhancedAdaptiveMetaNetPSO", register=True + ) +except Exception as e: # EnhancedAdaptiveMetaNetPSO print("EnhancedAdaptiveMetaNetPSO can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetPSO_v2 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v2 import EnhancedAdaptiveMetaNetPSO_v2 lama_register["EnhancedAdaptiveMetaNetPSO_v2"] = EnhancedAdaptiveMetaNetPSO_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetPSO_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v2").set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetPSO_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetPSO_v2" + ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v2", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetPSO_v2 print("EnhancedAdaptiveMetaNetPSO_v2 can not be imported: ", e) -try: +try: # EnhancedAdaptiveMetaNetPSO_v3 from nevergrad.optimization.lama.EnhancedAdaptiveMetaNetPSO_v3 import EnhancedAdaptiveMetaNetPSO_v3 lama_register["EnhancedAdaptiveMetaNetPSO_v3"] = EnhancedAdaptiveMetaNetPSO_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMetaNetPSO_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v3").set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMetaNetPSO_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMetaNetPSO_v3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMetaNetPSO_v3" + ).set_name("LLAMAEnhancedAdaptiveMetaNetPSO_v3", register=True) +except Exception as e: # EnhancedAdaptiveMetaNetPSO_v3 print("EnhancedAdaptiveMetaNetPSO_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiMemorySimulatedAnnealing import EnhancedAdaptiveMultiMemorySimulatedAnnealing - - lama_register["EnhancedAdaptiveMultiMemorySimulatedAnnealing"] = EnhancedAdaptiveMultiMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedAdaptiveMultiMemorySimulatedAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveMultiMemorySimulatedAnnealing import ( + EnhancedAdaptiveMultiMemorySimulatedAnnealing, + ) + + lama_register["EnhancedAdaptiveMultiMemorySimulatedAnnealing"] = ( + EnhancedAdaptiveMultiMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveMultiMemorySimulatedAnnealing print("EnhancedAdaptiveMultiMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiOperatorSearch import EnhancedAdaptiveMultiOperatorSearch +try: # EnhancedAdaptiveMultiOperatorSearch + from nevergrad.optimization.lama.EnhancedAdaptiveMultiOperatorSearch import ( + EnhancedAdaptiveMultiOperatorSearch, + ) lama_register["EnhancedAdaptiveMultiOperatorSearch"] = EnhancedAdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiOperatorSearch").set_name("LLAMAEnhancedAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAEnhancedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: # EnhancedAdaptiveMultiOperatorSearch print("EnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealing import EnhancedAdaptiveMultiPhaseAnnealing +try: # EnhancedAdaptiveMultiPhaseAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealing import ( + EnhancedAdaptiveMultiPhaseAnnealing, + ) lama_register["EnhancedAdaptiveMultiPhaseAnnealing"] = EnhancedAdaptiveMultiPhaseAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing").set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPhaseAnnealing" + ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveMultiPhaseAnnealing print("EnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealingWithGradient import EnhancedAdaptiveMultiPhaseAnnealingWithGradient - - lama_register["EnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = EnhancedAdaptiveMultiPhaseAnnealingWithGradient - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient").set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) -except Exception as e: +try: # EnhancedAdaptiveMultiPhaseAnnealingWithGradient + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( + EnhancedAdaptiveMultiPhaseAnnealingWithGradient, + ) + + lama_register["EnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( + EnhancedAdaptiveMultiPhaseAnnealingWithGradient + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient" + ).set_name("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) +except Exception as e: # EnhancedAdaptiveMultiPhaseAnnealingWithGradient print("EnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiPopulationDifferentialEvolution import EnhancedAdaptiveMultiPopulationDifferentialEvolution - - lama_register["EnhancedAdaptiveMultiPopulationDifferentialEvolution"] = EnhancedAdaptiveMultiPopulationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveMultiPopulationDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveMultiPopulationDifferentialEvolution import ( + EnhancedAdaptiveMultiPopulationDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMultiPopulationDifferentialEvolution"] = ( + EnhancedAdaptiveMultiPopulationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMultiPopulationDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveMultiPopulationDifferentialEvolution print("EnhancedAdaptiveMultiPopulationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategicOptimizer import EnhancedAdaptiveMultiStrategicOptimizer +try: # EnhancedAdaptiveMultiStrategicOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategicOptimizer import ( + EnhancedAdaptiveMultiStrategicOptimizer, + ) lama_register["EnhancedAdaptiveMultiStrategicOptimizer"] = EnhancedAdaptiveMultiStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiStrategicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer").set_name("LLAMAEnhancedAdaptiveMultiStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiStrategicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategicOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategicOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveMultiStrategicOptimizer print("EnhancedAdaptiveMultiStrategicOptimizer can not be imported: ", e) -try: +try: # EnhancedAdaptiveMultiStrategyDE from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDE import EnhancedAdaptiveMultiStrategyDE lama_register["EnhancedAdaptiveMultiStrategyDE"] = EnhancedAdaptiveMultiStrategyDE - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDE").set_name("LLAMAEnhancedAdaptiveMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyDE" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDE", register=True) +except Exception as e: # EnhancedAdaptiveMultiStrategyDE print("EnhancedAdaptiveMultiStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDifferentialEvolution import EnhancedAdaptiveMultiStrategyDifferentialEvolution - - lama_register["EnhancedAdaptiveMultiStrategyDifferentialEvolution"] = EnhancedAdaptiveMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyDifferentialEvolution import ( + EnhancedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveMultiStrategyDifferentialEvolution"] = ( + EnhancedAdaptiveMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveMultiStrategyDifferentialEvolution print("EnhancedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyOptimizer import EnhancedAdaptiveMultiStrategyOptimizer +try: # EnhancedAdaptiveMultiStrategyOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveMultiStrategyOptimizer import ( + EnhancedAdaptiveMultiStrategyOptimizer, + ) lama_register["EnhancedAdaptiveMultiStrategyOptimizer"] = EnhancedAdaptiveMultiStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer").set_name("LLAMAEnhancedAdaptiveMultiStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAEnhancedAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveMultiStrategyOptimizer print("EnhancedAdaptiveMultiStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer import EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer - - lama_register["EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer"] = EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer import ( + EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer"] = ( + EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer print("EnhancedAdaptiveNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution import EnhancedAdaptiveOppositionBasedDifferentialEvolution - - lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution"] = EnhancedAdaptiveOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution import ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveOppositionBasedDifferentialEvolution print("EnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 import EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 - - lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2"] = EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2").set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2", register=True) -except Exception as e: +try: # EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 import ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2, + ) + + lama_register["EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2"] = ( + EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2", register=True) +except Exception as e: # EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 print("EnhancedAdaptiveOppositionBasedDifferentialEvolution_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE import EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE - - lama_register["EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE"] = EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) -except Exception as e: +try: # EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE + from nevergrad.optimization.lama.EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: # EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE print("EnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveOrthogonalDifferentialEvolution import EnhancedAdaptiveOrthogonalDifferentialEvolution - - lama_register["EnhancedAdaptiveOrthogonalDifferentialEvolution"] = EnhancedAdaptiveOrthogonalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveOrthogonalDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveOrthogonalDifferentialEvolution import ( + EnhancedAdaptiveOrthogonalDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveOrthogonalDifferentialEvolution"] = ( + EnhancedAdaptiveOrthogonalDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveOrthogonalDifferentialEvolution print("EnhancedAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - - lama_register["EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) -except Exception as e: +try: # EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + from nevergrad.optimization.lama.EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( + EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( + EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: # EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch print("EnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptivePrecisionCohortOptimizationV5 import EnhancedAdaptivePrecisionCohortOptimizationV5 - - lama_register["EnhancedAdaptivePrecisionCohortOptimizationV5"] = EnhancedAdaptivePrecisionCohortOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5").set_name("LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5", register=True) -except Exception as e: +try: # EnhancedAdaptivePrecisionCohortOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionCohortOptimizationV5 import ( + EnhancedAdaptivePrecisionCohortOptimizationV5, + ) + + lama_register["EnhancedAdaptivePrecisionCohortOptimizationV5"] = ( + EnhancedAdaptivePrecisionCohortOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5" + ).set_name("LLAMAEnhancedAdaptivePrecisionCohortOptimizationV5", register=True) +except Exception as e: # EnhancedAdaptivePrecisionCohortOptimizationV5 print("EnhancedAdaptivePrecisionCohortOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptivePrecisionFocalStrategy import EnhancedAdaptivePrecisionFocalStrategy +try: # EnhancedAdaptivePrecisionFocalStrategy + from nevergrad.optimization.lama.EnhancedAdaptivePrecisionFocalStrategy import ( + EnhancedAdaptivePrecisionFocalStrategy, + ) lama_register["EnhancedAdaptivePrecisionFocalStrategy"] = EnhancedAdaptivePrecisionFocalStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptivePrecisionFocalStrategy = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionFocalStrategy").set_name("LLAMAEnhancedAdaptivePrecisionFocalStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptivePrecisionFocalStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptivePrecisionFocalStrategy = NonObjectOptimizer( + method="LLAMAEnhancedAdaptivePrecisionFocalStrategy" + ).set_name("LLAMAEnhancedAdaptivePrecisionFocalStrategy", register=True) +except Exception as e: # EnhancedAdaptivePrecisionFocalStrategy print("EnhancedAdaptivePrecisionFocalStrategy can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA from nevergrad.optimization.lama.EnhancedAdaptiveQGSA import EnhancedAdaptiveQGSA lama_register["EnhancedAdaptiveQGSA"] = EnhancedAdaptiveQGSA - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA").set_name("LLAMAEnhancedAdaptiveQGSA", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA").set_name( + "LLAMAEnhancedAdaptiveQGSA", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA print("EnhancedAdaptiveQGSA can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v10 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v10 import EnhancedAdaptiveQGSA_v10 lama_register["EnhancedAdaptiveQGSA_v10"] = EnhancedAdaptiveQGSA_v10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10").set_name("LLAMAEnhancedAdaptiveQGSA_v10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v10").set_name( + "LLAMAEnhancedAdaptiveQGSA_v10", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v10 print("EnhancedAdaptiveQGSA_v10 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v11 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v11 import EnhancedAdaptiveQGSA_v11 lama_register["EnhancedAdaptiveQGSA_v11"] = EnhancedAdaptiveQGSA_v11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11").set_name("LLAMAEnhancedAdaptiveQGSA_v11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v11").set_name( + "LLAMAEnhancedAdaptiveQGSA_v11", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v11 print("EnhancedAdaptiveQGSA_v11 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v12 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v12 import EnhancedAdaptiveQGSA_v12 lama_register["EnhancedAdaptiveQGSA_v12"] = EnhancedAdaptiveQGSA_v12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12").set_name("LLAMAEnhancedAdaptiveQGSA_v12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v12").set_name( + "LLAMAEnhancedAdaptiveQGSA_v12", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v12 print("EnhancedAdaptiveQGSA_v12 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v13 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v13 import EnhancedAdaptiveQGSA_v13 lama_register["EnhancedAdaptiveQGSA_v13"] = EnhancedAdaptiveQGSA_v13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13").set_name("LLAMAEnhancedAdaptiveQGSA_v13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v13").set_name( + "LLAMAEnhancedAdaptiveQGSA_v13", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v13 print("EnhancedAdaptiveQGSA_v13 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v14 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v14 import EnhancedAdaptiveQGSA_v14 lama_register["EnhancedAdaptiveQGSA_v14"] = EnhancedAdaptiveQGSA_v14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14").set_name("LLAMAEnhancedAdaptiveQGSA_v14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v14").set_name( + "LLAMAEnhancedAdaptiveQGSA_v14", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v14 print("EnhancedAdaptiveQGSA_v14 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v15 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v15 import EnhancedAdaptiveQGSA_v15 lama_register["EnhancedAdaptiveQGSA_v15"] = EnhancedAdaptiveQGSA_v15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15").set_name("LLAMAEnhancedAdaptiveQGSA_v15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v15").set_name( + "LLAMAEnhancedAdaptiveQGSA_v15", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v15 print("EnhancedAdaptiveQGSA_v15 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v16 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v16 import EnhancedAdaptiveQGSA_v16 lama_register["EnhancedAdaptiveQGSA_v16"] = EnhancedAdaptiveQGSA_v16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16").set_name("LLAMAEnhancedAdaptiveQGSA_v16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v16").set_name( + "LLAMAEnhancedAdaptiveQGSA_v16", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v16 print("EnhancedAdaptiveQGSA_v16 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v17 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v17 import EnhancedAdaptiveQGSA_v17 lama_register["EnhancedAdaptiveQGSA_v17"] = EnhancedAdaptiveQGSA_v17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17").set_name("LLAMAEnhancedAdaptiveQGSA_v17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v17").set_name( + "LLAMAEnhancedAdaptiveQGSA_v17", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v17 print("EnhancedAdaptiveQGSA_v17 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v18 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v18 import EnhancedAdaptiveQGSA_v18 lama_register["EnhancedAdaptiveQGSA_v18"] = EnhancedAdaptiveQGSA_v18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18").set_name("LLAMAEnhancedAdaptiveQGSA_v18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v18").set_name( + "LLAMAEnhancedAdaptiveQGSA_v18", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v18 print("EnhancedAdaptiveQGSA_v18 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v19 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v19 import EnhancedAdaptiveQGSA_v19 lama_register["EnhancedAdaptiveQGSA_v19"] = EnhancedAdaptiveQGSA_v19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19").set_name("LLAMAEnhancedAdaptiveQGSA_v19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v19").set_name( + "LLAMAEnhancedAdaptiveQGSA_v19", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v19 print("EnhancedAdaptiveQGSA_v19 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v2 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v2 import EnhancedAdaptiveQGSA_v2 lama_register["EnhancedAdaptiveQGSA_v2"] = EnhancedAdaptiveQGSA_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2").set_name("LLAMAEnhancedAdaptiveQGSA_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v2").set_name( + "LLAMAEnhancedAdaptiveQGSA_v2", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v2 print("EnhancedAdaptiveQGSA_v2 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v20 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v20 import EnhancedAdaptiveQGSA_v20 lama_register["EnhancedAdaptiveQGSA_v20"] = EnhancedAdaptiveQGSA_v20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20").set_name("LLAMAEnhancedAdaptiveQGSA_v20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v20").set_name( + "LLAMAEnhancedAdaptiveQGSA_v20", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v20 print("EnhancedAdaptiveQGSA_v20 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v21 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v21 import EnhancedAdaptiveQGSA_v21 lama_register["EnhancedAdaptiveQGSA_v21"] = EnhancedAdaptiveQGSA_v21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21").set_name("LLAMAEnhancedAdaptiveQGSA_v21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v21").set_name( + "LLAMAEnhancedAdaptiveQGSA_v21", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v21 print("EnhancedAdaptiveQGSA_v21 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v22 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v22 import EnhancedAdaptiveQGSA_v22 lama_register["EnhancedAdaptiveQGSA_v22"] = EnhancedAdaptiveQGSA_v22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22").set_name("LLAMAEnhancedAdaptiveQGSA_v22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v22").set_name( + "LLAMAEnhancedAdaptiveQGSA_v22", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v22 print("EnhancedAdaptiveQGSA_v22 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v23 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v23 import EnhancedAdaptiveQGSA_v23 lama_register["EnhancedAdaptiveQGSA_v23"] = EnhancedAdaptiveQGSA_v23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23").set_name("LLAMAEnhancedAdaptiveQGSA_v23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v23").set_name( + "LLAMAEnhancedAdaptiveQGSA_v23", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v23 print("EnhancedAdaptiveQGSA_v23 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v24 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v24 import EnhancedAdaptiveQGSA_v24 lama_register["EnhancedAdaptiveQGSA_v24"] = EnhancedAdaptiveQGSA_v24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24").set_name("LLAMAEnhancedAdaptiveQGSA_v24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v24").set_name( + "LLAMAEnhancedAdaptiveQGSA_v24", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v24 print("EnhancedAdaptiveQGSA_v24 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v25 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v25 import EnhancedAdaptiveQGSA_v25 lama_register["EnhancedAdaptiveQGSA_v25"] = EnhancedAdaptiveQGSA_v25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25").set_name("LLAMAEnhancedAdaptiveQGSA_v25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v25").set_name( + "LLAMAEnhancedAdaptiveQGSA_v25", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v25 print("EnhancedAdaptiveQGSA_v25 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v26 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v26 import EnhancedAdaptiveQGSA_v26 lama_register["EnhancedAdaptiveQGSA_v26"] = EnhancedAdaptiveQGSA_v26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26").set_name("LLAMAEnhancedAdaptiveQGSA_v26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v26").set_name( + "LLAMAEnhancedAdaptiveQGSA_v26", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v26 print("EnhancedAdaptiveQGSA_v26 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v27 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v27 import EnhancedAdaptiveQGSA_v27 lama_register["EnhancedAdaptiveQGSA_v27"] = EnhancedAdaptiveQGSA_v27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27").set_name("LLAMAEnhancedAdaptiveQGSA_v27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v27").set_name( + "LLAMAEnhancedAdaptiveQGSA_v27", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v27 print("EnhancedAdaptiveQGSA_v27 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v28 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v28 import EnhancedAdaptiveQGSA_v28 lama_register["EnhancedAdaptiveQGSA_v28"] = EnhancedAdaptiveQGSA_v28 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28").set_name("LLAMAEnhancedAdaptiveQGSA_v28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v28").set_name( + "LLAMAEnhancedAdaptiveQGSA_v28", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v28 print("EnhancedAdaptiveQGSA_v28 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v29 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v29 import EnhancedAdaptiveQGSA_v29 lama_register["EnhancedAdaptiveQGSA_v29"] = EnhancedAdaptiveQGSA_v29 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29").set_name("LLAMAEnhancedAdaptiveQGSA_v29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v29").set_name( + "LLAMAEnhancedAdaptiveQGSA_v29", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v29 print("EnhancedAdaptiveQGSA_v29 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v3 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v3 import EnhancedAdaptiveQGSA_v3 lama_register["EnhancedAdaptiveQGSA_v3"] = EnhancedAdaptiveQGSA_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3").set_name("LLAMAEnhancedAdaptiveQGSA_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v3").set_name( + "LLAMAEnhancedAdaptiveQGSA_v3", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v3 print("EnhancedAdaptiveQGSA_v3 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v30 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v30 import EnhancedAdaptiveQGSA_v30 lama_register["EnhancedAdaptiveQGSA_v30"] = EnhancedAdaptiveQGSA_v30 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30").set_name("LLAMAEnhancedAdaptiveQGSA_v30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v30").set_name( + "LLAMAEnhancedAdaptiveQGSA_v30", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v30 print("EnhancedAdaptiveQGSA_v30 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v31 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v31 import EnhancedAdaptiveQGSA_v31 lama_register["EnhancedAdaptiveQGSA_v31"] = EnhancedAdaptiveQGSA_v31 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31").set_name("LLAMAEnhancedAdaptiveQGSA_v31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v31").set_name( + "LLAMAEnhancedAdaptiveQGSA_v31", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v31 print("EnhancedAdaptiveQGSA_v31 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v32 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v32 import EnhancedAdaptiveQGSA_v32 lama_register["EnhancedAdaptiveQGSA_v32"] = EnhancedAdaptiveQGSA_v32 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32").set_name("LLAMAEnhancedAdaptiveQGSA_v32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v32 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v32").set_name( + "LLAMAEnhancedAdaptiveQGSA_v32", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v32 print("EnhancedAdaptiveQGSA_v32 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v33 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v33 import EnhancedAdaptiveQGSA_v33 lama_register["EnhancedAdaptiveQGSA_v33"] = EnhancedAdaptiveQGSA_v33 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33").set_name("LLAMAEnhancedAdaptiveQGSA_v33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v33 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v33").set_name( + "LLAMAEnhancedAdaptiveQGSA_v33", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v33 print("EnhancedAdaptiveQGSA_v33 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v34 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v34 import EnhancedAdaptiveQGSA_v34 lama_register["EnhancedAdaptiveQGSA_v34"] = EnhancedAdaptiveQGSA_v34 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v34 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34").set_name("LLAMAEnhancedAdaptiveQGSA_v34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v34 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v34").set_name( + "LLAMAEnhancedAdaptiveQGSA_v34", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v34 print("EnhancedAdaptiveQGSA_v34 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v35 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v35 import EnhancedAdaptiveQGSA_v35 lama_register["EnhancedAdaptiveQGSA_v35"] = EnhancedAdaptiveQGSA_v35 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v35 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35").set_name("LLAMAEnhancedAdaptiveQGSA_v35", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v35 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v35").set_name( + "LLAMAEnhancedAdaptiveQGSA_v35", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v35 print("EnhancedAdaptiveQGSA_v35 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v36 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v36 import EnhancedAdaptiveQGSA_v36 lama_register["EnhancedAdaptiveQGSA_v36"] = EnhancedAdaptiveQGSA_v36 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v36 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36").set_name("LLAMAEnhancedAdaptiveQGSA_v36", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v36 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v36").set_name( + "LLAMAEnhancedAdaptiveQGSA_v36", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v36 print("EnhancedAdaptiveQGSA_v36 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v38 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v38 import EnhancedAdaptiveQGSA_v38 lama_register["EnhancedAdaptiveQGSA_v38"] = EnhancedAdaptiveQGSA_v38 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v38 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38").set_name("LLAMAEnhancedAdaptiveQGSA_v38", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v38 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v38").set_name( + "LLAMAEnhancedAdaptiveQGSA_v38", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v38 print("EnhancedAdaptiveQGSA_v38 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v39 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v39 import EnhancedAdaptiveQGSA_v39 lama_register["EnhancedAdaptiveQGSA_v39"] = EnhancedAdaptiveQGSA_v39 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v39 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39").set_name("LLAMAEnhancedAdaptiveQGSA_v39", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v39 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v39").set_name( + "LLAMAEnhancedAdaptiveQGSA_v39", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v39 print("EnhancedAdaptiveQGSA_v39 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v4 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v4 import EnhancedAdaptiveQGSA_v4 lama_register["EnhancedAdaptiveQGSA_v4"] = EnhancedAdaptiveQGSA_v4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4").set_name("LLAMAEnhancedAdaptiveQGSA_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v4").set_name( + "LLAMAEnhancedAdaptiveQGSA_v4", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v4 print("EnhancedAdaptiveQGSA_v4 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v40 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v40 import EnhancedAdaptiveQGSA_v40 lama_register["EnhancedAdaptiveQGSA_v40"] = EnhancedAdaptiveQGSA_v40 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v40 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40").set_name("LLAMAEnhancedAdaptiveQGSA_v40", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v40 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v40").set_name( + "LLAMAEnhancedAdaptiveQGSA_v40", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v40 print("EnhancedAdaptiveQGSA_v40 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v41 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v41 import EnhancedAdaptiveQGSA_v41 lama_register["EnhancedAdaptiveQGSA_v41"] = EnhancedAdaptiveQGSA_v41 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v41 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41").set_name("LLAMAEnhancedAdaptiveQGSA_v41", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v41 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v41").set_name( + "LLAMAEnhancedAdaptiveQGSA_v41", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v41 print("EnhancedAdaptiveQGSA_v41 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v42 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v42 import EnhancedAdaptiveQGSA_v42 lama_register["EnhancedAdaptiveQGSA_v42"] = EnhancedAdaptiveQGSA_v42 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v42 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42").set_name("LLAMAEnhancedAdaptiveQGSA_v42", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v42 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v42").set_name( + "LLAMAEnhancedAdaptiveQGSA_v42", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v42 print("EnhancedAdaptiveQGSA_v42 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v43 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v43 import EnhancedAdaptiveQGSA_v43 lama_register["EnhancedAdaptiveQGSA_v43"] = EnhancedAdaptiveQGSA_v43 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v43 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43").set_name("LLAMAEnhancedAdaptiveQGSA_v43", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v43 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v43").set_name( + "LLAMAEnhancedAdaptiveQGSA_v43", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v43 print("EnhancedAdaptiveQGSA_v43 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v44 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v44 import EnhancedAdaptiveQGSA_v44 lama_register["EnhancedAdaptiveQGSA_v44"] = EnhancedAdaptiveQGSA_v44 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v44 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44").set_name("LLAMAEnhancedAdaptiveQGSA_v44", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v44 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v44").set_name( + "LLAMAEnhancedAdaptiveQGSA_v44", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v44 print("EnhancedAdaptiveQGSA_v44 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v47 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v47 import EnhancedAdaptiveQGSA_v47 lama_register["EnhancedAdaptiveQGSA_v47"] = EnhancedAdaptiveQGSA_v47 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v47 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47").set_name("LLAMAEnhancedAdaptiveQGSA_v47", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v47 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v47").set_name( + "LLAMAEnhancedAdaptiveQGSA_v47", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v47 print("EnhancedAdaptiveQGSA_v47 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v5 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v5 import EnhancedAdaptiveQGSA_v5 lama_register["EnhancedAdaptiveQGSA_v5"] = EnhancedAdaptiveQGSA_v5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5").set_name("LLAMAEnhancedAdaptiveQGSA_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v5").set_name( + "LLAMAEnhancedAdaptiveQGSA_v5", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v5 print("EnhancedAdaptiveQGSA_v5 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v6 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v6 import EnhancedAdaptiveQGSA_v6 lama_register["EnhancedAdaptiveQGSA_v6"] = EnhancedAdaptiveQGSA_v6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6").set_name("LLAMAEnhancedAdaptiveQGSA_v6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v6").set_name( + "LLAMAEnhancedAdaptiveQGSA_v6", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v6 print("EnhancedAdaptiveQGSA_v6 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v8 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v8 import EnhancedAdaptiveQGSA_v8 lama_register["EnhancedAdaptiveQGSA_v8"] = EnhancedAdaptiveQGSA_v8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8").set_name("LLAMAEnhancedAdaptiveQGSA_v8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v8").set_name( + "LLAMAEnhancedAdaptiveQGSA_v8", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v8 print("EnhancedAdaptiveQGSA_v8 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQGSA_v9 from nevergrad.optimization.lama.EnhancedAdaptiveQGSA_v9 import EnhancedAdaptiveQGSA_v9 lama_register["EnhancedAdaptiveQGSA_v9"] = EnhancedAdaptiveQGSA_v9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQGSA_v9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9").set_name("LLAMAEnhancedAdaptiveQGSA_v9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQGSA_v9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQGSA_v9").set_name( + "LLAMAEnhancedAdaptiveQGSA_v9", register=True + ) +except Exception as e: # EnhancedAdaptiveQGSA_v9 print("EnhancedAdaptiveQGSA_v9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDEWithDynamicElitistLearning import EnhancedAdaptiveQuantumDEWithDynamicElitistLearning - - lama_register["EnhancedAdaptiveQuantumDEWithDynamicElitistLearning"] = EnhancedAdaptiveQuantumDEWithDynamicElitistLearning - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning").set_name("LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumDEWithDynamicElitistLearning + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDEWithDynamicElitistLearning import ( + EnhancedAdaptiveQuantumDEWithDynamicElitistLearning, + ) + + lama_register["EnhancedAdaptiveQuantumDEWithDynamicElitistLearning"] = ( + EnhancedAdaptiveQuantumDEWithDynamicElitistLearning + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning" + ).set_name("LLAMAEnhancedAdaptiveQuantumDEWithDynamicElitistLearning", register=True) +except Exception as e: # EnhancedAdaptiveQuantumDEWithDynamicElitistLearning print("EnhancedAdaptiveQuantumDEWithDynamicElitistLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolution import EnhancedAdaptiveQuantumDifferentialEvolution - - lama_register["EnhancedAdaptiveQuantumDifferentialEvolution"] = EnhancedAdaptiveQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution").set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumDifferentialEvolution + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolution import ( + EnhancedAdaptiveQuantumDifferentialEvolution, + ) + + lama_register["EnhancedAdaptiveQuantumDifferentialEvolution"] = ( + EnhancedAdaptiveQuantumDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: # EnhancedAdaptiveQuantumDifferentialEvolution print("EnhancedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch import EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch - - lama_register["EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch"] = EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch").set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch import ( + EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch, + ) + + lama_register["EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch"] = ( + EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch", register=True) +except Exception as e: # EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch print("EnhancedAdaptiveQuantumDifferentialEvolutionWithMemoryAndLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDynamicLevyOptimization import EnhancedAdaptiveQuantumDynamicLevyOptimization - - lama_register["EnhancedAdaptiveQuantumDynamicLevyOptimization"] = EnhancedAdaptiveQuantumDynamicLevyOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization").set_name("LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumDynamicLevyOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumDynamicLevyOptimization import ( + EnhancedAdaptiveQuantumDynamicLevyOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumDynamicLevyOptimization"] = ( + EnhancedAdaptiveQuantumDynamicLevyOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumDynamicLevyOptimization", register=True) +except Exception as e: # EnhancedAdaptiveQuantumDynamicLevyOptimization print("EnhancedAdaptiveQuantumDynamicLevyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumGradientMemeticOptimizer import EnhancedAdaptiveQuantumGradientMemeticOptimizer - - lama_register["EnhancedAdaptiveQuantumGradientMemeticOptimizer"] = EnhancedAdaptiveQuantumGradientMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer").set_name("LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumGradientMemeticOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumGradientMemeticOptimizer import ( + EnhancedAdaptiveQuantumGradientMemeticOptimizer, + ) + + lama_register["EnhancedAdaptiveQuantumGradientMemeticOptimizer"] = ( + EnhancedAdaptiveQuantumGradientMemeticOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer" + ).set_name("LLAMAEnhancedAdaptiveQuantumGradientMemeticOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveQuantumGradientMemeticOptimizer print("EnhancedAdaptiveQuantumGradientMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGB import EnhancedAdaptiveQuantumHarmonySearchDBGB +try: # EnhancedAdaptiveQuantumHarmonySearchDBGB + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGB import ( + EnhancedAdaptiveQuantumHarmonySearchDBGB, + ) lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGB"] = EnhancedAdaptiveQuantumHarmonySearchDBGB - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchDBGB print("EnhancedAdaptiveQuantumHarmonySearchDBGB can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinal import EnhancedAdaptiveQuantumHarmonySearchDBGBFinal - - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinal"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinal - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinal + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinal import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinal, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinal"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinal + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinal print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinal can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII import EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII - - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalII can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII import EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII - - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII"] = EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII print("EnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBImproved import EnhancedAdaptiveQuantumHarmonySearchDBGBImproved - - lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBImproved"] = EnhancedAdaptiveQuantumHarmonySearchDBGBImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchDBGBImproved + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchDBGBImproved import ( + EnhancedAdaptiveQuantumHarmonySearchDBGBImproved, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchDBGBImproved"] = ( + EnhancedAdaptiveQuantumHarmonySearchDBGBImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchDBGBImproved print("EnhancedAdaptiveQuantumHarmonySearchDBGBImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchFinal import EnhancedAdaptiveQuantumHarmonySearchFinal +try: # EnhancedAdaptiveQuantumHarmonySearchFinal + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchFinal import ( + EnhancedAdaptiveQuantumHarmonySearchFinal, + ) lama_register["EnhancedAdaptiveQuantumHarmonySearchFinal"] = EnhancedAdaptiveQuantumHarmonySearchFinal - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchFinal print("EnhancedAdaptiveQuantumHarmonySearchFinal can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImproved import EnhancedAdaptiveQuantumHarmonySearchImproved - - lama_register["EnhancedAdaptiveQuantumHarmonySearchImproved"] = EnhancedAdaptiveQuantumHarmonySearchImproved - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchImproved + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImproved import ( + EnhancedAdaptiveQuantumHarmonySearchImproved, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchImproved"] = ( + EnhancedAdaptiveQuantumHarmonySearchImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchImproved print("EnhancedAdaptiveQuantumHarmonySearchImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImprovedRefined import EnhancedAdaptiveQuantumHarmonySearchImprovedRefined - - lama_register["EnhancedAdaptiveQuantumHarmonySearchImprovedRefined"] = EnhancedAdaptiveQuantumHarmonySearchImprovedRefined - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined").set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumHarmonySearchImprovedRefined + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumHarmonySearchImprovedRefined import ( + EnhancedAdaptiveQuantumHarmonySearchImprovedRefined, + ) + + lama_register["EnhancedAdaptiveQuantumHarmonySearchImprovedRefined"] = ( + EnhancedAdaptiveQuantumHarmonySearchImprovedRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined" + ).set_name("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined", register=True) +except Exception as e: # EnhancedAdaptiveQuantumHarmonySearchImprovedRefined print("EnhancedAdaptiveQuantumHarmonySearchImprovedRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevyMemeticOptimizer import EnhancedAdaptiveQuantumLevyMemeticOptimizer +try: # EnhancedAdaptiveQuantumLevyMemeticOptimizer + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevyMemeticOptimizer import ( + EnhancedAdaptiveQuantumLevyMemeticOptimizer, + ) lama_register["EnhancedAdaptiveQuantumLevyMemeticOptimizer"] = EnhancedAdaptiveQuantumLevyMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer").set_name("LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer" + ).set_name("LLAMAEnhancedAdaptiveQuantumLevyMemeticOptimizer", register=True) +except Exception as e: # EnhancedAdaptiveQuantumLevyMemeticOptimizer print("EnhancedAdaptiveQuantumLevyMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevySwarmOptimization import EnhancedAdaptiveQuantumLevySwarmOptimization - - lama_register["EnhancedAdaptiveQuantumLevySwarmOptimization"] = EnhancedAdaptiveQuantumLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumLevySwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLevySwarmOptimization import ( + EnhancedAdaptiveQuantumLevySwarmOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumLevySwarmOptimization"] = ( + EnhancedAdaptiveQuantumLevySwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumLevySwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveQuantumLevySwarmOptimization print("EnhancedAdaptiveQuantumLevySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLocalSearch import EnhancedAdaptiveQuantumLocalSearch +try: # EnhancedAdaptiveQuantumLocalSearch + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumLocalSearch import ( + EnhancedAdaptiveQuantumLocalSearch, + ) lama_register["EnhancedAdaptiveQuantumLocalSearch"] = EnhancedAdaptiveQuantumLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLocalSearch").set_name("LLAMAEnhancedAdaptiveQuantumLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumLocalSearch" + ).set_name("LLAMAEnhancedAdaptiveQuantumLocalSearch", register=True) +except Exception as e: # EnhancedAdaptiveQuantumLocalSearch print("EnhancedAdaptiveQuantumLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumMemeticOptimizerV4 import EnhancedAdaptiveQuantumMemeticOptimizerV4 +try: # EnhancedAdaptiveQuantumMemeticOptimizerV4 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumMemeticOptimizerV4 import ( + EnhancedAdaptiveQuantumMemeticOptimizerV4, + ) lama_register["EnhancedAdaptiveQuantumMemeticOptimizerV4"] = EnhancedAdaptiveQuantumMemeticOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4").set_name("LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4" + ).set_name("LLAMAEnhancedAdaptiveQuantumMemeticOptimizerV4", register=True) +except Exception as e: # EnhancedAdaptiveQuantumMemeticOptimizerV4 print("EnhancedAdaptiveQuantumMemeticOptimizerV4 can not be imported: ", e) -try: +try: # EnhancedAdaptiveQuantumPSO from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSO import EnhancedAdaptiveQuantumPSO lama_register["EnhancedAdaptiveQuantumPSO"] = EnhancedAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO").set_name("LLAMAEnhancedAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSO").set_name( + "LLAMAEnhancedAdaptiveQuantumPSO", register=True + ) +except Exception as e: # EnhancedAdaptiveQuantumPSO print("EnhancedAdaptiveQuantumPSO can not be imported: ", e) -try: +try: # EnhancedAdaptiveQuantumPSOv2 from nevergrad.optimization.lama.EnhancedAdaptiveQuantumPSOv2 import EnhancedAdaptiveQuantumPSOv2 lama_register["EnhancedAdaptiveQuantumPSOv2"] = EnhancedAdaptiveQuantumPSOv2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSOv2").set_name("LLAMAEnhancedAdaptiveQuantumPSOv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumPSOv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumPSOv2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumPSOv2" + ).set_name("LLAMAEnhancedAdaptiveQuantumPSOv2", register=True) +except Exception as e: # EnhancedAdaptiveQuantumPSOv2 print("EnhancedAdaptiveQuantumPSOv2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumParticleSwarmOptimization import EnhancedAdaptiveQuantumParticleSwarmOptimization - - lama_register["EnhancedAdaptiveQuantumParticleSwarmOptimization"] = EnhancedAdaptiveQuantumParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumParticleSwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumParticleSwarmOptimization import ( + EnhancedAdaptiveQuantumParticleSwarmOptimization, + ) + + lama_register["EnhancedAdaptiveQuantumParticleSwarmOptimization"] = ( + EnhancedAdaptiveQuantumParticleSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumParticleSwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveQuantumParticleSwarmOptimization print("EnhancedAdaptiveQuantumParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealing import EnhancedAdaptiveQuantumSimulatedAnnealing +try: # EnhancedAdaptiveQuantumSimulatedAnnealing + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealing import ( + EnhancedAdaptiveQuantumSimulatedAnnealing, + ) lama_register["EnhancedAdaptiveQuantumSimulatedAnnealing"] = EnhancedAdaptiveQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing").set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealing", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSimulatedAnnealing print("EnhancedAdaptiveQuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealingOptimized import EnhancedAdaptiveQuantumSimulatedAnnealingOptimized - - lama_register["EnhancedAdaptiveQuantumSimulatedAnnealingOptimized"] = EnhancedAdaptiveQuantumSimulatedAnnealingOptimized - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized").set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized", register=True) -except Exception as e: +try: # EnhancedAdaptiveQuantumSimulatedAnnealingOptimized + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSimulatedAnnealingOptimized import ( + EnhancedAdaptiveQuantumSimulatedAnnealingOptimized, + ) + + lama_register["EnhancedAdaptiveQuantumSimulatedAnnealingOptimized"] = ( + EnhancedAdaptiveQuantumSimulatedAnnealingOptimized + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized" + ).set_name("LLAMAEnhancedAdaptiveQuantumSimulatedAnnealingOptimized", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSimulatedAnnealingOptimized print("EnhancedAdaptiveQuantumSimulatedAnnealingOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimization import EnhancedAdaptiveQuantumSwarmOptimization +try: # EnhancedAdaptiveQuantumSwarmOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimization import ( + EnhancedAdaptiveQuantumSwarmOptimization, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimization"] = EnhancedAdaptiveQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimization print("EnhancedAdaptiveQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV10 import EnhancedAdaptiveQuantumSwarmOptimizationV10 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV10 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV10 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV10, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV10"] = EnhancedAdaptiveQuantumSwarmOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV10", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV10 print("EnhancedAdaptiveQuantumSwarmOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV11 import EnhancedAdaptiveQuantumSwarmOptimizationV11 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV11 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV11 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV11, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV11"] = EnhancedAdaptiveQuantumSwarmOptimizationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV11", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV11 print("EnhancedAdaptiveQuantumSwarmOptimizationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV12 import EnhancedAdaptiveQuantumSwarmOptimizationV12 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV12 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV12 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV12, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV12"] = EnhancedAdaptiveQuantumSwarmOptimizationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV12", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV12 print("EnhancedAdaptiveQuantumSwarmOptimizationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV13 import EnhancedAdaptiveQuantumSwarmOptimizationV13 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV13 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV13 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV13, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV13"] = EnhancedAdaptiveQuantumSwarmOptimizationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV13", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV13 print("EnhancedAdaptiveQuantumSwarmOptimizationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV14 import EnhancedAdaptiveQuantumSwarmOptimizationV14 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV14 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV14 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV14, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV14"] = EnhancedAdaptiveQuantumSwarmOptimizationV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV14", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV14 print("EnhancedAdaptiveQuantumSwarmOptimizationV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV15 import EnhancedAdaptiveQuantumSwarmOptimizationV15 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV15 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV15 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV15, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV15"] = EnhancedAdaptiveQuantumSwarmOptimizationV15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV15", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV15 print("EnhancedAdaptiveQuantumSwarmOptimizationV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV16 import EnhancedAdaptiveQuantumSwarmOptimizationV16 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV16 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV16 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV16, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV16"] = EnhancedAdaptiveQuantumSwarmOptimizationV16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV16", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV16 print("EnhancedAdaptiveQuantumSwarmOptimizationV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV17 import EnhancedAdaptiveQuantumSwarmOptimizationV17 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV17 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV17 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV17, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV17"] = EnhancedAdaptiveQuantumSwarmOptimizationV17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV17", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV17 print("EnhancedAdaptiveQuantumSwarmOptimizationV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV18 import EnhancedAdaptiveQuantumSwarmOptimizationV18 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV18 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV18 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV18, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV18"] = EnhancedAdaptiveQuantumSwarmOptimizationV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV18", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV18 print("EnhancedAdaptiveQuantumSwarmOptimizationV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV19 import EnhancedAdaptiveQuantumSwarmOptimizationV19 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV19 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV19 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV19, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV19"] = EnhancedAdaptiveQuantumSwarmOptimizationV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV19", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV19 print("EnhancedAdaptiveQuantumSwarmOptimizationV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV2 import EnhancedAdaptiveQuantumSwarmOptimizationV2 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV2 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV2, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV2"] = EnhancedAdaptiveQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV2 print("EnhancedAdaptiveQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV20 import EnhancedAdaptiveQuantumSwarmOptimizationV20 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV20 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV20 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV20, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV20"] = EnhancedAdaptiveQuantumSwarmOptimizationV20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV20", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV20 print("EnhancedAdaptiveQuantumSwarmOptimizationV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV21 import EnhancedAdaptiveQuantumSwarmOptimizationV21 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV21 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV21 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV21, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV21"] = EnhancedAdaptiveQuantumSwarmOptimizationV21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV21", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV21 print("EnhancedAdaptiveQuantumSwarmOptimizationV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV22 import EnhancedAdaptiveQuantumSwarmOptimizationV22 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV22 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV22 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV22, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV22"] = EnhancedAdaptiveQuantumSwarmOptimizationV22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV22", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV22 print("EnhancedAdaptiveQuantumSwarmOptimizationV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV23 import EnhancedAdaptiveQuantumSwarmOptimizationV23 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV23 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV23 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV23, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV23"] = EnhancedAdaptiveQuantumSwarmOptimizationV23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV23", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV23 print("EnhancedAdaptiveQuantumSwarmOptimizationV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV24 import EnhancedAdaptiveQuantumSwarmOptimizationV24 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV24 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV24 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV24, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV24"] = EnhancedAdaptiveQuantumSwarmOptimizationV24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV24", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV24 print("EnhancedAdaptiveQuantumSwarmOptimizationV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV25 import EnhancedAdaptiveQuantumSwarmOptimizationV25 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV25 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV25 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV25, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV25"] = EnhancedAdaptiveQuantumSwarmOptimizationV25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV25", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV25 print("EnhancedAdaptiveQuantumSwarmOptimizationV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV26 import EnhancedAdaptiveQuantumSwarmOptimizationV26 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV26 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV26 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV26, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV26"] = EnhancedAdaptiveQuantumSwarmOptimizationV26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV26", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV26 print("EnhancedAdaptiveQuantumSwarmOptimizationV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV27 import EnhancedAdaptiveQuantumSwarmOptimizationV27 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV27 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV27 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV27, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV27"] = EnhancedAdaptiveQuantumSwarmOptimizationV27 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV27", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV27 print("EnhancedAdaptiveQuantumSwarmOptimizationV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV28 import EnhancedAdaptiveQuantumSwarmOptimizationV28 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV28 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV28 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV28, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV28"] = EnhancedAdaptiveQuantumSwarmOptimizationV28 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV28", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV28 print("EnhancedAdaptiveQuantumSwarmOptimizationV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV29 import EnhancedAdaptiveQuantumSwarmOptimizationV29 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV29 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV29 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV29, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV29"] = EnhancedAdaptiveQuantumSwarmOptimizationV29 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV29", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV29 print("EnhancedAdaptiveQuantumSwarmOptimizationV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV3 import EnhancedAdaptiveQuantumSwarmOptimizationV3 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV3 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV3, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV3"] = EnhancedAdaptiveQuantumSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV3 print("EnhancedAdaptiveQuantumSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV30 import EnhancedAdaptiveQuantumSwarmOptimizationV30 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV30 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV30 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV30, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV30"] = EnhancedAdaptiveQuantumSwarmOptimizationV30 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV30", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV30 print("EnhancedAdaptiveQuantumSwarmOptimizationV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV31 import EnhancedAdaptiveQuantumSwarmOptimizationV31 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV31 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV31 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV31, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV31"] = EnhancedAdaptiveQuantumSwarmOptimizationV31 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV31", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV31 print("EnhancedAdaptiveQuantumSwarmOptimizationV31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV4 import EnhancedAdaptiveQuantumSwarmOptimizationV4 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV4 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV4, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV4"] = EnhancedAdaptiveQuantumSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV4 print("EnhancedAdaptiveQuantumSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV5 import EnhancedAdaptiveQuantumSwarmOptimizationV5 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV5 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV5, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV5"] = EnhancedAdaptiveQuantumSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV5 print("EnhancedAdaptiveQuantumSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV6 import EnhancedAdaptiveQuantumSwarmOptimizationV6 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV6 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV6, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV6"] = EnhancedAdaptiveQuantumSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV6 print("EnhancedAdaptiveQuantumSwarmOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV7 import EnhancedAdaptiveQuantumSwarmOptimizationV7 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV7 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV7, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV7"] = EnhancedAdaptiveQuantumSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV7 print("EnhancedAdaptiveQuantumSwarmOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV8 import EnhancedAdaptiveQuantumSwarmOptimizationV8 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV8 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV8 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV8, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV8"] = EnhancedAdaptiveQuantumSwarmOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV8", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV8 print("EnhancedAdaptiveQuantumSwarmOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV9 import EnhancedAdaptiveQuantumSwarmOptimizationV9 +try: # EnhancedAdaptiveQuantumSwarmOptimizationV9 + from nevergrad.optimization.lama.EnhancedAdaptiveQuantumSwarmOptimizationV9 import ( + EnhancedAdaptiveQuantumSwarmOptimizationV9, + ) lama_register["EnhancedAdaptiveQuantumSwarmOptimizationV9"] = EnhancedAdaptiveQuantumSwarmOptimizationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedAdaptiveQuantumSwarmOptimizationV9", register=True) +except Exception as e: # EnhancedAdaptiveQuantumSwarmOptimizationV9 print("EnhancedAdaptiveQuantumSwarmOptimizationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSinusoidalDifferentialSwarm import EnhancedAdaptiveSinusoidalDifferentialSwarm +try: # EnhancedAdaptiveSinusoidalDifferentialSwarm + from nevergrad.optimization.lama.EnhancedAdaptiveSinusoidalDifferentialSwarm import ( + EnhancedAdaptiveSinusoidalDifferentialSwarm, + ) lama_register["EnhancedAdaptiveSinusoidalDifferentialSwarm"] = EnhancedAdaptiveSinusoidalDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAEnhancedAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: # EnhancedAdaptiveSinusoidalDifferentialSwarm print("EnhancedAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 import EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 - - lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26"] = EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26").set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26", register=True) -except Exception as e: +try: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 + from nevergrad.optimization.lama.EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 import ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26, + ) + + lama_register["EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26"] = ( + EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26" + ).set_name("LLAMAEnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26", register=True) +except Exception as e: # EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 print("EnhancedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_Refined_V26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveSwarmHarmonicOptimization import EnhancedAdaptiveSwarmHarmonicOptimization +try: # EnhancedAdaptiveSwarmHarmonicOptimization + from nevergrad.optimization.lama.EnhancedAdaptiveSwarmHarmonicOptimization import ( + EnhancedAdaptiveSwarmHarmonicOptimization, + ) lama_register["EnhancedAdaptiveSwarmHarmonicOptimization"] = EnhancedAdaptiveSwarmHarmonicOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveSwarmHarmonicOptimization = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization").set_name("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveSwarmHarmonicOptimization = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveSwarmHarmonicOptimization" + ).set_name("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization", register=True) +except Exception as e: # EnhancedAdaptiveSwarmHarmonicOptimization print("EnhancedAdaptiveSwarmHarmonicOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearch import EnhancedAdaptiveTabuHarmonySearch +try: # EnhancedAdaptiveTabuHarmonySearch + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearch import ( + EnhancedAdaptiveTabuHarmonySearch, + ) lama_register["EnhancedAdaptiveTabuHarmonySearch"] = EnhancedAdaptiveTabuHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveTabuHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearch").set_name("LLAMAEnhancedAdaptiveTabuHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveTabuHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveTabuHarmonySearch" + ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearch", register=True) +except Exception as e: # EnhancedAdaptiveTabuHarmonySearch print("EnhancedAdaptiveTabuHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearchV2 import EnhancedAdaptiveTabuHarmonySearchV2 +try: # EnhancedAdaptiveTabuHarmonySearchV2 + from nevergrad.optimization.lama.EnhancedAdaptiveTabuHarmonySearchV2 import ( + EnhancedAdaptiveTabuHarmonySearchV2, + ) lama_register["EnhancedAdaptiveTabuHarmonySearchV2"] = EnhancedAdaptiveTabuHarmonySearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdaptiveTabuHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2").set_name("LLAMAEnhancedAdaptiveTabuHarmonySearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdaptiveTabuHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdaptiveTabuHarmonySearchV2" + ).set_name("LLAMAEnhancedAdaptiveTabuHarmonySearchV2", register=True) +except Exception as e: # EnhancedAdaptiveTabuHarmonySearchV2 print("EnhancedAdaptiveTabuHarmonySearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedAdaptiveFireworkAlgorithm import EnhancedAdvancedAdaptiveFireworkAlgorithm +try: # EnhancedAdvancedAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedAdvancedAdaptiveFireworkAlgorithm import ( + EnhancedAdvancedAdaptiveFireworkAlgorithm, + ) lama_register["EnhancedAdvancedAdaptiveFireworkAlgorithm"] = EnhancedAdvancedAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # EnhancedAdvancedAdaptiveFireworkAlgorithm print("EnhancedAdvancedAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 import EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 - - lama_register["EnhancedAdvancedDifferentialEvolutionLocalSearch_v56"] = EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56").set_name("LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56", register=True) -except Exception as e: +try: # EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 + from nevergrad.optimization.lama.EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 import ( + EnhancedAdvancedDifferentialEvolutionLocalSearch_v56, + ) + + lama_register["EnhancedAdvancedDifferentialEvolutionLocalSearch_v56"] = ( + EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56" + ).set_name("LLAMAEnhancedAdvancedDifferentialEvolutionLocalSearch_v56", register=True) +except Exception as e: # EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 print("EnhancedAdvancedDifferentialEvolutionLocalSearch_v56 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridDifferentialEvolutionV4 import EnhancedAdvancedHybridDifferentialEvolutionV4 - - lama_register["EnhancedAdvancedHybridDifferentialEvolutionV4"] = EnhancedAdvancedHybridDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4").set_name("LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4", register=True) -except Exception as e: +try: # EnhancedAdvancedHybridDifferentialEvolutionV4 + from nevergrad.optimization.lama.EnhancedAdvancedHybridDifferentialEvolutionV4 import ( + EnhancedAdvancedHybridDifferentialEvolutionV4, + ) + + lama_register["EnhancedAdvancedHybridDifferentialEvolutionV4"] = ( + EnhancedAdvancedHybridDifferentialEvolutionV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedAdvancedHybridDifferentialEvolutionV4", register=True) +except Exception as e: # EnhancedAdvancedHybridDifferentialEvolutionV4 print("EnhancedAdvancedHybridDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV17 import EnhancedAdvancedHybridMetaHeuristicOptimizerV17 - - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV17"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV17 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17", register=True) -except Exception as e: +try: # EnhancedAdvancedHybridMetaHeuristicOptimizerV17 + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV17 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV17, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV17"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV17 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17", register=True) +except Exception as e: # EnhancedAdvancedHybridMetaHeuristicOptimizerV17 print("EnhancedAdvancedHybridMetaHeuristicOptimizerV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV18 import EnhancedAdvancedHybridMetaHeuristicOptimizerV18 - - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV18"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV18 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18", register=True) -except Exception as e: +try: # EnhancedAdvancedHybridMetaHeuristicOptimizerV18 + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV18 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV18, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV18"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV18 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18", register=True) +except Exception as e: # EnhancedAdvancedHybridMetaHeuristicOptimizerV18 print("EnhancedAdvancedHybridMetaHeuristicOptimizerV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV19 import EnhancedAdvancedHybridMetaHeuristicOptimizerV19 - - lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV19"] = EnhancedAdvancedHybridMetaHeuristicOptimizerV19 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19").set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19", register=True) -except Exception as e: +try: # EnhancedAdvancedHybridMetaHeuristicOptimizerV19 + from nevergrad.optimization.lama.EnhancedAdvancedHybridMetaHeuristicOptimizerV19 import ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV19, + ) + + lama_register["EnhancedAdvancedHybridMetaHeuristicOptimizerV19"] = ( + EnhancedAdvancedHybridMetaHeuristicOptimizerV19 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19" + ).set_name("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19", register=True) +except Exception as e: # EnhancedAdvancedHybridMetaHeuristicOptimizerV19 print("EnhancedAdvancedHybridMetaHeuristicOptimizerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer import EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer - - lama_register["EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer"] = EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer").set_name("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer", register=True) -except Exception as e: +try: # EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer + from nevergrad.optimization.lama.EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer import ( + EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer, + ) + + lama_register["EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer"] = ( + EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer", register=True) +except Exception as e: # EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer print("EnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV1 import EnhancedAdvancedQuantumSwarmOptimizationV1 +try: # EnhancedAdvancedQuantumSwarmOptimizationV1 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV1 import ( + EnhancedAdvancedQuantumSwarmOptimizationV1, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV1"] = EnhancedAdvancedQuantumSwarmOptimizationV1 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV1", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV1 print("EnhancedAdvancedQuantumSwarmOptimizationV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV10 import EnhancedAdvancedQuantumSwarmOptimizationV10 +try: # EnhancedAdvancedQuantumSwarmOptimizationV10 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV10 import ( + EnhancedAdvancedQuantumSwarmOptimizationV10, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV10"] = EnhancedAdvancedQuantumSwarmOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV10", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV10 print("EnhancedAdvancedQuantumSwarmOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV11 import EnhancedAdvancedQuantumSwarmOptimizationV11 +try: # EnhancedAdvancedQuantumSwarmOptimizationV11 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV11 import ( + EnhancedAdvancedQuantumSwarmOptimizationV11, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV11"] = EnhancedAdvancedQuantumSwarmOptimizationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV11", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV11 print("EnhancedAdvancedQuantumSwarmOptimizationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV12 import EnhancedAdvancedQuantumSwarmOptimizationV12 +try: # EnhancedAdvancedQuantumSwarmOptimizationV12 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV12 import ( + EnhancedAdvancedQuantumSwarmOptimizationV12, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV12"] = EnhancedAdvancedQuantumSwarmOptimizationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV12", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV12 print("EnhancedAdvancedQuantumSwarmOptimizationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV13 import EnhancedAdvancedQuantumSwarmOptimizationV13 +try: # EnhancedAdvancedQuantumSwarmOptimizationV13 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV13 import ( + EnhancedAdvancedQuantumSwarmOptimizationV13, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV13"] = EnhancedAdvancedQuantumSwarmOptimizationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV13", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV13 print("EnhancedAdvancedQuantumSwarmOptimizationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV14 import EnhancedAdvancedQuantumSwarmOptimizationV14 +try: # EnhancedAdvancedQuantumSwarmOptimizationV14 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV14 import ( + EnhancedAdvancedQuantumSwarmOptimizationV14, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV14"] = EnhancedAdvancedQuantumSwarmOptimizationV14 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV14", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV14 print("EnhancedAdvancedQuantumSwarmOptimizationV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV2 import EnhancedAdvancedQuantumSwarmOptimizationV2 +try: # EnhancedAdvancedQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV2 import ( + EnhancedAdvancedQuantumSwarmOptimizationV2, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV2"] = EnhancedAdvancedQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV2 print("EnhancedAdvancedQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV3 import EnhancedAdvancedQuantumSwarmOptimizationV3 +try: # EnhancedAdvancedQuantumSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV3 import ( + EnhancedAdvancedQuantumSwarmOptimizationV3, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV3"] = EnhancedAdvancedQuantumSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV3 print("EnhancedAdvancedQuantumSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV4 import EnhancedAdvancedQuantumSwarmOptimizationV4 +try: # EnhancedAdvancedQuantumSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV4 import ( + EnhancedAdvancedQuantumSwarmOptimizationV4, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV4"] = EnhancedAdvancedQuantumSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV4 print("EnhancedAdvancedQuantumSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV5 import EnhancedAdvancedQuantumSwarmOptimizationV5 +try: # EnhancedAdvancedQuantumSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV5 import ( + EnhancedAdvancedQuantumSwarmOptimizationV5, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV5"] = EnhancedAdvancedQuantumSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV5 print("EnhancedAdvancedQuantumSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV6 import EnhancedAdvancedQuantumSwarmOptimizationV6 +try: # EnhancedAdvancedQuantumSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV6 import ( + EnhancedAdvancedQuantumSwarmOptimizationV6, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV6"] = EnhancedAdvancedQuantumSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV6 print("EnhancedAdvancedQuantumSwarmOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV7 import EnhancedAdvancedQuantumSwarmOptimizationV7 +try: # EnhancedAdvancedQuantumSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV7 import ( + EnhancedAdvancedQuantumSwarmOptimizationV7, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV7"] = EnhancedAdvancedQuantumSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV7 print("EnhancedAdvancedQuantumSwarmOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV8 import EnhancedAdvancedQuantumSwarmOptimizationV8 +try: # EnhancedAdvancedQuantumSwarmOptimizationV8 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV8 import ( + EnhancedAdvancedQuantumSwarmOptimizationV8, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV8"] = EnhancedAdvancedQuantumSwarmOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV8", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV8 print("EnhancedAdvancedQuantumSwarmOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV9 import EnhancedAdvancedQuantumSwarmOptimizationV9 +try: # EnhancedAdvancedQuantumSwarmOptimizationV9 + from nevergrad.optimization.lama.EnhancedAdvancedQuantumSwarmOptimizationV9 import ( + EnhancedAdvancedQuantumSwarmOptimizationV9, + ) lama_register["EnhancedAdvancedQuantumSwarmOptimizationV9"] = EnhancedAdvancedQuantumSwarmOptimizationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedAdvancedQuantumSwarmOptimizationV9", register=True) +except Exception as e: # EnhancedAdvancedQuantumSwarmOptimizationV9 print("EnhancedAdvancedQuantumSwarmOptimizationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 import EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 - - lama_register["EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78"] = EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78").set_name("LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78", register=True) -except Exception as e: +try: # EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 + from nevergrad.optimization.lama.EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 import ( + EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78, + ) + + lama_register["EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78"] = ( + EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78" + ).set_name("LLAMAEnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78", register=True) +except Exception as e: # EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 print("EnhancedAdvancedRefinedUltimateGuidedMassQGSA_v78 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedAdvancedUltimateGuidedMassQGSA_v79 import EnhancedAdvancedUltimateGuidedMassQGSA_v79 +try: # EnhancedAdvancedUltimateGuidedMassQGSA_v79 + from nevergrad.optimization.lama.EnhancedAdvancedUltimateGuidedMassQGSA_v79 import ( + EnhancedAdvancedUltimateGuidedMassQGSA_v79, + ) lama_register["EnhancedAdvancedUltimateGuidedMassQGSA_v79"] = EnhancedAdvancedUltimateGuidedMassQGSA_v79 - res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79 = NonObjectOptimizer(method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79").set_name("LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79 = NonObjectOptimizer( + method="LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79" + ).set_name("LLAMAEnhancedAdvancedUltimateGuidedMassQGSA_v79", register=True) +except Exception as e: # EnhancedAdvancedUltimateGuidedMassQGSA_v79 print("EnhancedAdvancedUltimateGuidedMassQGSA_v79 can not be imported: ", e) -try: +try: # EnhancedArchiveDE from nevergrad.optimization.lama.EnhancedArchiveDE import EnhancedArchiveDE lama_register["EnhancedArchiveDE"] = EnhancedArchiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedArchiveDE = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE").set_name("LLAMAEnhancedArchiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedArchiveDE = NonObjectOptimizer(method="LLAMAEnhancedArchiveDE").set_name( + "LLAMAEnhancedArchiveDE", register=True + ) +except Exception as e: # EnhancedArchiveDE print("EnhancedArchiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedBalancedDualStrategyAdaptiveDE import EnhancedBalancedDualStrategyAdaptiveDE +try: # EnhancedBalancedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.EnhancedBalancedDualStrategyAdaptiveDE import ( + EnhancedBalancedDualStrategyAdaptiveDE, + ) lama_register["EnhancedBalancedDualStrategyAdaptiveDE"] = EnhancedBalancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE").set_name("LLAMAEnhancedBalancedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # EnhancedBalancedDualStrategyAdaptiveDE print("EnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) -try: +try: # EnhancedCMAES from nevergrad.optimization.lama.EnhancedCMAES import EnhancedCMAES lama_register["EnhancedCMAES"] = EnhancedCMAES - res = NonObjectOptimizer(method="LLAMAEnhancedCMAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCMAES = NonObjectOptimizer(method="LLAMAEnhancedCMAES").set_name("LLAMAEnhancedCMAES", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCMAES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCMAES = NonObjectOptimizer(method="LLAMAEnhancedCMAES").set_name( + "LLAMAEnhancedCMAES", register=True + ) +except Exception as e: # EnhancedCMAES print("EnhancedCMAES can not be imported: ", e) -try: +try: # EnhancedCMAESv2 from nevergrad.optimization.lama.EnhancedCMAESv2 import EnhancedCMAESv2 lama_register["EnhancedCMAESv2"] = EnhancedCMAESv2 - res = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCMAESv2 = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2").set_name("LLAMAEnhancedCMAESv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCMAESv2 = NonObjectOptimizer(method="LLAMAEnhancedCMAESv2").set_name( + "LLAMAEnhancedCMAESv2", register=True + ) +except Exception as e: # EnhancedCMAESv2 print("EnhancedCMAESv2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedChaoticFireworksOptimization import EnhancedChaoticFireworksOptimization +try: # EnhancedChaoticFireworksOptimization + from nevergrad.optimization.lama.EnhancedChaoticFireworksOptimization import ( + EnhancedChaoticFireworksOptimization, + ) lama_register["EnhancedChaoticFireworksOptimization"] = EnhancedChaoticFireworksOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedChaoticFireworksOptimization = NonObjectOptimizer(method="LLAMAEnhancedChaoticFireworksOptimization").set_name("LLAMAEnhancedChaoticFireworksOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedChaoticFireworksOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedChaoticFireworksOptimization = NonObjectOptimizer( + method="LLAMAEnhancedChaoticFireworksOptimization" + ).set_name("LLAMAEnhancedChaoticFireworksOptimization", register=True) +except Exception as e: # EnhancedChaoticFireworksOptimization print("EnhancedChaoticFireworksOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedClusterDifferentialCrossover import EnhancedClusterDifferentialCrossover +try: # EnhancedClusterDifferentialCrossover + from nevergrad.optimization.lama.EnhancedClusterDifferentialCrossover import ( + EnhancedClusterDifferentialCrossover, + ) lama_register["EnhancedClusterDifferentialCrossover"] = EnhancedClusterDifferentialCrossover - res = NonObjectOptimizer(method="LLAMAEnhancedClusterDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedClusterDifferentialCrossover = NonObjectOptimizer(method="LLAMAEnhancedClusterDifferentialCrossover").set_name("LLAMAEnhancedClusterDifferentialCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedClusterDifferentialCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedClusterDifferentialCrossover = NonObjectOptimizer( + method="LLAMAEnhancedClusterDifferentialCrossover" + ).set_name("LLAMAEnhancedClusterDifferentialCrossover", register=True) +except Exception as e: # EnhancedClusterDifferentialCrossover print("EnhancedClusterDifferentialCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedClusteredDifferentialEvolution import EnhancedClusteredDifferentialEvolution +try: # EnhancedClusteredDifferentialEvolution + from nevergrad.optimization.lama.EnhancedClusteredDifferentialEvolution import ( + EnhancedClusteredDifferentialEvolution, + ) lama_register["EnhancedClusteredDifferentialEvolution"] = EnhancedClusteredDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedClusteredDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedClusteredDifferentialEvolution").set_name("LLAMAEnhancedClusteredDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedClusteredDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedClusteredDifferentialEvolution" + ).set_name("LLAMAEnhancedClusteredDifferentialEvolution", register=True) +except Exception as e: # EnhancedClusteredDifferentialEvolution print("EnhancedClusteredDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedConvergenceAcceleratedSpiralSearch import EnhancedConvergenceAcceleratedSpiralSearch +try: # EnhancedConvergenceAcceleratedSpiralSearch + from nevergrad.optimization.lama.EnhancedConvergenceAcceleratedSpiralSearch import ( + EnhancedConvergenceAcceleratedSpiralSearch, + ) lama_register["EnhancedConvergenceAcceleratedSpiralSearch"] = EnhancedConvergenceAcceleratedSpiralSearch - res = NonObjectOptimizer(method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedConvergenceAcceleratedSpiralSearch = NonObjectOptimizer(method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch").set_name("LLAMAEnhancedConvergenceAcceleratedSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedConvergenceAcceleratedSpiralSearch = NonObjectOptimizer( + method="LLAMAEnhancedConvergenceAcceleratedSpiralSearch" + ).set_name("LLAMAEnhancedConvergenceAcceleratedSpiralSearch", register=True) +except Exception as e: # EnhancedConvergenceAcceleratedSpiralSearch print("EnhancedConvergenceAcceleratedSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolution import EnhancedConvergentDifferentialEvolution +try: # EnhancedConvergentDifferentialEvolution + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolution import ( + EnhancedConvergentDifferentialEvolution, + ) lama_register["EnhancedConvergentDifferentialEvolution"] = EnhancedConvergentDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedConvergentDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolution").set_name("LLAMAEnhancedConvergentDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedConvergentDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolution" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolution", register=True) +except Exception as e: # EnhancedConvergentDifferentialEvolution print("EnhancedConvergentDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV2 import EnhancedConvergentDifferentialEvolutionV2 +try: # EnhancedConvergentDifferentialEvolutionV2 + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV2 import ( + EnhancedConvergentDifferentialEvolutionV2, + ) lama_register["EnhancedConvergentDifferentialEvolutionV2"] = EnhancedConvergentDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedConvergentDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV2").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedConvergentDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV2", register=True) +except Exception as e: # EnhancedConvergentDifferentialEvolutionV2 print("EnhancedConvergentDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV3 import EnhancedConvergentDifferentialEvolutionV3 +try: # EnhancedConvergentDifferentialEvolutionV3 + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV3 import ( + EnhancedConvergentDifferentialEvolutionV3, + ) lama_register["EnhancedConvergentDifferentialEvolutionV3"] = EnhancedConvergentDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedConvergentDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV3").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedConvergentDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV3", register=True) +except Exception as e: # EnhancedConvergentDifferentialEvolutionV3 print("EnhancedConvergentDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV4 import EnhancedConvergentDifferentialEvolutionV4 +try: # EnhancedConvergentDifferentialEvolutionV4 + from nevergrad.optimization.lama.EnhancedConvergentDifferentialEvolutionV4 import ( + EnhancedConvergentDifferentialEvolutionV4, + ) lama_register["EnhancedConvergentDifferentialEvolutionV4"] = EnhancedConvergentDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedConvergentDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV4").set_name("LLAMAEnhancedConvergentDifferentialEvolutionV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedConvergentDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedConvergentDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedConvergentDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedConvergentDifferentialEvolutionV4", register=True) +except Exception as e: # EnhancedConvergentDifferentialEvolutionV4 print("EnhancedConvergentDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCooperativeCulturalDifferentialSearch import EnhancedCooperativeCulturalDifferentialSearch - - lama_register["EnhancedCooperativeCulturalDifferentialSearch"] = EnhancedCooperativeCulturalDifferentialSearch - res = NonObjectOptimizer(method="LLAMAEnhancedCooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCooperativeCulturalDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedCooperativeCulturalDifferentialSearch").set_name("LLAMAEnhancedCooperativeCulturalDifferentialSearch", register=True) -except Exception as e: +try: # EnhancedCooperativeCulturalDifferentialSearch + from nevergrad.optimization.lama.EnhancedCooperativeCulturalDifferentialSearch import ( + EnhancedCooperativeCulturalDifferentialSearch, + ) + + lama_register["EnhancedCooperativeCulturalDifferentialSearch"] = ( + EnhancedCooperativeCulturalDifferentialSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedCooperativeCulturalDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCooperativeCulturalDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedCooperativeCulturalDifferentialSearch" + ).set_name("LLAMAEnhancedCooperativeCulturalDifferentialSearch", register=True) +except Exception as e: # EnhancedCooperativeCulturalDifferentialSearch print("EnhancedCooperativeCulturalDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarm import EnhancedCosineAdaptiveDifferentialSwarm +try: # EnhancedCosineAdaptiveDifferentialSwarm + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarm import ( + EnhancedCosineAdaptiveDifferentialSwarm, + ) lama_register["EnhancedCosineAdaptiveDifferentialSwarm"] = EnhancedCosineAdaptiveDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm").set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAEnhancedCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: # EnhancedCosineAdaptiveDifferentialSwarm print("EnhancedCosineAdaptiveDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarmV2 import EnhancedCosineAdaptiveDifferentialSwarmV2 +try: # EnhancedCosineAdaptiveDifferentialSwarmV2 + from nevergrad.optimization.lama.EnhancedCosineAdaptiveDifferentialSwarmV2 import ( + EnhancedCosineAdaptiveDifferentialSwarmV2, + ) lama_register["EnhancedCosineAdaptiveDifferentialSwarmV2"] = EnhancedCosineAdaptiveDifferentialSwarmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2 = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2").set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2 = NonObjectOptimizer( + method="LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2" + ).set_name("LLAMAEnhancedCosineAdaptiveDifferentialSwarmV2", register=True) +except Exception as e: # EnhancedCosineAdaptiveDifferentialSwarmV2 print("EnhancedCosineAdaptiveDifferentialSwarmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCovarianceGradientSearchV2 import EnhancedCovarianceGradientSearchV2 +try: # EnhancedCovarianceGradientSearchV2 + from nevergrad.optimization.lama.EnhancedCovarianceGradientSearchV2 import ( + EnhancedCovarianceGradientSearchV2, + ) lama_register["EnhancedCovarianceGradientSearchV2"] = EnhancedCovarianceGradientSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceGradientSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCovarianceGradientSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedCovarianceGradientSearchV2").set_name("LLAMAEnhancedCovarianceGradientSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceGradientSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCovarianceGradientSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceGradientSearchV2" + ).set_name("LLAMAEnhancedCovarianceGradientSearchV2", register=True) +except Exception as e: # EnhancedCovarianceGradientSearchV2 print("EnhancedCovarianceGradientSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixAdaptation import EnhancedCovarianceMatrixAdaptation +try: # EnhancedCovarianceMatrixAdaptation + from nevergrad.optimization.lama.EnhancedCovarianceMatrixAdaptation import ( + EnhancedCovarianceMatrixAdaptation, + ) lama_register["EnhancedCovarianceMatrixAdaptation"] = EnhancedCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixAdaptation").set_name("LLAMAEnhancedCovarianceMatrixAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixAdaptation" + ).set_name("LLAMAEnhancedCovarianceMatrixAdaptation", register=True) +except Exception as e: # EnhancedCovarianceMatrixAdaptation print("EnhancedCovarianceMatrixAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolution import EnhancedCovarianceMatrixEvolution +try: # EnhancedCovarianceMatrixEvolution + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolution import ( + EnhancedCovarianceMatrixEvolution, + ) lama_register["EnhancedCovarianceMatrixEvolution"] = EnhancedCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolution").set_name("LLAMAEnhancedCovarianceMatrixEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedCovarianceMatrixEvolution", register=True) +except Exception as e: # EnhancedCovarianceMatrixEvolution print("EnhancedCovarianceMatrixEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolutionV2 import EnhancedCovarianceMatrixEvolutionV2 +try: # EnhancedCovarianceMatrixEvolutionV2 + from nevergrad.optimization.lama.EnhancedCovarianceMatrixEvolutionV2 import ( + EnhancedCovarianceMatrixEvolutionV2, + ) lama_register["EnhancedCovarianceMatrixEvolutionV2"] = EnhancedCovarianceMatrixEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCovarianceMatrixEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolutionV2").set_name("LLAMAEnhancedCovarianceMatrixEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCovarianceMatrixEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCovarianceMatrixEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedCovarianceMatrixEvolutionV2" + ).set_name("LLAMAEnhancedCovarianceMatrixEvolutionV2", register=True) +except Exception as e: # EnhancedCovarianceMatrixEvolutionV2 print("EnhancedCovarianceMatrixEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCrossoverElitistStrategyV9 import EnhancedCrossoverElitistStrategyV9 +try: # EnhancedCrossoverElitistStrategyV9 + from nevergrad.optimization.lama.EnhancedCrossoverElitistStrategyV9 import ( + EnhancedCrossoverElitistStrategyV9, + ) lama_register["EnhancedCrossoverElitistStrategyV9"] = EnhancedCrossoverElitistStrategyV9 - res = NonObjectOptimizer(method="LLAMAEnhancedCrossoverElitistStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCrossoverElitistStrategyV9 = NonObjectOptimizer(method="LLAMAEnhancedCrossoverElitistStrategyV9").set_name("LLAMAEnhancedCrossoverElitistStrategyV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCrossoverElitistStrategyV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCrossoverElitistStrategyV9 = NonObjectOptimizer( + method="LLAMAEnhancedCrossoverElitistStrategyV9" + ).set_name("LLAMAEnhancedCrossoverElitistStrategyV9", register=True) +except Exception as e: # EnhancedCrossoverElitistStrategyV9 print("EnhancedCrossoverElitistStrategyV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCrowdingMemoryHybridOptimizer import EnhancedCrowdingMemoryHybridOptimizer +try: # EnhancedCrowdingMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedCrowdingMemoryHybridOptimizer import ( + EnhancedCrowdingMemoryHybridOptimizer, + ) lama_register["EnhancedCrowdingMemoryHybridOptimizer"] = EnhancedCrowdingMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedCrowdingMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCrowdingMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedCrowdingMemoryHybridOptimizer").set_name("LLAMAEnhancedCrowdingMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCrowdingMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCrowdingMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedCrowdingMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedCrowdingMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedCrowdingMemoryHybridOptimizer print("EnhancedCrowdingMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCulturalAdaptiveDifferentialEvolution import EnhancedCulturalAdaptiveDifferentialEvolution - - lama_register["EnhancedCulturalAdaptiveDifferentialEvolution"] = EnhancedCulturalAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCulturalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedCulturalAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedCulturalAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedCulturalAdaptiveDifferentialEvolution import ( + EnhancedCulturalAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedCulturalAdaptiveDifferentialEvolution"] = ( + EnhancedCulturalAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCulturalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCulturalAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedCulturalAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedCulturalAdaptiveDifferentialEvolution print("EnhancedCulturalAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCulturalEvolutionaryOptimizer import EnhancedCulturalEvolutionaryOptimizer +try: # EnhancedCulturalEvolutionaryOptimizer + from nevergrad.optimization.lama.EnhancedCulturalEvolutionaryOptimizer import ( + EnhancedCulturalEvolutionaryOptimizer, + ) lama_register["EnhancedCulturalEvolutionaryOptimizer"] = EnhancedCulturalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCulturalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEnhancedCulturalEvolutionaryOptimizer").set_name("LLAMAEnhancedCulturalEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCulturalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedCulturalEvolutionaryOptimizer" + ).set_name("LLAMAEnhancedCulturalEvolutionaryOptimizer", register=True) +except Exception as e: # EnhancedCulturalEvolutionaryOptimizer print("EnhancedCulturalEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedCulturalMemeticDifferentialEvolution import EnhancedCulturalMemeticDifferentialEvolution - - lama_register["EnhancedCulturalMemeticDifferentialEvolution"] = EnhancedCulturalMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedCulturalMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedCulturalMemeticDifferentialEvolution").set_name("LLAMAEnhancedCulturalMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedCulturalMemeticDifferentialEvolution + from nevergrad.optimization.lama.EnhancedCulturalMemeticDifferentialEvolution import ( + EnhancedCulturalMemeticDifferentialEvolution, + ) + + lama_register["EnhancedCulturalMemeticDifferentialEvolution"] = ( + EnhancedCulturalMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedCulturalMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedCulturalMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedCulturalMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedCulturalMemeticDifferentialEvolution", register=True) +except Exception as e: # EnhancedCulturalMemeticDifferentialEvolution print("EnhancedCulturalMemeticDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedDifferentialEvolution from nevergrad.optimization.lama.EnhancedDifferentialEvolution import EnhancedDifferentialEvolution lama_register["EnhancedDifferentialEvolution"] = EnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolution").set_name("LLAMAEnhancedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolution" + ).set_name("LLAMAEnhancedDifferentialEvolution", register=True) +except Exception as e: # EnhancedDifferentialEvolution print("EnhancedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptivePSO import EnhancedDifferentialEvolutionAdaptivePSO +try: # EnhancedDifferentialEvolutionAdaptivePSO + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptivePSO import ( + EnhancedDifferentialEvolutionAdaptivePSO, + ) lama_register["EnhancedDifferentialEvolutionAdaptivePSO"] = EnhancedDifferentialEvolutionAdaptivePSO - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO").set_name("LLAMAEnhancedDifferentialEvolutionAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionAdaptivePSO" + ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptivePSO", register=True) +except Exception as e: # EnhancedDifferentialEvolutionAdaptivePSO print("EnhancedDifferentialEvolutionAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptiveStrategy import EnhancedDifferentialEvolutionAdaptiveStrategy - - lama_register["EnhancedDifferentialEvolutionAdaptiveStrategy"] = EnhancedDifferentialEvolutionAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy").set_name("LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionAdaptiveStrategy + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionAdaptiveStrategy import ( + EnhancedDifferentialEvolutionAdaptiveStrategy, + ) + + lama_register["EnhancedDifferentialEvolutionAdaptiveStrategy"] = ( + EnhancedDifferentialEvolutionAdaptiveStrategy + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy" + ).set_name("LLAMAEnhancedDifferentialEvolutionAdaptiveStrategy", register=True) +except Exception as e: # EnhancedDifferentialEvolutionAdaptiveStrategy print("EnhancedDifferentialEvolutionAdaptiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionFireworkAlgorithm import EnhancedDifferentialEvolutionFireworkAlgorithm - - lama_register["EnhancedDifferentialEvolutionFireworkAlgorithm"] = EnhancedDifferentialEvolutionFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm").set_name("LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionFireworkAlgorithm import ( + EnhancedDifferentialEvolutionFireworkAlgorithm, + ) + + lama_register["EnhancedDifferentialEvolutionFireworkAlgorithm"] = ( + EnhancedDifferentialEvolutionFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm" + ).set_name("LLAMAEnhancedDifferentialEvolutionFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDifferentialEvolutionFireworkAlgorithm print("EnhancedDifferentialEvolutionFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v15 import EnhancedDifferentialEvolutionLSRefinement_v15 - - lama_register["EnhancedDifferentialEvolutionLSRefinement_v15"] = EnhancedDifferentialEvolutionLSRefinement_v15 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLSRefinement_v15 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v15", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLSRefinement_v15 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v15 import ( + EnhancedDifferentialEvolutionLSRefinement_v15, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v15"] = ( + EnhancedDifferentialEvolutionLSRefinement_v15 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v15 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v15" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v15", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLSRefinement_v15 print("EnhancedDifferentialEvolutionLSRefinement_v15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v16 import EnhancedDifferentialEvolutionLSRefinement_v16 - - lama_register["EnhancedDifferentialEvolutionLSRefinement_v16"] = EnhancedDifferentialEvolutionLSRefinement_v16 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLSRefinement_v16 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v16", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLSRefinement_v16 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v16 import ( + EnhancedDifferentialEvolutionLSRefinement_v16, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v16"] = ( + EnhancedDifferentialEvolutionLSRefinement_v16 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v16 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v16" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v16", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLSRefinement_v16 print("EnhancedDifferentialEvolutionLSRefinement_v16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v17 import EnhancedDifferentialEvolutionLSRefinement_v17 - - lama_register["EnhancedDifferentialEvolutionLSRefinement_v17"] = EnhancedDifferentialEvolutionLSRefinement_v17 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLSRefinement_v17 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v17", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLSRefinement_v17 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v17 import ( + EnhancedDifferentialEvolutionLSRefinement_v17, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v17"] = ( + EnhancedDifferentialEvolutionLSRefinement_v17 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v17 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v17" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v17", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLSRefinement_v17 print("EnhancedDifferentialEvolutionLSRefinement_v17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v18 import EnhancedDifferentialEvolutionLSRefinement_v18 - - lama_register["EnhancedDifferentialEvolutionLSRefinement_v18"] = EnhancedDifferentialEvolutionLSRefinement_v18 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLSRefinement_v18 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v18", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLSRefinement_v18 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v18 import ( + EnhancedDifferentialEvolutionLSRefinement_v18, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v18"] = ( + EnhancedDifferentialEvolutionLSRefinement_v18 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v18 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v18" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v18", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLSRefinement_v18 print("EnhancedDifferentialEvolutionLSRefinement_v18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v19 import EnhancedDifferentialEvolutionLSRefinement_v19 - - lama_register["EnhancedDifferentialEvolutionLSRefinement_v19"] = EnhancedDifferentialEvolutionLSRefinement_v19 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLSRefinement_v19 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19").set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v19", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLSRefinement_v19 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLSRefinement_v19 import ( + EnhancedDifferentialEvolutionLSRefinement_v19, + ) + + lama_register["EnhancedDifferentialEvolutionLSRefinement_v19"] = ( + EnhancedDifferentialEvolutionLSRefinement_v19 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLSRefinement_v19 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLSRefinement_v19" + ).set_name("LLAMAEnhancedDifferentialEvolutionLSRefinement_v19", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLSRefinement_v19 print("EnhancedDifferentialEvolutionLSRefinement_v19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v21 import EnhancedDifferentialEvolutionLocalSearch_v21 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v21"] = EnhancedDifferentialEvolutionLocalSearch_v21 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v21 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v21", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v21 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v21 import ( + EnhancedDifferentialEvolutionLocalSearch_v21, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v21"] = ( + EnhancedDifferentialEvolutionLocalSearch_v21 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v21 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v21" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v21", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v21 print("EnhancedDifferentialEvolutionLocalSearch_v21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v22 import EnhancedDifferentialEvolutionLocalSearch_v22 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v22"] = EnhancedDifferentialEvolutionLocalSearch_v22 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v22 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v22", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v22 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v22 import ( + EnhancedDifferentialEvolutionLocalSearch_v22, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v22"] = ( + EnhancedDifferentialEvolutionLocalSearch_v22 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v22 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v22" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v22", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v22 print("EnhancedDifferentialEvolutionLocalSearch_v22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v23 import EnhancedDifferentialEvolutionLocalSearch_v23 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v23"] = EnhancedDifferentialEvolutionLocalSearch_v23 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v23 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v23", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v23 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v23 import ( + EnhancedDifferentialEvolutionLocalSearch_v23, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v23"] = ( + EnhancedDifferentialEvolutionLocalSearch_v23 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v23 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v23" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v23", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v23 print("EnhancedDifferentialEvolutionLocalSearch_v23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v24 import EnhancedDifferentialEvolutionLocalSearch_v24 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v24"] = EnhancedDifferentialEvolutionLocalSearch_v24 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v24 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v24", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v24 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v24 import ( + EnhancedDifferentialEvolutionLocalSearch_v24, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v24"] = ( + EnhancedDifferentialEvolutionLocalSearch_v24 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v24 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v24" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v24", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v24 print("EnhancedDifferentialEvolutionLocalSearch_v24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v25 import EnhancedDifferentialEvolutionLocalSearch_v25 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v25"] = EnhancedDifferentialEvolutionLocalSearch_v25 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v25 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v25", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v25 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v25 import ( + EnhancedDifferentialEvolutionLocalSearch_v25, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v25"] = ( + EnhancedDifferentialEvolutionLocalSearch_v25 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v25 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v25" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v25", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v25 print("EnhancedDifferentialEvolutionLocalSearch_v25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v26 import EnhancedDifferentialEvolutionLocalSearch_v26 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v26"] = EnhancedDifferentialEvolutionLocalSearch_v26 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v26 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v26", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v26 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v26 import ( + EnhancedDifferentialEvolutionLocalSearch_v26, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v26"] = ( + EnhancedDifferentialEvolutionLocalSearch_v26 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v26 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v26" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v26", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v26 print("EnhancedDifferentialEvolutionLocalSearch_v26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v27 import EnhancedDifferentialEvolutionLocalSearch_v27 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v27"] = EnhancedDifferentialEvolutionLocalSearch_v27 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v27 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v27", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v27 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v27 import ( + EnhancedDifferentialEvolutionLocalSearch_v27, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v27"] = ( + EnhancedDifferentialEvolutionLocalSearch_v27 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v27 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v27" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v27", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v27 print("EnhancedDifferentialEvolutionLocalSearch_v27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v28 import EnhancedDifferentialEvolutionLocalSearch_v28 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v28"] = EnhancedDifferentialEvolutionLocalSearch_v28 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v28 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v28", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v28 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v28 import ( + EnhancedDifferentialEvolutionLocalSearch_v28, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v28"] = ( + EnhancedDifferentialEvolutionLocalSearch_v28 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v28 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v28" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v28", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v28 print("EnhancedDifferentialEvolutionLocalSearch_v28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v29 import EnhancedDifferentialEvolutionLocalSearch_v29 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v29"] = EnhancedDifferentialEvolutionLocalSearch_v29 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v29 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v29", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v29 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v29 import ( + EnhancedDifferentialEvolutionLocalSearch_v29, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v29"] = ( + EnhancedDifferentialEvolutionLocalSearch_v29 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v29 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v29" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v29", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v29 print("EnhancedDifferentialEvolutionLocalSearch_v29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v30 import EnhancedDifferentialEvolutionLocalSearch_v30 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v30"] = EnhancedDifferentialEvolutionLocalSearch_v30 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v30 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v30", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v30 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v30 import ( + EnhancedDifferentialEvolutionLocalSearch_v30, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v30"] = ( + EnhancedDifferentialEvolutionLocalSearch_v30 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v30 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v30" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v30", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v30 print("EnhancedDifferentialEvolutionLocalSearch_v30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v31 import EnhancedDifferentialEvolutionLocalSearch_v31 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v31"] = EnhancedDifferentialEvolutionLocalSearch_v31 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v31 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v31", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v31 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v31 import ( + EnhancedDifferentialEvolutionLocalSearch_v31, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v31"] = ( + EnhancedDifferentialEvolutionLocalSearch_v31 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v31 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v31" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v31", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v31 print("EnhancedDifferentialEvolutionLocalSearch_v31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v32 import EnhancedDifferentialEvolutionLocalSearch_v32 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v32"] = EnhancedDifferentialEvolutionLocalSearch_v32 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v32 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v32", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v32 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v32 import ( + EnhancedDifferentialEvolutionLocalSearch_v32, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v32"] = ( + EnhancedDifferentialEvolutionLocalSearch_v32 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v32 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v32" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v32", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v32 print("EnhancedDifferentialEvolutionLocalSearch_v32 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v33 import EnhancedDifferentialEvolutionLocalSearch_v33 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v33"] = EnhancedDifferentialEvolutionLocalSearch_v33 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v33 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v33", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v33 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v33 import ( + EnhancedDifferentialEvolutionLocalSearch_v33, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v33"] = ( + EnhancedDifferentialEvolutionLocalSearch_v33 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v33 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v33" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v33", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v33 print("EnhancedDifferentialEvolutionLocalSearch_v33 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v34 import EnhancedDifferentialEvolutionLocalSearch_v34 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v34"] = EnhancedDifferentialEvolutionLocalSearch_v34 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v34 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v34", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v34 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v34 import ( + EnhancedDifferentialEvolutionLocalSearch_v34, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v34"] = ( + EnhancedDifferentialEvolutionLocalSearch_v34 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v34 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v34" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v34", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v34 print("EnhancedDifferentialEvolutionLocalSearch_v34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v35 import EnhancedDifferentialEvolutionLocalSearch_v35 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v35"] = EnhancedDifferentialEvolutionLocalSearch_v35 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v35 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v35", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v35 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v35 import ( + EnhancedDifferentialEvolutionLocalSearch_v35, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v35"] = ( + EnhancedDifferentialEvolutionLocalSearch_v35 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v35 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v35" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v35", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v35 print("EnhancedDifferentialEvolutionLocalSearch_v35 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v36 import EnhancedDifferentialEvolutionLocalSearch_v36 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v36"] = EnhancedDifferentialEvolutionLocalSearch_v36 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v36 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v36", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v36 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v36 import ( + EnhancedDifferentialEvolutionLocalSearch_v36, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v36"] = ( + EnhancedDifferentialEvolutionLocalSearch_v36 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v36 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v36" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v36", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v36 print("EnhancedDifferentialEvolutionLocalSearch_v36 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v37 import EnhancedDifferentialEvolutionLocalSearch_v37 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v37"] = EnhancedDifferentialEvolutionLocalSearch_v37 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v37 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v37", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v37 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v37 import ( + EnhancedDifferentialEvolutionLocalSearch_v37, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v37"] = ( + EnhancedDifferentialEvolutionLocalSearch_v37 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v37 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v37" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v37", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v37 print("EnhancedDifferentialEvolutionLocalSearch_v37 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v38 import EnhancedDifferentialEvolutionLocalSearch_v38 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v38"] = EnhancedDifferentialEvolutionLocalSearch_v38 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v38 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v38", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v38 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v38 import ( + EnhancedDifferentialEvolutionLocalSearch_v38, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v38"] = ( + EnhancedDifferentialEvolutionLocalSearch_v38 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v38 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v38" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v38", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v38 print("EnhancedDifferentialEvolutionLocalSearch_v38 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v39 import EnhancedDifferentialEvolutionLocalSearch_v39 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v39"] = EnhancedDifferentialEvolutionLocalSearch_v39 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v39 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v39", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v39 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v39 import ( + EnhancedDifferentialEvolutionLocalSearch_v39, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v39"] = ( + EnhancedDifferentialEvolutionLocalSearch_v39 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v39 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v39" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v39", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v39 print("EnhancedDifferentialEvolutionLocalSearch_v39 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v40 import EnhancedDifferentialEvolutionLocalSearch_v40 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v40"] = EnhancedDifferentialEvolutionLocalSearch_v40 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v40 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v40", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v40 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v40 import ( + EnhancedDifferentialEvolutionLocalSearch_v40, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v40"] = ( + EnhancedDifferentialEvolutionLocalSearch_v40 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v40 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v40" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v40", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v40 print("EnhancedDifferentialEvolutionLocalSearch_v40 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v41 import EnhancedDifferentialEvolutionLocalSearch_v41 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v41"] = EnhancedDifferentialEvolutionLocalSearch_v41 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v41 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v41", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v41 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v41 import ( + EnhancedDifferentialEvolutionLocalSearch_v41, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v41"] = ( + EnhancedDifferentialEvolutionLocalSearch_v41 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v41 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v41" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v41", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v41 print("EnhancedDifferentialEvolutionLocalSearch_v41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v43 import EnhancedDifferentialEvolutionLocalSearch_v43 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v43"] = EnhancedDifferentialEvolutionLocalSearch_v43 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v43 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v43", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v43 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v43 import ( + EnhancedDifferentialEvolutionLocalSearch_v43, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v43"] = ( + EnhancedDifferentialEvolutionLocalSearch_v43 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v43 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v43" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v43", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v43 print("EnhancedDifferentialEvolutionLocalSearch_v43 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v44 import EnhancedDifferentialEvolutionLocalSearch_v44 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v44"] = EnhancedDifferentialEvolutionLocalSearch_v44 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v44 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v44", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v44 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v44 import ( + EnhancedDifferentialEvolutionLocalSearch_v44, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v44"] = ( + EnhancedDifferentialEvolutionLocalSearch_v44 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v44 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v44" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v44", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v44 print("EnhancedDifferentialEvolutionLocalSearch_v44 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v45 import EnhancedDifferentialEvolutionLocalSearch_v45 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v45"] = EnhancedDifferentialEvolutionLocalSearch_v45 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v45 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v45", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v45 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v45 import ( + EnhancedDifferentialEvolutionLocalSearch_v45, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v45"] = ( + EnhancedDifferentialEvolutionLocalSearch_v45 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v45 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v45" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v45", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v45 print("EnhancedDifferentialEvolutionLocalSearch_v45 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v46 import EnhancedDifferentialEvolutionLocalSearch_v46 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v46"] = EnhancedDifferentialEvolutionLocalSearch_v46 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v46 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v46", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v46 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v46 import ( + EnhancedDifferentialEvolutionLocalSearch_v46, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v46"] = ( + EnhancedDifferentialEvolutionLocalSearch_v46 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v46 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v46" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v46", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v46 print("EnhancedDifferentialEvolutionLocalSearch_v46 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v47 import EnhancedDifferentialEvolutionLocalSearch_v47 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v47"] = EnhancedDifferentialEvolutionLocalSearch_v47 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v47 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v47", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v47 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v47 import ( + EnhancedDifferentialEvolutionLocalSearch_v47, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v47"] = ( + EnhancedDifferentialEvolutionLocalSearch_v47 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v47 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v47" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v47", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v47 print("EnhancedDifferentialEvolutionLocalSearch_v47 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v48 import EnhancedDifferentialEvolutionLocalSearch_v48 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v48"] = EnhancedDifferentialEvolutionLocalSearch_v48 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v48 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v48", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v48 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v48 import ( + EnhancedDifferentialEvolutionLocalSearch_v48, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v48"] = ( + EnhancedDifferentialEvolutionLocalSearch_v48 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v48 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v48" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v48", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v48 print("EnhancedDifferentialEvolutionLocalSearch_v48 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v49 import EnhancedDifferentialEvolutionLocalSearch_v49 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v49"] = EnhancedDifferentialEvolutionLocalSearch_v49 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v49 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v49", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v49 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v49 import ( + EnhancedDifferentialEvolutionLocalSearch_v49, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v49"] = ( + EnhancedDifferentialEvolutionLocalSearch_v49 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v49 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v49" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v49", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v49 print("EnhancedDifferentialEvolutionLocalSearch_v49 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v50 import EnhancedDifferentialEvolutionLocalSearch_v50 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v50"] = EnhancedDifferentialEvolutionLocalSearch_v50 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v50 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v50", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v50 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v50 import ( + EnhancedDifferentialEvolutionLocalSearch_v50, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v50"] = ( + EnhancedDifferentialEvolutionLocalSearch_v50 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v50 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v50" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v50", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v50 print("EnhancedDifferentialEvolutionLocalSearch_v50 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v51 import EnhancedDifferentialEvolutionLocalSearch_v51 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v51"] = EnhancedDifferentialEvolutionLocalSearch_v51 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v51 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v51", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v51 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v51 import ( + EnhancedDifferentialEvolutionLocalSearch_v51, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v51"] = ( + EnhancedDifferentialEvolutionLocalSearch_v51 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v51 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v51" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v51", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v51 print("EnhancedDifferentialEvolutionLocalSearch_v51 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v52 import EnhancedDifferentialEvolutionLocalSearch_v52 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v52"] = EnhancedDifferentialEvolutionLocalSearch_v52 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v52 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v52", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v52 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v52 import ( + EnhancedDifferentialEvolutionLocalSearch_v52, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v52"] = ( + EnhancedDifferentialEvolutionLocalSearch_v52 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v52 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v52" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v52", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v52 print("EnhancedDifferentialEvolutionLocalSearch_v52 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v53 import EnhancedDifferentialEvolutionLocalSearch_v53 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v53"] = EnhancedDifferentialEvolutionLocalSearch_v53 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v53 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v53", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v53 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v53 import ( + EnhancedDifferentialEvolutionLocalSearch_v53, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v53"] = ( + EnhancedDifferentialEvolutionLocalSearch_v53 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v53 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v53" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v53", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v53 print("EnhancedDifferentialEvolutionLocalSearch_v53 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v59 import EnhancedDifferentialEvolutionLocalSearch_v59 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v59"] = EnhancedDifferentialEvolutionLocalSearch_v59 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v59 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v59", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v59 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v59 import ( + EnhancedDifferentialEvolutionLocalSearch_v59, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v59"] = ( + EnhancedDifferentialEvolutionLocalSearch_v59 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v59 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v59" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v59", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v59 print("EnhancedDifferentialEvolutionLocalSearch_v59 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v60 import EnhancedDifferentialEvolutionLocalSearch_v60 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v60"] = EnhancedDifferentialEvolutionLocalSearch_v60 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v60 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v60", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v60 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v60 import ( + EnhancedDifferentialEvolutionLocalSearch_v60, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v60"] = ( + EnhancedDifferentialEvolutionLocalSearch_v60 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v60 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v60" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v60", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v60 print("EnhancedDifferentialEvolutionLocalSearch_v60 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v62 import EnhancedDifferentialEvolutionLocalSearch_v62 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v62"] = EnhancedDifferentialEvolutionLocalSearch_v62 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v62 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v62", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v62 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v62 import ( + EnhancedDifferentialEvolutionLocalSearch_v62, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v62"] = ( + EnhancedDifferentialEvolutionLocalSearch_v62 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v62 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v62" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v62", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v62 print("EnhancedDifferentialEvolutionLocalSearch_v62 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v63 import EnhancedDifferentialEvolutionLocalSearch_v63 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v63"] = EnhancedDifferentialEvolutionLocalSearch_v63 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v63 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v63", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v63 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v63 import ( + EnhancedDifferentialEvolutionLocalSearch_v63, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v63"] = ( + EnhancedDifferentialEvolutionLocalSearch_v63 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v63 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v63" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v63", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v63 print("EnhancedDifferentialEvolutionLocalSearch_v63 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v64 import EnhancedDifferentialEvolutionLocalSearch_v64 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v64"] = EnhancedDifferentialEvolutionLocalSearch_v64 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v64 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v64", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v64 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v64 import ( + EnhancedDifferentialEvolutionLocalSearch_v64, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v64"] = ( + EnhancedDifferentialEvolutionLocalSearch_v64 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v64 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v64" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v64", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v64 print("EnhancedDifferentialEvolutionLocalSearch_v64 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v66 import EnhancedDifferentialEvolutionLocalSearch_v66 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v66"] = EnhancedDifferentialEvolutionLocalSearch_v66 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v66 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v66", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v66 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v66 import ( + EnhancedDifferentialEvolutionLocalSearch_v66, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v66"] = ( + EnhancedDifferentialEvolutionLocalSearch_v66 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v66 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v66" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v66", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v66 print("EnhancedDifferentialEvolutionLocalSearch_v66 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v67 import EnhancedDifferentialEvolutionLocalSearch_v67 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v67"] = EnhancedDifferentialEvolutionLocalSearch_v67 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v67 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v67", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v67 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v67 import ( + EnhancedDifferentialEvolutionLocalSearch_v67, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v67"] = ( + EnhancedDifferentialEvolutionLocalSearch_v67 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v67 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v67" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v67", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v67 print("EnhancedDifferentialEvolutionLocalSearch_v67 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v68 import EnhancedDifferentialEvolutionLocalSearch_v68 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v68"] = EnhancedDifferentialEvolutionLocalSearch_v68 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v68 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v68", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v68 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v68 import ( + EnhancedDifferentialEvolutionLocalSearch_v68, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v68"] = ( + EnhancedDifferentialEvolutionLocalSearch_v68 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v68 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v68" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v68", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v68 print("EnhancedDifferentialEvolutionLocalSearch_v68 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v69 import EnhancedDifferentialEvolutionLocalSearch_v69 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v69"] = EnhancedDifferentialEvolutionLocalSearch_v69 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v69 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v69", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v69 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v69 import ( + EnhancedDifferentialEvolutionLocalSearch_v69, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v69"] = ( + EnhancedDifferentialEvolutionLocalSearch_v69 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v69 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v69" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v69", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v69 print("EnhancedDifferentialEvolutionLocalSearch_v69 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v70 import EnhancedDifferentialEvolutionLocalSearch_v70 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v70"] = EnhancedDifferentialEvolutionLocalSearch_v70 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v70 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v70", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v70 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v70 import ( + EnhancedDifferentialEvolutionLocalSearch_v70, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v70"] = ( + EnhancedDifferentialEvolutionLocalSearch_v70 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v70 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v70" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v70", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v70 print("EnhancedDifferentialEvolutionLocalSearch_v70 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v71 import EnhancedDifferentialEvolutionLocalSearch_v71 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v71"] = EnhancedDifferentialEvolutionLocalSearch_v71 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v71 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v71", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v71 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v71 import ( + EnhancedDifferentialEvolutionLocalSearch_v71, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v71"] = ( + EnhancedDifferentialEvolutionLocalSearch_v71 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v71 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v71" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v71", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v71 print("EnhancedDifferentialEvolutionLocalSearch_v71 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v72 import EnhancedDifferentialEvolutionLocalSearch_v72 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v72"] = EnhancedDifferentialEvolutionLocalSearch_v72 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v72 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v72", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v72 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v72 import ( + EnhancedDifferentialEvolutionLocalSearch_v72, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v72"] = ( + EnhancedDifferentialEvolutionLocalSearch_v72 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v72 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v72" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v72", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v72 print("EnhancedDifferentialEvolutionLocalSearch_v72 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v73 import EnhancedDifferentialEvolutionLocalSearch_v73 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v73"] = EnhancedDifferentialEvolutionLocalSearch_v73 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v73 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v73", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v73 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v73 import ( + EnhancedDifferentialEvolutionLocalSearch_v73, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v73"] = ( + EnhancedDifferentialEvolutionLocalSearch_v73 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v73 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v73" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v73", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v73 print("EnhancedDifferentialEvolutionLocalSearch_v73 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v74 import EnhancedDifferentialEvolutionLocalSearch_v74 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v74"] = EnhancedDifferentialEvolutionLocalSearch_v74 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v74 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v74", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v74 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v74 import ( + EnhancedDifferentialEvolutionLocalSearch_v74, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v74"] = ( + EnhancedDifferentialEvolutionLocalSearch_v74 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v74 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v74" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v74", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v74 print("EnhancedDifferentialEvolutionLocalSearch_v74 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v75 import EnhancedDifferentialEvolutionLocalSearch_v75 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v75"] = EnhancedDifferentialEvolutionLocalSearch_v75 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v75 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v75", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v75 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v75 import ( + EnhancedDifferentialEvolutionLocalSearch_v75, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v75"] = ( + EnhancedDifferentialEvolutionLocalSearch_v75 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v75 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v75" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v75", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v75 print("EnhancedDifferentialEvolutionLocalSearch_v75 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v76 import EnhancedDifferentialEvolutionLocalSearch_v76 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v76"] = EnhancedDifferentialEvolutionLocalSearch_v76 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v76 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v76", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v76 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v76 import ( + EnhancedDifferentialEvolutionLocalSearch_v76, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v76"] = ( + EnhancedDifferentialEvolutionLocalSearch_v76 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v76 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v76" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v76", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v76 print("EnhancedDifferentialEvolutionLocalSearch_v76 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v77 import EnhancedDifferentialEvolutionLocalSearch_v77 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v77"] = EnhancedDifferentialEvolutionLocalSearch_v77 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v77 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v77", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v77 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v77 import ( + EnhancedDifferentialEvolutionLocalSearch_v77, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v77"] = ( + EnhancedDifferentialEvolutionLocalSearch_v77 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v77 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v77" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v77", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v77 print("EnhancedDifferentialEvolutionLocalSearch_v77 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v78 import EnhancedDifferentialEvolutionLocalSearch_v78 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v78"] = EnhancedDifferentialEvolutionLocalSearch_v78 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v78 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v78", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v78 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v78 import ( + EnhancedDifferentialEvolutionLocalSearch_v78, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v78"] = ( + EnhancedDifferentialEvolutionLocalSearch_v78 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v78 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v78" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v78", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v78 print("EnhancedDifferentialEvolutionLocalSearch_v78 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v79 import EnhancedDifferentialEvolutionLocalSearch_v79 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v79"] = EnhancedDifferentialEvolutionLocalSearch_v79 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v79 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v79", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v79 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v79 import ( + EnhancedDifferentialEvolutionLocalSearch_v79, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v79"] = ( + EnhancedDifferentialEvolutionLocalSearch_v79 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v79 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v79" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v79", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v79 print("EnhancedDifferentialEvolutionLocalSearch_v79 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v80 import EnhancedDifferentialEvolutionLocalSearch_v80 - - lama_register["EnhancedDifferentialEvolutionLocalSearch_v80"] = EnhancedDifferentialEvolutionLocalSearch_v80 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionLocalSearch_v80 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80").set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v80", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionLocalSearch_v80 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionLocalSearch_v80 import ( + EnhancedDifferentialEvolutionLocalSearch_v80, + ) + + lama_register["EnhancedDifferentialEvolutionLocalSearch_v80"] = ( + EnhancedDifferentialEvolutionLocalSearch_v80 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionLocalSearch_v80 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionLocalSearch_v80" + ).set_name("LLAMAEnhancedDifferentialEvolutionLocalSearch_v80", register=True) +except Exception as e: # EnhancedDifferentialEvolutionLocalSearch_v80 print("EnhancedDifferentialEvolutionLocalSearch_v80 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionOptimizer import EnhancedDifferentialEvolutionOptimizer +try: # EnhancedDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionOptimizer import ( + EnhancedDifferentialEvolutionOptimizer, + ) lama_register["EnhancedDifferentialEvolutionOptimizer"] = EnhancedDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionOptimizer").set_name("LLAMAEnhancedDifferentialEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionOptimizer" + ).set_name("LLAMAEnhancedDifferentialEvolutionOptimizer", register=True) +except Exception as e: # EnhancedDifferentialEvolutionOptimizer print("EnhancedDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizer import EnhancedDifferentialEvolutionParticleSwarmOptimizer - - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizer"] = EnhancedDifferentialEvolutionParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionParticleSwarmOptimizer + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizer import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizer, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizer"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedDifferentialEvolutionParticleSwarmOptimizer print("EnhancedDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 - - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV2"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV2, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV2"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2", register=True) +except Exception as e: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 - - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV3"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV3, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV3"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3", register=True) +except Exception as e: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 import EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 - - lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV4"] = EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4").set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 import ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV4, + ) + + lama_register["EnhancedDifferentialEvolutionParticleSwarmOptimizerV4"] = ( + EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4" + ).set_name("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4", register=True) +except Exception as e: # EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 print("EnhancedDifferentialEvolutionParticleSwarmOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialEvolutionWithAdaptiveMutationControl import EnhancedDifferentialEvolutionWithAdaptiveMutationControl - - lama_register["EnhancedDifferentialEvolutionWithAdaptiveMutationControl"] = EnhancedDifferentialEvolutionWithAdaptiveMutationControl - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl").set_name("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl", register=True) -except Exception as e: +try: # EnhancedDifferentialEvolutionWithAdaptiveMutationControl + from nevergrad.optimization.lama.EnhancedDifferentialEvolutionWithAdaptiveMutationControl import ( + EnhancedDifferentialEvolutionWithAdaptiveMutationControl, + ) + + lama_register["EnhancedDifferentialEvolutionWithAdaptiveMutationControl"] = ( + EnhancedDifferentialEvolutionWithAdaptiveMutationControl + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl" + ).set_name("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl", register=True) +except Exception as e: # EnhancedDifferentialEvolutionWithAdaptiveMutationControl print("EnhancedDifferentialEvolutionWithAdaptiveMutationControl can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm import EnhancedDifferentialFireworkAlgorithm +try: # EnhancedDifferentialFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm import ( + EnhancedDifferentialFireworkAlgorithm, + ) lama_register["EnhancedDifferentialFireworkAlgorithm"] = EnhancedDifferentialFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm").set_name("LLAMAEnhancedDifferentialFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialFireworkAlgorithm" + ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDifferentialFireworkAlgorithm print("EnhancedDifferentialFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm_v2 import EnhancedDifferentialFireworkAlgorithm_v2 +try: # EnhancedDifferentialFireworkAlgorithm_v2 + from nevergrad.optimization.lama.EnhancedDifferentialFireworkAlgorithm_v2 import ( + EnhancedDifferentialFireworkAlgorithm_v2, + ) lama_register["EnhancedDifferentialFireworkAlgorithm_v2"] = EnhancedDifferentialFireworkAlgorithm_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialFireworkAlgorithm_v2 = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2").set_name("LLAMAEnhancedDifferentialFireworkAlgorithm_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialFireworkAlgorithm_v2 = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialFireworkAlgorithm_v2" + ).set_name("LLAMAEnhancedDifferentialFireworkAlgorithm_v2", register=True) +except Exception as e: # EnhancedDifferentialFireworkAlgorithm_v2 print("EnhancedDifferentialFireworkAlgorithm_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentialSimulatedAnnealingOptimizer import EnhancedDifferentialSimulatedAnnealingOptimizer - - lama_register["EnhancedDifferentialSimulatedAnnealingOptimizer"] = EnhancedDifferentialSimulatedAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer").set_name("LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer", register=True) -except Exception as e: +try: # EnhancedDifferentialSimulatedAnnealingOptimizer + from nevergrad.optimization.lama.EnhancedDifferentialSimulatedAnnealingOptimizer import ( + EnhancedDifferentialSimulatedAnnealingOptimizer, + ) + + lama_register["EnhancedDifferentialSimulatedAnnealingOptimizer"] = ( + EnhancedDifferentialSimulatedAnnealingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer" + ).set_name("LLAMAEnhancedDifferentialSimulatedAnnealingOptimizer", register=True) +except Exception as e: # EnhancedDifferentialSimulatedAnnealingOptimizer print("EnhancedDifferentialSimulatedAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDifferentiatedAdaptiveEvolution import EnhancedDifferentiatedAdaptiveEvolution +try: # EnhancedDifferentiatedAdaptiveEvolution + from nevergrad.optimization.lama.EnhancedDifferentiatedAdaptiveEvolution import ( + EnhancedDifferentiatedAdaptiveEvolution, + ) lama_register["EnhancedDifferentiatedAdaptiveEvolution"] = EnhancedDifferentiatedAdaptiveEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDifferentiatedAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDifferentiatedAdaptiveEvolution = NonObjectOptimizer(method="LLAMAEnhancedDifferentiatedAdaptiveEvolution").set_name("LLAMAEnhancedDifferentiatedAdaptiveEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDifferentiatedAdaptiveEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDifferentiatedAdaptiveEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDifferentiatedAdaptiveEvolution" + ).set_name("LLAMAEnhancedDifferentiatedAdaptiveEvolution", register=True) +except Exception as e: # EnhancedDifferentiatedAdaptiveEvolution print("EnhancedDifferentiatedAdaptiveEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDimensionalFeedbackEvolverV3 import EnhancedDimensionalFeedbackEvolverV3 +try: # EnhancedDimensionalFeedbackEvolverV3 + from nevergrad.optimization.lama.EnhancedDimensionalFeedbackEvolverV3 import ( + EnhancedDimensionalFeedbackEvolverV3, + ) lama_register["EnhancedDimensionalFeedbackEvolverV3"] = EnhancedDimensionalFeedbackEvolverV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDimensionalFeedbackEvolverV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDimensionalFeedbackEvolverV3 = NonObjectOptimizer(method="LLAMAEnhancedDimensionalFeedbackEvolverV3").set_name("LLAMAEnhancedDimensionalFeedbackEvolverV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDimensionalFeedbackEvolverV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDimensionalFeedbackEvolverV3 = NonObjectOptimizer( + method="LLAMAEnhancedDimensionalFeedbackEvolverV3" + ).set_name("LLAMAEnhancedDimensionalFeedbackEvolverV3", register=True) +except Exception as e: # EnhancedDimensionalFeedbackEvolverV3 print("EnhancedDimensionalFeedbackEvolverV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiverseMemoryHybridOptimizer import EnhancedDiverseMemoryHybridOptimizer +try: # EnhancedDiverseMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedDiverseMemoryHybridOptimizer import ( + EnhancedDiverseMemoryHybridOptimizer, + ) lama_register["EnhancedDiverseMemoryHybridOptimizer"] = EnhancedDiverseMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDiverseMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiverseMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiverseMemoryHybridOptimizer").set_name("LLAMAEnhancedDiverseMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiverseMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiverseMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiverseMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedDiverseMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedDiverseMemoryHybridOptimizer print("EnhancedDiverseMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedAdaptiveHarmonySearch import EnhancedDiversifiedAdaptiveHarmonySearch +try: # EnhancedDiversifiedAdaptiveHarmonySearch + from nevergrad.optimization.lama.EnhancedDiversifiedAdaptiveHarmonySearch import ( + EnhancedDiversifiedAdaptiveHarmonySearch, + ) lama_register["EnhancedDiversifiedAdaptiveHarmonySearch"] = EnhancedDiversifiedAdaptiveHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch").set_name("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedAdaptiveHarmonySearch" + ).set_name("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch", register=True) +except Exception as e: # EnhancedDiversifiedAdaptiveHarmonySearch print("EnhancedDiversifiedAdaptiveHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithm import EnhancedDiversifiedCuckooFireworksAlgorithm +try: # EnhancedDiversifiedCuckooFireworksAlgorithm + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithm import ( + EnhancedDiversifiedCuckooFireworksAlgorithm, + ) lama_register["EnhancedDiversifiedCuckooFireworksAlgorithm"] = EnhancedDiversifiedCuckooFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm").set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithm", register=True) +except Exception as e: # EnhancedDiversifiedCuckooFireworksAlgorithm print("EnhancedDiversifiedCuckooFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithmV2 import EnhancedDiversifiedCuckooFireworksAlgorithmV2 - - lama_register["EnhancedDiversifiedCuckooFireworksAlgorithmV2"] = EnhancedDiversifiedCuckooFireworksAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2").set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedDiversifiedCuckooFireworksAlgorithmV2 + from nevergrad.optimization.lama.EnhancedDiversifiedCuckooFireworksAlgorithmV2 import ( + EnhancedDiversifiedCuckooFireworksAlgorithmV2, + ) + + lama_register["EnhancedDiversifiedCuckooFireworksAlgorithmV2"] = ( + EnhancedDiversifiedCuckooFireworksAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedDiversifiedCuckooFireworksAlgorithmV2", register=True) +except Exception as e: # EnhancedDiversifiedCuckooFireworksAlgorithmV2 print("EnhancedDiversifiedCuckooFireworksAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimization import EnhancedDiversifiedGravitationalSwarmOptimization - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimization"] = EnhancedDiversifiedGravitationalSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimization + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimization import ( + EnhancedDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimization"] = ( + EnhancedDiversifiedGravitationalSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimization print("EnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV2 import EnhancedDiversifiedGravitationalSwarmOptimizationV2 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV2"] = EnhancedDiversifiedGravitationalSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV2 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV2"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV2 print("EnhancedDiversifiedGravitationalSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV3 import EnhancedDiversifiedGravitationalSwarmOptimizationV3 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV3"] = EnhancedDiversifiedGravitationalSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV3 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV3"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV3 print("EnhancedDiversifiedGravitationalSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV4 import EnhancedDiversifiedGravitationalSwarmOptimizationV4 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV4"] = EnhancedDiversifiedGravitationalSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV4 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV4"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV4 print("EnhancedDiversifiedGravitationalSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV5 import EnhancedDiversifiedGravitationalSwarmOptimizationV5 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV5"] = EnhancedDiversifiedGravitationalSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV5 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV5, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV5"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV5 print("EnhancedDiversifiedGravitationalSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV6 import EnhancedDiversifiedGravitationalSwarmOptimizationV6 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV6"] = EnhancedDiversifiedGravitationalSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV6 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV6, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV6"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV6 print("EnhancedDiversifiedGravitationalSwarmOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV7 import EnhancedDiversifiedGravitationalSwarmOptimizationV7 - - lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV7"] = EnhancedDiversifiedGravitationalSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7").set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7", register=True) -except Exception as e: +try: # EnhancedDiversifiedGravitationalSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedDiversifiedGravitationalSwarmOptimizationV7 import ( + EnhancedDiversifiedGravitationalSwarmOptimizationV7, + ) + + lama_register["EnhancedDiversifiedGravitationalSwarmOptimizationV7"] = ( + EnhancedDiversifiedGravitationalSwarmOptimizationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7" + ).set_name("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedDiversifiedGravitationalSwarmOptimizationV7 print("EnhancedDiversifiedGravitationalSwarmOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer import EnhancedDiversifiedHarmonicHarmonyOptimizer +try: # EnhancedDiversifiedHarmonicHarmonyOptimizer + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer, + ) lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer"] = EnhancedDiversifiedHarmonicHarmonyOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer", register=True) +except Exception as e: # EnhancedDiversifiedHarmonicHarmonyOptimizer print("EnhancedDiversifiedHarmonicHarmonyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 import EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 - - lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V2"] = EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2", register=True) -except Exception as e: +try: # EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V2, + ) + + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V2"] = ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V2", register=True) +except Exception as e: # EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 import EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 - - lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V3"] = EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3").set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3", register=True) -except Exception as e: +try: # EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 import ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V3, + ) + + lama_register["EnhancedDiversifiedHarmonicHarmonyOptimizer_V3"] = ( + EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3" + ).set_name("LLAMAEnhancedDiversifiedHarmonicHarmonyOptimizer_V3", register=True) +except Exception as e: # EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 print("EnhancedDiversifiedHarmonicHarmonyOptimizer_V3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyAlgorithm import EnhancedDiversifiedHarmonyAlgorithm +try: # EnhancedDiversifiedHarmonyAlgorithm + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyAlgorithm import ( + EnhancedDiversifiedHarmonyAlgorithm, + ) lama_register["EnhancedDiversifiedHarmonyAlgorithm"] = EnhancedDiversifiedHarmonyAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyAlgorithm").set_name("LLAMAEnhancedDiversifiedHarmonyAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedHarmonyAlgorithm", register=True) +except Exception as e: # EnhancedDiversifiedHarmonyAlgorithm print("EnhancedDiversifiedHarmonyAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithm import EnhancedDiversifiedHarmonyFireworksAlgorithm - - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithm"] = EnhancedDiversifiedHarmonyFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm", register=True) -except Exception as e: +try: # EnhancedDiversifiedHarmonyFireworksAlgorithm + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithm import ( + EnhancedDiversifiedHarmonyFireworksAlgorithm, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithm"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithm", register=True) +except Exception as e: # EnhancedDiversifiedHarmonyFireworksAlgorithm print("EnhancedDiversifiedHarmonyFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV2 import EnhancedDiversifiedHarmonyFireworksAlgorithmV2 - - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV2"] = EnhancedDiversifiedHarmonyFireworksAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedDiversifiedHarmonyFireworksAlgorithmV2 + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV2 import ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV2, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV2"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV2", register=True) +except Exception as e: # EnhancedDiversifiedHarmonyFireworksAlgorithmV2 print("EnhancedDiversifiedHarmonyFireworksAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV3 import EnhancedDiversifiedHarmonyFireworksAlgorithmV3 - - lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV3"] = EnhancedDiversifiedHarmonyFireworksAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3").set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3", register=True) -except Exception as e: +try: # EnhancedDiversifiedHarmonyFireworksAlgorithmV3 + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonyFireworksAlgorithmV3 import ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV3, + ) + + lama_register["EnhancedDiversifiedHarmonyFireworksAlgorithmV3"] = ( + EnhancedDiversifiedHarmonyFireworksAlgorithmV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3" + ).set_name("LLAMAEnhancedDiversifiedHarmonyFireworksAlgorithmV3", register=True) +except Exception as e: # EnhancedDiversifiedHarmonyFireworksAlgorithmV3 print("EnhancedDiversifiedHarmonyFireworksAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedHarmonySearchOptimizer import EnhancedDiversifiedHarmonySearchOptimizer +try: # EnhancedDiversifiedHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedDiversifiedHarmonySearchOptimizer import ( + EnhancedDiversifiedHarmonySearchOptimizer, + ) lama_register["EnhancedDiversifiedHarmonySearchOptimizer"] = EnhancedDiversifiedHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedDiversifiedHarmonySearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedDiversifiedHarmonySearchOptimizer print("EnhancedDiversifiedHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV3 import EnhancedDiversifiedMetaHeuristicAlgorithmV3 +try: # EnhancedDiversifiedMetaHeuristicAlgorithmV3 + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV3 import ( + EnhancedDiversifiedMetaHeuristicAlgorithmV3, + ) lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV3"] = EnhancedDiversifiedMetaHeuristicAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3").set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3" + ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV3", register=True) +except Exception as e: # EnhancedDiversifiedMetaHeuristicAlgorithmV3 print("EnhancedDiversifiedMetaHeuristicAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV4 import EnhancedDiversifiedMetaHeuristicAlgorithmV4 +try: # EnhancedDiversifiedMetaHeuristicAlgorithmV4 + from nevergrad.optimization.lama.EnhancedDiversifiedMetaHeuristicAlgorithmV4 import ( + EnhancedDiversifiedMetaHeuristicAlgorithmV4, + ) lama_register["EnhancedDiversifiedMetaHeuristicAlgorithmV4"] = EnhancedDiversifiedMetaHeuristicAlgorithmV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4").set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4" + ).set_name("LLAMAEnhancedDiversifiedMetaHeuristicAlgorithmV4", register=True) +except Exception as e: # EnhancedDiversifiedMetaHeuristicAlgorithmV4 print("EnhancedDiversifiedMetaHeuristicAlgorithmV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization import EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization - - lama_register["EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization"] = EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization").set_name("LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization", register=True) -except Exception as e: +try: # EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization import ( + EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization, + ) + + lama_register["EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization"] = ( + EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization", register=True) +except Exception as e: # EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization print("EnhancedDualPhaseAdaptiveEvolutionarySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizationV3 import EnhancedDualPhaseAdaptiveHybridOptimizationV3 - - lama_register["EnhancedDualPhaseAdaptiveHybridOptimizationV3"] = EnhancedDualPhaseAdaptiveHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3").set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) -except Exception as e: +try: # EnhancedDualPhaseAdaptiveHybridOptimizationV3 + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( + EnhancedDualPhaseAdaptiveHybridOptimizationV3, + ) + + lama_register["EnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( + EnhancedDualPhaseAdaptiveHybridOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) +except Exception as e: # EnhancedDualPhaseAdaptiveHybridOptimizationV3 print("EnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizerV3 import EnhancedDualPhaseAdaptiveHybridOptimizerV3 +try: # EnhancedDualPhaseAdaptiveHybridOptimizerV3 + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveHybridOptimizerV3 import ( + EnhancedDualPhaseAdaptiveHybridOptimizerV3, + ) lama_register["EnhancedDualPhaseAdaptiveHybridOptimizerV3"] = EnhancedDualPhaseAdaptiveHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3").set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveHybridOptimizerV3", register=True) +except Exception as e: # EnhancedDualPhaseAdaptiveHybridOptimizerV3 print("EnhancedDualPhaseAdaptiveHybridOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution import EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution - - lama_register["EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution"] = EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution").set_name("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution import ( + EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution"] = ( + EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution print("EnhancedDualPhaseAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseDifferentialEvolution import EnhancedDualPhaseDifferentialEvolution +try: # EnhancedDualPhaseDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDualPhaseDifferentialEvolution import ( + EnhancedDualPhaseDifferentialEvolution, + ) lama_register["EnhancedDualPhaseDifferentialEvolution"] = EnhancedDualPhaseDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseDifferentialEvolution").set_name("LLAMAEnhancedDualPhaseDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseDifferentialEvolution" + ).set_name("LLAMAEnhancedDualPhaseDifferentialEvolution", register=True) +except Exception as e: # EnhancedDualPhaseDifferentialEvolution print("EnhancedDualPhaseDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimization import EnhancedDualPhaseHybridOptimization +try: # EnhancedDualPhaseHybridOptimization + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimization import ( + EnhancedDualPhaseHybridOptimization, + ) lama_register["EnhancedDualPhaseHybridOptimization"] = EnhancedDualPhaseHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimization").set_name("LLAMAEnhancedDualPhaseHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseHybridOptimization" + ).set_name("LLAMAEnhancedDualPhaseHybridOptimization", register=True) +except Exception as e: # EnhancedDualPhaseHybridOptimization print("EnhancedDualPhaseHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimizationV2 import EnhancedDualPhaseHybridOptimizationV2 +try: # EnhancedDualPhaseHybridOptimizationV2 + from nevergrad.optimization.lama.EnhancedDualPhaseHybridOptimizationV2 import ( + EnhancedDualPhaseHybridOptimizationV2, + ) lama_register["EnhancedDualPhaseHybridOptimizationV2"] = EnhancedDualPhaseHybridOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualPhaseHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimizationV2").set_name("LLAMAEnhancedDualPhaseHybridOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualPhaseHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualPhaseHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDualPhaseHybridOptimizationV2" + ).set_name("LLAMAEnhancedDualPhaseHybridOptimizationV2", register=True) +except Exception as e: # EnhancedDualPhaseHybridOptimizationV2 print("EnhancedDualPhaseHybridOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualStrategyAdaptiveDE_v2 import EnhancedDualStrategyAdaptiveDE_v2 +try: # EnhancedDualStrategyAdaptiveDE_v2 + from nevergrad.optimization.lama.EnhancedDualStrategyAdaptiveDE_v2 import ( + EnhancedDualStrategyAdaptiveDE_v2, + ) lama_register["EnhancedDualStrategyAdaptiveDE_v2"] = EnhancedDualStrategyAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyAdaptiveDE_v2").set_name("LLAMAEnhancedDualStrategyAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedDualStrategyAdaptiveDE_v2" + ).set_name("LLAMAEnhancedDualStrategyAdaptiveDE_v2", register=True) +except Exception as e: # EnhancedDualStrategyAdaptiveDE_v2 print("EnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDualStrategyHybridOptimizer import EnhancedDualStrategyHybridOptimizer +try: # EnhancedDualStrategyHybridOptimizer + from nevergrad.optimization.lama.EnhancedDualStrategyHybridOptimizer import ( + EnhancedDualStrategyHybridOptimizer, + ) lama_register["EnhancedDualStrategyHybridOptimizer"] = EnhancedDualStrategyHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDualStrategyHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyHybridOptimizer").set_name("LLAMAEnhancedDualStrategyHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDualStrategyHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDualStrategyHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDualStrategyHybridOptimizer" + ).set_name("LLAMAEnhancedDualStrategyHybridOptimizer", register=True) +except Exception as e: # EnhancedDualStrategyHybridOptimizer print("EnhancedDualStrategyHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveClimbingStrategy import EnhancedDynamicAdaptiveClimbingStrategy +try: # EnhancedDynamicAdaptiveClimbingStrategy + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveClimbingStrategy import ( + EnhancedDynamicAdaptiveClimbingStrategy, + ) lama_register["EnhancedDynamicAdaptiveClimbingStrategy"] = EnhancedDynamicAdaptiveClimbingStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveClimbingStrategy = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy").set_name("LLAMAEnhancedDynamicAdaptiveClimbingStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveClimbingStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveClimbingStrategy" + ).set_name("LLAMAEnhancedDynamicAdaptiveClimbingStrategy", register=True) +except Exception as e: # EnhancedDynamicAdaptiveClimbingStrategy print("EnhancedDynamicAdaptiveClimbingStrategy can not be imported: ", e) -try: +try: # EnhancedDynamicAdaptiveDE from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDE import EnhancedDynamicAdaptiveDE lama_register["EnhancedDynamicAdaptiveDE"] = EnhancedDynamicAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE").set_name("LLAMAEnhancedDynamicAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDE").set_name( + "LLAMAEnhancedDynamicAdaptiveDE", register=True + ) +except Exception as e: # EnhancedDynamicAdaptiveDE print("EnhancedDynamicAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolution import EnhancedDynamicAdaptiveDifferentialEvolution - - lama_register["EnhancedDynamicAdaptiveDifferentialEvolution"] = EnhancedDynamicAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolution import ( + EnhancedDynamicAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolution"] = ( + EnhancedDynamicAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedDynamicAdaptiveDifferentialEvolution print("EnhancedDynamicAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation import EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation - - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation"] = EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation import ( + EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation", register=True) +except Exception as e: # EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation print("EnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionRefined import EnhancedDynamicAdaptiveDifferentialEvolutionRefined - - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionRefined"] = EnhancedDynamicAdaptiveDifferentialEvolutionRefined - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveDifferentialEvolutionRefined + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionRefined import ( + EnhancedDynamicAdaptiveDifferentialEvolutionRefined, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionRefined"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined", register=True) +except Exception as e: # EnhancedDynamicAdaptiveDifferentialEvolutionRefined print("EnhancedDynamicAdaptiveDifferentialEvolutionRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionV2 import EnhancedDynamicAdaptiveDifferentialEvolutionV2 - - lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionV2"] = EnhancedDynamicAdaptiveDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveDifferentialEvolutionV2 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveDifferentialEvolutionV2 import ( + EnhancedDynamicAdaptiveDifferentialEvolutionV2, + ) + + lama_register["EnhancedDynamicAdaptiveDifferentialEvolutionV2"] = ( + EnhancedDynamicAdaptiveDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2", register=True) +except Exception as e: # EnhancedDynamicAdaptiveDifferentialEvolutionV2 print("EnhancedDynamicAdaptiveDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveExplorationOptimization import EnhancedDynamicAdaptiveExplorationOptimization - - lama_register["EnhancedDynamicAdaptiveExplorationOptimization"] = EnhancedDynamicAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization").set_name("LLAMAEnhancedDynamicAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveExplorationOptimization + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveExplorationOptimization import ( + EnhancedDynamicAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedDynamicAdaptiveExplorationOptimization"] = ( + EnhancedDynamicAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: # EnhancedDynamicAdaptiveExplorationOptimization print("EnhancedDynamicAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveFireworkAlgorithm import EnhancedDynamicAdaptiveFireworkAlgorithm +try: # EnhancedDynamicAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveFireworkAlgorithm import ( + EnhancedDynamicAdaptiveFireworkAlgorithm, + ) lama_register["EnhancedDynamicAdaptiveFireworkAlgorithm"] = EnhancedDynamicAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDynamicAdaptiveFireworkAlgorithm print("EnhancedDynamicAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligence import EnhancedDynamicAdaptiveGravitationalSwarmIntelligence - - lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = EnhancedDynamicAdaptiveGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveGravitationalSwarmIntelligence + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligence + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: # EnhancedDynamicAdaptiveGravitationalSwarmIntelligence print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 import EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 - - lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2"] = EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2").set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 import ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2, + ) + + lama_register["EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2"] = ( + EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: # EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 print("EnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizer import EnhancedDynamicAdaptiveHarmonySearchOptimizer - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizer"] = EnhancedDynamicAdaptiveHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizer import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizer, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizer"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizer print("EnhancedDynamicAdaptiveHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV2"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV2, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV2"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV2", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV3"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV3, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV3"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV3", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV4"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV4, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV4"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV4", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV5"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV5, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV5"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV5", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 import EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 - - lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV6"] = EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6").set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 import ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV6, + ) + + lama_register["EnhancedDynamicAdaptiveHarmonySearchOptimizerV6"] = ( + EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6" + ).set_name("LLAMAEnhancedDynamicAdaptiveHarmonySearchOptimizerV6", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 print("EnhancedDynamicAdaptiveHarmonySearchOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridDEPSO import EnhancedDynamicAdaptiveHybridDEPSO +try: # EnhancedDynamicAdaptiveHybridDEPSO + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridDEPSO import ( + EnhancedDynamicAdaptiveHybridDEPSO, + ) lama_register["EnhancedDynamicAdaptiveHybridDEPSO"] = EnhancedDynamicAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO").set_name("LLAMAEnhancedDynamicAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHybridDEPSO print("EnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimization import EnhancedDynamicAdaptiveHybridOptimization +try: # EnhancedDynamicAdaptiveHybridOptimization + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimization import ( + EnhancedDynamicAdaptiveHybridOptimization, + ) lama_register["EnhancedDynamicAdaptiveHybridOptimization"] = EnhancedDynamicAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHybridOptimization print("EnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimizer import EnhancedDynamicAdaptiveHybridOptimizer +try: # EnhancedDynamicAdaptiveHybridOptimizer + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveHybridOptimizer import ( + EnhancedDynamicAdaptiveHybridOptimizer, + ) lama_register["EnhancedDynamicAdaptiveHybridOptimizer"] = EnhancedDynamicAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer").set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAEnhancedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: # EnhancedDynamicAdaptiveHybridOptimizer print("EnhancedDynamicAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryAnnealing import EnhancedDynamicAdaptiveMemoryAnnealing +try: # EnhancedDynamicAdaptiveMemoryAnnealing + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryAnnealing import ( + EnhancedDynamicAdaptiveMemoryAnnealing, + ) lama_register["EnhancedDynamicAdaptiveMemoryAnnealing"] = EnhancedDynamicAdaptiveMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing").set_name("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveMemoryAnnealing" + ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing", register=True) +except Exception as e: # EnhancedDynamicAdaptiveMemoryAnnealing print("EnhancedDynamicAdaptiveMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryStrategyV59 import EnhancedDynamicAdaptiveMemoryStrategyV59 +try: # EnhancedDynamicAdaptiveMemoryStrategyV59 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveMemoryStrategyV59 import ( + EnhancedDynamicAdaptiveMemoryStrategyV59, + ) lama_register["EnhancedDynamicAdaptiveMemoryStrategyV59"] = EnhancedDynamicAdaptiveMemoryStrategyV59 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59").set_name("LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59" + ).set_name("LLAMAEnhancedDynamicAdaptiveMemoryStrategyV59", register=True) +except Exception as e: # EnhancedDynamicAdaptiveMemoryStrategyV59 print("EnhancedDynamicAdaptiveMemoryStrategyV59 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveOptimizerV8 import EnhancedDynamicAdaptiveOptimizerV8 +try: # EnhancedDynamicAdaptiveOptimizerV8 + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveOptimizerV8 import ( + EnhancedDynamicAdaptiveOptimizerV8, + ) lama_register["EnhancedDynamicAdaptiveOptimizerV8"] = EnhancedDynamicAdaptiveOptimizerV8 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveOptimizerV8").set_name("LLAMAEnhancedDynamicAdaptiveOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveOptimizerV8" + ).set_name("LLAMAEnhancedDynamicAdaptiveOptimizerV8", register=True) +except Exception as e: # EnhancedDynamicAdaptiveOptimizerV8 print("EnhancedDynamicAdaptiveOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptivePopulationDifferentialEvolution import EnhancedDynamicAdaptivePopulationDifferentialEvolution - - lama_register["EnhancedDynamicAdaptivePopulationDifferentialEvolution"] = EnhancedDynamicAdaptivePopulationDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution").set_name("LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedDynamicAdaptivePopulationDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDynamicAdaptivePopulationDifferentialEvolution import ( + EnhancedDynamicAdaptivePopulationDifferentialEvolution, + ) + + lama_register["EnhancedDynamicAdaptivePopulationDifferentialEvolution"] = ( + EnhancedDynamicAdaptivePopulationDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicAdaptivePopulationDifferentialEvolution", register=True) +except Exception as e: # EnhancedDynamicAdaptivePopulationDifferentialEvolution print("EnhancedDynamicAdaptivePopulationDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicAdaptiveQuantumPSO import EnhancedDynamicAdaptiveQuantumPSO +try: # EnhancedDynamicAdaptiveQuantumPSO + from nevergrad.optimization.lama.EnhancedDynamicAdaptiveQuantumPSO import ( + EnhancedDynamicAdaptiveQuantumPSO, + ) lama_register["EnhancedDynamicAdaptiveQuantumPSO"] = EnhancedDynamicAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveQuantumPSO").set_name("LLAMAEnhancedDynamicAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicAdaptiveQuantumPSO = NonObjectOptimizer( + method="LLAMAEnhancedDynamicAdaptiveQuantumPSO" + ).set_name("LLAMAEnhancedDynamicAdaptiveQuantumPSO", register=True) +except Exception as e: # EnhancedDynamicAdaptiveQuantumPSO print("EnhancedDynamicAdaptiveQuantumPSO can not be imported: ", e) -try: +try: # EnhancedDynamicBalancingPSO from nevergrad.optimization.lama.EnhancedDynamicBalancingPSO import EnhancedDynamicBalancingPSO lama_register["EnhancedDynamicBalancingPSO"] = EnhancedDynamicBalancingPSO - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO").set_name("LLAMAEnhancedDynamicBalancingPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicBalancingPSO").set_name( + "LLAMAEnhancedDynamicBalancingPSO", register=True + ) +except Exception as e: # EnhancedDynamicBalancingPSO print("EnhancedDynamicBalancingPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicClusterOptimization import EnhancedDynamicClusterOptimization +try: # EnhancedDynamicClusterOptimization + from nevergrad.optimization.lama.EnhancedDynamicClusterOptimization import ( + EnhancedDynamicClusterOptimization, + ) lama_register["EnhancedDynamicClusterOptimization"] = EnhancedDynamicClusterOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicClusterOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterOptimization").set_name("LLAMAEnhancedDynamicClusterOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicClusterOptimization" + ).set_name("LLAMAEnhancedDynamicClusterOptimization", register=True) +except Exception as e: # EnhancedDynamicClusterOptimization print("EnhancedDynamicClusterOptimization can not be imported: ", e) -try: +try: # EnhancedDynamicClusterSearch from nevergrad.optimization.lama.EnhancedDynamicClusterSearch import EnhancedDynamicClusterSearch lama_register["EnhancedDynamicClusterSearch"] = EnhancedDynamicClusterSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicClusterSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterSearch").set_name("LLAMAEnhancedDynamicClusterSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicClusterSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicClusterSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicClusterSearch" + ).set_name("LLAMAEnhancedDynamicClusterSearch", register=True) +except Exception as e: # EnhancedDynamicClusterSearch print("EnhancedDynamicClusterSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicCohortOptimization import EnhancedDynamicCohortOptimization +try: # EnhancedDynamicCohortOptimization + from nevergrad.optimization.lama.EnhancedDynamicCohortOptimization import ( + EnhancedDynamicCohortOptimization, + ) lama_register["EnhancedDynamicCohortOptimization"] = EnhancedDynamicCohortOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicCohortOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicCohortOptimization").set_name("LLAMAEnhancedDynamicCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicCohortOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCohortOptimization" + ).set_name("LLAMAEnhancedDynamicCohortOptimization", register=True) +except Exception as e: # EnhancedDynamicCohortOptimization print("EnhancedDynamicCohortOptimization can not be imported: ", e) -try: +try: # EnhancedDynamicCrossoverRAMEDS from nevergrad.optimization.lama.EnhancedDynamicCrossoverRAMEDS import EnhancedDynamicCrossoverRAMEDS lama_register["EnhancedDynamicCrossoverRAMEDS"] = EnhancedDynamicCrossoverRAMEDS - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCrossoverRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicCrossoverRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedDynamicCrossoverRAMEDS").set_name("LLAMAEnhancedDynamicCrossoverRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCrossoverRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicCrossoverRAMEDS = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCrossoverRAMEDS" + ).set_name("LLAMAEnhancedDynamicCrossoverRAMEDS", register=True) +except Exception as e: # EnhancedDynamicCrossoverRAMEDS print("EnhancedDynamicCrossoverRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicCuckooHarmonyAlgorithm import EnhancedDynamicCuckooHarmonyAlgorithm +try: # EnhancedDynamicCuckooHarmonyAlgorithm + from nevergrad.optimization.lama.EnhancedDynamicCuckooHarmonyAlgorithm import ( + EnhancedDynamicCuckooHarmonyAlgorithm, + ) lama_register["EnhancedDynamicCuckooHarmonyAlgorithm"] = EnhancedDynamicCuckooHarmonyAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm").set_name("LLAMAEnhancedDynamicCuckooHarmonyAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicCuckooHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDynamicCuckooHarmonyAlgorithm", register=True) +except Exception as e: # EnhancedDynamicCuckooHarmonyAlgorithm print("EnhancedDynamicCuckooHarmonyAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolution import EnhancedDynamicDifferentialEvolution +try: # EnhancedDynamicDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolution import ( + EnhancedDynamicDifferentialEvolution, + ) lama_register["EnhancedDynamicDifferentialEvolution"] = EnhancedDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolution").set_name("LLAMAEnhancedDynamicDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolution print("EnhancedDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionImproved import EnhancedDynamicDifferentialEvolutionImproved - - lama_register["EnhancedDynamicDifferentialEvolutionImproved"] = EnhancedDynamicDifferentialEvolutionImproved - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionImproved").set_name("LLAMAEnhancedDynamicDifferentialEvolutionImproved", register=True) -except Exception as e: +try: # EnhancedDynamicDifferentialEvolutionImproved + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionImproved import ( + EnhancedDynamicDifferentialEvolutionImproved, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionImproved"] = ( + EnhancedDynamicDifferentialEvolutionImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionImproved" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionImproved", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionImproved print("EnhancedDynamicDifferentialEvolutionImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionRefined import EnhancedDynamicDifferentialEvolutionRefined +try: # EnhancedDynamicDifferentialEvolutionRefined + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionRefined import ( + EnhancedDynamicDifferentialEvolutionRefined, + ) lama_register["EnhancedDynamicDifferentialEvolutionRefined"] = EnhancedDynamicDifferentialEvolutionRefined - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionRefined").set_name("LLAMAEnhancedDynamicDifferentialEvolutionRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionRefined" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionRefined", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionRefined print("EnhancedDynamicDifferentialEvolutionRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV2 import EnhancedDynamicDifferentialEvolutionV2 +try: # EnhancedDynamicDifferentialEvolutionV2 + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV2 import ( + EnhancedDynamicDifferentialEvolutionV2, + ) lama_register["EnhancedDynamicDifferentialEvolutionV2"] = EnhancedDynamicDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV2", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionV2 print("EnhancedDynamicDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV3 import EnhancedDynamicDifferentialEvolutionV3 +try: # EnhancedDynamicDifferentialEvolutionV3 + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionV3 import ( + EnhancedDynamicDifferentialEvolutionV3, + ) lama_register["EnhancedDynamicDifferentialEvolutionV3"] = EnhancedDynamicDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV3").set_name("LLAMAEnhancedDynamicDifferentialEvolutionV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionV3", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionV3 print("EnhancedDynamicDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover - - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover", register=True) -except Exception as e: +try: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation - - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation", register=True) -except Exception as e: +try: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined import EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined - - lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined"] = EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined", register=True) -except Exception as e: +try: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined import ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined"] = ( + EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined" + ).set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined", register=True) +except Exception as e: # EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined print("EnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover import EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover - - lama_register["EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover"] = EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover").set_name("LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover", register=True) -except Exception as e: - print("EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDiversifiedHarmonySearchOptimizer import EnhancedDynamicDiversifiedHarmonySearchOptimizer - - lama_register["EnhancedDynamicDiversifiedHarmonySearchOptimizer"] = EnhancedDynamicDiversifiedHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer", register=True) -except Exception as e: +try: # EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover + from nevergrad.optimization.lama.EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover import ( + EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover, + ) + + lama_register["EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover"] = ( + EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover" + ).set_name( + "LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover", register=True + ) +except Exception as e: # EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover + print( + "EnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover can not be imported: ", e + ) +try: # EnhancedDynamicDiversifiedHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedDynamicDiversifiedHarmonySearchOptimizer import ( + EnhancedDynamicDiversifiedHarmonySearchOptimizer, + ) + + lama_register["EnhancedDynamicDiversifiedHarmonySearchOptimizer"] = ( + EnhancedDynamicDiversifiedHarmonySearchOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedDynamicDiversifiedHarmonySearchOptimizer print("EnhancedDynamicDiversifiedHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicDualPhaseStrategyV12 import EnhancedDynamicDualPhaseStrategyV12 +try: # EnhancedDynamicDualPhaseStrategyV12 + from nevergrad.optimization.lama.EnhancedDynamicDualPhaseStrategyV12 import ( + EnhancedDynamicDualPhaseStrategyV12, + ) lama_register["EnhancedDynamicDualPhaseStrategyV12"] = EnhancedDynamicDualPhaseStrategyV12 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDualPhaseStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicDualPhaseStrategyV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicDualPhaseStrategyV12").set_name("LLAMAEnhancedDynamicDualPhaseStrategyV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicDualPhaseStrategyV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicDualPhaseStrategyV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicDualPhaseStrategyV12" + ).set_name("LLAMAEnhancedDynamicDualPhaseStrategyV12", register=True) +except Exception as e: # EnhancedDynamicDualPhaseStrategyV12 print("EnhancedDynamicDualPhaseStrategyV12 can not be imported: ", e) -try: +try: # EnhancedDynamicEliteAnnealingDE from nevergrad.optimization.lama.EnhancedDynamicEliteAnnealingDE import EnhancedDynamicEliteAnnealingDE lama_register["EnhancedDynamicEliteAnnealingDE"] = EnhancedDynamicEliteAnnealingDE - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicEliteAnnealingDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicEliteAnnealingDE").set_name("LLAMAEnhancedDynamicEliteAnnealingDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEliteAnnealingDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicEliteAnnealingDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEliteAnnealingDE" + ).set_name("LLAMAEnhancedDynamicEliteAnnealingDE", register=True) +except Exception as e: # EnhancedDynamicEliteAnnealingDE print("EnhancedDynamicEliteAnnealingDE can not be imported: ", e) -try: +try: # EnhancedDynamicEscapeStrategyV32 from nevergrad.optimization.lama.EnhancedDynamicEscapeStrategyV32 import EnhancedDynamicEscapeStrategyV32 lama_register["EnhancedDynamicEscapeStrategyV32"] = EnhancedDynamicEscapeStrategyV32 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEscapeStrategyV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicEscapeStrategyV32 = NonObjectOptimizer(method="LLAMAEnhancedDynamicEscapeStrategyV32").set_name("LLAMAEnhancedDynamicEscapeStrategyV32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEscapeStrategyV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicEscapeStrategyV32 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEscapeStrategyV32" + ).set_name("LLAMAEnhancedDynamicEscapeStrategyV32", register=True) +except Exception as e: # EnhancedDynamicEscapeStrategyV32 print("EnhancedDynamicEscapeStrategyV32 can not be imported: ", e) -try: +try: # EnhancedDynamicEvolutionStrategy from nevergrad.optimization.lama.EnhancedDynamicEvolutionStrategy import EnhancedDynamicEvolutionStrategy lama_register["EnhancedDynamicEvolutionStrategy"] = EnhancedDynamicEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedDynamicEvolutionStrategy").set_name("LLAMAEnhancedDynamicEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedDynamicEvolutionStrategy" + ).set_name("LLAMAEnhancedDynamicEvolutionStrategy", register=True) +except Exception as e: # EnhancedDynamicEvolutionStrategy print("EnhancedDynamicEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicExplorationOptimizer import EnhancedDynamicExplorationOptimizer +try: # EnhancedDynamicExplorationOptimizer + from nevergrad.optimization.lama.EnhancedDynamicExplorationOptimizer import ( + EnhancedDynamicExplorationOptimizer, + ) lama_register["EnhancedDynamicExplorationOptimizer"] = EnhancedDynamicExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicExplorationOptimizer").set_name("LLAMAEnhancedDynamicExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicExplorationOptimizer" + ).set_name("LLAMAEnhancedDynamicExplorationOptimizer", register=True) +except Exception as e: # EnhancedDynamicExplorationOptimizer print("EnhancedDynamicExplorationOptimizer can not be imported: ", e) -try: +try: # EnhancedDynamicFireworkAlgorithm from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithm import EnhancedDynamicFireworkAlgorithm lama_register["EnhancedDynamicFireworkAlgorithm"] = EnhancedDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithm").set_name("LLAMAEnhancedDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithm print("EnhancedDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmFinal import EnhancedDynamicFireworkAlgorithmFinal +try: # EnhancedDynamicFireworkAlgorithmFinal + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmFinal import ( + EnhancedDynamicFireworkAlgorithmFinal, + ) lama_register["EnhancedDynamicFireworkAlgorithmFinal"] = EnhancedDynamicFireworkAlgorithmFinal - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmFinal = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmFinal").set_name("LLAMAEnhancedDynamicFireworkAlgorithmFinal", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmFinal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmFinal = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmFinal" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmFinal", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmFinal print("EnhancedDynamicFireworkAlgorithmFinal can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmImproved import EnhancedDynamicFireworkAlgorithmImproved +try: # EnhancedDynamicFireworkAlgorithmImproved + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmImproved import ( + EnhancedDynamicFireworkAlgorithmImproved, + ) lama_register["EnhancedDynamicFireworkAlgorithmImproved"] = EnhancedDynamicFireworkAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedDynamicFireworkAlgorithmImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmImproved print("EnhancedDynamicFireworkAlgorithmImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRedesigned import EnhancedDynamicFireworkAlgorithmRedesigned +try: # EnhancedDynamicFireworkAlgorithmRedesigned + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRedesigned import ( + EnhancedDynamicFireworkAlgorithmRedesigned, + ) lama_register["EnhancedDynamicFireworkAlgorithmRedesigned"] = EnhancedDynamicFireworkAlgorithmRedesigned - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned").set_name("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmRedesigned = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmRedesigned" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmRedesigned print("EnhancedDynamicFireworkAlgorithmRedesigned can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRefined import EnhancedDynamicFireworkAlgorithmRefined +try: # EnhancedDynamicFireworkAlgorithmRefined + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmRefined import ( + EnhancedDynamicFireworkAlgorithmRefined, + ) lama_register["EnhancedDynamicFireworkAlgorithmRefined"] = EnhancedDynamicFireworkAlgorithmRefined - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRefined").set_name("LLAMAEnhancedDynamicFireworkAlgorithmRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmRefined print("EnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmV2 import EnhancedDynamicFireworkAlgorithmV2 +try: # EnhancedDynamicFireworkAlgorithmV2 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmV2 import ( + EnhancedDynamicFireworkAlgorithmV2, + ) lama_register["EnhancedDynamicFireworkAlgorithmV2"] = EnhancedDynamicFireworkAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmV2").set_name("LLAMAEnhancedDynamicFireworkAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmV2", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmV2 print("EnhancedDynamicFireworkAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization - - lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10 can not be imported: ", + e, + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11 can not be imported: ", + e, + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12 can not be imported: ", + e, + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13 can not be imported: ", + e, + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 + print( + "EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9 can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization print("EnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization import EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization - - lama_register["EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization"] = EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization import ( + EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization print("EnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization - - lama_register["EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization", register=True) -except Exception as e: - print("EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithHybridSearch import EnhancedDynamicFireworkAlgorithmWithHybridSearch - - lama_register["EnhancedDynamicFireworkAlgorithmWithHybridSearch"] = EnhancedDynamicFireworkAlgorithmWithHybridSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization" + ).set_name( + "LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization", register=True + ) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization + print( + "EnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization can not be imported: ", e + ) +try: # EnhancedDynamicFireworkAlgorithmWithHybridSearch + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithHybridSearch import ( + EnhancedDynamicFireworkAlgorithmWithHybridSearch, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithHybridSearch"] = ( + EnhancedDynamicFireworkAlgorithmWithHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithHybridSearch print("EnhancedDynamicFireworkAlgorithmWithHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization import EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization - - lama_register["EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization"] = EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization").set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization + from nevergrad.optimization.lama.EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization import ( + EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization, + ) + + lama_register["EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization"] = ( + EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization" + ).set_name("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization", register=True) +except Exception as e: # EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization print("EnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolution import EnhancedDynamicFireworkDifferentialEvolution - - lama_register["EnhancedDynamicFireworkDifferentialEvolution"] = EnhancedDynamicFireworkDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolution").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolution import ( + EnhancedDynamicFireworkDifferentialEvolution, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolution"] = ( + EnhancedDynamicFireworkDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolution", register=True) +except Exception as e: # EnhancedDynamicFireworkDifferentialEvolution print("EnhancedDynamicFireworkDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV2 import EnhancedDynamicFireworkDifferentialEvolutionV2 - - lama_register["EnhancedDynamicFireworkDifferentialEvolutionV2"] = EnhancedDynamicFireworkDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkDifferentialEvolutionV2 + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV2 import ( + EnhancedDynamicFireworkDifferentialEvolutionV2, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV2"] = ( + EnhancedDynamicFireworkDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV2", register=True) +except Exception as e: # EnhancedDynamicFireworkDifferentialEvolutionV2 print("EnhancedDynamicFireworkDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV3 import EnhancedDynamicFireworkDifferentialEvolutionV3 - - lama_register["EnhancedDynamicFireworkDifferentialEvolutionV3"] = EnhancedDynamicFireworkDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3").set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3", register=True) -except Exception as e: +try: # EnhancedDynamicFireworkDifferentialEvolutionV3 + from nevergrad.optimization.lama.EnhancedDynamicFireworkDifferentialEvolutionV3 import ( + EnhancedDynamicFireworkDifferentialEvolutionV3, + ) + + lama_register["EnhancedDynamicFireworkDifferentialEvolutionV3"] = ( + EnhancedDynamicFireworkDifferentialEvolutionV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedDynamicFireworkDifferentialEvolutionV3", register=True) +except Exception as e: # EnhancedDynamicFireworkDifferentialEvolutionV3 print("EnhancedDynamicFireworkDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealing import EnhancedDynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealing"] = EnhancedDynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedDynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealing import ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # EnhancedDynamicGradientBoostedMemorySimulatedAnnealing print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus import EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus - - lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) -except Exception as e: +try: # EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus + from nevergrad.optimization.lama.EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: # EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus print("EnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonyAlgorithm from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithm import EnhancedDynamicHarmonyAlgorithm lama_register["EnhancedDynamicHarmonyAlgorithm"] = EnhancedDynamicHarmonyAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithm").set_name("LLAMAEnhancedDynamicHarmonyAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyAlgorithm" + ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithm", register=True) +except Exception as e: # EnhancedDynamicHarmonyAlgorithm print("EnhancedDynamicHarmonyAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithmV2 import EnhancedDynamicHarmonyAlgorithmV2 +try: # EnhancedDynamicHarmonyAlgorithmV2 + from nevergrad.optimization.lama.EnhancedDynamicHarmonyAlgorithmV2 import ( + EnhancedDynamicHarmonyAlgorithmV2, + ) lama_register["EnhancedDynamicHarmonyAlgorithmV2"] = EnhancedDynamicHarmonyAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonyAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithmV2").set_name("LLAMAEnhancedDynamicHarmonyAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonyAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicHarmonyAlgorithmV2", register=True) +except Exception as e: # EnhancedDynamicHarmonyAlgorithmV2 print("EnhancedDynamicHarmonyAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonyFireworksSearch import EnhancedDynamicHarmonyFireworksSearch +try: # EnhancedDynamicHarmonyFireworksSearch + from nevergrad.optimization.lama.EnhancedDynamicHarmonyFireworksSearch import ( + EnhancedDynamicHarmonyFireworksSearch, + ) lama_register["EnhancedDynamicHarmonyFireworksSearch"] = EnhancedDynamicHarmonyFireworksSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyFireworksSearch").set_name("LLAMAEnhancedDynamicHarmonyFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyFireworksSearch" + ).set_name("LLAMAEnhancedDynamicHarmonyFireworksSearch", register=True) +except Exception as e: # EnhancedDynamicHarmonyFireworksSearch print("EnhancedDynamicHarmonyFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizer import EnhancedDynamicHarmonySearchOptimizer +try: # EnhancedDynamicHarmonySearchOptimizer + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizer import ( + EnhancedDynamicHarmonySearchOptimizer, + ) lama_register["EnhancedDynamicHarmonySearchOptimizer"] = EnhancedDynamicHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizer").set_name("LLAMAEnhancedDynamicHarmonySearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchOptimizer" + ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizer", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchOptimizer print("EnhancedDynamicHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizerV7 import EnhancedDynamicHarmonySearchOptimizerV7 +try: # EnhancedDynamicHarmonySearchOptimizerV7 + from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchOptimizerV7 import ( + EnhancedDynamicHarmonySearchOptimizerV7, + ) lama_register["EnhancedDynamicHarmonySearchOptimizerV7"] = EnhancedDynamicHarmonySearchOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7").set_name("LLAMAEnhancedDynamicHarmonySearchOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchOptimizerV7" + ).set_name("LLAMAEnhancedDynamicHarmonySearchOptimizerV7", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchOptimizerV7 print("EnhancedDynamicHarmonySearchOptimizerV7 can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonySearchV5 from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV5 import EnhancedDynamicHarmonySearchV5 lama_register["EnhancedDynamicHarmonySearchV5"] = EnhancedDynamicHarmonySearchV5 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV5").set_name("LLAMAEnhancedDynamicHarmonySearchV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV5" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV5", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchV5 print("EnhancedDynamicHarmonySearchV5 can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonySearchV6 from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV6 import EnhancedDynamicHarmonySearchV6 lama_register["EnhancedDynamicHarmonySearchV6"] = EnhancedDynamicHarmonySearchV6 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV6").set_name("LLAMAEnhancedDynamicHarmonySearchV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV6" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV6", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchV6 print("EnhancedDynamicHarmonySearchV6 can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonySearchV7 from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV7 import EnhancedDynamicHarmonySearchV7 lama_register["EnhancedDynamicHarmonySearchV7"] = EnhancedDynamicHarmonySearchV7 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV7").set_name("LLAMAEnhancedDynamicHarmonySearchV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV7" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV7", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchV7 print("EnhancedDynamicHarmonySearchV7 can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonySearchV8 from nevergrad.optimization.lama.EnhancedDynamicHarmonySearchV8 import EnhancedDynamicHarmonySearchV8 lama_register["EnhancedDynamicHarmonySearchV8"] = EnhancedDynamicHarmonySearchV8 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonySearchV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV8").set_name("LLAMAEnhancedDynamicHarmonySearchV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonySearchV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonySearchV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonySearchV8" + ).set_name("LLAMAEnhancedDynamicHarmonySearchV8", register=True) +except Exception as e: # EnhancedDynamicHarmonySearchV8 print("EnhancedDynamicHarmonySearchV8 can not be imported: ", e) -try: +try: # EnhancedDynamicHarmonyTabuSearch from nevergrad.optimization.lama.EnhancedDynamicHarmonyTabuSearch import EnhancedDynamicHarmonyTabuSearch lama_register["EnhancedDynamicHarmonyTabuSearch"] = EnhancedDynamicHarmonyTabuSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyTabuSearch").set_name("LLAMAEnhancedDynamicHarmonyTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHarmonyTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHarmonyTabuSearch" + ).set_name("LLAMAEnhancedDynamicHarmonyTabuSearch", register=True) +except Exception as e: # EnhancedDynamicHarmonyTabuSearch print("EnhancedDynamicHarmonyTabuSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHybridDEPSOWithEliteMemory import EnhancedDynamicHybridDEPSOWithEliteMemory +try: # EnhancedDynamicHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.EnhancedDynamicHybridDEPSOWithEliteMemory import ( + EnhancedDynamicHybridDEPSOWithEliteMemory, + ) lama_register["EnhancedDynamicHybridDEPSOWithEliteMemory"] = EnhancedDynamicHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory").set_name("LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory" + ).set_name("LLAMAEnhancedDynamicHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # EnhancedDynamicHybridDEPSOWithEliteMemory print("EnhancedDynamicHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 import EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 - - lama_register["EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21"] = EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21").set_name("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21", register=True) -except Exception as e: +try: # EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 + from nevergrad.optimization.lama.EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 import ( + EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21, + ) + + lama_register["EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21"] = ( + EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21" + ).set_name("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21", register=True) +except Exception as e: # EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 print("EnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicHybridOptimization import EnhancedDynamicHybridOptimization +try: # EnhancedDynamicHybridOptimization + from nevergrad.optimization.lama.EnhancedDynamicHybridOptimization import ( + EnhancedDynamicHybridOptimization, + ) lama_register["EnhancedDynamicHybridOptimization"] = EnhancedDynamicHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimization").set_name("LLAMAEnhancedDynamicHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridOptimization" + ).set_name("LLAMAEnhancedDynamicHybridOptimization", register=True) +except Exception as e: # EnhancedDynamicHybridOptimization print("EnhancedDynamicHybridOptimization can not be imported: ", e) -try: +try: # EnhancedDynamicHybridOptimizer from nevergrad.optimization.lama.EnhancedDynamicHybridOptimizer import EnhancedDynamicHybridOptimizer lama_register["EnhancedDynamicHybridOptimizer"] = EnhancedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimizer").set_name("LLAMAEnhancedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: # EnhancedDynamicHybridOptimizer print("EnhancedDynamicHybridOptimizer can not be imported: ", e) -try: +try: # EnhancedDynamicLevyHarmonySearch from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearch import EnhancedDynamicLevyHarmonySearch lama_register["EnhancedDynamicLevyHarmonySearch"] = EnhancedDynamicLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearch").set_name("LLAMAEnhancedDynamicLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearch" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearch", register=True) +except Exception as e: # EnhancedDynamicLevyHarmonySearch print("EnhancedDynamicLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV2 import EnhancedDynamicLevyHarmonySearchV2 +try: # EnhancedDynamicLevyHarmonySearchV2 + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV2 import ( + EnhancedDynamicLevyHarmonySearchV2, + ) lama_register["EnhancedDynamicLevyHarmonySearchV2"] = EnhancedDynamicLevyHarmonySearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLevyHarmonySearchV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV2").set_name("LLAMAEnhancedDynamicLevyHarmonySearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLevyHarmonySearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearchV2" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV2", register=True) +except Exception as e: # EnhancedDynamicLevyHarmonySearchV2 print("EnhancedDynamicLevyHarmonySearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV3 import EnhancedDynamicLevyHarmonySearchV3 +try: # EnhancedDynamicLevyHarmonySearchV3 + from nevergrad.optimization.lama.EnhancedDynamicLevyHarmonySearchV3 import ( + EnhancedDynamicLevyHarmonySearchV3, + ) lama_register["EnhancedDynamicLevyHarmonySearchV3"] = EnhancedDynamicLevyHarmonySearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLevyHarmonySearchV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV3").set_name("LLAMAEnhancedDynamicLevyHarmonySearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLevyHarmonySearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLevyHarmonySearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLevyHarmonySearchV3" + ).set_name("LLAMAEnhancedDynamicLevyHarmonySearchV3", register=True) +except Exception as e: # EnhancedDynamicLevyHarmonySearchV3 print("EnhancedDynamicLevyHarmonySearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithm import EnhancedDynamicLocalSearchFireworkAlgorithm +try: # EnhancedDynamicLocalSearchFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithm import ( + EnhancedDynamicLocalSearchFireworkAlgorithm, + ) lama_register["EnhancedDynamicLocalSearchFireworkAlgorithm"] = EnhancedDynamicLocalSearchFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDynamicLocalSearchFireworkAlgorithm print("EnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV2 import EnhancedDynamicLocalSearchFireworkAlgorithmV2 - - lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV2"] = EnhancedDynamicLocalSearchFireworkAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2", register=True) -except Exception as e: +try: # EnhancedDynamicLocalSearchFireworkAlgorithmV2 + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV2 import ( + EnhancedDynamicLocalSearchFireworkAlgorithmV2, + ) + + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV2"] = ( + EnhancedDynamicLocalSearchFireworkAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2", register=True) +except Exception as e: # EnhancedDynamicLocalSearchFireworkAlgorithmV2 print("EnhancedDynamicLocalSearchFireworkAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV3 import EnhancedDynamicLocalSearchFireworkAlgorithmV3 - - lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV3"] = EnhancedDynamicLocalSearchFireworkAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3").set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3", register=True) -except Exception as e: +try: # EnhancedDynamicLocalSearchFireworkAlgorithmV3 + from nevergrad.optimization.lama.EnhancedDynamicLocalSearchFireworkAlgorithmV3 import ( + EnhancedDynamicLocalSearchFireworkAlgorithmV3, + ) + + lama_register["EnhancedDynamicLocalSearchFireworkAlgorithmV3"] = ( + EnhancedDynamicLocalSearchFireworkAlgorithmV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3" + ).set_name("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3", register=True) +except Exception as e: # EnhancedDynamicLocalSearchFireworkAlgorithmV3 print("EnhancedDynamicLocalSearchFireworkAlgorithmV3 can not be imported: ", e) -try: +try: # EnhancedDynamicMemoryStrategyV51 from nevergrad.optimization.lama.EnhancedDynamicMemoryStrategyV51 import EnhancedDynamicMemoryStrategyV51 lama_register["EnhancedDynamicMemoryStrategyV51"] = EnhancedDynamicMemoryStrategyV51 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMemoryStrategyV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicMemoryStrategyV51 = NonObjectOptimizer(method="LLAMAEnhancedDynamicMemoryStrategyV51").set_name("LLAMAEnhancedDynamicMemoryStrategyV51", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMemoryStrategyV51")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicMemoryStrategyV51 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMemoryStrategyV51" + ).set_name("LLAMAEnhancedDynamicMemoryStrategyV51", register=True) +except Exception as e: # EnhancedDynamicMemoryStrategyV51 print("EnhancedDynamicMemoryStrategyV51 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicMultiPhaseAnnealingPlus import EnhancedDynamicMultiPhaseAnnealingPlus +try: # EnhancedDynamicMultiPhaseAnnealingPlus + from nevergrad.optimization.lama.EnhancedDynamicMultiPhaseAnnealingPlus import ( + EnhancedDynamicMultiPhaseAnnealingPlus, + ) lama_register["EnhancedDynamicMultiPhaseAnnealingPlus"] = EnhancedDynamicMultiPhaseAnnealingPlus - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicMultiPhaseAnnealingPlus = NonObjectOptimizer(method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus").set_name("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicMultiPhaseAnnealingPlus = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMultiPhaseAnnealingPlus" + ).set_name("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus", register=True) +except Exception as e: # EnhancedDynamicMultiPhaseAnnealingPlus print("EnhancedDynamicMultiPhaseAnnealingPlus can not be imported: ", e) -try: +try: # EnhancedDynamicMutationSearch from nevergrad.optimization.lama.EnhancedDynamicMutationSearch import EnhancedDynamicMutationSearch lama_register["EnhancedDynamicMutationSearch"] = EnhancedDynamicMutationSearch - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicMutationSearch = NonObjectOptimizer(method="LLAMAEnhancedDynamicMutationSearch").set_name("LLAMAEnhancedDynamicMutationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicMutationSearch = NonObjectOptimizer( + method="LLAMAEnhancedDynamicMutationSearch" + ).set_name("LLAMAEnhancedDynamicMutationSearch", register=True) +except Exception as e: # EnhancedDynamicMutationSearch print("EnhancedDynamicMutationSearch can not be imported: ", e) -try: +try: # EnhancedDynamicNichePSO_DE_LS from nevergrad.optimization.lama.EnhancedDynamicNichePSO_DE_LS import EnhancedDynamicNichePSO_DE_LS lama_register["EnhancedDynamicNichePSO_DE_LS"] = EnhancedDynamicNichePSO_DE_LS - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicNichePSO_DE_LS = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichePSO_DE_LS").set_name("LLAMAEnhancedDynamicNichePSO_DE_LS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichePSO_DE_LS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicNichePSO_DE_LS = NonObjectOptimizer( + method="LLAMAEnhancedDynamicNichePSO_DE_LS" + ).set_name("LLAMAEnhancedDynamicNichePSO_DE_LS", register=True) +except Exception as e: # EnhancedDynamicNichePSO_DE_LS print("EnhancedDynamicNichePSO_DE_LS can not be imported: ", e) -try: +try: # EnhancedDynamicNichingDEPSO from nevergrad.optimization.lama.EnhancedDynamicNichingDEPSO import EnhancedDynamicNichingDEPSO lama_register["EnhancedDynamicNichingDEPSO"] = EnhancedDynamicNichingDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicNichingDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO").set_name("LLAMAEnhancedDynamicNichingDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicNichingDEPSO = NonObjectOptimizer(method="LLAMAEnhancedDynamicNichingDEPSO").set_name( + "LLAMAEnhancedDynamicNichingDEPSO", register=True + ) +except Exception as e: # EnhancedDynamicNichingDEPSO print("EnhancedDynamicNichingDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicPrecisionBalancedEvolution import EnhancedDynamicPrecisionBalancedEvolution +try: # EnhancedDynamicPrecisionBalancedEvolution + from nevergrad.optimization.lama.EnhancedDynamicPrecisionBalancedEvolution import ( + EnhancedDynamicPrecisionBalancedEvolution, + ) lama_register["EnhancedDynamicPrecisionBalancedEvolution"] = EnhancedDynamicPrecisionBalancedEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicPrecisionBalancedEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionBalancedEvolution").set_name("LLAMAEnhancedDynamicPrecisionBalancedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionBalancedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicPrecisionBalancedEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicPrecisionBalancedEvolution" + ).set_name("LLAMAEnhancedDynamicPrecisionBalancedEvolution", register=True) +except Exception as e: # EnhancedDynamicPrecisionBalancedEvolution print("EnhancedDynamicPrecisionBalancedEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicPrecisionOptimizer import EnhancedDynamicPrecisionOptimizer +try: # EnhancedDynamicPrecisionOptimizer + from nevergrad.optimization.lama.EnhancedDynamicPrecisionOptimizer import ( + EnhancedDynamicPrecisionOptimizer, + ) lama_register["EnhancedDynamicPrecisionOptimizer"] = EnhancedDynamicPrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionOptimizer").set_name("LLAMAEnhancedDynamicPrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedDynamicPrecisionOptimizer" + ).set_name("LLAMAEnhancedDynamicPrecisionOptimizer", register=True) +except Exception as e: # EnhancedDynamicPrecisionOptimizer print("EnhancedDynamicPrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolution import EnhancedDynamicQuantumDifferentialEvolution +try: # EnhancedDynamicQuantumDifferentialEvolution + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolution import ( + EnhancedDynamicQuantumDifferentialEvolution, + ) lama_register["EnhancedDynamicQuantumDifferentialEvolution"] = EnhancedDynamicQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolution").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolution", register=True) +except Exception as e: # EnhancedDynamicQuantumDifferentialEvolution print("EnhancedDynamicQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory import EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory - - lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory"] = EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory", register=True) -except Exception as e: - print("EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart import EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart - - lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart"] = EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart").set_name("LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart", register=True) -except Exception as e: - print("EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimization import EnhancedDynamicQuantumSwarmOptimization +try: # EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory import ( + EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory, + ) + + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory"] = ( + EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory" + ).set_name( + "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory", register=True + ) +except Exception as e: # EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory + print( + "EnhancedDynamicQuantumDifferentialEvolutionWithAdaptiveRestartAndDiverseMemory can not be imported: ", + e, + ) +try: # EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart + from nevergrad.optimization.lama.EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart import ( + EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart, + ) + + lama_register["EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart"] = ( + EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart" + ).set_name( + "LLAMAEnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart", register=True + ) +except Exception as e: # EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart + print( + "EnhancedDynamicQuantumDifferentialEvolutionWithLocalSearchAndAdaptiveRestart can not be imported: ", + e, + ) +try: # EnhancedDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimization import ( + EnhancedDynamicQuantumSwarmOptimization, + ) lama_register["EnhancedDynamicQuantumSwarmOptimization"] = EnhancedDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimization print("EnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationFinal import EnhancedDynamicQuantumSwarmOptimizationFinal - - lama_register["EnhancedDynamicQuantumSwarmOptimizationFinal"] = EnhancedDynamicQuantumSwarmOptimizationFinal - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal", register=True) -except Exception as e: +try: # EnhancedDynamicQuantumSwarmOptimizationFinal + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationFinal import ( + EnhancedDynamicQuantumSwarmOptimizationFinal, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationFinal"] = ( + EnhancedDynamicQuantumSwarmOptimizationFinal + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationFinal", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationFinal print("EnhancedDynamicQuantumSwarmOptimizationFinal can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationImproved import EnhancedDynamicQuantumSwarmOptimizationImproved - - lama_register["EnhancedDynamicQuantumSwarmOptimizationImproved"] = EnhancedDynamicQuantumSwarmOptimizationImproved - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved", register=True) -except Exception as e: +try: # EnhancedDynamicQuantumSwarmOptimizationImproved + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationImproved import ( + EnhancedDynamicQuantumSwarmOptimizationImproved, + ) + + lama_register["EnhancedDynamicQuantumSwarmOptimizationImproved"] = ( + EnhancedDynamicQuantumSwarmOptimizationImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationImproved", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationImproved print("EnhancedDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV10 import EnhancedDynamicQuantumSwarmOptimizationV10 +try: # EnhancedDynamicQuantumSwarmOptimizationV10 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV10 import ( + EnhancedDynamicQuantumSwarmOptimizationV10, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV10"] = EnhancedDynamicQuantumSwarmOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV10", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV10 print("EnhancedDynamicQuantumSwarmOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV11 import EnhancedDynamicQuantumSwarmOptimizationV11 +try: # EnhancedDynamicQuantumSwarmOptimizationV11 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV11 import ( + EnhancedDynamicQuantumSwarmOptimizationV11, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV11"] = EnhancedDynamicQuantumSwarmOptimizationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV11", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV11 print("EnhancedDynamicQuantumSwarmOptimizationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV12 import EnhancedDynamicQuantumSwarmOptimizationV12 +try: # EnhancedDynamicQuantumSwarmOptimizationV12 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV12 import ( + EnhancedDynamicQuantumSwarmOptimizationV12, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV12"] = EnhancedDynamicQuantumSwarmOptimizationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV12", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV12 print("EnhancedDynamicQuantumSwarmOptimizationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV13 import EnhancedDynamicQuantumSwarmOptimizationV13 +try: # EnhancedDynamicQuantumSwarmOptimizationV13 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV13 import ( + EnhancedDynamicQuantumSwarmOptimizationV13, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV13"] = EnhancedDynamicQuantumSwarmOptimizationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV13", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV13 print("EnhancedDynamicQuantumSwarmOptimizationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV14 import EnhancedDynamicQuantumSwarmOptimizationV14 +try: # EnhancedDynamicQuantumSwarmOptimizationV14 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV14 import ( + EnhancedDynamicQuantumSwarmOptimizationV14, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV14"] = EnhancedDynamicQuantumSwarmOptimizationV14 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV14" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV14", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV14 print("EnhancedDynamicQuantumSwarmOptimizationV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV15 import EnhancedDynamicQuantumSwarmOptimizationV15 +try: # EnhancedDynamicQuantumSwarmOptimizationV15 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV15 import ( + EnhancedDynamicQuantumSwarmOptimizationV15, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV15"] = EnhancedDynamicQuantumSwarmOptimizationV15 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV15" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV15", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV15 print("EnhancedDynamicQuantumSwarmOptimizationV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV16 import EnhancedDynamicQuantumSwarmOptimizationV16 +try: # EnhancedDynamicQuantumSwarmOptimizationV16 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV16 import ( + EnhancedDynamicQuantumSwarmOptimizationV16, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV16"] = EnhancedDynamicQuantumSwarmOptimizationV16 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV16" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV16", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV16 print("EnhancedDynamicQuantumSwarmOptimizationV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV17 import EnhancedDynamicQuantumSwarmOptimizationV17 +try: # EnhancedDynamicQuantumSwarmOptimizationV17 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV17 import ( + EnhancedDynamicQuantumSwarmOptimizationV17, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV17"] = EnhancedDynamicQuantumSwarmOptimizationV17 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV17" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV17", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV17 print("EnhancedDynamicQuantumSwarmOptimizationV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV18 import EnhancedDynamicQuantumSwarmOptimizationV18 +try: # EnhancedDynamicQuantumSwarmOptimizationV18 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV18 import ( + EnhancedDynamicQuantumSwarmOptimizationV18, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV18"] = EnhancedDynamicQuantumSwarmOptimizationV18 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV18 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV18 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV18" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV18", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV18 print("EnhancedDynamicQuantumSwarmOptimizationV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV19 import EnhancedDynamicQuantumSwarmOptimizationV19 +try: # EnhancedDynamicQuantumSwarmOptimizationV19 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV19 import ( + EnhancedDynamicQuantumSwarmOptimizationV19, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV19"] = EnhancedDynamicQuantumSwarmOptimizationV19 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV19 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV19 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV19" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV19", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV19 print("EnhancedDynamicQuantumSwarmOptimizationV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV2 import EnhancedDynamicQuantumSwarmOptimizationV2 +try: # EnhancedDynamicQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV2 import ( + EnhancedDynamicQuantumSwarmOptimizationV2, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV2"] = EnhancedDynamicQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV2 print("EnhancedDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV20 import EnhancedDynamicQuantumSwarmOptimizationV20 +try: # EnhancedDynamicQuantumSwarmOptimizationV20 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV20 import ( + EnhancedDynamicQuantumSwarmOptimizationV20, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV20"] = EnhancedDynamicQuantumSwarmOptimizationV20 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV20 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV20 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV20" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV20", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV20 print("EnhancedDynamicQuantumSwarmOptimizationV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV21 import EnhancedDynamicQuantumSwarmOptimizationV21 +try: # EnhancedDynamicQuantumSwarmOptimizationV21 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV21 import ( + EnhancedDynamicQuantumSwarmOptimizationV21, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV21"] = EnhancedDynamicQuantumSwarmOptimizationV21 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV21 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV21 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV21" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV21", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV21 print("EnhancedDynamicQuantumSwarmOptimizationV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV22 import EnhancedDynamicQuantumSwarmOptimizationV22 +try: # EnhancedDynamicQuantumSwarmOptimizationV22 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV22 import ( + EnhancedDynamicQuantumSwarmOptimizationV22, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV22"] = EnhancedDynamicQuantumSwarmOptimizationV22 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV22 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV22 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV22" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV22", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV22 print("EnhancedDynamicQuantumSwarmOptimizationV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV23 import EnhancedDynamicQuantumSwarmOptimizationV23 +try: # EnhancedDynamicQuantumSwarmOptimizationV23 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV23 import ( + EnhancedDynamicQuantumSwarmOptimizationV23, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV23"] = EnhancedDynamicQuantumSwarmOptimizationV23 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV23 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV23 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV23" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV23", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV23 print("EnhancedDynamicQuantumSwarmOptimizationV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV24 import EnhancedDynamicQuantumSwarmOptimizationV24 +try: # EnhancedDynamicQuantumSwarmOptimizationV24 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV24 import ( + EnhancedDynamicQuantumSwarmOptimizationV24, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV24"] = EnhancedDynamicQuantumSwarmOptimizationV24 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV24 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV24 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV24" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV24", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV24 print("EnhancedDynamicQuantumSwarmOptimizationV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV25 import EnhancedDynamicQuantumSwarmOptimizationV25 +try: # EnhancedDynamicQuantumSwarmOptimizationV25 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV25 import ( + EnhancedDynamicQuantumSwarmOptimizationV25, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV25"] = EnhancedDynamicQuantumSwarmOptimizationV25 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV25 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV25 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV25" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV25", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV25 print("EnhancedDynamicQuantumSwarmOptimizationV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV26 import EnhancedDynamicQuantumSwarmOptimizationV26 +try: # EnhancedDynamicQuantumSwarmOptimizationV26 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV26 import ( + EnhancedDynamicQuantumSwarmOptimizationV26, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV26"] = EnhancedDynamicQuantumSwarmOptimizationV26 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV26 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV26 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV26" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV26", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV26 print("EnhancedDynamicQuantumSwarmOptimizationV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV27 import EnhancedDynamicQuantumSwarmOptimizationV27 +try: # EnhancedDynamicQuantumSwarmOptimizationV27 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV27 import ( + EnhancedDynamicQuantumSwarmOptimizationV27, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV27"] = EnhancedDynamicQuantumSwarmOptimizationV27 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV27 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV27 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV27" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV27", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV27 print("EnhancedDynamicQuantumSwarmOptimizationV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV28 import EnhancedDynamicQuantumSwarmOptimizationV28 +try: # EnhancedDynamicQuantumSwarmOptimizationV28 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV28 import ( + EnhancedDynamicQuantumSwarmOptimizationV28, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV28"] = EnhancedDynamicQuantumSwarmOptimizationV28 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV28 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV28 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV28" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV28", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV28 print("EnhancedDynamicQuantumSwarmOptimizationV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV3 import EnhancedDynamicQuantumSwarmOptimizationV3 +try: # EnhancedDynamicQuantumSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV3 import ( + EnhancedDynamicQuantumSwarmOptimizationV3, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV3"] = EnhancedDynamicQuantumSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV3 print("EnhancedDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV4 import EnhancedDynamicQuantumSwarmOptimizationV4 +try: # EnhancedDynamicQuantumSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV4 import ( + EnhancedDynamicQuantumSwarmOptimizationV4, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV4"] = EnhancedDynamicQuantumSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV4 print("EnhancedDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV5 import EnhancedDynamicQuantumSwarmOptimizationV5 +try: # EnhancedDynamicQuantumSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV5 import ( + EnhancedDynamicQuantumSwarmOptimizationV5, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV5"] = EnhancedDynamicQuantumSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV5 print("EnhancedDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV6 import EnhancedDynamicQuantumSwarmOptimizationV6 +try: # EnhancedDynamicQuantumSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV6 import ( + EnhancedDynamicQuantumSwarmOptimizationV6, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV6"] = EnhancedDynamicQuantumSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV6 print("EnhancedDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV7 import EnhancedDynamicQuantumSwarmOptimizationV7 +try: # EnhancedDynamicQuantumSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV7 import ( + EnhancedDynamicQuantumSwarmOptimizationV7, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV7"] = EnhancedDynamicQuantumSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV7 print("EnhancedDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV8 import EnhancedDynamicQuantumSwarmOptimizationV8 +try: # EnhancedDynamicQuantumSwarmOptimizationV8 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV8 import ( + EnhancedDynamicQuantumSwarmOptimizationV8, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV8"] = EnhancedDynamicQuantumSwarmOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV8", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV8 print("EnhancedDynamicQuantumSwarmOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV9 import EnhancedDynamicQuantumSwarmOptimizationV9 +try: # EnhancedDynamicQuantumSwarmOptimizationV9 + from nevergrad.optimization.lama.EnhancedDynamicQuantumSwarmOptimizationV9 import ( + EnhancedDynamicQuantumSwarmOptimizationV9, + ) lama_register["EnhancedDynamicQuantumSwarmOptimizationV9"] = EnhancedDynamicQuantumSwarmOptimizationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedDynamicQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedDynamicQuantumSwarmOptimizationV9", register=True) +except Exception as e: # EnhancedDynamicQuantumSwarmOptimizationV9 print("EnhancedDynamicQuantumSwarmOptimizationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing import EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing - - lama_register["EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing import ( + EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing print("EnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicRefinementGradientBoostedMemoryAnnealing import EnhancedDynamicRefinementGradientBoostedMemoryAnnealing - - lama_register["EnhancedDynamicRefinementGradientBoostedMemoryAnnealing"] = EnhancedDynamicRefinementGradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing").set_name("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing", register=True) -except Exception as e: +try: # EnhancedDynamicRefinementGradientBoostedMemoryAnnealing + from nevergrad.optimization.lama.EnhancedDynamicRefinementGradientBoostedMemoryAnnealing import ( + EnhancedDynamicRefinementGradientBoostedMemoryAnnealing, + ) + + lama_register["EnhancedDynamicRefinementGradientBoostedMemoryAnnealing"] = ( + EnhancedDynamicRefinementGradientBoostedMemoryAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing" + ).set_name("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # EnhancedDynamicRefinementGradientBoostedMemoryAnnealing print("EnhancedDynamicRefinementGradientBoostedMemoryAnnealing can not be imported: ", e) -try: +try: # EnhancedDynamicRestartAdaptiveDE from nevergrad.optimization.lama.EnhancedDynamicRestartAdaptiveDE import EnhancedDynamicRestartAdaptiveDE lama_register["EnhancedDynamicRestartAdaptiveDE"] = EnhancedDynamicRestartAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicRestartAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicRestartAdaptiveDE").set_name("LLAMAEnhancedDynamicRestartAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicRestartAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicRestartAdaptiveDE" + ).set_name("LLAMAEnhancedDynamicRestartAdaptiveDE", register=True) +except Exception as e: # EnhancedDynamicRestartAdaptiveDE print("EnhancedDynamicRestartAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicStrategyAdaptiveDE import EnhancedDynamicStrategyAdaptiveDE +try: # EnhancedDynamicStrategyAdaptiveDE + from nevergrad.optimization.lama.EnhancedDynamicStrategyAdaptiveDE import ( + EnhancedDynamicStrategyAdaptiveDE, + ) lama_register["EnhancedDynamicStrategyAdaptiveDE"] = EnhancedDynamicStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedDynamicStrategyAdaptiveDE").set_name("LLAMAEnhancedDynamicStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedDynamicStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedDynamicStrategyAdaptiveDE", register=True) +except Exception as e: # EnhancedDynamicStrategyAdaptiveDE print("EnhancedDynamicStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithm import EnhancedDynamicallyAdaptiveFireworkAlgorithm - - lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithm"] = EnhancedDynamicallyAdaptiveFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm").set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm", register=True) -except Exception as e: +try: # EnhancedDynamicallyAdaptiveFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithm import ( + EnhancedDynamicallyAdaptiveFireworkAlgorithm, + ) + + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithm"] = ( + EnhancedDynamicallyAdaptiveFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm" + ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm", register=True) +except Exception as e: # EnhancedDynamicallyAdaptiveFireworkAlgorithm print("EnhancedDynamicallyAdaptiveFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved import EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved - - lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved"] = EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved").set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved", register=True) -except Exception as e: +try: # EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved + from nevergrad.optimization.lama.EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved import ( + EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved, + ) + + lama_register["EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved"] = ( + EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved", register=True) +except Exception as e: # EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved print("EnhancedDynamicallyAdaptiveFireworkAlgorithmImproved can not be imported: ", e) -try: +try: # EnhancedEliteAdaptiveHybridDEPSO from nevergrad.optimization.lama.EnhancedEliteAdaptiveHybridDEPSO import EnhancedEliteAdaptiveHybridDEPSO lama_register["EnhancedEliteAdaptiveHybridDEPSO"] = EnhancedEliteAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveHybridDEPSO").set_name("LLAMAEnhancedEliteAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedEliteAdaptiveHybridDEPSO", register=True) +except Exception as e: # EnhancedEliteAdaptiveHybridDEPSO print("EnhancedEliteAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizer import EnhancedEliteAdaptiveMemoryHybridOptimizer +try: # EnhancedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizer import ( + EnhancedEliteAdaptiveMemoryHybridOptimizer, + ) lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizer"] = EnhancedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedEliteAdaptiveMemoryHybridOptimizer print("EnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV2 import EnhancedEliteAdaptiveMemoryHybridOptimizerV2 - - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV2"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2", register=True) -except Exception as e: +try: # EnhancedEliteAdaptiveMemoryHybridOptimizerV2 + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV2 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV2, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV2"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV2", register=True) +except Exception as e: # EnhancedEliteAdaptiveMemoryHybridOptimizerV2 print("EnhancedEliteAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV6 import EnhancedEliteAdaptiveMemoryHybridOptimizerV6 - - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV6"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6", register=True) -except Exception as e: +try: # EnhancedEliteAdaptiveMemoryHybridOptimizerV6 + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV6 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV6, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV6"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV6", register=True) +except Exception as e: # EnhancedEliteAdaptiveMemoryHybridOptimizerV6 print("EnhancedEliteAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV7 import EnhancedEliteAdaptiveMemoryHybridOptimizerV7 - - lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV7"] = EnhancedEliteAdaptiveMemoryHybridOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7").set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7", register=True) -except Exception as e: +try: # EnhancedEliteAdaptiveMemoryHybridOptimizerV7 + from nevergrad.optimization.lama.EnhancedEliteAdaptiveMemoryHybridOptimizerV7 import ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV7, + ) + + lama_register["EnhancedEliteAdaptiveMemoryHybridOptimizerV7"] = ( + EnhancedEliteAdaptiveMemoryHybridOptimizerV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7" + ).set_name("LLAMAEnhancedEliteAdaptiveMemoryHybridOptimizerV7", register=True) +except Exception as e: # EnhancedEliteAdaptiveMemoryHybridOptimizerV7 print("EnhancedEliteAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteCrowdingMemoryHybridOptimizerV3 import EnhancedEliteCrowdingMemoryHybridOptimizerV3 - - lama_register["EnhancedEliteCrowdingMemoryHybridOptimizerV3"] = EnhancedEliteCrowdingMemoryHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3").set_name("LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3", register=True) -except Exception as e: +try: # EnhancedEliteCrowdingMemoryHybridOptimizerV3 + from nevergrad.optimization.lama.EnhancedEliteCrowdingMemoryHybridOptimizerV3 import ( + EnhancedEliteCrowdingMemoryHybridOptimizerV3, + ) + + lama_register["EnhancedEliteCrowdingMemoryHybridOptimizerV3"] = ( + EnhancedEliteCrowdingMemoryHybridOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3" + ).set_name("LLAMAEnhancedEliteCrowdingMemoryHybridOptimizerV3", register=True) +except Exception as e: # EnhancedEliteCrowdingMemoryHybridOptimizerV3 print("EnhancedEliteCrowdingMemoryHybridOptimizerV3 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedAdaptiveDE from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveDE import EnhancedEliteGuidedAdaptiveDE lama_register["EnhancedEliteGuidedAdaptiveDE"] = EnhancedEliteGuidedAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveDE").set_name("LLAMAEnhancedEliteGuidedAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedAdaptiveDE" + ).set_name("LLAMAEnhancedEliteGuidedAdaptiveDE", register=True) +except Exception as e: # EnhancedEliteGuidedAdaptiveDE print("EnhancedEliteGuidedAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveRestartDE import EnhancedEliteGuidedAdaptiveRestartDE +try: # EnhancedEliteGuidedAdaptiveRestartDE + from nevergrad.optimization.lama.EnhancedEliteGuidedAdaptiveRestartDE import ( + EnhancedEliteGuidedAdaptiveRestartDE, + ) lama_register["EnhancedEliteGuidedAdaptiveRestartDE"] = EnhancedEliteGuidedAdaptiveRestartDE - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE").set_name("LLAMAEnhancedEliteGuidedAdaptiveRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMAEnhancedEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: # EnhancedEliteGuidedAdaptiveRestartDE print("EnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteGuidedDualMutationDE import EnhancedEliteGuidedDualMutationDE +try: # EnhancedEliteGuidedDualMutationDE + from nevergrad.optimization.lama.EnhancedEliteGuidedDualMutationDE import ( + EnhancedEliteGuidedDualMutationDE, + ) lama_register["EnhancedEliteGuidedDualMutationDE"] = EnhancedEliteGuidedDualMutationDE - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedDualMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedDualMutationDE = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedDualMutationDE").set_name("LLAMAEnhancedEliteGuidedDualMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedDualMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedDualMutationDE = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedDualMutationDE" + ).set_name("LLAMAEnhancedEliteGuidedDualMutationDE", register=True) +except Exception as e: # EnhancedEliteGuidedDualMutationDE print("EnhancedEliteGuidedDualMutationDE can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMassQGSA_v81 from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v81 import EnhancedEliteGuidedMassQGSA_v81 lama_register["EnhancedEliteGuidedMassQGSA_v81"] = EnhancedEliteGuidedMassQGSA_v81 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v81")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMassQGSA_v81 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v81").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v81", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v81")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMassQGSA_v81 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v81" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v81", register=True) +except Exception as e: # EnhancedEliteGuidedMassQGSA_v81 print("EnhancedEliteGuidedMassQGSA_v81 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMassQGSA_v82 from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v82 import EnhancedEliteGuidedMassQGSA_v82 lama_register["EnhancedEliteGuidedMassQGSA_v82"] = EnhancedEliteGuidedMassQGSA_v82 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v82")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMassQGSA_v82 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v82").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v82", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v82")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMassQGSA_v82 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v82" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v82", register=True) +except Exception as e: # EnhancedEliteGuidedMassQGSA_v82 print("EnhancedEliteGuidedMassQGSA_v82 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMassQGSA_v83 from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v83 import EnhancedEliteGuidedMassQGSA_v83 lama_register["EnhancedEliteGuidedMassQGSA_v83"] = EnhancedEliteGuidedMassQGSA_v83 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v83")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMassQGSA_v83 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v83").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v83", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v83")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMassQGSA_v83 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v83" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v83", register=True) +except Exception as e: # EnhancedEliteGuidedMassQGSA_v83 print("EnhancedEliteGuidedMassQGSA_v83 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMassQGSA_v85 from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v85 import EnhancedEliteGuidedMassQGSA_v85 lama_register["EnhancedEliteGuidedMassQGSA_v85"] = EnhancedEliteGuidedMassQGSA_v85 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v85")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMassQGSA_v85 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v85").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v85", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v85")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMassQGSA_v85 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v85" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v85", register=True) +except Exception as e: # EnhancedEliteGuidedMassQGSA_v85 print("EnhancedEliteGuidedMassQGSA_v85 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMassQGSA_v86 from nevergrad.optimization.lama.EnhancedEliteGuidedMassQGSA_v86 import EnhancedEliteGuidedMassQGSA_v86 lama_register["EnhancedEliteGuidedMassQGSA_v86"] = EnhancedEliteGuidedMassQGSA_v86 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v86")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMassQGSA_v86 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v86").set_name("LLAMAEnhancedEliteGuidedMassQGSA_v86", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMassQGSA_v86")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMassQGSA_v86 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMassQGSA_v86" + ).set_name("LLAMAEnhancedEliteGuidedMassQGSA_v86", register=True) +except Exception as e: # EnhancedEliteGuidedMassQGSA_v86 print("EnhancedEliteGuidedMassQGSA_v86 can not be imported: ", e) -try: +try: # EnhancedEliteGuidedMutationDE_v2 from nevergrad.optimization.lama.EnhancedEliteGuidedMutationDE_v2 import EnhancedEliteGuidedMutationDE_v2 lama_register["EnhancedEliteGuidedMutationDE_v2"] = EnhancedEliteGuidedMutationDE_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMutationDE_v2").set_name("LLAMAEnhancedEliteGuidedMutationDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEliteGuidedMutationDE_v2" + ).set_name("LLAMAEnhancedEliteGuidedMutationDE_v2", register=True) +except Exception as e: # EnhancedEliteGuidedMutationDE_v2 print("EnhancedEliteGuidedMutationDE_v2 can not be imported: ", e) -try: +try: # EnhancedEliteHybridOptimizer from nevergrad.optimization.lama.EnhancedEliteHybridOptimizer import EnhancedEliteHybridOptimizer lama_register["EnhancedEliteHybridOptimizer"] = EnhancedEliteHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEliteHybridOptimizer").set_name("LLAMAEnhancedEliteHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEliteHybridOptimizer" + ).set_name("LLAMAEnhancedEliteHybridOptimizer", register=True) +except Exception as e: # EnhancedEliteHybridOptimizer print("EnhancedEliteHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEliteQuantumAdaptiveExplorationOptimization import EnhancedEliteQuantumAdaptiveExplorationOptimization - - lama_register["EnhancedEliteQuantumAdaptiveExplorationOptimization"] = EnhancedEliteQuantumAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization").set_name("LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedEliteQuantumAdaptiveExplorationOptimization + from nevergrad.optimization.lama.EnhancedEliteQuantumAdaptiveExplorationOptimization import ( + EnhancedEliteQuantumAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedEliteQuantumAdaptiveExplorationOptimization"] = ( + EnhancedEliteQuantumAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedEliteQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: # EnhancedEliteQuantumAdaptiveExplorationOptimization print("EnhancedEliteQuantumAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 import EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 - - lama_register["EnhancedEnhancedAdaptiveHarmonicTabuSearchV24"] = EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24", register=True) -except Exception as e: +try: # EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 import ( + EnhancedEnhancedAdaptiveHarmonicTabuSearchV24, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonicTabuSearchV24"] = ( + EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24" + ).set_name("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24", register=True) +except Exception as e: # EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 print("EnhancedEnhancedAdaptiveHarmonicTabuSearchV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 import EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 - - lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7"] = EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7", register=True) -except Exception as e: - print("EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 import EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 - - lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8"] = EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8").set_name("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8", register=True) -except Exception as e: - print("EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution import EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution - - lama_register["EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution"] = EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 import ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7"] = ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7" + ).set_name( + "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7", register=True + ) +except Exception as e: # EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 + print( + "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7 can not be imported: ", + e, + ) +try: # EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 import ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8, + ) + + lama_register["EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8"] = ( + EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8" + ).set_name( + "LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8", register=True + ) +except Exception as e: # EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 + print( + "EnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8 can not be imported: ", + e, + ) +try: # EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution import ( + EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution"] = ( + EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution print("EnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 import EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 - - lama_register["EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57"] = EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57").set_name("LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57", register=True) -except Exception as e: +try: # EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 + from nevergrad.optimization.lama.EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 import ( + EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57, + ) + + lama_register["EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57"] = ( + EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57" + ).set_name("LLAMAEnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57", register=True) +except Exception as e: # EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 print("EnhancedEnhancedAdvancedDifferentialEvolutionLocalSearch_v57 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence import EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence - - lama_register["EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence").set_name("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) -except Exception as e: +try: # EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence + from nevergrad.optimization.lama.EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence import ( + EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence, + ) + + lama_register["EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence"] = ( + EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence" + ).set_name("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence", register=True) +except Exception as e: # EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence print("EnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedDynamicQuantumSwarmOptimization import EnhancedEnhancedDynamicQuantumSwarmOptimization - - lama_register["EnhancedEnhancedDynamicQuantumSwarmOptimization"] = EnhancedEnhancedDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedEnhancedDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.EnhancedEnhancedDynamicQuantumSwarmOptimization import ( + EnhancedEnhancedDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedEnhancedDynamicQuantumSwarmOptimization"] = ( + EnhancedEnhancedDynamicQuantumSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedEnhancedDynamicQuantumSwarmOptimization print("EnhancedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 - - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10", register=True) -except Exception as e: +try: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10", register=True) +except Exception as e: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 - - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6", register=True) -except Exception as e: +try: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6", register=True) +except Exception as e: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 - - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7", register=True) -except Exception as e: +try: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7", register=True) +except Exception as e: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 - - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8", register=True) -except Exception as e: +try: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8", register=True) +except Exception as e: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 import EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 - - lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9"] = EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9").set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9", register=True) -except Exception as e: +try: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 + from nevergrad.optimization.lama.EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 import ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9, + ) + + lama_register["EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9"] = ( + EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9" + ).set_name("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9", register=True) +except Exception as e: # EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 print("EnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization import EnhancedEnhancedFireworkSwarmOptimization +try: # EnhancedEnhancedFireworkSwarmOptimization + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization import ( + EnhancedEnhancedFireworkSwarmOptimization, + ) lama_register["EnhancedEnhancedFireworkSwarmOptimization"] = EnhancedEnhancedFireworkSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedFireworkSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization", register=True) +except Exception as e: # EnhancedEnhancedFireworkSwarmOptimization print("EnhancedEnhancedFireworkSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v2 import EnhancedEnhancedFireworkSwarmOptimization_v2 - - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v2"] = EnhancedEnhancedFireworkSwarmOptimization_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2", register=True) -except Exception as e: +try: # EnhancedEnhancedFireworkSwarmOptimization_v2 + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v2 import ( + EnhancedEnhancedFireworkSwarmOptimization_v2, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v2"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v2", register=True) +except Exception as e: # EnhancedEnhancedFireworkSwarmOptimization_v2 print("EnhancedEnhancedFireworkSwarmOptimization_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v3 import EnhancedEnhancedFireworkSwarmOptimization_v3 - - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v3"] = EnhancedEnhancedFireworkSwarmOptimization_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3", register=True) -except Exception as e: +try: # EnhancedEnhancedFireworkSwarmOptimization_v3 + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v3 import ( + EnhancedEnhancedFireworkSwarmOptimization_v3, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v3"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v3", register=True) +except Exception as e: # EnhancedEnhancedFireworkSwarmOptimization_v3 print("EnhancedEnhancedFireworkSwarmOptimization_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v4 import EnhancedEnhancedFireworkSwarmOptimization_v4 - - lama_register["EnhancedEnhancedFireworkSwarmOptimization_v4"] = EnhancedEnhancedFireworkSwarmOptimization_v4 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4").set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4", register=True) -except Exception as e: +try: # EnhancedEnhancedFireworkSwarmOptimization_v4 + from nevergrad.optimization.lama.EnhancedEnhancedFireworkSwarmOptimization_v4 import ( + EnhancedEnhancedFireworkSwarmOptimization_v4, + ) + + lama_register["EnhancedEnhancedFireworkSwarmOptimization_v4"] = ( + EnhancedEnhancedFireworkSwarmOptimization_v4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4" + ).set_name("LLAMAEnhancedEnhancedFireworkSwarmOptimization_v4", register=True) +except Exception as e: # EnhancedEnhancedFireworkSwarmOptimization_v4 print("EnhancedEnhancedFireworkSwarmOptimization_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v63 import EnhancedEnhancedGuidedMassQGSA_v63 +try: # EnhancedEnhancedGuidedMassQGSA_v63 + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v63 import ( + EnhancedEnhancedGuidedMassQGSA_v63, + ) lama_register["EnhancedEnhancedGuidedMassQGSA_v63"] = EnhancedEnhancedGuidedMassQGSA_v63 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedGuidedMassQGSA_v63 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v63", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v63 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v63" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v63", register=True) +except Exception as e: # EnhancedEnhancedGuidedMassQGSA_v63 print("EnhancedEnhancedGuidedMassQGSA_v63 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v64 import EnhancedEnhancedGuidedMassQGSA_v64 +try: # EnhancedEnhancedGuidedMassQGSA_v64 + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v64 import ( + EnhancedEnhancedGuidedMassQGSA_v64, + ) lama_register["EnhancedEnhancedGuidedMassQGSA_v64"] = EnhancedEnhancedGuidedMassQGSA_v64 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedGuidedMassQGSA_v64 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v64", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v64 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v64" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v64", register=True) +except Exception as e: # EnhancedEnhancedGuidedMassQGSA_v64 print("EnhancedEnhancedGuidedMassQGSA_v64 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v68 import EnhancedEnhancedGuidedMassQGSA_v68 +try: # EnhancedEnhancedGuidedMassQGSA_v68 + from nevergrad.optimization.lama.EnhancedEnhancedGuidedMassQGSA_v68 import ( + EnhancedEnhancedGuidedMassQGSA_v68, + ) lama_register["EnhancedEnhancedGuidedMassQGSA_v68"] = EnhancedEnhancedGuidedMassQGSA_v68 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedGuidedMassQGSA_v68 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68").set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v68", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedGuidedMassQGSA_v68 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedGuidedMassQGSA_v68" + ).set_name("LLAMAEnhancedEnhancedGuidedMassQGSA_v68", register=True) +except Exception as e: # EnhancedEnhancedGuidedMassQGSA_v68 print("EnhancedEnhancedGuidedMassQGSA_v68 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration import EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration - - lama_register["EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration"] = EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration", register=True) -except Exception as e: +try: # EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration + from nevergrad.optimization.lama.EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration import ( + EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration"] = ( + EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration", register=True) +except Exception as e: # EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration print("EnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizer import EnhancedEnhancedHybridMetaHeuristicOptimizer - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizer"] = EnhancedEnhancedHybridMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizer + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizer import ( + EnhancedEnhancedHybridMetaHeuristicOptimizer, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizer"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizer print("EnhancedEnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV10 import EnhancedEnhancedHybridMetaHeuristicOptimizerV10 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV10"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV10 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV10 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV10 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV10, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV10"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV10 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV11 import EnhancedEnhancedHybridMetaHeuristicOptimizerV11 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV11"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV11 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV11 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV11 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV11, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV11"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV11 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV11 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV12 import EnhancedEnhancedHybridMetaHeuristicOptimizerV12 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV12"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV12 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV12 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV12 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV12, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV12"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV12 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV13 import EnhancedEnhancedHybridMetaHeuristicOptimizerV13 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV13"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV13 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV13 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV13 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV13, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV13"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV13 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV14 import EnhancedEnhancedHybridMetaHeuristicOptimizerV14 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV14"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV14 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV14 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV14 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV14, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV14"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV14 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV14 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV2 import EnhancedEnhancedHybridMetaHeuristicOptimizerV2 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV2"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV2 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV2 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV2, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV2"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV2 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV3 import EnhancedEnhancedHybridMetaHeuristicOptimizerV3 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV3"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV3 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV3 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV3, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV3"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV3 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV4 import EnhancedEnhancedHybridMetaHeuristicOptimizerV4 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV4"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV4 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV4 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV4, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV4"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV4 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV5 import EnhancedEnhancedHybridMetaHeuristicOptimizerV5 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV5"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV5 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV5 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV5, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV5"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV5 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV6 import EnhancedEnhancedHybridMetaHeuristicOptimizerV6 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV6"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV6 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV6 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV6, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV6"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV6 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV7 import EnhancedEnhancedHybridMetaHeuristicOptimizerV7 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV7"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV7 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV7 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV7, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV7"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV7 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV8 import EnhancedEnhancedHybridMetaHeuristicOptimizerV8 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV8"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV8 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV8 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV8 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV8, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV8"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV8 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV9 import EnhancedEnhancedHybridMetaHeuristicOptimizerV9 - - lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV9"] = EnhancedEnhancedHybridMetaHeuristicOptimizerV9 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9").set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9", register=True) -except Exception as e: +try: # EnhancedEnhancedHybridMetaHeuristicOptimizerV9 + from nevergrad.optimization.lama.EnhancedEnhancedHybridMetaHeuristicOptimizerV9 import ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV9, + ) + + lama_register["EnhancedEnhancedHybridMetaHeuristicOptimizerV9"] = ( + EnhancedEnhancedHybridMetaHeuristicOptimizerV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9" + ).set_name("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9", register=True) +except Exception as e: # EnhancedEnhancedHybridMetaHeuristicOptimizerV9 print("EnhancedEnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedMetaHeuristicOptimizerV3 import EnhancedEnhancedMetaHeuristicOptimizerV3 +try: # EnhancedEnhancedMetaHeuristicOptimizerV3 + from nevergrad.optimization.lama.EnhancedEnhancedMetaHeuristicOptimizerV3 import ( + EnhancedEnhancedMetaHeuristicOptimizerV3, + ) lama_register["EnhancedEnhancedMetaHeuristicOptimizerV3"] = EnhancedEnhancedMetaHeuristicOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3", register=True) +except Exception as e: # EnhancedEnhancedMetaHeuristicOptimizerV3 print("EnhancedEnhancedMetaHeuristicOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP import EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP - - lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) -except Exception as e: +try: # EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP, + ) + + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: # EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 import EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 - - lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4"] = EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 - res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4").set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4", register=True) -except Exception as e: +try: # EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 + from nevergrad.optimization.lama.EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 import ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4, + ) + + lama_register["EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4"] = ( + EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 = NonObjectOptimizer( + method="LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4" + ).set_name("LLAMAEnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4", register=True) +except Exception as e: # EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 print("EnhancedEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV1 import EnhancedEvolutionaryDifferentialSwarmOptimizerV1 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV1"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV1 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV1 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV1 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV1, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV1"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV1 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV1 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV12 import EnhancedEvolutionaryDifferentialSwarmOptimizerV12 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV12"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV12 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV12 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV12 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV12, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV12"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV12 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV12 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV13 import EnhancedEvolutionaryDifferentialSwarmOptimizerV13 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV13"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV13 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV13 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV13 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV13, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV13"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV13 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV13 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV14 import EnhancedEvolutionaryDifferentialSwarmOptimizerV14 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV14"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV14 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV14 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV14 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV14, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV14"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV14 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV14 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV15 import EnhancedEvolutionaryDifferentialSwarmOptimizerV15 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV15"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV15 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV15 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV15 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV15, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV15"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV15 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV15 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV16 import EnhancedEvolutionaryDifferentialSwarmOptimizerV16 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV16"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV16 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV16 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV16 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV16, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV16"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV16 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV16 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV17 import EnhancedEvolutionaryDifferentialSwarmOptimizerV17 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV17"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV17 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV17 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV17 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV17, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV17"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV17 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV17 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV18 import EnhancedEvolutionaryDifferentialSwarmOptimizerV18 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV18"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV18 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV18 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV18 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV18, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV18"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV18 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV18 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV19 import EnhancedEvolutionaryDifferentialSwarmOptimizerV19 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV19"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV19 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV19 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV19 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV19, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV19"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV19 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV19 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV2 import EnhancedEvolutionaryDifferentialSwarmOptimizerV2 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV2"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV2 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV2 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV2, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV2"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV2 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV20 import EnhancedEvolutionaryDifferentialSwarmOptimizerV20 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV20"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV20 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV20 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV20 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV20, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV20"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV20 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV20 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV21 import EnhancedEvolutionaryDifferentialSwarmOptimizerV21 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV21"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV21 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV21 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV21 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV21, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV21"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV21 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV21 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV22 import EnhancedEvolutionaryDifferentialSwarmOptimizerV22 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV22"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV22 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV22 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV22 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV22, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV22"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV22 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV22 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV23 import EnhancedEvolutionaryDifferentialSwarmOptimizerV23 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV23"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV23 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV23 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV23 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV23, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV23"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV23 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV23 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV24 import EnhancedEvolutionaryDifferentialSwarmOptimizerV24 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV24"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV24 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV24 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV24 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV24, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV24"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV24 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV24 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV25 import EnhancedEvolutionaryDifferentialSwarmOptimizerV25 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV25"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV25 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV25 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV25 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV25, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV25"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV25 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV25 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV26 import EnhancedEvolutionaryDifferentialSwarmOptimizerV26 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV26"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV26 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV26 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV26 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV26, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV26"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV26 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV26 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV27 import EnhancedEvolutionaryDifferentialSwarmOptimizerV27 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV27"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV27 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV27 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV27 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV27, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV27"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV27 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV27 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV28 import EnhancedEvolutionaryDifferentialSwarmOptimizerV28 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV28"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV28 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV28 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV28 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV28, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV28"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV28 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV28 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV29 import EnhancedEvolutionaryDifferentialSwarmOptimizerV29 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV29"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV29 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV29 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV29 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV29, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV29"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV29 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV29 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV3 import EnhancedEvolutionaryDifferentialSwarmOptimizerV3 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV3"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV3 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV3 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV3, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV3"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV3 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV30 import EnhancedEvolutionaryDifferentialSwarmOptimizerV30 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV30"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV30 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV30 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV30 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV30, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV30"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV30 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV30 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV4 import EnhancedEvolutionaryDifferentialSwarmOptimizerV4 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV4"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV4 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV4 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV4, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV4"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV4 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV5 import EnhancedEvolutionaryDifferentialSwarmOptimizerV5 - - lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV5"] = EnhancedEvolutionaryDifferentialSwarmOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5").set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5", register=True) -except Exception as e: +try: # EnhancedEvolutionaryDifferentialSwarmOptimizerV5 + from nevergrad.optimization.lama.EnhancedEvolutionaryDifferentialSwarmOptimizerV5 import ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV5, + ) + + lama_register["EnhancedEvolutionaryDifferentialSwarmOptimizerV5"] = ( + EnhancedEvolutionaryDifferentialSwarmOptimizerV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5" + ).set_name("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5", register=True) +except Exception as e: # EnhancedEvolutionaryDifferentialSwarmOptimizerV5 print("EnhancedEvolutionaryDifferentialSwarmOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch import EnhancedEvolutionaryFireworksSearch +try: # EnhancedEvolutionaryFireworksSearch + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch import ( + EnhancedEvolutionaryFireworksSearch, + ) lama_register["EnhancedEvolutionaryFireworksSearch"] = EnhancedEvolutionaryFireworksSearch - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch").set_name("LLAMAEnhancedEvolutionaryFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch print("EnhancedEvolutionaryFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v2 import EnhancedEvolutionaryFireworksSearch_v2 +try: # EnhancedEvolutionaryFireworksSearch_v2 + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v2 import ( + EnhancedEvolutionaryFireworksSearch_v2, + ) lama_register["EnhancedEvolutionaryFireworksSearch_v2"] = EnhancedEvolutionaryFireworksSearch_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v2").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v2" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v2", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch_v2 print("EnhancedEvolutionaryFireworksSearch_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v3 import EnhancedEvolutionaryFireworksSearch_v3 +try: # EnhancedEvolutionaryFireworksSearch_v3 + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v3 import ( + EnhancedEvolutionaryFireworksSearch_v3, + ) lama_register["EnhancedEvolutionaryFireworksSearch_v3"] = EnhancedEvolutionaryFireworksSearch_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch_v3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v3").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v3" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v3", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch_v3 print("EnhancedEvolutionaryFireworksSearch_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v4 import EnhancedEvolutionaryFireworksSearch_v4 +try: # EnhancedEvolutionaryFireworksSearch_v4 + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v4 import ( + EnhancedEvolutionaryFireworksSearch_v4, + ) lama_register["EnhancedEvolutionaryFireworksSearch_v4"] = EnhancedEvolutionaryFireworksSearch_v4 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch_v4 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v4").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v4 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v4" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v4", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch_v4 print("EnhancedEvolutionaryFireworksSearch_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v5 import EnhancedEvolutionaryFireworksSearch_v5 +try: # EnhancedEvolutionaryFireworksSearch_v5 + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v5 import ( + EnhancedEvolutionaryFireworksSearch_v5, + ) lama_register["EnhancedEvolutionaryFireworksSearch_v5"] = EnhancedEvolutionaryFireworksSearch_v5 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch_v5 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v5").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v5 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v5" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v5", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch_v5 print("EnhancedEvolutionaryFireworksSearch_v5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v6 import EnhancedEvolutionaryFireworksSearch_v6 +try: # EnhancedEvolutionaryFireworksSearch_v6 + from nevergrad.optimization.lama.EnhancedEvolutionaryFireworksSearch_v6 import ( + EnhancedEvolutionaryFireworksSearch_v6, + ) lama_register["EnhancedEvolutionaryFireworksSearch_v6"] = EnhancedEvolutionaryFireworksSearch_v6 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryFireworksSearch_v6 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v6").set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryFireworksSearch_v6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryFireworksSearch_v6 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryFireworksSearch_v6" + ).set_name("LLAMAEnhancedEvolutionaryFireworksSearch_v6", register=True) +except Exception as e: # EnhancedEvolutionaryFireworksSearch_v6 print("EnhancedEvolutionaryFireworksSearch_v6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryGradientSearch import EnhancedEvolutionaryGradientSearch +try: # EnhancedEvolutionaryGradientSearch + from nevergrad.optimization.lama.EnhancedEvolutionaryGradientSearch import ( + EnhancedEvolutionaryGradientSearch, + ) lama_register["EnhancedEvolutionaryGradientSearch"] = EnhancedEvolutionaryGradientSearch - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryGradientSearch").set_name("LLAMAEnhancedEvolutionaryGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryGradientSearch" + ).set_name("LLAMAEnhancedEvolutionaryGradientSearch", register=True) +except Exception as e: # EnhancedEvolutionaryGradientSearch print("EnhancedEvolutionaryGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizer import EnhancedEvolutionaryParticleSwarmOptimizer +try: # EnhancedEvolutionaryParticleSwarmOptimizer + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizer import ( + EnhancedEvolutionaryParticleSwarmOptimizer, + ) lama_register["EnhancedEvolutionaryParticleSwarmOptimizer"] = EnhancedEvolutionaryParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedEvolutionaryParticleSwarmOptimizer print("EnhancedEvolutionaryParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV2 import EnhancedEvolutionaryParticleSwarmOptimizerV2 - - lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV2"] = EnhancedEvolutionaryParticleSwarmOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2", register=True) -except Exception as e: +try: # EnhancedEvolutionaryParticleSwarmOptimizerV2 + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV2 import ( + EnhancedEvolutionaryParticleSwarmOptimizerV2, + ) + + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV2"] = ( + EnhancedEvolutionaryParticleSwarmOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2", register=True) +except Exception as e: # EnhancedEvolutionaryParticleSwarmOptimizerV2 print("EnhancedEvolutionaryParticleSwarmOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV3 import EnhancedEvolutionaryParticleSwarmOptimizerV3 - - lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV3"] = EnhancedEvolutionaryParticleSwarmOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3").set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3", register=True) -except Exception as e: +try: # EnhancedEvolutionaryParticleSwarmOptimizerV3 + from nevergrad.optimization.lama.EnhancedEvolutionaryParticleSwarmOptimizerV3 import ( + EnhancedEvolutionaryParticleSwarmOptimizerV3, + ) + + lama_register["EnhancedEvolutionaryParticleSwarmOptimizerV3"] = ( + EnhancedEvolutionaryParticleSwarmOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3" + ).set_name("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3", register=True) +except Exception as e: # EnhancedEvolutionaryParticleSwarmOptimizerV3 print("EnhancedEvolutionaryParticleSwarmOptimizerV3 can not be imported: ", e) -try: +try: # EnhancedEvolutionaryStrategy from nevergrad.optimization.lama.EnhancedEvolutionaryStrategy import EnhancedEvolutionaryStrategy lama_register["EnhancedEvolutionaryStrategy"] = EnhancedEvolutionaryStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedEvolutionaryStrategy = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryStrategy").set_name("LLAMAEnhancedEvolutionaryStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedEvolutionaryStrategy = NonObjectOptimizer( + method="LLAMAEnhancedEvolutionaryStrategy" + ).set_name("LLAMAEnhancedEvolutionaryStrategy", register=True) +except Exception as e: # EnhancedEvolutionaryStrategy print("EnhancedEvolutionaryStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimization import EnhancedExplorationGravitationalSwarmOptimization - - lama_register["EnhancedExplorationGravitationalSwarmOptimization"] = EnhancedExplorationGravitationalSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimization").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedExplorationGravitationalSwarmOptimization + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimization import ( + EnhancedExplorationGravitationalSwarmOptimization, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimization"] = ( + EnhancedExplorationGravitationalSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimization" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimization", register=True) +except Exception as e: # EnhancedExplorationGravitationalSwarmOptimization print("EnhancedExplorationGravitationalSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV2 import EnhancedExplorationGravitationalSwarmOptimizationV2 - - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV2"] = EnhancedExplorationGravitationalSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2", register=True) -except Exception as e: +try: # EnhancedExplorationGravitationalSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV2 import ( + EnhancedExplorationGravitationalSwarmOptimizationV2, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV2"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedExplorationGravitationalSwarmOptimizationV2 print("EnhancedExplorationGravitationalSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV3 import EnhancedExplorationGravitationalSwarmOptimizationV3 - - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV3"] = EnhancedExplorationGravitationalSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3", register=True) -except Exception as e: +try: # EnhancedExplorationGravitationalSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV3 import ( + EnhancedExplorationGravitationalSwarmOptimizationV3, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV3"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedExplorationGravitationalSwarmOptimizationV3 print("EnhancedExplorationGravitationalSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV4 import EnhancedExplorationGravitationalSwarmOptimizationV4 - - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV4"] = EnhancedExplorationGravitationalSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4", register=True) -except Exception as e: +try: # EnhancedExplorationGravitationalSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV4 import ( + EnhancedExplorationGravitationalSwarmOptimizationV4, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV4"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedExplorationGravitationalSwarmOptimizationV4 print("EnhancedExplorationGravitationalSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV5 import EnhancedExplorationGravitationalSwarmOptimizationV5 - - lama_register["EnhancedExplorationGravitationalSwarmOptimizationV5"] = EnhancedExplorationGravitationalSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5").set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5", register=True) -except Exception as e: +try: # EnhancedExplorationGravitationalSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedExplorationGravitationalSwarmOptimizationV5 import ( + EnhancedExplorationGravitationalSwarmOptimizationV5, + ) + + lama_register["EnhancedExplorationGravitationalSwarmOptimizationV5"] = ( + EnhancedExplorationGravitationalSwarmOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5" + ).set_name("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedExplorationGravitationalSwarmOptimizationV5 print("EnhancedExplorationGravitationalSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedExplorativeHarmonicSwarmOptimizer import EnhancedExplorativeHarmonicSwarmOptimizer +try: # EnhancedExplorativeHarmonicSwarmOptimizer + from nevergrad.optimization.lama.EnhancedExplorativeHarmonicSwarmOptimizer import ( + EnhancedExplorativeHarmonicSwarmOptimizer, + ) lama_register["EnhancedExplorativeHarmonicSwarmOptimizer"] = EnhancedExplorativeHarmonicSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedExplorativeHarmonicSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer").set_name("LLAMAEnhancedExplorativeHarmonicSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedExplorativeHarmonicSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedExplorativeHarmonicSwarmOptimizer" + ).set_name("LLAMAEnhancedExplorativeHarmonicSwarmOptimizer", register=True) +except Exception as e: # EnhancedExplorativeHarmonicSwarmOptimizer print("EnhancedExplorativeHarmonicSwarmOptimizer can not be imported: ", e) -try: +try: # EnhancedFireworkAlgorithm from nevergrad.optimization.lama.EnhancedFireworkAlgorithm import EnhancedFireworkAlgorithm lama_register["EnhancedFireworkAlgorithm"] = EnhancedFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm").set_name("LLAMAEnhancedFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithm").set_name( + "LLAMAEnhancedFireworkAlgorithm", register=True + ) +except Exception as e: # EnhancedFireworkAlgorithm print("EnhancedFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization import EnhancedFireworkAlgorithmOptimization +try: # EnhancedFireworkAlgorithmOptimization + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization import ( + EnhancedFireworkAlgorithmOptimization, + ) lama_register["EnhancedFireworkAlgorithmOptimization"] = EnhancedFireworkAlgorithmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization").set_name("LLAMAEnhancedFireworkAlgorithmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmOptimization" + ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization", register=True) +except Exception as e: # EnhancedFireworkAlgorithmOptimization print("EnhancedFireworkAlgorithmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization_v2 import EnhancedFireworkAlgorithmOptimization_v2 +try: # EnhancedFireworkAlgorithmOptimization_v2 + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmOptimization_v2 import ( + EnhancedFireworkAlgorithmOptimization_v2, + ) lama_register["EnhancedFireworkAlgorithmOptimization_v2"] = EnhancedFireworkAlgorithmOptimization_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization_v2").set_name("LLAMAEnhancedFireworkAlgorithmOptimization_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmOptimization_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmOptimization_v2" + ).set_name("LLAMAEnhancedFireworkAlgorithmOptimization_v2", register=True) +except Exception as e: # EnhancedFireworkAlgorithmOptimization_v2 print("EnhancedFireworkAlgorithmOptimization_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearch import EnhancedFireworkAlgorithmWithAdaptiveLocalSearch - - lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = EnhancedFireworkAlgorithmWithAdaptiveLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithAdaptiveLocalSearch + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithAdaptiveLocalSearch print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined import EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined - - lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined"] = EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined import ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined"] = ( + EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined print("EnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveMutation import EnhancedFireworkAlgorithmWithAdaptiveMutation - - lama_register["EnhancedFireworkAlgorithmWithAdaptiveMutation"] = EnhancedFireworkAlgorithmWithAdaptiveMutation - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithAdaptiveMutation + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithAdaptiveMutation import ( + EnhancedFireworkAlgorithmWithAdaptiveMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithAdaptiveMutation"] = ( + EnhancedFireworkAlgorithmWithAdaptiveMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithAdaptiveMutation print("EnhancedFireworkAlgorithmWithAdaptiveMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithDynamicMutation import EnhancedFireworkAlgorithmWithDynamicMutation - - lama_register["EnhancedFireworkAlgorithmWithDynamicMutation"] = EnhancedFireworkAlgorithmWithDynamicMutation - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithDynamicMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithDynamicMutation + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithDynamicMutation import ( + EnhancedFireworkAlgorithmWithDynamicMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithDynamicMutation"] = ( + EnhancedFireworkAlgorithmWithDynamicMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithDynamicMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithDynamicMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithDynamicMutation print("EnhancedFireworkAlgorithmWithDynamicMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithHybridLocalSearch import EnhancedFireworkAlgorithmWithHybridLocalSearch - - lama_register["EnhancedFireworkAlgorithmWithHybridLocalSearch"] = EnhancedFireworkAlgorithmWithHybridLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithHybridLocalSearch + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithHybridLocalSearch import ( + EnhancedFireworkAlgorithmWithHybridLocalSearch, + ) + + lama_register["EnhancedFireworkAlgorithmWithHybridLocalSearch"] = ( + EnhancedFireworkAlgorithmWithHybridLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithHybridLocalSearch print("EnhancedFireworkAlgorithmWithHybridLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithImprovedMutation import EnhancedFireworkAlgorithmWithImprovedMutation - - lama_register["EnhancedFireworkAlgorithmWithImprovedMutation"] = EnhancedFireworkAlgorithmWithImprovedMutation - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithImprovedMutation = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation").set_name("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithImprovedMutation + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithImprovedMutation import ( + EnhancedFireworkAlgorithmWithImprovedMutation, + ) + + lama_register["EnhancedFireworkAlgorithmWithImprovedMutation"] = ( + EnhancedFireworkAlgorithmWithImprovedMutation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithImprovedMutation = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithImprovedMutation" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithImprovedMutation print("EnhancedFireworkAlgorithmWithImprovedMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearch import EnhancedFireworkAlgorithmWithLocalSearch +try: # EnhancedFireworkAlgorithmWithLocalSearch + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearch import ( + EnhancedFireworkAlgorithmWithLocalSearch, + ) lama_register["EnhancedFireworkAlgorithmWithLocalSearch"] = EnhancedFireworkAlgorithmWithLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearch print("EnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinal import EnhancedFireworkAlgorithmWithLocalSearchFinal - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinal"] = EnhancedFireworkAlgorithmWithLocalSearchFinal - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchFinal + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinal import ( + EnhancedFireworkAlgorithmWithLocalSearchFinal, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinal"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinal + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchFinal print("EnhancedFireworkAlgorithmWithLocalSearchFinal can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized import EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized"] = EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized import ( + EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized print("EnhancedFireworkAlgorithmWithLocalSearchFinalOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalRefined import EnhancedFireworkAlgorithmWithLocalSearchFinalRefined - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalRefined"] = EnhancedFireworkAlgorithmWithLocalSearchFinalRefined - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchFinalRefined + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchFinalRefined import ( + EnhancedFireworkAlgorithmWithLocalSearchFinalRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchFinalRefined"] = ( + EnhancedFireworkAlgorithmWithLocalSearchFinalRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchFinalRefined print("EnhancedFireworkAlgorithmWithLocalSearchFinalRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchImproved import EnhancedFireworkAlgorithmWithLocalSearchImproved - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchImproved"] = EnhancedFireworkAlgorithmWithLocalSearchImproved - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchImproved + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchImproved import ( + EnhancedFireworkAlgorithmWithLocalSearchImproved, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchImproved"] = ( + EnhancedFireworkAlgorithmWithLocalSearchImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchImproved print("EnhancedFireworkAlgorithmWithLocalSearchImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchOptimized import EnhancedFireworkAlgorithmWithLocalSearchOptimized - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchOptimized"] = EnhancedFireworkAlgorithmWithLocalSearchOptimized - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchOptimized + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchOptimized import ( + EnhancedFireworkAlgorithmWithLocalSearchOptimized, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchOptimized"] = ( + EnhancedFireworkAlgorithmWithLocalSearchOptimized + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchOptimized print("EnhancedFireworkAlgorithmWithLocalSearchOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchRefined import EnhancedFireworkAlgorithmWithLocalSearchRefined - - lama_register["EnhancedFireworkAlgorithmWithLocalSearchRefined"] = EnhancedFireworkAlgorithmWithLocalSearchRefined - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined").set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined", register=True) -except Exception as e: +try: # EnhancedFireworkAlgorithmWithLocalSearchRefined + from nevergrad.optimization.lama.EnhancedFireworkAlgorithmWithLocalSearchRefined import ( + EnhancedFireworkAlgorithmWithLocalSearchRefined, + ) + + lama_register["EnhancedFireworkAlgorithmWithLocalSearchRefined"] = ( + EnhancedFireworkAlgorithmWithLocalSearchRefined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined = NonObjectOptimizer( + method="LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined" + ).set_name("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined", register=True) +except Exception as e: # EnhancedFireworkAlgorithmWithLocalSearchRefined print("EnhancedFireworkAlgorithmWithLocalSearchRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworkSwarmOptimization import EnhancedFireworkSwarmOptimization +try: # EnhancedFireworkSwarmOptimization + from nevergrad.optimization.lama.EnhancedFireworkSwarmOptimization import ( + EnhancedFireworkSwarmOptimization, + ) lama_register["EnhancedFireworkSwarmOptimization"] = EnhancedFireworkSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworkSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedFireworkSwarmOptimization").set_name("LLAMAEnhancedFireworkSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworkSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworkSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedFireworkSwarmOptimization" + ).set_name("LLAMAEnhancedFireworkSwarmOptimization", register=True) +except Exception as e: # EnhancedFireworkSwarmOptimization print("EnhancedFireworkSwarmOptimization can not be imported: ", e) -try: +try: # EnhancedFireworksAlgorithm from nevergrad.optimization.lama.EnhancedFireworksAlgorithm import EnhancedFireworksAlgorithm lama_register["EnhancedFireworksAlgorithm"] = EnhancedFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm").set_name("LLAMAEnhancedFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedFireworksAlgorithm").set_name( + "LLAMAEnhancedFireworksAlgorithm", register=True + ) +except Exception as e: # EnhancedFireworksAlgorithm print("EnhancedFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFireworksSwarmOptimization_v4 import EnhancedFireworksSwarmOptimization_v4 +try: # EnhancedFireworksSwarmOptimization_v4 + from nevergrad.optimization.lama.EnhancedFireworksSwarmOptimization_v4 import ( + EnhancedFireworksSwarmOptimization_v4, + ) lama_register["EnhancedFireworksSwarmOptimization_v4"] = EnhancedFireworksSwarmOptimization_v4 - res = NonObjectOptimizer(method="LLAMAEnhancedFireworksSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFireworksSwarmOptimization_v4 = NonObjectOptimizer(method="LLAMAEnhancedFireworksSwarmOptimization_v4").set_name("LLAMAEnhancedFireworksSwarmOptimization_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFireworksSwarmOptimization_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFireworksSwarmOptimization_v4 = NonObjectOptimizer( + method="LLAMAEnhancedFireworksSwarmOptimization_v4" + ).set_name("LLAMAEnhancedFireworksSwarmOptimization_v4", register=True) +except Exception as e: # EnhancedFireworksSwarmOptimization_v4 print("EnhancedFireworksSwarmOptimization_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedFocusedBalancedAdaptivePSO import EnhancedFocusedBalancedAdaptivePSO +try: # EnhancedFocusedBalancedAdaptivePSO + from nevergrad.optimization.lama.EnhancedFocusedBalancedAdaptivePSO import ( + EnhancedFocusedBalancedAdaptivePSO, + ) lama_register["EnhancedFocusedBalancedAdaptivePSO"] = EnhancedFocusedBalancedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAEnhancedFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedFocusedBalancedAdaptivePSO").set_name("LLAMAEnhancedFocusedBalancedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedFocusedBalancedAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedFocusedBalancedAdaptivePSO" + ).set_name("LLAMAEnhancedFocusedBalancedAdaptivePSO", register=True) +except Exception as e: # EnhancedFocusedBalancedAdaptivePSO print("EnhancedFocusedBalancedAdaptivePSO can not be imported: ", e) -try: +try: # EnhancedGlobalClimbingOptimizer from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizer import EnhancedGlobalClimbingOptimizer lama_register["EnhancedGlobalClimbingOptimizer"] = EnhancedGlobalClimbingOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGlobalClimbingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizer").set_name("LLAMAEnhancedGlobalClimbingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGlobalClimbingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalClimbingOptimizer" + ).set_name("LLAMAEnhancedGlobalClimbingOptimizer", register=True) +except Exception as e: # EnhancedGlobalClimbingOptimizer print("EnhancedGlobalClimbingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizerV3 import EnhancedGlobalClimbingOptimizerV3 +try: # EnhancedGlobalClimbingOptimizerV3 + from nevergrad.optimization.lama.EnhancedGlobalClimbingOptimizerV3 import ( + EnhancedGlobalClimbingOptimizerV3, + ) lama_register["EnhancedGlobalClimbingOptimizerV3"] = EnhancedGlobalClimbingOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGlobalClimbingOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizerV3").set_name("LLAMAEnhancedGlobalClimbingOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGlobalClimbingOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGlobalClimbingOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedGlobalClimbingOptimizerV3" + ).set_name("LLAMAEnhancedGlobalClimbingOptimizerV3", register=True) +except Exception as e: # EnhancedGlobalClimbingOptimizerV3 print("EnhancedGlobalClimbingOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGlobalStructureAdaptiveEvolver import EnhancedGlobalStructureAdaptiveEvolver +try: # EnhancedGlobalStructureAdaptiveEvolver + from nevergrad.optimization.lama.EnhancedGlobalStructureAdaptiveEvolver import ( + EnhancedGlobalStructureAdaptiveEvolver, + ) lama_register["EnhancedGlobalStructureAdaptiveEvolver"] = EnhancedGlobalStructureAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGlobalStructureAdaptiveEvolver = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAdaptiveEvolver").set_name("LLAMAEnhancedGlobalStructureAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGlobalStructureAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureAdaptiveEvolver" + ).set_name("LLAMAEnhancedGlobalStructureAdaptiveEvolver", register=True) +except Exception as e: # EnhancedGlobalStructureAdaptiveEvolver print("EnhancedGlobalStructureAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGlobalStructureAwareOptimizer import EnhancedGlobalStructureAwareOptimizer +try: # EnhancedGlobalStructureAwareOptimizer + from nevergrad.optimization.lama.EnhancedGlobalStructureAwareOptimizer import ( + EnhancedGlobalStructureAwareOptimizer, + ) lama_register["EnhancedGlobalStructureAwareOptimizer"] = EnhancedGlobalStructureAwareOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAwareOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGlobalStructureAwareOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAwareOptimizer").set_name("LLAMAEnhancedGlobalStructureAwareOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureAwareOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGlobalStructureAwareOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureAwareOptimizer" + ).set_name("LLAMAEnhancedGlobalStructureAwareOptimizer", register=True) +except Exception as e: # EnhancedGlobalStructureAwareOptimizer print("EnhancedGlobalStructureAwareOptimizer can not be imported: ", e) -try: +try: # EnhancedGlobalStructureOptimizer from nevergrad.optimization.lama.EnhancedGlobalStructureOptimizer import EnhancedGlobalStructureOptimizer lama_register["EnhancedGlobalStructureOptimizer"] = EnhancedGlobalStructureOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGlobalStructureOptimizer = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureOptimizer").set_name("LLAMAEnhancedGlobalStructureOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGlobalStructureOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGlobalStructureOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedGlobalStructureOptimizer" + ).set_name("LLAMAEnhancedGlobalStructureOptimizer", register=True) +except Exception as e: # EnhancedGlobalStructureOptimizer print("EnhancedGlobalStructureOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGradientBoostedAnnealingWithAdaptiveMemory import EnhancedGradientBoostedAnnealingWithAdaptiveMemory - - lama_register["EnhancedGradientBoostedAnnealingWithAdaptiveMemory"] = EnhancedGradientBoostedAnnealingWithAdaptiveMemory - res = NonObjectOptimizer(method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory = NonObjectOptimizer(method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory").set_name("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory", register=True) -except Exception as e: +try: # EnhancedGradientBoostedAnnealingWithAdaptiveMemory + from nevergrad.optimization.lama.EnhancedGradientBoostedAnnealingWithAdaptiveMemory import ( + EnhancedGradientBoostedAnnealingWithAdaptiveMemory, + ) + + lama_register["EnhancedGradientBoostedAnnealingWithAdaptiveMemory"] = ( + EnhancedGradientBoostedAnnealingWithAdaptiveMemory + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory = NonObjectOptimizer( + method="LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory" + ).set_name("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory", register=True) +except Exception as e: # EnhancedGradientBoostedAnnealingWithAdaptiveMemory print("EnhancedGradientBoostedAnnealingWithAdaptiveMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGradientGuidedClusterSearch import EnhancedGradientGuidedClusterSearch +try: # EnhancedGradientGuidedClusterSearch + from nevergrad.optimization.lama.EnhancedGradientGuidedClusterSearch import ( + EnhancedGradientGuidedClusterSearch, + ) lama_register["EnhancedGradientGuidedClusterSearch"] = EnhancedGradientGuidedClusterSearch - res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedClusterSearch").set_name("LLAMAEnhancedGradientGuidedClusterSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGradientGuidedClusterSearch = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedClusterSearch" + ).set_name("LLAMAEnhancedGradientGuidedClusterSearch", register=True) +except Exception as e: # EnhancedGradientGuidedClusterSearch print("EnhancedGradientGuidedClusterSearch can not be imported: ", e) -try: +try: # EnhancedGradientGuidedEvolution from nevergrad.optimization.lama.EnhancedGradientGuidedEvolution import EnhancedGradientGuidedEvolution lama_register["EnhancedGradientGuidedEvolution"] = EnhancedGradientGuidedEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGradientGuidedEvolution = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedEvolution").set_name("LLAMAEnhancedGradientGuidedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedEvolution" + ).set_name("LLAMAEnhancedGradientGuidedEvolution", register=True) +except Exception as e: # EnhancedGradientGuidedEvolution print("EnhancedGradientGuidedEvolution can not be imported: ", e) -try: +try: # EnhancedGradientGuidedHybridPSO from nevergrad.optimization.lama.EnhancedGradientGuidedHybridPSO import EnhancedGradientGuidedHybridPSO lama_register["EnhancedGradientGuidedHybridPSO"] = EnhancedGradientGuidedHybridPSO - res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedHybridPSO").set_name("LLAMAEnhancedGradientGuidedHybridPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: # EnhancedGradientGuidedHybridPSO print("EnhancedGradientGuidedHybridPSO can not be imported: ", e) -try: +try: # EnhancedGradualAdaptiveRAMEDS from nevergrad.optimization.lama.EnhancedGradualAdaptiveRAMEDS import EnhancedGradualAdaptiveRAMEDS lama_register["EnhancedGradualAdaptiveRAMEDS"] = EnhancedGradualAdaptiveRAMEDS - res = NonObjectOptimizer(method="LLAMAEnhancedGradualAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGradualAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedGradualAdaptiveRAMEDS").set_name("LLAMAEnhancedGradualAdaptiveRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGradualAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGradualAdaptiveRAMEDS = NonObjectOptimizer( + method="LLAMAEnhancedGradualAdaptiveRAMEDS" + ).set_name("LLAMAEnhancedGradualAdaptiveRAMEDS", register=True) +except Exception as e: # EnhancedGradualAdaptiveRAMEDS print("EnhancedGradualAdaptiveRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimization import EnhancedGravitationSwarmOptimization +try: # EnhancedGravitationSwarmOptimization + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimization import ( + EnhancedGravitationSwarmOptimization, + ) lama_register["EnhancedGravitationSwarmOptimization"] = EnhancedGravitationSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimization").set_name("LLAMAEnhancedGravitationSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedGravitationSwarmOptimization" + ).set_name("LLAMAEnhancedGravitationSwarmOptimization", register=True) +except Exception as e: # EnhancedGravitationSwarmOptimization print("EnhancedGravitationSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimizationV2 import EnhancedGravitationSwarmOptimizationV2 +try: # EnhancedGravitationSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedGravitationSwarmOptimizationV2 import ( + EnhancedGravitationSwarmOptimizationV2, + ) lama_register["EnhancedGravitationSwarmOptimizationV2"] = EnhancedGravitationSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimizationV2").set_name("LLAMAEnhancedGravitationSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationSwarmOptimizationV2" + ).set_name("LLAMAEnhancedGravitationSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedGravitationSwarmOptimizationV2 print("EnhancedGravitationSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV10 import EnhancedGravitationalSwarmIntelligenceV10 +try: # EnhancedGravitationalSwarmIntelligenceV10 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV10 import ( + EnhancedGravitationalSwarmIntelligenceV10, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV10"] = EnhancedGravitationalSwarmIntelligenceV10 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV10 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV10").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV10 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV10" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV10", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV10 print("EnhancedGravitationalSwarmIntelligenceV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV11 import EnhancedGravitationalSwarmIntelligenceV11 +try: # EnhancedGravitationalSwarmIntelligenceV11 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV11 import ( + EnhancedGravitationalSwarmIntelligenceV11, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV11"] = EnhancedGravitationalSwarmIntelligenceV11 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV11 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV11").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV11 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV11" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV11", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV11 print("EnhancedGravitationalSwarmIntelligenceV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV12 import EnhancedGravitationalSwarmIntelligenceV12 +try: # EnhancedGravitationalSwarmIntelligenceV12 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV12 import ( + EnhancedGravitationalSwarmIntelligenceV12, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV12"] = EnhancedGravitationalSwarmIntelligenceV12 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV12 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV12").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV12 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV12" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV12", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV12 print("EnhancedGravitationalSwarmIntelligenceV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV13 import EnhancedGravitationalSwarmIntelligenceV13 +try: # EnhancedGravitationalSwarmIntelligenceV13 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV13 import ( + EnhancedGravitationalSwarmIntelligenceV13, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV13"] = EnhancedGravitationalSwarmIntelligenceV13 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV13 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV13").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV13 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV13" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV13", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV13 print("EnhancedGravitationalSwarmIntelligenceV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV14 import EnhancedGravitationalSwarmIntelligenceV14 +try: # EnhancedGravitationalSwarmIntelligenceV14 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV14 import ( + EnhancedGravitationalSwarmIntelligenceV14, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV14"] = EnhancedGravitationalSwarmIntelligenceV14 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV14 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV14").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV14 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV14" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV14", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV14 print("EnhancedGravitationalSwarmIntelligenceV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV15 import EnhancedGravitationalSwarmIntelligenceV15 +try: # EnhancedGravitationalSwarmIntelligenceV15 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV15 import ( + EnhancedGravitationalSwarmIntelligenceV15, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV15"] = EnhancedGravitationalSwarmIntelligenceV15 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV15 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV15").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV15 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV15" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV15", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV15 print("EnhancedGravitationalSwarmIntelligenceV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV16 import EnhancedGravitationalSwarmIntelligenceV16 +try: # EnhancedGravitationalSwarmIntelligenceV16 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV16 import ( + EnhancedGravitationalSwarmIntelligenceV16, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV16"] = EnhancedGravitationalSwarmIntelligenceV16 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV16 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV16").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV16 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV16" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV16", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV16 print("EnhancedGravitationalSwarmIntelligenceV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV17 import EnhancedGravitationalSwarmIntelligenceV17 +try: # EnhancedGravitationalSwarmIntelligenceV17 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV17 import ( + EnhancedGravitationalSwarmIntelligenceV17, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV17"] = EnhancedGravitationalSwarmIntelligenceV17 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV17 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV17").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV17 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV17" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV17", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV17 print("EnhancedGravitationalSwarmIntelligenceV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV18 import EnhancedGravitationalSwarmIntelligenceV18 +try: # EnhancedGravitationalSwarmIntelligenceV18 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV18 import ( + EnhancedGravitationalSwarmIntelligenceV18, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV18"] = EnhancedGravitationalSwarmIntelligenceV18 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV18").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV18 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV18" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV18", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV18 print("EnhancedGravitationalSwarmIntelligenceV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV19 import EnhancedGravitationalSwarmIntelligenceV19 +try: # EnhancedGravitationalSwarmIntelligenceV19 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV19 import ( + EnhancedGravitationalSwarmIntelligenceV19, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV19"] = EnhancedGravitationalSwarmIntelligenceV19 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV19 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV19").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV19 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV19" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV19", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV19 print("EnhancedGravitationalSwarmIntelligenceV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV2 import EnhancedGravitationalSwarmIntelligenceV2 +try: # EnhancedGravitationalSwarmIntelligenceV2 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV2 import ( + EnhancedGravitationalSwarmIntelligenceV2, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV2"] = EnhancedGravitationalSwarmIntelligenceV2 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV2").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV2" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV2", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV2 print("EnhancedGravitationalSwarmIntelligenceV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV20 import EnhancedGravitationalSwarmIntelligenceV20 +try: # EnhancedGravitationalSwarmIntelligenceV20 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV20 import ( + EnhancedGravitationalSwarmIntelligenceV20, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV20"] = EnhancedGravitationalSwarmIntelligenceV20 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV20 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV20").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV20 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV20" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV20", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV20 print("EnhancedGravitationalSwarmIntelligenceV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV21 import EnhancedGravitationalSwarmIntelligenceV21 +try: # EnhancedGravitationalSwarmIntelligenceV21 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV21 import ( + EnhancedGravitationalSwarmIntelligenceV21, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV21"] = EnhancedGravitationalSwarmIntelligenceV21 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV21 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV21").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV21 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV21" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV21", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV21 print("EnhancedGravitationalSwarmIntelligenceV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV22 import EnhancedGravitationalSwarmIntelligenceV22 +try: # EnhancedGravitationalSwarmIntelligenceV22 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV22 import ( + EnhancedGravitationalSwarmIntelligenceV22, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV22"] = EnhancedGravitationalSwarmIntelligenceV22 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV22").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV22 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV22" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV22", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV22 print("EnhancedGravitationalSwarmIntelligenceV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV23 import EnhancedGravitationalSwarmIntelligenceV23 +try: # EnhancedGravitationalSwarmIntelligenceV23 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV23 import ( + EnhancedGravitationalSwarmIntelligenceV23, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV23"] = EnhancedGravitationalSwarmIntelligenceV23 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV23 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV23").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV23 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV23" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV23", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV23 print("EnhancedGravitationalSwarmIntelligenceV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV24 import EnhancedGravitationalSwarmIntelligenceV24 +try: # EnhancedGravitationalSwarmIntelligenceV24 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV24 import ( + EnhancedGravitationalSwarmIntelligenceV24, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV24"] = EnhancedGravitationalSwarmIntelligenceV24 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV24 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV24").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV24 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV24" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV24", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV24 print("EnhancedGravitationalSwarmIntelligenceV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV25 import EnhancedGravitationalSwarmIntelligenceV25 +try: # EnhancedGravitationalSwarmIntelligenceV25 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV25 import ( + EnhancedGravitationalSwarmIntelligenceV25, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV25"] = EnhancedGravitationalSwarmIntelligenceV25 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV25 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV25").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV25 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV25" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV25", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV25 print("EnhancedGravitationalSwarmIntelligenceV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV3 import EnhancedGravitationalSwarmIntelligenceV3 +try: # EnhancedGravitationalSwarmIntelligenceV3 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV3 import ( + EnhancedGravitationalSwarmIntelligenceV3, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV3"] = EnhancedGravitationalSwarmIntelligenceV3 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV3 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV3").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV3 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV3" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV3", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV3 print("EnhancedGravitationalSwarmIntelligenceV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV30 import EnhancedGravitationalSwarmIntelligenceV30 +try: # EnhancedGravitationalSwarmIntelligenceV30 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV30 import ( + EnhancedGravitationalSwarmIntelligenceV30, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV30"] = EnhancedGravitationalSwarmIntelligenceV30 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV30 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV30").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV30 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV30" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV30", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV30 print("EnhancedGravitationalSwarmIntelligenceV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV31 import EnhancedGravitationalSwarmIntelligenceV31 +try: # EnhancedGravitationalSwarmIntelligenceV31 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV31 import ( + EnhancedGravitationalSwarmIntelligenceV31, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV31"] = EnhancedGravitationalSwarmIntelligenceV31 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV31 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV31").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV31 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV31" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV31", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV31 print("EnhancedGravitationalSwarmIntelligenceV31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV32 import EnhancedGravitationalSwarmIntelligenceV32 +try: # EnhancedGravitationalSwarmIntelligenceV32 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV32 import ( + EnhancedGravitationalSwarmIntelligenceV32, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV32"] = EnhancedGravitationalSwarmIntelligenceV32 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV32 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV32").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV32 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV32" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV32", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV32 print("EnhancedGravitationalSwarmIntelligenceV32 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV4 import EnhancedGravitationalSwarmIntelligenceV4 +try: # EnhancedGravitationalSwarmIntelligenceV4 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV4 import ( + EnhancedGravitationalSwarmIntelligenceV4, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV4"] = EnhancedGravitationalSwarmIntelligenceV4 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV4 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV4").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV4 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV4" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV4", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV4 print("EnhancedGravitationalSwarmIntelligenceV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV6 import EnhancedGravitationalSwarmIntelligenceV6 +try: # EnhancedGravitationalSwarmIntelligenceV6 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV6 import ( + EnhancedGravitationalSwarmIntelligenceV6, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV6"] = EnhancedGravitationalSwarmIntelligenceV6 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV6 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV6").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV6 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV6" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV6", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV6 print("EnhancedGravitationalSwarmIntelligenceV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV7 import EnhancedGravitationalSwarmIntelligenceV7 +try: # EnhancedGravitationalSwarmIntelligenceV7 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV7 import ( + EnhancedGravitationalSwarmIntelligenceV7, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV7"] = EnhancedGravitationalSwarmIntelligenceV7 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV7 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV7").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV7 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV7" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV7", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV7 print("EnhancedGravitationalSwarmIntelligenceV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV8 import EnhancedGravitationalSwarmIntelligenceV8 +try: # EnhancedGravitationalSwarmIntelligenceV8 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV8 import ( + EnhancedGravitationalSwarmIntelligenceV8, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV8"] = EnhancedGravitationalSwarmIntelligenceV8 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV8 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV8").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV8 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV8" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV8", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV8 print("EnhancedGravitationalSwarmIntelligenceV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV9 import EnhancedGravitationalSwarmIntelligenceV9 +try: # EnhancedGravitationalSwarmIntelligenceV9 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmIntelligenceV9 import ( + EnhancedGravitationalSwarmIntelligenceV9, + ) lama_register["EnhancedGravitationalSwarmIntelligenceV9"] = EnhancedGravitationalSwarmIntelligenceV9 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmIntelligenceV9 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV9").set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmIntelligenceV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmIntelligenceV9 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmIntelligenceV9" + ).set_name("LLAMAEnhancedGravitationalSwarmIntelligenceV9", register=True) +except Exception as e: # EnhancedGravitationalSwarmIntelligenceV9 print("EnhancedGravitationalSwarmIntelligenceV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDiversityPreservation import EnhancedGravitationalSwarmOptimizationWithDiversityPreservation - - lama_register["EnhancedGravitationalSwarmOptimizationWithDiversityPreservation"] = EnhancedGravitationalSwarmOptimizationWithDiversityPreservation - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation", register=True) -except Exception as e: +try: # EnhancedGravitationalSwarmOptimizationWithDiversityPreservation + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDiversityPreservation import ( + EnhancedGravitationalSwarmOptimizationWithDiversityPreservation, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDiversityPreservation"] = ( + EnhancedGravitationalSwarmOptimizationWithDiversityPreservation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation", register=True) +except Exception as e: # EnhancedGravitationalSwarmOptimizationWithDiversityPreservation print("EnhancedGravitationalSwarmOptimizationWithDiversityPreservation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 import EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 - - lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2"] = EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2", register=True) -except Exception as e: +try: # EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 import ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2"] = ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2", register=True) +except Exception as e: # EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 import EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 - - lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3"] = EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3").set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3", register=True) -except Exception as e: +try: # EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 + from nevergrad.optimization.lama.EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 import ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3, + ) + + lama_register["EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3"] = ( + EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 = NonObjectOptimizer( + method="LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3" + ).set_name("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3", register=True) +except Exception as e: # EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 print("EnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3 can not be imported: ", e) -try: +try: # EnhancedGuidedMassQGSA_v62 from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v62 import EnhancedGuidedMassQGSA_v62 lama_register["EnhancedGuidedMassQGSA_v62"] = EnhancedGuidedMassQGSA_v62 - res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGuidedMassQGSA_v62 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62").set_name("LLAMAEnhancedGuidedMassQGSA_v62", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGuidedMassQGSA_v62 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v62").set_name( + "LLAMAEnhancedGuidedMassQGSA_v62", register=True + ) +except Exception as e: # EnhancedGuidedMassQGSA_v62 print("EnhancedGuidedMassQGSA_v62 can not be imported: ", e) -try: +try: # EnhancedGuidedMassQGSA_v94 from nevergrad.optimization.lama.EnhancedGuidedMassQGSA_v94 import EnhancedGuidedMassQGSA_v94 lama_register["EnhancedGuidedMassQGSA_v94"] = EnhancedGuidedMassQGSA_v94 - res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedGuidedMassQGSA_v94 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94").set_name("LLAMAEnhancedGuidedMassQGSA_v94", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedGuidedMassQGSA_v94 = NonObjectOptimizer(method="LLAMAEnhancedGuidedMassQGSA_v94").set_name( + "LLAMAEnhancedGuidedMassQGSA_v94", register=True + ) +except Exception as e: # EnhancedGuidedMassQGSA_v94 print("EnhancedGuidedMassQGSA_v94 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicFireworkAlgorithm import EnhancedHarmonicFireworkAlgorithm +try: # EnhancedHarmonicFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedHarmonicFireworkAlgorithm import ( + EnhancedHarmonicFireworkAlgorithm, + ) lama_register["EnhancedHarmonicFireworkAlgorithm"] = EnhancedHarmonicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHarmonicFireworkAlgorithm").set_name("LLAMAEnhancedHarmonicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicFireworkAlgorithm" + ).set_name("LLAMAEnhancedHarmonicFireworkAlgorithm", register=True) +except Exception as e: # EnhancedHarmonicFireworkAlgorithm print("EnhancedHarmonicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicLevyDolphinOptimization import EnhancedHarmonicLevyDolphinOptimization +try: # EnhancedHarmonicLevyDolphinOptimization + from nevergrad.optimization.lama.EnhancedHarmonicLevyDolphinOptimization import ( + EnhancedHarmonicLevyDolphinOptimization, + ) lama_register["EnhancedHarmonicLevyDolphinOptimization"] = EnhancedHarmonicLevyDolphinOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicLevyDolphinOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicLevyDolphinOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonicLevyDolphinOptimization").set_name("LLAMAEnhancedHarmonicLevyDolphinOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicLevyDolphinOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicLevyDolphinOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicLevyDolphinOptimization" + ).set_name("LLAMAEnhancedHarmonicLevyDolphinOptimization", register=True) +except Exception as e: # EnhancedHarmonicLevyDolphinOptimization print("EnhancedHarmonicLevyDolphinOptimization can not be imported: ", e) -try: +try: # EnhancedHarmonicSearchOptimizer from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizer import EnhancedHarmonicSearchOptimizer lama_register["EnhancedHarmonicSearchOptimizer"] = EnhancedHarmonicSearchOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSearchOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizer").set_name("LLAMAEnhancedHarmonicSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSearchOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizer" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizer", register=True) +except Exception as e: # EnhancedHarmonicSearchOptimizer print("EnhancedHarmonicSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV2 import EnhancedHarmonicSearchOptimizerV2 +try: # EnhancedHarmonicSearchOptimizerV2 + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV2 import ( + EnhancedHarmonicSearchOptimizerV2, + ) lama_register["EnhancedHarmonicSearchOptimizerV2"] = EnhancedHarmonicSearchOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSearchOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV2").set_name("LLAMAEnhancedHarmonicSearchOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSearchOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV2" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV2", register=True) +except Exception as e: # EnhancedHarmonicSearchOptimizerV2 print("EnhancedHarmonicSearchOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV3 import EnhancedHarmonicSearchOptimizerV3 +try: # EnhancedHarmonicSearchOptimizerV3 + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV3 import ( + EnhancedHarmonicSearchOptimizerV3, + ) lama_register["EnhancedHarmonicSearchOptimizerV3"] = EnhancedHarmonicSearchOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSearchOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV3").set_name("LLAMAEnhancedHarmonicSearchOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSearchOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV3" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV3", register=True) +except Exception as e: # EnhancedHarmonicSearchOptimizerV3 print("EnhancedHarmonicSearchOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV4 import EnhancedHarmonicSearchOptimizerV4 +try: # EnhancedHarmonicSearchOptimizerV4 + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV4 import ( + EnhancedHarmonicSearchOptimizerV4, + ) lama_register["EnhancedHarmonicSearchOptimizerV4"] = EnhancedHarmonicSearchOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSearchOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV4").set_name("LLAMAEnhancedHarmonicSearchOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSearchOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV4" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV4", register=True) +except Exception as e: # EnhancedHarmonicSearchOptimizerV4 print("EnhancedHarmonicSearchOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV5 import EnhancedHarmonicSearchOptimizerV5 +try: # EnhancedHarmonicSearchOptimizerV5 + from nevergrad.optimization.lama.EnhancedHarmonicSearchOptimizerV5 import ( + EnhancedHarmonicSearchOptimizerV5, + ) lama_register["EnhancedHarmonicSearchOptimizerV5"] = EnhancedHarmonicSearchOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSearchOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV5").set_name("LLAMAEnhancedHarmonicSearchOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSearchOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSearchOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSearchOptimizerV5" + ).set_name("LLAMAEnhancedHarmonicSearchOptimizerV5", register=True) +except Exception as e: # EnhancedHarmonicSearchOptimizerV5 print("EnhancedHarmonicSearchOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimization import EnhancedHarmonicSwarmOptimization +try: # EnhancedHarmonicSwarmOptimization + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimization import ( + EnhancedHarmonicSwarmOptimization, + ) lama_register["EnhancedHarmonicSwarmOptimization"] = EnhancedHarmonicSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimization").set_name("LLAMAEnhancedHarmonicSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimization" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimization", register=True) +except Exception as e: # EnhancedHarmonicSwarmOptimization print("EnhancedHarmonicSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV2 import EnhancedHarmonicSwarmOptimizationV2 +try: # EnhancedHarmonicSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV2 import ( + EnhancedHarmonicSwarmOptimizationV2, + ) lama_register["EnhancedHarmonicSwarmOptimizationV2"] = EnhancedHarmonicSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV2").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV2" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedHarmonicSwarmOptimizationV2 print("EnhancedHarmonicSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV3 import EnhancedHarmonicSwarmOptimizationV3 +try: # EnhancedHarmonicSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV3 import ( + EnhancedHarmonicSwarmOptimizationV3, + ) lama_register["EnhancedHarmonicSwarmOptimizationV3"] = EnhancedHarmonicSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV3").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV3" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedHarmonicSwarmOptimizationV3 print("EnhancedHarmonicSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV4 import EnhancedHarmonicSwarmOptimizationV4 +try: # EnhancedHarmonicSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedHarmonicSwarmOptimizationV4 import ( + EnhancedHarmonicSwarmOptimizationV4, + ) lama_register["EnhancedHarmonicSwarmOptimizationV4"] = EnhancedHarmonicSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV4").set_name("LLAMAEnhancedHarmonicSwarmOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicSwarmOptimizationV4" + ).set_name("LLAMAEnhancedHarmonicSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedHarmonicSwarmOptimizationV4 print("EnhancedHarmonicSwarmOptimizationV4 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV11 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV11 import EnhancedHarmonicTabuSearchV11 lama_register["EnhancedHarmonicTabuSearchV11"] = EnhancedHarmonicTabuSearchV11 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV11 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV11").set_name("LLAMAEnhancedHarmonicTabuSearchV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV11 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV11" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV11", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV11 print("EnhancedHarmonicTabuSearchV11 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV13 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV13 import EnhancedHarmonicTabuSearchV13 lama_register["EnhancedHarmonicTabuSearchV13"] = EnhancedHarmonicTabuSearchV13 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV13 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV13").set_name("LLAMAEnhancedHarmonicTabuSearchV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV13 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV13" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV13", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV13 print("EnhancedHarmonicTabuSearchV13 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV14 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV14 import EnhancedHarmonicTabuSearchV14 lama_register["EnhancedHarmonicTabuSearchV14"] = EnhancedHarmonicTabuSearchV14 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV14 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV14").set_name("LLAMAEnhancedHarmonicTabuSearchV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV14 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV14" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV14", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV14 print("EnhancedHarmonicTabuSearchV14 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV15 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV15 import EnhancedHarmonicTabuSearchV15 lama_register["EnhancedHarmonicTabuSearchV15"] = EnhancedHarmonicTabuSearchV15 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV15 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV15").set_name("LLAMAEnhancedHarmonicTabuSearchV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV15 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV15" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV15", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV15 print("EnhancedHarmonicTabuSearchV15 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV16 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV16 import EnhancedHarmonicTabuSearchV16 lama_register["EnhancedHarmonicTabuSearchV16"] = EnhancedHarmonicTabuSearchV16 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV16 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV16").set_name("LLAMAEnhancedHarmonicTabuSearchV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV16 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV16" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV16", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV16 print("EnhancedHarmonicTabuSearchV16 can not be imported: ", e) -try: +try: # EnhancedHarmonicTabuSearchV19 from nevergrad.optimization.lama.EnhancedHarmonicTabuSearchV19 import EnhancedHarmonicTabuSearchV19 lama_register["EnhancedHarmonicTabuSearchV19"] = EnhancedHarmonicTabuSearchV19 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonicTabuSearchV19 = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV19").set_name("LLAMAEnhancedHarmonicTabuSearchV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonicTabuSearchV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonicTabuSearchV19 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonicTabuSearchV19" + ).set_name("LLAMAEnhancedHarmonicTabuSearchV19", register=True) +except Exception as e: # EnhancedHarmonicTabuSearchV19 print("EnhancedHarmonicTabuSearchV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyDiversifiedCuckooAlgorithm import EnhancedHarmonyDiversifiedCuckooAlgorithm +try: # EnhancedHarmonyDiversifiedCuckooAlgorithm + from nevergrad.optimization.lama.EnhancedHarmonyDiversifiedCuckooAlgorithm import ( + EnhancedHarmonyDiversifiedCuckooAlgorithm, + ) lama_register["EnhancedHarmonyDiversifiedCuckooAlgorithm"] = EnhancedHarmonyDiversifiedCuckooAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm").set_name("LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm" + ).set_name("LLAMAEnhancedHarmonyDiversifiedCuckooAlgorithm", register=True) +except Exception as e: # EnhancedHarmonyDiversifiedCuckooAlgorithm print("EnhancedHarmonyDiversifiedCuckooAlgorithm can not be imported: ", e) -try: +try: # EnhancedHarmonyFireworkOptimizer from nevergrad.optimization.lama.EnhancedHarmonyFireworkOptimizer import EnhancedHarmonyFireworkOptimizer lama_register["EnhancedHarmonyFireworkOptimizer"] = EnhancedHarmonyFireworkOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHarmonyFireworkOptimizer").set_name("LLAMAEnhancedHarmonyFireworkOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyFireworkOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyFireworkOptimizer" + ).set_name("LLAMAEnhancedHarmonyFireworkOptimizer", register=True) +except Exception as e: # EnhancedHarmonyFireworkOptimizer print("EnhancedHarmonyFireworkOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV2 import EnhancedHarmonyMemeticAlgorithmV2 +try: # EnhancedHarmonyMemeticAlgorithmV2 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV2 import ( + EnhancedHarmonyMemeticAlgorithmV2, + ) lama_register["EnhancedHarmonyMemeticAlgorithmV2"] = EnhancedHarmonyMemeticAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV2").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV2" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV2", register=True) +except Exception as e: # EnhancedHarmonyMemeticAlgorithmV2 print("EnhancedHarmonyMemeticAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV3 import EnhancedHarmonyMemeticAlgorithmV3 +try: # EnhancedHarmonyMemeticAlgorithmV3 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV3 import ( + EnhancedHarmonyMemeticAlgorithmV3, + ) lama_register["EnhancedHarmonyMemeticAlgorithmV3"] = EnhancedHarmonyMemeticAlgorithmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticAlgorithmV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV3").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV3" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV3", register=True) +except Exception as e: # EnhancedHarmonyMemeticAlgorithmV3 print("EnhancedHarmonyMemeticAlgorithmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV4 import EnhancedHarmonyMemeticAlgorithmV4 +try: # EnhancedHarmonyMemeticAlgorithmV4 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticAlgorithmV4 import ( + EnhancedHarmonyMemeticAlgorithmV4, + ) lama_register["EnhancedHarmonyMemeticAlgorithmV4"] = EnhancedHarmonyMemeticAlgorithmV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticAlgorithmV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV4").set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticAlgorithmV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticAlgorithmV4 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticAlgorithmV4" + ).set_name("LLAMAEnhancedHarmonyMemeticAlgorithmV4", register=True) +except Exception as e: # EnhancedHarmonyMemeticAlgorithmV4 print("EnhancedHarmonyMemeticAlgorithmV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV10 import EnhancedHarmonyMemeticOptimizationV10 +try: # EnhancedHarmonyMemeticOptimizationV10 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV10 import ( + EnhancedHarmonyMemeticOptimizationV10, + ) lama_register["EnhancedHarmonyMemeticOptimizationV10"] = EnhancedHarmonyMemeticOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV10").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV10" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV10", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV10 print("EnhancedHarmonyMemeticOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV11 import EnhancedHarmonyMemeticOptimizationV11 +try: # EnhancedHarmonyMemeticOptimizationV11 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV11 import ( + EnhancedHarmonyMemeticOptimizationV11, + ) lama_register["EnhancedHarmonyMemeticOptimizationV11"] = EnhancedHarmonyMemeticOptimizationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV11").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV11" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV11", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV11 print("EnhancedHarmonyMemeticOptimizationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV12 import EnhancedHarmonyMemeticOptimizationV12 +try: # EnhancedHarmonyMemeticOptimizationV12 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV12 import ( + EnhancedHarmonyMemeticOptimizationV12, + ) lama_register["EnhancedHarmonyMemeticOptimizationV12"] = EnhancedHarmonyMemeticOptimizationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV12").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV12" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV12", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV12 print("EnhancedHarmonyMemeticOptimizationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV13 import EnhancedHarmonyMemeticOptimizationV13 +try: # EnhancedHarmonyMemeticOptimizationV13 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV13 import ( + EnhancedHarmonyMemeticOptimizationV13, + ) lama_register["EnhancedHarmonyMemeticOptimizationV13"] = EnhancedHarmonyMemeticOptimizationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV13").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV13" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV13", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV13 print("EnhancedHarmonyMemeticOptimizationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV14 import EnhancedHarmonyMemeticOptimizationV14 +try: # EnhancedHarmonyMemeticOptimizationV14 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV14 import ( + EnhancedHarmonyMemeticOptimizationV14, + ) lama_register["EnhancedHarmonyMemeticOptimizationV14"] = EnhancedHarmonyMemeticOptimizationV14 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV14 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV14").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV14 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV14" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV14", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV14 print("EnhancedHarmonyMemeticOptimizationV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV15 import EnhancedHarmonyMemeticOptimizationV15 +try: # EnhancedHarmonyMemeticOptimizationV15 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV15 import ( + EnhancedHarmonyMemeticOptimizationV15, + ) lama_register["EnhancedHarmonyMemeticOptimizationV15"] = EnhancedHarmonyMemeticOptimizationV15 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV15 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV15").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV15 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV15" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV15", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV15 print("EnhancedHarmonyMemeticOptimizationV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV16 import EnhancedHarmonyMemeticOptimizationV16 +try: # EnhancedHarmonyMemeticOptimizationV16 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV16 import ( + EnhancedHarmonyMemeticOptimizationV16, + ) lama_register["EnhancedHarmonyMemeticOptimizationV16"] = EnhancedHarmonyMemeticOptimizationV16 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV16 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV16").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV16 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV16" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV16", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV16 print("EnhancedHarmonyMemeticOptimizationV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV17 import EnhancedHarmonyMemeticOptimizationV17 +try: # EnhancedHarmonyMemeticOptimizationV17 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV17 import ( + EnhancedHarmonyMemeticOptimizationV17, + ) lama_register["EnhancedHarmonyMemeticOptimizationV17"] = EnhancedHarmonyMemeticOptimizationV17 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV17 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV17").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV17 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV17" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV17", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV17 print("EnhancedHarmonyMemeticOptimizationV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV34 import EnhancedHarmonyMemeticOptimizationV34 +try: # EnhancedHarmonyMemeticOptimizationV34 + from nevergrad.optimization.lama.EnhancedHarmonyMemeticOptimizationV34 import ( + EnhancedHarmonyMemeticOptimizationV34, + ) lama_register["EnhancedHarmonyMemeticOptimizationV34"] = EnhancedHarmonyMemeticOptimizationV34 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticOptimizationV34 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV34").set_name("LLAMAEnhancedHarmonyMemeticOptimizationV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticOptimizationV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticOptimizationV34 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticOptimizationV34" + ).set_name("LLAMAEnhancedHarmonyMemeticOptimizationV34", register=True) +except Exception as e: # EnhancedHarmonyMemeticOptimizationV34 print("EnhancedHarmonyMemeticOptimizationV34 can not be imported: ", e) -try: +try: # EnhancedHarmonyMemeticSearch from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearch import EnhancedHarmonyMemeticSearch lama_register["EnhancedHarmonyMemeticSearch"] = EnhancedHarmonyMemeticSearch - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearch").set_name("LLAMAEnhancedHarmonyMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticSearch = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearch" + ).set_name("LLAMAEnhancedHarmonyMemeticSearch", register=True) +except Exception as e: # EnhancedHarmonyMemeticSearch print("EnhancedHarmonyMemeticSearch can not be imported: ", e) -try: +try: # EnhancedHarmonyMemeticSearchV2 from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV2 import EnhancedHarmonyMemeticSearchV2 lama_register["EnhancedHarmonyMemeticSearchV2"] = EnhancedHarmonyMemeticSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV2").set_name("LLAMAEnhancedHarmonyMemeticSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearchV2" + ).set_name("LLAMAEnhancedHarmonyMemeticSearchV2", register=True) +except Exception as e: # EnhancedHarmonyMemeticSearchV2 print("EnhancedHarmonyMemeticSearchV2 can not be imported: ", e) -try: +try: # EnhancedHarmonyMemeticSearchV3 from nevergrad.optimization.lama.EnhancedHarmonyMemeticSearchV3 import EnhancedHarmonyMemeticSearchV3 lama_register["EnhancedHarmonyMemeticSearchV3"] = EnhancedHarmonyMemeticSearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyMemeticSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV3").set_name("LLAMAEnhancedHarmonyMemeticSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyMemeticSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyMemeticSearchV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyMemeticSearchV3" + ).set_name("LLAMAEnhancedHarmonyMemeticSearchV3", register=True) +except Exception as e: # EnhancedHarmonyMemeticSearchV3 print("EnhancedHarmonyMemeticSearchV3 can not be imported: ", e) -try: +try: # EnhancedHarmonySearchOB from nevergrad.optimization.lama.EnhancedHarmonySearchOB import EnhancedHarmonySearchOB lama_register["EnhancedHarmonySearchOB"] = EnhancedHarmonySearchOB - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB").set_name("LLAMAEnhancedHarmonySearchOB", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchOB").set_name( + "LLAMAEnhancedHarmonySearchOB", register=True + ) +except Exception as e: # EnhancedHarmonySearchOB print("EnhancedHarmonySearchOB can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - - lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) -except Exception as e: +try: # EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( + EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: # EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration print("EnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightV2 import EnhancedHarmonySearchWithAdaptiveLevyFlightV2 - - lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightV2"] = EnhancedHarmonySearchWithAdaptiveLevyFlightV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2").set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2", register=True) -except Exception as e: +try: # EnhancedHarmonySearchWithAdaptiveLevyFlightV2 + from nevergrad.optimization.lama.EnhancedHarmonySearchWithAdaptiveLevyFlightV2 import ( + EnhancedHarmonySearchWithAdaptiveLevyFlightV2, + ) + + lama_register["EnhancedHarmonySearchWithAdaptiveLevyFlightV2"] = ( + EnhancedHarmonySearchWithAdaptiveLevyFlightV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2" + ).set_name("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2", register=True) +except Exception as e: # EnhancedHarmonySearchWithAdaptiveLevyFlightV2 print("EnhancedHarmonySearchWithAdaptiveLevyFlightV2 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuOptimization from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimization import EnhancedHarmonyTabuOptimization lama_register["EnhancedHarmonyTabuOptimization"] = EnhancedHarmonyTabuOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimization").set_name("LLAMAEnhancedHarmonyTabuOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimization" + ).set_name("LLAMAEnhancedHarmonyTabuOptimization", register=True) +except Exception as e: # EnhancedHarmonyTabuOptimization print("EnhancedHarmonyTabuOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV2 import EnhancedHarmonyTabuOptimizationV2 +try: # EnhancedHarmonyTabuOptimizationV2 + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV2 import ( + EnhancedHarmonyTabuOptimizationV2, + ) lama_register["EnhancedHarmonyTabuOptimizationV2"] = EnhancedHarmonyTabuOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV2").set_name("LLAMAEnhancedHarmonyTabuOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimizationV2" + ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV2", register=True) +except Exception as e: # EnhancedHarmonyTabuOptimizationV2 print("EnhancedHarmonyTabuOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV3 import EnhancedHarmonyTabuOptimizationV3 +try: # EnhancedHarmonyTabuOptimizationV3 + from nevergrad.optimization.lama.EnhancedHarmonyTabuOptimizationV3 import ( + EnhancedHarmonyTabuOptimizationV3, + ) lama_register["EnhancedHarmonyTabuOptimizationV3"] = EnhancedHarmonyTabuOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV3").set_name("LLAMAEnhancedHarmonyTabuOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedHarmonyTabuOptimizationV3" + ).set_name("LLAMAEnhancedHarmonyTabuOptimizationV3", register=True) +except Exception as e: # EnhancedHarmonyTabuOptimizationV3 print("EnhancedHarmonyTabuOptimizationV3 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearch from nevergrad.optimization.lama.EnhancedHarmonyTabuSearch import EnhancedHarmonyTabuSearch lama_register["EnhancedHarmonyTabuSearch"] = EnhancedHarmonyTabuSearch - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch").set_name("LLAMAEnhancedHarmonyTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearch").set_name( + "LLAMAEnhancedHarmonyTabuSearch", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearch print("EnhancedHarmonyTabuSearch can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearchV2 from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV2 import EnhancedHarmonyTabuSearchV2 lama_register["EnhancedHarmonyTabuSearchV2"] = EnhancedHarmonyTabuSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2").set_name("LLAMAEnhancedHarmonyTabuSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV2").set_name( + "LLAMAEnhancedHarmonyTabuSearchV2", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearchV2 print("EnhancedHarmonyTabuSearchV2 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearchV3 from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV3 import EnhancedHarmonyTabuSearchV3 lama_register["EnhancedHarmonyTabuSearchV3"] = EnhancedHarmonyTabuSearchV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3").set_name("LLAMAEnhancedHarmonyTabuSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearchV3 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV3").set_name( + "LLAMAEnhancedHarmonyTabuSearchV3", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearchV3 print("EnhancedHarmonyTabuSearchV3 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearchV4 from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV4 import EnhancedHarmonyTabuSearchV4 lama_register["EnhancedHarmonyTabuSearchV4"] = EnhancedHarmonyTabuSearchV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4").set_name("LLAMAEnhancedHarmonyTabuSearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearchV4 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV4").set_name( + "LLAMAEnhancedHarmonyTabuSearchV4", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearchV4 print("EnhancedHarmonyTabuSearchV4 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearchV6 from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV6 import EnhancedHarmonyTabuSearchV6 lama_register["EnhancedHarmonyTabuSearchV6"] = EnhancedHarmonyTabuSearchV6 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearchV6 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6").set_name("LLAMAEnhancedHarmonyTabuSearchV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearchV6 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV6").set_name( + "LLAMAEnhancedHarmonyTabuSearchV6", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearchV6 print("EnhancedHarmonyTabuSearchV6 can not be imported: ", e) -try: +try: # EnhancedHarmonyTabuSearchV7 from nevergrad.optimization.lama.EnhancedHarmonyTabuSearchV7 import EnhancedHarmonyTabuSearchV7 lama_register["EnhancedHarmonyTabuSearchV7"] = EnhancedHarmonyTabuSearchV7 - res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHarmonyTabuSearchV7 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7").set_name("LLAMAEnhancedHarmonyTabuSearchV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHarmonyTabuSearchV7 = NonObjectOptimizer(method="LLAMAEnhancedHarmonyTabuSearchV7").set_name( + "LLAMAEnhancedHarmonyTabuSearchV7", register=True + ) +except Exception as e: # EnhancedHarmonyTabuSearchV7 print("EnhancedHarmonyTabuSearchV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHierarchicalCovarianceMatrixAdaptation import EnhancedHierarchicalCovarianceMatrixAdaptation - - lama_register["EnhancedHierarchicalCovarianceMatrixAdaptation"] = EnhancedHierarchicalCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation").set_name("LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation", register=True) -except Exception as e: +try: # EnhancedHierarchicalCovarianceMatrixAdaptation + from nevergrad.optimization.lama.EnhancedHierarchicalCovarianceMatrixAdaptation import ( + EnhancedHierarchicalCovarianceMatrixAdaptation, + ) + + lama_register["EnhancedHierarchicalCovarianceMatrixAdaptation"] = ( + EnhancedHierarchicalCovarianceMatrixAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation" + ).set_name("LLAMAEnhancedHierarchicalCovarianceMatrixAdaptation", register=True) +except Exception as e: # EnhancedHierarchicalCovarianceMatrixAdaptation print("EnhancedHierarchicalCovarianceMatrixAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveDifferentialEvolution import EnhancedHybridAdaptiveDifferentialEvolution +try: # EnhancedHybridAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedHybridAdaptiveDifferentialEvolution import ( + EnhancedHybridAdaptiveDifferentialEvolution, + ) lama_register["EnhancedHybridAdaptiveDifferentialEvolution"] = EnhancedHybridAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedHybridAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedHybridAdaptiveDifferentialEvolution print("EnhancedHybridAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveExplorationOptimizer import EnhancedHybridAdaptiveExplorationOptimizer +try: # EnhancedHybridAdaptiveExplorationOptimizer + from nevergrad.optimization.lama.EnhancedHybridAdaptiveExplorationOptimizer import ( + EnhancedHybridAdaptiveExplorationOptimizer, + ) lama_register["EnhancedHybridAdaptiveExplorationOptimizer"] = EnhancedHybridAdaptiveExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer").set_name("LLAMAEnhancedHybridAdaptiveExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveExplorationOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveExplorationOptimizer", register=True) +except Exception as e: # EnhancedHybridAdaptiveExplorationOptimizer print("EnhancedHybridAdaptiveExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveGeneticSwarmOptimizer import EnhancedHybridAdaptiveGeneticSwarmOptimizer +try: # EnhancedHybridAdaptiveGeneticSwarmOptimizer + from nevergrad.optimization.lama.EnhancedHybridAdaptiveGeneticSwarmOptimizer import ( + EnhancedHybridAdaptiveGeneticSwarmOptimizer, + ) lama_register["EnhancedHybridAdaptiveGeneticSwarmOptimizer"] = EnhancedHybridAdaptiveGeneticSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: # EnhancedHybridAdaptiveGeneticSwarmOptimizer print("EnhancedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveHarmonicFireworksTabuSearch import EnhancedHybridAdaptiveHarmonicFireworksTabuSearch - - lama_register["EnhancedHybridAdaptiveHarmonicFireworksTabuSearch"] = EnhancedHybridAdaptiveHarmonicFireworksTabuSearch - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) -except Exception as e: +try: # EnhancedHybridAdaptiveHarmonicFireworksTabuSearch + from nevergrad.optimization.lama.EnhancedHybridAdaptiveHarmonicFireworksTabuSearch import ( + EnhancedHybridAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["EnhancedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( + EnhancedHybridAdaptiveHarmonicFireworksTabuSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: # EnhancedHybridAdaptiveHarmonicFireworksTabuSearch print("EnhancedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMemoryAnnealing import EnhancedHybridAdaptiveMemoryAnnealing +try: # EnhancedHybridAdaptiveMemoryAnnealing + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMemoryAnnealing import ( + EnhancedHybridAdaptiveMemoryAnnealing, + ) lama_register["EnhancedHybridAdaptiveMemoryAnnealing"] = EnhancedHybridAdaptiveMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing").set_name("LLAMAEnhancedHybridAdaptiveMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMemoryAnnealing" + ).set_name("LLAMAEnhancedHybridAdaptiveMemoryAnnealing", register=True) +except Exception as e: # EnhancedHybridAdaptiveMemoryAnnealing print("EnhancedHybridAdaptiveMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiPhaseEvolution import EnhancedHybridAdaptiveMultiPhaseEvolution +try: # EnhancedHybridAdaptiveMultiPhaseEvolution + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiPhaseEvolution import ( + EnhancedHybridAdaptiveMultiPhaseEvolution, + ) lama_register["EnhancedHybridAdaptiveMultiPhaseEvolution"] = EnhancedHybridAdaptiveMultiPhaseEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution").set_name("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution", register=True) +except Exception as e: # EnhancedHybridAdaptiveMultiPhaseEvolution print("EnhancedHybridAdaptiveMultiPhaseEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiStageOptimization import EnhancedHybridAdaptiveMultiStageOptimization - - lama_register["EnhancedHybridAdaptiveMultiStageOptimization"] = EnhancedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization").set_name("LLAMAEnhancedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: +try: # EnhancedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.EnhancedHybridAdaptiveMultiStageOptimization import ( + EnhancedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["EnhancedHybridAdaptiveMultiStageOptimization"] = ( + EnhancedHybridAdaptiveMultiStageOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAEnhancedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # EnhancedHybridAdaptiveMultiStageOptimization print("EnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveQuantumOptimizer import EnhancedHybridAdaptiveQuantumOptimizer +try: # EnhancedHybridAdaptiveQuantumOptimizer + from nevergrad.optimization.lama.EnhancedHybridAdaptiveQuantumOptimizer import ( + EnhancedHybridAdaptiveQuantumOptimizer, + ) lama_register["EnhancedHybridAdaptiveQuantumOptimizer"] = EnhancedHybridAdaptiveQuantumOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveQuantumOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer").set_name("LLAMAEnhancedHybridAdaptiveQuantumOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveQuantumOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveQuantumOptimizer" + ).set_name("LLAMAEnhancedHybridAdaptiveQuantumOptimizer", register=True) +except Exception as e: # EnhancedHybridAdaptiveQuantumOptimizer print("EnhancedHybridAdaptiveQuantumOptimizer can not be imported: ", e) -try: +try: # EnhancedHybridAdaptiveSearch from nevergrad.optimization.lama.EnhancedHybridAdaptiveSearch import EnhancedHybridAdaptiveSearch lama_register["EnhancedHybridAdaptiveSearch"] = EnhancedHybridAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSearch").set_name("LLAMAEnhancedHybridAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveSearch = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveSearch" + ).set_name("LLAMAEnhancedHybridAdaptiveSearch", register=True) +except Exception as e: # EnhancedHybridAdaptiveSearch print("EnhancedHybridAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution import EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution - - lama_register["EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution"] = EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution import ( + EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( + EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution print("EnhancedHybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedHybridCMAESDE from nevergrad.optimization.lama.EnhancedHybridCMAESDE import EnhancedHybridCMAESDE lama_register["EnhancedHybridCMAESDE"] = EnhancedHybridCMAESDE - res = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridCMAESDE = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE").set_name("LLAMAEnhancedHybridCMAESDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridCMAESDE = NonObjectOptimizer(method="LLAMAEnhancedHybridCMAESDE").set_name( + "LLAMAEnhancedHybridCMAESDE", register=True + ) +except Exception as e: # EnhancedHybridCMAESDE print("EnhancedHybridCMAESDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridCovarianceMatrixDifferentialEvolution import EnhancedHybridCovarianceMatrixDifferentialEvolution - - lama_register["EnhancedHybridCovarianceMatrixDifferentialEvolution"] = EnhancedHybridCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedHybridCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.EnhancedHybridCovarianceMatrixDifferentialEvolution import ( + EnhancedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedHybridCovarianceMatrixDifferentialEvolution"] = ( + EnhancedHybridCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # EnhancedHybridCovarianceMatrixDifferentialEvolution print("EnhancedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOWithDynamicAdaptationV4 import EnhancedHybridDEPSOWithDynamicAdaptationV4 +try: # EnhancedHybridDEPSOWithDynamicAdaptationV4 + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithDynamicAdaptationV4 import ( + EnhancedHybridDEPSOWithDynamicAdaptationV4, + ) lama_register["EnhancedHybridDEPSOWithDynamicAdaptationV4"] = EnhancedHybridDEPSOWithDynamicAdaptationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4 = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4").set_name("LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4 = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4" + ).set_name("LLAMAEnhancedHybridDEPSOWithDynamicAdaptationV4", register=True) +except Exception as e: # EnhancedHybridDEPSOWithDynamicAdaptationV4 print("EnhancedHybridDEPSOWithDynamicAdaptationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOWithQuantumLevyFlight import EnhancedHybridDEPSOWithQuantumLevyFlight +try: # EnhancedHybridDEPSOWithQuantumLevyFlight + from nevergrad.optimization.lama.EnhancedHybridDEPSOWithQuantumLevyFlight import ( + EnhancedHybridDEPSOWithQuantumLevyFlight, + ) lama_register["EnhancedHybridDEPSOWithQuantumLevyFlight"] = EnhancedHybridDEPSOWithQuantumLevyFlight - res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight").set_name("LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight" + ).set_name("LLAMAEnhancedHybridDEPSOWithQuantumLevyFlight", register=True) +except Exception as e: # EnhancedHybridDEPSOWithQuantumLevyFlight print("EnhancedHybridDEPSOWithQuantumLevyFlight can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridDEPSOwithAdaptiveRestart import EnhancedHybridDEPSOwithAdaptiveRestart +try: # EnhancedHybridDEPSOwithAdaptiveRestart + from nevergrad.optimization.lama.EnhancedHybridDEPSOwithAdaptiveRestart import ( + EnhancedHybridDEPSOwithAdaptiveRestart, + ) lama_register["EnhancedHybridDEPSOwithAdaptiveRestart"] = EnhancedHybridDEPSOwithAdaptiveRestart - res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridDEPSOwithAdaptiveRestart = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart").set_name("LLAMAEnhancedHybridDEPSOwithAdaptiveRestart", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridDEPSOwithAdaptiveRestart = NonObjectOptimizer( + method="LLAMAEnhancedHybridDEPSOwithAdaptiveRestart" + ).set_name("LLAMAEnhancedHybridDEPSOwithAdaptiveRestart", register=True) +except Exception as e: # EnhancedHybridDEPSOwithAdaptiveRestart print("EnhancedHybridDEPSOwithAdaptiveRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridDifferentialEvolutionMemeticOptimizer import EnhancedHybridDifferentialEvolutionMemeticOptimizer - - lama_register["EnhancedHybridDifferentialEvolutionMemeticOptimizer"] = EnhancedHybridDifferentialEvolutionMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer").set_name("LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer", register=True) -except Exception as e: +try: # EnhancedHybridDifferentialEvolutionMemeticOptimizer + from nevergrad.optimization.lama.EnhancedHybridDifferentialEvolutionMemeticOptimizer import ( + EnhancedHybridDifferentialEvolutionMemeticOptimizer, + ) + + lama_register["EnhancedHybridDifferentialEvolutionMemeticOptimizer"] = ( + EnhancedHybridDifferentialEvolutionMemeticOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer" + ).set_name("LLAMAEnhancedHybridDifferentialEvolutionMemeticOptimizer", register=True) +except Exception as e: # EnhancedHybridDifferentialEvolutionMemeticOptimizer print("EnhancedHybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridDynamicAdaptiveExplorationOptimization import EnhancedHybridDynamicAdaptiveExplorationOptimization - - lama_register["EnhancedHybridDynamicAdaptiveExplorationOptimization"] = EnhancedHybridDynamicAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization").set_name("LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedHybridDynamicAdaptiveExplorationOptimization + from nevergrad.optimization.lama.EnhancedHybridDynamicAdaptiveExplorationOptimization import ( + EnhancedHybridDynamicAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedHybridDynamicAdaptiveExplorationOptimization"] = ( + EnhancedHybridDynamicAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedHybridDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: # EnhancedHybridDynamicAdaptiveExplorationOptimization print("EnhancedHybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridExplorationOptimization import EnhancedHybridExplorationOptimization +try: # EnhancedHybridExplorationOptimization + from nevergrad.optimization.lama.EnhancedHybridExplorationOptimization import ( + EnhancedHybridExplorationOptimization, + ) lama_register["EnhancedHybridExplorationOptimization"] = EnhancedHybridExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridExplorationOptimization").set_name("LLAMAEnhancedHybridExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridExplorationOptimization" + ).set_name("LLAMAEnhancedHybridExplorationOptimization", register=True) +except Exception as e: # EnhancedHybridExplorationOptimization print("EnhancedHybridExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridGradientAnnealingWithMemory import EnhancedHybridGradientAnnealingWithMemory +try: # EnhancedHybridGradientAnnealingWithMemory + from nevergrad.optimization.lama.EnhancedHybridGradientAnnealingWithMemory import ( + EnhancedHybridGradientAnnealingWithMemory, + ) lama_register["EnhancedHybridGradientAnnealingWithMemory"] = EnhancedHybridGradientAnnealingWithMemory - res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridGradientAnnealingWithMemory = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientAnnealingWithMemory").set_name("LLAMAEnhancedHybridGradientAnnealingWithMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridGradientAnnealingWithMemory = NonObjectOptimizer( + method="LLAMAEnhancedHybridGradientAnnealingWithMemory" + ).set_name("LLAMAEnhancedHybridGradientAnnealingWithMemory", register=True) +except Exception as e: # EnhancedHybridGradientAnnealingWithMemory print("EnhancedHybridGradientAnnealingWithMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridGradientBasedStrategyV8 import EnhancedHybridGradientBasedStrategyV8 +try: # EnhancedHybridGradientBasedStrategyV8 + from nevergrad.optimization.lama.EnhancedHybridGradientBasedStrategyV8 import ( + EnhancedHybridGradientBasedStrategyV8, + ) lama_register["EnhancedHybridGradientBasedStrategyV8"] = EnhancedHybridGradientBasedStrategyV8 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientBasedStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridGradientBasedStrategyV8 = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientBasedStrategyV8").set_name("LLAMAEnhancedHybridGradientBasedStrategyV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientBasedStrategyV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridGradientBasedStrategyV8 = NonObjectOptimizer( + method="LLAMAEnhancedHybridGradientBasedStrategyV8" + ).set_name("LLAMAEnhancedHybridGradientBasedStrategyV8", register=True) +except Exception as e: # EnhancedHybridGradientBasedStrategyV8 print("EnhancedHybridGradientBasedStrategyV8 can not be imported: ", e) -try: +try: # EnhancedHybridGradientPSO from nevergrad.optimization.lama.EnhancedHybridGradientPSO import EnhancedHybridGradientPSO lama_register["EnhancedHybridGradientPSO"] = EnhancedHybridGradientPSO - res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridGradientPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO").set_name("LLAMAEnhancedHybridGradientPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridGradientPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridGradientPSO").set_name( + "LLAMAEnhancedHybridGradientPSO", register=True + ) +except Exception as e: # EnhancedHybridGradientPSO print("EnhancedHybridGradientPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridHarmonySearchWithAdaptiveMutationV20 import EnhancedHybridHarmonySearchWithAdaptiveMutationV20 - - lama_register["EnhancedHybridHarmonySearchWithAdaptiveMutationV20"] = EnhancedHybridHarmonySearchWithAdaptiveMutationV20 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20 = NonObjectOptimizer(method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20").set_name("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20", register=True) -except Exception as e: +try: # EnhancedHybridHarmonySearchWithAdaptiveMutationV20 + from nevergrad.optimization.lama.EnhancedHybridHarmonySearchWithAdaptiveMutationV20 import ( + EnhancedHybridHarmonySearchWithAdaptiveMutationV20, + ) + + lama_register["EnhancedHybridHarmonySearchWithAdaptiveMutationV20"] = ( + EnhancedHybridHarmonySearchWithAdaptiveMutationV20 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20 = NonObjectOptimizer( + method="LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20" + ).set_name("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20", register=True) +except Exception as e: # EnhancedHybridHarmonySearchWithAdaptiveMutationV20 print("EnhancedHybridHarmonySearchWithAdaptiveMutationV20 can not be imported: ", e) -try: +try: # EnhancedHybridMemoryAdaptiveDE from nevergrad.optimization.lama.EnhancedHybridMemoryAdaptiveDE import EnhancedHybridMemoryAdaptiveDE lama_register["EnhancedHybridMemoryAdaptiveDE"] = EnhancedHybridMemoryAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryAdaptiveDE").set_name("LLAMAEnhancedHybridMemoryAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMemoryAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedHybridMemoryAdaptiveDE" + ).set_name("LLAMAEnhancedHybridMemoryAdaptiveDE", register=True) +except Exception as e: # EnhancedHybridMemoryAdaptiveDE print("EnhancedHybridMemoryAdaptiveDE can not be imported: ", e) -try: +try: # EnhancedHybridMemoryPSO from nevergrad.optimization.lama.EnhancedHybridMemoryPSO import EnhancedHybridMemoryPSO lama_register["EnhancedHybridMemoryPSO"] = EnhancedHybridMemoryPSO - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMemoryPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO").set_name("LLAMAEnhancedHybridMemoryPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMemoryPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridMemoryPSO").set_name( + "LLAMAEnhancedHybridMemoryPSO", register=True + ) +except Exception as e: # EnhancedHybridMemoryPSO print("EnhancedHybridMemoryPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizer import EnhancedHybridMetaHeuristicOptimizer +try: # EnhancedHybridMetaHeuristicOptimizer + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizer import ( + EnhancedHybridMetaHeuristicOptimizer, + ) lama_register["EnhancedHybridMetaHeuristicOptimizer"] = EnhancedHybridMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizer").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizer", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizer print("EnhancedHybridMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV10 import EnhancedHybridMetaHeuristicOptimizerV10 +try: # EnhancedHybridMetaHeuristicOptimizerV10 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV10 import ( + EnhancedHybridMetaHeuristicOptimizerV10, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV10"] = EnhancedHybridMetaHeuristicOptimizerV10 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV10 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV10" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV10", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV10 print("EnhancedHybridMetaHeuristicOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV11 import EnhancedHybridMetaHeuristicOptimizerV11 +try: # EnhancedHybridMetaHeuristicOptimizerV11 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV11 import ( + EnhancedHybridMetaHeuristicOptimizerV11, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV11"] = EnhancedHybridMetaHeuristicOptimizerV11 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV11 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV11" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV11", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV11 print("EnhancedHybridMetaHeuristicOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV12 import EnhancedHybridMetaHeuristicOptimizerV12 +try: # EnhancedHybridMetaHeuristicOptimizerV12 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV12 import ( + EnhancedHybridMetaHeuristicOptimizerV12, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV12"] = EnhancedHybridMetaHeuristicOptimizerV12 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV12" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV12", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV12 print("EnhancedHybridMetaHeuristicOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV15 import EnhancedHybridMetaHeuristicOptimizerV15 +try: # EnhancedHybridMetaHeuristicOptimizerV15 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV15 import ( + EnhancedHybridMetaHeuristicOptimizerV15, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV15"] = EnhancedHybridMetaHeuristicOptimizerV15 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV15 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV15 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV15" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV15", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV15 print("EnhancedHybridMetaHeuristicOptimizerV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV2 import EnhancedHybridMetaHeuristicOptimizerV2 +try: # EnhancedHybridMetaHeuristicOptimizerV2 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV2 import ( + EnhancedHybridMetaHeuristicOptimizerV2, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV2"] = EnhancedHybridMetaHeuristicOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV2", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV2 print("EnhancedHybridMetaHeuristicOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV3 import EnhancedHybridMetaHeuristicOptimizerV3 +try: # EnhancedHybridMetaHeuristicOptimizerV3 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV3 import ( + EnhancedHybridMetaHeuristicOptimizerV3, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV3"] = EnhancedHybridMetaHeuristicOptimizerV3 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV3 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV3" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV3", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV3 print("EnhancedHybridMetaHeuristicOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV4 import EnhancedHybridMetaHeuristicOptimizerV4 +try: # EnhancedHybridMetaHeuristicOptimizerV4 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV4 import ( + EnhancedHybridMetaHeuristicOptimizerV4, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV4"] = EnhancedHybridMetaHeuristicOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV4", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV4 print("EnhancedHybridMetaHeuristicOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV5 import EnhancedHybridMetaHeuristicOptimizerV5 +try: # EnhancedHybridMetaHeuristicOptimizerV5 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV5 import ( + EnhancedHybridMetaHeuristicOptimizerV5, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV5"] = EnhancedHybridMetaHeuristicOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV5" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV5", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV5 print("EnhancedHybridMetaHeuristicOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV6 import EnhancedHybridMetaHeuristicOptimizerV6 +try: # EnhancedHybridMetaHeuristicOptimizerV6 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV6 import ( + EnhancedHybridMetaHeuristicOptimizerV6, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV6"] = EnhancedHybridMetaHeuristicOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV6" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV6", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV6 print("EnhancedHybridMetaHeuristicOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV7 import EnhancedHybridMetaHeuristicOptimizerV7 +try: # EnhancedHybridMetaHeuristicOptimizerV7 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV7 import ( + EnhancedHybridMetaHeuristicOptimizerV7, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV7"] = EnhancedHybridMetaHeuristicOptimizerV7 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV7 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV7" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV7", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV7 print("EnhancedHybridMetaHeuristicOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV8 import EnhancedHybridMetaHeuristicOptimizerV8 +try: # EnhancedHybridMetaHeuristicOptimizerV8 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV8 import ( + EnhancedHybridMetaHeuristicOptimizerV8, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV8"] = EnhancedHybridMetaHeuristicOptimizerV8 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV8 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV8" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV8", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV8 print("EnhancedHybridMetaHeuristicOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV9 import EnhancedHybridMetaHeuristicOptimizerV9 +try: # EnhancedHybridMetaHeuristicOptimizerV9 + from nevergrad.optimization.lama.EnhancedHybridMetaHeuristicOptimizerV9 import ( + EnhancedHybridMetaHeuristicOptimizerV9, + ) lama_register["EnhancedHybridMetaHeuristicOptimizerV9"] = EnhancedHybridMetaHeuristicOptimizerV9 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9").set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaHeuristicOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaHeuristicOptimizerV9" + ).set_name("LLAMAEnhancedHybridMetaHeuristicOptimizerV9", register=True) +except Exception as e: # EnhancedHybridMetaHeuristicOptimizerV9 print("EnhancedHybridMetaHeuristicOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithm import EnhancedHybridMetaOptimizationAlgorithm +try: # EnhancedHybridMetaOptimizationAlgorithm + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithm import ( + EnhancedHybridMetaOptimizationAlgorithm, + ) lama_register["EnhancedHybridMetaOptimizationAlgorithm"] = EnhancedHybridMetaOptimizationAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithm").set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaOptimizationAlgorithm" + ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithm", register=True) +except Exception as e: # EnhancedHybridMetaOptimizationAlgorithm print("EnhancedHybridMetaOptimizationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithmV2 import EnhancedHybridMetaOptimizationAlgorithmV2 +try: # EnhancedHybridMetaOptimizationAlgorithmV2 + from nevergrad.optimization.lama.EnhancedHybridMetaOptimizationAlgorithmV2 import ( + EnhancedHybridMetaOptimizationAlgorithmV2, + ) lama_register["EnhancedHybridMetaOptimizationAlgorithmV2"] = EnhancedHybridMetaOptimizationAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridMetaOptimizationAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2").set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridMetaOptimizationAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedHybridMetaOptimizationAlgorithmV2" + ).set_name("LLAMAEnhancedHybridMetaOptimizationAlgorithmV2", register=True) +except Exception as e: # EnhancedHybridMetaOptimizationAlgorithmV2 print("EnhancedHybridMetaOptimizationAlgorithmV2 can not be imported: ", e) -try: +try: # EnhancedHybridOptimization from nevergrad.optimization.lama.EnhancedHybridOptimization import EnhancedHybridOptimization lama_register["EnhancedHybridOptimization"] = EnhancedHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization").set_name("LLAMAEnhancedHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimization").set_name( + "LLAMAEnhancedHybridOptimization", register=True + ) +except Exception as e: # EnhancedHybridOptimization print("EnhancedHybridOptimization can not be imported: ", e) -try: +try: # EnhancedHybridOptimizer from nevergrad.optimization.lama.EnhancedHybridOptimizer import EnhancedHybridOptimizer lama_register["EnhancedHybridOptimizer"] = EnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer").set_name("LLAMAEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedHybridOptimizer").set_name( + "LLAMAEnhancedHybridOptimizer", register=True + ) +except Exception as e: # EnhancedHybridOptimizer print("EnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridQuantumDifferentialPSO import EnhancedHybridQuantumDifferentialPSO +try: # EnhancedHybridQuantumDifferentialPSO + from nevergrad.optimization.lama.EnhancedHybridQuantumDifferentialPSO import ( + EnhancedHybridQuantumDifferentialPSO, + ) lama_register["EnhancedHybridQuantumDifferentialPSO"] = EnhancedHybridQuantumDifferentialPSO - res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuantumDifferentialPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridQuantumDifferentialPSO = NonObjectOptimizer(method="LLAMAEnhancedHybridQuantumDifferentialPSO").set_name("LLAMAEnhancedHybridQuantumDifferentialPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuantumDifferentialPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridQuantumDifferentialPSO = NonObjectOptimizer( + method="LLAMAEnhancedHybridQuantumDifferentialPSO" + ).set_name("LLAMAEnhancedHybridQuantumDifferentialPSO", register=True) +except Exception as e: # EnhancedHybridQuantumDifferentialPSO print("EnhancedHybridQuantumDifferentialPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridQuasiRandomGradientDifferentialEvolution import EnhancedHybridQuasiRandomGradientDifferentialEvolution - - lama_register["EnhancedHybridQuasiRandomGradientDifferentialEvolution"] = EnhancedHybridQuasiRandomGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution").set_name("LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedHybridQuasiRandomGradientDifferentialEvolution + from nevergrad.optimization.lama.EnhancedHybridQuasiRandomGradientDifferentialEvolution import ( + EnhancedHybridQuasiRandomGradientDifferentialEvolution, + ) + + lama_register["EnhancedHybridQuasiRandomGradientDifferentialEvolution"] = ( + EnhancedHybridQuasiRandomGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution" + ).set_name("LLAMAEnhancedHybridQuasiRandomGradientDifferentialEvolution", register=True) +except Exception as e: # EnhancedHybridQuasiRandomGradientDifferentialEvolution print("EnhancedHybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedHybridSearch from nevergrad.optimization.lama.EnhancedHybridSearch import EnhancedHybridSearch lama_register["EnhancedHybridSearch"] = EnhancedHybridSearch - res = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch").set_name("LLAMAEnhancedHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridSearch = NonObjectOptimizer(method="LLAMAEnhancedHybridSearch").set_name( + "LLAMAEnhancedHybridSearch", register=True + ) +except Exception as e: # EnhancedHybridSearch print("EnhancedHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHybridSimulatedAnnealingOptimization import EnhancedHybridSimulatedAnnealingOptimization - - lama_register["EnhancedHybridSimulatedAnnealingOptimization"] = EnhancedHybridSimulatedAnnealingOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedHybridSimulatedAnnealingOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHybridSimulatedAnnealingOptimization = NonObjectOptimizer(method="LLAMAEnhancedHybridSimulatedAnnealingOptimization").set_name("LLAMAEnhancedHybridSimulatedAnnealingOptimization", register=True) -except Exception as e: +try: # EnhancedHybridSimulatedAnnealingOptimization + from nevergrad.optimization.lama.EnhancedHybridSimulatedAnnealingOptimization import ( + EnhancedHybridSimulatedAnnealingOptimization, + ) + + lama_register["EnhancedHybridSimulatedAnnealingOptimization"] = ( + EnhancedHybridSimulatedAnnealingOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHybridSimulatedAnnealingOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHybridSimulatedAnnealingOptimization = NonObjectOptimizer( + method="LLAMAEnhancedHybridSimulatedAnnealingOptimization" + ).set_name("LLAMAEnhancedHybridSimulatedAnnealingOptimization", register=True) +except Exception as e: # EnhancedHybridSimulatedAnnealingOptimization print("EnhancedHybridSimulatedAnnealingOptimization can not be imported: ", e) -try: +try: # EnhancedHyperAdaptiveHybridDEPSO from nevergrad.optimization.lama.EnhancedHyperAdaptiveHybridDEPSO import EnhancedHyperAdaptiveHybridDEPSO lama_register["EnhancedHyperAdaptiveHybridDEPSO"] = EnhancedHyperAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedHyperAdaptiveHybridDEPSO").set_name("LLAMAEnhancedHyperAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedHyperAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedHyperAdaptiveHybridDEPSO", register=True) +except Exception as e: # EnhancedHyperAdaptiveHybridDEPSO print("EnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 import EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 - - lama_register["EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59"] = EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 - res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59").set_name("LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59", register=True) -except Exception as e: +try: # EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 + from nevergrad.optimization.lama.EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 import ( + EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59, + ) + + lama_register["EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59"] = ( + EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59" + ).set_name("LLAMAEnhancedHyperOptimalStrategicEvolutionaryOptimizerV59", register=True) +except Exception as e: # EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 print("EnhancedHyperOptimalStrategicEvolutionaryOptimizerV59 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 import EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 - - lama_register["EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62"] = EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 - res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62").set_name("LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62", register=True) -except Exception as e: +try: # EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 + from nevergrad.optimization.lama.EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 import ( + EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62, + ) + + lama_register["EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62"] = ( + EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62" + ).set_name("LLAMAEnhancedHyperOptimizedEvolutionaryGradientOptimizerV62", register=True) +except Exception as e: # EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 print("EnhancedHyperOptimizedEvolutionaryGradientOptimizerV62 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHyperOptimizedMultiStrategicOptimizerV49 import EnhancedHyperOptimizedMultiStrategicOptimizerV49 - - lama_register["EnhancedHyperOptimizedMultiStrategicOptimizerV49"] = EnhancedHyperOptimizedMultiStrategicOptimizerV49 - res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49 = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49").set_name("LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49", register=True) -except Exception as e: +try: # EnhancedHyperOptimizedMultiStrategicOptimizerV49 + from nevergrad.optimization.lama.EnhancedHyperOptimizedMultiStrategicOptimizerV49 import ( + EnhancedHyperOptimizedMultiStrategicOptimizerV49, + ) + + lama_register["EnhancedHyperOptimizedMultiStrategicOptimizerV49"] = ( + EnhancedHyperOptimizedMultiStrategicOptimizerV49 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49 = NonObjectOptimizer( + method="LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49" + ).set_name("LLAMAEnhancedHyperOptimizedMultiStrategicOptimizerV49", register=True) +except Exception as e: # EnhancedHyperOptimizedMultiStrategicOptimizerV49 print("EnhancedHyperOptimizedMultiStrategicOptimizerV49 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 import EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 - - lama_register["EnhancedHyperParameterTunedMetaHeuristicOptimizerV4"] = EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4").set_name("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4", register=True) -except Exception as e: +try: # EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 + from nevergrad.optimization.lama.EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 import ( + EnhancedHyperParameterTunedMetaHeuristicOptimizerV4, + ) + + lama_register["EnhancedHyperParameterTunedMetaHeuristicOptimizerV4"] = ( + EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4" + ).set_name("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4", register=True) +except Exception as e: # EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 print("EnhancedHyperParameterTunedMetaHeuristicOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedHyperStrategicOptimizerV56 import EnhancedHyperStrategicOptimizerV56 +try: # EnhancedHyperStrategicOptimizerV56 + from nevergrad.optimization.lama.EnhancedHyperStrategicOptimizerV56 import ( + EnhancedHyperStrategicOptimizerV56, + ) lama_register["EnhancedHyperStrategicOptimizerV56"] = EnhancedHyperStrategicOptimizerV56 - res = NonObjectOptimizer(method="LLAMAEnhancedHyperStrategicOptimizerV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedHyperStrategicOptimizerV56 = NonObjectOptimizer(method="LLAMAEnhancedHyperStrategicOptimizerV56").set_name("LLAMAEnhancedHyperStrategicOptimizerV56", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedHyperStrategicOptimizerV56")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedHyperStrategicOptimizerV56 = NonObjectOptimizer( + method="LLAMAEnhancedHyperStrategicOptimizerV56" + ).set_name("LLAMAEnhancedHyperStrategicOptimizerV56", register=True) +except Exception as e: # EnhancedHyperStrategicOptimizerV56 print("EnhancedHyperStrategicOptimizerV56 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedImprovedDifferentialEvolutionLocalSearch_v58 import EnhancedImprovedDifferentialEvolutionLocalSearch_v58 - - lama_register["EnhancedImprovedDifferentialEvolutionLocalSearch_v58"] = EnhancedImprovedDifferentialEvolutionLocalSearch_v58 - res = NonObjectOptimizer(method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58 = NonObjectOptimizer(method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58").set_name("LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58", register=True) -except Exception as e: +try: # EnhancedImprovedDifferentialEvolutionLocalSearch_v58 + from nevergrad.optimization.lama.EnhancedImprovedDifferentialEvolutionLocalSearch_v58 import ( + EnhancedImprovedDifferentialEvolutionLocalSearch_v58, + ) + + lama_register["EnhancedImprovedDifferentialEvolutionLocalSearch_v58"] = ( + EnhancedImprovedDifferentialEvolutionLocalSearch_v58 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58" + ).set_name("LLAMAEnhancedImprovedDifferentialEvolutionLocalSearch_v58", register=True) +except Exception as e: # EnhancedImprovedDifferentialEvolutionLocalSearch_v58 print("EnhancedImprovedDifferentialEvolutionLocalSearch_v58 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer import EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer - - lama_register["EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer"] = EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer").set_name("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer", register=True) -except Exception as e: +try: # EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer + from nevergrad.optimization.lama.EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer import ( + EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer, + ) + + lama_register["EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer"] = ( + EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer", register=True) +except Exception as e: # EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer print("EnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 import EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 - - lama_register["EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77"] = EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 - res = NonObjectOptimizer(method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 = NonObjectOptimizer(method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77").set_name("LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77", register=True) -except Exception as e: +try: # EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 + from nevergrad.optimization.lama.EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 import ( + EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77, + ) + + lama_register["EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77"] = ( + EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77" + ).set_name("LLAMAEnhancedImprovedRefinedUltimateGuidedMassQGSA_v77", register=True) +except Exception as e: # EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 print("EnhancedImprovedRefinedUltimateGuidedMassQGSA_v77 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 import EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 - - lama_register["EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7"] = EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7", register=True) -except Exception as e: +try: # EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 import ( + EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7, + ) + + lama_register["EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7"] = ( + EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 print("EnhancedImprovedSuperDynamicQuantumSwarmOptimizationV7 can not be imported: ", e) -try: +try: # EnhancedIslandEvolutionStrategy from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategy import EnhancedIslandEvolutionStrategy lama_register["EnhancedIslandEvolutionStrategy"] = EnhancedIslandEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedIslandEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategy").set_name("LLAMAEnhancedIslandEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedIslandEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategy" + ).set_name("LLAMAEnhancedIslandEvolutionStrategy", register=True) +except Exception as e: # EnhancedIslandEvolutionStrategy print("EnhancedIslandEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV10 import EnhancedIslandEvolutionStrategyV10 +try: # EnhancedIslandEvolutionStrategyV10 + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV10 import ( + EnhancedIslandEvolutionStrategyV10, + ) lama_register["EnhancedIslandEvolutionStrategyV10"] = EnhancedIslandEvolutionStrategyV10 - res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedIslandEvolutionStrategyV10 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV10").set_name("LLAMAEnhancedIslandEvolutionStrategyV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedIslandEvolutionStrategyV10 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV10" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV10", register=True) +except Exception as e: # EnhancedIslandEvolutionStrategyV10 print("EnhancedIslandEvolutionStrategyV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV3 import EnhancedIslandEvolutionStrategyV3 +try: # EnhancedIslandEvolutionStrategyV3 + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV3 import ( + EnhancedIslandEvolutionStrategyV3, + ) lama_register["EnhancedIslandEvolutionStrategyV3"] = EnhancedIslandEvolutionStrategyV3 - res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedIslandEvolutionStrategyV3 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV3").set_name("LLAMAEnhancedIslandEvolutionStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedIslandEvolutionStrategyV3 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV3" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV3", register=True) +except Exception as e: # EnhancedIslandEvolutionStrategyV3 print("EnhancedIslandEvolutionStrategyV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV7 import EnhancedIslandEvolutionStrategyV7 +try: # EnhancedIslandEvolutionStrategyV7 + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV7 import ( + EnhancedIslandEvolutionStrategyV7, + ) lama_register["EnhancedIslandEvolutionStrategyV7"] = EnhancedIslandEvolutionStrategyV7 - res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedIslandEvolutionStrategyV7 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV7").set_name("LLAMAEnhancedIslandEvolutionStrategyV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedIslandEvolutionStrategyV7 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV7" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV7", register=True) +except Exception as e: # EnhancedIslandEvolutionStrategyV7 print("EnhancedIslandEvolutionStrategyV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV8 import EnhancedIslandEvolutionStrategyV8 +try: # EnhancedIslandEvolutionStrategyV8 + from nevergrad.optimization.lama.EnhancedIslandEvolutionStrategyV8 import ( + EnhancedIslandEvolutionStrategyV8, + ) lama_register["EnhancedIslandEvolutionStrategyV8"] = EnhancedIslandEvolutionStrategyV8 - res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedIslandEvolutionStrategyV8 = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV8").set_name("LLAMAEnhancedIslandEvolutionStrategyV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedIslandEvolutionStrategyV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedIslandEvolutionStrategyV8 = NonObjectOptimizer( + method="LLAMAEnhancedIslandEvolutionStrategyV8" + ).set_name("LLAMAEnhancedIslandEvolutionStrategyV8", register=True) +except Exception as e: # EnhancedIslandEvolutionStrategyV8 print("EnhancedIslandEvolutionStrategyV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedLocalSearchAdaptiveStrategyV29 import EnhancedLocalSearchAdaptiveStrategyV29 +try: # EnhancedLocalSearchAdaptiveStrategyV29 + from nevergrad.optimization.lama.EnhancedLocalSearchAdaptiveStrategyV29 import ( + EnhancedLocalSearchAdaptiveStrategyV29, + ) lama_register["EnhancedLocalSearchAdaptiveStrategyV29"] = EnhancedLocalSearchAdaptiveStrategyV29 - res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedLocalSearchAdaptiveStrategyV29 = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29").set_name("LLAMAEnhancedLocalSearchAdaptiveStrategyV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedLocalSearchAdaptiveStrategyV29 = NonObjectOptimizer( + method="LLAMAEnhancedLocalSearchAdaptiveStrategyV29" + ).set_name("LLAMAEnhancedLocalSearchAdaptiveStrategyV29", register=True) +except Exception as e: # EnhancedLocalSearchAdaptiveStrategyV29 print("EnhancedLocalSearchAdaptiveStrategyV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedLocalSearchQuantumSimulatedAnnealingV6 import EnhancedLocalSearchQuantumSimulatedAnnealingV6 - - lama_register["EnhancedLocalSearchQuantumSimulatedAnnealingV6"] = EnhancedLocalSearchQuantumSimulatedAnnealingV6 - res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6 = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6").set_name("LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6", register=True) -except Exception as e: +try: # EnhancedLocalSearchQuantumSimulatedAnnealingV6 + from nevergrad.optimization.lama.EnhancedLocalSearchQuantumSimulatedAnnealingV6 import ( + EnhancedLocalSearchQuantumSimulatedAnnealingV6, + ) + + lama_register["EnhancedLocalSearchQuantumSimulatedAnnealingV6"] = ( + EnhancedLocalSearchQuantumSimulatedAnnealingV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6 = NonObjectOptimizer( + method="LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6" + ).set_name("LLAMAEnhancedLocalSearchQuantumSimulatedAnnealingV6", register=True) +except Exception as e: # EnhancedLocalSearchQuantumSimulatedAnnealingV6 print("EnhancedLocalSearchQuantumSimulatedAnnealingV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemeticDifferentialEvolution import EnhancedMemeticDifferentialEvolution +try: # EnhancedMemeticDifferentialEvolution + from nevergrad.optimization.lama.EnhancedMemeticDifferentialEvolution import ( + EnhancedMemeticDifferentialEvolution, + ) lama_register["EnhancedMemeticDifferentialEvolution"] = EnhancedMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedMemeticDifferentialEvolution").set_name("LLAMAEnhancedMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedMemeticDifferentialEvolution" + ).set_name("LLAMAEnhancedMemeticDifferentialEvolution", register=True) +except Exception as e: # EnhancedMemeticDifferentialEvolution print("EnhancedMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemeticEvolutionarySearch import EnhancedMemeticEvolutionarySearch +try: # EnhancedMemeticEvolutionarySearch + from nevergrad.optimization.lama.EnhancedMemeticEvolutionarySearch import ( + EnhancedMemeticEvolutionarySearch, + ) lama_register["EnhancedMemeticEvolutionarySearch"] = EnhancedMemeticEvolutionarySearch - res = NonObjectOptimizer(method="LLAMAEnhancedMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemeticEvolutionarySearch = NonObjectOptimizer(method="LLAMAEnhancedMemeticEvolutionarySearch").set_name("LLAMAEnhancedMemeticEvolutionarySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMemeticEvolutionarySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemeticEvolutionarySearch = NonObjectOptimizer( + method="LLAMAEnhancedMemeticEvolutionarySearch" + ).set_name("LLAMAEnhancedMemeticEvolutionarySearch", register=True) +except Exception as e: # EnhancedMemeticEvolutionarySearch print("EnhancedMemeticEvolutionarySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemeticHarmonyOptimization import EnhancedMemeticHarmonyOptimization +try: # EnhancedMemeticHarmonyOptimization + from nevergrad.optimization.lama.EnhancedMemeticHarmonyOptimization import ( + EnhancedMemeticHarmonyOptimization, + ) lama_register["EnhancedMemeticHarmonyOptimization"] = EnhancedMemeticHarmonyOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAEnhancedMemeticHarmonyOptimization").set_name("LLAMAEnhancedMemeticHarmonyOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAEnhancedMemeticHarmonyOptimization" + ).set_name("LLAMAEnhancedMemeticHarmonyOptimization", register=True) +except Exception as e: # EnhancedMemeticHarmonyOptimization print("EnhancedMemeticHarmonyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemoryAdaptiveDynamicHybridOptimizer import EnhancedMemoryAdaptiveDynamicHybridOptimizer - - lama_register["EnhancedMemoryAdaptiveDynamicHybridOptimizer"] = EnhancedMemoryAdaptiveDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer").set_name("LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer", register=True) -except Exception as e: +try: # EnhancedMemoryAdaptiveDynamicHybridOptimizer + from nevergrad.optimization.lama.EnhancedMemoryAdaptiveDynamicHybridOptimizer import ( + EnhancedMemoryAdaptiveDynamicHybridOptimizer, + ) + + lama_register["EnhancedMemoryAdaptiveDynamicHybridOptimizer"] = ( + EnhancedMemoryAdaptiveDynamicHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMAEnhancedMemoryAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: # EnhancedMemoryAdaptiveDynamicHybridOptimizer print("EnhancedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 import EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 - - lama_register["EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77"] = EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 - res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77").set_name("LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77", register=True) -except Exception as e: +try: # EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 import ( + EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77, + ) + + lama_register["EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77"] = ( + EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77", register=True) +except Exception as e: # EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 print("EnhancedMemoryGuidedAdaptiveDualPhaseStrategyV77 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV41 import EnhancedMemoryGuidedAdaptiveStrategyV41 +try: # EnhancedMemoryGuidedAdaptiveStrategyV41 + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV41 import ( + EnhancedMemoryGuidedAdaptiveStrategyV41, + ) lama_register["EnhancedMemoryGuidedAdaptiveStrategyV41"] = EnhancedMemoryGuidedAdaptiveStrategyV41 - res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41").set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV41", register=True) +except Exception as e: # EnhancedMemoryGuidedAdaptiveStrategyV41 print("EnhancedMemoryGuidedAdaptiveStrategyV41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV69 import EnhancedMemoryGuidedAdaptiveStrategyV69 +try: # EnhancedMemoryGuidedAdaptiveStrategyV69 + from nevergrad.optimization.lama.EnhancedMemoryGuidedAdaptiveStrategyV69 import ( + EnhancedMemoryGuidedAdaptiveStrategyV69, + ) lama_register["EnhancedMemoryGuidedAdaptiveStrategyV69"] = EnhancedMemoryGuidedAdaptiveStrategyV69 - res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69 = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69").set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69 = NonObjectOptimizer( + method="LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69" + ).set_name("LLAMAEnhancedMemoryGuidedAdaptiveStrategyV69", register=True) +except Exception as e: # EnhancedMemoryGuidedAdaptiveStrategyV69 print("EnhancedMemoryGuidedAdaptiveStrategyV69 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaDynamicPrecisionOptimizerV1 import EnhancedMetaDynamicPrecisionOptimizerV1 +try: # EnhancedMetaDynamicPrecisionOptimizerV1 + from nevergrad.optimization.lama.EnhancedMetaDynamicPrecisionOptimizerV1 import ( + EnhancedMetaDynamicPrecisionOptimizerV1, + ) lama_register["EnhancedMetaDynamicPrecisionOptimizerV1"] = EnhancedMetaDynamicPrecisionOptimizerV1 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1").set_name("LLAMAEnhancedMetaDynamicPrecisionOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedMetaDynamicPrecisionOptimizerV1" + ).set_name("LLAMAEnhancedMetaDynamicPrecisionOptimizerV1", register=True) +except Exception as e: # EnhancedMetaDynamicPrecisionOptimizerV1 print("EnhancedMetaDynamicPrecisionOptimizerV1 can not be imported: ", e) -try: +try: # EnhancedMetaHeuristicOptimizerV2 from nevergrad.optimization.lama.EnhancedMetaHeuristicOptimizerV2 import EnhancedMetaHeuristicOptimizerV2 lama_register["EnhancedMetaHeuristicOptimizerV2"] = EnhancedMetaHeuristicOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaHeuristicOptimizerV2 = NonObjectOptimizer(method="LLAMAEnhancedMetaHeuristicOptimizerV2").set_name("LLAMAEnhancedMetaHeuristicOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaHeuristicOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaHeuristicOptimizerV2 = NonObjectOptimizer( + method="LLAMAEnhancedMetaHeuristicOptimizerV2" + ).set_name("LLAMAEnhancedMetaHeuristicOptimizerV2", register=True) +except Exception as e: # EnhancedMetaHeuristicOptimizerV2 print("EnhancedMetaHeuristicOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V1, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V1"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V1", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V2, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V2"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V2", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V3, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V3"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V3", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V4, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V4"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V4", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V5, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V5"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V5", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V6, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V6"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V6", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 import EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 +try: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 + from nevergrad.optimization.lama.EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 import ( + EnhancedMetaNetAQAPSO_LS_DIW_AP_V7, + ) lama_register["EnhancedMetaNetAQAPSO_LS_DIW_AP_V7"] = EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7").set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7 = NonObjectOptimizer( + method="LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7" + ).set_name("LLAMAEnhancedMetaNetAQAPSO_LS_DIW_AP_V7", register=True) +except Exception as e: # EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 print("EnhancedMetaNetAQAPSO_LS_DIW_AP_V7 can not be imported: ", e) -try: +try: # EnhancedMetaNetAQAPSOv2 from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv2 import EnhancedMetaNetAQAPSOv2 lama_register["EnhancedMetaNetAQAPSOv2"] = EnhancedMetaNetAQAPSOv2 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2").set_name("LLAMAEnhancedMetaNetAQAPSOv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv2").set_name( + "LLAMAEnhancedMetaNetAQAPSOv2", register=True + ) +except Exception as e: # EnhancedMetaNetAQAPSOv2 print("EnhancedMetaNetAQAPSOv2 can not be imported: ", e) -try: +try: # EnhancedMetaNetAQAPSOv3 from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv3 import EnhancedMetaNetAQAPSOv3 lama_register["EnhancedMetaNetAQAPSOv3"] = EnhancedMetaNetAQAPSOv3 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3").set_name("LLAMAEnhancedMetaNetAQAPSOv3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSOv3 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv3").set_name( + "LLAMAEnhancedMetaNetAQAPSOv3", register=True + ) +except Exception as e: # EnhancedMetaNetAQAPSOv3 print("EnhancedMetaNetAQAPSOv3 can not be imported: ", e) -try: +try: # EnhancedMetaNetAQAPSOv4 from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv4 import EnhancedMetaNetAQAPSOv4 lama_register["EnhancedMetaNetAQAPSOv4"] = EnhancedMetaNetAQAPSOv4 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4").set_name("LLAMAEnhancedMetaNetAQAPSOv4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv4").set_name( + "LLAMAEnhancedMetaNetAQAPSOv4", register=True + ) +except Exception as e: # EnhancedMetaNetAQAPSOv4 print("EnhancedMetaNetAQAPSOv4 can not be imported: ", e) -try: +try: # EnhancedMetaNetAQAPSOv5 from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv5 import EnhancedMetaNetAQAPSOv5 lama_register["EnhancedMetaNetAQAPSOv5"] = EnhancedMetaNetAQAPSOv5 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSOv5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5").set_name("LLAMAEnhancedMetaNetAQAPSOv5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSOv5 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv5").set_name( + "LLAMAEnhancedMetaNetAQAPSOv5", register=True + ) +except Exception as e: # EnhancedMetaNetAQAPSOv5 print("EnhancedMetaNetAQAPSOv5 can not be imported: ", e) -try: +try: # EnhancedMetaNetAQAPSOv6 from nevergrad.optimization.lama.EnhancedMetaNetAQAPSOv6 import EnhancedMetaNetAQAPSOv6 lama_register["EnhancedMetaNetAQAPSOv6"] = EnhancedMetaNetAQAPSOv6 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetAQAPSOv6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6").set_name("LLAMAEnhancedMetaNetAQAPSOv6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetAQAPSOv6 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetAQAPSOv6").set_name( + "LLAMAEnhancedMetaNetAQAPSOv6", register=True + ) +except Exception as e: # EnhancedMetaNetAQAPSOv6 print("EnhancedMetaNetAQAPSOv6 can not be imported: ", e) -try: +try: # EnhancedMetaNetPSO from nevergrad.optimization.lama.EnhancedMetaNetPSO import EnhancedMetaNetPSO lama_register["EnhancedMetaNetPSO"] = EnhancedMetaNetPSO - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO").set_name("LLAMAEnhancedMetaNetPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetPSO = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSO").set_name( + "LLAMAEnhancedMetaNetPSO", register=True + ) +except Exception as e: # EnhancedMetaNetPSO print("EnhancedMetaNetPSO can not be imported: ", e) -try: +try: # EnhancedMetaNetPSOv2 from nevergrad.optimization.lama.EnhancedMetaNetPSOv2 import EnhancedMetaNetPSOv2 lama_register["EnhancedMetaNetPSOv2"] = EnhancedMetaNetPSOv2 - res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaNetPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2").set_name("LLAMAEnhancedMetaNetPSOv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaNetPSOv2 = NonObjectOptimizer(method="LLAMAEnhancedMetaNetPSOv2").set_name( + "LLAMAEnhancedMetaNetPSOv2", register=True + ) +except Exception as e: # EnhancedMetaNetPSOv2 print("EnhancedMetaNetPSOv2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMetaPopulationAdaptiveGradientSearch import EnhancedMetaPopulationAdaptiveGradientSearch - - lama_register["EnhancedMetaPopulationAdaptiveGradientSearch"] = EnhancedMetaPopulationAdaptiveGradientSearch - res = NonObjectOptimizer(method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMetaPopulationAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch").set_name("LLAMAEnhancedMetaPopulationAdaptiveGradientSearch", register=True) -except Exception as e: +try: # EnhancedMetaPopulationAdaptiveGradientSearch + from nevergrad.optimization.lama.EnhancedMetaPopulationAdaptiveGradientSearch import ( + EnhancedMetaPopulationAdaptiveGradientSearch, + ) + + lama_register["EnhancedMetaPopulationAdaptiveGradientSearch"] = ( + EnhancedMetaPopulationAdaptiveGradientSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMetaPopulationAdaptiveGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedMetaPopulationAdaptiveGradientSearch" + ).set_name("LLAMAEnhancedMetaPopulationAdaptiveGradientSearch", register=True) +except Exception as e: # EnhancedMetaPopulationAdaptiveGradientSearch print("EnhancedMetaPopulationAdaptiveGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiFocalAdaptiveOptimizer import EnhancedMultiFocalAdaptiveOptimizer +try: # EnhancedMultiFocalAdaptiveOptimizer + from nevergrad.optimization.lama.EnhancedMultiFocalAdaptiveOptimizer import ( + EnhancedMultiFocalAdaptiveOptimizer, + ) lama_register["EnhancedMultiFocalAdaptiveOptimizer"] = EnhancedMultiFocalAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiFocalAdaptiveOptimizer").set_name("LLAMAEnhancedMultiFocalAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiFocalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiFocalAdaptiveOptimizer" + ).set_name("LLAMAEnhancedMultiFocalAdaptiveOptimizer", register=True) +except Exception as e: # EnhancedMultiFocalAdaptiveOptimizer print("EnhancedMultiFocalAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiModalAdaptiveOptimizer import EnhancedMultiModalAdaptiveOptimizer +try: # EnhancedMultiModalAdaptiveOptimizer + from nevergrad.optimization.lama.EnhancedMultiModalAdaptiveOptimizer import ( + EnhancedMultiModalAdaptiveOptimizer, + ) lama_register["EnhancedMultiModalAdaptiveOptimizer"] = EnhancedMultiModalAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiModalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalAdaptiveOptimizer").set_name("LLAMAEnhancedMultiModalAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiModalAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalAdaptiveOptimizer" + ).set_name("LLAMAEnhancedMultiModalAdaptiveOptimizer", register=True) +except Exception as e: # EnhancedMultiModalAdaptiveOptimizer print("EnhancedMultiModalAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiModalConvergenceOptimizer import EnhancedMultiModalConvergenceOptimizer +try: # EnhancedMultiModalConvergenceOptimizer + from nevergrad.optimization.lama.EnhancedMultiModalConvergenceOptimizer import ( + EnhancedMultiModalConvergenceOptimizer, + ) lama_register["EnhancedMultiModalConvergenceOptimizer"] = EnhancedMultiModalConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiModalConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalConvergenceOptimizer").set_name("LLAMAEnhancedMultiModalConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiModalConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalConvergenceOptimizer" + ).set_name("LLAMAEnhancedMultiModalConvergenceOptimizer", register=True) +except Exception as e: # EnhancedMultiModalConvergenceOptimizer print("EnhancedMultiModalConvergenceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiModalExplorationStrategy import EnhancedMultiModalExplorationStrategy +try: # EnhancedMultiModalExplorationStrategy + from nevergrad.optimization.lama.EnhancedMultiModalExplorationStrategy import ( + EnhancedMultiModalExplorationStrategy, + ) lama_register["EnhancedMultiModalExplorationStrategy"] = EnhancedMultiModalExplorationStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalExplorationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiModalExplorationStrategy = NonObjectOptimizer(method="LLAMAEnhancedMultiModalExplorationStrategy").set_name("LLAMAEnhancedMultiModalExplorationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalExplorationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiModalExplorationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalExplorationStrategy" + ).set_name("LLAMAEnhancedMultiModalExplorationStrategy", register=True) +except Exception as e: # EnhancedMultiModalExplorationStrategy print("EnhancedMultiModalExplorationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiModalMemoryHybridOptimizer import EnhancedMultiModalMemoryHybridOptimizer +try: # EnhancedMultiModalMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedMultiModalMemoryHybridOptimizer import ( + EnhancedMultiModalMemoryHybridOptimizer, + ) lama_register["EnhancedMultiModalMemoryHybridOptimizer"] = EnhancedMultiModalMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiModalMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiModalMemoryHybridOptimizer").set_name("LLAMAEnhancedMultiModalMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiModalMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiModalMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiModalMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedMultiModalMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedMultiModalMemoryHybridOptimizer print("EnhancedMultiModalMemoryHybridOptimizer can not be imported: ", e) -try: +try: # EnhancedMultiOperatorSearch from nevergrad.optimization.lama.EnhancedMultiOperatorSearch import EnhancedMultiOperatorSearch lama_register["EnhancedMultiOperatorSearch"] = EnhancedMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch").set_name("LLAMAEnhancedMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch").set_name( + "LLAMAEnhancedMultiOperatorSearch", register=True + ) +except Exception as e: # EnhancedMultiOperatorSearch print("EnhancedMultiOperatorSearch can not be imported: ", e) -try: +try: # EnhancedMultiOperatorSearch2 from nevergrad.optimization.lama.EnhancedMultiOperatorSearch2 import EnhancedMultiOperatorSearch2 lama_register["EnhancedMultiOperatorSearch2"] = EnhancedMultiOperatorSearch2 - res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiOperatorSearch2 = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch2").set_name("LLAMAEnhancedMultiOperatorSearch2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiOperatorSearch2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiOperatorSearch2 = NonObjectOptimizer( + method="LLAMAEnhancedMultiOperatorSearch2" + ).set_name("LLAMAEnhancedMultiOperatorSearch2", register=True) +except Exception as e: # EnhancedMultiOperatorSearch2 print("EnhancedMultiOperatorSearch2 can not be imported: ", e) -try: +try: # EnhancedMultiPhaseAdaptiveDE from nevergrad.optimization.lama.EnhancedMultiPhaseAdaptiveDE import EnhancedMultiPhaseAdaptiveDE lama_register["EnhancedMultiPhaseAdaptiveDE"] = EnhancedMultiPhaseAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseAdaptiveDE").set_name("LLAMAEnhancedMultiPhaseAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedMultiPhaseAdaptiveDE" + ).set_name("LLAMAEnhancedMultiPhaseAdaptiveDE", register=True) +except Exception as e: # EnhancedMultiPhaseAdaptiveDE print("EnhancedMultiPhaseAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiPhaseOptimizationAlgorithm import EnhancedMultiPhaseOptimizationAlgorithm +try: # EnhancedMultiPhaseOptimizationAlgorithm + from nevergrad.optimization.lama.EnhancedMultiPhaseOptimizationAlgorithm import ( + EnhancedMultiPhaseOptimizationAlgorithm, + ) lama_register["EnhancedMultiPhaseOptimizationAlgorithm"] = EnhancedMultiPhaseOptimizationAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm").set_name("LLAMAEnhancedMultiPhaseOptimizationAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiPhaseOptimizationAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedMultiPhaseOptimizationAlgorithm" + ).set_name("LLAMAEnhancedMultiPhaseOptimizationAlgorithm", register=True) +except Exception as e: # EnhancedMultiPhaseOptimizationAlgorithm print("EnhancedMultiPhaseOptimizationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiStageGradientBoostedAnnealing import EnhancedMultiStageGradientBoostedAnnealing +try: # EnhancedMultiStageGradientBoostedAnnealing + from nevergrad.optimization.lama.EnhancedMultiStageGradientBoostedAnnealing import ( + EnhancedMultiStageGradientBoostedAnnealing, + ) lama_register["EnhancedMultiStageGradientBoostedAnnealing"] = EnhancedMultiStageGradientBoostedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedMultiStageGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiStageGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedMultiStageGradientBoostedAnnealing").set_name("LLAMAEnhancedMultiStageGradientBoostedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiStageGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiStageGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedMultiStageGradientBoostedAnnealing" + ).set_name("LLAMAEnhancedMultiStageGradientBoostedAnnealing", register=True) +except Exception as e: # EnhancedMultiStageGradientBoostedAnnealing print("EnhancedMultiStageGradientBoostedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiStrategyDifferentialEvolution import EnhancedMultiStrategyDifferentialEvolution +try: # EnhancedMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.EnhancedMultiStrategyDifferentialEvolution import ( + EnhancedMultiStrategyDifferentialEvolution, + ) lama_register["EnhancedMultiStrategyDifferentialEvolution"] = EnhancedMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedMultiStrategyDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # EnhancedMultiStrategyDifferentialEvolution print("EnhancedMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedMultiStrategyQuantumLevyOptimizer import EnhancedMultiStrategyQuantumLevyOptimizer +try: # EnhancedMultiStrategyQuantumLevyOptimizer + from nevergrad.optimization.lama.EnhancedMultiStrategyQuantumLevyOptimizer import ( + EnhancedMultiStrategyQuantumLevyOptimizer, + ) lama_register["EnhancedMultiStrategyQuantumLevyOptimizer"] = EnhancedMultiStrategyQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer").set_name("LLAMAEnhancedMultiStrategyQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedMultiStrategyQuantumLevyOptimizer" + ).set_name("LLAMAEnhancedMultiStrategyQuantumLevyOptimizer", register=True) +except Exception as e: # EnhancedMultiStrategyQuantumLevyOptimizer print("EnhancedMultiStrategyQuantumLevyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedNicheDifferentialParticleSwarmOptimizer import EnhancedNicheDifferentialParticleSwarmOptimizer - - lama_register["EnhancedNicheDifferentialParticleSwarmOptimizer"] = EnhancedNicheDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # EnhancedNicheDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.EnhancedNicheDifferentialParticleSwarmOptimizer import ( + EnhancedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedNicheDifferentialParticleSwarmOptimizer"] = ( + EnhancedNicheDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedNicheDifferentialParticleSwarmOptimizer print("EnhancedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOppositionBasedDifferentialEvolution import EnhancedOppositionBasedDifferentialEvolution - - lama_register["EnhancedOppositionBasedDifferentialEvolution"] = EnhancedOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedDifferentialEvolution").set_name("LLAMAEnhancedOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.EnhancedOppositionBasedDifferentialEvolution import ( + EnhancedOppositionBasedDifferentialEvolution, + ) + + lama_register["EnhancedOppositionBasedDifferentialEvolution"] = ( + EnhancedOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedDifferentialEvolution" + ).set_name("LLAMAEnhancedOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # EnhancedOppositionBasedDifferentialEvolution print("EnhancedOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearch import EnhancedOppositionBasedHarmonySearch +try: # EnhancedOppositionBasedHarmonySearch + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearch import ( + EnhancedOppositionBasedHarmonySearch, + ) lama_register["EnhancedOppositionBasedHarmonySearch"] = EnhancedOppositionBasedHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOppositionBasedHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearch").set_name("LLAMAEnhancedOppositionBasedHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOppositionBasedHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearch" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearch", register=True) +except Exception as e: # EnhancedOppositionBasedHarmonySearch print("EnhancedOppositionBasedHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidth import EnhancedOppositionBasedHarmonySearchDynamicBandwidth - - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidth"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidth - res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth", register=True) -except Exception as e: +try: # EnhancedOppositionBasedHarmonySearchDynamicBandwidth + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidth import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidth, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidth"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidth + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth", register=True) +except Exception as e: # EnhancedOppositionBasedHarmonySearchDynamicBandwidth print("EnhancedOppositionBasedHarmonySearchDynamicBandwidth can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC import EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC - - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC - res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC", register=True) -except Exception as e: +try: # EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC", register=True) +except Exception as e: # EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthABC can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE import EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE - - lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE - res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE").set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) -except Exception as e: +try: # EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE + from nevergrad.optimization.lama.EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE import ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE, + ) + + lama_register["EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE"] = ( + EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE = NonObjectOptimizer( + method="LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE" + ).set_name("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE", register=True) +except Exception as e: # EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE print("EnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOptimalEvolutionaryGradientOptimizerV9 import EnhancedOptimalEvolutionaryGradientOptimizerV9 - - lama_register["EnhancedOptimalEvolutionaryGradientOptimizerV9"] = EnhancedOptimalEvolutionaryGradientOptimizerV9 - res = NonObjectOptimizer(method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9 = NonObjectOptimizer(method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9").set_name("LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9", register=True) -except Exception as e: +try: # EnhancedOptimalEvolutionaryGradientOptimizerV9 + from nevergrad.optimization.lama.EnhancedOptimalEvolutionaryGradientOptimizerV9 import ( + EnhancedOptimalEvolutionaryGradientOptimizerV9, + ) + + lama_register["EnhancedOptimalEvolutionaryGradientOptimizerV9"] = ( + EnhancedOptimalEvolutionaryGradientOptimizerV9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9 = NonObjectOptimizer( + method="LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9" + ).set_name("LLAMAEnhancedOptimalEvolutionaryGradientOptimizerV9", register=True) +except Exception as e: # EnhancedOptimalEvolutionaryGradientOptimizerV9 print("EnhancedOptimalEvolutionaryGradientOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOptimalPrecisionEvolutionaryThermalOptimizer import EnhancedOptimalPrecisionEvolutionaryThermalOptimizer - - lama_register["EnhancedOptimalPrecisionEvolutionaryThermalOptimizer"] = EnhancedOptimalPrecisionEvolutionaryThermalOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer", register=True) -except Exception as e: +try: # EnhancedOptimalPrecisionEvolutionaryThermalOptimizer + from nevergrad.optimization.lama.EnhancedOptimalPrecisionEvolutionaryThermalOptimizer import ( + EnhancedOptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["EnhancedOptimalPrecisionEvolutionaryThermalOptimizer"] = ( + EnhancedOptimalPrecisionEvolutionaryThermalOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAEnhancedOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: # EnhancedOptimalPrecisionEvolutionaryThermalOptimizer print("EnhancedOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOptimizedEvolutiveStrategy import EnhancedOptimizedEvolutiveStrategy +try: # EnhancedOptimizedEvolutiveStrategy + from nevergrad.optimization.lama.EnhancedOptimizedEvolutiveStrategy import ( + EnhancedOptimizedEvolutiveStrategy, + ) lama_register["EnhancedOptimizedEvolutiveStrategy"] = EnhancedOptimizedEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedOptimizedEvolutiveStrategy").set_name("LLAMAEnhancedOptimizedEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedOptimizedEvolutiveStrategy" + ).set_name("LLAMAEnhancedOptimizedEvolutiveStrategy", register=True) +except Exception as e: # EnhancedOptimizedEvolutiveStrategy print("EnhancedOptimizedEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 import EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 - - lama_register["EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46"] = EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 - res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 = NonObjectOptimizer(method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46").set_name("LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46", register=True) -except Exception as e: +try: # EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 + from nevergrad.optimization.lama.EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 import ( + EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46, + ) + + lama_register["EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46"] = ( + EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 = NonObjectOptimizer( + method="LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46" + ).set_name("LLAMAEnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46", register=True) +except Exception as e: # EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 print("EnhancedOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV46 can not be imported: ", e) -try: +try: # EnhancedOrthogonalDE from nevergrad.optimization.lama.EnhancedOrthogonalDE import EnhancedOrthogonalDE lama_register["EnhancedOrthogonalDE"] = EnhancedOrthogonalDE - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDE = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE").set_name("LLAMAEnhancedOrthogonalDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDE = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDE").set_name( + "LLAMAEnhancedOrthogonalDE", register=True + ) +except Exception as e: # EnhancedOrthogonalDE print("EnhancedOrthogonalDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolution import EnhancedOrthogonalDifferentialEvolution +try: # EnhancedOrthogonalDifferentialEvolution + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolution import ( + EnhancedOrthogonalDifferentialEvolution, + ) lama_register["EnhancedOrthogonalDifferentialEvolution"] = EnhancedOrthogonalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolution").set_name("LLAMAEnhancedOrthogonalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolution" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolution", register=True) +except Exception as e: # EnhancedOrthogonalDifferentialEvolution print("EnhancedOrthogonalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionImproved import EnhancedOrthogonalDifferentialEvolutionImproved - - lama_register["EnhancedOrthogonalDifferentialEvolutionImproved"] = EnhancedOrthogonalDifferentialEvolutionImproved - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDifferentialEvolutionImproved = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved", register=True) -except Exception as e: +try: # EnhancedOrthogonalDifferentialEvolutionImproved + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionImproved import ( + EnhancedOrthogonalDifferentialEvolutionImproved, + ) + + lama_register["EnhancedOrthogonalDifferentialEvolutionImproved"] = ( + EnhancedOrthogonalDifferentialEvolutionImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionImproved = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionImproved" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved", register=True) +except Exception as e: # EnhancedOrthogonalDifferentialEvolutionImproved print("EnhancedOrthogonalDifferentialEvolutionImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV2 import EnhancedOrthogonalDifferentialEvolutionV2 +try: # EnhancedOrthogonalDifferentialEvolutionV2 + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV2 import ( + EnhancedOrthogonalDifferentialEvolutionV2, + ) lama_register["EnhancedOrthogonalDifferentialEvolutionV2"] = EnhancedOrthogonalDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV2" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV2", register=True) +except Exception as e: # EnhancedOrthogonalDifferentialEvolutionV2 print("EnhancedOrthogonalDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV3 import EnhancedOrthogonalDifferentialEvolutionV3 +try: # EnhancedOrthogonalDifferentialEvolutionV3 + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV3 import ( + EnhancedOrthogonalDifferentialEvolutionV3, + ) lama_register["EnhancedOrthogonalDifferentialEvolutionV3"] = EnhancedOrthogonalDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV3" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV3", register=True) +except Exception as e: # EnhancedOrthogonalDifferentialEvolutionV3 print("EnhancedOrthogonalDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV4 import EnhancedOrthogonalDifferentialEvolutionV4 +try: # EnhancedOrthogonalDifferentialEvolutionV4 + from nevergrad.optimization.lama.EnhancedOrthogonalDifferentialEvolutionV4 import ( + EnhancedOrthogonalDifferentialEvolutionV4, + ) lama_register["EnhancedOrthogonalDifferentialEvolutionV4"] = EnhancedOrthogonalDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedOrthogonalDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4").set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedOrthogonalDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAEnhancedOrthogonalDifferentialEvolutionV4" + ).set_name("LLAMAEnhancedOrthogonalDifferentialEvolutionV4", register=True) +except Exception as e: # EnhancedOrthogonalDifferentialEvolutionV4 print("EnhancedOrthogonalDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedParallelDifferentialEvolution import EnhancedParallelDifferentialEvolution +try: # EnhancedParallelDifferentialEvolution + from nevergrad.optimization.lama.EnhancedParallelDifferentialEvolution import ( + EnhancedParallelDifferentialEvolution, + ) lama_register["EnhancedParallelDifferentialEvolution"] = EnhancedParallelDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedParallelDifferentialEvolution").set_name("LLAMAEnhancedParallelDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedParallelDifferentialEvolution" + ).set_name("LLAMAEnhancedParallelDifferentialEvolution", register=True) +except Exception as e: # EnhancedParallelDifferentialEvolution print("EnhancedParallelDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedParticleSwarmOptimization import EnhancedParticleSwarmOptimization +try: # EnhancedParticleSwarmOptimization + from nevergrad.optimization.lama.EnhancedParticleSwarmOptimization import ( + EnhancedParticleSwarmOptimization, + ) lama_register["EnhancedParticleSwarmOptimization"] = EnhancedParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimization").set_name("LLAMAEnhancedParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimization" + ).set_name("LLAMAEnhancedParticleSwarmOptimization", register=True) +except Exception as e: # EnhancedParticleSwarmOptimization print("EnhancedParticleSwarmOptimization can not be imported: ", e) -try: +try: # EnhancedParticleSwarmOptimizer from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizer import EnhancedParticleSwarmOptimizer lama_register["EnhancedParticleSwarmOptimizer"] = EnhancedParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizer").set_name("LLAMAEnhancedParticleSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedParticleSwarmOptimizer print("EnhancedParticleSwarmOptimizer can not be imported: ", e) -try: +try: # EnhancedParticleSwarmOptimizerV4 from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV4 import EnhancedParticleSwarmOptimizerV4 lama_register["EnhancedParticleSwarmOptimizerV4"] = EnhancedParticleSwarmOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParticleSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV4").set_name("LLAMAEnhancedParticleSwarmOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParticleSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV4" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV4", register=True) +except Exception as e: # EnhancedParticleSwarmOptimizerV4 print("EnhancedParticleSwarmOptimizerV4 can not be imported: ", e) -try: +try: # EnhancedParticleSwarmOptimizerV5 from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV5 import EnhancedParticleSwarmOptimizerV5 lama_register["EnhancedParticleSwarmOptimizerV5"] = EnhancedParticleSwarmOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParticleSwarmOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV5").set_name("LLAMAEnhancedParticleSwarmOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParticleSwarmOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV5" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV5", register=True) +except Exception as e: # EnhancedParticleSwarmOptimizerV5 print("EnhancedParticleSwarmOptimizerV5 can not be imported: ", e) -try: +try: # EnhancedParticleSwarmOptimizerV6 from nevergrad.optimization.lama.EnhancedParticleSwarmOptimizerV6 import EnhancedParticleSwarmOptimizerV6 lama_register["EnhancedParticleSwarmOptimizerV6"] = EnhancedParticleSwarmOptimizerV6 - res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedParticleSwarmOptimizerV6 = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV6").set_name("LLAMAEnhancedParticleSwarmOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedParticleSwarmOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedParticleSwarmOptimizerV6 = NonObjectOptimizer( + method="LLAMAEnhancedParticleSwarmOptimizerV6" + ).set_name("LLAMAEnhancedParticleSwarmOptimizerV6", register=True) +except Exception as e: # EnhancedParticleSwarmOptimizerV6 print("EnhancedParticleSwarmOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPhaseAdaptiveMemoryStrategyV75 import EnhancedPhaseAdaptiveMemoryStrategyV75 +try: # EnhancedPhaseAdaptiveMemoryStrategyV75 + from nevergrad.optimization.lama.EnhancedPhaseAdaptiveMemoryStrategyV75 import ( + EnhancedPhaseAdaptiveMemoryStrategyV75, + ) lama_register["EnhancedPhaseAdaptiveMemoryStrategyV75"] = EnhancedPhaseAdaptiveMemoryStrategyV75 - res = NonObjectOptimizer(method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75 = NonObjectOptimizer(method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75").set_name("LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75 = NonObjectOptimizer( + method="LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75" + ).set_name("LLAMAEnhancedPhaseAdaptiveMemoryStrategyV75", register=True) +except Exception as e: # EnhancedPhaseAdaptiveMemoryStrategyV75 print("EnhancedPhaseAdaptiveMemoryStrategyV75 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPhaseTransitionMemoryStrategyV82 import EnhancedPhaseTransitionMemoryStrategyV82 +try: # EnhancedPhaseTransitionMemoryStrategyV82 + from nevergrad.optimization.lama.EnhancedPhaseTransitionMemoryStrategyV82 import ( + EnhancedPhaseTransitionMemoryStrategyV82, + ) lama_register["EnhancedPhaseTransitionMemoryStrategyV82"] = EnhancedPhaseTransitionMemoryStrategyV82 - res = NonObjectOptimizer(method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPhaseTransitionMemoryStrategyV82 = NonObjectOptimizer(method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82").set_name("LLAMAEnhancedPhaseTransitionMemoryStrategyV82", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPhaseTransitionMemoryStrategyV82 = NonObjectOptimizer( + method="LLAMAEnhancedPhaseTransitionMemoryStrategyV82" + ).set_name("LLAMAEnhancedPhaseTransitionMemoryStrategyV82", register=True) +except Exception as e: # EnhancedPhaseTransitionMemoryStrategyV82 print("EnhancedPhaseTransitionMemoryStrategyV82 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveCohortOptimization import EnhancedPrecisionAdaptiveCohortOptimization +try: # EnhancedPrecisionAdaptiveCohortOptimization + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveCohortOptimization import ( + EnhancedPrecisionAdaptiveCohortOptimization, + ) lama_register["EnhancedPrecisionAdaptiveCohortOptimization"] = EnhancedPrecisionAdaptiveCohortOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization").set_name("LLAMAEnhancedPrecisionAdaptiveCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionAdaptiveCohortOptimization" + ).set_name("LLAMAEnhancedPrecisionAdaptiveCohortOptimization", register=True) +except Exception as e: # EnhancedPrecisionAdaptiveCohortOptimization print("EnhancedPrecisionAdaptiveCohortOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveGradientClusteringPSO import EnhancedPrecisionAdaptiveGradientClusteringPSO - - lama_register["EnhancedPrecisionAdaptiveGradientClusteringPSO"] = EnhancedPrecisionAdaptiveGradientClusteringPSO - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO").set_name("LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO", register=True) -except Exception as e: +try: # EnhancedPrecisionAdaptiveGradientClusteringPSO + from nevergrad.optimization.lama.EnhancedPrecisionAdaptiveGradientClusteringPSO import ( + EnhancedPrecisionAdaptiveGradientClusteringPSO, + ) + + lama_register["EnhancedPrecisionAdaptiveGradientClusteringPSO"] = ( + EnhancedPrecisionAdaptiveGradientClusteringPSO + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO" + ).set_name("LLAMAEnhancedPrecisionAdaptiveGradientClusteringPSO", register=True) +except Exception as e: # EnhancedPrecisionAdaptiveGradientClusteringPSO print("EnhancedPrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionBoostedDifferentialEvolution import EnhancedPrecisionBoostedDifferentialEvolution - - lama_register["EnhancedPrecisionBoostedDifferentialEvolution"] = EnhancedPrecisionBoostedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionBoostedDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution").set_name("LLAMAEnhancedPrecisionBoostedDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedPrecisionBoostedDifferentialEvolution + from nevergrad.optimization.lama.EnhancedPrecisionBoostedDifferentialEvolution import ( + EnhancedPrecisionBoostedDifferentialEvolution, + ) + + lama_register["EnhancedPrecisionBoostedDifferentialEvolution"] = ( + EnhancedPrecisionBoostedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionBoostedDifferentialEvolution" + ).set_name("LLAMAEnhancedPrecisionBoostedDifferentialEvolution", register=True) +except Exception as e: # EnhancedPrecisionBoostedDifferentialEvolution print("EnhancedPrecisionBoostedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionConvergenceOptimizer import EnhancedPrecisionConvergenceOptimizer +try: # EnhancedPrecisionConvergenceOptimizer + from nevergrad.optimization.lama.EnhancedPrecisionConvergenceOptimizer import ( + EnhancedPrecisionConvergenceOptimizer, + ) lama_register["EnhancedPrecisionConvergenceOptimizer"] = EnhancedPrecisionConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedPrecisionConvergenceOptimizer").set_name("LLAMAEnhancedPrecisionConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionConvergenceOptimizer" + ).set_name("LLAMAEnhancedPrecisionConvergenceOptimizer", register=True) +except Exception as e: # EnhancedPrecisionConvergenceOptimizer print("EnhancedPrecisionConvergenceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV38 import EnhancedPrecisionEvolutionaryOptimizerV38 +try: # EnhancedPrecisionEvolutionaryOptimizerV38 + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV38 import ( + EnhancedPrecisionEvolutionaryOptimizerV38, + ) lama_register["EnhancedPrecisionEvolutionaryOptimizerV38"] = EnhancedPrecisionEvolutionaryOptimizerV38 - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionEvolutionaryOptimizerV38 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38").set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV38", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionEvolutionaryOptimizerV38 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV38" + ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV38", register=True) +except Exception as e: # EnhancedPrecisionEvolutionaryOptimizerV38 print("EnhancedPrecisionEvolutionaryOptimizerV38 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV39 import EnhancedPrecisionEvolutionaryOptimizerV39 +try: # EnhancedPrecisionEvolutionaryOptimizerV39 + from nevergrad.optimization.lama.EnhancedPrecisionEvolutionaryOptimizerV39 import ( + EnhancedPrecisionEvolutionaryOptimizerV39, + ) lama_register["EnhancedPrecisionEvolutionaryOptimizerV39"] = EnhancedPrecisionEvolutionaryOptimizerV39 - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionEvolutionaryOptimizerV39 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39").set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV39", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionEvolutionaryOptimizerV39 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionEvolutionaryOptimizerV39" + ).set_name("LLAMAEnhancedPrecisionEvolutionaryOptimizerV39", register=True) +except Exception as e: # EnhancedPrecisionEvolutionaryOptimizerV39 print("EnhancedPrecisionEvolutionaryOptimizerV39 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionGuidedQuantumStrategy import EnhancedPrecisionGuidedQuantumStrategy +try: # EnhancedPrecisionGuidedQuantumStrategy + from nevergrad.optimization.lama.EnhancedPrecisionGuidedQuantumStrategy import ( + EnhancedPrecisionGuidedQuantumStrategy, + ) lama_register["EnhancedPrecisionGuidedQuantumStrategy"] = EnhancedPrecisionGuidedQuantumStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionGuidedQuantumStrategy = NonObjectOptimizer(method="LLAMAEnhancedPrecisionGuidedQuantumStrategy").set_name("LLAMAEnhancedPrecisionGuidedQuantumStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionGuidedQuantumStrategy = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionGuidedQuantumStrategy" + ).set_name("LLAMAEnhancedPrecisionGuidedQuantumStrategy", register=True) +except Exception as e: # EnhancedPrecisionGuidedQuantumStrategy print("EnhancedPrecisionGuidedQuantumStrategy can not be imported: ", e) -try: +try: # EnhancedPrecisionHybridSearchV2 from nevergrad.optimization.lama.EnhancedPrecisionHybridSearchV2 import EnhancedPrecisionHybridSearchV2 lama_register["EnhancedPrecisionHybridSearchV2"] = EnhancedPrecisionHybridSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionHybridSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionHybridSearchV2").set_name("LLAMAEnhancedPrecisionHybridSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionHybridSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionHybridSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionHybridSearchV2" + ).set_name("LLAMAEnhancedPrecisionHybridSearchV2", register=True) +except Exception as e: # EnhancedPrecisionHybridSearchV2 print("EnhancedPrecisionHybridSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedPrecisionTunedCrossoverElitistStrategyV14 import EnhancedPrecisionTunedCrossoverElitistStrategyV14 - - lama_register["EnhancedPrecisionTunedCrossoverElitistStrategyV14"] = EnhancedPrecisionTunedCrossoverElitistStrategyV14 - res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14 = NonObjectOptimizer(method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14").set_name("LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14", register=True) -except Exception as e: +try: # EnhancedPrecisionTunedCrossoverElitistStrategyV14 + from nevergrad.optimization.lama.EnhancedPrecisionTunedCrossoverElitistStrategyV14 import ( + EnhancedPrecisionTunedCrossoverElitistStrategyV14, + ) + + lama_register["EnhancedPrecisionTunedCrossoverElitistStrategyV14"] = ( + EnhancedPrecisionTunedCrossoverElitistStrategyV14 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14 = NonObjectOptimizer( + method="LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14" + ).set_name("LLAMAEnhancedPrecisionTunedCrossoverElitistStrategyV14", register=True) +except Exception as e: # EnhancedPrecisionTunedCrossoverElitistStrategyV14 print("EnhancedPrecisionTunedCrossoverElitistStrategyV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedProgressiveAdaptiveDifferentialEvolution import EnhancedProgressiveAdaptiveDifferentialEvolution - - lama_register["EnhancedProgressiveAdaptiveDifferentialEvolution"] = EnhancedProgressiveAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution").set_name("LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedProgressiveAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.EnhancedProgressiveAdaptiveDifferentialEvolution import ( + EnhancedProgressiveAdaptiveDifferentialEvolution, + ) + + lama_register["EnhancedProgressiveAdaptiveDifferentialEvolution"] = ( + EnhancedProgressiveAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution" + ).set_name("LLAMAEnhancedProgressiveAdaptiveDifferentialEvolution", register=True) +except Exception as e: # EnhancedProgressiveAdaptiveDifferentialEvolution print("EnhancedProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedQAPSOAIRVCHR from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHR import EnhancedQAPSOAIRVCHR lama_register["EnhancedQAPSOAIRVCHR"] = EnhancedQAPSOAIRVCHR - res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR").set_name("LLAMAEnhancedQAPSOAIRVCHR", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHR").set_name( + "LLAMAEnhancedQAPSOAIRVCHR", register=True + ) +except Exception as e: # EnhancedQAPSOAIRVCHR print("EnhancedQAPSOAIRVCHR can not be imported: ", e) -try: +try: # EnhancedQAPSOAIRVCHRLS from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLS import EnhancedQAPSOAIRVCHRLS lama_register["EnhancedQAPSOAIRVCHRLS"] = EnhancedQAPSOAIRVCHRLS - res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS").set_name("LLAMAEnhancedQAPSOAIRVCHRLS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLS").set_name( + "LLAMAEnhancedQAPSOAIRVCHRLS", register=True + ) +except Exception as e: # EnhancedQAPSOAIRVCHRLS print("EnhancedQAPSOAIRVCHRLS can not be imported: ", e) -try: +try: # EnhancedQAPSOAIRVCHRLSDP from nevergrad.optimization.lama.EnhancedQAPSOAIRVCHRLSDP import EnhancedQAPSOAIRVCHRLSDP lama_register["EnhancedQAPSOAIRVCHRLSDP"] = EnhancedQAPSOAIRVCHRLSDP - res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQAPSOAIRVCHRLSDP = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP").set_name("LLAMAEnhancedQAPSOAIRVCHRLSDP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQAPSOAIRVCHRLSDP = NonObjectOptimizer(method="LLAMAEnhancedQAPSOAIRVCHRLSDP").set_name( + "LLAMAEnhancedQAPSOAIRVCHRLSDP", register=True + ) +except Exception as e: # EnhancedQAPSOAIRVCHRLSDP print("EnhancedQAPSOAIRVCHRLSDP can not be imported: ", e) -try: +try: # EnhancedQuantumAdaptiveCrossover from nevergrad.optimization.lama.EnhancedQuantumAdaptiveCrossover import EnhancedQuantumAdaptiveCrossover lama_register["EnhancedQuantumAdaptiveCrossover"] = EnhancedQuantumAdaptiveCrossover - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveCrossover = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveCrossover").set_name("LLAMAEnhancedQuantumAdaptiveCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveCrossover = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveCrossover" + ).set_name("LLAMAEnhancedQuantumAdaptiveCrossover", register=True) +except Exception as e: # EnhancedQuantumAdaptiveCrossover print("EnhancedQuantumAdaptiveCrossover can not be imported: ", e) -try: +try: # EnhancedQuantumAdaptiveDE from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDE import EnhancedQuantumAdaptiveDE lama_register["EnhancedQuantumAdaptiveDE"] = EnhancedQuantumAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE").set_name("LLAMAEnhancedQuantumAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDE").set_name( + "LLAMAEnhancedQuantumAdaptiveDE", register=True + ) +except Exception as e: # EnhancedQuantumAdaptiveDE print("EnhancedQuantumAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - - lama_register["EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory").set_name("LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) -except Exception as e: +try: # EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( + EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, + ) + + lama_register["EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( + EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" + ).set_name("LLAMAEnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) +except Exception as e: # EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory print("EnhancedQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveEliteGuidedSearch import EnhancedQuantumAdaptiveEliteGuidedSearch +try: # EnhancedQuantumAdaptiveEliteGuidedSearch + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveEliteGuidedSearch import ( + EnhancedQuantumAdaptiveEliteGuidedSearch, + ) lama_register["EnhancedQuantumAdaptiveEliteGuidedSearch"] = EnhancedQuantumAdaptiveEliteGuidedSearch - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch").set_name("LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch" + ).set_name("LLAMAEnhancedQuantumAdaptiveEliteGuidedSearch", register=True) +except Exception as e: # EnhancedQuantumAdaptiveEliteGuidedSearch print("EnhancedQuantumAdaptiveEliteGuidedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveFireworksOptimizer import EnhancedQuantumAdaptiveFireworksOptimizer +try: # EnhancedQuantumAdaptiveFireworksOptimizer + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveFireworksOptimizer import ( + EnhancedQuantumAdaptiveFireworksOptimizer, + ) lama_register["EnhancedQuantumAdaptiveFireworksOptimizer"] = EnhancedQuantumAdaptiveFireworksOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer").set_name("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveFireworksOptimizer" + ).set_name("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer", register=True) +except Exception as e: # EnhancedQuantumAdaptiveFireworksOptimizer print("EnhancedQuantumAdaptiveFireworksOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveGradientDiversityExplorer import EnhancedQuantumAdaptiveGradientDiversityExplorer - - lama_register["EnhancedQuantumAdaptiveGradientDiversityExplorer"] = EnhancedQuantumAdaptiveGradientDiversityExplorer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer").set_name("LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer", register=True) -except Exception as e: +try: # EnhancedQuantumAdaptiveGradientDiversityExplorer + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveGradientDiversityExplorer import ( + EnhancedQuantumAdaptiveGradientDiversityExplorer, + ) + + lama_register["EnhancedQuantumAdaptiveGradientDiversityExplorer"] = ( + EnhancedQuantumAdaptiveGradientDiversityExplorer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer" + ).set_name("LLAMAEnhancedQuantumAdaptiveGradientDiversityExplorer", register=True) +except Exception as e: # EnhancedQuantumAdaptiveGradientDiversityExplorer print("EnhancedQuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridDEPSO_V4 import EnhancedQuantumAdaptiveHybridDEPSO_V4 +try: # EnhancedQuantumAdaptiveHybridDEPSO_V4 + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridDEPSO_V4 import ( + EnhancedQuantumAdaptiveHybridDEPSO_V4, + ) lama_register["EnhancedQuantumAdaptiveHybridDEPSO_V4"] = EnhancedQuantumAdaptiveHybridDEPSO_V4 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4").set_name("LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4" + ).set_name("LLAMAEnhancedQuantumAdaptiveHybridDEPSO_V4", register=True) +except Exception as e: # EnhancedQuantumAdaptiveHybridDEPSO_V4 print("EnhancedQuantumAdaptiveHybridDEPSO_V4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridSearchV2 import EnhancedQuantumAdaptiveHybridSearchV2 +try: # EnhancedQuantumAdaptiveHybridSearchV2 + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveHybridSearchV2 import ( + EnhancedQuantumAdaptiveHybridSearchV2, + ) lama_register["EnhancedQuantumAdaptiveHybridSearchV2"] = EnhancedQuantumAdaptiveHybridSearchV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveHybridSearchV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2").set_name("LLAMAEnhancedQuantumAdaptiveHybridSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveHybridSearchV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveHybridSearchV2" + ).set_name("LLAMAEnhancedQuantumAdaptiveHybridSearchV2", register=True) +except Exception as e: # EnhancedQuantumAdaptiveHybridSearchV2 print("EnhancedQuantumAdaptiveHybridSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveLevySwarmOptimization import EnhancedQuantumAdaptiveLevySwarmOptimization - - lama_register["EnhancedQuantumAdaptiveLevySwarmOptimization"] = EnhancedQuantumAdaptiveLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization").set_name("LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization", register=True) -except Exception as e: +try: # EnhancedQuantumAdaptiveLevySwarmOptimization + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveLevySwarmOptimization import ( + EnhancedQuantumAdaptiveLevySwarmOptimization, + ) + + lama_register["EnhancedQuantumAdaptiveLevySwarmOptimization"] = ( + EnhancedQuantumAdaptiveLevySwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization" + ).set_name("LLAMAEnhancedQuantumAdaptiveLevySwarmOptimization", register=True) +except Exception as e: # EnhancedQuantumAdaptiveLevySwarmOptimization print("EnhancedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiPhaseDE_v3 import EnhancedQuantumAdaptiveMultiPhaseDE_v3 +try: # EnhancedQuantumAdaptiveMultiPhaseDE_v3 + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiPhaseDE_v3 import ( + EnhancedQuantumAdaptiveMultiPhaseDE_v3, + ) lama_register["EnhancedQuantumAdaptiveMultiPhaseDE_v3"] = EnhancedQuantumAdaptiveMultiPhaseDE_v3 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3 = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3").set_name("LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3" + ).set_name("LLAMAEnhancedQuantumAdaptiveMultiPhaseDE_v3", register=True) +except Exception as e: # EnhancedQuantumAdaptiveMultiPhaseDE_v3 print("EnhancedQuantumAdaptiveMultiPhaseDE_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiStrategyEvolution import EnhancedQuantumAdaptiveMultiStrategyEvolution - - lama_register["EnhancedQuantumAdaptiveMultiStrategyEvolution"] = EnhancedQuantumAdaptiveMultiStrategyEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution").set_name("LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution", register=True) -except Exception as e: +try: # EnhancedQuantumAdaptiveMultiStrategyEvolution + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveMultiStrategyEvolution import ( + EnhancedQuantumAdaptiveMultiStrategyEvolution, + ) + + lama_register["EnhancedQuantumAdaptiveMultiStrategyEvolution"] = ( + EnhancedQuantumAdaptiveMultiStrategyEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution" + ).set_name("LLAMAEnhancedQuantumAdaptiveMultiStrategyEvolution", register=True) +except Exception as e: # EnhancedQuantumAdaptiveMultiStrategyEvolution print("EnhancedQuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAdaptiveNesterovStrategy import EnhancedQuantumAdaptiveNesterovStrategy +try: # EnhancedQuantumAdaptiveNesterovStrategy + from nevergrad.optimization.lama.EnhancedQuantumAdaptiveNesterovStrategy import ( + EnhancedQuantumAdaptiveNesterovStrategy, + ) lama_register["EnhancedQuantumAdaptiveNesterovStrategy"] = EnhancedQuantumAdaptiveNesterovStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveNesterovStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy").set_name("LLAMAEnhancedQuantumAdaptiveNesterovStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveNesterovStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveNesterovStrategy" + ).set_name("LLAMAEnhancedQuantumAdaptiveNesterovStrategy", register=True) +except Exception as e: # EnhancedQuantumAdaptiveNesterovStrategy print("EnhancedQuantumAdaptiveNesterovStrategy can not be imported: ", e) -try: +try: # EnhancedQuantumAdaptiveOptimizer from nevergrad.optimization.lama.EnhancedQuantumAdaptiveOptimizer import EnhancedQuantumAdaptiveOptimizer lama_register["EnhancedQuantumAdaptiveOptimizer"] = EnhancedQuantumAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveOptimizer").set_name("LLAMAEnhancedQuantumAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAdaptiveOptimizer" + ).set_name("LLAMAEnhancedQuantumAdaptiveOptimizer", register=True) +except Exception as e: # EnhancedQuantumAdaptiveOptimizer print("EnhancedQuantumAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumAnnealingOptimizer import EnhancedQuantumAnnealingOptimizer +try: # EnhancedQuantumAnnealingOptimizer + from nevergrad.optimization.lama.EnhancedQuantumAnnealingOptimizer import ( + EnhancedQuantumAnnealingOptimizer, + ) lama_register["EnhancedQuantumAnnealingOptimizer"] = EnhancedQuantumAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumAnnealingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumAnnealingOptimizer").set_name("LLAMAEnhancedQuantumAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumAnnealingOptimizer" + ).set_name("LLAMAEnhancedQuantumAnnealingOptimizer", register=True) +except Exception as e: # EnhancedQuantumAnnealingOptimizer print("EnhancedQuantumAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCognitionFocusedOptimizerV18 import EnhancedQuantumCognitionFocusedOptimizerV18 +try: # EnhancedQuantumCognitionFocusedOptimizerV18 + from nevergrad.optimization.lama.EnhancedQuantumCognitionFocusedOptimizerV18 import ( + EnhancedQuantumCognitionFocusedOptimizerV18, + ) lama_register["EnhancedQuantumCognitionFocusedOptimizerV18"] = EnhancedQuantumCognitionFocusedOptimizerV18 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCognitionFocusedOptimizerV18 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18").set_name("LLAMAEnhancedQuantumCognitionFocusedOptimizerV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCognitionFocusedOptimizerV18 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCognitionFocusedOptimizerV18" + ).set_name("LLAMAEnhancedQuantumCognitionFocusedOptimizerV18", register=True) +except Exception as e: # EnhancedQuantumCognitionFocusedOptimizerV18 print("EnhancedQuantumCognitionFocusedOptimizerV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCognitionOptimizerV12 import EnhancedQuantumCognitionOptimizerV12 +try: # EnhancedQuantumCognitionOptimizerV12 + from nevergrad.optimization.lama.EnhancedQuantumCognitionOptimizerV12 import ( + EnhancedQuantumCognitionOptimizerV12, + ) lama_register["EnhancedQuantumCognitionOptimizerV12"] = EnhancedQuantumCognitionOptimizerV12 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCognitionOptimizerV12 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionOptimizerV12").set_name("LLAMAEnhancedQuantumCognitionOptimizerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCognitionOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCognitionOptimizerV12 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCognitionOptimizerV12" + ).set_name("LLAMAEnhancedQuantumCognitionOptimizerV12", register=True) +except Exception as e: # EnhancedQuantumCognitionOptimizerV12 print("EnhancedQuantumCognitionOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCooperativeStrategy import EnhancedQuantumCooperativeStrategy +try: # EnhancedQuantumCooperativeStrategy + from nevergrad.optimization.lama.EnhancedQuantumCooperativeStrategy import ( + EnhancedQuantumCooperativeStrategy, + ) lama_register["EnhancedQuantumCooperativeStrategy"] = EnhancedQuantumCooperativeStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCooperativeStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumCooperativeStrategy").set_name("LLAMAEnhancedQuantumCooperativeStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCooperativeStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCooperativeStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCooperativeStrategy" + ).set_name("LLAMAEnhancedQuantumCooperativeStrategy", register=True) +except Exception as e: # EnhancedQuantumCooperativeStrategy print("EnhancedQuantumCooperativeStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolution import EnhancedQuantumCovarianceMatrixDifferentialEvolution - - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolution"] = EnhancedQuantumCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedQuantumCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolution import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # EnhancedQuantumCovarianceMatrixDifferentialEvolution print("EnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus import EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus - - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus"] = EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus", register=True) -except Exception as e: +try: # EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus", register=True) +except Exception as e: # EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - - lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2").set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) -except Exception as e: +try: # EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + from nevergrad.optimization.lama.EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2, + ) + + lama_register["EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( + EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" + ).set_name("LLAMAEnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) +except Exception as e: # EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 print("EnhancedQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts import EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts - - lama_register["EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts"] = EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts").set_name("LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts + from nevergrad.optimization.lama.EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts import ( + EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts, + ) + + lama_register["EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts"] = ( + EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts" + ).set_name("LLAMAEnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts", register=True) +except Exception as e: # EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts print("EnhancedQuantumDifferentialElitistAlgorithmWithAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolution import EnhancedQuantumDifferentialEvolution +try: # EnhancedQuantumDifferentialEvolution + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolution import ( + EnhancedQuantumDifferentialEvolution, + ) lama_register["EnhancedQuantumDifferentialEvolution"] = EnhancedQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolution").set_name("LLAMAEnhancedQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolution" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolution", register=True) +except Exception as e: # EnhancedQuantumDifferentialEvolution print("EnhancedQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart import EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart - - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart", register=True) +except Exception as e: # EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart print("EnhancedQuantumDifferentialEvolutionWithAdaptiveElitismAndRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts import EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts - - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts", register=True) +except Exception as e: # EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory import EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory - - lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory"] = EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory import ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory"] = ( + EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory", register=True) +except Exception as e: # EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory print("EnhancedQuantumDifferentialEvolutionWithAdaptiveRestartsAndMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism import EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism - - lama_register["EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism"] = EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism").set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism + from nevergrad.optimization.lama.EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism import ( + EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism, + ) + + lama_register["EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism"] = ( + EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism" + ).set_name("LLAMAEnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism", register=True) +except Exception as e: # EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism print("EnhancedQuantumDifferentialEvolutionWithSelfAdaptiveMechanism can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism import EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism - - lama_register["EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism"] = EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism").set_name("LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism import ( + EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism, + ) + + lama_register["EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism"] = ( + EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism" + ).set_name("LLAMAEnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism", register=True) +except Exception as e: # EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism print("EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleSwarmOptimizer import EnhancedQuantumDifferentialParticleSwarmOptimizer - - lama_register["EnhancedQuantumDifferentialParticleSwarmOptimizer"] = EnhancedQuantumDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer").set_name("LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # EnhancedQuantumDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.EnhancedQuantumDifferentialParticleSwarmOptimizer import ( + EnhancedQuantumDifferentialParticleSwarmOptimizer, + ) + + lama_register["EnhancedQuantumDifferentialParticleSwarmOptimizer"] = ( + EnhancedQuantumDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAEnhancedQuantumDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # EnhancedQuantumDifferentialParticleSwarmOptimizer print("EnhancedQuantumDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: +try: # EnhancedQuantumDiversityDE from nevergrad.optimization.lama.EnhancedQuantumDiversityDE import EnhancedQuantumDiversityDE lama_register["EnhancedQuantumDiversityDE"] = EnhancedQuantumDiversityDE - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDiversityDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE").set_name("LLAMAEnhancedQuantumDiversityDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDiversityDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumDiversityDE").set_name( + "LLAMAEnhancedQuantumDiversityDE", register=True + ) +except Exception as e: # EnhancedQuantumDiversityDE print("EnhancedQuantumDiversityDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDynamicAdaptiveHybridDEPSO import EnhancedQuantumDynamicAdaptiveHybridDEPSO +try: # EnhancedQuantumDynamicAdaptiveHybridDEPSO + from nevergrad.optimization.lama.EnhancedQuantumDynamicAdaptiveHybridDEPSO import ( + EnhancedQuantumDynamicAdaptiveHybridDEPSO, + ) lama_register["EnhancedQuantumDynamicAdaptiveHybridDEPSO"] = EnhancedQuantumDynamicAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO").set_name("LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAEnhancedQuantumDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: # EnhancedQuantumDynamicAdaptiveHybridDEPSO print("EnhancedQuantumDynamicAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumDynamicBalanceOptimizer import EnhancedQuantumDynamicBalanceOptimizer +try: # EnhancedQuantumDynamicBalanceOptimizer + from nevergrad.optimization.lama.EnhancedQuantumDynamicBalanceOptimizer import ( + EnhancedQuantumDynamicBalanceOptimizer, + ) lama_register["EnhancedQuantumDynamicBalanceOptimizer"] = EnhancedQuantumDynamicBalanceOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicBalanceOptimizer").set_name("LLAMAEnhancedQuantumDynamicBalanceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicBalanceOptimizer" + ).set_name("LLAMAEnhancedQuantumDynamicBalanceOptimizer", register=True) +except Exception as e: # EnhancedQuantumDynamicBalanceOptimizer print("EnhancedQuantumDynamicBalanceOptimizer can not be imported: ", e) -try: +try: # EnhancedQuantumDynamicOptimizer from nevergrad.optimization.lama.EnhancedQuantumDynamicOptimizer import EnhancedQuantumDynamicOptimizer lama_register["EnhancedQuantumDynamicOptimizer"] = EnhancedQuantumDynamicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumDynamicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicOptimizer").set_name("LLAMAEnhancedQuantumDynamicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumDynamicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumDynamicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumDynamicOptimizer" + ).set_name("LLAMAEnhancedQuantumDynamicOptimizer", register=True) +except Exception as e: # EnhancedQuantumDynamicOptimizer print("EnhancedQuantumDynamicOptimizer can not be imported: ", e) -try: +try: # EnhancedQuantumEvolutionStrategy from nevergrad.optimization.lama.EnhancedQuantumEvolutionStrategy import EnhancedQuantumEvolutionStrategy lama_register["EnhancedQuantumEvolutionStrategy"] = EnhancedQuantumEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumEvolutionStrategy").set_name("LLAMAEnhancedQuantumEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumEvolutionStrategy" + ).set_name("LLAMAEnhancedQuantumEvolutionStrategy", register=True) +except Exception as e: # EnhancedQuantumEvolutionStrategy print("EnhancedQuantumEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithm import EnhancedQuantumFireworksAlgorithm +try: # EnhancedQuantumFireworksAlgorithm + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithm import ( + EnhancedQuantumFireworksAlgorithm, + ) lama_register["EnhancedQuantumFireworksAlgorithm"] = EnhancedQuantumFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithm").set_name("LLAMAEnhancedQuantumFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedQuantumFireworksAlgorithm" + ).set_name("LLAMAEnhancedQuantumFireworksAlgorithm", register=True) +except Exception as e: # EnhancedQuantumFireworksAlgorithm print("EnhancedQuantumFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithmV2 import EnhancedQuantumFireworksAlgorithmV2 +try: # EnhancedQuantumFireworksAlgorithmV2 + from nevergrad.optimization.lama.EnhancedQuantumFireworksAlgorithmV2 import ( + EnhancedQuantumFireworksAlgorithmV2, + ) lama_register["EnhancedQuantumFireworksAlgorithmV2"] = EnhancedQuantumFireworksAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumFireworksAlgorithmV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithmV2").set_name("LLAMAEnhancedQuantumFireworksAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumFireworksAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumFireworksAlgorithmV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumFireworksAlgorithmV2" + ).set_name("LLAMAEnhancedQuantumFireworksAlgorithmV2", register=True) +except Exception as e: # EnhancedQuantumFireworksAlgorithmV2 print("EnhancedQuantumFireworksAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimization import EnhancedQuantumGradientAdaptiveExplorationOptimization - - lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimization"] = EnhancedQuantumGradientAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedQuantumGradientAdaptiveExplorationOptimization + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimization import ( + EnhancedQuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimization"] = ( + EnhancedQuantumGradientAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: # EnhancedQuantumGradientAdaptiveExplorationOptimization print("EnhancedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 import EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 - - lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimizationV5"] = EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5").set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5", register=True) -except Exception as e: +try: # EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 + from nevergrad.optimization.lama.EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 import ( + EnhancedQuantumGradientAdaptiveExplorationOptimizationV5, + ) + + lama_register["EnhancedQuantumGradientAdaptiveExplorationOptimizationV5"] = ( + EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5" + ).set_name("LLAMAEnhancedQuantumGradientAdaptiveExplorationOptimizationV5", register=True) +except Exception as e: # EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 print("EnhancedQuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimization import EnhancedQuantumGradientExplorationOptimization - - lama_register["EnhancedQuantumGradientExplorationOptimization"] = EnhancedQuantumGradientExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimization").set_name("LLAMAEnhancedQuantumGradientExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedQuantumGradientExplorationOptimization + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimization import ( + EnhancedQuantumGradientExplorationOptimization, + ) + + lama_register["EnhancedQuantumGradientExplorationOptimization"] = ( + EnhancedQuantumGradientExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientExplorationOptimization" + ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimization", register=True) +except Exception as e: # EnhancedQuantumGradientExplorationOptimization print("EnhancedQuantumGradientExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimizationV2 import EnhancedQuantumGradientExplorationOptimizationV2 - - lama_register["EnhancedQuantumGradientExplorationOptimizationV2"] = EnhancedQuantumGradientExplorationOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2").set_name("LLAMAEnhancedQuantumGradientExplorationOptimizationV2", register=True) -except Exception as e: +try: # EnhancedQuantumGradientExplorationOptimizationV2 + from nevergrad.optimization.lama.EnhancedQuantumGradientExplorationOptimizationV2 import ( + EnhancedQuantumGradientExplorationOptimizationV2, + ) + + lama_register["EnhancedQuantumGradientExplorationOptimizationV2"] = ( + EnhancedQuantumGradientExplorationOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientExplorationOptimizationV2" + ).set_name("LLAMAEnhancedQuantumGradientExplorationOptimizationV2", register=True) +except Exception as e: # EnhancedQuantumGradientExplorationOptimizationV2 print("EnhancedQuantumGradientExplorationOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientMemeticOptimizer import EnhancedQuantumGradientMemeticOptimizer +try: # EnhancedQuantumGradientMemeticOptimizer + from nevergrad.optimization.lama.EnhancedQuantumGradientMemeticOptimizer import ( + EnhancedQuantumGradientMemeticOptimizer, + ) lama_register["EnhancedQuantumGradientMemeticOptimizer"] = EnhancedQuantumGradientMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientMemeticOptimizer").set_name("LLAMAEnhancedQuantumGradientMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumGradientMemeticOptimizer", register=True) +except Exception as e: # EnhancedQuantumGradientMemeticOptimizer print("EnhancedQuantumGradientMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumGradientOptimizerV5 import EnhancedQuantumGradientOptimizerV5 +try: # EnhancedQuantumGradientOptimizerV5 + from nevergrad.optimization.lama.EnhancedQuantumGradientOptimizerV5 import ( + EnhancedQuantumGradientOptimizerV5, + ) lama_register["EnhancedQuantumGradientOptimizerV5"] = EnhancedQuantumGradientOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumGradientOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientOptimizerV5").set_name("LLAMAEnhancedQuantumGradientOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumGradientOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumGradientOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumGradientOptimizerV5" + ).set_name("LLAMAEnhancedQuantumGradientOptimizerV5", register=True) +except Exception as e: # EnhancedQuantumGradientOptimizerV5 print("EnhancedQuantumGradientOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonicAdaptationStrategy import EnhancedQuantumHarmonicAdaptationStrategy +try: # EnhancedQuantumHarmonicAdaptationStrategy + from nevergrad.optimization.lama.EnhancedQuantumHarmonicAdaptationStrategy import ( + EnhancedQuantumHarmonicAdaptationStrategy, + ) lama_register["EnhancedQuantumHarmonicAdaptationStrategy"] = EnhancedQuantumHarmonicAdaptationStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonicAdaptationStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy").set_name("LLAMAEnhancedQuantumHarmonicAdaptationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonicAdaptationStrategy" + ).set_name("LLAMAEnhancedQuantumHarmonicAdaptationStrategy", register=True) +except Exception as e: # EnhancedQuantumHarmonicAdaptationStrategy print("EnhancedQuantumHarmonicAdaptationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonyMemeticAlgorithm import EnhancedQuantumHarmonyMemeticAlgorithm +try: # EnhancedQuantumHarmonyMemeticAlgorithm + from nevergrad.optimization.lama.EnhancedQuantumHarmonyMemeticAlgorithm import ( + EnhancedQuantumHarmonyMemeticAlgorithm, + ) lama_register["EnhancedQuantumHarmonyMemeticAlgorithm"] = EnhancedQuantumHarmonyMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm").set_name("LLAMAEnhancedQuantumHarmonyMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonyMemeticAlgorithm" + ).set_name("LLAMAEnhancedQuantumHarmonyMemeticAlgorithm", register=True) +except Exception as e: # EnhancedQuantumHarmonyMemeticAlgorithm print("EnhancedQuantumHarmonyMemeticAlgorithm can not be imported: ", e) -try: +try: # EnhancedQuantumHarmonySearch from nevergrad.optimization.lama.EnhancedQuantumHarmonySearch import EnhancedQuantumHarmonySearch lama_register["EnhancedQuantumHarmonySearch"] = EnhancedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearch").set_name("LLAMAEnhancedQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearch" + ).set_name("LLAMAEnhancedQuantumHarmonySearch", register=True) +except Exception as e: # EnhancedQuantumHarmonySearch print("EnhancedQuantumHarmonySearch can not be imported: ", e) -try: +try: # EnhancedQuantumHarmonySearchAB from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchAB import EnhancedQuantumHarmonySearchAB lama_register["EnhancedQuantumHarmonySearchAB"] = EnhancedQuantumHarmonySearchAB - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchAB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonySearchAB = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchAB").set_name("LLAMAEnhancedQuantumHarmonySearchAB", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchAB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonySearchAB = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchAB" + ).set_name("LLAMAEnhancedQuantumHarmonySearchAB", register=True) +except Exception as e: # EnhancedQuantumHarmonySearchAB print("EnhancedQuantumHarmonySearchAB can not be imported: ", e) -try: +try: # EnhancedQuantumHarmonySearchABGB from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGB import EnhancedQuantumHarmonySearchABGB lama_register["EnhancedQuantumHarmonySearchABGB"] = EnhancedQuantumHarmonySearchABGB - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonySearchABGB = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGB").set_name("LLAMAEnhancedQuantumHarmonySearchABGB", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonySearchABGB = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchABGB" + ).set_name("LLAMAEnhancedQuantumHarmonySearchABGB", register=True) +except Exception as e: # EnhancedQuantumHarmonySearchABGB print("EnhancedQuantumHarmonySearchABGB can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGBRefined import EnhancedQuantumHarmonySearchABGBRefined +try: # EnhancedQuantumHarmonySearchABGBRefined + from nevergrad.optimization.lama.EnhancedQuantumHarmonySearchABGBRefined import ( + EnhancedQuantumHarmonySearchABGBRefined, + ) lama_register["EnhancedQuantumHarmonySearchABGBRefined"] = EnhancedQuantumHarmonySearchABGBRefined - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGBRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHarmonySearchABGBRefined = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGBRefined").set_name("LLAMAEnhancedQuantumHarmonySearchABGBRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHarmonySearchABGBRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHarmonySearchABGBRefined = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHarmonySearchABGBRefined" + ).set_name("LLAMAEnhancedQuantumHarmonySearchABGBRefined", register=True) +except Exception as e: # EnhancedQuantumHarmonySearchABGBRefined print("EnhancedQuantumHarmonySearchABGBRefined can not be imported: ", e) -try: +try: # EnhancedQuantumHybridAdaptiveDE from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE import EnhancedQuantumHybridAdaptiveDE lama_register["EnhancedQuantumHybridAdaptiveDE"] = EnhancedQuantumHybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE").set_name("LLAMAEnhancedQuantumHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHybridAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHybridAdaptiveDE" + ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE", register=True) +except Exception as e: # EnhancedQuantumHybridAdaptiveDE print("EnhancedQuantumHybridAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE_v2 import EnhancedQuantumHybridAdaptiveDE_v2 +try: # EnhancedQuantumHybridAdaptiveDE_v2 + from nevergrad.optimization.lama.EnhancedQuantumHybridAdaptiveDE_v2 import ( + EnhancedQuantumHybridAdaptiveDE_v2, + ) lama_register["EnhancedQuantumHybridAdaptiveDE_v2"] = EnhancedQuantumHybridAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumHybridAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2").set_name("LLAMAEnhancedQuantumHybridAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumHybridAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumHybridAdaptiveDE_v2" + ).set_name("LLAMAEnhancedQuantumHybridAdaptiveDE_v2", register=True) +except Exception as e: # EnhancedQuantumHybridAdaptiveDE_v2 print("EnhancedQuantumHybridAdaptiveDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumInformedGradientOptimizer import EnhancedQuantumInformedGradientOptimizer +try: # EnhancedQuantumInformedGradientOptimizer + from nevergrad.optimization.lama.EnhancedQuantumInformedGradientOptimizer import ( + EnhancedQuantumInformedGradientOptimizer, + ) lama_register["EnhancedQuantumInformedGradientOptimizer"] = EnhancedQuantumInformedGradientOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumInformedGradientOptimizer").set_name("LLAMAEnhancedQuantumInformedGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInformedGradientOptimizer" + ).set_name("LLAMAEnhancedQuantumInformedGradientOptimizer", register=True) +except Exception as e: # EnhancedQuantumInformedGradientOptimizer print("EnhancedQuantumInformedGradientOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumInfusedAdaptiveStrategy import EnhancedQuantumInfusedAdaptiveStrategy +try: # EnhancedQuantumInfusedAdaptiveStrategy + from nevergrad.optimization.lama.EnhancedQuantumInfusedAdaptiveStrategy import ( + EnhancedQuantumInfusedAdaptiveStrategy, + ) lama_register["EnhancedQuantumInfusedAdaptiveStrategy"] = EnhancedQuantumInfusedAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumInfusedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy").set_name("LLAMAEnhancedQuantumInfusedAdaptiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInfusedAdaptiveStrategy" + ).set_name("LLAMAEnhancedQuantumInfusedAdaptiveStrategy", register=True) +except Exception as e: # EnhancedQuantumInfusedAdaptiveStrategy print("EnhancedQuantumInfusedAdaptiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumInspiredHybridOptimizer import EnhancedQuantumInspiredHybridOptimizer +try: # EnhancedQuantumInspiredHybridOptimizer + from nevergrad.optimization.lama.EnhancedQuantumInspiredHybridOptimizer import ( + EnhancedQuantumInspiredHybridOptimizer, + ) lama_register["EnhancedQuantumInspiredHybridOptimizer"] = EnhancedQuantumInspiredHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumInspiredHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumInspiredHybridOptimizer").set_name("LLAMAEnhancedQuantumInspiredHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumInspiredHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumInspiredHybridOptimizer" + ).set_name("LLAMAEnhancedQuantumInspiredHybridOptimizer", register=True) +except Exception as e: # EnhancedQuantumInspiredHybridOptimizer print("EnhancedQuantumInspiredHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumIterativeRefinement import EnhancedQuantumIterativeRefinement +try: # EnhancedQuantumIterativeRefinement + from nevergrad.optimization.lama.EnhancedQuantumIterativeRefinement import ( + EnhancedQuantumIterativeRefinement, + ) lama_register["EnhancedQuantumIterativeRefinement"] = EnhancedQuantumIterativeRefinement - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumIterativeRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumIterativeRefinement = NonObjectOptimizer(method="LLAMAEnhancedQuantumIterativeRefinement").set_name("LLAMAEnhancedQuantumIterativeRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumIterativeRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumIterativeRefinement = NonObjectOptimizer( + method="LLAMAEnhancedQuantumIterativeRefinement" + ).set_name("LLAMAEnhancedQuantumIterativeRefinement", register=True) +except Exception as e: # EnhancedQuantumIterativeRefinement print("EnhancedQuantumIterativeRefinement can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLeapGradientBoostPSO import EnhancedQuantumLeapGradientBoostPSO +try: # EnhancedQuantumLeapGradientBoostPSO + from nevergrad.optimization.lama.EnhancedQuantumLeapGradientBoostPSO import ( + EnhancedQuantumLeapGradientBoostPSO, + ) lama_register["EnhancedQuantumLeapGradientBoostPSO"] = EnhancedQuantumLeapGradientBoostPSO - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapGradientBoostPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLeapGradientBoostPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapGradientBoostPSO").set_name("LLAMAEnhancedQuantumLeapGradientBoostPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapGradientBoostPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLeapGradientBoostPSO = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLeapGradientBoostPSO" + ).set_name("LLAMAEnhancedQuantumLeapGradientBoostPSO", register=True) +except Exception as e: # EnhancedQuantumLeapGradientBoostPSO print("EnhancedQuantumLeapGradientBoostPSO can not be imported: ", e) -try: +try: # EnhancedQuantumLeapPSO from nevergrad.optimization.lama.EnhancedQuantumLeapPSO import EnhancedQuantumLeapPSO lama_register["EnhancedQuantumLeapPSO"] = EnhancedQuantumLeapPSO - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLeapPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO").set_name("LLAMAEnhancedQuantumLeapPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLeapPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumLeapPSO").set_name( + "LLAMAEnhancedQuantumLeapPSO", register=True + ) +except Exception as e: # EnhancedQuantumLeapPSO print("EnhancedQuantumLeapPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialDynamicOptimizer import EnhancedQuantumLevyDifferentialDynamicOptimizer - - lama_register["EnhancedQuantumLevyDifferentialDynamicOptimizer"] = EnhancedQuantumLevyDifferentialDynamicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer").set_name("LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer", register=True) -except Exception as e: +try: # EnhancedQuantumLevyDifferentialDynamicOptimizer + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialDynamicOptimizer import ( + EnhancedQuantumLevyDifferentialDynamicOptimizer, + ) + + lama_register["EnhancedQuantumLevyDifferentialDynamicOptimizer"] = ( + EnhancedQuantumLevyDifferentialDynamicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialDynamicOptimizer", register=True) +except Exception as e: # EnhancedQuantumLevyDifferentialDynamicOptimizer print("EnhancedQuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialOptimizer import EnhancedQuantumLevyDifferentialOptimizer +try: # EnhancedQuantumLevyDifferentialOptimizer + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialOptimizer import ( + EnhancedQuantumLevyDifferentialOptimizer, + ) lama_register["EnhancedQuantumLevyDifferentialOptimizer"] = EnhancedQuantumLevyDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLevyDifferentialOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialOptimizer").set_name("LLAMAEnhancedQuantumLevyDifferentialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLevyDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialOptimizer", register=True) +except Exception as e: # EnhancedQuantumLevyDifferentialOptimizer print("EnhancedQuantumLevyDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialSearch import EnhancedQuantumLevyDifferentialSearch +try: # EnhancedQuantumLevyDifferentialSearch + from nevergrad.optimization.lama.EnhancedQuantumLevyDifferentialSearch import ( + EnhancedQuantumLevyDifferentialSearch, + ) lama_register["EnhancedQuantumLevyDifferentialSearch"] = EnhancedQuantumLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialSearch").set_name("LLAMAEnhancedQuantumLevyDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyDifferentialSearch" + ).set_name("LLAMAEnhancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: # EnhancedQuantumLevyDifferentialSearch print("EnhancedQuantumLevyDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLevyMemeticOptimizer import EnhancedQuantumLevyMemeticOptimizer +try: # EnhancedQuantumLevyMemeticOptimizer + from nevergrad.optimization.lama.EnhancedQuantumLevyMemeticOptimizer import ( + EnhancedQuantumLevyMemeticOptimizer, + ) lama_register["EnhancedQuantumLevyMemeticOptimizer"] = EnhancedQuantumLevyMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLevyMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyMemeticOptimizer").set_name("LLAMAEnhancedQuantumLevyMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLevyMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumLevyMemeticOptimizer", register=True) +except Exception as e: # EnhancedQuantumLevyMemeticOptimizer print("EnhancedQuantumLevyMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLevyParticleOptimization import EnhancedQuantumLevyParticleOptimization +try: # EnhancedQuantumLevyParticleOptimization + from nevergrad.optimization.lama.EnhancedQuantumLevyParticleOptimization import ( + EnhancedQuantumLevyParticleOptimization, + ) lama_register["EnhancedQuantumLevyParticleOptimization"] = EnhancedQuantumLevyParticleOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyParticleOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLevyParticleOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyParticleOptimization").set_name("LLAMAEnhancedQuantumLevyParticleOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLevyParticleOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLevyParticleOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLevyParticleOptimization" + ).set_name("LLAMAEnhancedQuantumLevyParticleOptimization", register=True) +except Exception as e: # EnhancedQuantumLevyParticleOptimization print("EnhancedQuantumLevyParticleOptimization can not be imported: ", e) -try: +try: # EnhancedQuantumLocalSearch from nevergrad.optimization.lama.EnhancedQuantumLocalSearch import EnhancedQuantumLocalSearch lama_register["EnhancedQuantumLocalSearch"] = EnhancedQuantumLocalSearch - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch").set_name("LLAMAEnhancedQuantumLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLocalSearch = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearch").set_name( + "LLAMAEnhancedQuantumLocalSearch", register=True + ) +except Exception as e: # EnhancedQuantumLocalSearch print("EnhancedQuantumLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumLocalSearchImproved import EnhancedQuantumLocalSearchImproved +try: # EnhancedQuantumLocalSearchImproved + from nevergrad.optimization.lama.EnhancedQuantumLocalSearchImproved import ( + EnhancedQuantumLocalSearchImproved, + ) lama_register["EnhancedQuantumLocalSearchImproved"] = EnhancedQuantumLocalSearchImproved - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearchImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumLocalSearchImproved = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearchImproved").set_name("LLAMAEnhancedQuantumLocalSearchImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumLocalSearchImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumLocalSearchImproved = NonObjectOptimizer( + method="LLAMAEnhancedQuantumLocalSearchImproved" + ).set_name("LLAMAEnhancedQuantumLocalSearchImproved", register=True) +except Exception as e: # EnhancedQuantumLocalSearchImproved print("EnhancedQuantumLocalSearchImproved can not be imported: ", e) -try: +try: # EnhancedQuantumMemeticOptimizer from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizer import EnhancedQuantumMemeticOptimizer lama_register["EnhancedQuantumMemeticOptimizer"] = EnhancedQuantumMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizer").set_name("LLAMAEnhancedQuantumMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMemeticOptimizer" + ).set_name("LLAMAEnhancedQuantumMemeticOptimizer", register=True) +except Exception as e: # EnhancedQuantumMemeticOptimizer print("EnhancedQuantumMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizerV5 import EnhancedQuantumMemeticOptimizerV5 +try: # EnhancedQuantumMemeticOptimizerV5 + from nevergrad.optimization.lama.EnhancedQuantumMemeticOptimizerV5 import ( + EnhancedQuantumMemeticOptimizerV5, + ) lama_register["EnhancedQuantumMemeticOptimizerV5"] = EnhancedQuantumMemeticOptimizerV5 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumMemeticOptimizerV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizerV5").set_name("LLAMAEnhancedQuantumMemeticOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMemeticOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumMemeticOptimizerV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMemeticOptimizerV5" + ).set_name("LLAMAEnhancedQuantumMemeticOptimizerV5", register=True) +except Exception as e: # EnhancedQuantumMemeticOptimizerV5 print("EnhancedQuantumMemeticOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumMultiPhaseAdaptiveDE_v10 import EnhancedQuantumMultiPhaseAdaptiveDE_v10 +try: # EnhancedQuantumMultiPhaseAdaptiveDE_v10 + from nevergrad.optimization.lama.EnhancedQuantumMultiPhaseAdaptiveDE_v10 import ( + EnhancedQuantumMultiPhaseAdaptiveDE_v10, + ) lama_register["EnhancedQuantumMultiPhaseAdaptiveDE_v10"] = EnhancedQuantumMultiPhaseAdaptiveDE_v10 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10").set_name("LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10" + ).set_name("LLAMAEnhancedQuantumMultiPhaseAdaptiveDE_v10", register=True) +except Exception as e: # EnhancedQuantumMultiPhaseAdaptiveDE_v10 print("EnhancedQuantumMultiPhaseAdaptiveDE_v10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumMultiStrategyOptimization_v2 import EnhancedQuantumMultiStrategyOptimization_v2 +try: # EnhancedQuantumMultiStrategyOptimization_v2 + from nevergrad.optimization.lama.EnhancedQuantumMultiStrategyOptimization_v2 import ( + EnhancedQuantumMultiStrategyOptimization_v2, + ) lama_register["EnhancedQuantumMultiStrategyOptimization_v2"] = EnhancedQuantumMultiStrategyOptimization_v2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumMultiStrategyOptimization_v2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2").set_name("LLAMAEnhancedQuantumMultiStrategyOptimization_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumMultiStrategyOptimization_v2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumMultiStrategyOptimization_v2" + ).set_name("LLAMAEnhancedQuantumMultiStrategyOptimization_v2", register=True) +except Exception as e: # EnhancedQuantumMultiStrategyOptimization_v2 print("EnhancedQuantumMultiStrategyOptimization_v2 can not be imported: ", e) -try: +try: # EnhancedQuantumPSO from nevergrad.optimization.lama.EnhancedQuantumPSO import EnhancedQuantumPSO lama_register["EnhancedQuantumPSO"] = EnhancedQuantumPSO - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO").set_name("LLAMAEnhancedQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumPSO = NonObjectOptimizer(method="LLAMAEnhancedQuantumPSO").set_name( + "LLAMAEnhancedQuantumPSO", register=True + ) +except Exception as e: # EnhancedQuantumPSO print("EnhancedQuantumPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumReactiveCooperativeStrategy import EnhancedQuantumReactiveCooperativeStrategy +try: # EnhancedQuantumReactiveCooperativeStrategy + from nevergrad.optimization.lama.EnhancedQuantumReactiveCooperativeStrategy import ( + EnhancedQuantumReactiveCooperativeStrategy, + ) lama_register["EnhancedQuantumReactiveCooperativeStrategy"] = EnhancedQuantumReactiveCooperativeStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumReactiveCooperativeStrategy = NonObjectOptimizer(method="LLAMAEnhancedQuantumReactiveCooperativeStrategy").set_name("LLAMAEnhancedQuantumReactiveCooperativeStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumReactiveCooperativeStrategy = NonObjectOptimizer( + method="LLAMAEnhancedQuantumReactiveCooperativeStrategy" + ).set_name("LLAMAEnhancedQuantumReactiveCooperativeStrategy", register=True) +except Exception as e: # EnhancedQuantumReactiveCooperativeStrategy print("EnhancedQuantumReactiveCooperativeStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumReinforcedNesterovAcceleratorV2 import EnhancedQuantumReinforcedNesterovAcceleratorV2 - - lama_register["EnhancedQuantumReinforcedNesterovAcceleratorV2"] = EnhancedQuantumReinforcedNesterovAcceleratorV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2").set_name("LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2", register=True) -except Exception as e: +try: # EnhancedQuantumReinforcedNesterovAcceleratorV2 + from nevergrad.optimization.lama.EnhancedQuantumReinforcedNesterovAcceleratorV2 import ( + EnhancedQuantumReinforcedNesterovAcceleratorV2, + ) + + lama_register["EnhancedQuantumReinforcedNesterovAcceleratorV2"] = ( + EnhancedQuantumReinforcedNesterovAcceleratorV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2" + ).set_name("LLAMAEnhancedQuantumReinforcedNesterovAcceleratorV2", register=True) +except Exception as e: # EnhancedQuantumReinforcedNesterovAcceleratorV2 print("EnhancedQuantumReinforcedNesterovAcceleratorV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumResilientCrossoverStrategyV2 import EnhancedQuantumResilientCrossoverStrategyV2 +try: # EnhancedQuantumResilientCrossoverStrategyV2 + from nevergrad.optimization.lama.EnhancedQuantumResilientCrossoverStrategyV2 import ( + EnhancedQuantumResilientCrossoverStrategyV2, + ) lama_register["EnhancedQuantumResilientCrossoverStrategyV2"] = EnhancedQuantumResilientCrossoverStrategyV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumResilientCrossoverStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2").set_name("LLAMAEnhancedQuantumResilientCrossoverStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumResilientCrossoverStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumResilientCrossoverStrategyV2" + ).set_name("LLAMAEnhancedQuantumResilientCrossoverStrategyV2", register=True) +except Exception as e: # EnhancedQuantumResilientCrossoverStrategyV2 print("EnhancedQuantumResilientCrossoverStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealing import EnhancedQuantumSimulatedAnnealing +try: # EnhancedQuantumSimulatedAnnealing + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealing import ( + EnhancedQuantumSimulatedAnnealing, + ) lama_register["EnhancedQuantumSimulatedAnnealing"] = EnhancedQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealing").set_name("LLAMAEnhancedQuantumSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealing" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealing", register=True) +except Exception as e: # EnhancedQuantumSimulatedAnnealing print("EnhancedQuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingImproved import EnhancedQuantumSimulatedAnnealingImproved +try: # EnhancedQuantumSimulatedAnnealingImproved + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingImproved import ( + EnhancedQuantumSimulatedAnnealingImproved, + ) lama_register["EnhancedQuantumSimulatedAnnealingImproved"] = EnhancedQuantumSimulatedAnnealingImproved - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSimulatedAnnealingImproved = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingImproved").set_name("LLAMAEnhancedQuantumSimulatedAnnealingImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSimulatedAnnealingImproved = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingImproved" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingImproved", register=True) +except Exception as e: # EnhancedQuantumSimulatedAnnealingImproved print("EnhancedQuantumSimulatedAnnealingImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingOptimized import EnhancedQuantumSimulatedAnnealingOptimized +try: # EnhancedQuantumSimulatedAnnealingOptimized + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingOptimized import ( + EnhancedQuantumSimulatedAnnealingOptimized, + ) lama_register["EnhancedQuantumSimulatedAnnealingOptimized"] = EnhancedQuantumSimulatedAnnealingOptimized - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSimulatedAnnealingOptimized = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized").set_name("LLAMAEnhancedQuantumSimulatedAnnealingOptimized", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSimulatedAnnealingOptimized = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingOptimized" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingOptimized", register=True) +except Exception as e: # EnhancedQuantumSimulatedAnnealingOptimized print("EnhancedQuantumSimulatedAnnealingOptimized can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingV2 import EnhancedQuantumSimulatedAnnealingV2 +try: # EnhancedQuantumSimulatedAnnealingV2 + from nevergrad.optimization.lama.EnhancedQuantumSimulatedAnnealingV2 import ( + EnhancedQuantumSimulatedAnnealingV2, + ) lama_register["EnhancedQuantumSimulatedAnnealingV2"] = EnhancedQuantumSimulatedAnnealingV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSimulatedAnnealingV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingV2").set_name("LLAMAEnhancedQuantumSimulatedAnnealingV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSimulatedAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSimulatedAnnealingV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSimulatedAnnealingV2" + ).set_name("LLAMAEnhancedQuantumSimulatedAnnealingV2", register=True) +except Exception as e: # EnhancedQuantumSimulatedAnnealingV2 print("EnhancedQuantumSimulatedAnnealingV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumStateConvergenceOptimizer import EnhancedQuantumStateConvergenceOptimizer +try: # EnhancedQuantumStateConvergenceOptimizer + from nevergrad.optimization.lama.EnhancedQuantumStateConvergenceOptimizer import ( + EnhancedQuantumStateConvergenceOptimizer, + ) lama_register["EnhancedQuantumStateConvergenceOptimizer"] = EnhancedQuantumStateConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumStateConvergenceOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumStateConvergenceOptimizer").set_name("LLAMAEnhancedQuantumStateConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumStateConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumStateConvergenceOptimizer" + ).set_name("LLAMAEnhancedQuantumStateConvergenceOptimizer", register=True) +except Exception as e: # EnhancedQuantumStateConvergenceOptimizer print("EnhancedQuantumStateConvergenceOptimizer can not be imported: ", e) -try: +try: # EnhancedQuantumSwarmOptimization from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimization import EnhancedQuantumSwarmOptimization lama_register["EnhancedQuantumSwarmOptimization"] = EnhancedQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimization").set_name("LLAMAEnhancedQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimization print("EnhancedQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationRefined import EnhancedQuantumSwarmOptimizationRefined +try: # EnhancedQuantumSwarmOptimizationRefined + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationRefined import ( + EnhancedQuantumSwarmOptimizationRefined, + ) lama_register["EnhancedQuantumSwarmOptimizationRefined"] = EnhancedQuantumSwarmOptimizationRefined - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationRefined = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationRefined").set_name("LLAMAEnhancedQuantumSwarmOptimizationRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationRefined = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationRefined" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationRefined", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationRefined print("EnhancedQuantumSwarmOptimizationRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV10 import EnhancedQuantumSwarmOptimizationV10 +try: # EnhancedQuantumSwarmOptimizationV10 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV10 import ( + EnhancedQuantumSwarmOptimizationV10, + ) lama_register["EnhancedQuantumSwarmOptimizationV10"] = EnhancedQuantumSwarmOptimizationV10 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV10 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV10").set_name("LLAMAEnhancedQuantumSwarmOptimizationV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV10 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV10" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV10", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV10 print("EnhancedQuantumSwarmOptimizationV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV11 import EnhancedQuantumSwarmOptimizationV11 +try: # EnhancedQuantumSwarmOptimizationV11 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV11 import ( + EnhancedQuantumSwarmOptimizationV11, + ) lama_register["EnhancedQuantumSwarmOptimizationV11"] = EnhancedQuantumSwarmOptimizationV11 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV11 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV11").set_name("LLAMAEnhancedQuantumSwarmOptimizationV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV11 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV11" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV11", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV11 print("EnhancedQuantumSwarmOptimizationV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV12 import EnhancedQuantumSwarmOptimizationV12 +try: # EnhancedQuantumSwarmOptimizationV12 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV12 import ( + EnhancedQuantumSwarmOptimizationV12, + ) lama_register["EnhancedQuantumSwarmOptimizationV12"] = EnhancedQuantumSwarmOptimizationV12 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV12 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV12").set_name("LLAMAEnhancedQuantumSwarmOptimizationV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV12 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV12" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV12", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV12 print("EnhancedQuantumSwarmOptimizationV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV13 import EnhancedQuantumSwarmOptimizationV13 +try: # EnhancedQuantumSwarmOptimizationV13 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV13 import ( + EnhancedQuantumSwarmOptimizationV13, + ) lama_register["EnhancedQuantumSwarmOptimizationV13"] = EnhancedQuantumSwarmOptimizationV13 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV13 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV13").set_name("LLAMAEnhancedQuantumSwarmOptimizationV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV13 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV13" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV13", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV13 print("EnhancedQuantumSwarmOptimizationV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV2 import EnhancedQuantumSwarmOptimizationV2 +try: # EnhancedQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV2 import ( + EnhancedQuantumSwarmOptimizationV2, + ) lama_register["EnhancedQuantumSwarmOptimizationV2"] = EnhancedQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedQuantumSwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV2 print("EnhancedQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV3 import EnhancedQuantumSwarmOptimizationV3 +try: # EnhancedQuantumSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV3 import ( + EnhancedQuantumSwarmOptimizationV3, + ) lama_register["EnhancedQuantumSwarmOptimizationV3"] = EnhancedQuantumSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedQuantumSwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV3 print("EnhancedQuantumSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV4 import EnhancedQuantumSwarmOptimizationV4 +try: # EnhancedQuantumSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV4 import ( + EnhancedQuantumSwarmOptimizationV4, + ) lama_register["EnhancedQuantumSwarmOptimizationV4"] = EnhancedQuantumSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedQuantumSwarmOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV4 print("EnhancedQuantumSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV5 import EnhancedQuantumSwarmOptimizationV5 +try: # EnhancedQuantumSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV5 import ( + EnhancedQuantumSwarmOptimizationV5, + ) lama_register["EnhancedQuantumSwarmOptimizationV5"] = EnhancedQuantumSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedQuantumSwarmOptimizationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV5 print("EnhancedQuantumSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV6 import EnhancedQuantumSwarmOptimizationV6 +try: # EnhancedQuantumSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV6 import ( + EnhancedQuantumSwarmOptimizationV6, + ) lama_register["EnhancedQuantumSwarmOptimizationV6"] = EnhancedQuantumSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedQuantumSwarmOptimizationV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV6 print("EnhancedQuantumSwarmOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV7 import EnhancedQuantumSwarmOptimizationV7 +try: # EnhancedQuantumSwarmOptimizationV7 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV7 import ( + EnhancedQuantumSwarmOptimizationV7, + ) lama_register["EnhancedQuantumSwarmOptimizationV7"] = EnhancedQuantumSwarmOptimizationV7 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV7 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV7").set_name("LLAMAEnhancedQuantumSwarmOptimizationV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV7 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV7" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV7", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV7 print("EnhancedQuantumSwarmOptimizationV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV8 import EnhancedQuantumSwarmOptimizationV8 +try: # EnhancedQuantumSwarmOptimizationV8 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV8 import ( + EnhancedQuantumSwarmOptimizationV8, + ) lama_register["EnhancedQuantumSwarmOptimizationV8"] = EnhancedQuantumSwarmOptimizationV8 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV8 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV8").set_name("LLAMAEnhancedQuantumSwarmOptimizationV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV8 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV8" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV8", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV8 print("EnhancedQuantumSwarmOptimizationV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV9 import EnhancedQuantumSwarmOptimizationV9 +try: # EnhancedQuantumSwarmOptimizationV9 + from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizationV9 import ( + EnhancedQuantumSwarmOptimizationV9, + ) lama_register["EnhancedQuantumSwarmOptimizationV9"] = EnhancedQuantumSwarmOptimizationV9 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizationV9 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV9").set_name("LLAMAEnhancedQuantumSwarmOptimizationV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizationV9 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizationV9" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizationV9", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizationV9 print("EnhancedQuantumSwarmOptimizationV9 can not be imported: ", e) -try: +try: # EnhancedQuantumSwarmOptimizerV4 from nevergrad.optimization.lama.EnhancedQuantumSwarmOptimizerV4 import EnhancedQuantumSwarmOptimizerV4 lama_register["EnhancedQuantumSwarmOptimizerV4"] = EnhancedQuantumSwarmOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSwarmOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizerV4").set_name("LLAMAEnhancedQuantumSwarmOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSwarmOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSwarmOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSwarmOptimizerV4" + ).set_name("LLAMAEnhancedQuantumSwarmOptimizerV4", register=True) +except Exception as e: # EnhancedQuantumSwarmOptimizerV4 print("EnhancedQuantumSwarmOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumSymbioticStrategyV5 import EnhancedQuantumSymbioticStrategyV5 +try: # EnhancedQuantumSymbioticStrategyV5 + from nevergrad.optimization.lama.EnhancedQuantumSymbioticStrategyV5 import ( + EnhancedQuantumSymbioticStrategyV5, + ) lama_register["EnhancedQuantumSymbioticStrategyV5"] = EnhancedQuantumSymbioticStrategyV5 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSymbioticStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSymbioticStrategyV5 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSymbioticStrategyV5").set_name("LLAMAEnhancedQuantumSymbioticStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSymbioticStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSymbioticStrategyV5 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSymbioticStrategyV5" + ).set_name("LLAMAEnhancedQuantumSymbioticStrategyV5", register=True) +except Exception as e: # EnhancedQuantumSymbioticStrategyV5 print("EnhancedQuantumSymbioticStrategyV5 can not be imported: ", e) -try: +try: # EnhancedQuantumSynergyStrategyV2 from nevergrad.optimization.lama.EnhancedQuantumSynergyStrategyV2 import EnhancedQuantumSynergyStrategyV2 lama_register["EnhancedQuantumSynergyStrategyV2"] = EnhancedQuantumSynergyStrategyV2 - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSynergyStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumSynergyStrategyV2 = NonObjectOptimizer(method="LLAMAEnhancedQuantumSynergyStrategyV2").set_name("LLAMAEnhancedQuantumSynergyStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumSynergyStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumSynergyStrategyV2 = NonObjectOptimizer( + method="LLAMAEnhancedQuantumSynergyStrategyV2" + ).set_name("LLAMAEnhancedQuantumSynergyStrategyV2", register=True) +except Exception as e: # EnhancedQuantumSynergyStrategyV2 print("EnhancedQuantumSynergyStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedQuantumTunnelingOptimizer import EnhancedQuantumTunnelingOptimizer +try: # EnhancedQuantumTunnelingOptimizer + from nevergrad.optimization.lama.EnhancedQuantumTunnelingOptimizer import ( + EnhancedQuantumTunnelingOptimizer, + ) lama_register["EnhancedQuantumTunnelingOptimizer"] = EnhancedQuantumTunnelingOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAEnhancedQuantumTunnelingOptimizer").set_name("LLAMAEnhancedQuantumTunnelingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedQuantumTunnelingOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedQuantumTunnelingOptimizer" + ).set_name("LLAMAEnhancedQuantumTunnelingOptimizer", register=True) +except Exception as e: # EnhancedQuantumTunnelingOptimizer print("EnhancedQuantumTunnelingOptimizer can not be imported: ", e) -try: +try: # EnhancedRAMEDS from nevergrad.optimization.lama.EnhancedRAMEDS import EnhancedRAMEDS lama_register["EnhancedRAMEDS"] = EnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS").set_name("LLAMAEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedRAMEDS").set_name( + "LLAMAEnhancedRAMEDS", register=True + ) +except Exception as e: # EnhancedRAMEDS print("EnhancedRAMEDS can not be imported: ", e) -try: +try: # EnhancedRAMEDSPro from nevergrad.optimization.lama.EnhancedRAMEDSPro import EnhancedRAMEDSPro lama_register["EnhancedRAMEDSPro"] = EnhancedRAMEDSPro - res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRAMEDSPro = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro").set_name("LLAMAEnhancedRAMEDSPro", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRAMEDSPro = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSPro").set_name( + "LLAMAEnhancedRAMEDSPro", register=True + ) +except Exception as e: # EnhancedRAMEDSPro print("EnhancedRAMEDSPro can not be imported: ", e) -try: +try: # EnhancedRAMEDSProV2 from nevergrad.optimization.lama.EnhancedRAMEDSProV2 import EnhancedRAMEDSProV2 lama_register["EnhancedRAMEDSProV2"] = EnhancedRAMEDSProV2 - res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRAMEDSProV2 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2").set_name("LLAMAEnhancedRAMEDSProV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRAMEDSProV2 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSProV2").set_name( + "LLAMAEnhancedRAMEDSProV2", register=True + ) +except Exception as e: # EnhancedRAMEDSProV2 print("EnhancedRAMEDSProV2 can not be imported: ", e) -try: +try: # EnhancedRAMEDSv3 from nevergrad.optimization.lama.EnhancedRAMEDSv3 import EnhancedRAMEDSv3 lama_register["EnhancedRAMEDSv3"] = EnhancedRAMEDSv3 - res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3").set_name("LLAMAEnhancedRAMEDSv3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv3").set_name( + "LLAMAEnhancedRAMEDSv3", register=True + ) +except Exception as e: # EnhancedRAMEDSv3 print("EnhancedRAMEDSv3 can not be imported: ", e) -try: +try: # EnhancedRAMEDSv4 from nevergrad.optimization.lama.EnhancedRAMEDSv4 import EnhancedRAMEDSv4 lama_register["EnhancedRAMEDSv4"] = EnhancedRAMEDSv4 - res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4").set_name("LLAMAEnhancedRAMEDSv4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMAEnhancedRAMEDSv4").set_name( + "LLAMAEnhancedRAMEDSv4", register=True + ) +except Exception as e: # EnhancedRAMEDSv4 print("EnhancedRAMEDSv4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolution import EnhancedRefinedAdaptiveCovarianceMatrixEvolution - - lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolution"] = EnhancedRefinedAdaptiveCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution").set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveCovarianceMatrixEvolution + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolution import ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolution"] = ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: # EnhancedRefinedAdaptiveCovarianceMatrixEvolution print("EnhancedRefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus import EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus - - lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus"] = EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus").set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus import ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus, + ) + + lama_register["EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus"] = ( + EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus" + ).set_name("LLAMAEnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus", register=True) +except Exception as e: # EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus print("EnhancedRefinedAdaptiveCovarianceMatrixEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost import EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost - - lama_register["EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( + EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost print("EnhancedRefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSearch import EnhancedRefinedAdaptiveDifferentialSearch +try: # EnhancedRefinedAdaptiveDifferentialSearch + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSearch import ( + EnhancedRefinedAdaptiveDifferentialSearch, + ) lama_register["EnhancedRefinedAdaptiveDifferentialSearch"] = EnhancedRefinedAdaptiveDifferentialSearch - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSearch", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDifferentialSearch print("EnhancedRefinedAdaptiveDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSpiralSearch import EnhancedRefinedAdaptiveDifferentialSpiralSearch - - lama_register["EnhancedRefinedAdaptiveDifferentialSpiralSearch"] = EnhancedRefinedAdaptiveDifferentialSpiralSearch - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch").set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveDifferentialSpiralSearch + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDifferentialSpiralSearch import ( + EnhancedRefinedAdaptiveDifferentialSpiralSearch, + ) + + lama_register["EnhancedRefinedAdaptiveDifferentialSpiralSearch"] = ( + EnhancedRefinedAdaptiveDifferentialSpiralSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDifferentialSpiralSearch print("EnhancedRefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveDynamicDE from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDE import EnhancedRefinedAdaptiveDynamicDE lama_register["EnhancedRefinedAdaptiveDynamicDE"] = EnhancedRefinedAdaptiveDynamicDE - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDE").set_name("LLAMAEnhancedRefinedAdaptiveDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDynamicDE = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicDE" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDE", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDynamicDE print("EnhancedRefinedAdaptiveDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 import EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 - - lama_register["EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15"] = EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15").set_name("LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 import ( + EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15"] = ( + EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 print("EnhancedRefinedAdaptiveDynamicDualPhaseStrategyV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicExplorationOptimization import EnhancedRefinedAdaptiveDynamicExplorationOptimization - - lama_register["EnhancedRefinedAdaptiveDynamicExplorationOptimization"] = EnhancedRefinedAdaptiveDynamicExplorationOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveDynamicExplorationOptimization + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicExplorationOptimization import ( + EnhancedRefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicExplorationOptimization"] = ( + EnhancedRefinedAdaptiveDynamicExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDynamicExplorationOptimization print("EnhancedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution import EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution print("EnhancedRefinedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveFocusedEvolutionStrategy import EnhancedRefinedAdaptiveFocusedEvolutionStrategy - - lama_register["EnhancedRefinedAdaptiveFocusedEvolutionStrategy"] = EnhancedRefinedAdaptiveFocusedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy").set_name("LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveFocusedEvolutionStrategy + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveFocusedEvolutionStrategy import ( + EnhancedRefinedAdaptiveFocusedEvolutionStrategy, + ) + + lama_register["EnhancedRefinedAdaptiveFocusedEvolutionStrategy"] = ( + EnhancedRefinedAdaptiveFocusedEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy" + ).set_name("LLAMAEnhancedRefinedAdaptiveFocusedEvolutionStrategy", register=True) +except Exception as e: # EnhancedRefinedAdaptiveFocusedEvolutionStrategy print("EnhancedRefinedAdaptiveFocusedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveHarmonySearch import EnhancedRefinedAdaptiveHarmonySearch +try: # EnhancedRefinedAdaptiveHarmonySearch + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveHarmonySearch import ( + EnhancedRefinedAdaptiveHarmonySearch, + ) lama_register["EnhancedRefinedAdaptiveHarmonySearch"] = EnhancedRefinedAdaptiveHarmonySearch - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveHarmonySearch").set_name("LLAMAEnhancedRefinedAdaptiveHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveHarmonySearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveHarmonySearch", register=True) +except Exception as e: # EnhancedRefinedAdaptiveHarmonySearch print("EnhancedRefinedAdaptiveHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMemeticDiverseOptimizer import EnhancedRefinedAdaptiveMemeticDiverseOptimizer - - lama_register["EnhancedRefinedAdaptiveMemeticDiverseOptimizer"] = EnhancedRefinedAdaptiveMemeticDiverseOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer").set_name("LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveMemeticDiverseOptimizer + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMemeticDiverseOptimizer import ( + EnhancedRefinedAdaptiveMemeticDiverseOptimizer, + ) + + lama_register["EnhancedRefinedAdaptiveMemeticDiverseOptimizer"] = ( + EnhancedRefinedAdaptiveMemeticDiverseOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMAEnhancedRefinedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: # EnhancedRefinedAdaptiveMemeticDiverseOptimizer print("EnhancedRefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v4 import EnhancedRefinedAdaptiveMetaNetPSO_v4 +try: # EnhancedRefinedAdaptiveMetaNetPSO_v4 + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v4 import ( + EnhancedRefinedAdaptiveMetaNetPSO_v4, + ) lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v4"] = EnhancedRefinedAdaptiveMetaNetPSO_v4 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4").set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4" + ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v4", register=True) +except Exception as e: # EnhancedRefinedAdaptiveMetaNetPSO_v4 print("EnhancedRefinedAdaptiveMetaNetPSO_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v5 import EnhancedRefinedAdaptiveMetaNetPSO_v5 +try: # EnhancedRefinedAdaptiveMetaNetPSO_v5 + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveMetaNetPSO_v5 import ( + EnhancedRefinedAdaptiveMetaNetPSO_v5, + ) lama_register["EnhancedRefinedAdaptiveMetaNetPSO_v5"] = EnhancedRefinedAdaptiveMetaNetPSO_v5 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5").set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5" + ).set_name("LLAMAEnhancedRefinedAdaptiveMetaNetPSO_v5", register=True) +except Exception as e: # EnhancedRefinedAdaptiveMetaNetPSO_v5 print("EnhancedRefinedAdaptiveMetaNetPSO_v5 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v49 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v49 import EnhancedRefinedAdaptiveQGSA_v49 lama_register["EnhancedRefinedAdaptiveQGSA_v49"] = EnhancedRefinedAdaptiveQGSA_v49 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v49 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v49").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v49", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v49")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v49 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v49" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v49", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v49 print("EnhancedRefinedAdaptiveQGSA_v49 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v52 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v52 import EnhancedRefinedAdaptiveQGSA_v52 lama_register["EnhancedRefinedAdaptiveQGSA_v52"] = EnhancedRefinedAdaptiveQGSA_v52 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v52 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v52").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v52", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v52 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v52" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v52", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v52 print("EnhancedRefinedAdaptiveQGSA_v52 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v53 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v53 import EnhancedRefinedAdaptiveQGSA_v53 lama_register["EnhancedRefinedAdaptiveQGSA_v53"] = EnhancedRefinedAdaptiveQGSA_v53 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v53 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v53").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v53", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v53")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v53 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v53" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v53", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v53 print("EnhancedRefinedAdaptiveQGSA_v53 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v54 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v54 import EnhancedRefinedAdaptiveQGSA_v54 lama_register["EnhancedRefinedAdaptiveQGSA_v54"] = EnhancedRefinedAdaptiveQGSA_v54 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v54 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v54").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v54", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v54")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v54 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v54" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v54", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v54 print("EnhancedRefinedAdaptiveQGSA_v54 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v55 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v55 import EnhancedRefinedAdaptiveQGSA_v55 lama_register["EnhancedRefinedAdaptiveQGSA_v55"] = EnhancedRefinedAdaptiveQGSA_v55 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v55 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v55").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v55", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v55")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v55 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v55" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v55", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v55 print("EnhancedRefinedAdaptiveQGSA_v55 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v56 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v56 import EnhancedRefinedAdaptiveQGSA_v56 lama_register["EnhancedRefinedAdaptiveQGSA_v56"] = EnhancedRefinedAdaptiveQGSA_v56 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v56 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v56").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v56", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v56")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v56 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v56" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v56", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v56 print("EnhancedRefinedAdaptiveQGSA_v56 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v57 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v57 import EnhancedRefinedAdaptiveQGSA_v57 lama_register["EnhancedRefinedAdaptiveQGSA_v57"] = EnhancedRefinedAdaptiveQGSA_v57 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v57 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v57").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v57", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v57")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v57 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v57" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v57", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v57 print("EnhancedRefinedAdaptiveQGSA_v57 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v58 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v58 import EnhancedRefinedAdaptiveQGSA_v58 lama_register["EnhancedRefinedAdaptiveQGSA_v58"] = EnhancedRefinedAdaptiveQGSA_v58 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v58 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v58").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v58", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v58")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v58 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v58" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v58", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v58 print("EnhancedRefinedAdaptiveQGSA_v58 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v59 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v59 import EnhancedRefinedAdaptiveQGSA_v59 lama_register["EnhancedRefinedAdaptiveQGSA_v59"] = EnhancedRefinedAdaptiveQGSA_v59 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v59")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v59 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v59").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v59", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v59")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v59 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v59" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v59", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v59 print("EnhancedRefinedAdaptiveQGSA_v59 can not be imported: ", e) -try: +try: # EnhancedRefinedAdaptiveQGSA_v60 from nevergrad.optimization.lama.EnhancedRefinedAdaptiveQGSA_v60 import EnhancedRefinedAdaptiveQGSA_v60 lama_register["EnhancedRefinedAdaptiveQGSA_v60"] = EnhancedRefinedAdaptiveQGSA_v60 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveQGSA_v60 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v60").set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v60", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveQGSA_v60")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveQGSA_v60 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveQGSA_v60" + ).set_name("LLAMAEnhancedRefinedAdaptiveQGSA_v60", register=True) +except Exception as e: # EnhancedRefinedAdaptiveQGSA_v60 print("EnhancedRefinedAdaptiveQGSA_v60 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSpiralGradientSearch import EnhancedRefinedAdaptiveSpiralGradientSearch +try: # EnhancedRefinedAdaptiveSpiralGradientSearch + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSpiralGradientSearch import ( + EnhancedRefinedAdaptiveSpiralGradientSearch, + ) lama_register["EnhancedRefinedAdaptiveSpiralGradientSearch"] = EnhancedRefinedAdaptiveSpiralGradientSearch - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch").set_name("LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch" + ).set_name("LLAMAEnhancedRefinedAdaptiveSpiralGradientSearch", register=True) +except Exception as e: # EnhancedRefinedAdaptiveSpiralGradientSearch print("EnhancedRefinedAdaptiveSpiralGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 import EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 - - lama_register["EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3"] = EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3").set_name("LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3", register=True) -except Exception as e: +try: # EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 + from nevergrad.optimization.lama.EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 import ( + EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3, + ) + + lama_register["EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3"] = ( + EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3" + ).set_name("LLAMAEnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3", register=True) +except Exception as e: # EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 print("EnhancedRefinedAdaptiveSuperchargedAQAPSO_LS_DIW_AP_V3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedDualStrategyAdaptiveDE import EnhancedRefinedDualStrategyAdaptiveDE +try: # EnhancedRefinedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.EnhancedRefinedDualStrategyAdaptiveDE import ( + EnhancedRefinedDualStrategyAdaptiveDE, + ) lama_register["EnhancedRefinedDualStrategyAdaptiveDE"] = EnhancedRefinedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE").set_name("LLAMAEnhancedRefinedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDualStrategyAdaptiveDE" + ).set_name("LLAMAEnhancedRefinedDualStrategyAdaptiveDE", register=True) +except Exception as e: # EnhancedRefinedDualStrategyAdaptiveDE print("EnhancedRefinedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedDynamicFireworkAlgorithm import EnhancedRefinedDynamicFireworkAlgorithm +try: # EnhancedRefinedDynamicFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedRefinedDynamicFireworkAlgorithm import ( + EnhancedRefinedDynamicFireworkAlgorithm, + ) lama_register["EnhancedRefinedDynamicFireworkAlgorithm"] = EnhancedRefinedDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm").set_name("LLAMAEnhancedRefinedDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedRefinedDynamicFireworkAlgorithm", register=True) +except Exception as e: # EnhancedRefinedDynamicFireworkAlgorithm print("EnhancedRefinedDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing import EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( + EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing print("EnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer import EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer - - lama_register["EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: +try: # EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer import ( + EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer"] = ( + EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer print("EnhancedRefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedEliteDynamicMemoryHybridOptimizer import EnhancedRefinedEliteDynamicMemoryHybridOptimizer - - lama_register["EnhancedRefinedEliteDynamicMemoryHybridOptimizer"] = EnhancedRefinedEliteDynamicMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer").set_name("LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer", register=True) -except Exception as e: +try: # EnhancedRefinedEliteDynamicMemoryHybridOptimizer + from nevergrad.optimization.lama.EnhancedRefinedEliteDynamicMemoryHybridOptimizer import ( + EnhancedRefinedEliteDynamicMemoryHybridOptimizer, + ) + + lama_register["EnhancedRefinedEliteDynamicMemoryHybridOptimizer"] = ( + EnhancedRefinedEliteDynamicMemoryHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: # EnhancedRefinedEliteDynamicMemoryHybridOptimizer print("EnhancedRefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 import EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 - - lama_register["EnhancedRefinedEvolutionaryGradientHybridOptimizerV4"] = EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4 = NonObjectOptimizer(method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4").set_name("LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4", register=True) -except Exception as e: +try: # EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 + from nevergrad.optimization.lama.EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 import ( + EnhancedRefinedEvolutionaryGradientHybridOptimizerV4, + ) + + lama_register["EnhancedRefinedEvolutionaryGradientHybridOptimizerV4"] = ( + EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4" + ).set_name("LLAMAEnhancedRefinedEvolutionaryGradientHybridOptimizerV4", register=True) +except Exception as e: # EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 print("EnhancedRefinedEvolutionaryGradientHybridOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGradientBoostedMemoryAnnealing import EnhancedRefinedGradientBoostedMemoryAnnealing - - lama_register["EnhancedRefinedGradientBoostedMemoryAnnealing"] = EnhancedRefinedGradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing").set_name("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing", register=True) -except Exception as e: +try: # EnhancedRefinedGradientBoostedMemoryAnnealing + from nevergrad.optimization.lama.EnhancedRefinedGradientBoostedMemoryAnnealing import ( + EnhancedRefinedGradientBoostedMemoryAnnealing, + ) + + lama_register["EnhancedRefinedGradientBoostedMemoryAnnealing"] = ( + EnhancedRefinedGradientBoostedMemoryAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # EnhancedRefinedGradientBoostedMemoryAnnealing print("EnhancedRefinedGradientBoostedMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v88 import EnhancedRefinedGuidedMassQGSA_v88 +try: # EnhancedRefinedGuidedMassQGSA_v88 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v88 import ( + EnhancedRefinedGuidedMassQGSA_v88, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v88"] = EnhancedRefinedGuidedMassQGSA_v88 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v88")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v88 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v88").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v88", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v88")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v88 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v88" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v88", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v88 print("EnhancedRefinedGuidedMassQGSA_v88 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v89 import EnhancedRefinedGuidedMassQGSA_v89 +try: # EnhancedRefinedGuidedMassQGSA_v89 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v89 import ( + EnhancedRefinedGuidedMassQGSA_v89, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v89"] = EnhancedRefinedGuidedMassQGSA_v89 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v89")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v89 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v89").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v89", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v89")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v89 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v89" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v89", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v89 print("EnhancedRefinedGuidedMassQGSA_v89 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v90 import EnhancedRefinedGuidedMassQGSA_v90 +try: # EnhancedRefinedGuidedMassQGSA_v90 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v90 import ( + EnhancedRefinedGuidedMassQGSA_v90, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v90"] = EnhancedRefinedGuidedMassQGSA_v90 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v90")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v90 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v90").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v90", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v90")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v90 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v90" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v90", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v90 print("EnhancedRefinedGuidedMassQGSA_v90 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v91 import EnhancedRefinedGuidedMassQGSA_v91 +try: # EnhancedRefinedGuidedMassQGSA_v91 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v91 import ( + EnhancedRefinedGuidedMassQGSA_v91, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v91"] = EnhancedRefinedGuidedMassQGSA_v91 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v91")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v91 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v91").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v91", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v91")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v91 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v91" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v91", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v91 print("EnhancedRefinedGuidedMassQGSA_v91 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v92 import EnhancedRefinedGuidedMassQGSA_v92 +try: # EnhancedRefinedGuidedMassQGSA_v92 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v92 import ( + EnhancedRefinedGuidedMassQGSA_v92, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v92"] = EnhancedRefinedGuidedMassQGSA_v92 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v92")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v92 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v92").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v92", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v92")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v92 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v92" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v92", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v92 print("EnhancedRefinedGuidedMassQGSA_v92 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v93 import EnhancedRefinedGuidedMassQGSA_v93 +try: # EnhancedRefinedGuidedMassQGSA_v93 + from nevergrad.optimization.lama.EnhancedRefinedGuidedMassQGSA_v93 import ( + EnhancedRefinedGuidedMassQGSA_v93, + ) lama_register["EnhancedRefinedGuidedMassQGSA_v93"] = EnhancedRefinedGuidedMassQGSA_v93 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v93")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedGuidedMassQGSA_v93 = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v93").set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v93", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedGuidedMassQGSA_v93")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedGuidedMassQGSA_v93 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedGuidedMassQGSA_v93" + ).set_name("LLAMAEnhancedRefinedGuidedMassQGSA_v93", register=True) +except Exception as e: # EnhancedRefinedGuidedMassQGSA_v93 print("EnhancedRefinedGuidedMassQGSA_v93 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution import EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution - - lama_register["EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution"] = EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution import ( + EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution"] = ( + EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution print("EnhancedRefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedHybridDEPSOWithDynamicAdaptation import EnhancedRefinedHybridDEPSOWithDynamicAdaptation - - lama_register["EnhancedRefinedHybridDEPSOWithDynamicAdaptation"] = EnhancedRefinedHybridDEPSOWithDynamicAdaptation - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation").set_name("LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", register=True) -except Exception as e: +try: # EnhancedRefinedHybridDEPSOWithDynamicAdaptation + from nevergrad.optimization.lama.EnhancedRefinedHybridDEPSOWithDynamicAdaptation import ( + EnhancedRefinedHybridDEPSOWithDynamicAdaptation, + ) + + lama_register["EnhancedRefinedHybridDEPSOWithDynamicAdaptation"] = ( + EnhancedRefinedHybridDEPSOWithDynamicAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation" + ).set_name("LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", register=True) +except Exception as e: # EnhancedRefinedHybridDEPSOWithDynamicAdaptation print("EnhancedRefinedHybridDEPSOWithDynamicAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution import EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution - - lama_register["EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( + EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMAEnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution print("EnhancedRefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedRefinedHybridOptimizer from nevergrad.optimization.lama.EnhancedRefinedHybridOptimizer import EnhancedRefinedHybridOptimizer lama_register["EnhancedRefinedHybridOptimizer"] = EnhancedRefinedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHybridOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridOptimizer").set_name("LLAMAEnhancedRefinedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHybridOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHybridOptimizer" + ).set_name("LLAMAEnhancedRefinedHybridOptimizer", register=True) +except Exception as e: # EnhancedRefinedHybridOptimizer print("EnhancedRefinedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 import EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 - - lama_register["EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3"] = EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3").set_name("LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3", register=True) -except Exception as e: +try: # EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 + from nevergrad.optimization.lama.EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 import ( + EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3, + ) + + lama_register["EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3"] = ( + EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3" + ).set_name("LLAMAEnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3", register=True) +except Exception as e: # EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 print("EnhancedRefinedHyperAdaptiveSinusoidalDifferentialSwarmV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer import EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer - - lama_register["EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer"] = EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) -except Exception as e: +try: # EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer + from nevergrad.optimization.lama.EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer import ( + EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer, + ) + + lama_register["EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( + EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMAEnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: # EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer print("EnhancedRefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) -try: +try: # EnhancedRefinedMetaNetAQAPSO from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSO import EnhancedRefinedMetaNetAQAPSO lama_register["EnhancedRefinedMetaNetAQAPSO"] = EnhancedRefinedMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSO").set_name("LLAMAEnhancedRefinedMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSO" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSO", register=True) +except Exception as e: # EnhancedRefinedMetaNetAQAPSO print("EnhancedRefinedMetaNetAQAPSO can not be imported: ", e) -try: +try: # EnhancedRefinedMetaNetAQAPSOv8 from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv8 import EnhancedRefinedMetaNetAQAPSOv8 lama_register["EnhancedRefinedMetaNetAQAPSOv8"] = EnhancedRefinedMetaNetAQAPSOv8 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedMetaNetAQAPSOv8 = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv8").set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedMetaNetAQAPSOv8 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSOv8" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv8", register=True) +except Exception as e: # EnhancedRefinedMetaNetAQAPSOv8 print("EnhancedRefinedMetaNetAQAPSOv8 can not be imported: ", e) -try: +try: # EnhancedRefinedMetaNetAQAPSOv9 from nevergrad.optimization.lama.EnhancedRefinedMetaNetAQAPSOv9 import EnhancedRefinedMetaNetAQAPSOv9 lama_register["EnhancedRefinedMetaNetAQAPSOv9"] = EnhancedRefinedMetaNetAQAPSOv9 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedMetaNetAQAPSOv9 = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv9").set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedMetaNetAQAPSOv9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedMetaNetAQAPSOv9 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedMetaNetAQAPSOv9" + ).set_name("LLAMAEnhancedRefinedMetaNetAQAPSOv9", register=True) +except Exception as e: # EnhancedRefinedMetaNetAQAPSOv9 print("EnhancedRefinedMetaNetAQAPSOv9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 import EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 - - lama_register["EnhancedRefinedOptimalDynamicPrecisionOptimizerV16"] = EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16 = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16").set_name("LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16", register=True) -except Exception as e: +try: # EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 + from nevergrad.optimization.lama.EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 import ( + EnhancedRefinedOptimalDynamicPrecisionOptimizerV16, + ) + + lama_register["EnhancedRefinedOptimalDynamicPrecisionOptimizerV16"] = ( + EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16" + ).set_name("LLAMAEnhancedRefinedOptimalDynamicPrecisionOptimizerV16", register=True) +except Exception as e: # EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 print("EnhancedRefinedOptimalDynamicPrecisionOptimizerV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization import EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization - - lama_register["EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization"] = EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: +try: # EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization import ( + EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( + EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAEnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization print("EnhancedRefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: +try: # EnhancedRefinedSpatialOptimizer from nevergrad.optimization.lama.EnhancedRefinedSpatialOptimizer import EnhancedRefinedSpatialOptimizer lama_register["EnhancedRefinedSpatialOptimizer"] = EnhancedRefinedSpatialOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedSpatialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedSpatialOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRefinedSpatialOptimizer").set_name("LLAMAEnhancedRefinedSpatialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedSpatialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedSpatialOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRefinedSpatialOptimizer" + ).set_name("LLAMAEnhancedRefinedSpatialOptimizer", register=True) +except Exception as e: # EnhancedRefinedSpatialOptimizer print("EnhancedRefinedSpatialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 import EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 - - lama_register["EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35"] = EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35").set_name("LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35", register=True) -except Exception as e: +try: # EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 + from nevergrad.optimization.lama.EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 import ( + EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35, + ) + + lama_register["EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35"] = ( + EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35" + ).set_name("LLAMAEnhancedRefinedUltimateEvolutionaryGradientOptimizerV35", register=True) +except Exception as e: # EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 print("EnhancedRefinedUltimateEvolutionaryGradientOptimizerV35 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v72 import EnhancedRefinedUltimateGuidedMassQGSA_v72 +try: # EnhancedRefinedUltimateGuidedMassQGSA_v72 + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v72 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v72, + ) lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v72"] = EnhancedRefinedUltimateGuidedMassQGSA_v72 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v72", register=True) +except Exception as e: # EnhancedRefinedUltimateGuidedMassQGSA_v72 print("EnhancedRefinedUltimateGuidedMassQGSA_v72 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v73 import EnhancedRefinedUltimateGuidedMassQGSA_v73 +try: # EnhancedRefinedUltimateGuidedMassQGSA_v73 + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v73 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v73, + ) lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v73"] = EnhancedRefinedUltimateGuidedMassQGSA_v73 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v73", register=True) +except Exception as e: # EnhancedRefinedUltimateGuidedMassQGSA_v73 print("EnhancedRefinedUltimateGuidedMassQGSA_v73 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v74 import EnhancedRefinedUltimateGuidedMassQGSA_v74 +try: # EnhancedRefinedUltimateGuidedMassQGSA_v74 + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v74 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v74, + ) lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v74"] = EnhancedRefinedUltimateGuidedMassQGSA_v74 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v74", register=True) +except Exception as e: # EnhancedRefinedUltimateGuidedMassQGSA_v74 print("EnhancedRefinedUltimateGuidedMassQGSA_v74 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v76 import EnhancedRefinedUltimateGuidedMassQGSA_v76 +try: # EnhancedRefinedUltimateGuidedMassQGSA_v76 + from nevergrad.optimization.lama.EnhancedRefinedUltimateGuidedMassQGSA_v76 import ( + EnhancedRefinedUltimateGuidedMassQGSA_v76, + ) lama_register["EnhancedRefinedUltimateGuidedMassQGSA_v76"] = EnhancedRefinedUltimateGuidedMassQGSA_v76 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76").set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76" + ).set_name("LLAMAEnhancedRefinedUltimateGuidedMassQGSA_v76", register=True) +except Exception as e: # EnhancedRefinedUltimateGuidedMassQGSA_v76 print("EnhancedRefinedUltimateGuidedMassQGSA_v76 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 import EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 - - lama_register["EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43"] = EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 - res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43").set_name("LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43", register=True) -except Exception as e: +try: # EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 + from nevergrad.optimization.lama.EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 import ( + EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43, + ) + + lama_register["EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43"] = ( + EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( + method="LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43" + ).set_name("LLAMAEnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43", register=True) +except Exception as e: # EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 print("EnhancedRefinedUltimatePrecisionEvolutionaryOptimizerV43 can not be imported: ", e) -try: +try: # EnhancedResilientAdaptivePSO from nevergrad.optimization.lama.EnhancedResilientAdaptivePSO import EnhancedResilientAdaptivePSO lama_register["EnhancedResilientAdaptivePSO"] = EnhancedResilientAdaptivePSO - res = NonObjectOptimizer(method="LLAMAEnhancedResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAEnhancedResilientAdaptivePSO").set_name("LLAMAEnhancedResilientAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedResilientAdaptivePSO = NonObjectOptimizer( + method="LLAMAEnhancedResilientAdaptivePSO" + ).set_name("LLAMAEnhancedResilientAdaptivePSO", register=True) +except Exception as e: # EnhancedResilientAdaptivePSO print("EnhancedResilientAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch import EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch - - lama_register["EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch"] = EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch - res = NonObjectOptimizer(method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch").set_name("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch", register=True) -except Exception as e: +try: # EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch + from nevergrad.optimization.lama.EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch import ( + EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch, + ) + + lama_register["EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch"] = ( + EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch" + ).set_name("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch", register=True) +except Exception as e: # EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch print("EnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) -try: +try: # EnhancedRotationalClimbOptimizer from nevergrad.optimization.lama.EnhancedRotationalClimbOptimizer import EnhancedRotationalClimbOptimizer lama_register["EnhancedRotationalClimbOptimizer"] = EnhancedRotationalClimbOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAEnhancedRotationalClimbOptimizer").set_name("LLAMAEnhancedRotationalClimbOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedRotationalClimbOptimizer" + ).set_name("LLAMAEnhancedRotationalClimbOptimizer", register=True) +except Exception as e: # EnhancedRotationalClimbOptimizer print("EnhancedRotationalClimbOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSelectiveEvolutionaryOptimizerV21 import EnhancedSelectiveEvolutionaryOptimizerV21 +try: # EnhancedSelectiveEvolutionaryOptimizerV21 + from nevergrad.optimization.lama.EnhancedSelectiveEvolutionaryOptimizerV21 import ( + EnhancedSelectiveEvolutionaryOptimizerV21, + ) lama_register["EnhancedSelectiveEvolutionaryOptimizerV21"] = EnhancedSelectiveEvolutionaryOptimizerV21 - res = NonObjectOptimizer(method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSelectiveEvolutionaryOptimizerV21 = NonObjectOptimizer(method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21").set_name("LLAMAEnhancedSelectiveEvolutionaryOptimizerV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSelectiveEvolutionaryOptimizerV21 = NonObjectOptimizer( + method="LLAMAEnhancedSelectiveEvolutionaryOptimizerV21" + ).set_name("LLAMAEnhancedSelectiveEvolutionaryOptimizerV21", register=True) +except Exception as e: # EnhancedSelectiveEvolutionaryOptimizerV21 print("EnhancedSelectiveEvolutionaryOptimizerV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution import EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution - - lama_register["EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution"] = EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution import ( + EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAEnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution print("EnhancedSelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: +try: # EnhancedSelfAdaptiveDE from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE import EnhancedSelfAdaptiveDE lama_register["EnhancedSelfAdaptiveDE"] = EnhancedSelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE").set_name("LLAMAEnhancedSelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSelfAdaptiveDE = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE").set_name( + "LLAMAEnhancedSelfAdaptiveDE", register=True + ) +except Exception as e: # EnhancedSelfAdaptiveDE print("EnhancedSelfAdaptiveDE can not be imported: ", e) -try: +try: # EnhancedSelfAdaptiveDE2 from nevergrad.optimization.lama.EnhancedSelfAdaptiveDE2 import EnhancedSelfAdaptiveDE2 lama_register["EnhancedSelfAdaptiveDE2"] = EnhancedSelfAdaptiveDE2 - res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSelfAdaptiveDE2 = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2").set_name("LLAMAEnhancedSelfAdaptiveDE2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSelfAdaptiveDE2 = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveDE2").set_name( + "LLAMAEnhancedSelfAdaptiveDE2", register=True + ) +except Exception as e: # EnhancedSelfAdaptiveDE2 print("EnhancedSelfAdaptiveDE2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSelfAdaptiveMemeticAlgorithm import EnhancedSelfAdaptiveMemeticAlgorithm +try: # EnhancedSelfAdaptiveMemeticAlgorithm + from nevergrad.optimization.lama.EnhancedSelfAdaptiveMemeticAlgorithm import ( + EnhancedSelfAdaptiveMemeticAlgorithm, + ) lama_register["EnhancedSelfAdaptiveMemeticAlgorithm"] = EnhancedSelfAdaptiveMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSelfAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm").set_name("LLAMAEnhancedSelfAdaptiveMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSelfAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedSelfAdaptiveMemeticAlgorithm" + ).set_name("LLAMAEnhancedSelfAdaptiveMemeticAlgorithm", register=True) +except Exception as e: # EnhancedSelfAdaptiveMemeticAlgorithm print("EnhancedSelfAdaptiveMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSequentialQuadraticAdaptiveEvolutionStrategy import EnhancedSequentialQuadraticAdaptiveEvolutionStrategy - - lama_register["EnhancedSequentialQuadraticAdaptiveEvolutionStrategy"] = EnhancedSequentialQuadraticAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy").set_name("LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy", register=True) -except Exception as e: +try: # EnhancedSequentialQuadraticAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.EnhancedSequentialQuadraticAdaptiveEvolutionStrategy import ( + EnhancedSequentialQuadraticAdaptiveEvolutionStrategy, + ) + + lama_register["EnhancedSequentialQuadraticAdaptiveEvolutionStrategy"] = ( + EnhancedSequentialQuadraticAdaptiveEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMAEnhancedSequentialQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: # EnhancedSequentialQuadraticAdaptiveEvolutionStrategy print("EnhancedSequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) -try: +try: # EnhancedSpatialAdaptiveEvolver from nevergrad.optimization.lama.EnhancedSpatialAdaptiveEvolver import EnhancedSpatialAdaptiveEvolver lama_register["EnhancedSpatialAdaptiveEvolver"] = EnhancedSpatialAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveEvolver").set_name("LLAMAEnhancedSpatialAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMAEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: # EnhancedSpatialAdaptiveEvolver print("EnhancedSpatialAdaptiveEvolver can not be imported: ", e) -try: +try: # EnhancedSpatialAdaptiveOptimizer from nevergrad.optimization.lama.EnhancedSpatialAdaptiveOptimizer import EnhancedSpatialAdaptiveOptimizer lama_register["EnhancedSpatialAdaptiveOptimizer"] = EnhancedSpatialAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSpatialAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveOptimizer").set_name("LLAMAEnhancedSpatialAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSpatialAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedSpatialAdaptiveOptimizer" + ).set_name("LLAMAEnhancedSpatialAdaptiveOptimizer", register=True) +except Exception as e: # EnhancedSpatialAdaptiveOptimizer print("EnhancedSpatialAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSpectralHybridOptimization import EnhancedSpectralHybridOptimization +try: # EnhancedSpectralHybridOptimization + from nevergrad.optimization.lama.EnhancedSpectralHybridOptimization import ( + EnhancedSpectralHybridOptimization, + ) lama_register["EnhancedSpectralHybridOptimization"] = EnhancedSpectralHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedSpectralHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSpectralHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedSpectralHybridOptimization").set_name("LLAMAEnhancedSpectralHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSpectralHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSpectralHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSpectralHybridOptimization" + ).set_name("LLAMAEnhancedSpectralHybridOptimization", register=True) +except Exception as e: # EnhancedSpectralHybridOptimization print("EnhancedSpectralHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover import EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover - - lama_register["EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover"] = EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover - res = NonObjectOptimizer(method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover = NonObjectOptimizer(method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover").set_name("LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover", register=True) -except Exception as e: - print("EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedStochasticGradientDifferentialEvolution import EnhancedStochasticGradientDifferentialEvolution - - lama_register["EnhancedStochasticGradientDifferentialEvolution"] = EnhancedStochasticGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAEnhancedStochasticGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStochasticGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAEnhancedStochasticGradientDifferentialEvolution").set_name("LLAMAEnhancedStochasticGradientDifferentialEvolution", register=True) -except Exception as e: +try: # EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover + from nevergrad.optimization.lama.EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover import ( + EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover, + ) + + lama_register["EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover"] = ( + EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover = NonObjectOptimizer( + method="LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover" + ).set_name( + "LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover", register=True + ) +except Exception as e: # EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover + print( + "EnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover can not be imported: ", e + ) +try: # EnhancedStochasticGradientDifferentialEvolution + from nevergrad.optimization.lama.EnhancedStochasticGradientDifferentialEvolution import ( + EnhancedStochasticGradientDifferentialEvolution, + ) + + lama_register["EnhancedStochasticGradientDifferentialEvolution"] = ( + EnhancedStochasticGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedStochasticGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStochasticGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAEnhancedStochasticGradientDifferentialEvolution" + ).set_name("LLAMAEnhancedStochasticGradientDifferentialEvolution", register=True) +except Exception as e: # EnhancedStochasticGradientDifferentialEvolution print("EnhancedStochasticGradientDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedStochasticMetaHeuristicOptimizer import EnhancedStochasticMetaHeuristicOptimizer +try: # EnhancedStochasticMetaHeuristicOptimizer + from nevergrad.optimization.lama.EnhancedStochasticMetaHeuristicOptimizer import ( + EnhancedStochasticMetaHeuristicOptimizer, + ) lama_register["EnhancedStochasticMetaHeuristicOptimizer"] = EnhancedStochasticMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAEnhancedStochasticMetaHeuristicOptimizer").set_name("LLAMAEnhancedStochasticMetaHeuristicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedStochasticMetaHeuristicOptimizer" + ).set_name("LLAMAEnhancedStochasticMetaHeuristicOptimizer", register=True) +except Exception as e: # EnhancedStochasticMetaHeuristicOptimizer print("EnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedStrategicAdaptiveOptimizer import EnhancedStrategicAdaptiveOptimizer +try: # EnhancedStrategicAdaptiveOptimizer + from nevergrad.optimization.lama.EnhancedStrategicAdaptiveOptimizer import ( + EnhancedStrategicAdaptiveOptimizer, + ) lama_register["EnhancedStrategicAdaptiveOptimizer"] = EnhancedStrategicAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAEnhancedStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStrategicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAEnhancedStrategicAdaptiveOptimizer").set_name("LLAMAEnhancedStrategicAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStrategicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAEnhancedStrategicAdaptiveOptimizer" + ).set_name("LLAMAEnhancedStrategicAdaptiveOptimizer", register=True) +except Exception as e: # EnhancedStrategicAdaptiveOptimizer print("EnhancedStrategicAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedStrategicMemoryAdaptiveStrategyV44 import EnhancedStrategicMemoryAdaptiveStrategyV44 +try: # EnhancedStrategicMemoryAdaptiveStrategyV44 + from nevergrad.optimization.lama.EnhancedStrategicMemoryAdaptiveStrategyV44 import ( + EnhancedStrategicMemoryAdaptiveStrategyV44, + ) lama_register["EnhancedStrategicMemoryAdaptiveStrategyV44"] = EnhancedStrategicMemoryAdaptiveStrategyV44 - res = NonObjectOptimizer(method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44 = NonObjectOptimizer(method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44").set_name("LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44 = NonObjectOptimizer( + method="LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44" + ).set_name("LLAMAEnhancedStrategicMemoryAdaptiveStrategyV44", register=True) +except Exception as e: # EnhancedStrategicMemoryAdaptiveStrategyV44 print("EnhancedStrategicMemoryAdaptiveStrategyV44 can not be imported: ", e) -try: +try: # EnhancedStrategicPSO from nevergrad.optimization.lama.EnhancedStrategicPSO import EnhancedStrategicPSO lama_register["EnhancedStrategicPSO"] = EnhancedStrategicPSO - res = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStrategicPSO = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO").set_name("LLAMAEnhancedStrategicPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStrategicPSO = NonObjectOptimizer(method="LLAMAEnhancedStrategicPSO").set_name( + "LLAMAEnhancedStrategicPSO", register=True + ) +except Exception as e: # EnhancedStrategicPSO print("EnhancedStrategicPSO can not be imported: ", e) -try: +try: # EnhancedStrategyDE from nevergrad.optimization.lama.EnhancedStrategyDE import EnhancedStrategyDE lama_register["EnhancedStrategyDE"] = EnhancedStrategyDE - res = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE").set_name("LLAMAEnhancedStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAEnhancedStrategyDE").set_name( + "LLAMAEnhancedStrategyDE", register=True + ) +except Exception as e: # EnhancedStrategyDE print("EnhancedStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimization import EnhancedSuperDynamicQuantumSwarmOptimization - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimization"] = EnhancedSuperDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimization", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimization import ( + EnhancedSuperDynamicQuantumSwarmOptimization, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimization"] = ( + EnhancedSuperDynamicQuantumSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimization" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimization print("EnhancedSuperDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV2 import EnhancedSuperDynamicQuantumSwarmOptimizationV2 - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV2"] = EnhancedSuperDynamicQuantumSwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimizationV2 + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV2 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV2, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV2"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV2", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimizationV2 print("EnhancedSuperDynamicQuantumSwarmOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV3 import EnhancedSuperDynamicQuantumSwarmOptimizationV3 - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV3"] = EnhancedSuperDynamicQuantumSwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimizationV3 + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV3 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV3, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV3"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV3", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimizationV3 print("EnhancedSuperDynamicQuantumSwarmOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV4 import EnhancedSuperDynamicQuantumSwarmOptimizationV4 - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV4"] = EnhancedSuperDynamicQuantumSwarmOptimizationV4 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimizationV4 + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV4 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV4, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV4"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV4", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimizationV4 print("EnhancedSuperDynamicQuantumSwarmOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV5 import EnhancedSuperDynamicQuantumSwarmOptimizationV5 - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV5"] = EnhancedSuperDynamicQuantumSwarmOptimizationV5 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimizationV5 + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV5 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV5, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV5"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV5", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimizationV5 print("EnhancedSuperDynamicQuantumSwarmOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV6 import EnhancedSuperDynamicQuantumSwarmOptimizationV6 - - lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV6"] = EnhancedSuperDynamicQuantumSwarmOptimizationV6 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6").set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6", register=True) -except Exception as e: +try: # EnhancedSuperDynamicQuantumSwarmOptimizationV6 + from nevergrad.optimization.lama.EnhancedSuperDynamicQuantumSwarmOptimizationV6 import ( + EnhancedSuperDynamicQuantumSwarmOptimizationV6, + ) + + lama_register["EnhancedSuperDynamicQuantumSwarmOptimizationV6"] = ( + EnhancedSuperDynamicQuantumSwarmOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6 = NonObjectOptimizer( + method="LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6" + ).set_name("LLAMAEnhancedSuperDynamicQuantumSwarmOptimizationV6", register=True) +except Exception as e: # EnhancedSuperDynamicQuantumSwarmOptimizationV6 print("EnhancedSuperDynamicQuantumSwarmOptimizationV6 can not be imported: ", e) -try: +try: # EnhancedSuperRefinedRAMEDS from nevergrad.optimization.lama.EnhancedSuperRefinedRAMEDS import EnhancedSuperRefinedRAMEDS lama_register["EnhancedSuperRefinedRAMEDS"] = EnhancedSuperRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperRefinedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS").set_name("LLAMAEnhancedSuperRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperRefinedRAMEDS = NonObjectOptimizer(method="LLAMAEnhancedSuperRefinedRAMEDS").set_name( + "LLAMAEnhancedSuperRefinedRAMEDS", register=True + ) +except Exception as e: # EnhancedSuperRefinedRAMEDS print("EnhancedSuperRefinedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 import EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 - - lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9"] = EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9").set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9", register=True) -except Exception as e: +try: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 + from nevergrad.optimization.lama.EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 import ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9, + ) + + lama_register["EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9"] = ( + EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 = NonObjectOptimizer( + method="LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9" + ).set_name("LLAMAEnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9", register=True) +except Exception as e: # EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 print("EnhancedSuperchargedAQAPSO_LS_DIW_AP_Refined_V9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSuperiorUltimateGuidedMassQGSA_v80 import EnhancedSuperiorUltimateGuidedMassQGSA_v80 +try: # EnhancedSuperiorUltimateGuidedMassQGSA_v80 + from nevergrad.optimization.lama.EnhancedSuperiorUltimateGuidedMassQGSA_v80 import ( + EnhancedSuperiorUltimateGuidedMassQGSA_v80, + ) lama_register["EnhancedSuperiorUltimateGuidedMassQGSA_v80"] = EnhancedSuperiorUltimateGuidedMassQGSA_v80 - res = NonObjectOptimizer(method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80 = NonObjectOptimizer(method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80").set_name("LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80 = NonObjectOptimizer( + method="LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80" + ).set_name("LLAMAEnhancedSuperiorUltimateGuidedMassQGSA_v80", register=True) +except Exception as e: # EnhancedSuperiorUltimateGuidedMassQGSA_v80 print("EnhancedSuperiorUltimateGuidedMassQGSA_v80 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedSupremeDynamicPrecisionOptimizerV1 import EnhancedSupremeDynamicPrecisionOptimizerV1 +try: # EnhancedSupremeDynamicPrecisionOptimizerV1 + from nevergrad.optimization.lama.EnhancedSupremeDynamicPrecisionOptimizerV1 import ( + EnhancedSupremeDynamicPrecisionOptimizerV1, + ) lama_register["EnhancedSupremeDynamicPrecisionOptimizerV1"] = EnhancedSupremeDynamicPrecisionOptimizerV1 - res = NonObjectOptimizer(method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1").set_name("LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1" + ).set_name("LLAMAEnhancedSupremeDynamicPrecisionOptimizerV1", register=True) +except Exception as e: # EnhancedSupremeDynamicPrecisionOptimizerV1 print("EnhancedSupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) -try: +try: # EnhancedSwarmHybridOptimization from nevergrad.optimization.lama.EnhancedSwarmHybridOptimization import EnhancedSwarmHybridOptimization lama_register["EnhancedSwarmHybridOptimization"] = EnhancedSwarmHybridOptimization - res = NonObjectOptimizer(method="LLAMAEnhancedSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedSwarmHybridOptimization = NonObjectOptimizer(method="LLAMAEnhancedSwarmHybridOptimization").set_name("LLAMAEnhancedSwarmHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedSwarmHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedSwarmHybridOptimization = NonObjectOptimizer( + method="LLAMAEnhancedSwarmHybridOptimization" + ).set_name("LLAMAEnhancedSwarmHybridOptimization", register=True) +except Exception as e: # EnhancedSwarmHybridOptimization print("EnhancedSwarmHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedTwoPhaseDynamicStrategyV39 import EnhancedTwoPhaseDynamicStrategyV39 +try: # EnhancedTwoPhaseDynamicStrategyV39 + from nevergrad.optimization.lama.EnhancedTwoPhaseDynamicStrategyV39 import ( + EnhancedTwoPhaseDynamicStrategyV39, + ) lama_register["EnhancedTwoPhaseDynamicStrategyV39"] = EnhancedTwoPhaseDynamicStrategyV39 - res = NonObjectOptimizer(method="LLAMAEnhancedTwoPhaseDynamicStrategyV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedTwoPhaseDynamicStrategyV39 = NonObjectOptimizer(method="LLAMAEnhancedTwoPhaseDynamicStrategyV39").set_name("LLAMAEnhancedTwoPhaseDynamicStrategyV39", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedTwoPhaseDynamicStrategyV39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedTwoPhaseDynamicStrategyV39 = NonObjectOptimizer( + method="LLAMAEnhancedTwoPhaseDynamicStrategyV39" + ).set_name("LLAMAEnhancedTwoPhaseDynamicStrategyV39", register=True) +except Exception as e: # EnhancedTwoPhaseDynamicStrategyV39 print("EnhancedTwoPhaseDynamicStrategyV39 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithm import EnhancedUltimateDynamicFireworkAlgorithm +try: # EnhancedUltimateDynamicFireworkAlgorithm + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithm import ( + EnhancedUltimateDynamicFireworkAlgorithm, + ) lama_register["EnhancedUltimateDynamicFireworkAlgorithm"] = EnhancedUltimateDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm").set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEnhancedUltimateDynamicFireworkAlgorithm" + ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithm", register=True) +except Exception as e: # EnhancedUltimateDynamicFireworkAlgorithm print("EnhancedUltimateDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithmImproved import EnhancedUltimateDynamicFireworkAlgorithmImproved - - lama_register["EnhancedUltimateDynamicFireworkAlgorithmImproved"] = EnhancedUltimateDynamicFireworkAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved").set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved", register=True) -except Exception as e: +try: # EnhancedUltimateDynamicFireworkAlgorithmImproved + from nevergrad.optimization.lama.EnhancedUltimateDynamicFireworkAlgorithmImproved import ( + EnhancedUltimateDynamicFireworkAlgorithmImproved, + ) + + lama_register["EnhancedUltimateDynamicFireworkAlgorithmImproved"] = ( + EnhancedUltimateDynamicFireworkAlgorithmImproved + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: # EnhancedUltimateDynamicFireworkAlgorithmImproved print("EnhancedUltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateEvolutionaryGradientOptimizerV36 import EnhancedUltimateEvolutionaryGradientOptimizerV36 - - lama_register["EnhancedUltimateEvolutionaryGradientOptimizerV36"] = EnhancedUltimateEvolutionaryGradientOptimizerV36 - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36 = NonObjectOptimizer(method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36").set_name("LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36", register=True) -except Exception as e: +try: # EnhancedUltimateEvolutionaryGradientOptimizerV36 + from nevergrad.optimization.lama.EnhancedUltimateEvolutionaryGradientOptimizerV36 import ( + EnhancedUltimateEvolutionaryGradientOptimizerV36, + ) + + lama_register["EnhancedUltimateEvolutionaryGradientOptimizerV36"] = ( + EnhancedUltimateEvolutionaryGradientOptimizerV36 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36" + ).set_name("LLAMAEnhancedUltimateEvolutionaryGradientOptimizerV36", register=True) +except Exception as e: # EnhancedUltimateEvolutionaryGradientOptimizerV36 print("EnhancedUltimateEvolutionaryGradientOptimizerV36 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP +try: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP, + ) lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined - - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined", register=True) -except Exception as e: +try: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined", register=True) +except Exception as e: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 - - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2", register=True) -except Exception as e: +try: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2", register=True) +except Exception as e: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 import EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 - - lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3"] = EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 - res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3").set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3", register=True) -except Exception as e: +try: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 + from nevergrad.optimization.lama.EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 import ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3, + ) + + lama_register["EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3"] = ( + EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 = NonObjectOptimizer( + method="LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3" + ).set_name("LLAMAEnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3", register=True) +except Exception as e: # EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 print("EnhancedUltimateRefinedAQAPSO_LS_DIW_AP_Refined_V3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 import EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 - - lama_register["EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44"] = EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 - res = NonObjectOptimizer(method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 = NonObjectOptimizer(method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44").set_name("LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44", register=True) -except Exception as e: +try: # EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 + from nevergrad.optimization.lama.EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 import ( + EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44, + ) + + lama_register["EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44"] = ( + EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 + ) + # res = NonObjectOptimizer(method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 = NonObjectOptimizer( + method="LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44" + ).set_name("LLAMAEnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44", register=True) +except Exception as e: # EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 print("EnhancedUltraRefinedPrecisionEvolutionaryOptimizerV44 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnsembleAdaptiveEvolutionaryAlgorithm import EnsembleAdaptiveEvolutionaryAlgorithm +try: # EnsembleAdaptiveEvolutionaryAlgorithm + from nevergrad.optimization.lama.EnsembleAdaptiveEvolutionaryAlgorithm import ( + EnsembleAdaptiveEvolutionaryAlgorithm, + ) lama_register["EnsembleAdaptiveEvolutionaryAlgorithm"] = EnsembleAdaptiveEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm").set_name("LLAMAEnsembleAdaptiveEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAEnsembleAdaptiveEvolutionaryAlgorithm" + ).set_name("LLAMAEnsembleAdaptiveEvolutionaryAlgorithm", register=True) +except Exception as e: # EnsembleAdaptiveEvolutionaryAlgorithm print("EnsembleAdaptiveEvolutionaryAlgorithm can not be imported: ", e) -try: +try: # EnsembleAdaptiveMemeticOptimizer from nevergrad.optimization.lama.EnsembleAdaptiveMemeticOptimizer import EnsembleAdaptiveMemeticOptimizer lama_register["EnsembleAdaptiveMemeticOptimizer"] = EnsembleAdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveMemeticOptimizer").set_name("LLAMAEnsembleAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAEnsembleAdaptiveMemeticOptimizer" + ).set_name("LLAMAEnsembleAdaptiveMemeticOptimizer", register=True) +except Exception as e: # EnsembleAdaptiveMemeticOptimizer print("EnsembleAdaptiveMemeticOptimizer can not be imported: ", e) -try: +try: # EnsembleAdaptiveQuantumDE from nevergrad.optimization.lama.EnsembleAdaptiveQuantumDE import EnsembleAdaptiveQuantumDE lama_register["EnsembleAdaptiveQuantumDE"] = EnsembleAdaptiveQuantumDE - res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE").set_name("LLAMAEnsembleAdaptiveQuantumDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMAEnsembleAdaptiveQuantumDE").set_name( + "LLAMAEnsembleAdaptiveQuantumDE", register=True + ) +except Exception as e: # EnsembleAdaptiveQuantumDE print("EnsembleAdaptiveQuantumDE can not be imported: ", e) -try: +try: # EnsembleDE from nevergrad.optimization.lama.EnsembleDE import EnsembleDE lama_register["EnsembleDE"] = EnsembleDE - res = NonObjectOptimizer(method="LLAMAEnsembleDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAEnsembleDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAEnsembleDE = NonObjectOptimizer(method="LLAMAEnsembleDE").set_name("LLAMAEnsembleDE", register=True) -except Exception as e: +except Exception as e: # EnsembleDE print("EnsembleDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EnsembleEvolutionaryCulturalSearch import EnsembleEvolutionaryCulturalSearch +try: # EnsembleEvolutionaryCulturalSearch + from nevergrad.optimization.lama.EnsembleEvolutionaryCulturalSearch import ( + EnsembleEvolutionaryCulturalSearch, + ) lama_register["EnsembleEvolutionaryCulturalSearch"] = EnsembleEvolutionaryCulturalSearch - res = NonObjectOptimizer(method="LLAMAEnsembleEvolutionaryCulturalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleEvolutionaryCulturalSearch = NonObjectOptimizer(method="LLAMAEnsembleEvolutionaryCulturalSearch").set_name("LLAMAEnsembleEvolutionaryCulturalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleEvolutionaryCulturalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleEvolutionaryCulturalSearch = NonObjectOptimizer( + method="LLAMAEnsembleEvolutionaryCulturalSearch" + ).set_name("LLAMAEnsembleEvolutionaryCulturalSearch", register=True) +except Exception as e: # EnsembleEvolutionaryCulturalSearch print("EnsembleEvolutionaryCulturalSearch can not be imported: ", e) -try: +try: # EnsembleHybridSearch from nevergrad.optimization.lama.EnsembleHybridSearch import EnsembleHybridSearch lama_register["EnsembleHybridSearch"] = EnsembleHybridSearch - res = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleHybridSearch = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch").set_name("LLAMAEnsembleHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleHybridSearch = NonObjectOptimizer(method="LLAMAEnsembleHybridSearch").set_name( + "LLAMAEnsembleHybridSearch", register=True + ) +except Exception as e: # EnsembleHybridSearch print("EnsembleHybridSearch can not be imported: ", e) -try: +try: # EnsembleMemeticAlgorithm from nevergrad.optimization.lama.EnsembleMemeticAlgorithm import EnsembleMemeticAlgorithm lama_register["EnsembleMemeticAlgorithm"] = EnsembleMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm").set_name("LLAMAEnsembleMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleMemeticAlgorithm = NonObjectOptimizer(method="LLAMAEnsembleMemeticAlgorithm").set_name( + "LLAMAEnsembleMemeticAlgorithm", register=True + ) +except Exception as e: # EnsembleMemeticAlgorithm print("EnsembleMemeticAlgorithm can not be imported: ", e) -try: +try: # EnsembleMutationAdaptiveDE from nevergrad.optimization.lama.EnsembleMutationAdaptiveDE import EnsembleMutationAdaptiveDE lama_register["EnsembleMutationAdaptiveDE"] = EnsembleMutationAdaptiveDE - res = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEnsembleMutationAdaptiveDE = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE").set_name("LLAMAEnsembleMutationAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEnsembleMutationAdaptiveDE = NonObjectOptimizer(method="LLAMAEnsembleMutationAdaptiveDE").set_name( + "LLAMAEnsembleMutationAdaptiveDE", register=True + ) +except Exception as e: # EnsembleMutationAdaptiveDE print("EnsembleMutationAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.EntropyEnhancedAdaptiveStrategyV61 import EntropyEnhancedAdaptiveStrategyV61 +try: # EntropyEnhancedAdaptiveStrategyV61 + from nevergrad.optimization.lama.EntropyEnhancedAdaptiveStrategyV61 import ( + EntropyEnhancedAdaptiveStrategyV61, + ) lama_register["EntropyEnhancedAdaptiveStrategyV61"] = EntropyEnhancedAdaptiveStrategyV61 - res = NonObjectOptimizer(method="LLAMAEntropyEnhancedAdaptiveStrategyV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEntropyEnhancedAdaptiveStrategyV61 = NonObjectOptimizer(method="LLAMAEntropyEnhancedAdaptiveStrategyV61").set_name("LLAMAEntropyEnhancedAdaptiveStrategyV61", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEntropyEnhancedAdaptiveStrategyV61")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEntropyEnhancedAdaptiveStrategyV61 = NonObjectOptimizer( + method="LLAMAEntropyEnhancedAdaptiveStrategyV61" + ).set_name("LLAMAEntropyEnhancedAdaptiveStrategyV61", register=True) +except Exception as e: # EntropyEnhancedAdaptiveStrategyV61 print("EntropyEnhancedAdaptiveStrategyV61 can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryConvergenceSpiralSearch import EvolutionaryConvergenceSpiralSearch +try: # EvolutionaryConvergenceSpiralSearch + from nevergrad.optimization.lama.EvolutionaryConvergenceSpiralSearch import ( + EvolutionaryConvergenceSpiralSearch, + ) lama_register["EvolutionaryConvergenceSpiralSearch"] = EvolutionaryConvergenceSpiralSearch - res = NonObjectOptimizer(method="LLAMAEvolutionaryConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryConvergenceSpiralSearch = NonObjectOptimizer(method="LLAMAEvolutionaryConvergenceSpiralSearch").set_name("LLAMAEvolutionaryConvergenceSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryConvergenceSpiralSearch = NonObjectOptimizer( + method="LLAMAEvolutionaryConvergenceSpiralSearch" + ).set_name("LLAMAEvolutionaryConvergenceSpiralSearch", register=True) +except Exception as e: # EvolutionaryConvergenceSpiralSearch print("EvolutionaryConvergenceSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryDynamicGradientSearch import EvolutionaryDynamicGradientSearch +try: # EvolutionaryDynamicGradientSearch + from nevergrad.optimization.lama.EvolutionaryDynamicGradientSearch import ( + EvolutionaryDynamicGradientSearch, + ) lama_register["EvolutionaryDynamicGradientSearch"] = EvolutionaryDynamicGradientSearch - res = NonObjectOptimizer(method="LLAMAEvolutionaryDynamicGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryDynamicGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryDynamicGradientSearch").set_name("LLAMAEvolutionaryDynamicGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryDynamicGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryDynamicGradientSearch = NonObjectOptimizer( + method="LLAMAEvolutionaryDynamicGradientSearch" + ).set_name("LLAMAEvolutionaryDynamicGradientSearch", register=True) +except Exception as e: # EvolutionaryDynamicGradientSearch print("EvolutionaryDynamicGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizer import EvolutionaryGradientHybridOptimizer +try: # EvolutionaryGradientHybridOptimizer + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizer import ( + EvolutionaryGradientHybridOptimizer, + ) lama_register["EvolutionaryGradientHybridOptimizer"] = EvolutionaryGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizer").set_name("LLAMAEvolutionaryGradientHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAEvolutionaryGradientHybridOptimizer" + ).set_name("LLAMAEvolutionaryGradientHybridOptimizer", register=True) +except Exception as e: # EvolutionaryGradientHybridOptimizer print("EvolutionaryGradientHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizerV2 import EvolutionaryGradientHybridOptimizerV2 +try: # EvolutionaryGradientHybridOptimizerV2 + from nevergrad.optimization.lama.EvolutionaryGradientHybridOptimizerV2 import ( + EvolutionaryGradientHybridOptimizerV2, + ) lama_register["EvolutionaryGradientHybridOptimizerV2"] = EvolutionaryGradientHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryGradientHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizerV2").set_name("LLAMAEvolutionaryGradientHybridOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryGradientHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAEvolutionaryGradientHybridOptimizerV2" + ).set_name("LLAMAEvolutionaryGradientHybridOptimizerV2", register=True) +except Exception as e: # EvolutionaryGradientHybridOptimizerV2 print("EvolutionaryGradientHybridOptimizerV2 can not be imported: ", e) -try: +try: # EvolutionaryGradientSearch from nevergrad.optimization.lama.EvolutionaryGradientSearch import EvolutionaryGradientSearch lama_register["EvolutionaryGradientSearch"] = EvolutionaryGradientSearch - res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch").set_name("LLAMAEvolutionaryGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryGradientSearch = NonObjectOptimizer(method="LLAMAEvolutionaryGradientSearch").set_name( + "LLAMAEvolutionaryGradientSearch", register=True + ) +except Exception as e: # EvolutionaryGradientSearch print("EvolutionaryGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryHarmonicFireworkAlgorithm import EvolutionaryHarmonicFireworkAlgorithm +try: # EvolutionaryHarmonicFireworkAlgorithm + from nevergrad.optimization.lama.EvolutionaryHarmonicFireworkAlgorithm import ( + EvolutionaryHarmonicFireworkAlgorithm, + ) lama_register["EvolutionaryHarmonicFireworkAlgorithm"] = EvolutionaryHarmonicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAEvolutionaryHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryHarmonicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAEvolutionaryHarmonicFireworkAlgorithm").set_name("LLAMAEvolutionaryHarmonicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryHarmonicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryHarmonicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAEvolutionaryHarmonicFireworkAlgorithm" + ).set_name("LLAMAEvolutionaryHarmonicFireworkAlgorithm", register=True) +except Exception as e: # EvolutionaryHarmonicFireworkAlgorithm print("EvolutionaryHarmonicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.EvolutionaryParticleSwarmOptimizer import EvolutionaryParticleSwarmOptimizer +try: # EvolutionaryParticleSwarmOptimizer + from nevergrad.optimization.lama.EvolutionaryParticleSwarmOptimizer import ( + EvolutionaryParticleSwarmOptimizer, + ) lama_register["EvolutionaryParticleSwarmOptimizer"] = EvolutionaryParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAEvolutionaryParticleSwarmOptimizer").set_name("LLAMAEvolutionaryParticleSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAEvolutionaryParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAEvolutionaryParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAEvolutionaryParticleSwarmOptimizer" + ).set_name("LLAMAEvolutionaryParticleSwarmOptimizer", register=True) +except Exception as e: # EvolutionaryParticleSwarmOptimizer print("EvolutionaryParticleSwarmOptimizer can not be imported: ", e) -try: +try: # ExDADe from nevergrad.optimization.lama.ExDADe import ExDADe lama_register["ExDADe"] = ExDADe - res = NonObjectOptimizer(method="LLAMAExDADe")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAExDADe")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAExDADe = NonObjectOptimizer(method="LLAMAExDADe").set_name("LLAMAExDADe", register=True) -except Exception as e: +except Exception as e: # ExDADe print("ExDADe can not be imported: ", e) -try: +try: # FEDE from nevergrad.optimization.lama.FEDE import FEDE lama_register["FEDE"] = FEDE - res = NonObjectOptimizer(method="LLAMAFEDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAFEDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAFEDE = NonObjectOptimizer(method="LLAMAFEDE").set_name("LLAMAFEDE", register=True) -except Exception as e: +except Exception as e: # FEDE print("FEDE can not be imported: ", e) -try: +try: # FTADEEM from nevergrad.optimization.lama.FTADEEM import FTADEEM lama_register["FTADEEM"] = FTADEEM - res = NonObjectOptimizer(method="LLAMAFTADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAFTADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAFTADEEM = NonObjectOptimizer(method="LLAMAFTADEEM").set_name("LLAMAFTADEEM", register=True) -except Exception as e: +except Exception as e: # FTADEEM print("FTADEEM can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - - lama_register["FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch - res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) -except Exception as e: +try: # FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + from nevergrad.optimization.lama.FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch import ( + FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: # FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch print("FinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalEnhancedDynamicLocalSearchFireworkAlgorithm import FinalEnhancedDynamicLocalSearchFireworkAlgorithm - - lama_register["FinalEnhancedDynamicLocalSearchFireworkAlgorithm"] = FinalEnhancedDynamicLocalSearchFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) -except Exception as e: +try: # FinalEnhancedDynamicLocalSearchFireworkAlgorithm + from nevergrad.optimization.lama.FinalEnhancedDynamicLocalSearchFireworkAlgorithm import ( + FinalEnhancedDynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["FinalEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( + FinalEnhancedDynamicLocalSearchFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: # FinalEnhancedDynamicLocalSearchFireworkAlgorithm print("FinalEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - - lama_register["FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - res = NonObjectOptimizer(method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) -except Exception as e: +try: # FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + from nevergrad.optimization.lama.FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: # FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch print("FinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 import FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 - - lama_register["FinalEnhancedRefinedUltimateGuidedMassQGSA_v75"] = FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 - res = NonObjectOptimizer(method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75 = NonObjectOptimizer(method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75").set_name("LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75", register=True) -except Exception as e: +try: # FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 + from nevergrad.optimization.lama.FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 import ( + FinalEnhancedRefinedUltimateGuidedMassQGSA_v75, + ) + + lama_register["FinalEnhancedRefinedUltimateGuidedMassQGSA_v75"] = ( + FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 + ) + # res = NonObjectOptimizer(method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75 = NonObjectOptimizer( + method="LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75" + ).set_name("LLAMAFinalEnhancedRefinedUltimateGuidedMassQGSA_v75", register=True) +except Exception as e: # FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 print("FinalEnhancedRefinedUltimateGuidedMassQGSA_v75 can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithm import FinalOptimizedEnhancedDynamicFireworkAlgorithm - - lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithm"] = FinalOptimizedEnhancedDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm").set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm", register=True) -except Exception as e: +try: # FinalOptimizedEnhancedDynamicFireworkAlgorithm + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithm import ( + FinalOptimizedEnhancedDynamicFireworkAlgorithm, + ) + + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithm"] = ( + FinalOptimizedEnhancedDynamicFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: # FinalOptimizedEnhancedDynamicFireworkAlgorithm print("FinalOptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined import FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined - - lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined"] = FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined - res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined").set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined", register=True) -except Exception as e: +try: # FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined + from nevergrad.optimization.lama.FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined import ( + FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined, + ) + + lama_register["FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined"] = ( + FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined + ) + # res = NonObjectOptimizer(method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined = NonObjectOptimizer( + method="LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined" + ).set_name("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined", register=True) +except Exception as e: # FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined print("FinalOptimizedEnhancedDynamicFireworkAlgorithmRefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.FineTunedCohortDiversityOptimizer import FineTunedCohortDiversityOptimizer +try: # FineTunedCohortDiversityOptimizer + from nevergrad.optimization.lama.FineTunedCohortDiversityOptimizer import ( + FineTunedCohortDiversityOptimizer, + ) lama_register["FineTunedCohortDiversityOptimizer"] = FineTunedCohortDiversityOptimizer - res = NonObjectOptimizer(method="LLAMAFineTunedCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFineTunedCohortDiversityOptimizer = NonObjectOptimizer(method="LLAMAFineTunedCohortDiversityOptimizer").set_name("LLAMAFineTunedCohortDiversityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFineTunedCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFineTunedCohortDiversityOptimizer = NonObjectOptimizer( + method="LLAMAFineTunedCohortDiversityOptimizer" + ).set_name("LLAMAFineTunedCohortDiversityOptimizer", register=True) +except Exception as e: # FineTunedCohortDiversityOptimizer print("FineTunedCohortDiversityOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.FineTunedFocusedAdaptiveOptimizer import FineTunedFocusedAdaptiveOptimizer +try: # FineTunedFocusedAdaptiveOptimizer + from nevergrad.optimization.lama.FineTunedFocusedAdaptiveOptimizer import ( + FineTunedFocusedAdaptiveOptimizer, + ) lama_register["FineTunedFocusedAdaptiveOptimizer"] = FineTunedFocusedAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAFineTunedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFineTunedFocusedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAFineTunedFocusedAdaptiveOptimizer").set_name("LLAMAFineTunedFocusedAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFineTunedFocusedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFineTunedFocusedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAFineTunedFocusedAdaptiveOptimizer" + ).set_name("LLAMAFineTunedFocusedAdaptiveOptimizer", register=True) +except Exception as e: # FineTunedFocusedAdaptiveOptimizer print("FineTunedFocusedAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.FineTunedProgressiveAdaptiveSearch import FineTunedProgressiveAdaptiveSearch +try: # FineTunedProgressiveAdaptiveSearch + from nevergrad.optimization.lama.FineTunedProgressiveAdaptiveSearch import ( + FineTunedProgressiveAdaptiveSearch, + ) lama_register["FineTunedProgressiveAdaptiveSearch"] = FineTunedProgressiveAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAFineTunedProgressiveAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFineTunedProgressiveAdaptiveSearch = NonObjectOptimizer(method="LLAMAFineTunedProgressiveAdaptiveSearch").set_name("LLAMAFineTunedProgressiveAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFineTunedProgressiveAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFineTunedProgressiveAdaptiveSearch = NonObjectOptimizer( + method="LLAMAFineTunedProgressiveAdaptiveSearch" + ).set_name("LLAMAFineTunedProgressiveAdaptiveSearch", register=True) +except Exception as e: # FineTunedProgressiveAdaptiveSearch print("FineTunedProgressiveAdaptiveSearch can not be imported: ", e) -try: +try: # FocusedBalancedAdaptivePSO from nevergrad.optimization.lama.FocusedBalancedAdaptivePSO import FocusedBalancedAdaptivePSO lama_register["FocusedBalancedAdaptivePSO"] = FocusedBalancedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO").set_name("LLAMAFocusedBalancedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFocusedBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAFocusedBalancedAdaptivePSO").set_name( + "LLAMAFocusedBalancedAdaptivePSO", register=True + ) +except Exception as e: # FocusedBalancedAdaptivePSO print("FocusedBalancedAdaptivePSO can not be imported: ", e) -try: +try: # FocusedEvolutionStrategy from nevergrad.optimization.lama.FocusedEvolutionStrategy import FocusedEvolutionStrategy lama_register["FocusedEvolutionStrategy"] = FocusedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy").set_name("LLAMAFocusedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFocusedEvolutionStrategy = NonObjectOptimizer(method="LLAMAFocusedEvolutionStrategy").set_name( + "LLAMAFocusedEvolutionStrategy", register=True + ) +except Exception as e: # FocusedEvolutionStrategy print("FocusedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.FractionalOrderClusterHybridOptimization import FractionalOrderClusterHybridOptimization +try: # FractionalOrderClusterHybridOptimization + from nevergrad.optimization.lama.FractionalOrderClusterHybridOptimization import ( + FractionalOrderClusterHybridOptimization, + ) lama_register["FractionalOrderClusterHybridOptimization"] = FractionalOrderClusterHybridOptimization - res = NonObjectOptimizer(method="LLAMAFractionalOrderClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFractionalOrderClusterHybridOptimization = NonObjectOptimizer(method="LLAMAFractionalOrderClusterHybridOptimization").set_name("LLAMAFractionalOrderClusterHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAFractionalOrderClusterHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFractionalOrderClusterHybridOptimization = NonObjectOptimizer( + method="LLAMAFractionalOrderClusterHybridOptimization" + ).set_name("LLAMAFractionalOrderClusterHybridOptimization", register=True) +except Exception as e: # FractionalOrderClusterHybridOptimization print("FractionalOrderClusterHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.FurtherEnhancedHybridMetaHeuristicOptimizerV13 import FurtherEnhancedHybridMetaHeuristicOptimizerV13 - - lama_register["FurtherEnhancedHybridMetaHeuristicOptimizerV13"] = FurtherEnhancedHybridMetaHeuristicOptimizerV13 - res = NonObjectOptimizer(method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer(method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13").set_name("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13", register=True) -except Exception as e: +try: # FurtherEnhancedHybridMetaHeuristicOptimizerV13 + from nevergrad.optimization.lama.FurtherEnhancedHybridMetaHeuristicOptimizerV13 import ( + FurtherEnhancedHybridMetaHeuristicOptimizerV13, + ) + + lama_register["FurtherEnhancedHybridMetaHeuristicOptimizerV13"] = ( + FurtherEnhancedHybridMetaHeuristicOptimizerV13 + ) + # res = NonObjectOptimizer(method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13 = NonObjectOptimizer( + method="LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13" + ).set_name("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13", register=True) +except Exception as e: # FurtherEnhancedHybridMetaHeuristicOptimizerV13 print("FurtherEnhancedHybridMetaHeuristicOptimizerV13 can not be imported: ", e) -try: +try: # GEEA from nevergrad.optimization.lama.GEEA import GEEA lama_register["GEEA"] = GEEA - res = NonObjectOptimizer(method="LLAMAGEEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAGEEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAGEEA = NonObjectOptimizer(method="LLAMAGEEA").set_name("LLAMAGEEA", register=True) -except Exception as e: +except Exception as e: # GEEA print("GEEA can not be imported: ", e) -try: +try: # GESA from nevergrad.optimization.lama.GESA import GESA lama_register["GESA"] = GESA - res = NonObjectOptimizer(method="LLAMAGESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAGESA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAGESA = NonObjectOptimizer(method="LLAMAGESA").set_name("LLAMAGESA", register=True) -except Exception as e: +except Exception as e: # GESA print("GESA can not be imported: ", e) -try: +try: # GGAES from nevergrad.optimization.lama.GGAES import GGAES lama_register["GGAES"] = GGAES - res = NonObjectOptimizer(method="LLAMAGGAES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAGGAES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAGGAES = NonObjectOptimizer(method="LLAMAGGAES").set_name("LLAMAGGAES", register=True) -except Exception as e: +except Exception as e: # GGAES print("GGAES can not be imported: ", e) -try: +try: # GIDE from nevergrad.optimization.lama.GIDE import GIDE lama_register["GIDE"] = GIDE - res = NonObjectOptimizer(method="LLAMAGIDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAGIDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAGIDE = NonObjectOptimizer(method="LLAMAGIDE").set_name("LLAMAGIDE", register=True) -except Exception as e: +except Exception as e: # GIDE print("GIDE can not be imported: ", e) -try: +try: # GaussianAdaptivePSO from nevergrad.optimization.lama.GaussianAdaptivePSO import GaussianAdaptivePSO lama_register["GaussianAdaptivePSO"] = GaussianAdaptivePSO - res = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGaussianAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO").set_name("LLAMAGaussianAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGaussianAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianAdaptivePSO").set_name( + "LLAMAGaussianAdaptivePSO", register=True + ) +except Exception as e: # GaussianAdaptivePSO print("GaussianAdaptivePSO can not be imported: ", e) -try: +try: # GaussianEnhancedAdaptivePSO from nevergrad.optimization.lama.GaussianEnhancedAdaptivePSO import GaussianEnhancedAdaptivePSO lama_register["GaussianEnhancedAdaptivePSO"] = GaussianEnhancedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGaussianEnhancedAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO").set_name("LLAMAGaussianEnhancedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGaussianEnhancedAdaptivePSO = NonObjectOptimizer(method="LLAMAGaussianEnhancedAdaptivePSO").set_name( + "LLAMAGaussianEnhancedAdaptivePSO", register=True + ) +except Exception as e: # GaussianEnhancedAdaptivePSO print("GaussianEnhancedAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientAssistedDifferentialCrossover import GradientAssistedDifferentialCrossover +try: # GradientAssistedDifferentialCrossover + from nevergrad.optimization.lama.GradientAssistedDifferentialCrossover import ( + GradientAssistedDifferentialCrossover, + ) lama_register["GradientAssistedDifferentialCrossover"] = GradientAssistedDifferentialCrossover - res = NonObjectOptimizer(method="LLAMAGradientAssistedDifferentialCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientAssistedDifferentialCrossover = NonObjectOptimizer(method="LLAMAGradientAssistedDifferentialCrossover").set_name("LLAMAGradientAssistedDifferentialCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientAssistedDifferentialCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientAssistedDifferentialCrossover = NonObjectOptimizer( + method="LLAMAGradientAssistedDifferentialCrossover" + ).set_name("LLAMAGradientAssistedDifferentialCrossover", register=True) +except Exception as e: # GradientAssistedDifferentialCrossover print("GradientAssistedDifferentialCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientBalancedEvolutionStrategy import GradientBalancedEvolutionStrategy +try: # GradientBalancedEvolutionStrategy + from nevergrad.optimization.lama.GradientBalancedEvolutionStrategy import ( + GradientBalancedEvolutionStrategy, + ) lama_register["GradientBalancedEvolutionStrategy"] = GradientBalancedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGradientBalancedEvolutionStrategy").set_name("LLAMAGradientBalancedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAGradientBalancedEvolutionStrategy" + ).set_name("LLAMAGradientBalancedEvolutionStrategy", register=True) +except Exception as e: # GradientBalancedEvolutionStrategy print("GradientBalancedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientBasedAdaptiveCovarianceMatrixAdaptation import GradientBasedAdaptiveCovarianceMatrixAdaptation - - lama_register["GradientBasedAdaptiveCovarianceMatrixAdaptation"] = GradientBasedAdaptiveCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation").set_name("LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation", register=True) -except Exception as e: +try: # GradientBasedAdaptiveCovarianceMatrixAdaptation + from nevergrad.optimization.lama.GradientBasedAdaptiveCovarianceMatrixAdaptation import ( + GradientBasedAdaptiveCovarianceMatrixAdaptation, + ) + + lama_register["GradientBasedAdaptiveCovarianceMatrixAdaptation"] = ( + GradientBasedAdaptiveCovarianceMatrixAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMAGradientBasedAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: # GradientBasedAdaptiveCovarianceMatrixAdaptation print("GradientBasedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) -try: +try: # GradientBoostedMemoryAnnealing from nevergrad.optimization.lama.GradientBoostedMemoryAnnealing import GradientBoostedMemoryAnnealing lama_register["GradientBoostedMemoryAnnealing"] = GradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMAGradientBoostedMemoryAnnealing").set_name("LLAMAGradientBoostedMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMAGradientBoostedMemoryAnnealing" + ).set_name("LLAMAGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # GradientBoostedMemoryAnnealing print("GradientBoostedMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientEnhancedAdaptiveAnnealing import GradientEnhancedAdaptiveAnnealing +try: # GradientEnhancedAdaptiveAnnealing + from nevergrad.optimization.lama.GradientEnhancedAdaptiveAnnealing import ( + GradientEnhancedAdaptiveAnnealing, + ) lama_register["GradientEnhancedAdaptiveAnnealing"] = GradientEnhancedAdaptiveAnnealing - res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientEnhancedAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveAnnealing").set_name("LLAMAGradientEnhancedAdaptiveAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientEnhancedAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAGradientEnhancedAdaptiveAnnealing" + ).set_name("LLAMAGradientEnhancedAdaptiveAnnealing", register=True) +except Exception as e: # GradientEnhancedAdaptiveAnnealing print("GradientEnhancedAdaptiveAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientEnhancedAdaptiveDifferentialEvolution import GradientEnhancedAdaptiveDifferentialEvolution - - lama_register["GradientEnhancedAdaptiveDifferentialEvolution"] = GradientEnhancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAGradientEnhancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # GradientEnhancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.GradientEnhancedAdaptiveDifferentialEvolution import ( + GradientEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["GradientEnhancedAdaptiveDifferentialEvolution"] = ( + GradientEnhancedAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAGradientEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAGradientEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # GradientEnhancedAdaptiveDifferentialEvolution print("GradientEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # GradientEstimationSearch from nevergrad.optimization.lama.GradientEstimationSearch import GradientEstimationSearch lama_register["GradientEstimationSearch"] = GradientEstimationSearch - res = NonObjectOptimizer(method="LLAMAGradientEstimationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientEstimationSearch = NonObjectOptimizer(method="LLAMAGradientEstimationSearch").set_name("LLAMAGradientEstimationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientEstimationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientEstimationSearch = NonObjectOptimizer(method="LLAMAGradientEstimationSearch").set_name( + "LLAMAGradientEstimationSearch", register=True + ) +except Exception as e: # GradientEstimationSearch print("GradientEstimationSearch can not be imported: ", e) -try: +try: # GradientGuidedClusterSearch from nevergrad.optimization.lama.GradientGuidedClusterSearch import GradientGuidedClusterSearch lama_register["GradientGuidedClusterSearch"] = GradientGuidedClusterSearch - res = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch").set_name("LLAMAGradientGuidedClusterSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientGuidedClusterSearch = NonObjectOptimizer(method="LLAMAGradientGuidedClusterSearch").set_name( + "LLAMAGradientGuidedClusterSearch", register=True + ) +except Exception as e: # GradientGuidedClusterSearch print("GradientGuidedClusterSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientGuidedDifferentialEvolution import GradientGuidedDifferentialEvolution +try: # GradientGuidedDifferentialEvolution + from nevergrad.optimization.lama.GradientGuidedDifferentialEvolution import ( + GradientGuidedDifferentialEvolution, + ) lama_register["GradientGuidedDifferentialEvolution"] = GradientGuidedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAGradientGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientGuidedDifferentialEvolution = NonObjectOptimizer(method="LLAMAGradientGuidedDifferentialEvolution").set_name("LLAMAGradientGuidedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientGuidedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientGuidedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAGradientGuidedDifferentialEvolution" + ).set_name("LLAMAGradientGuidedDifferentialEvolution", register=True) +except Exception as e: # GradientGuidedDifferentialEvolution print("GradientGuidedDifferentialEvolution can not be imported: ", e) -try: +try: # GradientGuidedEvolutionStrategy from nevergrad.optimization.lama.GradientGuidedEvolutionStrategy import GradientGuidedEvolutionStrategy lama_register["GradientGuidedEvolutionStrategy"] = GradientGuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGradientGuidedEvolutionStrategy").set_name("LLAMAGradientGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAGradientGuidedEvolutionStrategy" + ).set_name("LLAMAGradientGuidedEvolutionStrategy", register=True) +except Exception as e: # GradientGuidedEvolutionStrategy print("GradientGuidedEvolutionStrategy can not be imported: ", e) -try: +try: # GradientGuidedHybridPSO from nevergrad.optimization.lama.GradientGuidedHybridPSO import GradientGuidedHybridPSO lama_register["GradientGuidedHybridPSO"] = GradientGuidedHybridPSO - res = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO").set_name("LLAMAGradientGuidedHybridPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAGradientGuidedHybridPSO").set_name( + "LLAMAGradientGuidedHybridPSO", register=True + ) +except Exception as e: # GradientGuidedHybridPSO print("GradientGuidedHybridPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientInformedAdaptiveDirectionSearch import GradientInformedAdaptiveDirectionSearch +try: # GradientInformedAdaptiveDirectionSearch + from nevergrad.optimization.lama.GradientInformedAdaptiveDirectionSearch import ( + GradientInformedAdaptiveDirectionSearch, + ) lama_register["GradientInformedAdaptiveDirectionSearch"] = GradientInformedAdaptiveDirectionSearch - res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveDirectionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientInformedAdaptiveDirectionSearch = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveDirectionSearch").set_name("LLAMAGradientInformedAdaptiveDirectionSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveDirectionSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientInformedAdaptiveDirectionSearch = NonObjectOptimizer( + method="LLAMAGradientInformedAdaptiveDirectionSearch" + ).set_name("LLAMAGradientInformedAdaptiveDirectionSearch", register=True) +except Exception as e: # GradientInformedAdaptiveDirectionSearch print("GradientInformedAdaptiveDirectionSearch can not be imported: ", e) -try: +try: # GradientInformedAdaptiveSearch from nevergrad.optimization.lama.GradientInformedAdaptiveSearch import GradientInformedAdaptiveSearch lama_register["GradientInformedAdaptiveSearch"] = GradientInformedAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientInformedAdaptiveSearch = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveSearch").set_name("LLAMAGradientInformedAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientInformedAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientInformedAdaptiveSearch = NonObjectOptimizer( + method="LLAMAGradientInformedAdaptiveSearch" + ).set_name("LLAMAGradientInformedAdaptiveSearch", register=True) +except Exception as e: # GradientInformedAdaptiveSearch print("GradientInformedAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientInformedParticleOptimizer import GradientInformedParticleOptimizer +try: # GradientInformedParticleOptimizer + from nevergrad.optimization.lama.GradientInformedParticleOptimizer import ( + GradientInformedParticleOptimizer, + ) lama_register["GradientInformedParticleOptimizer"] = GradientInformedParticleOptimizer - res = NonObjectOptimizer(method="LLAMAGradientInformedParticleOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientInformedParticleOptimizer = NonObjectOptimizer(method="LLAMAGradientInformedParticleOptimizer").set_name("LLAMAGradientInformedParticleOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientInformedParticleOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientInformedParticleOptimizer = NonObjectOptimizer( + method="LLAMAGradientInformedParticleOptimizer" + ).set_name("LLAMAGradientInformedParticleOptimizer", register=True) +except Exception as e: # GradientInformedParticleOptimizer print("GradientInformedParticleOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.GradientSpiralDifferentialEnhancerV5 import GradientSpiralDifferentialEnhancerV5 +try: # GradientSpiralDifferentialEnhancerV5 + from nevergrad.optimization.lama.GradientSpiralDifferentialEnhancerV5 import ( + GradientSpiralDifferentialEnhancerV5, + ) lama_register["GradientSpiralDifferentialEnhancerV5"] = GradientSpiralDifferentialEnhancerV5 - res = NonObjectOptimizer(method="LLAMAGradientSpiralDifferentialEnhancerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGradientSpiralDifferentialEnhancerV5 = NonObjectOptimizer(method="LLAMAGradientSpiralDifferentialEnhancerV5").set_name("LLAMAGradientSpiralDifferentialEnhancerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGradientSpiralDifferentialEnhancerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGradientSpiralDifferentialEnhancerV5 = NonObjectOptimizer( + method="LLAMAGradientSpiralDifferentialEnhancerV5" + ).set_name("LLAMAGradientSpiralDifferentialEnhancerV5", register=True) +except Exception as e: # GradientSpiralDifferentialEnhancerV5 print("GradientSpiralDifferentialEnhancerV5 can not be imported: ", e) -try: +try: # GravitationalSwarmIntelligence from nevergrad.optimization.lama.GravitationalSwarmIntelligence import GravitationalSwarmIntelligence lama_register["GravitationalSwarmIntelligence"] = GravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAGravitationalSwarmIntelligence").set_name("LLAMAGravitationalSwarmIntelligence", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAGravitationalSwarmIntelligence" + ).set_name("LLAMAGravitationalSwarmIntelligence", register=True) +except Exception as e: # GravitationalSwarmIntelligence print("GravitationalSwarmIntelligence can not be imported: ", e) -try: +try: # GreedyDiversityMultiStrategySADE from nevergrad.optimization.lama.GreedyDiversityMultiStrategySADE import GreedyDiversityMultiStrategySADE lama_register["GreedyDiversityMultiStrategySADE"] = GreedyDiversityMultiStrategySADE - res = NonObjectOptimizer(method="LLAMAGreedyDiversityMultiStrategySADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGreedyDiversityMultiStrategySADE = NonObjectOptimizer(method="LLAMAGreedyDiversityMultiStrategySADE").set_name("LLAMAGreedyDiversityMultiStrategySADE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGreedyDiversityMultiStrategySADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGreedyDiversityMultiStrategySADE = NonObjectOptimizer( + method="LLAMAGreedyDiversityMultiStrategySADE" + ).set_name("LLAMAGreedyDiversityMultiStrategySADE", register=True) +except Exception as e: # GreedyDiversityMultiStrategySADE print("GreedyDiversityMultiStrategySADE can not be imported: ", e) -try: +try: # GreedyDynamicMultiStrategyDE from nevergrad.optimization.lama.GreedyDynamicMultiStrategyDE import GreedyDynamicMultiStrategyDE lama_register["GreedyDynamicMultiStrategyDE"] = GreedyDynamicMultiStrategyDE - res = NonObjectOptimizer(method="LLAMAGreedyDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGreedyDynamicMultiStrategyDE = NonObjectOptimizer(method="LLAMAGreedyDynamicMultiStrategyDE").set_name("LLAMAGreedyDynamicMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGreedyDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGreedyDynamicMultiStrategyDE = NonObjectOptimizer( + method="LLAMAGreedyDynamicMultiStrategyDE" + ).set_name("LLAMAGreedyDynamicMultiStrategyDE", register=True) +except Exception as e: # GreedyDynamicMultiStrategyDE print("GreedyDynamicMultiStrategyDE can not be imported: ", e) -try: +try: # GuidedEvolutionStrategy from nevergrad.optimization.lama.GuidedEvolutionStrategy import GuidedEvolutionStrategy lama_register["GuidedEvolutionStrategy"] = GuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy").set_name("LLAMAGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAGuidedEvolutionStrategy").set_name( + "LLAMAGuidedEvolutionStrategy", register=True + ) +except Exception as e: # GuidedEvolutionStrategy print("GuidedEvolutionStrategy can not be imported: ", e) -try: +try: # GuidedMutationOptimizer from nevergrad.optimization.lama.GuidedMutationOptimizer import GuidedMutationOptimizer lama_register["GuidedMutationOptimizer"] = GuidedMutationOptimizer - res = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer").set_name("LLAMAGuidedMutationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAGuidedMutationOptimizer").set_name( + "LLAMAGuidedMutationOptimizer", register=True + ) +except Exception as e: # GuidedMutationOptimizer print("GuidedMutationOptimizer can not be imported: ", e) -try: +try: # HADE from nevergrad.optimization.lama.HADE import HADE lama_register["HADE"] = HADE - res = NonObjectOptimizer(method="LLAMAHADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHADE = NonObjectOptimizer(method="LLAMAHADE").set_name("LLAMAHADE", register=True) -except Exception as e: +except Exception as e: # HADE print("HADE can not be imported: ", e) -try: +try: # HADEEM from nevergrad.optimization.lama.HADEEM import HADEEM lama_register["HADEEM"] = HADEEM - res = NonObjectOptimizer(method="LLAMAHADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHADEEM = NonObjectOptimizer(method="LLAMAHADEEM").set_name("LLAMAHADEEM", register=True) -except Exception as e: +except Exception as e: # HADEEM print("HADEEM can not be imported: ", e) -try: +try: # HADEMI from nevergrad.optimization.lama.HADEMI import HADEMI lama_register["HADEMI"] = HADEMI - res = NonObjectOptimizer(method="LLAMAHADEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHADEMI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHADEMI = NonObjectOptimizer(method="LLAMAHADEMI").set_name("LLAMAHADEMI", register=True) -except Exception as e: +except Exception as e: # HADEMI print("HADEMI can not be imported: ", e) -try: +try: # HAVCDE from nevergrad.optimization.lama.HAVCDE import HAVCDE lama_register["HAVCDE"] = HAVCDE - res = NonObjectOptimizer(method="LLAMAHAVCDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHAVCDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHAVCDE = NonObjectOptimizer(method="LLAMAHAVCDE").set_name("LLAMAHAVCDE", register=True) -except Exception as e: +except Exception as e: # HAVCDE print("HAVCDE can not be imported: ", e) -try: +try: # HEAS from nevergrad.optimization.lama.HEAS import HEAS lama_register["HEAS"] = HEAS - res = NonObjectOptimizer(method="LLAMAHEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHEAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHEAS = NonObjectOptimizer(method="LLAMAHEAS").set_name("LLAMAHEAS", register=True) -except Exception as e: +except Exception as e: # HEAS print("HEAS can not be imported: ", e) -try: +try: # HarmonyFireworkOptimizer from nevergrad.optimization.lama.HarmonyFireworkOptimizer import HarmonyFireworkOptimizer lama_register["HarmonyFireworkOptimizer"] = HarmonyFireworkOptimizer - res = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer").set_name("LLAMAHarmonyFireworkOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHarmonyFireworkOptimizer = NonObjectOptimizer(method="LLAMAHarmonyFireworkOptimizer").set_name( + "LLAMAHarmonyFireworkOptimizer", register=True + ) +except Exception as e: # HarmonyFireworkOptimizer print("HarmonyFireworkOptimizer can not be imported: ", e) -try: +try: # HarmonyTabuOptimization from nevergrad.optimization.lama.HarmonyTabuOptimization import HarmonyTabuOptimization lama_register["HarmonyTabuOptimization"] = HarmonyTabuOptimization - res = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization").set_name("LLAMAHarmonyTabuOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHarmonyTabuOptimization = NonObjectOptimizer(method="LLAMAHarmonyTabuOptimization").set_name( + "LLAMAHarmonyTabuOptimization", register=True + ) +except Exception as e: # HarmonyTabuOptimization print("HarmonyTabuOptimization can not be imported: ", e) -try: +try: # HierarchicalAdaptiveAnnealing from nevergrad.optimization.lama.HierarchicalAdaptiveAnnealing import HierarchicalAdaptiveAnnealing lama_register["HierarchicalAdaptiveAnnealing"] = HierarchicalAdaptiveAnnealing - res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHierarchicalAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveAnnealing").set_name("LLAMAHierarchicalAdaptiveAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHierarchicalAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAHierarchicalAdaptiveAnnealing" + ).set_name("LLAMAHierarchicalAdaptiveAnnealing", register=True) +except Exception as e: # HierarchicalAdaptiveAnnealing print("HierarchicalAdaptiveAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.HierarchicalAdaptiveCovarianceMatrixAdaptation import HierarchicalAdaptiveCovarianceMatrixAdaptation - - lama_register["HierarchicalAdaptiveCovarianceMatrixAdaptation"] = HierarchicalAdaptiveCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation").set_name("LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation", register=True) -except Exception as e: +try: # HierarchicalAdaptiveCovarianceMatrixAdaptation + from nevergrad.optimization.lama.HierarchicalAdaptiveCovarianceMatrixAdaptation import ( + HierarchicalAdaptiveCovarianceMatrixAdaptation, + ) + + lama_register["HierarchicalAdaptiveCovarianceMatrixAdaptation"] = ( + HierarchicalAdaptiveCovarianceMatrixAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMAHierarchicalAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: # HierarchicalAdaptiveCovarianceMatrixAdaptation print("HierarchicalAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) -try: +try: # HierarchicalAdaptiveSearch from nevergrad.optimization.lama.HierarchicalAdaptiveSearch import HierarchicalAdaptiveSearch lama_register["HierarchicalAdaptiveSearch"] = HierarchicalAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHierarchicalAdaptiveSearch = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch").set_name("LLAMAHierarchicalAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHierarchicalAdaptiveSearch = NonObjectOptimizer(method="LLAMAHierarchicalAdaptiveSearch").set_name( + "LLAMAHierarchicalAdaptiveSearch", register=True + ) +except Exception as e: # HierarchicalAdaptiveSearch print("HierarchicalAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HierarchicalDiversityEnhancedCovarianceMatrixAdaptation import HierarchicalDiversityEnhancedCovarianceMatrixAdaptation - - lama_register["HierarchicalDiversityEnhancedCovarianceMatrixAdaptation"] = HierarchicalDiversityEnhancedCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation").set_name("LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation", register=True) -except Exception as e: +try: # HierarchicalDiversityEnhancedCovarianceMatrixAdaptation + from nevergrad.optimization.lama.HierarchicalDiversityEnhancedCovarianceMatrixAdaptation import ( + HierarchicalDiversityEnhancedCovarianceMatrixAdaptation, + ) + + lama_register["HierarchicalDiversityEnhancedCovarianceMatrixAdaptation"] = ( + HierarchicalDiversityEnhancedCovarianceMatrixAdaptation + ) + # res = NonObjectOptimizer(method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation" + ).set_name("LLAMAHierarchicalDiversityEnhancedCovarianceMatrixAdaptation", register=True) +except Exception as e: # HierarchicalDiversityEnhancedCovarianceMatrixAdaptation print("HierarchicalDiversityEnhancedCovarianceMatrixAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.HighPerformanceAdaptiveDifferentialSearch import HighPerformanceAdaptiveDifferentialSearch +try: # HighPerformanceAdaptiveDifferentialSearch + from nevergrad.optimization.lama.HighPerformanceAdaptiveDifferentialSearch import ( + HighPerformanceAdaptiveDifferentialSearch, + ) lama_register["HighPerformanceAdaptiveDifferentialSearch"] = HighPerformanceAdaptiveDifferentialSearch - res = NonObjectOptimizer(method="LLAMAHighPerformanceAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHighPerformanceAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAHighPerformanceAdaptiveDifferentialSearch").set_name("LLAMAHighPerformanceAdaptiveDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHighPerformanceAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHighPerformanceAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAHighPerformanceAdaptiveDifferentialSearch" + ).set_name("LLAMAHighPerformanceAdaptiveDifferentialSearch", register=True) +except Exception as e: # HighPerformanceAdaptiveDifferentialSearch print("HighPerformanceAdaptiveDifferentialSearch can not be imported: ", e) -try: +try: # HyGDAE from nevergrad.optimization.lama.HyGDAE import HyGDAE lama_register["HyGDAE"] = HyGDAE - res = NonObjectOptimizer(method="LLAMAHyGDAE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAHyGDAE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAHyGDAE = NonObjectOptimizer(method="LLAMAHyGDAE").set_name("LLAMAHyGDAE", register=True) -except Exception as e: +except Exception as e: # HyGDAE print("HyGDAE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveCovarianceMatrixDifferentialEvolution import HybridAdaptiveCovarianceMatrixDifferentialEvolution - - lama_register["HybridAdaptiveCovarianceMatrixDifferentialEvolution"] = HybridAdaptiveCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # HybridAdaptiveCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveCovarianceMatrixDifferentialEvolution import ( + HybridAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["HybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + HybridAdaptiveCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveCovarianceMatrixDifferentialEvolution print("HybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveCrossoverElitistStrategyV10 import HybridAdaptiveCrossoverElitistStrategyV10 +try: # HybridAdaptiveCrossoverElitistStrategyV10 + from nevergrad.optimization.lama.HybridAdaptiveCrossoverElitistStrategyV10 import ( + HybridAdaptiveCrossoverElitistStrategyV10, + ) lama_register["HybridAdaptiveCrossoverElitistStrategyV10"] = HybridAdaptiveCrossoverElitistStrategyV10 - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveCrossoverElitistStrategyV10 = NonObjectOptimizer(method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10").set_name("LLAMAHybridAdaptiveCrossoverElitistStrategyV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveCrossoverElitistStrategyV10 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveCrossoverElitistStrategyV10" + ).set_name("LLAMAHybridAdaptiveCrossoverElitistStrategyV10", register=True) +except Exception as e: # HybridAdaptiveCrossoverElitistStrategyV10 print("HybridAdaptiveCrossoverElitistStrategyV10 can not be imported: ", e) -try: +try: # HybridAdaptiveDE from nevergrad.optimization.lama.HybridAdaptiveDE import HybridAdaptiveDE lama_register["HybridAdaptiveDE"] = HybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE").set_name("LLAMAHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveDE").set_name( + "LLAMAHybridAdaptiveDE", register=True + ) +except Exception as e: # HybridAdaptiveDE print("HybridAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolution import HybridAdaptiveDifferentialEvolution +try: # HybridAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolution import ( + HybridAdaptiveDifferentialEvolution, + ) lama_register["HybridAdaptiveDifferentialEvolution"] = HybridAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolution").set_name("LLAMAHybridAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveDifferentialEvolution print("HybridAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning import HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning - - lama_register["HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning"] = HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning").set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning", register=True) -except Exception as e: +try: # HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning import ( + HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning, + ) + + lama_register["HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning"] = ( + HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning", register=True) +except Exception as e: # HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning print("HybridAdaptiveDifferentialEvolutionWithDynamicParameterTuning can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch import HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch - - lama_register["HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch"] = HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch").set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch", register=True) -except Exception as e: +try: # HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch + from nevergrad.optimization.lama.HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch import ( + HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch, + ) + + lama_register["HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch"] = ( + HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch" + ).set_name("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch", register=True) +except Exception as e: # HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch print("HybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDifferentialQuantumSearch import HybridAdaptiveDifferentialQuantumSearch +try: # HybridAdaptiveDifferentialQuantumSearch + from nevergrad.optimization.lama.HybridAdaptiveDifferentialQuantumSearch import ( + HybridAdaptiveDifferentialQuantumSearch, + ) lama_register["HybridAdaptiveDifferentialQuantumSearch"] = HybridAdaptiveDifferentialQuantumSearch - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDifferentialQuantumSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialQuantumSearch").set_name("LLAMAHybridAdaptiveDifferentialQuantumSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialQuantumSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDifferentialQuantumSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialQuantumSearch" + ).set_name("LLAMAHybridAdaptiveDifferentialQuantumSearch", register=True) +except Exception as e: # HybridAdaptiveDifferentialQuantumSearch print("HybridAdaptiveDifferentialQuantumSearch can not be imported: ", e) -try: +try: # HybridAdaptiveDifferentialSwarm from nevergrad.optimization.lama.HybridAdaptiveDifferentialSwarm import HybridAdaptiveDifferentialSwarm lama_register["HybridAdaptiveDifferentialSwarm"] = HybridAdaptiveDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialSwarm").set_name("LLAMAHybridAdaptiveDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDifferentialSwarm" + ).set_name("LLAMAHybridAdaptiveDifferentialSwarm", register=True) +except Exception as e: # HybridAdaptiveDifferentialSwarm print("HybridAdaptiveDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDiversityMaintainingGradientEvolution import HybridAdaptiveDiversityMaintainingGradientEvolution - - lama_register["HybridAdaptiveDiversityMaintainingGradientEvolution"] = HybridAdaptiveDiversityMaintainingGradientEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution").set_name("LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution", register=True) -except Exception as e: +try: # HybridAdaptiveDiversityMaintainingGradientEvolution + from nevergrad.optimization.lama.HybridAdaptiveDiversityMaintainingGradientEvolution import ( + HybridAdaptiveDiversityMaintainingGradientEvolution, + ) + + lama_register["HybridAdaptiveDiversityMaintainingGradientEvolution"] = ( + HybridAdaptiveDiversityMaintainingGradientEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution" + ).set_name("LLAMAHybridAdaptiveDiversityMaintainingGradientEvolution", register=True) +except Exception as e: # HybridAdaptiveDiversityMaintainingGradientEvolution print("HybridAdaptiveDiversityMaintainingGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveDualPhaseStrategyV6 import HybridAdaptiveDualPhaseStrategyV6 +try: # HybridAdaptiveDualPhaseStrategyV6 + from nevergrad.optimization.lama.HybridAdaptiveDualPhaseStrategyV6 import ( + HybridAdaptiveDualPhaseStrategyV6, + ) lama_register["HybridAdaptiveDualPhaseStrategyV6"] = HybridAdaptiveDualPhaseStrategyV6 - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDualPhaseStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveDualPhaseStrategyV6 = NonObjectOptimizer(method="LLAMAHybridAdaptiveDualPhaseStrategyV6").set_name("LLAMAHybridAdaptiveDualPhaseStrategyV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveDualPhaseStrategyV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveDualPhaseStrategyV6 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveDualPhaseStrategyV6" + ).set_name("LLAMAHybridAdaptiveDualPhaseStrategyV6", register=True) +except Exception as e: # HybridAdaptiveDualPhaseStrategyV6 print("HybridAdaptiveDualPhaseStrategyV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveEvolutionaryOptimizer import HybridAdaptiveEvolutionaryOptimizer +try: # HybridAdaptiveEvolutionaryOptimizer + from nevergrad.optimization.lama.HybridAdaptiveEvolutionaryOptimizer import ( + HybridAdaptiveEvolutionaryOptimizer, + ) lama_register["HybridAdaptiveEvolutionaryOptimizer"] = HybridAdaptiveEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveEvolutionaryOptimizer").set_name("LLAMAHybridAdaptiveEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAHybridAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: # HybridAdaptiveEvolutionaryOptimizer print("HybridAdaptiveEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveExplorationOptimizer import HybridAdaptiveExplorationOptimizer +try: # HybridAdaptiveExplorationOptimizer + from nevergrad.optimization.lama.HybridAdaptiveExplorationOptimizer import ( + HybridAdaptiveExplorationOptimizer, + ) lama_register["HybridAdaptiveExplorationOptimizer"] = HybridAdaptiveExplorationOptimizer - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveExplorationOptimizer").set_name("LLAMAHybridAdaptiveExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveExplorationOptimizer" + ).set_name("LLAMAHybridAdaptiveExplorationOptimizer", register=True) +except Exception as e: # HybridAdaptiveExplorationOptimizer print("HybridAdaptiveExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizer import HybridAdaptiveGeneticSwarmOptimizer +try: # HybridAdaptiveGeneticSwarmOptimizer + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizer import ( + HybridAdaptiveGeneticSwarmOptimizer, + ) lama_register["HybridAdaptiveGeneticSwarmOptimizer"] = HybridAdaptiveGeneticSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: # HybridAdaptiveGeneticSwarmOptimizer print("HybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizerV2 import HybridAdaptiveGeneticSwarmOptimizerV2 +try: # HybridAdaptiveGeneticSwarmOptimizerV2 + from nevergrad.optimization.lama.HybridAdaptiveGeneticSwarmOptimizerV2 import ( + HybridAdaptiveGeneticSwarmOptimizerV2, + ) lama_register["HybridAdaptiveGeneticSwarmOptimizerV2"] = HybridAdaptiveGeneticSwarmOptimizerV2 - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveGeneticSwarmOptimizerV2 = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2").set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveGeneticSwarmOptimizerV2 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveGeneticSwarmOptimizerV2" + ).set_name("LLAMAHybridAdaptiveGeneticSwarmOptimizerV2", register=True) +except Exception as e: # HybridAdaptiveGeneticSwarmOptimizerV2 print("HybridAdaptiveGeneticSwarmOptimizerV2 can not be imported: ", e) -try: +try: # HybridAdaptiveGradientPSO from nevergrad.optimization.lama.HybridAdaptiveGradientPSO import HybridAdaptiveGradientPSO lama_register["HybridAdaptiveGradientPSO"] = HybridAdaptiveGradientPSO - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO").set_name("LLAMAHybridAdaptiveGradientPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveGradientPSO").set_name( + "LLAMAHybridAdaptiveGradientPSO", register=True + ) +except Exception as e: # HybridAdaptiveGradientPSO print("HybridAdaptiveGradientPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveHarmonicFireworksTabuSearch import HybridAdaptiveHarmonicFireworksTabuSearch +try: # HybridAdaptiveHarmonicFireworksTabuSearch + from nevergrad.optimization.lama.HybridAdaptiveHarmonicFireworksTabuSearch import ( + HybridAdaptiveHarmonicFireworksTabuSearch, + ) lama_register["HybridAdaptiveHarmonicFireworksTabuSearch"] = HybridAdaptiveHarmonicFireworksTabuSearch - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: # HybridAdaptiveHarmonicFireworksTabuSearch print("HybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) -try: +try: # HybridAdaptiveMemeticAlgorithm from nevergrad.optimization.lama.HybridAdaptiveMemeticAlgorithm import HybridAdaptiveMemeticAlgorithm lama_register["HybridAdaptiveMemeticAlgorithm"] = HybridAdaptiveMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticAlgorithm").set_name("LLAMAHybridAdaptiveMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticAlgorithm" + ).set_name("LLAMAHybridAdaptiveMemeticAlgorithm", register=True) +except Exception as e: # HybridAdaptiveMemeticAlgorithm print("HybridAdaptiveMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism import HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism - - lama_register["HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism"] = HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism").set_name("LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism", register=True) -except Exception as e: +try: # HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism + from nevergrad.optimization.lama.HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism import ( + HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism, + ) + + lama_register["HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism"] = ( + HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism" + ).set_name("LLAMAHybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism", register=True) +except Exception as e: # HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism print("HybridAdaptiveMemeticDifferentialEvolutionWithDynamicElitism can not be imported: ", e) -try: +try: # HybridAdaptiveMemeticOptimizerV4 from nevergrad.optimization.lama.HybridAdaptiveMemeticOptimizerV4 import HybridAdaptiveMemeticOptimizerV4 lama_register["HybridAdaptiveMemeticOptimizerV4"] = HybridAdaptiveMemeticOptimizerV4 - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMemeticOptimizerV4 = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticOptimizerV4").set_name("LLAMAHybridAdaptiveMemeticOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemeticOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMemeticOptimizerV4 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemeticOptimizerV4" + ).set_name("LLAMAHybridAdaptiveMemeticOptimizerV4", register=True) +except Exception as e: # HybridAdaptiveMemeticOptimizerV4 print("HybridAdaptiveMemeticOptimizerV4 can not be imported: ", e) -try: +try: # HybridAdaptiveMemoryAnnealing from nevergrad.optimization.lama.HybridAdaptiveMemoryAnnealing import HybridAdaptiveMemoryAnnealing lama_register["HybridAdaptiveMemoryAnnealing"] = HybridAdaptiveMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMemoryAnnealing = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemoryAnnealing").set_name("LLAMAHybridAdaptiveMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMemoryAnnealing = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMemoryAnnealing" + ).set_name("LLAMAHybridAdaptiveMemoryAnnealing", register=True) +except Exception as e: # HybridAdaptiveMemoryAnnealing print("HybridAdaptiveMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolution import HybridAdaptiveMultiPhaseEvolution +try: # HybridAdaptiveMultiPhaseEvolution + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolution import ( + HybridAdaptiveMultiPhaseEvolution, + ) lama_register["HybridAdaptiveMultiPhaseEvolution"] = HybridAdaptiveMultiPhaseEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolution").set_name("LLAMAHybridAdaptiveMultiPhaseEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMultiPhaseEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMultiPhaseEvolution" + ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolution", register=True) +except Exception as e: # HybridAdaptiveMultiPhaseEvolution print("HybridAdaptiveMultiPhaseEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolutionV2 import HybridAdaptiveMultiPhaseEvolutionV2 +try: # HybridAdaptiveMultiPhaseEvolutionV2 + from nevergrad.optimization.lama.HybridAdaptiveMultiPhaseEvolutionV2 import ( + HybridAdaptiveMultiPhaseEvolutionV2, + ) lama_register["HybridAdaptiveMultiPhaseEvolutionV2"] = HybridAdaptiveMultiPhaseEvolutionV2 - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveMultiPhaseEvolutionV2 = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2").set_name("LLAMAHybridAdaptiveMultiPhaseEvolutionV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveMultiPhaseEvolutionV2 = NonObjectOptimizer( + method="LLAMAHybridAdaptiveMultiPhaseEvolutionV2" + ).set_name("LLAMAHybridAdaptiveMultiPhaseEvolutionV2", register=True) +except Exception as e: # HybridAdaptiveMultiPhaseEvolutionV2 print("HybridAdaptiveMultiPhaseEvolutionV2 can not be imported: ", e) -try: +try: # HybridAdaptiveNesterovSynergy from nevergrad.optimization.lama.HybridAdaptiveNesterovSynergy import HybridAdaptiveNesterovSynergy lama_register["HybridAdaptiveNesterovSynergy"] = HybridAdaptiveNesterovSynergy - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveNesterovSynergy = NonObjectOptimizer(method="LLAMAHybridAdaptiveNesterovSynergy").set_name("LLAMAHybridAdaptiveNesterovSynergy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveNesterovSynergy = NonObjectOptimizer( + method="LLAMAHybridAdaptiveNesterovSynergy" + ).set_name("LLAMAHybridAdaptiveNesterovSynergy", register=True) +except Exception as e: # HybridAdaptiveNesterovSynergy print("HybridAdaptiveNesterovSynergy can not be imported: ", e) -try: +try: # HybridAdaptiveOptimization from nevergrad.optimization.lama.HybridAdaptiveOptimization import HybridAdaptiveOptimization lama_register["HybridAdaptiveOptimization"] = HybridAdaptiveOptimization - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization").set_name("LLAMAHybridAdaptiveOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveOptimization").set_name( + "LLAMAHybridAdaptiveOptimization", register=True + ) +except Exception as e: # HybridAdaptiveOptimization print("HybridAdaptiveOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveOrthogonalDifferentialEvolution import HybridAdaptiveOrthogonalDifferentialEvolution - - lama_register["HybridAdaptiveOrthogonalDifferentialEvolution"] = HybridAdaptiveOrthogonalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution").set_name("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution", register=True) -except Exception as e: +try: # HybridAdaptiveOrthogonalDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveOrthogonalDifferentialEvolution import ( + HybridAdaptiveOrthogonalDifferentialEvolution, + ) + + lama_register["HybridAdaptiveOrthogonalDifferentialEvolution"] = ( + HybridAdaptiveOrthogonalDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveOrthogonalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveOrthogonalDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveOrthogonalDifferentialEvolution print("HybridAdaptiveOrthogonalDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveParallelDifferentialEvolution import HybridAdaptiveParallelDifferentialEvolution +try: # HybridAdaptiveParallelDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveParallelDifferentialEvolution import ( + HybridAdaptiveParallelDifferentialEvolution, + ) lama_register["HybridAdaptiveParallelDifferentialEvolution"] = HybridAdaptiveParallelDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveParallelDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveParallelDifferentialEvolution").set_name("LLAMAHybridAdaptiveParallelDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParallelDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveParallelDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveParallelDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveParallelDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveParallelDifferentialEvolution print("HybridAdaptiveParallelDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveParameterTuningOptimization import HybridAdaptiveParameterTuningOptimization +try: # HybridAdaptiveParameterTuningOptimization + from nevergrad.optimization.lama.HybridAdaptiveParameterTuningOptimization import ( + HybridAdaptiveParameterTuningOptimization, + ) lama_register["HybridAdaptiveParameterTuningOptimization"] = HybridAdaptiveParameterTuningOptimization - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParameterTuningOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveParameterTuningOptimization = NonObjectOptimizer(method="LLAMAHybridAdaptiveParameterTuningOptimization").set_name("LLAMAHybridAdaptiveParameterTuningOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveParameterTuningOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveParameterTuningOptimization = NonObjectOptimizer( + method="LLAMAHybridAdaptiveParameterTuningOptimization" + ).set_name("LLAMAHybridAdaptiveParameterTuningOptimization", register=True) +except Exception as e: # HybridAdaptiveParameterTuningOptimization print("HybridAdaptiveParameterTuningOptimization can not be imported: ", e) -try: +try: # HybridAdaptivePopulationDE from nevergrad.optimization.lama.HybridAdaptivePopulationDE import HybridAdaptivePopulationDE lama_register["HybridAdaptivePopulationDE"] = HybridAdaptivePopulationDE - res = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptivePopulationDE = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE").set_name("LLAMAHybridAdaptivePopulationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptivePopulationDE = NonObjectOptimizer(method="LLAMAHybridAdaptivePopulationDE").set_name( + "LLAMAHybridAdaptivePopulationDE", register=True + ) +except Exception as e: # HybridAdaptivePopulationDE print("HybridAdaptivePopulationDE can not be imported: ", e) -try: +try: # HybridAdaptiveQuantumLevySearch from nevergrad.optimization.lama.HybridAdaptiveQuantumLevySearch import HybridAdaptiveQuantumLevySearch lama_register["HybridAdaptiveQuantumLevySearch"] = HybridAdaptiveQuantumLevySearch - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumLevySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveQuantumLevySearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumLevySearch").set_name("LLAMAHybridAdaptiveQuantumLevySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumLevySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveQuantumLevySearch = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumLevySearch" + ).set_name("LLAMAHybridAdaptiveQuantumLevySearch", register=True) +except Exception as e: # HybridAdaptiveQuantumLevySearch print("HybridAdaptiveQuantumLevySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticDifferentialEvolution import HybridAdaptiveQuantumMemeticDifferentialEvolution - - lama_register["HybridAdaptiveQuantumMemeticDifferentialEvolution"] = HybridAdaptiveQuantumMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution").set_name("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # HybridAdaptiveQuantumMemeticDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticDifferentialEvolution import ( + HybridAdaptiveQuantumMemeticDifferentialEvolution, + ) + + lama_register["HybridAdaptiveQuantumMemeticDifferentialEvolution"] = ( + HybridAdaptiveQuantumMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveQuantumMemeticDifferentialEvolution print("HybridAdaptiveQuantumMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticOptimizer import HybridAdaptiveQuantumMemeticOptimizer +try: # HybridAdaptiveQuantumMemeticOptimizer + from nevergrad.optimization.lama.HybridAdaptiveQuantumMemeticOptimizer import ( + HybridAdaptiveQuantumMemeticOptimizer, + ) lama_register["HybridAdaptiveQuantumMemeticOptimizer"] = HybridAdaptiveQuantumMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticOptimizer").set_name("LLAMAHybridAdaptiveQuantumMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveQuantumMemeticOptimizer = NonObjectOptimizer( + method="LLAMAHybridAdaptiveQuantumMemeticOptimizer" + ).set_name("LLAMAHybridAdaptiveQuantumMemeticOptimizer", register=True) +except Exception as e: # HybridAdaptiveQuantumMemeticOptimizer print("HybridAdaptiveQuantumMemeticOptimizer can not be imported: ", e) -try: +try: # HybridAdaptiveQuantumPSO from nevergrad.optimization.lama.HybridAdaptiveQuantumPSO import HybridAdaptiveQuantumPSO lama_register["HybridAdaptiveQuantumPSO"] = HybridAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO").set_name("LLAMAHybridAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAHybridAdaptiveQuantumPSO").set_name( + "LLAMAHybridAdaptiveQuantumPSO", register=True + ) +except Exception as e: # HybridAdaptiveQuantumPSO print("HybridAdaptiveQuantumPSO can not be imported: ", e) -try: +try: # HybridAdaptiveSearch from nevergrad.optimization.lama.HybridAdaptiveSearch import HybridAdaptiveSearch lama_register["HybridAdaptiveSearch"] = HybridAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch").set_name("LLAMAHybridAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveSearch = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearch").set_name( + "LLAMAHybridAdaptiveSearch", register=True + ) +except Exception as e: # HybridAdaptiveSearch print("HybridAdaptiveSearch can not be imported: ", e) -try: +try: # HybridAdaptiveSearchStrategy from nevergrad.optimization.lama.HybridAdaptiveSearchStrategy import HybridAdaptiveSearchStrategy lama_register["HybridAdaptiveSearchStrategy"] = HybridAdaptiveSearchStrategy - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearchStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveSearchStrategy = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearchStrategy").set_name("LLAMAHybridAdaptiveSearchStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSearchStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveSearchStrategy = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSearchStrategy" + ).set_name("LLAMAHybridAdaptiveSearchStrategy", register=True) +except Exception as e: # HybridAdaptiveSearchStrategy print("HybridAdaptiveSearchStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveSelfAdaptiveDifferentialEvolution import HybridAdaptiveSelfAdaptiveDifferentialEvolution - - lama_register["HybridAdaptiveSelfAdaptiveDifferentialEvolution"] = HybridAdaptiveSelfAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution").set_name("LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # HybridAdaptiveSelfAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.HybridAdaptiveSelfAdaptiveDifferentialEvolution import ( + HybridAdaptiveSelfAdaptiveDifferentialEvolution, + ) + + lama_register["HybridAdaptiveSelfAdaptiveDifferentialEvolution"] = ( + HybridAdaptiveSelfAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridAdaptiveSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: # HybridAdaptiveSelfAdaptiveDifferentialEvolution print("HybridAdaptiveSelfAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridAdaptiveSimulatedAnnealingDE import HybridAdaptiveSimulatedAnnealingDE +try: # HybridAdaptiveSimulatedAnnealingDE + from nevergrad.optimization.lama.HybridAdaptiveSimulatedAnnealingDE import ( + HybridAdaptiveSimulatedAnnealingDE, + ) lama_register["HybridAdaptiveSimulatedAnnealingDE"] = HybridAdaptiveSimulatedAnnealingDE - res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridAdaptiveSimulatedAnnealingDE = NonObjectOptimizer(method="LLAMAHybridAdaptiveSimulatedAnnealingDE").set_name("LLAMAHybridAdaptiveSimulatedAnnealingDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( + method="LLAMAHybridAdaptiveSimulatedAnnealingDE" + ).set_name("LLAMAHybridAdaptiveSimulatedAnnealingDE", register=True) +except Exception as e: # HybridAdaptiveSimulatedAnnealingDE print("HybridAdaptiveSimulatedAnnealingDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridCosineSineDualPhaseStrategyV10 import HybridCosineSineDualPhaseStrategyV10 +try: # HybridCosineSineDualPhaseStrategyV10 + from nevergrad.optimization.lama.HybridCosineSineDualPhaseStrategyV10 import ( + HybridCosineSineDualPhaseStrategyV10, + ) lama_register["HybridCosineSineDualPhaseStrategyV10"] = HybridCosineSineDualPhaseStrategyV10 - res = NonObjectOptimizer(method="LLAMAHybridCosineSineDualPhaseStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridCosineSineDualPhaseStrategyV10 = NonObjectOptimizer(method="LLAMAHybridCosineSineDualPhaseStrategyV10").set_name("LLAMAHybridCosineSineDualPhaseStrategyV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridCosineSineDualPhaseStrategyV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridCosineSineDualPhaseStrategyV10 = NonObjectOptimizer( + method="LLAMAHybridCosineSineDualPhaseStrategyV10" + ).set_name("LLAMAHybridCosineSineDualPhaseStrategyV10", register=True) +except Exception as e: # HybridCosineSineDualPhaseStrategyV10 print("HybridCosineSineDualPhaseStrategyV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptionDifferentialEvolution import HybridCovarianceMatrixAdaptionDifferentialEvolution - - lama_register["HybridCovarianceMatrixAdaptionDifferentialEvolution"] = HybridCovarianceMatrixAdaptionDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution").set_name("LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution", register=True) -except Exception as e: +try: # HybridCovarianceMatrixAdaptionDifferentialEvolution + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptionDifferentialEvolution import ( + HybridCovarianceMatrixAdaptionDifferentialEvolution, + ) + + lama_register["HybridCovarianceMatrixAdaptionDifferentialEvolution"] = ( + HybridCovarianceMatrixAdaptionDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution" + ).set_name("LLAMAHybridCovarianceMatrixAdaptionDifferentialEvolution", register=True) +except Exception as e: # HybridCovarianceMatrixAdaptionDifferentialEvolution print("HybridCovarianceMatrixAdaptionDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 import HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 - - lama_register["HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2"] = HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2").set_name("LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2", register=True) -except Exception as e: +try: # HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 + from nevergrad.optimization.lama.HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 import ( + HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2, + ) + + lama_register["HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2"] = ( + HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2" + ).set_name("LLAMAHybridCovarianceMatrixAdaptiveDifferentialEvolutionV2", register=True) +except Exception as e: # HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 print("HybridCovarianceMatrixAdaptiveDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights import HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights - - lama_register["HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights"] = HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights - res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights").set_name("LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights", register=True) -except Exception as e: +try: # HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights + from nevergrad.optimization.lama.HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights import ( + HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights, + ) + + lama_register["HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights"] = ( + HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights + ) + # res = NonObjectOptimizer(method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights = NonObjectOptimizer( + method="LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights" + ).set_name("LLAMAHybridCovarianceMatrixDifferentialEvolutionWithLevyFlights", register=True) +except Exception as e: # HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights print("HybridCovarianceMatrixDifferentialEvolutionWithLevyFlights can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridCulturalDifferentialEvolution import HybridCulturalDifferentialEvolution +try: # HybridCulturalDifferentialEvolution + from nevergrad.optimization.lama.HybridCulturalDifferentialEvolution import ( + HybridCulturalDifferentialEvolution, + ) lama_register["HybridCulturalDifferentialEvolution"] = HybridCulturalDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridCulturalDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridCulturalDifferentialEvolution").set_name("LLAMAHybridCulturalDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridCulturalDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridCulturalDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridCulturalDifferentialEvolution" + ).set_name("LLAMAHybridCulturalDifferentialEvolution", register=True) +except Exception as e: # HybridCulturalDifferentialEvolution print("HybridCulturalDifferentialEvolution can not be imported: ", e) -try: +try: # HybridDEPSO from nevergrad.optimization.lama.HybridDEPSO import HybridDEPSO lama_register["HybridDEPSO"] = HybridDEPSO - res = NonObjectOptimizer(method="LLAMAHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDEPSO = NonObjectOptimizer(method="LLAMAHybridDEPSO").set_name("LLAMAHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDEPSO = NonObjectOptimizer(method="LLAMAHybridDEPSO").set_name( + "LLAMAHybridDEPSO", register=True + ) +except Exception as e: # HybridDEPSO print("HybridDEPSO can not be imported: ", e) -try: +try: # HybridDEPSOWithDynamicAdaptation from nevergrad.optimization.lama.HybridDEPSOWithDynamicAdaptation import HybridDEPSOWithDynamicAdaptation lama_register["HybridDEPSOWithDynamicAdaptation"] = HybridDEPSOWithDynamicAdaptation - res = NonObjectOptimizer(method="LLAMAHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer(method="LLAMAHybridDEPSOWithDynamicAdaptation").set_name("LLAMAHybridDEPSOWithDynamicAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDEPSOWithDynamicAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDEPSOWithDynamicAdaptation = NonObjectOptimizer( + method="LLAMAHybridDEPSOWithDynamicAdaptation" + ).set_name("LLAMAHybridDEPSOWithDynamicAdaptation", register=True) +except Exception as e: # HybridDEPSOWithDynamicAdaptation print("HybridDEPSOWithDynamicAdaptation can not be imported: ", e) -try: +try: # HybridDifferentialEvolution from nevergrad.optimization.lama.HybridDifferentialEvolution import HybridDifferentialEvolution lama_register["HybridDifferentialEvolution"] = HybridDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution").set_name("LLAMAHybridDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolution").set_name( + "LLAMAHybridDifferentialEvolution", register=True + ) +except Exception as e: # HybridDifferentialEvolution print("HybridDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionMemeticOptimizer import HybridDifferentialEvolutionMemeticOptimizer +try: # HybridDifferentialEvolutionMemeticOptimizer + from nevergrad.optimization.lama.HybridDifferentialEvolutionMemeticOptimizer import ( + HybridDifferentialEvolutionMemeticOptimizer, + ) lama_register["HybridDifferentialEvolutionMemeticOptimizer"] = HybridDifferentialEvolutionMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionMemeticOptimizer").set_name("LLAMAHybridDifferentialEvolutionMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDifferentialEvolutionMemeticOptimizer = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionMemeticOptimizer" + ).set_name("LLAMAHybridDifferentialEvolutionMemeticOptimizer", register=True) +except Exception as e: # HybridDifferentialEvolutionMemeticOptimizer print("HybridDifferentialEvolutionMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionParticleSwarmOptimizer import HybridDifferentialEvolutionParticleSwarmOptimizer - - lama_register["HybridDifferentialEvolutionParticleSwarmOptimizer"] = HybridDifferentialEvolutionParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer").set_name("LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer", register=True) -except Exception as e: +try: # HybridDifferentialEvolutionParticleSwarmOptimizer + from nevergrad.optimization.lama.HybridDifferentialEvolutionParticleSwarmOptimizer import ( + HybridDifferentialEvolutionParticleSwarmOptimizer, + ) + + lama_register["HybridDifferentialEvolutionParticleSwarmOptimizer"] = ( + HybridDifferentialEvolutionParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer" + ).set_name("LLAMAHybridDifferentialEvolutionParticleSwarmOptimizer", register=True) +except Exception as e: # HybridDifferentialEvolutionParticleSwarmOptimizer print("HybridDifferentialEvolutionParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDifferentialEvolutionWithLocalSearch import HybridDifferentialEvolutionWithLocalSearch +try: # HybridDifferentialEvolutionWithLocalSearch + from nevergrad.optimization.lama.HybridDifferentialEvolutionWithLocalSearch import ( + HybridDifferentialEvolutionWithLocalSearch, + ) lama_register["HybridDifferentialEvolutionWithLocalSearch"] = HybridDifferentialEvolutionWithLocalSearch - res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionWithLocalSearch").set_name("LLAMAHybridDifferentialEvolutionWithLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAHybridDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAHybridDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: # HybridDifferentialEvolutionWithLocalSearch print("HybridDifferentialEvolutionWithLocalSearch can not be imported: ", e) -try: +try: # HybridDifferentialLocalSearch from nevergrad.optimization.lama.HybridDifferentialLocalSearch import HybridDifferentialLocalSearch lama_register["HybridDifferentialLocalSearch"] = HybridDifferentialLocalSearch - res = NonObjectOptimizer(method="LLAMAHybridDifferentialLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDifferentialLocalSearch = NonObjectOptimizer(method="LLAMAHybridDifferentialLocalSearch").set_name("LLAMAHybridDifferentialLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDifferentialLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDifferentialLocalSearch = NonObjectOptimizer( + method="LLAMAHybridDifferentialLocalSearch" + ).set_name("LLAMAHybridDifferentialLocalSearch", register=True) +except Exception as e: # HybridDifferentialLocalSearch print("HybridDifferentialLocalSearch can not be imported: ", e) -try: +try: # HybridDualLocalOptimizationDE from nevergrad.optimization.lama.HybridDualLocalOptimizationDE import HybridDualLocalOptimizationDE lama_register["HybridDualLocalOptimizationDE"] = HybridDualLocalOptimizationDE - res = NonObjectOptimizer(method="LLAMAHybridDualLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDualLocalOptimizationDE = NonObjectOptimizer(method="LLAMAHybridDualLocalOptimizationDE").set_name("LLAMAHybridDualLocalOptimizationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDualLocalOptimizationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDualLocalOptimizationDE = NonObjectOptimizer( + method="LLAMAHybridDualLocalOptimizationDE" + ).set_name("LLAMAHybridDualLocalOptimizationDE", register=True) +except Exception as e: # HybridDualLocalOptimizationDE print("HybridDualLocalOptimizationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDualPhaseParticleSwarmDifferentialEvolution import HybridDualPhaseParticleSwarmDifferentialEvolution - - lama_register["HybridDualPhaseParticleSwarmDifferentialEvolution"] = HybridDualPhaseParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # HybridDualPhaseParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.HybridDualPhaseParticleSwarmDifferentialEvolution import ( + HybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["HybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + HybridDualPhaseParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMAHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # HybridDualPhaseParticleSwarmDifferentialEvolution print("HybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) -try: +try: # HybridDynamicAdaptiveDE from nevergrad.optimization.lama.HybridDynamicAdaptiveDE import HybridDynamicAdaptiveDE lama_register["HybridDynamicAdaptiveDE"] = HybridDynamicAdaptiveDE - res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE").set_name("LLAMAHybridDynamicAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveDE").set_name( + "LLAMAHybridDynamicAdaptiveDE", register=True + ) +except Exception as e: # HybridDynamicAdaptiveDE print("HybridDynamicAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDynamicAdaptiveExplorationOptimization import HybridDynamicAdaptiveExplorationOptimization - - lama_register["HybridDynamicAdaptiveExplorationOptimization"] = HybridDynamicAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveExplorationOptimization").set_name("LLAMAHybridDynamicAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # HybridDynamicAdaptiveExplorationOptimization + from nevergrad.optimization.lama.HybridDynamicAdaptiveExplorationOptimization import ( + HybridDynamicAdaptiveExplorationOptimization, + ) + + lama_register["HybridDynamicAdaptiveExplorationOptimization"] = ( + HybridDynamicAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAHybridDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAHybridDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAHybridDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: # HybridDynamicAdaptiveExplorationOptimization print("HybridDynamicAdaptiveExplorationOptimization can not be imported: ", e) -try: +try: # HybridDynamicClusterOptimization from nevergrad.optimization.lama.HybridDynamicClusterOptimization import HybridDynamicClusterOptimization lama_register["HybridDynamicClusterOptimization"] = HybridDynamicClusterOptimization - res = NonObjectOptimizer(method="LLAMAHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicClusterOptimization = NonObjectOptimizer(method="LLAMAHybridDynamicClusterOptimization").set_name("LLAMAHybridDynamicClusterOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMAHybridDynamicClusterOptimization" + ).set_name("LLAMAHybridDynamicClusterOptimization", register=True) +except Exception as e: # HybridDynamicClusterOptimization print("HybridDynamicClusterOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDynamicCuckooHarmonyAlgorithm import HybridDynamicCuckooHarmonyAlgorithm +try: # HybridDynamicCuckooHarmonyAlgorithm + from nevergrad.optimization.lama.HybridDynamicCuckooHarmonyAlgorithm import ( + HybridDynamicCuckooHarmonyAlgorithm, + ) lama_register["HybridDynamicCuckooHarmonyAlgorithm"] = HybridDynamicCuckooHarmonyAlgorithm - res = NonObjectOptimizer(method="LLAMAHybridDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAHybridDynamicCuckooHarmonyAlgorithm").set_name("LLAMAHybridDynamicCuckooHarmonyAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicCuckooHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicCuckooHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAHybridDynamicCuckooHarmonyAlgorithm" + ).set_name("LLAMAHybridDynamicCuckooHarmonyAlgorithm", register=True) +except Exception as e: # HybridDynamicCuckooHarmonyAlgorithm print("HybridDynamicCuckooHarmonyAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDynamicDifferentialEvolution import HybridDynamicDifferentialEvolution +try: # HybridDynamicDifferentialEvolution + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolution import ( + HybridDynamicDifferentialEvolution, + ) lama_register["HybridDynamicDifferentialEvolution"] = HybridDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolution").set_name("LLAMAHybridDynamicDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridDynamicDifferentialEvolution" + ).set_name("LLAMAHybridDynamicDifferentialEvolution", register=True) +except Exception as e: # HybridDynamicDifferentialEvolution print("HybridDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDynamicDifferentialEvolutionGradient import HybridDynamicDifferentialEvolutionGradient +try: # HybridDynamicDifferentialEvolutionGradient + from nevergrad.optimization.lama.HybridDynamicDifferentialEvolutionGradient import ( + HybridDynamicDifferentialEvolutionGradient, + ) lama_register["HybridDynamicDifferentialEvolutionGradient"] = HybridDynamicDifferentialEvolutionGradient - res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolutionGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicDifferentialEvolutionGradient = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolutionGradient").set_name("LLAMAHybridDynamicDifferentialEvolutionGradient", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicDifferentialEvolutionGradient")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicDifferentialEvolutionGradient = NonObjectOptimizer( + method="LLAMAHybridDynamicDifferentialEvolutionGradient" + ).set_name("LLAMAHybridDynamicDifferentialEvolutionGradient", register=True) +except Exception as e: # HybridDynamicDifferentialEvolutionGradient print("HybridDynamicDifferentialEvolutionGradient can not be imported: ", e) -try: +try: # HybridDynamicElitistDE from nevergrad.optimization.lama.HybridDynamicElitistDE import HybridDynamicElitistDE lama_register["HybridDynamicElitistDE"] = HybridDynamicElitistDE - res = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicElitistDE = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE").set_name("LLAMAHybridDynamicElitistDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicElitistDE = NonObjectOptimizer(method="LLAMAHybridDynamicElitistDE").set_name( + "LLAMAHybridDynamicElitistDE", register=True + ) +except Exception as e: # HybridDynamicElitistDE print("HybridDynamicElitistDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridDynamicQuantumLevyDifferentialSearch import HybridDynamicQuantumLevyDifferentialSearch +try: # HybridDynamicQuantumLevyDifferentialSearch + from nevergrad.optimization.lama.HybridDynamicQuantumLevyDifferentialSearch import ( + HybridDynamicQuantumLevyDifferentialSearch, + ) lama_register["HybridDynamicQuantumLevyDifferentialSearch"] = HybridDynamicQuantumLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMAHybridDynamicQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAHybridDynamicQuantumLevyDifferentialSearch").set_name("LLAMAHybridDynamicQuantumLevyDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAHybridDynamicQuantumLevyDifferentialSearch" + ).set_name("LLAMAHybridDynamicQuantumLevyDifferentialSearch", register=True) +except Exception as e: # HybridDynamicQuantumLevyDifferentialSearch print("HybridDynamicQuantumLevyDifferentialSearch can not be imported: ", e) -try: +try: # HybridDynamicSearch from nevergrad.optimization.lama.HybridDynamicSearch import HybridDynamicSearch lama_register["HybridDynamicSearch"] = HybridDynamicSearch - res = NonObjectOptimizer(method="LLAMAHybridDynamicSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridDynamicSearch = NonObjectOptimizer(method="LLAMAHybridDynamicSearch").set_name("LLAMAHybridDynamicSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridDynamicSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridDynamicSearch = NonObjectOptimizer(method="LLAMAHybridDynamicSearch").set_name( + "LLAMAHybridDynamicSearch", register=True + ) +except Exception as e: # HybridDynamicSearch print("HybridDynamicSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridEnhancedAdaptiveDifferentialEvolution import HybridEnhancedAdaptiveDifferentialEvolution +try: # HybridEnhancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.HybridEnhancedAdaptiveDifferentialEvolution import ( + HybridEnhancedAdaptiveDifferentialEvolution, + ) lama_register["HybridEnhancedAdaptiveDifferentialEvolution"] = HybridEnhancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAHybridEnhancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # HybridEnhancedAdaptiveDifferentialEvolution print("HybridEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridEnhancedDualPhaseAdaptiveOptimizationV6 import HybridEnhancedDualPhaseAdaptiveOptimizationV6 - - lama_register["HybridEnhancedDualPhaseAdaptiveOptimizationV6"] = HybridEnhancedDualPhaseAdaptiveOptimizationV6 - res = NonObjectOptimizer(method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6 = NonObjectOptimizer(method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6").set_name("LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6", register=True) -except Exception as e: +try: # HybridEnhancedDualPhaseAdaptiveOptimizationV6 + from nevergrad.optimization.lama.HybridEnhancedDualPhaseAdaptiveOptimizationV6 import ( + HybridEnhancedDualPhaseAdaptiveOptimizationV6, + ) + + lama_register["HybridEnhancedDualPhaseAdaptiveOptimizationV6"] = ( + HybridEnhancedDualPhaseAdaptiveOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6 = NonObjectOptimizer( + method="LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6" + ).set_name("LLAMAHybridEnhancedDualPhaseAdaptiveOptimizationV6", register=True) +except Exception as e: # HybridEnhancedDualPhaseAdaptiveOptimizationV6 print("HybridEnhancedDualPhaseAdaptiveOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridEnhancedGravitationalSwarmIntelligence import HybridEnhancedGravitationalSwarmIntelligence - - lama_register["HybridEnhancedGravitationalSwarmIntelligence"] = HybridEnhancedGravitationalSwarmIntelligence - res = NonObjectOptimizer(method="LLAMAHybridEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer(method="LLAMAHybridEnhancedGravitationalSwarmIntelligence").set_name("LLAMAHybridEnhancedGravitationalSwarmIntelligence", register=True) -except Exception as e: +try: # HybridEnhancedGravitationalSwarmIntelligence + from nevergrad.optimization.lama.HybridEnhancedGravitationalSwarmIntelligence import ( + HybridEnhancedGravitationalSwarmIntelligence, + ) + + lama_register["HybridEnhancedGravitationalSwarmIntelligence"] = ( + HybridEnhancedGravitationalSwarmIntelligence + ) + # res = NonObjectOptimizer(method="LLAMAHybridEnhancedGravitationalSwarmIntelligence")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEnhancedGravitationalSwarmIntelligence = NonObjectOptimizer( + method="LLAMAHybridEnhancedGravitationalSwarmIntelligence" + ).set_name("LLAMAHybridEnhancedGravitationalSwarmIntelligence", register=True) +except Exception as e: # HybridEnhancedGravitationalSwarmIntelligence print("HybridEnhancedGravitationalSwarmIntelligence can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridEvolutionaryAnnealingOptimizer import HybridEvolutionaryAnnealingOptimizer +try: # HybridEvolutionaryAnnealingOptimizer + from nevergrad.optimization.lama.HybridEvolutionaryAnnealingOptimizer import ( + HybridEvolutionaryAnnealingOptimizer, + ) lama_register["HybridEvolutionaryAnnealingOptimizer"] = HybridEvolutionaryAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAHybridEvolutionaryAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: # HybridEvolutionaryAnnealingOptimizer print("HybridEvolutionaryAnnealingOptimizer can not be imported: ", e) -try: +try: # HybridEvolutionaryOptimization from nevergrad.optimization.lama.HybridEvolutionaryOptimization import HybridEvolutionaryOptimization lama_register["HybridEvolutionaryOptimization"] = HybridEvolutionaryOptimization - res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEvolutionaryOptimization = NonObjectOptimizer(method="LLAMAHybridEvolutionaryOptimization").set_name("LLAMAHybridEvolutionaryOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEvolutionaryOptimization = NonObjectOptimizer( + method="LLAMAHybridEvolutionaryOptimization" + ).set_name("LLAMAHybridEvolutionaryOptimization", register=True) +except Exception as e: # HybridEvolutionaryOptimization print("HybridEvolutionaryOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridEvolvingAdaptiveStrategyV28 import HybridEvolvingAdaptiveStrategyV28 +try: # HybridEvolvingAdaptiveStrategyV28 + from nevergrad.optimization.lama.HybridEvolvingAdaptiveStrategyV28 import ( + HybridEvolvingAdaptiveStrategyV28, + ) lama_register["HybridEvolvingAdaptiveStrategyV28"] = HybridEvolvingAdaptiveStrategyV28 - res = NonObjectOptimizer(method="LLAMAHybridEvolvingAdaptiveStrategyV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridEvolvingAdaptiveStrategyV28 = NonObjectOptimizer(method="LLAMAHybridEvolvingAdaptiveStrategyV28").set_name("LLAMAHybridEvolvingAdaptiveStrategyV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridEvolvingAdaptiveStrategyV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridEvolvingAdaptiveStrategyV28 = NonObjectOptimizer( + method="LLAMAHybridEvolvingAdaptiveStrategyV28" + ).set_name("LLAMAHybridEvolvingAdaptiveStrategyV28", register=True) +except Exception as e: # HybridEvolvingAdaptiveStrategyV28 print("HybridEvolvingAdaptiveStrategyV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridExploitationExplorationGradientSearch import HybridExploitationExplorationGradientSearch +try: # HybridExploitationExplorationGradientSearch + from nevergrad.optimization.lama.HybridExploitationExplorationGradientSearch import ( + HybridExploitationExplorationGradientSearch, + ) lama_register["HybridExploitationExplorationGradientSearch"] = HybridExploitationExplorationGradientSearch - res = NonObjectOptimizer(method="LLAMAHybridExploitationExplorationGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridExploitationExplorationGradientSearch = NonObjectOptimizer(method="LLAMAHybridExploitationExplorationGradientSearch").set_name("LLAMAHybridExploitationExplorationGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridExploitationExplorationGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridExploitationExplorationGradientSearch = NonObjectOptimizer( + method="LLAMAHybridExploitationExplorationGradientSearch" + ).set_name("LLAMAHybridExploitationExplorationGradientSearch", register=True) +except Exception as e: # HybridExploitationExplorationGradientSearch print("HybridExploitationExplorationGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGradientAnnealingWithMemory import HybridGradientAnnealingWithMemory +try: # HybridGradientAnnealingWithMemory + from nevergrad.optimization.lama.HybridGradientAnnealingWithMemory import ( + HybridGradientAnnealingWithMemory, + ) lama_register["HybridGradientAnnealingWithMemory"] = HybridGradientAnnealingWithMemory - res = NonObjectOptimizer(method="LLAMAHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientAnnealingWithMemory = NonObjectOptimizer(method="LLAMAHybridGradientAnnealingWithMemory").set_name("LLAMAHybridGradientAnnealingWithMemory", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientAnnealingWithMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientAnnealingWithMemory = NonObjectOptimizer( + method="LLAMAHybridGradientAnnealingWithMemory" + ).set_name("LLAMAHybridGradientAnnealingWithMemory", register=True) +except Exception as e: # HybridGradientAnnealingWithMemory print("HybridGradientAnnealingWithMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGradientBoostedMemoryAnnealingPlus import HybridGradientBoostedMemoryAnnealingPlus +try: # HybridGradientBoostedMemoryAnnealingPlus + from nevergrad.optimization.lama.HybridGradientBoostedMemoryAnnealingPlus import ( + HybridGradientBoostedMemoryAnnealingPlus, + ) lama_register["HybridGradientBoostedMemoryAnnealingPlus"] = HybridGradientBoostedMemoryAnnealingPlus - res = NonObjectOptimizer(method="LLAMAHybridGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer(method="LLAMAHybridGradientBoostedMemoryAnnealingPlus").set_name("LLAMAHybridGradientBoostedMemoryAnnealingPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientBoostedMemoryAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientBoostedMemoryAnnealingPlus = NonObjectOptimizer( + method="LLAMAHybridGradientBoostedMemoryAnnealingPlus" + ).set_name("LLAMAHybridGradientBoostedMemoryAnnealingPlus", register=True) +except Exception as e: # HybridGradientBoostedMemoryAnnealingPlus print("HybridGradientBoostedMemoryAnnealingPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGradientCrossoverOptimization import HybridGradientCrossoverOptimization +try: # HybridGradientCrossoverOptimization + from nevergrad.optimization.lama.HybridGradientCrossoverOptimization import ( + HybridGradientCrossoverOptimization, + ) lama_register["HybridGradientCrossoverOptimization"] = HybridGradientCrossoverOptimization - res = NonObjectOptimizer(method="LLAMAHybridGradientCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientCrossoverOptimization = NonObjectOptimizer(method="LLAMAHybridGradientCrossoverOptimization").set_name("LLAMAHybridGradientCrossoverOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientCrossoverOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientCrossoverOptimization = NonObjectOptimizer( + method="LLAMAHybridGradientCrossoverOptimization" + ).set_name("LLAMAHybridGradientCrossoverOptimization", register=True) +except Exception as e: # HybridGradientCrossoverOptimization print("HybridGradientCrossoverOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGradientDifferentialEvolution import HybridGradientDifferentialEvolution +try: # HybridGradientDifferentialEvolution + from nevergrad.optimization.lama.HybridGradientDifferentialEvolution import ( + HybridGradientDifferentialEvolution, + ) lama_register["HybridGradientDifferentialEvolution"] = HybridGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridGradientDifferentialEvolution").set_name("LLAMAHybridGradientDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridGradientDifferentialEvolution" + ).set_name("LLAMAHybridGradientDifferentialEvolution", register=True) +except Exception as e: # HybridGradientDifferentialEvolution print("HybridGradientDifferentialEvolution can not be imported: ", e) -try: +try: # HybridGradientEvolution from nevergrad.optimization.lama.HybridGradientEvolution import HybridGradientEvolution lama_register["HybridGradientEvolution"] = HybridGradientEvolution - res = NonObjectOptimizer(method="LLAMAHybridGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientEvolution = NonObjectOptimizer(method="LLAMAHybridGradientEvolution").set_name("LLAMAHybridGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientEvolution = NonObjectOptimizer(method="LLAMAHybridGradientEvolution").set_name( + "LLAMAHybridGradientEvolution", register=True + ) +except Exception as e: # HybridGradientEvolution print("HybridGradientEvolution can not be imported: ", e) -try: +try: # HybridGradientMemoryAnnealing from nevergrad.optimization.lama.HybridGradientMemoryAnnealing import HybridGradientMemoryAnnealing lama_register["HybridGradientMemoryAnnealing"] = HybridGradientMemoryAnnealing - res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientMemoryAnnealing = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealing").set_name("LLAMAHybridGradientMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientMemoryAnnealing = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealing" + ).set_name("LLAMAHybridGradientMemoryAnnealing", register=True) +except Exception as e: # HybridGradientMemoryAnnealing print("HybridGradientMemoryAnnealing can not be imported: ", e) -try: +try: # HybridGradientMemoryAnnealingV2 from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV2 import HybridGradientMemoryAnnealingV2 lama_register["HybridGradientMemoryAnnealingV2"] = HybridGradientMemoryAnnealingV2 - res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientMemoryAnnealingV2 = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV2").set_name("LLAMAHybridGradientMemoryAnnealingV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientMemoryAnnealingV2 = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealingV2" + ).set_name("LLAMAHybridGradientMemoryAnnealingV2", register=True) +except Exception as e: # HybridGradientMemoryAnnealingV2 print("HybridGradientMemoryAnnealingV2 can not be imported: ", e) -try: +try: # HybridGradientMemoryAnnealingV3 from nevergrad.optimization.lama.HybridGradientMemoryAnnealingV3 import HybridGradientMemoryAnnealingV3 lama_register["HybridGradientMemoryAnnealingV3"] = HybridGradientMemoryAnnealingV3 - res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientMemoryAnnealingV3 = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV3").set_name("LLAMAHybridGradientMemoryAnnealingV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientMemoryAnnealingV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientMemoryAnnealingV3 = NonObjectOptimizer( + method="LLAMAHybridGradientMemoryAnnealingV3" + ).set_name("LLAMAHybridGradientMemoryAnnealingV3", register=True) +except Exception as e: # HybridGradientMemoryAnnealingV3 print("HybridGradientMemoryAnnealingV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGradientMemorySimulatedAnnealing import HybridGradientMemorySimulatedAnnealing +try: # HybridGradientMemorySimulatedAnnealing + from nevergrad.optimization.lama.HybridGradientMemorySimulatedAnnealing import ( + HybridGradientMemorySimulatedAnnealing, + ) lama_register["HybridGradientMemorySimulatedAnnealing"] = HybridGradientMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAHybridGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAHybridGradientMemorySimulatedAnnealing").set_name("LLAMAHybridGradientMemorySimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAHybridGradientMemorySimulatedAnnealing" + ).set_name("LLAMAHybridGradientMemorySimulatedAnnealing", register=True) +except Exception as e: # HybridGradientMemorySimulatedAnnealing print("HybridGradientMemorySimulatedAnnealing can not be imported: ", e) -try: +try: # HybridGradientPSO from nevergrad.optimization.lama.HybridGradientPSO import HybridGradientPSO lama_register["HybridGradientPSO"] = HybridGradientPSO - res = NonObjectOptimizer(method="LLAMAHybridGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGradientPSO = NonObjectOptimizer(method="LLAMAHybridGradientPSO").set_name("LLAMAHybridGradientPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGradientPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGradientPSO = NonObjectOptimizer(method="LLAMAHybridGradientPSO").set_name( + "LLAMAHybridGradientPSO", register=True + ) +except Exception as e: # HybridGradientPSO print("HybridGradientPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridGuidedEvolutionaryOptimizer import HybridGuidedEvolutionaryOptimizer +try: # HybridGuidedEvolutionaryOptimizer + from nevergrad.optimization.lama.HybridGuidedEvolutionaryOptimizer import ( + HybridGuidedEvolutionaryOptimizer, + ) lama_register["HybridGuidedEvolutionaryOptimizer"] = HybridGuidedEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAHybridGuidedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridGuidedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHybridGuidedEvolutionaryOptimizer").set_name("LLAMAHybridGuidedEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridGuidedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridGuidedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHybridGuidedEvolutionaryOptimizer" + ).set_name("LLAMAHybridGuidedEvolutionaryOptimizer", register=True) +except Exception as e: # HybridGuidedEvolutionaryOptimizer print("HybridGuidedEvolutionaryOptimizer can not be imported: ", e) -try: +try: # HybridMemoryAdaptiveDE from nevergrad.optimization.lama.HybridMemoryAdaptiveDE import HybridMemoryAdaptiveDE lama_register["HybridMemoryAdaptiveDE"] = HybridMemoryAdaptiveDE - res = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE").set_name("LLAMAHybridMemoryAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridMemoryAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridMemoryAdaptiveDE").set_name( + "LLAMAHybridMemoryAdaptiveDE", register=True + ) +except Exception as e: # HybridMemoryAdaptiveDE print("HybridMemoryAdaptiveDE can not be imported: ", e) -try: +try: # HybridMultiDimensionalAnnealing from nevergrad.optimization.lama.HybridMultiDimensionalAnnealing import HybridMultiDimensionalAnnealing lama_register["HybridMultiDimensionalAnnealing"] = HybridMultiDimensionalAnnealing - res = NonObjectOptimizer(method="LLAMAHybridMultiDimensionalAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridMultiDimensionalAnnealing = NonObjectOptimizer(method="LLAMAHybridMultiDimensionalAnnealing").set_name("LLAMAHybridMultiDimensionalAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridMultiDimensionalAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridMultiDimensionalAnnealing = NonObjectOptimizer( + method="LLAMAHybridMultiDimensionalAnnealing" + ).set_name("LLAMAHybridMultiDimensionalAnnealing", register=True) +except Exception as e: # HybridMultiDimensionalAnnealing print("HybridMultiDimensionalAnnealing can not be imported: ", e) -try: +try: # HybridPSO_DE from nevergrad.optimization.lama.HybridPSO_DE import HybridPSO_DE lama_register["HybridPSO_DE"] = HybridPSO_DE - res = NonObjectOptimizer(method="LLAMAHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridPSO_DE = NonObjectOptimizer(method="LLAMAHybridPSO_DE").set_name("LLAMAHybridPSO_DE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridPSO_DE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridPSO_DE = NonObjectOptimizer(method="LLAMAHybridPSO_DE").set_name( + "LLAMAHybridPSO_DE", register=True + ) +except Exception as e: # HybridPSO_DE print("HybridPSO_DE can not be imported: ", e) -try: +try: # HybridPSO_DE_GradientOptimizer from nevergrad.optimization.lama.HybridPSO_DE_GradientOptimizer import HybridPSO_DE_GradientOptimizer lama_register["HybridPSO_DE_GradientOptimizer"] = HybridPSO_DE_GradientOptimizer - res = NonObjectOptimizer(method="LLAMAHybridPSO_DE_GradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridPSO_DE_GradientOptimizer = NonObjectOptimizer(method="LLAMAHybridPSO_DE_GradientOptimizer").set_name("LLAMAHybridPSO_DE_GradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridPSO_DE_GradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridPSO_DE_GradientOptimizer = NonObjectOptimizer( + method="LLAMAHybridPSO_DE_GradientOptimizer" + ).set_name("LLAMAHybridPSO_DE_GradientOptimizer", register=True) +except Exception as e: # HybridPSO_DE_GradientOptimizer print("HybridPSO_DE_GradientOptimizer can not be imported: ", e) -try: +try: # HybridParticleDE from nevergrad.optimization.lama.HybridParticleDE import HybridParticleDE lama_register["HybridParticleDE"] = HybridParticleDE - res = NonObjectOptimizer(method="LLAMAHybridParticleDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridParticleDE = NonObjectOptimizer(method="LLAMAHybridParticleDE").set_name("LLAMAHybridParticleDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridParticleDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridParticleDE = NonObjectOptimizer(method="LLAMAHybridParticleDE").set_name( + "LLAMAHybridParticleDE", register=True + ) +except Exception as e: # HybridParticleDE print("HybridParticleDE can not be imported: ", e) -try: +try: # HybridParticleDE_v2 from nevergrad.optimization.lama.HybridParticleDE_v2 import HybridParticleDE_v2 lama_register["HybridParticleDE_v2"] = HybridParticleDE_v2 - res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridParticleDE_v2 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2").set_name("LLAMAHybridParticleDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridParticleDE_v2 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v2").set_name( + "LLAMAHybridParticleDE_v2", register=True + ) +except Exception as e: # HybridParticleDE_v2 print("HybridParticleDE_v2 can not be imported: ", e) -try: +try: # HybridParticleDE_v3 from nevergrad.optimization.lama.HybridParticleDE_v3 import HybridParticleDE_v3 lama_register["HybridParticleDE_v3"] = HybridParticleDE_v3 - res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridParticleDE_v3 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3").set_name("LLAMAHybridParticleDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridParticleDE_v3 = NonObjectOptimizer(method="LLAMAHybridParticleDE_v3").set_name( + "LLAMAHybridParticleDE_v3", register=True + ) +except Exception as e: # HybridParticleDE_v3 print("HybridParticleDE_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridParticleSwarmDifferentialEvolutionOptimizer import HybridParticleSwarmDifferentialEvolutionOptimizer - - lama_register["HybridParticleSwarmDifferentialEvolutionOptimizer"] = HybridParticleSwarmDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer").set_name("LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer", register=True) -except Exception as e: +try: # HybridParticleSwarmDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.HybridParticleSwarmDifferentialEvolutionOptimizer import ( + HybridParticleSwarmDifferentialEvolutionOptimizer, + ) + + lama_register["HybridParticleSwarmDifferentialEvolutionOptimizer"] = ( + HybridParticleSwarmDifferentialEvolutionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer" + ).set_name("LLAMAHybridParticleSwarmDifferentialEvolutionOptimizer", register=True) +except Exception as e: # HybridParticleSwarmDifferentialEvolutionOptimizer print("HybridParticleSwarmDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumAdaptiveMemeticSearch import HybridQuantumAdaptiveMemeticSearch +try: # HybridQuantumAdaptiveMemeticSearch + from nevergrad.optimization.lama.HybridQuantumAdaptiveMemeticSearch import ( + HybridQuantumAdaptiveMemeticSearch, + ) lama_register["HybridQuantumAdaptiveMemeticSearch"] = HybridQuantumAdaptiveMemeticSearch - res = NonObjectOptimizer(method="LLAMAHybridQuantumAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumAdaptiveMemeticSearch = NonObjectOptimizer(method="LLAMAHybridQuantumAdaptiveMemeticSearch").set_name("LLAMAHybridQuantumAdaptiveMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumAdaptiveMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumAdaptiveMemeticSearch = NonObjectOptimizer( + method="LLAMAHybridQuantumAdaptiveMemeticSearch" + ).set_name("LLAMAHybridQuantumAdaptiveMemeticSearch", register=True) +except Exception as e: # HybridQuantumAdaptiveMemeticSearch print("HybridQuantumAdaptiveMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolution import HybridQuantumDifferentialEvolution +try: # HybridQuantumDifferentialEvolution + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolution import ( + HybridQuantumDifferentialEvolution, + ) lama_register["HybridQuantumDifferentialEvolution"] = HybridQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolution").set_name("LLAMAHybridQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolution" + ).set_name("LLAMAHybridQuantumDifferentialEvolution", register=True) +except Exception as e: # HybridQuantumDifferentialEvolution print("HybridQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart import HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart - - lama_register["HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart"] = HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart - res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart").set_name("LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart", register=True) -except Exception as e: - print("HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch import HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch - - lama_register["HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch"] = HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch - res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch").set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch", register=True) -except Exception as e: +try: # HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart import ( + HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart"] = ( + HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart + ) + # res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart" + ).set_name( + "LLAMAHybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart", register=True + ) +except Exception as e: # HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart + print( + "HybridQuantumDifferentialEvolutionWithAdaptiveMemoryAndElitistDynamicRestart can not be imported: ", + e, + ) +try: # HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch import ( + HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch"] = ( + HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch" + ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch", register=True) +except Exception as e: # HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch print("HybridQuantumDifferentialEvolutionWithDynamicElitismAndLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory import HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory - - lama_register["HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory"] = HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory - res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory").set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory", register=True) -except Exception as e: +try: # HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory + from nevergrad.optimization.lama.HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory import ( + HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory, + ) + + lama_register["HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory"] = ( + HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory + ) + # res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory" + ).set_name("LLAMAHybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory", register=True) +except Exception as e: # HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory print("HybridQuantumDifferentialEvolutionWithDynamicLearningAndMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumDifferentialParticleSwarmOptimization import HybridQuantumDifferentialParticleSwarmOptimization - - lama_register["HybridQuantumDifferentialParticleSwarmOptimization"] = HybridQuantumDifferentialParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization").set_name("LLAMAHybridQuantumDifferentialParticleSwarmOptimization", register=True) -except Exception as e: +try: # HybridQuantumDifferentialParticleSwarmOptimization + from nevergrad.optimization.lama.HybridQuantumDifferentialParticleSwarmOptimization import ( + HybridQuantumDifferentialParticleSwarmOptimization, + ) + + lama_register["HybridQuantumDifferentialParticleSwarmOptimization"] = ( + HybridQuantumDifferentialParticleSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAHybridQuantumDifferentialParticleSwarmOptimization" + ).set_name("LLAMAHybridQuantumDifferentialParticleSwarmOptimization", register=True) +except Exception as e: # HybridQuantumDifferentialParticleSwarmOptimization print("HybridQuantumDifferentialParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuantumEnhancedMultiPhaseAdaptiveDE import HybridQuantumEnhancedMultiPhaseAdaptiveDE +try: # HybridQuantumEnhancedMultiPhaseAdaptiveDE + from nevergrad.optimization.lama.HybridQuantumEnhancedMultiPhaseAdaptiveDE import ( + HybridQuantumEnhancedMultiPhaseAdaptiveDE, + ) lama_register["HybridQuantumEnhancedMultiPhaseAdaptiveDE"] = HybridQuantumEnhancedMultiPhaseAdaptiveDE - res = NonObjectOptimizer(method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE").set_name("LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE = NonObjectOptimizer( + method="LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE" + ).set_name("LLAMAHybridQuantumEnhancedMultiPhaseAdaptiveDE", register=True) +except Exception as e: # HybridQuantumEnhancedMultiPhaseAdaptiveDE print("HybridQuantumEnhancedMultiPhaseAdaptiveDE can not be imported: ", e) -try: +try: # HybridQuantumEvolution from nevergrad.optimization.lama.HybridQuantumEvolution import HybridQuantumEvolution lama_register["HybridQuantumEvolution"] = HybridQuantumEvolution - res = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution").set_name("LLAMAHybridQuantumEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumEvolution").set_name( + "LLAMAHybridQuantumEvolution", register=True + ) +except Exception as e: # HybridQuantumEvolution print("HybridQuantumEvolution can not be imported: ", e) -try: +try: # HybridQuantumGradientEvolution from nevergrad.optimization.lama.HybridQuantumGradientEvolution import HybridQuantumGradientEvolution lama_register["HybridQuantumGradientEvolution"] = HybridQuantumGradientEvolution - res = NonObjectOptimizer(method="LLAMAHybridQuantumGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumGradientEvolution = NonObjectOptimizer(method="LLAMAHybridQuantumGradientEvolution").set_name("LLAMAHybridQuantumGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumGradientEvolution = NonObjectOptimizer( + method="LLAMAHybridQuantumGradientEvolution" + ).set_name("LLAMAHybridQuantumGradientEvolution", register=True) +except Exception as e: # HybridQuantumGradientEvolution print("HybridQuantumGradientEvolution can not be imported: ", e) -try: +try: # HybridQuantumLevyAdaptiveSwarmV2 from nevergrad.optimization.lama.HybridQuantumLevyAdaptiveSwarmV2 import HybridQuantumLevyAdaptiveSwarmV2 lama_register["HybridQuantumLevyAdaptiveSwarmV2"] = HybridQuantumLevyAdaptiveSwarmV2 - res = NonObjectOptimizer(method="LLAMAHybridQuantumLevyAdaptiveSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumLevyAdaptiveSwarmV2 = NonObjectOptimizer(method="LLAMAHybridQuantumLevyAdaptiveSwarmV2").set_name("LLAMAHybridQuantumLevyAdaptiveSwarmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumLevyAdaptiveSwarmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumLevyAdaptiveSwarmV2 = NonObjectOptimizer( + method="LLAMAHybridQuantumLevyAdaptiveSwarmV2" + ).set_name("LLAMAHybridQuantumLevyAdaptiveSwarmV2", register=True) +except Exception as e: # HybridQuantumLevyAdaptiveSwarmV2 print("HybridQuantumLevyAdaptiveSwarmV2 can not be imported: ", e) -try: +try: # HybridQuantumMemeticOptimization from nevergrad.optimization.lama.HybridQuantumMemeticOptimization import HybridQuantumMemeticOptimization lama_register["HybridQuantumMemeticOptimization"] = HybridQuantumMemeticOptimization - res = NonObjectOptimizer(method="LLAMAHybridQuantumMemeticOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuantumMemeticOptimization = NonObjectOptimizer(method="LLAMAHybridQuantumMemeticOptimization").set_name("LLAMAHybridQuantumMemeticOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuantumMemeticOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuantumMemeticOptimization = NonObjectOptimizer( + method="LLAMAHybridQuantumMemeticOptimization" + ).set_name("LLAMAHybridQuantumMemeticOptimization", register=True) +except Exception as e: # HybridQuantumMemeticOptimization print("HybridQuantumMemeticOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuasiRandomDEGradientAnnealing import HybridQuasiRandomDEGradientAnnealing +try: # HybridQuasiRandomDEGradientAnnealing + from nevergrad.optimization.lama.HybridQuasiRandomDEGradientAnnealing import ( + HybridQuasiRandomDEGradientAnnealing, + ) lama_register["HybridQuasiRandomDEGradientAnnealing"] = HybridQuasiRandomDEGradientAnnealing - res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMAHybridQuasiRandomDEGradientAnnealing").set_name("LLAMAHybridQuasiRandomDEGradientAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMAHybridQuasiRandomDEGradientAnnealing" + ).set_name("LLAMAHybridQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: # HybridQuasiRandomDEGradientAnnealing print("HybridQuasiRandomDEGradientAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridQuasiRandomGradientDifferentialEvolution import HybridQuasiRandomGradientDifferentialEvolution - - lama_register["HybridQuasiRandomGradientDifferentialEvolution"] = HybridQuasiRandomGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridQuasiRandomGradientDifferentialEvolution").set_name("LLAMAHybridQuasiRandomGradientDifferentialEvolution", register=True) -except Exception as e: +try: # HybridQuasiRandomGradientDifferentialEvolution + from nevergrad.optimization.lama.HybridQuasiRandomGradientDifferentialEvolution import ( + HybridQuasiRandomGradientDifferentialEvolution, + ) + + lama_register["HybridQuasiRandomGradientDifferentialEvolution"] = ( + HybridQuasiRandomGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAHybridQuasiRandomGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridQuasiRandomGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridQuasiRandomGradientDifferentialEvolution" + ).set_name("LLAMAHybridQuasiRandomGradientDifferentialEvolution", register=True) +except Exception as e: # HybridQuasiRandomGradientDifferentialEvolution print("HybridQuasiRandomGradientDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost import HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost - - lama_register["HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost"] = HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost - res = NonObjectOptimizer(method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost = NonObjectOptimizer(method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost").set_name("LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost", register=True) -except Exception as e: +try: # HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost + from nevergrad.optimization.lama.HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost import ( + HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost, + ) + + lama_register["HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost"] = ( + HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost + ) + # res = NonObjectOptimizer(method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost = NonObjectOptimizer( + method="LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost" + ).set_name("LLAMAHybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost", register=True) +except Exception as e: # HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost print("HybridRefinedDifferentialEvolutionWithQuasiRandomGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.HybridSelfAdaptiveDifferentialEvolution import HybridSelfAdaptiveDifferentialEvolution +try: # HybridSelfAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.HybridSelfAdaptiveDifferentialEvolution import ( + HybridSelfAdaptiveDifferentialEvolution, + ) lama_register["HybridSelfAdaptiveDifferentialEvolution"] = HybridSelfAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAHybridSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHybridSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAHybridSelfAdaptiveDifferentialEvolution").set_name("LLAMAHybridSelfAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHybridSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHybridSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAHybridSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAHybridSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: # HybridSelfAdaptiveDifferentialEvolution print("HybridSelfAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # HyperAdaptiveConvergenceStrategy from nevergrad.optimization.lama.HyperAdaptiveConvergenceStrategy import HyperAdaptiveConvergenceStrategy lama_register["HyperAdaptiveConvergenceStrategy"] = HyperAdaptiveConvergenceStrategy - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveConvergenceStrategy = NonObjectOptimizer(method="LLAMAHyperAdaptiveConvergenceStrategy").set_name("LLAMAHyperAdaptiveConvergenceStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveConvergenceStrategy = NonObjectOptimizer( + method="LLAMAHyperAdaptiveConvergenceStrategy" + ).set_name("LLAMAHyperAdaptiveConvergenceStrategy", register=True) +except Exception as e: # HyperAdaptiveConvergenceStrategy print("HyperAdaptiveConvergenceStrategy can not be imported: ", e) -try: +try: # HyperAdaptiveGradientRAMEDS from nevergrad.optimization.lama.HyperAdaptiveGradientRAMEDS import HyperAdaptiveGradientRAMEDS lama_register["HyperAdaptiveGradientRAMEDS"] = HyperAdaptiveGradientRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveGradientRAMEDS = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS").set_name("LLAMAHyperAdaptiveGradientRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveGradientRAMEDS = NonObjectOptimizer(method="LLAMAHyperAdaptiveGradientRAMEDS").set_name( + "LLAMAHyperAdaptiveGradientRAMEDS", register=True + ) +except Exception as e: # HyperAdaptiveGradientRAMEDS print("HyperAdaptiveGradientRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperAdaptiveHybridDEPSOwithDynamicRestart import HyperAdaptiveHybridDEPSOwithDynamicRestart +try: # HyperAdaptiveHybridDEPSOwithDynamicRestart + from nevergrad.optimization.lama.HyperAdaptiveHybridDEPSOwithDynamicRestart import ( + HyperAdaptiveHybridDEPSOwithDynamicRestart, + ) lama_register["HyperAdaptiveHybridDEPSOwithDynamicRestart"] = HyperAdaptiveHybridDEPSOwithDynamicRestart - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart = NonObjectOptimizer(method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart").set_name("LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart = NonObjectOptimizer( + method="LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart" + ).set_name("LLAMAHyperAdaptiveHybridDEPSOwithDynamicRestart", register=True) +except Exception as e: # HyperAdaptiveHybridDEPSOwithDynamicRestart print("HyperAdaptiveHybridDEPSOwithDynamicRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperAdaptiveMemoryGuidedStrategyV74 import HyperAdaptiveMemoryGuidedStrategyV74 +try: # HyperAdaptiveMemoryGuidedStrategyV74 + from nevergrad.optimization.lama.HyperAdaptiveMemoryGuidedStrategyV74 import ( + HyperAdaptiveMemoryGuidedStrategyV74, + ) lama_register["HyperAdaptiveMemoryGuidedStrategyV74"] = HyperAdaptiveMemoryGuidedStrategyV74 - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveMemoryGuidedStrategyV74 = NonObjectOptimizer(method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74").set_name("LLAMAHyperAdaptiveMemoryGuidedStrategyV74", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveMemoryGuidedStrategyV74 = NonObjectOptimizer( + method="LLAMAHyperAdaptiveMemoryGuidedStrategyV74" + ).set_name("LLAMAHyperAdaptiveMemoryGuidedStrategyV74", register=True) +except Exception as e: # HyperAdaptiveMemoryGuidedStrategyV74 print("HyperAdaptiveMemoryGuidedStrategyV74 can not be imported: ", e) -try: +try: # HyperAdaptivePrecisionOptimizer from nevergrad.optimization.lama.HyperAdaptivePrecisionOptimizer import HyperAdaptivePrecisionOptimizer lama_register["HyperAdaptivePrecisionOptimizer"] = HyperAdaptivePrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAHyperAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperAdaptivePrecisionOptimizer").set_name("LLAMAHyperAdaptivePrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperAdaptivePrecisionOptimizer" + ).set_name("LLAMAHyperAdaptivePrecisionOptimizer", register=True) +except Exception as e: # HyperAdaptivePrecisionOptimizer print("HyperAdaptivePrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperAdaptiveSinusoidalDifferentialSwarm import HyperAdaptiveSinusoidalDifferentialSwarm +try: # HyperAdaptiveSinusoidalDifferentialSwarm + from nevergrad.optimization.lama.HyperAdaptiveSinusoidalDifferentialSwarm import ( + HyperAdaptiveSinusoidalDifferentialSwarm, + ) lama_register["HyperAdaptiveSinusoidalDifferentialSwarm"] = HyperAdaptiveSinusoidalDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer(method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm").set_name("LLAMAHyperAdaptiveSinusoidalDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveSinusoidalDifferentialSwarm = NonObjectOptimizer( + method="LLAMAHyperAdaptiveSinusoidalDifferentialSwarm" + ).set_name("LLAMAHyperAdaptiveSinusoidalDifferentialSwarm", register=True) +except Exception as e: # HyperAdaptiveSinusoidalDifferentialSwarm print("HyperAdaptiveSinusoidalDifferentialSwarm can not be imported: ", e) -try: +try: # HyperAdaptiveStrategyDE from nevergrad.optimization.lama.HyperAdaptiveStrategyDE import HyperAdaptiveStrategyDE lama_register["HyperAdaptiveStrategyDE"] = HyperAdaptiveStrategyDE - res = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE").set_name("LLAMAHyperAdaptiveStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMAHyperAdaptiveStrategyDE").set_name( + "LLAMAHyperAdaptiveStrategyDE", register=True + ) +except Exception as e: # HyperAdaptiveStrategyDE print("HyperAdaptiveStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperAdvancedDynamicPrecisionOptimizerV41 import HyperAdvancedDynamicPrecisionOptimizerV41 +try: # HyperAdvancedDynamicPrecisionOptimizerV41 + from nevergrad.optimization.lama.HyperAdvancedDynamicPrecisionOptimizerV41 import ( + HyperAdvancedDynamicPrecisionOptimizerV41, + ) lama_register["HyperAdvancedDynamicPrecisionOptimizerV41"] = HyperAdvancedDynamicPrecisionOptimizerV41 - res = NonObjectOptimizer(method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperAdvancedDynamicPrecisionOptimizerV41 = NonObjectOptimizer(method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41").set_name("LLAMAHyperAdvancedDynamicPrecisionOptimizerV41", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperAdvancedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( + method="LLAMAHyperAdvancedDynamicPrecisionOptimizerV41" + ).set_name("LLAMAHyperAdvancedDynamicPrecisionOptimizerV41", register=True) +except Exception as e: # HyperAdvancedDynamicPrecisionOptimizerV41 print("HyperAdvancedDynamicPrecisionOptimizerV41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperEvolvedDynamicPrecisionOptimizerV48 import HyperEvolvedDynamicPrecisionOptimizerV48 +try: # HyperEvolvedDynamicPrecisionOptimizerV48 + from nevergrad.optimization.lama.HyperEvolvedDynamicPrecisionOptimizerV48 import ( + HyperEvolvedDynamicPrecisionOptimizerV48, + ) lama_register["HyperEvolvedDynamicPrecisionOptimizerV48"] = HyperEvolvedDynamicPrecisionOptimizerV48 - res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperEvolvedDynamicPrecisionOptimizerV48 = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48").set_name("LLAMAHyperEvolvedDynamicPrecisionOptimizerV48", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperEvolvedDynamicPrecisionOptimizerV48 = NonObjectOptimizer( + method="LLAMAHyperEvolvedDynamicPrecisionOptimizerV48" + ).set_name("LLAMAHyperEvolvedDynamicPrecisionOptimizerV48", register=True) +except Exception as e: # HyperEvolvedDynamicPrecisionOptimizerV48 print("HyperEvolvedDynamicPrecisionOptimizerV48 can not be imported: ", e) -try: +try: # HyperEvolvedDynamicRAMEDS from nevergrad.optimization.lama.HyperEvolvedDynamicRAMEDS import HyperEvolvedDynamicRAMEDS lama_register["HyperEvolvedDynamicRAMEDS"] = HyperEvolvedDynamicRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS").set_name("LLAMAHyperEvolvedDynamicRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedDynamicRAMEDS").set_name( + "LLAMAHyperEvolvedDynamicRAMEDS", register=True + ) +except Exception as e: # HyperEvolvedDynamicRAMEDS print("HyperEvolvedDynamicRAMEDS can not be imported: ", e) -try: +try: # HyperEvolvedRAMEDS from nevergrad.optimization.lama.HyperEvolvedRAMEDS import HyperEvolvedRAMEDS lama_register["HyperEvolvedRAMEDS"] = HyperEvolvedRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperEvolvedRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS").set_name("LLAMAHyperEvolvedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperEvolvedRAMEDS = NonObjectOptimizer(method="LLAMAHyperEvolvedRAMEDS").set_name( + "LLAMAHyperEvolvedRAMEDS", register=True + ) +except Exception as e: # HyperEvolvedRAMEDS print("HyperEvolvedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperFocusedAdaptiveElitistStrategyV5 import HyperFocusedAdaptiveElitistStrategyV5 +try: # HyperFocusedAdaptiveElitistStrategyV5 + from nevergrad.optimization.lama.HyperFocusedAdaptiveElitistStrategyV5 import ( + HyperFocusedAdaptiveElitistStrategyV5, + ) lama_register["HyperFocusedAdaptiveElitistStrategyV5"] = HyperFocusedAdaptiveElitistStrategyV5 - res = NonObjectOptimizer(method="LLAMAHyperFocusedAdaptiveElitistStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperFocusedAdaptiveElitistStrategyV5 = NonObjectOptimizer(method="LLAMAHyperFocusedAdaptiveElitistStrategyV5").set_name("LLAMAHyperFocusedAdaptiveElitistStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperFocusedAdaptiveElitistStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperFocusedAdaptiveElitistStrategyV5 = NonObjectOptimizer( + method="LLAMAHyperFocusedAdaptiveElitistStrategyV5" + ).set_name("LLAMAHyperFocusedAdaptiveElitistStrategyV5", register=True) +except Exception as e: # HyperFocusedAdaptiveElitistStrategyV5 print("HyperFocusedAdaptiveElitistStrategyV5 can not be imported: ", e) -try: +try: # HyperOptimalRAMEDS from nevergrad.optimization.lama.HyperOptimalRAMEDS import HyperOptimalRAMEDS lama_register["HyperOptimalRAMEDS"] = HyperOptimalRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimalRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS").set_name("LLAMAHyperOptimalRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimalRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimalRAMEDS").set_name( + "LLAMAHyperOptimalRAMEDS", register=True + ) +except Exception as e: # HyperOptimalRAMEDS print("HyperOptimalRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimalStrategicEvolutionaryOptimizerV58 import HyperOptimalStrategicEvolutionaryOptimizerV58 - - lama_register["HyperOptimalStrategicEvolutionaryOptimizerV58"] = HyperOptimalStrategicEvolutionaryOptimizerV58 - res = NonObjectOptimizer(method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58 = NonObjectOptimizer(method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58").set_name("LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58", register=True) -except Exception as e: +try: # HyperOptimalStrategicEvolutionaryOptimizerV58 + from nevergrad.optimization.lama.HyperOptimalStrategicEvolutionaryOptimizerV58 import ( + HyperOptimalStrategicEvolutionaryOptimizerV58, + ) + + lama_register["HyperOptimalStrategicEvolutionaryOptimizerV58"] = ( + HyperOptimalStrategicEvolutionaryOptimizerV58 + ) + # res = NonObjectOptimizer(method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58 = NonObjectOptimizer( + method="LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58" + ).set_name("LLAMAHyperOptimalStrategicEvolutionaryOptimizerV58", register=True) +except Exception as e: # HyperOptimalStrategicEvolutionaryOptimizerV58 print("HyperOptimalStrategicEvolutionaryOptimizerV58 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizer import HyperOptimizedDynamicPrecisionOptimizer +try: # HyperOptimizedDynamicPrecisionOptimizer + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizer import ( + HyperOptimizedDynamicPrecisionOptimizer, + ) lama_register["HyperOptimizedDynamicPrecisionOptimizer"] = HyperOptimizedDynamicPrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizer").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizer" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizer", register=True) +except Exception as e: # HyperOptimizedDynamicPrecisionOptimizer print("HyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV12 import HyperOptimizedDynamicPrecisionOptimizerV12 +try: # HyperOptimizedDynamicPrecisionOptimizerV12 + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV12 import ( + HyperOptimizedDynamicPrecisionOptimizerV12, + ) lama_register["HyperOptimizedDynamicPrecisionOptimizerV12"] = HyperOptimizedDynamicPrecisionOptimizerV12 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedDynamicPrecisionOptimizerV12 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV12 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV12" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV12", register=True) +except Exception as e: # HyperOptimizedDynamicPrecisionOptimizerV12 print("HyperOptimizedDynamicPrecisionOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV42 import HyperOptimizedDynamicPrecisionOptimizerV42 +try: # HyperOptimizedDynamicPrecisionOptimizerV42 + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV42 import ( + HyperOptimizedDynamicPrecisionOptimizerV42, + ) lama_register["HyperOptimizedDynamicPrecisionOptimizerV42"] = HyperOptimizedDynamicPrecisionOptimizerV42 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedDynamicPrecisionOptimizerV42 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV42", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV42 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV42" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV42", register=True) +except Exception as e: # HyperOptimizedDynamicPrecisionOptimizerV42 print("HyperOptimizedDynamicPrecisionOptimizerV42 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV43 import HyperOptimizedDynamicPrecisionOptimizerV43 +try: # HyperOptimizedDynamicPrecisionOptimizerV43 + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV43 import ( + HyperOptimizedDynamicPrecisionOptimizerV43, + ) lama_register["HyperOptimizedDynamicPrecisionOptimizerV43"] = HyperOptimizedDynamicPrecisionOptimizerV43 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedDynamicPrecisionOptimizerV43 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV43", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV43 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV43" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV43", register=True) +except Exception as e: # HyperOptimizedDynamicPrecisionOptimizerV43 print("HyperOptimizedDynamicPrecisionOptimizerV43 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV57 import HyperOptimizedDynamicPrecisionOptimizerV57 +try: # HyperOptimizedDynamicPrecisionOptimizerV57 + from nevergrad.optimization.lama.HyperOptimizedDynamicPrecisionOptimizerV57 import ( + HyperOptimizedDynamicPrecisionOptimizerV57, + ) lama_register["HyperOptimizedDynamicPrecisionOptimizerV57"] = HyperOptimizedDynamicPrecisionOptimizerV57 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedDynamicPrecisionOptimizerV57 = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57").set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV57", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedDynamicPrecisionOptimizerV57 = NonObjectOptimizer( + method="LLAMAHyperOptimizedDynamicPrecisionOptimizerV57" + ).set_name("LLAMAHyperOptimizedDynamicPrecisionOptimizerV57", register=True) +except Exception as e: # HyperOptimizedDynamicPrecisionOptimizerV57 print("HyperOptimizedDynamicPrecisionOptimizerV57 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedEvolutionaryGradientOptimizerV61 import HyperOptimizedEvolutionaryGradientOptimizerV61 - - lama_register["HyperOptimizedEvolutionaryGradientOptimizerV61"] = HyperOptimizedEvolutionaryGradientOptimizerV61 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61 = NonObjectOptimizer(method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61").set_name("LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61", register=True) -except Exception as e: +try: # HyperOptimizedEvolutionaryGradientOptimizerV61 + from nevergrad.optimization.lama.HyperOptimizedEvolutionaryGradientOptimizerV61 import ( + HyperOptimizedEvolutionaryGradientOptimizerV61, + ) + + lama_register["HyperOptimizedEvolutionaryGradientOptimizerV61"] = ( + HyperOptimizedEvolutionaryGradientOptimizerV61 + ) + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61 = NonObjectOptimizer( + method="LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61" + ).set_name("LLAMAHyperOptimizedEvolutionaryGradientOptimizerV61", register=True) +except Exception as e: # HyperOptimizedEvolutionaryGradientOptimizerV61 print("HyperOptimizedEvolutionaryGradientOptimizerV61 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedGradientEnhancedRAMEDS import HyperOptimizedGradientEnhancedRAMEDS +try: # HyperOptimizedGradientEnhancedRAMEDS + from nevergrad.optimization.lama.HyperOptimizedGradientEnhancedRAMEDS import ( + HyperOptimizedGradientEnhancedRAMEDS, + ) lama_register["HyperOptimizedGradientEnhancedRAMEDS"] = HyperOptimizedGradientEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperOptimizedGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedGradientEnhancedRAMEDS").set_name("LLAMAHyperOptimizedGradientEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMAHyperOptimizedGradientEnhancedRAMEDS" + ).set_name("LLAMAHyperOptimizedGradientEnhancedRAMEDS", register=True) +except Exception as e: # HyperOptimizedGradientEnhancedRAMEDS print("HyperOptimizedGradientEnhancedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 import HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 - - lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV47"] = HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47 = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47").set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47", register=True) -except Exception as e: +try: # HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 import ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV47, + ) + + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV47"] = ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 + ) + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47 = NonObjectOptimizer( + method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47" + ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV47", register=True) +except Exception as e: # HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV47 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 import HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 - - lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV48"] = HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48 = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48").set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48", register=True) -except Exception as e: +try: # HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 + from nevergrad.optimization.lama.HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 import ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV48, + ) + + lama_register["HyperOptimizedMultiStrategicEvolutionaryOptimizerV48"] = ( + HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 + ) + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48 = NonObjectOptimizer( + method="LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48" + ).set_name("LLAMAHyperOptimizedMultiStrategicEvolutionaryOptimizerV48", register=True) +except Exception as e: # HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 print("HyperOptimizedMultiStrategicEvolutionaryOptimizerV48 can not be imported: ", e) -try: +try: # HyperOptimizedRAMEDS from nevergrad.optimization.lama.HyperOptimizedRAMEDS import HyperOptimizedRAMEDS lama_register["HyperOptimizedRAMEDS"] = HyperOptimizedRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS").set_name("LLAMAHyperOptimizedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedRAMEDS").set_name( + "LLAMAHyperOptimizedRAMEDS", register=True + ) +except Exception as e: # HyperOptimizedRAMEDS print("HyperOptimizedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedSpiralDifferentialOptimizerV8 import HyperOptimizedSpiralDifferentialOptimizerV8 +try: # HyperOptimizedSpiralDifferentialOptimizerV8 + from nevergrad.optimization.lama.HyperOptimizedSpiralDifferentialOptimizerV8 import ( + HyperOptimizedSpiralDifferentialOptimizerV8, + ) lama_register["HyperOptimizedSpiralDifferentialOptimizerV8"] = HyperOptimizedSpiralDifferentialOptimizerV8 - res = NonObjectOptimizer(method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedSpiralDifferentialOptimizerV8 = NonObjectOptimizer(method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8").set_name("LLAMAHyperOptimizedSpiralDifferentialOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedSpiralDifferentialOptimizerV8 = NonObjectOptimizer( + method="LLAMAHyperOptimizedSpiralDifferentialOptimizerV8" + ).set_name("LLAMAHyperOptimizedSpiralDifferentialOptimizerV8", register=True) +except Exception as e: # HyperOptimizedSpiralDifferentialOptimizerV8 print("HyperOptimizedSpiralDifferentialOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperOptimizedThermalEvolutionaryOptimizer import HyperOptimizedThermalEvolutionaryOptimizer +try: # HyperOptimizedThermalEvolutionaryOptimizer + from nevergrad.optimization.lama.HyperOptimizedThermalEvolutionaryOptimizer import ( + HyperOptimizedThermalEvolutionaryOptimizer, + ) lama_register["HyperOptimizedThermalEvolutionaryOptimizer"] = HyperOptimizedThermalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMAHyperOptimizedThermalEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMAHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: # HyperOptimizedThermalEvolutionaryOptimizer print("HyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) -try: +try: # HyperOptimizedUltraRefinedRAMEDS from nevergrad.optimization.lama.HyperOptimizedUltraRefinedRAMEDS import HyperOptimizedUltraRefinedRAMEDS lama_register["HyperOptimizedUltraRefinedRAMEDS"] = HyperOptimizedUltraRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperOptimizedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperOptimizedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAHyperOptimizedUltraRefinedRAMEDS").set_name("LLAMAHyperOptimizedUltraRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperOptimizedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperOptimizedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMAHyperOptimizedUltraRefinedRAMEDS" + ).set_name("LLAMAHyperOptimizedUltraRefinedRAMEDS", register=True) +except Exception as e: # HyperOptimizedUltraRefinedRAMEDS print("HyperOptimizedUltraRefinedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperPreciseEvolutionaryOptimizer import HyperPreciseEvolutionaryOptimizer +try: # HyperPreciseEvolutionaryOptimizer + from nevergrad.optimization.lama.HyperPreciseEvolutionaryOptimizer import ( + HyperPreciseEvolutionaryOptimizer, + ) lama_register["HyperPreciseEvolutionaryOptimizer"] = HyperPreciseEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAHyperPreciseEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperPreciseEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAHyperPreciseEvolutionaryOptimizer").set_name("LLAMAHyperPreciseEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperPreciseEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperPreciseEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAHyperPreciseEvolutionaryOptimizer" + ).set_name("LLAMAHyperPreciseEvolutionaryOptimizer", register=True) +except Exception as e: # HyperPreciseEvolutionaryOptimizer print("HyperPreciseEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperPrecisionEvolutionaryOptimizerV23 import HyperPrecisionEvolutionaryOptimizerV23 +try: # HyperPrecisionEvolutionaryOptimizerV23 + from nevergrad.optimization.lama.HyperPrecisionEvolutionaryOptimizerV23 import ( + HyperPrecisionEvolutionaryOptimizerV23, + ) lama_register["HyperPrecisionEvolutionaryOptimizerV23"] = HyperPrecisionEvolutionaryOptimizerV23 - res = NonObjectOptimizer(method="LLAMAHyperPrecisionEvolutionaryOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperPrecisionEvolutionaryOptimizerV23 = NonObjectOptimizer(method="LLAMAHyperPrecisionEvolutionaryOptimizerV23").set_name("LLAMAHyperPrecisionEvolutionaryOptimizerV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperPrecisionEvolutionaryOptimizerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperPrecisionEvolutionaryOptimizerV23 = NonObjectOptimizer( + method="LLAMAHyperPrecisionEvolutionaryOptimizerV23" + ).set_name("LLAMAHyperPrecisionEvolutionaryOptimizerV23", register=True) +except Exception as e: # HyperPrecisionEvolutionaryOptimizerV23 print("HyperPrecisionEvolutionaryOptimizerV23 can not be imported: ", e) -try: +try: # HyperQuantumConvergenceOptimizer from nevergrad.optimization.lama.HyperQuantumConvergenceOptimizer import HyperQuantumConvergenceOptimizer lama_register["HyperQuantumConvergenceOptimizer"] = HyperQuantumConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAHyperQuantumConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperQuantumConvergenceOptimizer = NonObjectOptimizer(method="LLAMAHyperQuantumConvergenceOptimizer").set_name("LLAMAHyperQuantumConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperQuantumConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperQuantumConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAHyperQuantumConvergenceOptimizer" + ).set_name("LLAMAHyperQuantumConvergenceOptimizer", register=True) +except Exception as e: # HyperQuantumConvergenceOptimizer print("HyperQuantumConvergenceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperQuantumStateCrossoverOptimization import HyperQuantumStateCrossoverOptimization +try: # HyperQuantumStateCrossoverOptimization + from nevergrad.optimization.lama.HyperQuantumStateCrossoverOptimization import ( + HyperQuantumStateCrossoverOptimization, + ) lama_register["HyperQuantumStateCrossoverOptimization"] = HyperQuantumStateCrossoverOptimization - res = NonObjectOptimizer(method="LLAMAHyperQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAHyperQuantumStateCrossoverOptimization").set_name("LLAMAHyperQuantumStateCrossoverOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAHyperQuantumStateCrossoverOptimization" + ).set_name("LLAMAHyperQuantumStateCrossoverOptimization", register=True) +except Exception as e: # HyperQuantumStateCrossoverOptimization print("HyperQuantumStateCrossoverOptimization can not be imported: ", e) -try: +try: # HyperRAMEDS from nevergrad.optimization.lama.HyperRAMEDS import HyperRAMEDS lama_register["HyperRAMEDS"] = HyperRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRAMEDS = NonObjectOptimizer(method="LLAMAHyperRAMEDS").set_name("LLAMAHyperRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRAMEDS = NonObjectOptimizer(method="LLAMAHyperRAMEDS").set_name( + "LLAMAHyperRAMEDS", register=True + ) +except Exception as e: # HyperRAMEDS print("HyperRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 import HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 - - lama_register["HyperRefinedAdaptiveDynamicPrecisionOptimizerV52"] = HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 - res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52 = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52").set_name("LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52", register=True) -except Exception as e: +try: # HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 + from nevergrad.optimization.lama.HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 import ( + HyperRefinedAdaptiveDynamicPrecisionOptimizerV52, + ) + + lama_register["HyperRefinedAdaptiveDynamicPrecisionOptimizerV52"] = ( + HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 + ) + # res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52 = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52" + ).set_name("LLAMAHyperRefinedAdaptiveDynamicPrecisionOptimizerV52", register=True) +except Exception as e: # HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 print("HyperRefinedAdaptiveDynamicPrecisionOptimizerV52 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedAdaptiveGuidedMutationOptimizer import HyperRefinedAdaptiveGuidedMutationOptimizer +try: # HyperRefinedAdaptiveGuidedMutationOptimizer + from nevergrad.optimization.lama.HyperRefinedAdaptiveGuidedMutationOptimizer import ( + HyperRefinedAdaptiveGuidedMutationOptimizer, + ) lama_register["HyperRefinedAdaptiveGuidedMutationOptimizer"] = HyperRefinedAdaptiveGuidedMutationOptimizer - res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer").set_name("LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer" + ).set_name("LLAMAHyperRefinedAdaptiveGuidedMutationOptimizer", register=True) +except Exception as e: # HyperRefinedAdaptiveGuidedMutationOptimizer print("HyperRefinedAdaptiveGuidedMutationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionOptimizer import HyperRefinedAdaptivePrecisionOptimizer +try: # HyperRefinedAdaptivePrecisionOptimizer + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionOptimizer import ( + HyperRefinedAdaptivePrecisionOptimizer, + ) lama_register["HyperRefinedAdaptivePrecisionOptimizer"] = HyperRefinedAdaptivePrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionOptimizer").set_name("LLAMAHyperRefinedAdaptivePrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAHyperRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: # HyperRefinedAdaptivePrecisionOptimizer print("HyperRefinedAdaptivePrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionSearch import HyperRefinedAdaptivePrecisionSearch +try: # HyperRefinedAdaptivePrecisionSearch + from nevergrad.optimization.lama.HyperRefinedAdaptivePrecisionSearch import ( + HyperRefinedAdaptivePrecisionSearch, + ) lama_register["HyperRefinedAdaptivePrecisionSearch"] = HyperRefinedAdaptivePrecisionSearch - res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedAdaptivePrecisionSearch = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionSearch").set_name("LLAMAHyperRefinedAdaptivePrecisionSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedAdaptivePrecisionSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedAdaptivePrecisionSearch = NonObjectOptimizer( + method="LLAMAHyperRefinedAdaptivePrecisionSearch" + ).set_name("LLAMAHyperRefinedAdaptivePrecisionSearch", register=True) +except Exception as e: # HyperRefinedAdaptivePrecisionSearch print("HyperRefinedAdaptivePrecisionSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV3 import HyperRefinedDynamicPrecisionOptimizerV3 +try: # HyperRefinedDynamicPrecisionOptimizerV3 + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV3 import ( + HyperRefinedDynamicPrecisionOptimizerV3, + ) lama_register["HyperRefinedDynamicPrecisionOptimizerV3"] = HyperRefinedDynamicPrecisionOptimizerV3 - res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedDynamicPrecisionOptimizerV3 = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3").set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedDynamicPrecisionOptimizerV3 = NonObjectOptimizer( + method="LLAMAHyperRefinedDynamicPrecisionOptimizerV3" + ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV3", register=True) +except Exception as e: # HyperRefinedDynamicPrecisionOptimizerV3 print("HyperRefinedDynamicPrecisionOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV49 import HyperRefinedDynamicPrecisionOptimizerV49 +try: # HyperRefinedDynamicPrecisionOptimizerV49 + from nevergrad.optimization.lama.HyperRefinedDynamicPrecisionOptimizerV49 import ( + HyperRefinedDynamicPrecisionOptimizerV49, + ) lama_register["HyperRefinedDynamicPrecisionOptimizerV49"] = HyperRefinedDynamicPrecisionOptimizerV49 - res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedDynamicPrecisionOptimizerV49 = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49").set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV49", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedDynamicPrecisionOptimizerV49 = NonObjectOptimizer( + method="LLAMAHyperRefinedDynamicPrecisionOptimizerV49" + ).set_name("LLAMAHyperRefinedDynamicPrecisionOptimizerV49", register=True) +except Exception as e: # HyperRefinedDynamicPrecisionOptimizerV49 print("HyperRefinedDynamicPrecisionOptimizerV49 can not be imported: ", e) -try: +try: # HyperRefinedEnhancedRAMEDS from nevergrad.optimization.lama.HyperRefinedEnhancedRAMEDS import HyperRefinedEnhancedRAMEDS lama_register["HyperRefinedEnhancedRAMEDS"] = HyperRefinedEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS").set_name("LLAMAHyperRefinedEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAHyperRefinedEnhancedRAMEDS").set_name( + "LLAMAHyperRefinedEnhancedRAMEDS", register=True + ) +except Exception as e: # HyperRefinedEnhancedRAMEDS print("HyperRefinedEnhancedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.HyperRefinedQuantumVelocityOptimizer import HyperRefinedQuantumVelocityOptimizer +try: # HyperRefinedQuantumVelocityOptimizer + from nevergrad.optimization.lama.HyperRefinedQuantumVelocityOptimizer import ( + HyperRefinedQuantumVelocityOptimizer, + ) lama_register["HyperRefinedQuantumVelocityOptimizer"] = HyperRefinedQuantumVelocityOptimizer - res = NonObjectOptimizer(method="LLAMAHyperRefinedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperRefinedQuantumVelocityOptimizer = NonObjectOptimizer(method="LLAMAHyperRefinedQuantumVelocityOptimizer").set_name("LLAMAHyperRefinedQuantumVelocityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperRefinedQuantumVelocityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperRefinedQuantumVelocityOptimizer = NonObjectOptimizer( + method="LLAMAHyperRefinedQuantumVelocityOptimizer" + ).set_name("LLAMAHyperRefinedQuantumVelocityOptimizer", register=True) +except Exception as e: # HyperRefinedQuantumVelocityOptimizer print("HyperRefinedQuantumVelocityOptimizer can not be imported: ", e) -try: +try: # HyperSpiralDifferentialClimber from nevergrad.optimization.lama.HyperSpiralDifferentialClimber import HyperSpiralDifferentialClimber lama_register["HyperSpiralDifferentialClimber"] = HyperSpiralDifferentialClimber - res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperSpiralDifferentialClimber = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimber").set_name("LLAMAHyperSpiralDifferentialClimber", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimber")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperSpiralDifferentialClimber = NonObjectOptimizer( + method="LLAMAHyperSpiralDifferentialClimber" + ).set_name("LLAMAHyperSpiralDifferentialClimber", register=True) +except Exception as e: # HyperSpiralDifferentialClimber print("HyperSpiralDifferentialClimber can not be imported: ", e) -try: +try: # HyperSpiralDifferentialClimberV2 from nevergrad.optimization.lama.HyperSpiralDifferentialClimberV2 import HyperSpiralDifferentialClimberV2 lama_register["HyperSpiralDifferentialClimberV2"] = HyperSpiralDifferentialClimberV2 - res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimberV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAHyperSpiralDifferentialClimberV2 = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimberV2").set_name("LLAMAHyperSpiralDifferentialClimberV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAHyperSpiralDifferentialClimberV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAHyperSpiralDifferentialClimberV2 = NonObjectOptimizer( + method="LLAMAHyperSpiralDifferentialClimberV2" + ).set_name("LLAMAHyperSpiralDifferentialClimberV2", register=True) +except Exception as e: # HyperSpiralDifferentialClimberV2 print("HyperSpiralDifferentialClimberV2 can not be imported: ", e) -try: +try: # IADEA from nevergrad.optimization.lama.IADEA import IADEA lama_register["IADEA"] = IADEA - res = NonObjectOptimizer(method="LLAMAIADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAIADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAIADEA = NonObjectOptimizer(method="LLAMAIADEA").set_name("LLAMAIADEA", register=True) -except Exception as e: +except Exception as e: # IADEA print("IADEA can not be imported: ", e) -try: +try: # IAGEA from nevergrad.optimization.lama.IAGEA import IAGEA lama_register["IAGEA"] = IAGEA - res = NonObjectOptimizer(method="LLAMAIAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAIAGEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAIAGEA = NonObjectOptimizer(method="LLAMAIAGEA").set_name("LLAMAIAGEA", register=True) -except Exception as e: +except Exception as e: # IAGEA print("IAGEA can not be imported: ", e) -try: +try: # IALNF from nevergrad.optimization.lama.IALNF import IALNF lama_register["IALNF"] = IALNF - res = NonObjectOptimizer(method="LLAMAIALNF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAIALNF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAIALNF = NonObjectOptimizer(method="LLAMAIALNF").set_name("LLAMAIALNF", register=True) -except Exception as e: +except Exception as e: # IALNF print("IALNF can not be imported: ", e) -try: +try: # IASDD from nevergrad.optimization.lama.IASDD import IASDD lama_register["IASDD"] = IASDD - res = NonObjectOptimizer(method="LLAMAIASDD")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAIASDD")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAIASDD = NonObjectOptimizer(method="LLAMAIASDD").set_name("LLAMAIASDD", register=True) -except Exception as e: +except Exception as e: # IASDD print("IASDD can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveCovarianceGradientSearch import ImprovedAdaptiveCovarianceGradientSearch +try: # ImprovedAdaptiveCovarianceGradientSearch + from nevergrad.optimization.lama.ImprovedAdaptiveCovarianceGradientSearch import ( + ImprovedAdaptiveCovarianceGradientSearch, + ) lama_register["ImprovedAdaptiveCovarianceGradientSearch"] = ImprovedAdaptiveCovarianceGradientSearch - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveCovarianceGradientSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveCovarianceGradientSearch").set_name("LLAMAImprovedAdaptiveCovarianceGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveCovarianceGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveCovarianceGradientSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveCovarianceGradientSearch" + ).set_name("LLAMAImprovedAdaptiveCovarianceGradientSearch", register=True) +except Exception as e: # ImprovedAdaptiveCovarianceGradientSearch print("ImprovedAdaptiveCovarianceGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveDifferentialEvolution import ImprovedAdaptiveDifferentialEvolution +try: # ImprovedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ImprovedAdaptiveDifferentialEvolution import ( + ImprovedAdaptiveDifferentialEvolution, + ) lama_register["ImprovedAdaptiveDifferentialEvolution"] = ImprovedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDifferentialEvolution").set_name("LLAMAImprovedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ImprovedAdaptiveDifferentialEvolution print("ImprovedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution import ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution print("ImprovedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveEliteGuidedRestartDE import ImprovedAdaptiveEliteGuidedRestartDE +try: # ImprovedAdaptiveEliteGuidedRestartDE + from nevergrad.optimization.lama.ImprovedAdaptiveEliteGuidedRestartDE import ( + ImprovedAdaptiveEliteGuidedRestartDE, + ) lama_register["ImprovedAdaptiveEliteGuidedRestartDE"] = ImprovedAdaptiveEliteGuidedRestartDE - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveEliteGuidedRestartDE = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEliteGuidedRestartDE").set_name("LLAMAImprovedAdaptiveEliteGuidedRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEliteGuidedRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveEliteGuidedRestartDE = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEliteGuidedRestartDE" + ).set_name("LLAMAImprovedAdaptiveEliteGuidedRestartDE", register=True) +except Exception as e: # ImprovedAdaptiveEliteGuidedRestartDE print("ImprovedAdaptiveEliteGuidedRestartDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveEnhancedQuantumHarmonySearch import ImprovedAdaptiveEnhancedQuantumHarmonySearch - - lama_register["ImprovedAdaptiveEnhancedQuantumHarmonySearch"] = ImprovedAdaptiveEnhancedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch").set_name("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch", register=True) -except Exception as e: +try: # ImprovedAdaptiveEnhancedQuantumHarmonySearch + from nevergrad.optimization.lama.ImprovedAdaptiveEnhancedQuantumHarmonySearch import ( + ImprovedAdaptiveEnhancedQuantumHarmonySearch, + ) + + lama_register["ImprovedAdaptiveEnhancedQuantumHarmonySearch"] = ( + ImprovedAdaptiveEnhancedQuantumHarmonySearch + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch" + ).set_name("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch", register=True) +except Exception as e: # ImprovedAdaptiveEnhancedQuantumHarmonySearch print("ImprovedAdaptiveEnhancedQuantumHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveEvolutionaryHyperHeuristic import ImprovedAdaptiveEvolutionaryHyperHeuristic +try: # ImprovedAdaptiveEvolutionaryHyperHeuristic + from nevergrad.optimization.lama.ImprovedAdaptiveEvolutionaryHyperHeuristic import ( + ImprovedAdaptiveEvolutionaryHyperHeuristic, + ) lama_register["ImprovedAdaptiveEvolutionaryHyperHeuristic"] = ImprovedAdaptiveEvolutionaryHyperHeuristic - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic").set_name("LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic" + ).set_name("LLAMAImprovedAdaptiveEvolutionaryHyperHeuristic", register=True) +except Exception as e: # ImprovedAdaptiveEvolutionaryHyperHeuristic print("ImprovedAdaptiveEvolutionaryHyperHeuristic can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveExplorationExploitationAlgorithm import ImprovedAdaptiveExplorationExploitationAlgorithm - - lama_register["ImprovedAdaptiveExplorationExploitationAlgorithm"] = ImprovedAdaptiveExplorationExploitationAlgorithm - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer(method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm").set_name("LLAMAImprovedAdaptiveExplorationExploitationAlgorithm", register=True) -except Exception as e: +try: # ImprovedAdaptiveExplorationExploitationAlgorithm + from nevergrad.optimization.lama.ImprovedAdaptiveExplorationExploitationAlgorithm import ( + ImprovedAdaptiveExplorationExploitationAlgorithm, + ) + + lama_register["ImprovedAdaptiveExplorationExploitationAlgorithm"] = ( + ImprovedAdaptiveExplorationExploitationAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveExplorationExploitationAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveExplorationExploitationAlgorithm" + ).set_name("LLAMAImprovedAdaptiveExplorationExploitationAlgorithm", register=True) +except Exception as e: # ImprovedAdaptiveExplorationExploitationAlgorithm print("ImprovedAdaptiveExplorationExploitationAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveHarmonyMemeticAlgorithmV17 import ImprovedAdaptiveHarmonyMemeticAlgorithmV17 +try: # ImprovedAdaptiveHarmonyMemeticAlgorithmV17 + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonyMemeticAlgorithmV17 import ( + ImprovedAdaptiveHarmonyMemeticAlgorithmV17, + ) lama_register["ImprovedAdaptiveHarmonyMemeticAlgorithmV17"] = ImprovedAdaptiveHarmonyMemeticAlgorithmV17 - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17 = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17").set_name("LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17 = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17" + ).set_name("LLAMAImprovedAdaptiveHarmonyMemeticAlgorithmV17", register=True) +except Exception as e: # ImprovedAdaptiveHarmonyMemeticAlgorithmV17 print("ImprovedAdaptiveHarmonyMemeticAlgorithmV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveHarmonySearchWithCuckooInspiration import ImprovedAdaptiveHarmonySearchWithCuckooInspiration - - lama_register["ImprovedAdaptiveHarmonySearchWithCuckooInspiration"] = ImprovedAdaptiveHarmonySearchWithCuckooInspiration - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration").set_name("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration", register=True) -except Exception as e: +try: # ImprovedAdaptiveHarmonySearchWithCuckooInspiration + from nevergrad.optimization.lama.ImprovedAdaptiveHarmonySearchWithCuckooInspiration import ( + ImprovedAdaptiveHarmonySearchWithCuckooInspiration, + ) + + lama_register["ImprovedAdaptiveHarmonySearchWithCuckooInspiration"] = ( + ImprovedAdaptiveHarmonySearchWithCuckooInspiration + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration" + ).set_name("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration", register=True) +except Exception as e: # ImprovedAdaptiveHarmonySearchWithCuckooInspiration print("ImprovedAdaptiveHarmonySearchWithCuckooInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridMetaOptimizer import ImprovedAdaptiveHybridMetaOptimizer +try: # ImprovedAdaptiveHybridMetaOptimizer + from nevergrad.optimization.lama.ImprovedAdaptiveHybridMetaOptimizer import ( + ImprovedAdaptiveHybridMetaOptimizer, + ) lama_register["ImprovedAdaptiveHybridMetaOptimizer"] = ImprovedAdaptiveHybridMetaOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHybridMetaOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridMetaOptimizer").set_name("LLAMAImprovedAdaptiveHybridMetaOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridMetaOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHybridMetaOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridMetaOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridMetaOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveHybridMetaOptimizer print("ImprovedAdaptiveHybridMetaOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimization import ImprovedAdaptiveHybridOptimization +try: # ImprovedAdaptiveHybridOptimization + from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimization import ( + ImprovedAdaptiveHybridOptimization, + ) lama_register["ImprovedAdaptiveHybridOptimization"] = ImprovedAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimization").set_name("LLAMAImprovedAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridOptimization" + ).set_name("LLAMAImprovedAdaptiveHybridOptimization", register=True) +except Exception as e: # ImprovedAdaptiveHybridOptimization print("ImprovedAdaptiveHybridOptimization can not be imported: ", e) -try: +try: # ImprovedAdaptiveHybridOptimizer from nevergrad.optimization.lama.ImprovedAdaptiveHybridOptimizer import ImprovedAdaptiveHybridOptimizer lama_register["ImprovedAdaptiveHybridOptimizer"] = ImprovedAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimizer").set_name("LLAMAImprovedAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveHybridOptimizer print("ImprovedAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveHybridSearchOptimizer import ImprovedAdaptiveHybridSearchOptimizer +try: # ImprovedAdaptiveHybridSearchOptimizer + from nevergrad.optimization.lama.ImprovedAdaptiveHybridSearchOptimizer import ( + ImprovedAdaptiveHybridSearchOptimizer, + ) lama_register["ImprovedAdaptiveHybridSearchOptimizer"] = ImprovedAdaptiveHybridSearchOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveHybridSearchOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridSearchOptimizer").set_name("LLAMAImprovedAdaptiveHybridSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveHybridSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveHybridSearchOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveHybridSearchOptimizer" + ).set_name("LLAMAImprovedAdaptiveHybridSearchOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveHybridSearchOptimizer print("ImprovedAdaptiveHybridSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveLevyHarmonySearch import ImprovedAdaptiveLevyHarmonySearch +try: # ImprovedAdaptiveLevyHarmonySearch + from nevergrad.optimization.lama.ImprovedAdaptiveLevyHarmonySearch import ( + ImprovedAdaptiveLevyHarmonySearch, + ) lama_register["ImprovedAdaptiveLevyHarmonySearch"] = ImprovedAdaptiveLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveLevyHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveLevyHarmonySearch").set_name("LLAMAImprovedAdaptiveLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveLevyHarmonySearch" + ).set_name("LLAMAImprovedAdaptiveLevyHarmonySearch", register=True) +except Exception as e: # ImprovedAdaptiveLevyHarmonySearch print("ImprovedAdaptiveLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveMemeticHybridOptimizer import ImprovedAdaptiveMemeticHybridOptimizer +try: # ImprovedAdaptiveMemeticHybridOptimizer + from nevergrad.optimization.lama.ImprovedAdaptiveMemeticHybridOptimizer import ( + ImprovedAdaptiveMemeticHybridOptimizer, + ) lama_register["ImprovedAdaptiveMemeticHybridOptimizer"] = ImprovedAdaptiveMemeticHybridOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMemeticHybridOptimizer").set_name("LLAMAImprovedAdaptiveMemeticHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMemeticHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveMemeticHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMemeticHybridOptimizer" + ).set_name("LLAMAImprovedAdaptiveMemeticHybridOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveMemeticHybridOptimizer print("ImprovedAdaptiveMemeticHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiOperatorSearch import ImprovedAdaptiveMultiOperatorSearch +try: # ImprovedAdaptiveMultiOperatorSearch + from nevergrad.optimization.lama.ImprovedAdaptiveMultiOperatorSearch import ( + ImprovedAdaptiveMultiOperatorSearch, + ) lama_register["ImprovedAdaptiveMultiOperatorSearch"] = ImprovedAdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiOperatorSearch").set_name("LLAMAImprovedAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAImprovedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: # ImprovedAdaptiveMultiOperatorSearch print("ImprovedAdaptiveMultiOperatorSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyDifferentialEvolution import ImprovedAdaptiveMultiStrategyDifferentialEvolution - - lama_register["ImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ImprovedAdaptiveMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedAdaptiveMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyDifferentialEvolution import ( + ImprovedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["ImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( + ImprovedAdaptiveMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMAImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # ImprovedAdaptiveMultiStrategyDifferentialEvolution print("ImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyOptimizer import ImprovedAdaptiveMultiStrategyOptimizer +try: # ImprovedAdaptiveMultiStrategyOptimizer + from nevergrad.optimization.lama.ImprovedAdaptiveMultiStrategyOptimizer import ( + ImprovedAdaptiveMultiStrategyOptimizer, + ) lama_register["ImprovedAdaptiveMultiStrategyOptimizer"] = ImprovedAdaptiveMultiStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyOptimizer").set_name("LLAMAImprovedAdaptiveMultiStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveMultiStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveMultiStrategyOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveMultiStrategyOptimizer" + ).set_name("LLAMAImprovedAdaptiveMultiStrategyOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveMultiStrategyOptimizer print("ImprovedAdaptiveMultiStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveParticleSwarmOptimization import ImprovedAdaptiveParticleSwarmOptimization +try: # ImprovedAdaptiveParticleSwarmOptimization + from nevergrad.optimization.lama.ImprovedAdaptiveParticleSwarmOptimization import ( + ImprovedAdaptiveParticleSwarmOptimization, + ) lama_register["ImprovedAdaptiveParticleSwarmOptimization"] = ImprovedAdaptiveParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveParticleSwarmOptimization").set_name("LLAMAImprovedAdaptiveParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveParticleSwarmOptimization" + ).set_name("LLAMAImprovedAdaptiveParticleSwarmOptimization", register=True) +except Exception as e: # ImprovedAdaptiveParticleSwarmOptimization print("ImprovedAdaptiveParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptivePopulationMemeticOptimizer import ImprovedAdaptivePopulationMemeticOptimizer +try: # ImprovedAdaptivePopulationMemeticOptimizer + from nevergrad.optimization.lama.ImprovedAdaptivePopulationMemeticOptimizer import ( + ImprovedAdaptivePopulationMemeticOptimizer, + ) lama_register["ImprovedAdaptivePopulationMemeticOptimizer"] = ImprovedAdaptivePopulationMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptivePopulationMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptivePopulationMemeticOptimizer").set_name("LLAMAImprovedAdaptivePopulationMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptivePopulationMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptivePopulationMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptivePopulationMemeticOptimizer" + ).set_name("LLAMAImprovedAdaptivePopulationMemeticOptimizer", register=True) +except Exception as e: # ImprovedAdaptivePopulationMemeticOptimizer print("ImprovedAdaptivePopulationMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - - lama_register["ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch").set_name("LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) -except Exception as e: +try: # ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch import ( + ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch, + ) + + lama_register["ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch"] = ( + ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch" + ).set_name("LLAMAImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch", register=True) +except Exception as e: # ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch print("ImprovedAdaptiveQuantumDifferentialEvolutionWithDynamicHybridSearch can not be imported: ", e) -try: +try: # ImprovedAdaptiveQuantumEntropyDE from nevergrad.optimization.lama.ImprovedAdaptiveQuantumEntropyDE import ImprovedAdaptiveQuantumEntropyDE lama_register["ImprovedAdaptiveQuantumEntropyDE"] = ImprovedAdaptiveQuantumEntropyDE - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumEntropyDE").set_name("LLAMAImprovedAdaptiveQuantumEntropyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumEntropyDE" + ).set_name("LLAMAImprovedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: # ImprovedAdaptiveQuantumEntropyDE print("ImprovedAdaptiveQuantumEntropyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumLevyOptimizer import ImprovedAdaptiveQuantumLevyOptimizer +try: # ImprovedAdaptiveQuantumLevyOptimizer + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumLevyOptimizer import ( + ImprovedAdaptiveQuantumLevyOptimizer, + ) lama_register["ImprovedAdaptiveQuantumLevyOptimizer"] = ImprovedAdaptiveQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumLevyOptimizer").set_name("LLAMAImprovedAdaptiveQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumLevyOptimizer" + ).set_name("LLAMAImprovedAdaptiveQuantumLevyOptimizer", register=True) +except Exception as e: # ImprovedAdaptiveQuantumLevyOptimizer print("ImprovedAdaptiveQuantumLevyOptimizer can not be imported: ", e) -try: +try: # ImprovedAdaptiveQuantumPSO from nevergrad.optimization.lama.ImprovedAdaptiveQuantumPSO import ImprovedAdaptiveQuantumPSO lama_register["ImprovedAdaptiveQuantumPSO"] = ImprovedAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO").set_name("LLAMAImprovedAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumPSO").set_name( + "LLAMAImprovedAdaptiveQuantumPSO", register=True + ) +except Exception as e: # ImprovedAdaptiveQuantumPSO print("ImprovedAdaptiveQuantumPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdaptiveQuantumSwarmOptimization import ImprovedAdaptiveQuantumSwarmOptimization +try: # ImprovedAdaptiveQuantumSwarmOptimization + from nevergrad.optimization.lama.ImprovedAdaptiveQuantumSwarmOptimization import ( + ImprovedAdaptiveQuantumSwarmOptimization, + ) lama_register["ImprovedAdaptiveQuantumSwarmOptimization"] = ImprovedAdaptiveQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumSwarmOptimization").set_name("LLAMAImprovedAdaptiveQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdaptiveQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdaptiveQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdaptiveQuantumSwarmOptimization" + ).set_name("LLAMAImprovedAdaptiveQuantumSwarmOptimization", register=True) +except Exception as e: # ImprovedAdaptiveQuantumSwarmOptimization print("ImprovedAdaptiveQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedAdvancedHybridAdaptiveOptimization import ImprovedAdvancedHybridAdaptiveOptimization +try: # ImprovedAdvancedHybridAdaptiveOptimization + from nevergrad.optimization.lama.ImprovedAdvancedHybridAdaptiveOptimization import ( + ImprovedAdvancedHybridAdaptiveOptimization, + ) lama_register["ImprovedAdvancedHybridAdaptiveOptimization"] = ImprovedAdvancedHybridAdaptiveOptimization - res = NonObjectOptimizer(method="LLAMAImprovedAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedAdvancedHybridAdaptiveOptimization = NonObjectOptimizer(method="LLAMAImprovedAdvancedHybridAdaptiveOptimization").set_name("LLAMAImprovedAdvancedHybridAdaptiveOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedAdvancedHybridAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedAdvancedHybridAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAImprovedAdvancedHybridAdaptiveOptimization" + ).set_name("LLAMAImprovedAdvancedHybridAdaptiveOptimization", register=True) +except Exception as e: # ImprovedAdvancedHybridAdaptiveOptimization print("ImprovedAdvancedHybridAdaptiveOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedBalancedQuantumLevyDifferentialSearch import ImprovedBalancedQuantumLevyDifferentialSearch - - lama_register["ImprovedBalancedQuantumLevyDifferentialSearch"] = ImprovedBalancedQuantumLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedBalancedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch").set_name("LLAMAImprovedBalancedQuantumLevyDifferentialSearch", register=True) -except Exception as e: +try: # ImprovedBalancedQuantumLevyDifferentialSearch + from nevergrad.optimization.lama.ImprovedBalancedQuantumLevyDifferentialSearch import ( + ImprovedBalancedQuantumLevyDifferentialSearch, + ) + + lama_register["ImprovedBalancedQuantumLevyDifferentialSearch"] = ( + ImprovedBalancedQuantumLevyDifferentialSearch + ) + # res = NonObjectOptimizer(method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedBalancedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAImprovedBalancedQuantumLevyDifferentialSearch" + ).set_name("LLAMAImprovedBalancedQuantumLevyDifferentialSearch", register=True) +except Exception as e: # ImprovedBalancedQuantumLevyDifferentialSearch print("ImprovedBalancedQuantumLevyDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedCooperativeAdaptiveEvolutionaryOptimizer import ImprovedCooperativeAdaptiveEvolutionaryOptimizer - - lama_register["ImprovedCooperativeAdaptiveEvolutionaryOptimizer"] = ImprovedCooperativeAdaptiveEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer").set_name("LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer", register=True) -except Exception as e: +try: # ImprovedCooperativeAdaptiveEvolutionaryOptimizer + from nevergrad.optimization.lama.ImprovedCooperativeAdaptiveEvolutionaryOptimizer import ( + ImprovedCooperativeAdaptiveEvolutionaryOptimizer, + ) + + lama_register["ImprovedCooperativeAdaptiveEvolutionaryOptimizer"] = ( + ImprovedCooperativeAdaptiveEvolutionaryOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAImprovedCooperativeAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: # ImprovedCooperativeAdaptiveEvolutionaryOptimizer print("ImprovedCooperativeAdaptiveEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedCulturalDifferentialMemeticEvolution import ImprovedCulturalDifferentialMemeticEvolution - - lama_register["ImprovedCulturalDifferentialMemeticEvolution"] = ImprovedCulturalDifferentialMemeticEvolution - res = NonObjectOptimizer(method="LLAMAImprovedCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedCulturalDifferentialMemeticEvolution = NonObjectOptimizer(method="LLAMAImprovedCulturalDifferentialMemeticEvolution").set_name("LLAMAImprovedCulturalDifferentialMemeticEvolution", register=True) -except Exception as e: +try: # ImprovedCulturalDifferentialMemeticEvolution + from nevergrad.optimization.lama.ImprovedCulturalDifferentialMemeticEvolution import ( + ImprovedCulturalDifferentialMemeticEvolution, + ) + + lama_register["ImprovedCulturalDifferentialMemeticEvolution"] = ( + ImprovedCulturalDifferentialMemeticEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedCulturalDifferentialMemeticEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedCulturalDifferentialMemeticEvolution = NonObjectOptimizer( + method="LLAMAImprovedCulturalDifferentialMemeticEvolution" + ).set_name("LLAMAImprovedCulturalDifferentialMemeticEvolution", register=True) +except Exception as e: # ImprovedCulturalDifferentialMemeticEvolution print("ImprovedCulturalDifferentialMemeticEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedCulturalEvolutionaryOptimizer import ImprovedCulturalEvolutionaryOptimizer +try: # ImprovedCulturalEvolutionaryOptimizer + from nevergrad.optimization.lama.ImprovedCulturalEvolutionaryOptimizer import ( + ImprovedCulturalEvolutionaryOptimizer, + ) lama_register["ImprovedCulturalEvolutionaryOptimizer"] = ImprovedCulturalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedCulturalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAImprovedCulturalEvolutionaryOptimizer").set_name("LLAMAImprovedCulturalEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedCulturalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedCulturalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAImprovedCulturalEvolutionaryOptimizer" + ).set_name("LLAMAImprovedCulturalEvolutionaryOptimizer", register=True) +except Exception as e: # ImprovedCulturalEvolutionaryOptimizer print("ImprovedCulturalEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDiversifiedHarmonySearchOptimizer import ImprovedDiversifiedHarmonySearchOptimizer +try: # ImprovedDiversifiedHarmonySearchOptimizer + from nevergrad.optimization.lama.ImprovedDiversifiedHarmonySearchOptimizer import ( + ImprovedDiversifiedHarmonySearchOptimizer, + ) lama_register["ImprovedDiversifiedHarmonySearchOptimizer"] = ImprovedDiversifiedHarmonySearchOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer(method="LLAMAImprovedDiversifiedHarmonySearchOptimizer").set_name("LLAMAImprovedDiversifiedHarmonySearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDiversifiedHarmonySearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDiversifiedHarmonySearchOptimizer = NonObjectOptimizer( + method="LLAMAImprovedDiversifiedHarmonySearchOptimizer" + ).set_name("LLAMAImprovedDiversifiedHarmonySearchOptimizer", register=True) +except Exception as e: # ImprovedDiversifiedHarmonySearchOptimizer print("ImprovedDiversifiedHarmonySearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveMemoryStrategyV58 import ImprovedDualPhaseAdaptiveMemoryStrategyV58 +try: # ImprovedDualPhaseAdaptiveMemoryStrategyV58 + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveMemoryStrategyV58 import ( + ImprovedDualPhaseAdaptiveMemoryStrategyV58, + ) lama_register["ImprovedDualPhaseAdaptiveMemoryStrategyV58"] = ImprovedDualPhaseAdaptiveMemoryStrategyV58 - res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58 = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58").set_name("LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58 = NonObjectOptimizer( + method="LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58" + ).set_name("LLAMAImprovedDualPhaseAdaptiveMemoryStrategyV58", register=True) +except Exception as e: # ImprovedDualPhaseAdaptiveMemoryStrategyV58 print("ImprovedDualPhaseAdaptiveMemoryStrategyV58 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 import ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 - - lama_register["ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1"] = ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 - res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1").set_name("LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1", register=True) -except Exception as e: +try: # ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 + from nevergrad.optimization.lama.ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 import ( + ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1, + ) + + lama_register["ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1"] = ( + ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 = NonObjectOptimizer( + method="LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1" + ).set_name("LLAMAImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1", register=True) +except Exception as e: # ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 print("ImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 import ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 - - lama_register["ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2"] = ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 - res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2").set_name("LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2", register=True) -except Exception as e: +try: # ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 import ( + ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2, + ) + + lama_register["ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2"] = ( + ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2" + ).set_name("LLAMAImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2", register=True) +except Exception as e: # ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 print("ImprovedDynamicAdaptiveDEPSOWithEliteMemoryV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveExplorationOptimization import ImprovedDynamicAdaptiveExplorationOptimization - - lama_register["ImprovedDynamicAdaptiveExplorationOptimization"] = ImprovedDynamicAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveExplorationOptimization").set_name("LLAMAImprovedDynamicAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # ImprovedDynamicAdaptiveExplorationOptimization + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveExplorationOptimization import ( + ImprovedDynamicAdaptiveExplorationOptimization, + ) + + lama_register["ImprovedDynamicAdaptiveExplorationOptimization"] = ( + ImprovedDynamicAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveExplorationOptimization" + ).set_name("LLAMAImprovedDynamicAdaptiveExplorationOptimization", register=True) +except Exception as e: # ImprovedDynamicAdaptiveExplorationOptimization print("ImprovedDynamicAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSO import ImprovedDynamicAdaptiveHybridDEPSO +try: # ImprovedDynamicAdaptiveHybridDEPSO + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSO import ( + ImprovedDynamicAdaptiveHybridDEPSO, + ) lama_register["ImprovedDynamicAdaptiveHybridDEPSO"] = ImprovedDynamicAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSO").set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: # ImprovedDynamicAdaptiveHybridDEPSO print("ImprovedDynamicAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory import ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory - - lama_register["ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) -except Exception as e: +try: # ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMAImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory print("ImprovedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicHarmonyFireworksSearch import ImprovedDynamicHarmonyFireworksSearch +try: # ImprovedDynamicHarmonyFireworksSearch + from nevergrad.optimization.lama.ImprovedDynamicHarmonyFireworksSearch import ( + ImprovedDynamicHarmonyFireworksSearch, + ) lama_register["ImprovedDynamicHarmonyFireworksSearch"] = ImprovedDynamicHarmonyFireworksSearch - res = NonObjectOptimizer(method="LLAMAImprovedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicHarmonyFireworksSearch = NonObjectOptimizer(method="LLAMAImprovedDynamicHarmonyFireworksSearch").set_name("LLAMAImprovedDynamicHarmonyFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicHarmonyFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicHarmonyFireworksSearch = NonObjectOptimizer( + method="LLAMAImprovedDynamicHarmonyFireworksSearch" + ).set_name("LLAMAImprovedDynamicHarmonyFireworksSearch", register=True) +except Exception as e: # ImprovedDynamicHarmonyFireworksSearch print("ImprovedDynamicHarmonyFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicHybridDEPSOWithEliteMemoryV3 import ImprovedDynamicHybridDEPSOWithEliteMemoryV3 +try: # ImprovedDynamicHybridDEPSOWithEliteMemoryV3 + from nevergrad.optimization.lama.ImprovedDynamicHybridDEPSOWithEliteMemoryV3 import ( + ImprovedDynamicHybridDEPSOWithEliteMemoryV3, + ) lama_register["ImprovedDynamicHybridDEPSOWithEliteMemoryV3"] = ImprovedDynamicHybridDEPSOWithEliteMemoryV3 - res = NonObjectOptimizer(method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3 = NonObjectOptimizer(method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3").set_name("LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3 = NonObjectOptimizer( + method="LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3" + ).set_name("LLAMAImprovedDynamicHybridDEPSOWithEliteMemoryV3", register=True) +except Exception as e: # ImprovedDynamicHybridDEPSOWithEliteMemoryV3 print("ImprovedDynamicHybridDEPSOWithEliteMemoryV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedDynamicQuantumSwarmOptimization import ImprovedDynamicQuantumSwarmOptimization +try: # ImprovedDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.ImprovedDynamicQuantumSwarmOptimization import ( + ImprovedDynamicQuantumSwarmOptimization, + ) lama_register["ImprovedDynamicQuantumSwarmOptimization"] = ImprovedDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # ImprovedDynamicQuantumSwarmOptimization print("ImprovedDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 import ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 - - lama_register["ImprovedEliteAdaptiveCrowdingHybridOptimizerV2"] = ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2").set_name("LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2", register=True) -except Exception as e: +try: # ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 + from nevergrad.optimization.lama.ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 import ( + ImprovedEliteAdaptiveCrowdingHybridOptimizerV2, + ) + + lama_register["ImprovedEliteAdaptiveCrowdingHybridOptimizerV2"] = ( + ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2" + ).set_name("LLAMAImprovedEliteAdaptiveCrowdingHybridOptimizerV2", register=True) +except Exception as e: # ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 print("ImprovedEliteAdaptiveCrowdingHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemeticDifferentialEvolution import ImprovedEliteAdaptiveMemeticDifferentialEvolution - - lama_register["ImprovedEliteAdaptiveMemeticDifferentialEvolution"] = ImprovedEliteAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution").set_name("LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedEliteAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemeticDifferentialEvolution import ( + ImprovedEliteAdaptiveMemeticDifferentialEvolution, + ) + + lama_register["ImprovedEliteAdaptiveMemeticDifferentialEvolution"] = ( + ImprovedEliteAdaptiveMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMAImprovedEliteAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # ImprovedEliteAdaptiveMemeticDifferentialEvolution print("ImprovedEliteAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemoryHybridOptimizer import ImprovedEliteAdaptiveMemoryHybridOptimizer +try: # ImprovedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.ImprovedEliteAdaptiveMemoryHybridOptimizer import ( + ImprovedEliteAdaptiveMemoryHybridOptimizer, + ) lama_register["ImprovedEliteAdaptiveMemoryHybridOptimizer"] = ImprovedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAImprovedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # ImprovedEliteAdaptiveMemoryHybridOptimizer print("ImprovedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEliteGuidedHybridAdaptiveDE import ImprovedEliteGuidedHybridAdaptiveDE +try: # ImprovedEliteGuidedHybridAdaptiveDE + from nevergrad.optimization.lama.ImprovedEliteGuidedHybridAdaptiveDE import ( + ImprovedEliteGuidedHybridAdaptiveDE, + ) lama_register["ImprovedEliteGuidedHybridAdaptiveDE"] = ImprovedEliteGuidedHybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteGuidedHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedHybridAdaptiveDE").set_name("LLAMAImprovedEliteGuidedHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteGuidedHybridAdaptiveDE = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedHybridAdaptiveDE" + ).set_name("LLAMAImprovedEliteGuidedHybridAdaptiveDE", register=True) +except Exception as e: # ImprovedEliteGuidedHybridAdaptiveDE print("ImprovedEliteGuidedHybridAdaptiveDE can not be imported: ", e) -try: +try: # ImprovedEliteGuidedMutationDE from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE import ImprovedEliteGuidedMutationDE lama_register["ImprovedEliteGuidedMutationDE"] = ImprovedEliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE").set_name("LLAMAImprovedEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedMutationDE" + ).set_name("LLAMAImprovedEliteGuidedMutationDE", register=True) +except Exception as e: # ImprovedEliteGuidedMutationDE print("ImprovedEliteGuidedMutationDE can not be imported: ", e) -try: +try: # ImprovedEliteGuidedMutationDE_v2 from nevergrad.optimization.lama.ImprovedEliteGuidedMutationDE_v2 import ImprovedEliteGuidedMutationDE_v2 lama_register["ImprovedEliteGuidedMutationDE_v2"] = ImprovedEliteGuidedMutationDE_v2 - res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE_v2").set_name("LLAMAImprovedEliteGuidedMutationDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMAImprovedEliteGuidedMutationDE_v2" + ).set_name("LLAMAImprovedEliteGuidedMutationDE_v2", register=True) +except Exception as e: # ImprovedEliteGuidedMutationDE_v2 print("ImprovedEliteGuidedMutationDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEliteQuantumDifferentialMemeticOptimizer import ImprovedEliteQuantumDifferentialMemeticOptimizer - - lama_register["ImprovedEliteQuantumDifferentialMemeticOptimizer"] = ImprovedEliteQuantumDifferentialMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer").set_name("LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer", register=True) -except Exception as e: +try: # ImprovedEliteQuantumDifferentialMemeticOptimizer + from nevergrad.optimization.lama.ImprovedEliteQuantumDifferentialMemeticOptimizer import ( + ImprovedEliteQuantumDifferentialMemeticOptimizer, + ) + + lama_register["ImprovedEliteQuantumDifferentialMemeticOptimizer"] = ( + ImprovedEliteQuantumDifferentialMemeticOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMAImprovedEliteQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: # ImprovedEliteQuantumDifferentialMemeticOptimizer print("ImprovedEliteQuantumDifferentialMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 import ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 - - lama_register["ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6"] = ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6").set_name("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6", register=True) -except Exception as e: +try: # ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 import ( + ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6, + ) + + lama_register["ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6"] = ( + ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6" + ).set_name("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6", register=True) +except Exception as e: # ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 print("ImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 import ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 - - lama_register["ImprovedEnhancedAdaptiveDynamicHarmonySearchV4"] = ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4").set_name("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4", register=True) -except Exception as e: +try: # ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 import ( + ImprovedEnhancedAdaptiveDynamicHarmonySearchV4, + ) + + lama_register["ImprovedEnhancedAdaptiveDynamicHarmonySearchV4"] = ( + ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4" + ).set_name("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4", register=True) +except Exception as e: # ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 print("ImprovedEnhancedAdaptiveDynamicHarmonySearchV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 import ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 - - lama_register["ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19"] = ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19").set_name("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19", register=True) -except Exception as e: +try: # ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 import ( + ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19, + ) + + lama_register["ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19"] = ( + ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19" + ).set_name("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19", register=True) +except Exception as e: # ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 print("ImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveLevyHarmonySearchV4 import ImprovedEnhancedAdaptiveLevyHarmonySearchV4 +try: # ImprovedEnhancedAdaptiveLevyHarmonySearchV4 + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveLevyHarmonySearchV4 import ( + ImprovedEnhancedAdaptiveLevyHarmonySearchV4, + ) lama_register["ImprovedEnhancedAdaptiveLevyHarmonySearchV4"] = ImprovedEnhancedAdaptiveLevyHarmonySearchV4 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4").set_name("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4" + ).set_name("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4", register=True) +except Exception as e: # ImprovedEnhancedAdaptiveLevyHarmonySearchV4 print("ImprovedEnhancedAdaptiveLevyHarmonySearchV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 import ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 +try: # ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 + from nevergrad.optimization.lama.ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 import ( + ImprovedEnhancedAdaptiveMetaNetAQAPSOv4, + ) lama_register["ImprovedEnhancedAdaptiveMetaNetAQAPSOv4"] = ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4").set_name("LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4" + ).set_name("LLAMAImprovedEnhancedAdaptiveMetaNetAQAPSOv4", register=True) +except Exception as e: # ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 print("ImprovedEnhancedAdaptiveMetaNetAQAPSOv4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 import ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 - - lama_register["ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15"] = ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15").set_name("LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15", register=True) -except Exception as e: +try: # ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 + from nevergrad.optimization.lama.ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 import ( + ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15, + ) + + lama_register["ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15"] = ( + ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15" + ).set_name("LLAMAImprovedEnhancedAdvancedQuantumSwarmOptimizationV15", register=True) +except Exception as e: # ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 print("ImprovedEnhancedAdvancedQuantumSwarmOptimizationV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 - - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v54"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54", register=True) -except Exception as e: +try: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v54, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v54"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v54", register=True) +except Exception as e: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v54 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 - - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v61"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61", register=True) -except Exception as e: +try: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v61, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v61"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v61", register=True) +except Exception as e: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v61 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 import ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 - - lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v65"] = ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65 = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65").set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65", register=True) -except Exception as e: +try: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 + from nevergrad.optimization.lama.ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 import ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v65, + ) + + lama_register["ImprovedEnhancedDifferentialEvolutionLocalSearch_v65"] = ( + ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65" + ).set_name("LLAMAImprovedEnhancedDifferentialEvolutionLocalSearch_v65", register=True) +except Exception as e: # ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 print("ImprovedEnhancedDifferentialEvolutionLocalSearch_v65 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDiversifiedGravitationalSwarmOptimization import ImprovedEnhancedDiversifiedGravitationalSwarmOptimization - - lama_register["ImprovedEnhancedDiversifiedGravitationalSwarmOptimization"] = ImprovedEnhancedDiversifiedGravitationalSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization").set_name("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization", register=True) -except Exception as e: +try: # ImprovedEnhancedDiversifiedGravitationalSwarmOptimization + from nevergrad.optimization.lama.ImprovedEnhancedDiversifiedGravitationalSwarmOptimization import ( + ImprovedEnhancedDiversifiedGravitationalSwarmOptimization, + ) + + lama_register["ImprovedEnhancedDiversifiedGravitationalSwarmOptimization"] = ( + ImprovedEnhancedDiversifiedGravitationalSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization" + ).set_name("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization", register=True) +except Exception as e: # ImprovedEnhancedDiversifiedGravitationalSwarmOptimization print("ImprovedEnhancedDiversifiedGravitationalSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicDifferentialEvolution import ImprovedEnhancedDynamicDifferentialEvolution - - lama_register["ImprovedEnhancedDynamicDifferentialEvolution"] = ImprovedEnhancedDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicDifferentialEvolution").set_name("LLAMAImprovedEnhancedDynamicDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedEnhancedDynamicDifferentialEvolution + from nevergrad.optimization.lama.ImprovedEnhancedDynamicDifferentialEvolution import ( + ImprovedEnhancedDynamicDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedDynamicDifferentialEvolution"] = ( + ImprovedEnhancedDynamicDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: # ImprovedEnhancedDynamicDifferentialEvolution print("ImprovedEnhancedDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicHarmonyAlgorithm import ImprovedEnhancedDynamicHarmonyAlgorithm +try: # ImprovedEnhancedDynamicHarmonyAlgorithm + from nevergrad.optimization.lama.ImprovedEnhancedDynamicHarmonyAlgorithm import ( + ImprovedEnhancedDynamicHarmonyAlgorithm, + ) lama_register["ImprovedEnhancedDynamicHarmonyAlgorithm"] = ImprovedEnhancedDynamicHarmonyAlgorithm - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm").set_name("LLAMAImprovedEnhancedDynamicHarmonyAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDynamicHarmonyAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicHarmonyAlgorithm" + ).set_name("LLAMAImprovedEnhancedDynamicHarmonyAlgorithm", register=True) +except Exception as e: # ImprovedEnhancedDynamicHarmonyAlgorithm print("ImprovedEnhancedDynamicHarmonyAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicLevyHarmonySearch import ImprovedEnhancedDynamicLevyHarmonySearch +try: # ImprovedEnhancedDynamicLevyHarmonySearch + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLevyHarmonySearch import ( + ImprovedEnhancedDynamicLevyHarmonySearch, + ) lama_register["ImprovedEnhancedDynamicLevyHarmonySearch"] = ImprovedEnhancedDynamicLevyHarmonySearch - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch").set_name("LLAMAImprovedEnhancedDynamicLevyHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDynamicLevyHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicLevyHarmonySearch" + ).set_name("LLAMAImprovedEnhancedDynamicLevyHarmonySearch", register=True) +except Exception as e: # ImprovedEnhancedDynamicLevyHarmonySearch print("ImprovedEnhancedDynamicLevyHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm import ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm - - lama_register["ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm"] = ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm").set_name("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) -except Exception as e: +try: # ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm + from nevergrad.optimization.lama.ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm import ( + ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm, + ) + + lama_register["ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm"] = ( + ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm" + ).set_name("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm", register=True) +except Exception as e: # ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm print("ImprovedEnhancedDynamicLocalSearchFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedDynamicQuantumSwarmOptimization import ImprovedEnhancedDynamicQuantumSwarmOptimization - - lama_register["ImprovedEnhancedDynamicQuantumSwarmOptimization"] = ImprovedEnhancedDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization", register=True) -except Exception as e: +try: # ImprovedEnhancedDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.ImprovedEnhancedDynamicQuantumSwarmOptimization import ( + ImprovedEnhancedDynamicQuantumSwarmOptimization, + ) + + lama_register["ImprovedEnhancedDynamicQuantumSwarmOptimization"] = ( + ImprovedEnhancedDynamicQuantumSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedEnhancedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # ImprovedEnhancedDynamicQuantumSwarmOptimization print("ImprovedEnhancedDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedEliteGuidedMassQGSA_v84 import ImprovedEnhancedEliteGuidedMassQGSA_v84 +try: # ImprovedEnhancedEliteGuidedMassQGSA_v84 + from nevergrad.optimization.lama.ImprovedEnhancedEliteGuidedMassQGSA_v84 import ( + ImprovedEnhancedEliteGuidedMassQGSA_v84, + ) lama_register["ImprovedEnhancedEliteGuidedMassQGSA_v84"] = ImprovedEnhancedEliteGuidedMassQGSA_v84 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84 = NonObjectOptimizer(method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84").set_name("LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84" + ).set_name("LLAMAImprovedEnhancedEliteGuidedMassQGSA_v84", register=True) +except Exception as e: # ImprovedEnhancedEliteGuidedMassQGSA_v84 print("ImprovedEnhancedEliteGuidedMassQGSA_v84 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 import ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 - - lama_register["ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11"] = ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 = NonObjectOptimizer(method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11").set_name("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11", register=True) -except Exception as e: +try: # ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 + from nevergrad.optimization.lama.ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 import ( + ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11, + ) + + lama_register["ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11"] = ( + ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11" + ).set_name("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11", register=True) +except Exception as e: # ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 print("ImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedEvolutionaryFireworksSearch import ImprovedEnhancedEvolutionaryFireworksSearch +try: # ImprovedEnhancedEvolutionaryFireworksSearch + from nevergrad.optimization.lama.ImprovedEnhancedEvolutionaryFireworksSearch import ( + ImprovedEnhancedEvolutionaryFireworksSearch, + ) lama_register["ImprovedEnhancedEvolutionaryFireworksSearch"] = ImprovedEnhancedEvolutionaryFireworksSearch - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch").set_name("LLAMAImprovedEnhancedEvolutionaryFireworksSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedEvolutionaryFireworksSearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedEvolutionaryFireworksSearch" + ).set_name("LLAMAImprovedEnhancedEvolutionaryFireworksSearch", register=True) +except Exception as e: # ImprovedEnhancedEvolutionaryFireworksSearch print("ImprovedEnhancedEvolutionaryFireworksSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmOptimization import ImprovedEnhancedFireworkAlgorithmOptimization - - lama_register["ImprovedEnhancedFireworkAlgorithmOptimization"] = ImprovedEnhancedFireworkAlgorithmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization").set_name("LLAMAImprovedEnhancedFireworkAlgorithmOptimization", register=True) -except Exception as e: +try: # ImprovedEnhancedFireworkAlgorithmOptimization + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmOptimization import ( + ImprovedEnhancedFireworkAlgorithmOptimization, + ) + + lama_register["ImprovedEnhancedFireworkAlgorithmOptimization"] = ( + ImprovedEnhancedFireworkAlgorithmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedFireworkAlgorithmOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedFireworkAlgorithmOptimization" + ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmOptimization", register=True) +except Exception as e: # ImprovedEnhancedFireworkAlgorithmOptimization print("ImprovedEnhancedFireworkAlgorithmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - - lama_register["ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch").set_name("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) -except Exception as e: +try: # ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + from nevergrad.optimization.lama.ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch import ( + ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch, + ) + + lama_register["ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch"] = ( + ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch" + ).set_name("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch", register=True) +except Exception as e: # ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch print("ImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedGradientDifferentialEvolution import ImprovedEnhancedGradientDifferentialEvolution - - lama_register["ImprovedEnhancedGradientDifferentialEvolution"] = ImprovedEnhancedGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedGradientDifferentialEvolution").set_name("LLAMAImprovedEnhancedGradientDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedEnhancedGradientDifferentialEvolution + from nevergrad.optimization.lama.ImprovedEnhancedGradientDifferentialEvolution import ( + ImprovedEnhancedGradientDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedGradientDifferentialEvolution"] = ( + ImprovedEnhancedGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedGradientDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedGradientDifferentialEvolution", register=True) +except Exception as e: # ImprovedEnhancedGradientDifferentialEvolution print("ImprovedEnhancedGradientDifferentialEvolution can not be imported: ", e) -try: +try: # ImprovedEnhancedHarmonySearchOB from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchOB import ImprovedEnhancedHarmonySearchOB lama_register["ImprovedEnhancedHarmonySearchOB"] = ImprovedEnhancedHarmonySearchOB - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedHarmonySearchOB = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchOB").set_name("LLAMAImprovedEnhancedHarmonySearchOB", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchOB")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedHarmonySearchOB = NonObjectOptimizer( + method="LLAMAImprovedEnhancedHarmonySearchOB" + ).set_name("LLAMAImprovedEnhancedHarmonySearchOB", register=True) +except Exception as e: # ImprovedEnhancedHarmonySearchOB print("ImprovedEnhancedHarmonySearchOB can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - - lama_register["ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration").set_name("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) -except Exception as e: +try: # ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + from nevergrad.optimization.lama.ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration import ( + ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration, + ) + + lama_register["ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration"] = ( + ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration = NonObjectOptimizer( + method="LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration" + ).set_name("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration", register=True) +except Exception as e: # ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration print("ImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedMemeticHarmonyOptimization import ImprovedEnhancedMemeticHarmonyOptimization +try: # ImprovedEnhancedMemeticHarmonyOptimization + from nevergrad.optimization.lama.ImprovedEnhancedMemeticHarmonyOptimization import ( + ImprovedEnhancedMemeticHarmonyOptimization, + ) lama_register["ImprovedEnhancedMemeticHarmonyOptimization"] = ImprovedEnhancedMemeticHarmonyOptimization - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedMemeticHarmonyOptimization = NonObjectOptimizer(method="LLAMAImprovedEnhancedMemeticHarmonyOptimization").set_name("LLAMAImprovedEnhancedMemeticHarmonyOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedMemeticHarmonyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedMemeticHarmonyOptimization = NonObjectOptimizer( + method="LLAMAImprovedEnhancedMemeticHarmonyOptimization" + ).set_name("LLAMAImprovedEnhancedMemeticHarmonyOptimization", register=True) +except Exception as e: # ImprovedEnhancedMemeticHarmonyOptimization print("ImprovedEnhancedMemeticHarmonyOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution import ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution - - lama_register["ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution import ( + ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution"] = ( + ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution print("ImprovedEnhancedQuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedQuantumHarmonySearch import ImprovedEnhancedQuantumHarmonySearch +try: # ImprovedEnhancedQuantumHarmonySearch + from nevergrad.optimization.lama.ImprovedEnhancedQuantumHarmonySearch import ( + ImprovedEnhancedQuantumHarmonySearch, + ) lama_register["ImprovedEnhancedQuantumHarmonySearch"] = ImprovedEnhancedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumHarmonySearch").set_name("LLAMAImprovedEnhancedQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedEnhancedQuantumHarmonySearch" + ).set_name("LLAMAImprovedEnhancedQuantumHarmonySearch", register=True) +except Exception as e: # ImprovedEnhancedQuantumHarmonySearch print("ImprovedEnhancedQuantumHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedRefinedAdaptiveQGSA_v61 import ImprovedEnhancedRefinedAdaptiveQGSA_v61 +try: # ImprovedEnhancedRefinedAdaptiveQGSA_v61 + from nevergrad.optimization.lama.ImprovedEnhancedRefinedAdaptiveQGSA_v61 import ( + ImprovedEnhancedRefinedAdaptiveQGSA_v61, + ) lama_register["ImprovedEnhancedRefinedAdaptiveQGSA_v61"] = ImprovedEnhancedRefinedAdaptiveQGSA_v61 - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61 = NonObjectOptimizer(method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61").set_name("LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61 = NonObjectOptimizer( + method="LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61" + ).set_name("LLAMAImprovedEnhancedRefinedAdaptiveQGSA_v61", register=True) +except Exception as e: # ImprovedEnhancedRefinedAdaptiveQGSA_v61 print("ImprovedEnhancedRefinedAdaptiveQGSA_v61 can not be imported: ", e) -try: +try: # ImprovedEnhancedSADE from nevergrad.optimization.lama.ImprovedEnhancedSADE import ImprovedEnhancedSADE lama_register["ImprovedEnhancedSADE"] = ImprovedEnhancedSADE - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedSADE = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE").set_name("LLAMAImprovedEnhancedSADE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedSADE = NonObjectOptimizer(method="LLAMAImprovedEnhancedSADE").set_name( + "LLAMAImprovedEnhancedSADE", register=True + ) +except Exception as e: # ImprovedEnhancedSADE print("ImprovedEnhancedSADE can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedEnhancedStochasticMetaHeuristicOptimizer import ImprovedEnhancedStochasticMetaHeuristicOptimizer - - lama_register["ImprovedEnhancedStochasticMetaHeuristicOptimizer"] = ImprovedEnhancedStochasticMetaHeuristicOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer(method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer").set_name("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer", register=True) -except Exception as e: +try: # ImprovedEnhancedStochasticMetaHeuristicOptimizer + from nevergrad.optimization.lama.ImprovedEnhancedStochasticMetaHeuristicOptimizer import ( + ImprovedEnhancedStochasticMetaHeuristicOptimizer, + ) + + lama_register["ImprovedEnhancedStochasticMetaHeuristicOptimizer"] = ( + ImprovedEnhancedStochasticMetaHeuristicOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer" + ).set_name("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer", register=True) +except Exception as e: # ImprovedEnhancedStochasticMetaHeuristicOptimizer print("ImprovedEnhancedStochasticMetaHeuristicOptimizer can not be imported: ", e) -try: +try: # ImprovedEnsembleMemeticOptimizer from nevergrad.optimization.lama.ImprovedEnsembleMemeticOptimizer import ImprovedEnsembleMemeticOptimizer lama_register["ImprovedEnsembleMemeticOptimizer"] = ImprovedEnsembleMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedEnsembleMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedEnsembleMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedEnsembleMemeticOptimizer").set_name("LLAMAImprovedEnsembleMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedEnsembleMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedEnsembleMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedEnsembleMemeticOptimizer" + ).set_name("LLAMAImprovedEnsembleMemeticOptimizer", register=True) +except Exception as e: # ImprovedEnsembleMemeticOptimizer print("ImprovedEnsembleMemeticOptimizer can not be imported: ", e) -try: +try: # ImprovedFireworkAlgorithm from nevergrad.optimization.lama.ImprovedFireworkAlgorithm import ImprovedFireworkAlgorithm lama_register["ImprovedFireworkAlgorithm"] = ImprovedFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm").set_name("LLAMAImprovedFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedFireworkAlgorithm = NonObjectOptimizer(method="LLAMAImprovedFireworkAlgorithm").set_name( + "LLAMAImprovedFireworkAlgorithm", register=True + ) +except Exception as e: # ImprovedFireworkAlgorithm print("ImprovedFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveDifferentialEvolution import ImprovedHybridAdaptiveDifferentialEvolution +try: # ImprovedHybridAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ImprovedHybridAdaptiveDifferentialEvolution import ( + ImprovedHybridAdaptiveDifferentialEvolution, + ) lama_register["ImprovedHybridAdaptiveDifferentialEvolution"] = ImprovedHybridAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveDifferentialEvolution").set_name("LLAMAImprovedHybridAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ImprovedHybridAdaptiveDifferentialEvolution print("ImprovedHybridAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveGeneticSwarmOptimizer import ImprovedHybridAdaptiveGeneticSwarmOptimizer +try: # ImprovedHybridAdaptiveGeneticSwarmOptimizer + from nevergrad.optimization.lama.ImprovedHybridAdaptiveGeneticSwarmOptimizer import ( + ImprovedHybridAdaptiveGeneticSwarmOptimizer, + ) lama_register["ImprovedHybridAdaptiveGeneticSwarmOptimizer"] = ImprovedHybridAdaptiveGeneticSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer").set_name("LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer" + ).set_name("LLAMAImprovedHybridAdaptiveGeneticSwarmOptimizer", register=True) +except Exception as e: # ImprovedHybridAdaptiveGeneticSwarmOptimizer print("ImprovedHybridAdaptiveGeneticSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedHybridAdaptiveHarmonicFireworksTabuSearch import ImprovedHybridAdaptiveHarmonicFireworksTabuSearch - - lama_register["ImprovedHybridAdaptiveHarmonicFireworksTabuSearch"] = ImprovedHybridAdaptiveHarmonicFireworksTabuSearch - res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) -except Exception as e: +try: # ImprovedHybridAdaptiveHarmonicFireworksTabuSearch + from nevergrad.optimization.lama.ImprovedHybridAdaptiveHarmonicFireworksTabuSearch import ( + ImprovedHybridAdaptiveHarmonicFireworksTabuSearch, + ) + + lama_register["ImprovedHybridAdaptiveHarmonicFireworksTabuSearch"] = ( + ImprovedHybridAdaptiveHarmonicFireworksTabuSearch + ) + # res = NonObjectOptimizer(method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: # ImprovedHybridAdaptiveHarmonicFireworksTabuSearch print("ImprovedHybridAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) -try: +try: # ImprovedHybridCMAESDE from nevergrad.optimization.lama.ImprovedHybridCMAESDE import ImprovedHybridCMAESDE lama_register["ImprovedHybridCMAESDE"] = ImprovedHybridCMAESDE - res = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridCMAESDE = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE").set_name("LLAMAImprovedHybridCMAESDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridCMAESDE = NonObjectOptimizer(method="LLAMAImprovedHybridCMAESDE").set_name( + "LLAMAImprovedHybridCMAESDE", register=True + ) +except Exception as e: # ImprovedHybridCMAESDE print("ImprovedHybridCMAESDE can not be imported: ", e) -try: +try: # ImprovedHybridGeneticPSO from nevergrad.optimization.lama.ImprovedHybridGeneticPSO import ImprovedHybridGeneticPSO lama_register["ImprovedHybridGeneticPSO"] = ImprovedHybridGeneticPSO - res = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridGeneticPSO = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO").set_name("LLAMAImprovedHybridGeneticPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridGeneticPSO = NonObjectOptimizer(method="LLAMAImprovedHybridGeneticPSO").set_name( + "LLAMAImprovedHybridGeneticPSO", register=True + ) +except Exception as e: # ImprovedHybridGeneticPSO print("ImprovedHybridGeneticPSO can not be imported: ", e) -try: +try: # ImprovedHybridPSODEOptimizer from nevergrad.optimization.lama.ImprovedHybridPSODEOptimizer import ImprovedHybridPSODEOptimizer lama_register["ImprovedHybridPSODEOptimizer"] = ImprovedHybridPSODEOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMAImprovedHybridPSODEOptimizer").set_name("LLAMAImprovedHybridPSODEOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedHybridPSODEOptimizer = NonObjectOptimizer( + method="LLAMAImprovedHybridPSODEOptimizer" + ).set_name("LLAMAImprovedHybridPSODEOptimizer", register=True) +except Exception as e: # ImprovedHybridPSODEOptimizer print("ImprovedHybridPSODEOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedIterativeAdaptiveGradientEvolver import ImprovedIterativeAdaptiveGradientEvolver +try: # ImprovedIterativeAdaptiveGradientEvolver + from nevergrad.optimization.lama.ImprovedIterativeAdaptiveGradientEvolver import ( + ImprovedIterativeAdaptiveGradientEvolver, + ) lama_register["ImprovedIterativeAdaptiveGradientEvolver"] = ImprovedIterativeAdaptiveGradientEvolver - res = NonObjectOptimizer(method="LLAMAImprovedIterativeAdaptiveGradientEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedIterativeAdaptiveGradientEvolver = NonObjectOptimizer(method="LLAMAImprovedIterativeAdaptiveGradientEvolver").set_name("LLAMAImprovedIterativeAdaptiveGradientEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedIterativeAdaptiveGradientEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedIterativeAdaptiveGradientEvolver = NonObjectOptimizer( + method="LLAMAImprovedIterativeAdaptiveGradientEvolver" + ).set_name("LLAMAImprovedIterativeAdaptiveGradientEvolver", register=True) +except Exception as e: # ImprovedIterativeAdaptiveGradientEvolver print("ImprovedIterativeAdaptiveGradientEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedMetaDynamicQuantumSwarmOptimization import ImprovedMetaDynamicQuantumSwarmOptimization +try: # ImprovedMetaDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.ImprovedMetaDynamicQuantumSwarmOptimization import ( + ImprovedMetaDynamicQuantumSwarmOptimization, + ) lama_register["ImprovedMetaDynamicQuantumSwarmOptimization"] = ImprovedMetaDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization").set_name("LLAMAImprovedMetaDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAImprovedMetaDynamicQuantumSwarmOptimization" + ).set_name("LLAMAImprovedMetaDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # ImprovedMetaDynamicQuantumSwarmOptimization print("ImprovedMetaDynamicQuantumSwarmOptimization can not be imported: ", e) -try: +try: # ImprovedMultiOperatorSearch from nevergrad.optimization.lama.ImprovedMultiOperatorSearch import ImprovedMultiOperatorSearch lama_register["ImprovedMultiOperatorSearch"] = ImprovedMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch").set_name("LLAMAImprovedMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedMultiOperatorSearch").set_name( + "LLAMAImprovedMultiOperatorSearch", register=True + ) +except Exception as e: # ImprovedMultiOperatorSearch print("ImprovedMultiOperatorSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedMultiStrategySelfAdaptiveDE import ImprovedMultiStrategySelfAdaptiveDE +try: # ImprovedMultiStrategySelfAdaptiveDE + from nevergrad.optimization.lama.ImprovedMultiStrategySelfAdaptiveDE import ( + ImprovedMultiStrategySelfAdaptiveDE, + ) lama_register["ImprovedMultiStrategySelfAdaptiveDE"] = ImprovedMultiStrategySelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMAImprovedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAImprovedMultiStrategySelfAdaptiveDE").set_name("LLAMAImprovedMultiStrategySelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMAImprovedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMAImprovedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: # ImprovedMultiStrategySelfAdaptiveDE print("ImprovedMultiStrategySelfAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedOppositionBasedDifferentialEvolution import ImprovedOppositionBasedDifferentialEvolution - - lama_register["ImprovedOppositionBasedDifferentialEvolution"] = ImprovedOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedOppositionBasedDifferentialEvolution").set_name("LLAMAImprovedOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.ImprovedOppositionBasedDifferentialEvolution import ( + ImprovedOppositionBasedDifferentialEvolution, + ) + + lama_register["ImprovedOppositionBasedDifferentialEvolution"] = ( + ImprovedOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedOppositionBasedDifferentialEvolution" + ).set_name("LLAMAImprovedOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # ImprovedOppositionBasedDifferentialEvolution print("ImprovedOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedPrecisionAdaptiveEvolutiveStrategy import ImprovedPrecisionAdaptiveEvolutiveStrategy +try: # ImprovedPrecisionAdaptiveEvolutiveStrategy + from nevergrad.optimization.lama.ImprovedPrecisionAdaptiveEvolutiveStrategy import ( + ImprovedPrecisionAdaptiveEvolutiveStrategy, + ) lama_register["ImprovedPrecisionAdaptiveEvolutiveStrategy"] = ImprovedPrecisionAdaptiveEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy = NonObjectOptimizer(method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy").set_name("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy = NonObjectOptimizer( + method="LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy" + ).set_name("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy", register=True) +except Exception as e: # ImprovedPrecisionAdaptiveEvolutiveStrategy print("ImprovedPrecisionAdaptiveEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning import ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning - - lama_register["ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning - res = NonObjectOptimizer(method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer(method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning").set_name("LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) -except Exception as e: +try: # ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning + from nevergrad.optimization.lama.ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning import ( + ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning, + ) + + lama_register["ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( + ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning + ) + # res = NonObjectOptimizer(method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( + method="LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning" + ).set_name("LLAMAImprovedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) +except Exception as e: # ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning print("ImprovedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedQuantumEnhancedDynamicDifferentialEvolution import ImprovedQuantumEnhancedDynamicDifferentialEvolution - - lama_register["ImprovedQuantumEnhancedDynamicDifferentialEvolution"] = ImprovedQuantumEnhancedDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution").set_name("LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedQuantumEnhancedDynamicDifferentialEvolution + from nevergrad.optimization.lama.ImprovedQuantumEnhancedDynamicDifferentialEvolution import ( + ImprovedQuantumEnhancedDynamicDifferentialEvolution, + ) + + lama_register["ImprovedQuantumEnhancedDynamicDifferentialEvolution"] = ( + ImprovedQuantumEnhancedDynamicDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAImprovedQuantumEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: # ImprovedQuantumEnhancedDynamicDifferentialEvolution print("ImprovedQuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) -try: +try: # ImprovedQuantumHarmonySearch from nevergrad.optimization.lama.ImprovedQuantumHarmonySearch import ImprovedQuantumHarmonySearch lama_register["ImprovedQuantumHarmonySearch"] = ImprovedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAImprovedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAImprovedQuantumHarmonySearch").set_name("LLAMAImprovedQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAImprovedQuantumHarmonySearch" + ).set_name("LLAMAImprovedQuantumHarmonySearch", register=True) +except Exception as e: # ImprovedQuantumHarmonySearch print("ImprovedQuantumHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedQuantumLevyAdaptiveHybridSearch import ImprovedQuantumLevyAdaptiveHybridSearch +try: # ImprovedQuantumLevyAdaptiveHybridSearch + from nevergrad.optimization.lama.ImprovedQuantumLevyAdaptiveHybridSearch import ( + ImprovedQuantumLevyAdaptiveHybridSearch, + ) lama_register["ImprovedQuantumLevyAdaptiveHybridSearch"] = ImprovedQuantumLevyAdaptiveHybridSearch - res = NonObjectOptimizer(method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedQuantumLevyAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch").set_name("LLAMAImprovedQuantumLevyAdaptiveHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedQuantumLevyAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAImprovedQuantumLevyAdaptiveHybridSearch" + ).set_name("LLAMAImprovedQuantumLevyAdaptiveHybridSearch", register=True) +except Exception as e: # ImprovedQuantumLevyAdaptiveHybridSearch print("ImprovedQuantumLevyAdaptiveHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedQuantumSimulatedAnnealing import ImprovedQuantumSimulatedAnnealing +try: # ImprovedQuantumSimulatedAnnealing + from nevergrad.optimization.lama.ImprovedQuantumSimulatedAnnealing import ( + ImprovedQuantumSimulatedAnnealing, + ) lama_register["ImprovedQuantumSimulatedAnnealing"] = ImprovedQuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAImprovedQuantumSimulatedAnnealing").set_name("LLAMAImprovedQuantumSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedQuantumSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAImprovedQuantumSimulatedAnnealing" + ).set_name("LLAMAImprovedQuantumSimulatedAnnealing", register=True) +except Exception as e: # ImprovedQuantumSimulatedAnnealing print("ImprovedQuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedAdaptiveDynamicExplorationOptimization import ImprovedRefinedAdaptiveDynamicExplorationOptimization - - lama_register["ImprovedRefinedAdaptiveDynamicExplorationOptimization"] = ImprovedRefinedAdaptiveDynamicExplorationOptimization - res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization", register=True) -except Exception as e: +try: # ImprovedRefinedAdaptiveDynamicExplorationOptimization + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveDynamicExplorationOptimization import ( + ImprovedRefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["ImprovedRefinedAdaptiveDynamicExplorationOptimization"] = ( + ImprovedRefinedAdaptiveDynamicExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMAImprovedRefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: # ImprovedRefinedAdaptiveDynamicExplorationOptimization print("ImprovedRefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedAdaptiveMultiOperatorSearch import ImprovedRefinedAdaptiveMultiOperatorSearch +try: # ImprovedRefinedAdaptiveMultiOperatorSearch + from nevergrad.optimization.lama.ImprovedRefinedAdaptiveMultiOperatorSearch import ( + ImprovedRefinedAdaptiveMultiOperatorSearch, + ) lama_register["ImprovedRefinedAdaptiveMultiOperatorSearch"] = ImprovedRefinedAdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch").set_name("LLAMAImprovedRefinedAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMAImprovedRefinedAdaptiveMultiOperatorSearch" + ).set_name("LLAMAImprovedRefinedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: # ImprovedRefinedAdaptiveMultiOperatorSearch print("ImprovedRefinedAdaptiveMultiOperatorSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution import ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution - - lama_register["ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution").set_name("LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( + ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( + ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution print("ImprovedRefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization import ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization - - lama_register["ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization"] = ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) -except Exception as e: +try: # ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization + from nevergrad.optimization.lama.ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization import ( + ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization, + ) + + lama_register["ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( + ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization + ) + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMAImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: # ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization print("ImprovedRefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 import ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 - - lama_register["ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4"] = ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4", register=True) -except Exception as e: +try: # ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 + from nevergrad.optimization.lama.ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 import ( + ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4"] = ( + ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 + ) + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMAImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: # ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 print("ImprovedRefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO import ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO - - lama_register["ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO"] = ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO", register=True) -except Exception as e: +try: # ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO + from nevergrad.optimization.lama.ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO import ( + ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO, + ) + + lama_register["ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO"] = ( + ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO + ) + # res = NonObjectOptimizer(method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMAImprovedRefinedMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: # ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO print("ImprovedRefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveDifferentialEvolution import ImprovedSelfAdaptiveDifferentialEvolution +try: # ImprovedSelfAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ImprovedSelfAdaptiveDifferentialEvolution import ( + ImprovedSelfAdaptiveDifferentialEvolution, + ) lama_register["ImprovedSelfAdaptiveDifferentialEvolution"] = ImprovedSelfAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedSelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveDifferentialEvolution").set_name("LLAMAImprovedSelfAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedSelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveDifferentialEvolution" + ).set_name("LLAMAImprovedSelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ImprovedSelfAdaptiveDifferentialEvolution print("ImprovedSelfAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveHybridOptimizer import ImprovedSelfAdaptiveHybridOptimizer +try: # ImprovedSelfAdaptiveHybridOptimizer + from nevergrad.optimization.lama.ImprovedSelfAdaptiveHybridOptimizer import ( + ImprovedSelfAdaptiveHybridOptimizer, + ) lama_register["ImprovedSelfAdaptiveHybridOptimizer"] = ImprovedSelfAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedSelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveHybridOptimizer").set_name("LLAMAImprovedSelfAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedSelfAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveHybridOptimizer" + ).set_name("LLAMAImprovedSelfAdaptiveHybridOptimizer", register=True) +except Exception as e: # ImprovedSelfAdaptiveHybridOptimizer print("ImprovedSelfAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution import ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution - - lama_register["ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution"] = ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution").set_name("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution", register=True) -except Exception as e: +try: # ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution + from nevergrad.optimization.lama.ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution import ( + ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution, + ) + + lama_register["ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution"] = ( + ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution" + ).set_name("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution", register=True) +except Exception as e: # ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution print("ImprovedSelfAdaptiveOppositionBasedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ImprovedUnifiedAdaptiveMemeticOptimizer import ImprovedUnifiedAdaptiveMemeticOptimizer +try: # ImprovedUnifiedAdaptiveMemeticOptimizer + from nevergrad.optimization.lama.ImprovedUnifiedAdaptiveMemeticOptimizer import ( + ImprovedUnifiedAdaptiveMemeticOptimizer, + ) lama_register["ImprovedUnifiedAdaptiveMemeticOptimizer"] = ImprovedUnifiedAdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAImprovedUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer").set_name("LLAMAImprovedUnifiedAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAImprovedUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAImprovedUnifiedAdaptiveMemeticOptimizer" + ).set_name("LLAMAImprovedUnifiedAdaptiveMemeticOptimizer", register=True) +except Exception as e: # ImprovedUnifiedAdaptiveMemeticOptimizer print("ImprovedUnifiedAdaptiveMemeticOptimizer can not be imported: ", e) -try: +try: # IncrementalCrossoverOptimization from nevergrad.optimization.lama.IncrementalCrossoverOptimization import IncrementalCrossoverOptimization lama_register["IncrementalCrossoverOptimization"] = IncrementalCrossoverOptimization - res = NonObjectOptimizer(method="LLAMAIncrementalCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIncrementalCrossoverOptimization = NonObjectOptimizer(method="LLAMAIncrementalCrossoverOptimization").set_name("LLAMAIncrementalCrossoverOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIncrementalCrossoverOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIncrementalCrossoverOptimization = NonObjectOptimizer( + method="LLAMAIncrementalCrossoverOptimization" + ).set_name("LLAMAIncrementalCrossoverOptimization", register=True) +except Exception as e: # IncrementalCrossoverOptimization print("IncrementalCrossoverOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.IntelligentDynamicDualPhaseStrategyV39 import IntelligentDynamicDualPhaseStrategyV39 +try: # IntelligentDynamicDualPhaseStrategyV39 + from nevergrad.optimization.lama.IntelligentDynamicDualPhaseStrategyV39 import ( + IntelligentDynamicDualPhaseStrategyV39, + ) lama_register["IntelligentDynamicDualPhaseStrategyV39"] = IntelligentDynamicDualPhaseStrategyV39 - res = NonObjectOptimizer(method="LLAMAIntelligentDynamicDualPhaseStrategyV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIntelligentDynamicDualPhaseStrategyV39 = NonObjectOptimizer(method="LLAMAIntelligentDynamicDualPhaseStrategyV39").set_name("LLAMAIntelligentDynamicDualPhaseStrategyV39", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIntelligentDynamicDualPhaseStrategyV39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIntelligentDynamicDualPhaseStrategyV39 = NonObjectOptimizer( + method="LLAMAIntelligentDynamicDualPhaseStrategyV39" + ).set_name("LLAMAIntelligentDynamicDualPhaseStrategyV39", register=True) +except Exception as e: # IntelligentDynamicDualPhaseStrategyV39 print("IntelligentDynamicDualPhaseStrategyV39 can not be imported: ", e) -try: - from nevergrad.optimization.lama.IntelligentEvolvingAdaptiveStrategyV34 import IntelligentEvolvingAdaptiveStrategyV34 +try: # IntelligentEvolvingAdaptiveStrategyV34 + from nevergrad.optimization.lama.IntelligentEvolvingAdaptiveStrategyV34 import ( + IntelligentEvolvingAdaptiveStrategyV34, + ) lama_register["IntelligentEvolvingAdaptiveStrategyV34"] = IntelligentEvolvingAdaptiveStrategyV34 - res = NonObjectOptimizer(method="LLAMAIntelligentEvolvingAdaptiveStrategyV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIntelligentEvolvingAdaptiveStrategyV34 = NonObjectOptimizer(method="LLAMAIntelligentEvolvingAdaptiveStrategyV34").set_name("LLAMAIntelligentEvolvingAdaptiveStrategyV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIntelligentEvolvingAdaptiveStrategyV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIntelligentEvolvingAdaptiveStrategyV34 = NonObjectOptimizer( + method="LLAMAIntelligentEvolvingAdaptiveStrategyV34" + ).set_name("LLAMAIntelligentEvolvingAdaptiveStrategyV34", register=True) +except Exception as e: # IntelligentEvolvingAdaptiveStrategyV34 print("IntelligentEvolvingAdaptiveStrategyV34 can not be imported: ", e) -try: +try: # IntelligentPerturbationSearch from nevergrad.optimization.lama.IntelligentPerturbationSearch import IntelligentPerturbationSearch lama_register["IntelligentPerturbationSearch"] = IntelligentPerturbationSearch - res = NonObjectOptimizer(method="LLAMAIntelligentPerturbationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIntelligentPerturbationSearch = NonObjectOptimizer(method="LLAMAIntelligentPerturbationSearch").set_name("LLAMAIntelligentPerturbationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIntelligentPerturbationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIntelligentPerturbationSearch = NonObjectOptimizer( + method="LLAMAIntelligentPerturbationSearch" + ).set_name("LLAMAIntelligentPerturbationSearch", register=True) +except Exception as e: # IntelligentPerturbationSearch print("IntelligentPerturbationSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.IterativeAdaptiveDifferentialEvolution import IterativeAdaptiveDifferentialEvolution +try: # IterativeAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.IterativeAdaptiveDifferentialEvolution import ( + IterativeAdaptiveDifferentialEvolution, + ) lama_register["IterativeAdaptiveDifferentialEvolution"] = IterativeAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAIterativeAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIterativeAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAIterativeAdaptiveDifferentialEvolution").set_name("LLAMAIterativeAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIterativeAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIterativeAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAIterativeAdaptiveDifferentialEvolution" + ).set_name("LLAMAIterativeAdaptiveDifferentialEvolution", register=True) +except Exception as e: # IterativeAdaptiveDifferentialEvolution print("IterativeAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.IterativeProgressiveDifferentialEvolution import IterativeProgressiveDifferentialEvolution +try: # IterativeProgressiveDifferentialEvolution + from nevergrad.optimization.lama.IterativeProgressiveDifferentialEvolution import ( + IterativeProgressiveDifferentialEvolution, + ) lama_register["IterativeProgressiveDifferentialEvolution"] = IterativeProgressiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAIterativeProgressiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAIterativeProgressiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAIterativeProgressiveDifferentialEvolution").set_name("LLAMAIterativeProgressiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAIterativeProgressiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAIterativeProgressiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAIterativeProgressiveDifferentialEvolution" + ).set_name("LLAMAIterativeProgressiveDifferentialEvolution", register=True) +except Exception as e: # IterativeProgressiveDifferentialEvolution print("IterativeProgressiveDifferentialEvolution can not be imported: ", e) -try: +try: # LADESA from nevergrad.optimization.lama.LADESA import LADESA lama_register["LADESA"] = LADESA - res = NonObjectOptimizer(method="LLAMALADESA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMALADESA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMALADESA = NonObjectOptimizer(method="LLAMALADESA").set_name("LLAMALADESA", register=True) -except Exception as e: +except Exception as e: # LADESA print("LADESA can not be imported: ", e) -try: +try: # LAOS from nevergrad.optimization.lama.LAOS import LAOS lama_register["LAOS"] = LAOS - res = NonObjectOptimizer(method="LLAMALAOS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMALAOS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMALAOS = NonObjectOptimizer(method="LLAMALAOS").set_name("LLAMALAOS", register=True) -except Exception as e: +except Exception as e: # LAOS print("LAOS can not be imported: ", e) -try: - from nevergrad.optimization.lama.LearningAdaptiveMemoryEnhancedStrategyV42 import LearningAdaptiveMemoryEnhancedStrategyV42 +try: # LearningAdaptiveMemoryEnhancedStrategyV42 + from nevergrad.optimization.lama.LearningAdaptiveMemoryEnhancedStrategyV42 import ( + LearningAdaptiveMemoryEnhancedStrategyV42, + ) lama_register["LearningAdaptiveMemoryEnhancedStrategyV42"] = LearningAdaptiveMemoryEnhancedStrategyV42 - res = NonObjectOptimizer(method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMALearningAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer(method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42").set_name("LLAMALearningAdaptiveMemoryEnhancedStrategyV42", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMALearningAdaptiveMemoryEnhancedStrategyV42 = NonObjectOptimizer( + method="LLAMALearningAdaptiveMemoryEnhancedStrategyV42" + ).set_name("LLAMALearningAdaptiveMemoryEnhancedStrategyV42", register=True) +except Exception as e: # LearningAdaptiveMemoryEnhancedStrategyV42 print("LearningAdaptiveMemoryEnhancedStrategyV42 can not be imported: ", e) -try: +try: # LearningAdaptiveStrategyV24 from nevergrad.optimization.lama.LearningAdaptiveStrategyV24 import LearningAdaptiveStrategyV24 lama_register["LearningAdaptiveStrategyV24"] = LearningAdaptiveStrategyV24 - res = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMALearningAdaptiveStrategyV24 = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24").set_name("LLAMALearningAdaptiveStrategyV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMALearningAdaptiveStrategyV24 = NonObjectOptimizer(method="LLAMALearningAdaptiveStrategyV24").set_name( + "LLAMALearningAdaptiveStrategyV24", register=True + ) +except Exception as e: # LearningAdaptiveStrategyV24 print("LearningAdaptiveStrategyV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.LevyEnhancedAdaptiveSimulatedAnnealingDE import LevyEnhancedAdaptiveSimulatedAnnealingDE +try: # LevyEnhancedAdaptiveSimulatedAnnealingDE + from nevergrad.optimization.lama.LevyEnhancedAdaptiveSimulatedAnnealingDE import ( + LevyEnhancedAdaptiveSimulatedAnnealingDE, + ) lama_register["LevyEnhancedAdaptiveSimulatedAnnealingDE"] = LevyEnhancedAdaptiveSimulatedAnnealingDE - res = NonObjectOptimizer(method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE = NonObjectOptimizer(method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE").set_name("LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE = NonObjectOptimizer( + method="LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE" + ).set_name("LLAMALevyEnhancedAdaptiveSimulatedAnnealingDE", register=True) +except Exception as e: # LevyEnhancedAdaptiveSimulatedAnnealingDE print("LevyEnhancedAdaptiveSimulatedAnnealingDE can not be imported: ", e) -try: +try: # MADE from nevergrad.optimization.lama.MADE import MADE lama_register["MADE"] = MADE - res = NonObjectOptimizer(method="LLAMAMADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAMADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAMADE = NonObjectOptimizer(method="LLAMAMADE").set_name("LLAMAMADE", register=True) -except Exception as e: +except Exception as e: # MADE print("MADE can not be imported: ", e) -try: +try: # MIDEAT from nevergrad.optimization.lama.MIDEAT import MIDEAT lama_register["MIDEAT"] = MIDEAT - res = NonObjectOptimizer(method="LLAMAMIDEAT")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAMIDEAT")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAMIDEAT = NonObjectOptimizer(method="LLAMAMIDEAT").set_name("LLAMAMIDEAT", register=True) -except Exception as e: +except Exception as e: # MIDEAT print("MIDEAT can not be imported: ", e) -try: +try: # MSADE from nevergrad.optimization.lama.MSADE import MSADE lama_register["MSADE"] = MSADE - res = NonObjectOptimizer(method="LLAMAMSADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAMSADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAMSADE = NonObjectOptimizer(method="LLAMAMSADE").set_name("LLAMAMSADE", register=True) -except Exception as e: +except Exception as e: # MSADE print("MSADE can not be imported: ", e) -try: +try: # MSEAS from nevergrad.optimization.lama.MSEAS import MSEAS lama_register["MSEAS"] = MSEAS - res = NonObjectOptimizer(method="LLAMAMSEAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAMSEAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAMSEAS = NonObjectOptimizer(method="LLAMAMSEAS").set_name("LLAMAMSEAS", register=True) -except Exception as e: +except Exception as e: # MSEAS print("MSEAS can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemeticAdaptiveDifferentialEvolution import MemeticAdaptiveDifferentialEvolution +try: # MemeticAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.MemeticAdaptiveDifferentialEvolution import ( + MemeticAdaptiveDifferentialEvolution, + ) lama_register["MemeticAdaptiveDifferentialEvolution"] = MemeticAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAMemeticAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemeticAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAMemeticAdaptiveDifferentialEvolution").set_name("LLAMAMemeticAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemeticAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemeticAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMemeticAdaptiveDifferentialEvolution" + ).set_name("LLAMAMemeticAdaptiveDifferentialEvolution", register=True) +except Exception as e: # MemeticAdaptiveDifferentialEvolution print("MemeticAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemeticDifferentialEvolutionOptimizer import MemeticDifferentialEvolutionOptimizer +try: # MemeticDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.MemeticDifferentialEvolutionOptimizer import ( + MemeticDifferentialEvolutionOptimizer, + ) lama_register["MemeticDifferentialEvolutionOptimizer"] = MemeticDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMAMemeticDifferentialEvolutionOptimizer").set_name("LLAMAMemeticDifferentialEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemeticDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemeticDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAMemeticDifferentialEvolutionOptimizer" + ).set_name("LLAMAMemeticDifferentialEvolutionOptimizer", register=True) +except Exception as e: # MemeticDifferentialEvolutionOptimizer print("MemeticDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemeticElitistDifferentialEvolutionWithDynamicFandCR import MemeticElitistDifferentialEvolutionWithDynamicFandCR - - lama_register["MemeticElitistDifferentialEvolutionWithDynamicFandCR"] = MemeticElitistDifferentialEvolutionWithDynamicFandCR - res = NonObjectOptimizer(method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR = NonObjectOptimizer(method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR").set_name("LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR", register=True) -except Exception as e: +try: # MemeticElitistDifferentialEvolutionWithDynamicFandCR + from nevergrad.optimization.lama.MemeticElitistDifferentialEvolutionWithDynamicFandCR import ( + MemeticElitistDifferentialEvolutionWithDynamicFandCR, + ) + + lama_register["MemeticElitistDifferentialEvolutionWithDynamicFandCR"] = ( + MemeticElitistDifferentialEvolutionWithDynamicFandCR + ) + # res = NonObjectOptimizer(method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR = NonObjectOptimizer( + method="LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR" + ).set_name("LLAMAMemeticElitistDifferentialEvolutionWithDynamicFandCR", register=True) +except Exception as e: # MemeticElitistDifferentialEvolutionWithDynamicFandCR print("MemeticElitistDifferentialEvolutionWithDynamicFandCR can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemeticEnhancedParticleSwarmOptimization import MemeticEnhancedParticleSwarmOptimization +try: # MemeticEnhancedParticleSwarmOptimization + from nevergrad.optimization.lama.MemeticEnhancedParticleSwarmOptimization import ( + MemeticEnhancedParticleSwarmOptimization, + ) lama_register["MemeticEnhancedParticleSwarmOptimization"] = MemeticEnhancedParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAMemeticEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemeticEnhancedParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAMemeticEnhancedParticleSwarmOptimization").set_name("LLAMAMemeticEnhancedParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemeticEnhancedParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemeticEnhancedParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAMemeticEnhancedParticleSwarmOptimization" + ).set_name("LLAMAMemeticEnhancedParticleSwarmOptimization", register=True) +except Exception as e: # MemeticEnhancedParticleSwarmOptimization print("MemeticEnhancedParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemeticSpatialDifferentialEvolution import MemeticSpatialDifferentialEvolution +try: # MemeticSpatialDifferentialEvolution + from nevergrad.optimization.lama.MemeticSpatialDifferentialEvolution import ( + MemeticSpatialDifferentialEvolution, + ) lama_register["MemeticSpatialDifferentialEvolution"] = MemeticSpatialDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAMemeticSpatialDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemeticSpatialDifferentialEvolution = NonObjectOptimizer(method="LLAMAMemeticSpatialDifferentialEvolution").set_name("LLAMAMemeticSpatialDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemeticSpatialDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemeticSpatialDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMemeticSpatialDifferentialEvolution" + ).set_name("LLAMAMemeticSpatialDifferentialEvolution", register=True) +except Exception as e: # MemeticSpatialDifferentialEvolution print("MemeticSpatialDifferentialEvolution can not be imported: ", e) -try: +try: # MemoryBasedSimulatedAnnealing from nevergrad.optimization.lama.MemoryBasedSimulatedAnnealing import MemoryBasedSimulatedAnnealing lama_register["MemoryBasedSimulatedAnnealing"] = MemoryBasedSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAMemoryBasedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryBasedSimulatedAnnealing = NonObjectOptimizer(method="LLAMAMemoryBasedSimulatedAnnealing").set_name("LLAMAMemoryBasedSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryBasedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryBasedSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAMemoryBasedSimulatedAnnealing" + ).set_name("LLAMAMemoryBasedSimulatedAnnealing", register=True) +except Exception as e: # MemoryBasedSimulatedAnnealing print("MemoryBasedSimulatedAnnealing can not be imported: ", e) -try: +try: # MemoryEnhancedAdaptiveAnnealing from nevergrad.optimization.lama.MemoryEnhancedAdaptiveAnnealing import MemoryEnhancedAdaptiveAnnealing lama_register["MemoryEnhancedAdaptiveAnnealing"] = MemoryEnhancedAdaptiveAnnealing - res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryEnhancedAdaptiveAnnealing = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveAnnealing").set_name("LLAMAMemoryEnhancedAdaptiveAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryEnhancedAdaptiveAnnealing = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveAnnealing" + ).set_name("LLAMAMemoryEnhancedAdaptiveAnnealing", register=True) +except Exception as e: # MemoryEnhancedAdaptiveAnnealing print("MemoryEnhancedAdaptiveAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealing import MemoryEnhancedAdaptiveMultiPhaseAnnealing +try: # MemoryEnhancedAdaptiveMultiPhaseAnnealing + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealing import ( + MemoryEnhancedAdaptiveMultiPhaseAnnealing, + ) lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealing"] = MemoryEnhancedAdaptiveMultiPhaseAnnealing - res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing").set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing" + ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing", register=True) +except Exception as e: # MemoryEnhancedAdaptiveMultiPhaseAnnealing print("MemoryEnhancedAdaptiveMultiPhaseAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient import MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient - - lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient - res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient").set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) -except Exception as e: +try: # MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient + from nevergrad.optimization.lama.MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient import ( + MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient, + ) + + lama_register["MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient"] = ( + MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient + ) + # res = NonObjectOptimizer(method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient = NonObjectOptimizer( + method="LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient" + ).set_name("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient", register=True) +except Exception as e: # MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient print("MemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemoryEnhancedDynamicHybridOptimizer import MemoryEnhancedDynamicHybridOptimizer +try: # MemoryEnhancedDynamicHybridOptimizer + from nevergrad.optimization.lama.MemoryEnhancedDynamicHybridOptimizer import ( + MemoryEnhancedDynamicHybridOptimizer, + ) lama_register["MemoryEnhancedDynamicHybridOptimizer"] = MemoryEnhancedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMAMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMAMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMAMemoryEnhancedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMAMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMAMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: # MemoryEnhancedDynamicHybridOptimizer print("MemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.MemoryGuidedAdaptiveDualPhaseStrategyV40 import MemoryGuidedAdaptiveDualPhaseStrategyV40 +try: # MemoryGuidedAdaptiveDualPhaseStrategyV40 + from nevergrad.optimization.lama.MemoryGuidedAdaptiveDualPhaseStrategyV40 import ( + MemoryGuidedAdaptiveDualPhaseStrategyV40, + ) lama_register["MemoryGuidedAdaptiveDualPhaseStrategyV40"] = MemoryGuidedAdaptiveDualPhaseStrategyV40 - res = NonObjectOptimizer(method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40 = NonObjectOptimizer(method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40").set_name("LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40 = NonObjectOptimizer( + method="LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40" + ).set_name("LLAMAMemoryGuidedAdaptiveDualPhaseStrategyV40", register=True) +except Exception as e: # MemoryGuidedAdaptiveDualPhaseStrategyV40 print("MemoryGuidedAdaptiveDualPhaseStrategyV40 can not be imported: ", e) -try: +try: # MemoryHybridAdaptiveDE from nevergrad.optimization.lama.MemoryHybridAdaptiveDE import MemoryHybridAdaptiveDE lama_register["MemoryHybridAdaptiveDE"] = MemoryHybridAdaptiveDE - res = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMemoryHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE").set_name("LLAMAMemoryHybridAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMemoryHybridAdaptiveDE = NonObjectOptimizer(method="LLAMAMemoryHybridAdaptiveDE").set_name( + "LLAMAMemoryHybridAdaptiveDE", register=True + ) +except Exception as e: # MemoryHybridAdaptiveDE print("MemoryHybridAdaptiveDE can not be imported: ", e) -try: +try: # MetaDynamicPrecisionOptimizerV1 from nevergrad.optimization.lama.MetaDynamicPrecisionOptimizerV1 import MetaDynamicPrecisionOptimizerV1 lama_register["MetaDynamicPrecisionOptimizerV1"] = MetaDynamicPrecisionOptimizerV1 - res = NonObjectOptimizer(method="LLAMAMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMAMetaDynamicPrecisionOptimizerV1").set_name("LLAMAMetaDynamicPrecisionOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMetaDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMetaDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMAMetaDynamicPrecisionOptimizerV1" + ).set_name("LLAMAMetaDynamicPrecisionOptimizerV1", register=True) +except Exception as e: # MetaDynamicPrecisionOptimizerV1 print("MetaDynamicPrecisionOptimizerV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.MetaDynamicQuantumSwarmOptimization import MetaDynamicQuantumSwarmOptimization +try: # MetaDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.MetaDynamicQuantumSwarmOptimization import ( + MetaDynamicQuantumSwarmOptimization, + ) lama_register["MetaDynamicQuantumSwarmOptimization"] = MetaDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAMetaDynamicQuantumSwarmOptimization").set_name("LLAMAMetaDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMetaDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMetaDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAMetaDynamicQuantumSwarmOptimization" + ).set_name("LLAMAMetaDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # MetaDynamicQuantumSwarmOptimization print("MetaDynamicQuantumSwarmOptimization can not be imported: ", e) -try: +try: # MetaHarmonicSearch from nevergrad.optimization.lama.MetaHarmonicSearch import MetaHarmonicSearch lama_register["MetaHarmonicSearch"] = MetaHarmonicSearch - res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMetaHarmonicSearch = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch").set_name("LLAMAMetaHarmonicSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMetaHarmonicSearch = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch").set_name( + "LLAMAMetaHarmonicSearch", register=True + ) +except Exception as e: # MetaHarmonicSearch print("MetaHarmonicSearch can not be imported: ", e) -try: +try: # MetaHarmonicSearch2 from nevergrad.optimization.lama.MetaHarmonicSearch2 import MetaHarmonicSearch2 lama_register["MetaHarmonicSearch2"] = MetaHarmonicSearch2 - res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMetaHarmonicSearch2 = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2").set_name("LLAMAMetaHarmonicSearch2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMetaHarmonicSearch2 = NonObjectOptimizer(method="LLAMAMetaHarmonicSearch2").set_name( + "LLAMAMetaHarmonicSearch2", register=True + ) +except Exception as e: # MetaHarmonicSearch2 print("MetaHarmonicSearch2 can not be imported: ", e) -try: +try: # MetaNetAQAPSO from nevergrad.optimization.lama.MetaNetAQAPSO import MetaNetAQAPSO lama_register["MetaNetAQAPSO"] = MetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO").set_name("LLAMAMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAMetaNetAQAPSO").set_name( + "LLAMAMetaNetAQAPSO", register=True + ) +except Exception as e: # MetaNetAQAPSO print("MetaNetAQAPSO can not be imported: ", e) -try: +try: # MomentumGradientExploration from nevergrad.optimization.lama.MomentumGradientExploration import MomentumGradientExploration lama_register["MomentumGradientExploration"] = MomentumGradientExploration - res = NonObjectOptimizer(method="LLAMAMomentumGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMomentumGradientExploration = NonObjectOptimizer(method="LLAMAMomentumGradientExploration").set_name("LLAMAMomentumGradientExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMomentumGradientExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMomentumGradientExploration = NonObjectOptimizer(method="LLAMAMomentumGradientExploration").set_name( + "LLAMAMomentumGradientExploration", register=True + ) +except Exception as e: # MomentumGradientExploration print("MomentumGradientExploration can not be imported: ", e) -try: +try: # MultiFacetAdaptiveSearch from nevergrad.optimization.lama.MultiFacetAdaptiveSearch import MultiFacetAdaptiveSearch lama_register["MultiFacetAdaptiveSearch"] = MultiFacetAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiFacetAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch").set_name("LLAMAMultiFacetAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiFacetAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiFacetAdaptiveSearch").set_name( + "LLAMAMultiFacetAdaptiveSearch", register=True + ) +except Exception as e: # MultiFacetAdaptiveSearch print("MultiFacetAdaptiveSearch can not be imported: ", e) -try: +try: # MultiFocalAdaptiveOptimizer from nevergrad.optimization.lama.MultiFocalAdaptiveOptimizer import MultiFocalAdaptiveOptimizer lama_register["MultiFocalAdaptiveOptimizer"] = MultiFocalAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer").set_name("LLAMAMultiFocalAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiFocalAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAMultiFocalAdaptiveOptimizer").set_name( + "LLAMAMultiFocalAdaptiveOptimizer", register=True + ) +except Exception as e: # MultiFocalAdaptiveOptimizer print("MultiFocalAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiLayeredAdaptiveCovarianceMatrixEvolution import MultiLayeredAdaptiveCovarianceMatrixEvolution - - lama_register["MultiLayeredAdaptiveCovarianceMatrixEvolution"] = MultiLayeredAdaptiveCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution").set_name("LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution", register=True) -except Exception as e: +try: # MultiLayeredAdaptiveCovarianceMatrixEvolution + from nevergrad.optimization.lama.MultiLayeredAdaptiveCovarianceMatrixEvolution import ( + MultiLayeredAdaptiveCovarianceMatrixEvolution, + ) + + lama_register["MultiLayeredAdaptiveCovarianceMatrixEvolution"] = ( + MultiLayeredAdaptiveCovarianceMatrixEvolution + ) + # res = NonObjectOptimizer(method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMAMultiLayeredAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: # MultiLayeredAdaptiveCovarianceMatrixEvolution print("MultiLayeredAdaptiveCovarianceMatrixEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiModalMemoryEnhancedHybridOptimizer import MultiModalMemoryEnhancedHybridOptimizer +try: # MultiModalMemoryEnhancedHybridOptimizer + from nevergrad.optimization.lama.MultiModalMemoryEnhancedHybridOptimizer import ( + MultiModalMemoryEnhancedHybridOptimizer, + ) lama_register["MultiModalMemoryEnhancedHybridOptimizer"] = MultiModalMemoryEnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMAMultiModalMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiModalMemoryEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMAMultiModalMemoryEnhancedHybridOptimizer").set_name("LLAMAMultiModalMemoryEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiModalMemoryEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiModalMemoryEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMAMultiModalMemoryEnhancedHybridOptimizer" + ).set_name("LLAMAMultiModalMemoryEnhancedHybridOptimizer", register=True) +except Exception as e: # MultiModalMemoryEnhancedHybridOptimizer print("MultiModalMemoryEnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 import MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 - - lama_register["MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66"] = MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 - res = NonObjectOptimizer(method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 = NonObjectOptimizer(method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66").set_name("LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66", register=True) -except Exception as e: +try: # MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 + from nevergrad.optimization.lama.MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 import ( + MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66, + ) + + lama_register["MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66"] = ( + MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 + ) + # res = NonObjectOptimizer(method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 = NonObjectOptimizer( + method="LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66" + ).set_name("LLAMAMultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66", register=True) +except Exception as e: # MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 print("MultiObjectiveAdvancedEnhancedGuidedMassQGSA_v66 can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 import MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 - - lama_register["MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67"] = MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 - res = NonObjectOptimizer(method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 = NonObjectOptimizer(method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67").set_name("LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67", register=True) -except Exception as e: +try: # MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 + from nevergrad.optimization.lama.MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 import ( + MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67, + ) + + lama_register["MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67"] = ( + MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 + ) + # res = NonObjectOptimizer(method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 = NonObjectOptimizer( + method="LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67" + ).set_name("LLAMAMultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67", register=True) +except Exception as e: # MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 print("MultiObjectiveImprovedAdvancedEnhancedGuidedMassQGSA_v67 can not be imported: ", e) -try: +try: # MultiOperatorSearch from nevergrad.optimization.lama.MultiOperatorSearch import MultiOperatorSearch lama_register["MultiOperatorSearch"] = MultiOperatorSearch - res = NonObjectOptimizer(method="LLAMAMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiOperatorSearch = NonObjectOptimizer(method="LLAMAMultiOperatorSearch").set_name("LLAMAMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiOperatorSearch = NonObjectOptimizer(method="LLAMAMultiOperatorSearch").set_name( + "LLAMAMultiOperatorSearch", register=True + ) +except Exception as e: # MultiOperatorSearch print("MultiOperatorSearch can not be imported: ", e) -try: +try: # MultiPhaseAdaptiveDE from nevergrad.optimization.lama.MultiPhaseAdaptiveDE import MultiPhaseAdaptiveDE lama_register["MultiPhaseAdaptiveDE"] = MultiPhaseAdaptiveDE - res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE").set_name("LLAMAMultiPhaseAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPhaseAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDE").set_name( + "LLAMAMultiPhaseAdaptiveDE", register=True + ) +except Exception as e: # MultiPhaseAdaptiveDE print("MultiPhaseAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiPhaseAdaptiveDifferentialEvolution import MultiPhaseAdaptiveDifferentialEvolution +try: # MultiPhaseAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.MultiPhaseAdaptiveDifferentialEvolution import ( + MultiPhaseAdaptiveDifferentialEvolution, + ) lama_register["MultiPhaseAdaptiveDifferentialEvolution"] = MultiPhaseAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPhaseAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDifferentialEvolution").set_name("LLAMAMultiPhaseAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPhaseAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveDifferentialEvolution" + ).set_name("LLAMAMultiPhaseAdaptiveDifferentialEvolution", register=True) +except Exception as e: # MultiPhaseAdaptiveDifferentialEvolution print("MultiPhaseAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiPhaseAdaptiveExplorationOptimization import MultiPhaseAdaptiveExplorationOptimization +try: # MultiPhaseAdaptiveExplorationOptimization + from nevergrad.optimization.lama.MultiPhaseAdaptiveExplorationOptimization import ( + MultiPhaseAdaptiveExplorationOptimization, + ) lama_register["MultiPhaseAdaptiveExplorationOptimization"] = MultiPhaseAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPhaseAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveExplorationOptimization").set_name("LLAMAMultiPhaseAdaptiveExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPhaseAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveExplorationOptimization" + ).set_name("LLAMAMultiPhaseAdaptiveExplorationOptimization", register=True) +except Exception as e: # MultiPhaseAdaptiveExplorationOptimization print("MultiPhaseAdaptiveExplorationOptimization can not be imported: ", e) -try: +try: # MultiPhaseAdaptiveHybridDEPSO from nevergrad.optimization.lama.MultiPhaseAdaptiveHybridDEPSO import MultiPhaseAdaptiveHybridDEPSO lama_register["MultiPhaseAdaptiveHybridDEPSO"] = MultiPhaseAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMAMultiPhaseAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMAMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: # MultiPhaseAdaptiveHybridDEPSO print("MultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) -try: +try: # MultiPhaseDiversityAdaptiveDE from nevergrad.optimization.lama.MultiPhaseDiversityAdaptiveDE import MultiPhaseDiversityAdaptiveDE lama_register["MultiPhaseDiversityAdaptiveDE"] = MultiPhaseDiversityAdaptiveDE - res = NonObjectOptimizer(method="LLAMAMultiPhaseDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPhaseDiversityAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiPhaseDiversityAdaptiveDE").set_name("LLAMAMultiPhaseDiversityAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPhaseDiversityAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPhaseDiversityAdaptiveDE = NonObjectOptimizer( + method="LLAMAMultiPhaseDiversityAdaptiveDE" + ).set_name("LLAMAMultiPhaseDiversityAdaptiveDE", register=True) +except Exception as e: # MultiPhaseDiversityAdaptiveDE print("MultiPhaseDiversityAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiPopulationAdaptiveMemorySearch import MultiPopulationAdaptiveMemorySearch +try: # MultiPopulationAdaptiveMemorySearch + from nevergrad.optimization.lama.MultiPopulationAdaptiveMemorySearch import ( + MultiPopulationAdaptiveMemorySearch, + ) lama_register["MultiPopulationAdaptiveMemorySearch"] = MultiPopulationAdaptiveMemorySearch - res = NonObjectOptimizer(method="LLAMAMultiPopulationAdaptiveMemorySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiPopulationAdaptiveMemorySearch = NonObjectOptimizer(method="LLAMAMultiPopulationAdaptiveMemorySearch").set_name("LLAMAMultiPopulationAdaptiveMemorySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiPopulationAdaptiveMemorySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiPopulationAdaptiveMemorySearch = NonObjectOptimizer( + method="LLAMAMultiPopulationAdaptiveMemorySearch" + ).set_name("LLAMAMultiPopulationAdaptiveMemorySearch", register=True) +except Exception as e: # MultiPopulationAdaptiveMemorySearch print("MultiPopulationAdaptiveMemorySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiScaleAdaptiveHybridOptimization import MultiScaleAdaptiveHybridOptimization +try: # MultiScaleAdaptiveHybridOptimization + from nevergrad.optimization.lama.MultiScaleAdaptiveHybridOptimization import ( + MultiScaleAdaptiveHybridOptimization, + ) lama_register["MultiScaleAdaptiveHybridOptimization"] = MultiScaleAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMAMultiScaleAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiScaleAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMAMultiScaleAdaptiveHybridOptimization").set_name("LLAMAMultiScaleAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiScaleAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiScaleAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMAMultiScaleAdaptiveHybridOptimization" + ).set_name("LLAMAMultiScaleAdaptiveHybridOptimization", register=True) +except Exception as e: # MultiScaleAdaptiveHybridOptimization print("MultiScaleAdaptiveHybridOptimization can not be imported: ", e) -try: +try: # MultiScaleGradientExploration from nevergrad.optimization.lama.MultiScaleGradientExploration import MultiScaleGradientExploration lama_register["MultiScaleGradientExploration"] = MultiScaleGradientExploration - res = NonObjectOptimizer(method="LLAMAMultiScaleGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiScaleGradientExploration = NonObjectOptimizer(method="LLAMAMultiScaleGradientExploration").set_name("LLAMAMultiScaleGradientExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiScaleGradientExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiScaleGradientExploration = NonObjectOptimizer( + method="LLAMAMultiScaleGradientExploration" + ).set_name("LLAMAMultiScaleGradientExploration", register=True) +except Exception as e: # MultiScaleGradientExploration print("MultiScaleGradientExploration can not be imported: ", e) -try: +try: # MultiScaleGradientSearch from nevergrad.optimization.lama.MultiScaleGradientSearch import MultiScaleGradientSearch lama_register["MultiScaleGradientSearch"] = MultiScaleGradientSearch - res = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiScaleGradientSearch = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch").set_name("LLAMAMultiScaleGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiScaleGradientSearch = NonObjectOptimizer(method="LLAMAMultiScaleGradientSearch").set_name( + "LLAMAMultiScaleGradientSearch", register=True + ) +except Exception as e: # MultiScaleGradientSearch print("MultiScaleGradientSearch can not be imported: ", e) -try: +try: # MultiScaleQuadraticSearch from nevergrad.optimization.lama.MultiScaleQuadraticSearch import MultiScaleQuadraticSearch lama_register["MultiScaleQuadraticSearch"] = MultiScaleQuadraticSearch - res = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiScaleQuadraticSearch = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch").set_name("LLAMAMultiScaleQuadraticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiScaleQuadraticSearch = NonObjectOptimizer(method="LLAMAMultiScaleQuadraticSearch").set_name( + "LLAMAMultiScaleQuadraticSearch", register=True + ) +except Exception as e: # MultiScaleQuadraticSearch print("MultiScaleQuadraticSearch can not be imported: ", e) -try: +try: # MultiStageAdaptiveSearch from nevergrad.optimization.lama.MultiStageAdaptiveSearch import MultiStageAdaptiveSearch lama_register["MultiStageAdaptiveSearch"] = MultiStageAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch").set_name("LLAMAMultiStageAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMAMultiStageAdaptiveSearch").set_name( + "LLAMAMultiStageAdaptiveSearch", register=True + ) +except Exception as e: # MultiStageAdaptiveSearch print("MultiStageAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStageHybridGradientBoostedAnnealing import MultiStageHybridGradientBoostedAnnealing +try: # MultiStageHybridGradientBoostedAnnealing + from nevergrad.optimization.lama.MultiStageHybridGradientBoostedAnnealing import ( + MultiStageHybridGradientBoostedAnnealing, + ) lama_register["MultiStageHybridGradientBoostedAnnealing"] = MultiStageHybridGradientBoostedAnnealing - res = NonObjectOptimizer(method="LLAMAMultiStageHybridGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStageHybridGradientBoostedAnnealing = NonObjectOptimizer(method="LLAMAMultiStageHybridGradientBoostedAnnealing").set_name("LLAMAMultiStageHybridGradientBoostedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStageHybridGradientBoostedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStageHybridGradientBoostedAnnealing = NonObjectOptimizer( + method="LLAMAMultiStageHybridGradientBoostedAnnealing" + ).set_name("LLAMAMultiStageHybridGradientBoostedAnnealing", register=True) +except Exception as e: # MultiStageHybridGradientBoostedAnnealing print("MultiStageHybridGradientBoostedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStrategyAdaptiveGradientEvolution import MultiStrategyAdaptiveGradientEvolution +try: # MultiStrategyAdaptiveGradientEvolution + from nevergrad.optimization.lama.MultiStrategyAdaptiveGradientEvolution import ( + MultiStrategyAdaptiveGradientEvolution, + ) lama_register["MultiStrategyAdaptiveGradientEvolution"] = MultiStrategyAdaptiveGradientEvolution - res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyAdaptiveGradientEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveGradientEvolution").set_name("LLAMAMultiStrategyAdaptiveGradientEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveGradientEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyAdaptiveGradientEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyAdaptiveGradientEvolution" + ).set_name("LLAMAMultiStrategyAdaptiveGradientEvolution", register=True) +except Exception as e: # MultiStrategyAdaptiveGradientEvolution print("MultiStrategyAdaptiveGradientEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStrategyAdaptiveSwarmDifferentialEvolution import MultiStrategyAdaptiveSwarmDifferentialEvolution - - lama_register["MultiStrategyAdaptiveSwarmDifferentialEvolution"] = MultiStrategyAdaptiveSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution").set_name("LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # MultiStrategyAdaptiveSwarmDifferentialEvolution + from nevergrad.optimization.lama.MultiStrategyAdaptiveSwarmDifferentialEvolution import ( + MultiStrategyAdaptiveSwarmDifferentialEvolution, + ) + + lama_register["MultiStrategyAdaptiveSwarmDifferentialEvolution"] = ( + MultiStrategyAdaptiveSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAMultiStrategyAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: # MultiStrategyAdaptiveSwarmDifferentialEvolution print("MultiStrategyAdaptiveSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStrategyDifferentialEvolution import MultiStrategyDifferentialEvolution +try: # MultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.MultiStrategyDifferentialEvolution import ( + MultiStrategyDifferentialEvolution, + ) lama_register["MultiStrategyDifferentialEvolution"] = MultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMAMultiStrategyDifferentialEvolution").set_name("LLAMAMultiStrategyDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMAMultiStrategyDifferentialEvolution" + ).set_name("LLAMAMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # MultiStrategyDifferentialEvolution print("MultiStrategyDifferentialEvolution can not be imported: ", e) -try: +try: # MultiStrategyMemeticAlgorithm from nevergrad.optimization.lama.MultiStrategyMemeticAlgorithm import MultiStrategyMemeticAlgorithm lama_register["MultiStrategyMemeticAlgorithm"] = MultiStrategyMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAMultiStrategyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAMultiStrategyMemeticAlgorithm").set_name("LLAMAMultiStrategyMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAMultiStrategyMemeticAlgorithm" + ).set_name("LLAMAMultiStrategyMemeticAlgorithm", register=True) +except Exception as e: # MultiStrategyMemeticAlgorithm print("MultiStrategyMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStrategyQuantumCognitionOptimizerV9 import MultiStrategyQuantumCognitionOptimizerV9 +try: # MultiStrategyQuantumCognitionOptimizerV9 + from nevergrad.optimization.lama.MultiStrategyQuantumCognitionOptimizerV9 import ( + MultiStrategyQuantumCognitionOptimizerV9, + ) lama_register["MultiStrategyQuantumCognitionOptimizerV9"] = MultiStrategyQuantumCognitionOptimizerV9 - res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumCognitionOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyQuantumCognitionOptimizerV9 = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumCognitionOptimizerV9").set_name("LLAMAMultiStrategyQuantumCognitionOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumCognitionOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyQuantumCognitionOptimizerV9 = NonObjectOptimizer( + method="LLAMAMultiStrategyQuantumCognitionOptimizerV9" + ).set_name("LLAMAMultiStrategyQuantumCognitionOptimizerV9", register=True) +except Exception as e: # MultiStrategyQuantumCognitionOptimizerV9 print("MultiStrategyQuantumCognitionOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.MultiStrategyQuantumLevyOptimizer import MultiStrategyQuantumLevyOptimizer +try: # MultiStrategyQuantumLevyOptimizer + from nevergrad.optimization.lama.MultiStrategyQuantumLevyOptimizer import ( + MultiStrategyQuantumLevyOptimizer, + ) lama_register["MultiStrategyQuantumLevyOptimizer"] = MultiStrategyQuantumLevyOptimizer - res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumLevyOptimizer").set_name("LLAMAMultiStrategyQuantumLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategyQuantumLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategyQuantumLevyOptimizer = NonObjectOptimizer( + method="LLAMAMultiStrategyQuantumLevyOptimizer" + ).set_name("LLAMAMultiStrategyQuantumLevyOptimizer", register=True) +except Exception as e: # MultiStrategyQuantumLevyOptimizer print("MultiStrategyQuantumLevyOptimizer can not be imported: ", e) -try: +try: # MultiStrategySelfAdaptiveDE from nevergrad.optimization.lama.MultiStrategySelfAdaptiveDE import MultiStrategySelfAdaptiveDE lama_register["MultiStrategySelfAdaptiveDE"] = MultiStrategySelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE").set_name("LLAMAMultiStrategySelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMAMultiStrategySelfAdaptiveDE").set_name( + "LLAMAMultiStrategySelfAdaptiveDE", register=True + ) +except Exception as e: # MultiStrategySelfAdaptiveDE print("MultiStrategySelfAdaptiveDE can not be imported: ", e) -try: +try: # MultiSwarmAdaptiveDE_PSO from nevergrad.optimization.lama.MultiSwarmAdaptiveDE_PSO import MultiSwarmAdaptiveDE_PSO lama_register["MultiSwarmAdaptiveDE_PSO"] = MultiSwarmAdaptiveDE_PSO - res = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAMultiSwarmAdaptiveDE_PSO = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO").set_name("LLAMAMultiSwarmAdaptiveDE_PSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAMultiSwarmAdaptiveDE_PSO = NonObjectOptimizer(method="LLAMAMultiSwarmAdaptiveDE_PSO").set_name( + "LLAMAMultiSwarmAdaptiveDE_PSO", register=True + ) +except Exception as e: # MultiSwarmAdaptiveDE_PSO print("MultiSwarmAdaptiveDE_PSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.NovelAdaptiveHarmonicFireworksTabuSearch import NovelAdaptiveHarmonicFireworksTabuSearch +try: # NovelAdaptiveHarmonicFireworksTabuSearch + from nevergrad.optimization.lama.NovelAdaptiveHarmonicFireworksTabuSearch import ( + NovelAdaptiveHarmonicFireworksTabuSearch, + ) lama_register["NovelAdaptiveHarmonicFireworksTabuSearch"] = NovelAdaptiveHarmonicFireworksTabuSearch - res = NonObjectOptimizer(method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMANovelAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer(method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch").set_name("LLAMANovelAdaptiveHarmonicFireworksTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMANovelAdaptiveHarmonicFireworksTabuSearch = NonObjectOptimizer( + method="LLAMANovelAdaptiveHarmonicFireworksTabuSearch" + ).set_name("LLAMANovelAdaptiveHarmonicFireworksTabuSearch", register=True) +except Exception as e: # NovelAdaptiveHarmonicFireworksTabuSearch print("NovelAdaptiveHarmonicFireworksTabuSearch can not be imported: ", e) -try: +try: # NovelDynamicFireworkAlgorithm from nevergrad.optimization.lama.NovelDynamicFireworkAlgorithm import NovelDynamicFireworkAlgorithm lama_register["NovelDynamicFireworkAlgorithm"] = NovelDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMANovelDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMANovelDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMANovelDynamicFireworkAlgorithm").set_name("LLAMANovelDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMANovelDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMANovelDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMANovelDynamicFireworkAlgorithm" + ).set_name("LLAMANovelDynamicFireworkAlgorithm", register=True) +except Exception as e: # NovelDynamicFireworkAlgorithm print("NovelDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 import NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 - - lama_register["NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2"] = NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 - res = NonObjectOptimizer(method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer(method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2").set_name("LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2", register=True) -except Exception as e: +try: # NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 + from nevergrad.optimization.lama.NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 import ( + NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2, + ) + + lama_register["NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2"] = ( + NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 + ) + # res = NonObjectOptimizer(method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 = NonObjectOptimizer( + method="LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2" + ).set_name("LLAMANovelEnhancedDiversifiedMetaHeuristicAlgorithmV2", register=True) +except Exception as e: # NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 print("NovelEnhancedDiversifiedMetaHeuristicAlgorithmV2 can not be imported: ", e) -try: +try: # NovelHarmonyTabuSearch from nevergrad.optimization.lama.NovelHarmonyTabuSearch import NovelHarmonyTabuSearch lama_register["NovelHarmonyTabuSearch"] = NovelHarmonyTabuSearch - res = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMANovelHarmonyTabuSearch = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch").set_name("LLAMANovelHarmonyTabuSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMANovelHarmonyTabuSearch = NonObjectOptimizer(method="LLAMANovelHarmonyTabuSearch").set_name( + "LLAMANovelHarmonyTabuSearch", register=True + ) +except Exception as e: # NovelHarmonyTabuSearch print("NovelHarmonyTabuSearch can not be imported: ", e) -try: +try: # ODEMF from nevergrad.optimization.lama.ODEMF import ODEMF lama_register["ODEMF"] = ODEMF - res = NonObjectOptimizer(method="LLAMAODEMF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAODEMF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAODEMF = NonObjectOptimizer(method="LLAMAODEMF").set_name("LLAMAODEMF", register=True) -except Exception as e: +except Exception as e: # ODEMF print("ODEMF can not be imported: ", e) -try: +try: # ORAMED from nevergrad.optimization.lama.ORAMED import ORAMED lama_register["ORAMED"] = ORAMED - res = NonObjectOptimizer(method="LLAMAORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAORAMED")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAORAMED = NonObjectOptimizer(method="LLAMAORAMED").set_name("LLAMAORAMED", register=True) -except Exception as e: +except Exception as e: # ORAMED print("ORAMED can not be imported: ", e) -try: +try: # OctopusSwarmAlgorithm from nevergrad.optimization.lama.OctopusSwarmAlgorithm import OctopusSwarmAlgorithm lama_register["OctopusSwarmAlgorithm"] = OctopusSwarmAlgorithm - res = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOctopusSwarmAlgorithm = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm").set_name("LLAMAOctopusSwarmAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOctopusSwarmAlgorithm = NonObjectOptimizer(method="LLAMAOctopusSwarmAlgorithm").set_name( + "LLAMAOctopusSwarmAlgorithm", register=True + ) +except Exception as e: # OctopusSwarmAlgorithm print("OctopusSwarmAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalAdaptiveDifferentialEvolution import OptimalAdaptiveDifferentialEvolution +try: # OptimalAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialEvolution import ( + OptimalAdaptiveDifferentialEvolution, + ) lama_register["OptimalAdaptiveDifferentialEvolution"] = OptimalAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialEvolution").set_name("LLAMAOptimalAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveDifferentialEvolution" + ).set_name("LLAMAOptimalAdaptiveDifferentialEvolution", register=True) +except Exception as e: # OptimalAdaptiveDifferentialEvolution print("OptimalAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalAdaptiveDifferentialSearch import OptimalAdaptiveDifferentialSearch +try: # OptimalAdaptiveDifferentialSearch + from nevergrad.optimization.lama.OptimalAdaptiveDifferentialSearch import ( + OptimalAdaptiveDifferentialSearch, + ) lama_register["OptimalAdaptiveDifferentialSearch"] = OptimalAdaptiveDifferentialSearch - res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialSearch").set_name("LLAMAOptimalAdaptiveDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveDifferentialSearch" + ).set_name("LLAMAOptimalAdaptiveDifferentialSearch", register=True) +except Exception as e: # OptimalAdaptiveDifferentialSearch print("OptimalAdaptiveDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalAdaptiveMutationEnhancedSearch import OptimalAdaptiveMutationEnhancedSearch +try: # OptimalAdaptiveMutationEnhancedSearch + from nevergrad.optimization.lama.OptimalAdaptiveMutationEnhancedSearch import ( + OptimalAdaptiveMutationEnhancedSearch, + ) lama_register["OptimalAdaptiveMutationEnhancedSearch"] = OptimalAdaptiveMutationEnhancedSearch - res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveMutationEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalAdaptiveMutationEnhancedSearch = NonObjectOptimizer(method="LLAMAOptimalAdaptiveMutationEnhancedSearch").set_name("LLAMAOptimalAdaptiveMutationEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveMutationEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalAdaptiveMutationEnhancedSearch = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveMutationEnhancedSearch" + ).set_name("LLAMAOptimalAdaptiveMutationEnhancedSearch", register=True) +except Exception as e: # OptimalAdaptiveMutationEnhancedSearch print("OptimalAdaptiveMutationEnhancedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalAdaptiveSwarmDifferentialEvolution import OptimalAdaptiveSwarmDifferentialEvolution +try: # OptimalAdaptiveSwarmDifferentialEvolution + from nevergrad.optimization.lama.OptimalAdaptiveSwarmDifferentialEvolution import ( + OptimalAdaptiveSwarmDifferentialEvolution, + ) lama_register["OptimalAdaptiveSwarmDifferentialEvolution"] = OptimalAdaptiveSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution").set_name("LLAMAOptimalAdaptiveSwarmDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimalAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMAOptimalAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: # OptimalAdaptiveSwarmDifferentialEvolution print("OptimalAdaptiveSwarmDifferentialEvolution can not be imported: ", e) -try: +try: # OptimalBalanceSearch from nevergrad.optimization.lama.OptimalBalanceSearch import OptimalBalanceSearch lama_register["OptimalBalanceSearch"] = OptimalBalanceSearch - res = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalBalanceSearch = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch").set_name("LLAMAOptimalBalanceSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalBalanceSearch = NonObjectOptimizer(method="LLAMAOptimalBalanceSearch").set_name( + "LLAMAOptimalBalanceSearch", register=True + ) +except Exception as e: # OptimalBalanceSearch print("OptimalBalanceSearch can not be imported: ", e) -try: +try: # OptimalCohortDiversityOptimizer from nevergrad.optimization.lama.OptimalCohortDiversityOptimizer import OptimalCohortDiversityOptimizer lama_register["OptimalCohortDiversityOptimizer"] = OptimalCohortDiversityOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalCohortDiversityOptimizer = NonObjectOptimizer(method="LLAMAOptimalCohortDiversityOptimizer").set_name("LLAMAOptimalCohortDiversityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalCohortDiversityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalCohortDiversityOptimizer = NonObjectOptimizer( + method="LLAMAOptimalCohortDiversityOptimizer" + ).set_name("LLAMAOptimalCohortDiversityOptimizer", register=True) +except Exception as e: # OptimalCohortDiversityOptimizer print("OptimalCohortDiversityOptimizer can not be imported: ", e) -try: +try: # OptimalConvergenceDE from nevergrad.optimization.lama.OptimalConvergenceDE import OptimalConvergenceDE lama_register["OptimalConvergenceDE"] = OptimalConvergenceDE - res = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalConvergenceDE = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE").set_name("LLAMAOptimalConvergenceDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalConvergenceDE = NonObjectOptimizer(method="LLAMAOptimalConvergenceDE").set_name( + "LLAMAOptimalConvergenceDE", register=True + ) +except Exception as e: # OptimalConvergenceDE print("OptimalConvergenceDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalDynamicAdaptiveEvolutionOptimizer import OptimalDynamicAdaptiveEvolutionOptimizer +try: # OptimalDynamicAdaptiveEvolutionOptimizer + from nevergrad.optimization.lama.OptimalDynamicAdaptiveEvolutionOptimizer import ( + OptimalDynamicAdaptiveEvolutionOptimizer, + ) lama_register["OptimalDynamicAdaptiveEvolutionOptimizer"] = OptimalDynamicAdaptiveEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalDynamicAdaptiveEvolutionOptimizer = NonObjectOptimizer(method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer").set_name("LLAMAOptimalDynamicAdaptiveEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalDynamicAdaptiveEvolutionOptimizer = NonObjectOptimizer( + method="LLAMAOptimalDynamicAdaptiveEvolutionOptimizer" + ).set_name("LLAMAOptimalDynamicAdaptiveEvolutionOptimizer", register=True) +except Exception as e: # OptimalDynamicAdaptiveEvolutionOptimizer print("OptimalDynamicAdaptiveEvolutionOptimizer can not be imported: ", e) -try: +try: # OptimalDynamicMutationSearch from nevergrad.optimization.lama.OptimalDynamicMutationSearch import OptimalDynamicMutationSearch lama_register["OptimalDynamicMutationSearch"] = OptimalDynamicMutationSearch - res = NonObjectOptimizer(method="LLAMAOptimalDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalDynamicMutationSearch = NonObjectOptimizer(method="LLAMAOptimalDynamicMutationSearch").set_name("LLAMAOptimalDynamicMutationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalDynamicMutationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalDynamicMutationSearch = NonObjectOptimizer( + method="LLAMAOptimalDynamicMutationSearch" + ).set_name("LLAMAOptimalDynamicMutationSearch", register=True) +except Exception as e: # OptimalDynamicMutationSearch print("OptimalDynamicMutationSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV14 import OptimalDynamicPrecisionOptimizerV14 +try: # OptimalDynamicPrecisionOptimizerV14 + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV14 import ( + OptimalDynamicPrecisionOptimizerV14, + ) lama_register["OptimalDynamicPrecisionOptimizerV14"] = OptimalDynamicPrecisionOptimizerV14 - res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalDynamicPrecisionOptimizerV14 = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV14").set_name("LLAMAOptimalDynamicPrecisionOptimizerV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalDynamicPrecisionOptimizerV14 = NonObjectOptimizer( + method="LLAMAOptimalDynamicPrecisionOptimizerV14" + ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV14", register=True) +except Exception as e: # OptimalDynamicPrecisionOptimizerV14 print("OptimalDynamicPrecisionOptimizerV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV21 import OptimalDynamicPrecisionOptimizerV21 +try: # OptimalDynamicPrecisionOptimizerV21 + from nevergrad.optimization.lama.OptimalDynamicPrecisionOptimizerV21 import ( + OptimalDynamicPrecisionOptimizerV21, + ) lama_register["OptimalDynamicPrecisionOptimizerV21"] = OptimalDynamicPrecisionOptimizerV21 - res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalDynamicPrecisionOptimizerV21 = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV21").set_name("LLAMAOptimalDynamicPrecisionOptimizerV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalDynamicPrecisionOptimizerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalDynamicPrecisionOptimizerV21 = NonObjectOptimizer( + method="LLAMAOptimalDynamicPrecisionOptimizerV21" + ).set_name("LLAMAOptimalDynamicPrecisionOptimizerV21", register=True) +except Exception as e: # OptimalDynamicPrecisionOptimizerV21 print("OptimalDynamicPrecisionOptimizerV21 can not be imported: ", e) -try: +try: # OptimalEnhancedRAMEDS from nevergrad.optimization.lama.OptimalEnhancedRAMEDS import OptimalEnhancedRAMEDS lama_register["OptimalEnhancedRAMEDS"] = OptimalEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS").set_name("LLAMAOptimalEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalEnhancedRAMEDS").set_name( + "LLAMAOptimalEnhancedRAMEDS", register=True + ) +except Exception as e: # OptimalEnhancedRAMEDS print("OptimalEnhancedRAMEDS can not be imported: ", e) -try: +try: # OptimalEnhancedStrategyDE from nevergrad.optimization.lama.OptimalEnhancedStrategyDE import OptimalEnhancedStrategyDE lama_register["OptimalEnhancedStrategyDE"] = OptimalEnhancedStrategyDE - res = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE").set_name("LLAMAOptimalEnhancedStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMAOptimalEnhancedStrategyDE").set_name( + "LLAMAOptimalEnhancedStrategyDE", register=True + ) +except Exception as e: # OptimalEnhancedStrategyDE print("OptimalEnhancedStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientHybridOptimizerV8 import OptimalEvolutionaryGradientHybridOptimizerV8 - - lama_register["OptimalEvolutionaryGradientHybridOptimizerV8"] = OptimalEvolutionaryGradientHybridOptimizerV8 - res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalEvolutionaryGradientHybridOptimizerV8 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8").set_name("LLAMAOptimalEvolutionaryGradientHybridOptimizerV8", register=True) -except Exception as e: +try: # OptimalEvolutionaryGradientHybridOptimizerV8 + from nevergrad.optimization.lama.OptimalEvolutionaryGradientHybridOptimizerV8 import ( + OptimalEvolutionaryGradientHybridOptimizerV8, + ) + + lama_register["OptimalEvolutionaryGradientHybridOptimizerV8"] = ( + OptimalEvolutionaryGradientHybridOptimizerV8 + ) + # res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalEvolutionaryGradientHybridOptimizerV8 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientHybridOptimizerV8" + ).set_name("LLAMAOptimalEvolutionaryGradientHybridOptimizerV8", register=True) +except Exception as e: # OptimalEvolutionaryGradientHybridOptimizerV8 print("OptimalEvolutionaryGradientHybridOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV11 import OptimalEvolutionaryGradientOptimizerV11 +try: # OptimalEvolutionaryGradientOptimizerV11 + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV11 import ( + OptimalEvolutionaryGradientOptimizerV11, + ) lama_register["OptimalEvolutionaryGradientOptimizerV11"] = OptimalEvolutionaryGradientOptimizerV11 - res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalEvolutionaryGradientOptimizerV11 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV11").set_name("LLAMAOptimalEvolutionaryGradientOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalEvolutionaryGradientOptimizerV11 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientOptimizerV11" + ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV11", register=True) +except Exception as e: # OptimalEvolutionaryGradientOptimizerV11 print("OptimalEvolutionaryGradientOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV25 import OptimalEvolutionaryGradientOptimizerV25 +try: # OptimalEvolutionaryGradientOptimizerV25 + from nevergrad.optimization.lama.OptimalEvolutionaryGradientOptimizerV25 import ( + OptimalEvolutionaryGradientOptimizerV25, + ) lama_register["OptimalEvolutionaryGradientOptimizerV25"] = OptimalEvolutionaryGradientOptimizerV25 - res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalEvolutionaryGradientOptimizerV25 = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV25").set_name("LLAMAOptimalEvolutionaryGradientOptimizerV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalEvolutionaryGradientOptimizerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalEvolutionaryGradientOptimizerV25 = NonObjectOptimizer( + method="LLAMAOptimalEvolutionaryGradientOptimizerV25" + ).set_name("LLAMAOptimalEvolutionaryGradientOptimizerV25", register=True) +except Exception as e: # OptimalEvolutionaryGradientOptimizerV25 print("OptimalEvolutionaryGradientOptimizerV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalHybridDifferentialAnnealingOptimizer import OptimalHybridDifferentialAnnealingOptimizer +try: # OptimalHybridDifferentialAnnealingOptimizer + from nevergrad.optimization.lama.OptimalHybridDifferentialAnnealingOptimizer import ( + OptimalHybridDifferentialAnnealingOptimizer, + ) lama_register["OptimalHybridDifferentialAnnealingOptimizer"] = OptimalHybridDifferentialAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer(method="LLAMAOptimalHybridDifferentialAnnealingOptimizer").set_name("LLAMAOptimalHybridDifferentialAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalHybridDifferentialAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalHybridDifferentialAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAOptimalHybridDifferentialAnnealingOptimizer" + ).set_name("LLAMAOptimalHybridDifferentialAnnealingOptimizer", register=True) +except Exception as e: # OptimalHybridDifferentialAnnealingOptimizer print("OptimalHybridDifferentialAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalHyperStrategicOptimizerV51 import OptimalHyperStrategicOptimizerV51 +try: # OptimalHyperStrategicOptimizerV51 + from nevergrad.optimization.lama.OptimalHyperStrategicOptimizerV51 import ( + OptimalHyperStrategicOptimizerV51, + ) lama_register["OptimalHyperStrategicOptimizerV51"] = OptimalHyperStrategicOptimizerV51 - res = NonObjectOptimizer(method="LLAMAOptimalHyperStrategicOptimizerV51")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalHyperStrategicOptimizerV51 = NonObjectOptimizer(method="LLAMAOptimalHyperStrategicOptimizerV51").set_name("LLAMAOptimalHyperStrategicOptimizerV51", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalHyperStrategicOptimizerV51")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalHyperStrategicOptimizerV51 = NonObjectOptimizer( + method="LLAMAOptimalHyperStrategicOptimizerV51" + ).set_name("LLAMAOptimalHyperStrategicOptimizerV51", register=True) +except Exception as e: # OptimalHyperStrategicOptimizerV51 print("OptimalHyperStrategicOptimizerV51 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalPrecisionDynamicAdaptationOptimizer import OptimalPrecisionDynamicAdaptationOptimizer +try: # OptimalPrecisionDynamicAdaptationOptimizer + from nevergrad.optimization.lama.OptimalPrecisionDynamicAdaptationOptimizer import ( + OptimalPrecisionDynamicAdaptationOptimizer, + ) lama_register["OptimalPrecisionDynamicAdaptationOptimizer"] = OptimalPrecisionDynamicAdaptationOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalPrecisionDynamicAdaptationOptimizer = NonObjectOptimizer(method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer").set_name("LLAMAOptimalPrecisionDynamicAdaptationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalPrecisionDynamicAdaptationOptimizer = NonObjectOptimizer( + method="LLAMAOptimalPrecisionDynamicAdaptationOptimizer" + ).set_name("LLAMAOptimalPrecisionDynamicAdaptationOptimizer", register=True) +except Exception as e: # OptimalPrecisionDynamicAdaptationOptimizer print("OptimalPrecisionDynamicAdaptationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryOptimizerV37 import OptimalPrecisionEvolutionaryOptimizerV37 +try: # OptimalPrecisionEvolutionaryOptimizerV37 + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryOptimizerV37 import ( + OptimalPrecisionEvolutionaryOptimizerV37, + ) lama_register["OptimalPrecisionEvolutionaryOptimizerV37"] = OptimalPrecisionEvolutionaryOptimizerV37 - res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalPrecisionEvolutionaryOptimizerV37 = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37").set_name("LLAMAOptimalPrecisionEvolutionaryOptimizerV37", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalPrecisionEvolutionaryOptimizerV37 = NonObjectOptimizer( + method="LLAMAOptimalPrecisionEvolutionaryOptimizerV37" + ).set_name("LLAMAOptimalPrecisionEvolutionaryOptimizerV37", register=True) +except Exception as e: # OptimalPrecisionEvolutionaryOptimizerV37 print("OptimalPrecisionEvolutionaryOptimizerV37 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryThermalOptimizer import OptimalPrecisionEvolutionaryThermalOptimizer - - lama_register["OptimalPrecisionEvolutionaryThermalOptimizer"] = OptimalPrecisionEvolutionaryThermalOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAOptimalPrecisionEvolutionaryThermalOptimizer", register=True) -except Exception as e: +try: # OptimalPrecisionEvolutionaryThermalOptimizer + from nevergrad.optimization.lama.OptimalPrecisionEvolutionaryThermalOptimizer import ( + OptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["OptimalPrecisionEvolutionaryThermalOptimizer"] = ( + OptimalPrecisionEvolutionaryThermalOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: # OptimalPrecisionEvolutionaryThermalOptimizer print("OptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) -try: +try: # OptimalPrecisionHybridSearchV3 from nevergrad.optimization.lama.OptimalPrecisionHybridSearchV3 import OptimalPrecisionHybridSearchV3 lama_register["OptimalPrecisionHybridSearchV3"] = OptimalPrecisionHybridSearchV3 - res = NonObjectOptimizer(method="LLAMAOptimalPrecisionHybridSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalPrecisionHybridSearchV3 = NonObjectOptimizer(method="LLAMAOptimalPrecisionHybridSearchV3").set_name("LLAMAOptimalPrecisionHybridSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalPrecisionHybridSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalPrecisionHybridSearchV3 = NonObjectOptimizer( + method="LLAMAOptimalPrecisionHybridSearchV3" + ).set_name("LLAMAOptimalPrecisionHybridSearchV3", register=True) +except Exception as e: # OptimalPrecisionHybridSearchV3 print("OptimalPrecisionHybridSearchV3 can not be imported: ", e) -try: +try: # OptimalQuantumSynergyStrategy from nevergrad.optimization.lama.OptimalQuantumSynergyStrategy import OptimalQuantumSynergyStrategy lama_register["OptimalQuantumSynergyStrategy"] = OptimalQuantumSynergyStrategy - res = NonObjectOptimizer(method="LLAMAOptimalQuantumSynergyStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalQuantumSynergyStrategy = NonObjectOptimizer(method="LLAMAOptimalQuantumSynergyStrategy").set_name("LLAMAOptimalQuantumSynergyStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalQuantumSynergyStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalQuantumSynergyStrategy = NonObjectOptimizer( + method="LLAMAOptimalQuantumSynergyStrategy" + ).set_name("LLAMAOptimalQuantumSynergyStrategy", register=True) +except Exception as e: # OptimalQuantumSynergyStrategy print("OptimalQuantumSynergyStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalRefinedEnhancedUltraRefinedRAMEDS import OptimalRefinedEnhancedUltraRefinedRAMEDS +try: # OptimalRefinedEnhancedUltraRefinedRAMEDS + from nevergrad.optimization.lama.OptimalRefinedEnhancedUltraRefinedRAMEDS import ( + OptimalRefinedEnhancedUltraRefinedRAMEDS, + ) lama_register["OptimalRefinedEnhancedUltraRefinedRAMEDS"] = OptimalRefinedEnhancedUltraRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS").set_name("LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS" + ).set_name("LLAMAOptimalRefinedEnhancedUltraRefinedRAMEDS", register=True) +except Exception as e: # OptimalRefinedEnhancedUltraRefinedRAMEDS print("OptimalRefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalSelectiveEvolutionaryOptimizerV20 import OptimalSelectiveEvolutionaryOptimizerV20 +try: # OptimalSelectiveEvolutionaryOptimizerV20 + from nevergrad.optimization.lama.OptimalSelectiveEvolutionaryOptimizerV20 import ( + OptimalSelectiveEvolutionaryOptimizerV20, + ) lama_register["OptimalSelectiveEvolutionaryOptimizerV20"] = OptimalSelectiveEvolutionaryOptimizerV20 - res = NonObjectOptimizer(method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalSelectiveEvolutionaryOptimizerV20 = NonObjectOptimizer(method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20").set_name("LLAMAOptimalSelectiveEvolutionaryOptimizerV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalSelectiveEvolutionaryOptimizerV20 = NonObjectOptimizer( + method="LLAMAOptimalSelectiveEvolutionaryOptimizerV20" + ).set_name("LLAMAOptimalSelectiveEvolutionaryOptimizerV20", register=True) +except Exception as e: # OptimalSelectiveEvolutionaryOptimizerV20 print("OptimalSelectiveEvolutionaryOptimizerV20 can not be imported: ", e) -try: +try: # OptimalSmartRefinedRAMEDS from nevergrad.optimization.lama.OptimalSmartRefinedRAMEDS import OptimalSmartRefinedRAMEDS lama_register["OptimalSmartRefinedRAMEDS"] = OptimalSmartRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalSmartRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS").set_name("LLAMAOptimalSmartRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalSmartRefinedRAMEDS = NonObjectOptimizer(method="LLAMAOptimalSmartRefinedRAMEDS").set_name( + "LLAMAOptimalSmartRefinedRAMEDS", register=True + ) +except Exception as e: # OptimalSmartRefinedRAMEDS print("OptimalSmartRefinedRAMEDS can not be imported: ", e) -try: +try: # OptimalSpiralCentroidSearch from nevergrad.optimization.lama.OptimalSpiralCentroidSearch import OptimalSpiralCentroidSearch lama_register["OptimalSpiralCentroidSearch"] = OptimalSpiralCentroidSearch - res = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalSpiralCentroidSearch = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch").set_name("LLAMAOptimalSpiralCentroidSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalSpiralCentroidSearch = NonObjectOptimizer(method="LLAMAOptimalSpiralCentroidSearch").set_name( + "LLAMAOptimalSpiralCentroidSearch", register=True + ) +except Exception as e: # OptimalSpiralCentroidSearch print("OptimalSpiralCentroidSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimalStrategicAdaptiveOptimizer import OptimalStrategicAdaptiveOptimizer +try: # OptimalStrategicAdaptiveOptimizer + from nevergrad.optimization.lama.OptimalStrategicAdaptiveOptimizer import ( + OptimalStrategicAdaptiveOptimizer, + ) lama_register["OptimalStrategicAdaptiveOptimizer"] = OptimalStrategicAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAOptimalStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalStrategicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAOptimalStrategicAdaptiveOptimizer").set_name("LLAMAOptimalStrategicAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalStrategicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalStrategicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAOptimalStrategicAdaptiveOptimizer" + ).set_name("LLAMAOptimalStrategicAdaptiveOptimizer", register=True) +except Exception as e: # OptimalStrategicAdaptiveOptimizer print("OptimalStrategicAdaptiveOptimizer can not be imported: ", e) -try: +try: # OptimalStrategicHybridDE from nevergrad.optimization.lama.OptimalStrategicHybridDE import OptimalStrategicHybridDE lama_register["OptimalStrategicHybridDE"] = OptimalStrategicHybridDE - res = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimalStrategicHybridDE = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE").set_name("LLAMAOptimalStrategicHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimalStrategicHybridDE = NonObjectOptimizer(method="LLAMAOptimalStrategicHybridDE").set_name( + "LLAMAOptimalStrategicHybridDE", register=True + ) +except Exception as e: # OptimalStrategicHybridDE print("OptimalStrategicHybridDE can not be imported: ", e) -try: +try: # OptimallyBalancedQuantumStrategy from nevergrad.optimization.lama.OptimallyBalancedQuantumStrategy import OptimallyBalancedQuantumStrategy lama_register["OptimallyBalancedQuantumStrategy"] = OptimallyBalancedQuantumStrategy - res = NonObjectOptimizer(method="LLAMAOptimallyBalancedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimallyBalancedQuantumStrategy = NonObjectOptimizer(method="LLAMAOptimallyBalancedQuantumStrategy").set_name("LLAMAOptimallyBalancedQuantumStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimallyBalancedQuantumStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimallyBalancedQuantumStrategy = NonObjectOptimizer( + method="LLAMAOptimallyBalancedQuantumStrategy" + ).set_name("LLAMAOptimallyBalancedQuantumStrategy", register=True) +except Exception as e: # OptimallyBalancedQuantumStrategy print("OptimallyBalancedQuantumStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveDifferentialClimber import OptimizedAdaptiveDifferentialClimber +try: # OptimizedAdaptiveDifferentialClimber + from nevergrad.optimization.lama.OptimizedAdaptiveDifferentialClimber import ( + OptimizedAdaptiveDifferentialClimber, + ) lama_register["OptimizedAdaptiveDifferentialClimber"] = OptimizedAdaptiveDifferentialClimber - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDifferentialClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveDifferentialClimber = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDifferentialClimber").set_name("LLAMAOptimizedAdaptiveDifferentialClimber", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDifferentialClimber")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveDifferentialClimber = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDifferentialClimber" + ).set_name("LLAMAOptimizedAdaptiveDifferentialClimber", register=True) +except Exception as e: # OptimizedAdaptiveDifferentialClimber print("OptimizedAdaptiveDifferentialClimber can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategy import OptimizedAdaptiveDualPhaseStrategy +try: # OptimizedAdaptiveDualPhaseStrategy + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategy import ( + OptimizedAdaptiveDualPhaseStrategy, + ) lama_register["OptimizedAdaptiveDualPhaseStrategy"] = OptimizedAdaptiveDualPhaseStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategy").set_name("LLAMAOptimizedAdaptiveDualPhaseStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDualPhaseStrategy" + ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: # OptimizedAdaptiveDualPhaseStrategy print("OptimizedAdaptiveDualPhaseStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategyV4 import OptimizedAdaptiveDualPhaseStrategyV4 +try: # OptimizedAdaptiveDualPhaseStrategyV4 + from nevergrad.optimization.lama.OptimizedAdaptiveDualPhaseStrategyV4 import ( + OptimizedAdaptiveDualPhaseStrategyV4, + ) lama_register["OptimizedAdaptiveDualPhaseStrategyV4"] = OptimizedAdaptiveDualPhaseStrategyV4 - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveDualPhaseStrategyV4 = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4").set_name("LLAMAOptimizedAdaptiveDualPhaseStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveDualPhaseStrategyV4 = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDualPhaseStrategyV4" + ).set_name("LLAMAOptimizedAdaptiveDualPhaseStrategyV4", register=True) +except Exception as e: # OptimizedAdaptiveDualPhaseStrategyV4 print("OptimizedAdaptiveDualPhaseStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveDynamicStrategyV34 import OptimizedAdaptiveDynamicStrategyV34 +try: # OptimizedAdaptiveDynamicStrategyV34 + from nevergrad.optimization.lama.OptimizedAdaptiveDynamicStrategyV34 import ( + OptimizedAdaptiveDynamicStrategyV34, + ) lama_register["OptimizedAdaptiveDynamicStrategyV34"] = OptimizedAdaptiveDynamicStrategyV34 - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDynamicStrategyV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveDynamicStrategyV34 = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDynamicStrategyV34").set_name("LLAMAOptimizedAdaptiveDynamicStrategyV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveDynamicStrategyV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveDynamicStrategyV34 = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveDynamicStrategyV34" + ).set_name("LLAMAOptimizedAdaptiveDynamicStrategyV34", register=True) +except Exception as e: # OptimizedAdaptiveDynamicStrategyV34 print("OptimizedAdaptiveDynamicStrategyV34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveGlobalLocalSearch import OptimizedAdaptiveGlobalLocalSearch +try: # OptimizedAdaptiveGlobalLocalSearch + from nevergrad.optimization.lama.OptimizedAdaptiveGlobalLocalSearch import ( + OptimizedAdaptiveGlobalLocalSearch, + ) lama_register["OptimizedAdaptiveGlobalLocalSearch"] = OptimizedAdaptiveGlobalLocalSearch - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveGlobalLocalSearch = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveGlobalLocalSearch").set_name("LLAMAOptimizedAdaptiveGlobalLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveGlobalLocalSearch = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveGlobalLocalSearch" + ).set_name("LLAMAOptimizedAdaptiveGlobalLocalSearch", register=True) +except Exception as e: # OptimizedAdaptiveGlobalLocalSearch print("OptimizedAdaptiveGlobalLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveQuantumGradientHybridStrategy import OptimizedAdaptiveQuantumGradientHybridStrategy - - lama_register["OptimizedAdaptiveQuantumGradientHybridStrategy"] = OptimizedAdaptiveQuantumGradientHybridStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy").set_name("LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy", register=True) -except Exception as e: +try: # OptimizedAdaptiveQuantumGradientHybridStrategy + from nevergrad.optimization.lama.OptimizedAdaptiveQuantumGradientHybridStrategy import ( + OptimizedAdaptiveQuantumGradientHybridStrategy, + ) + + lama_register["OptimizedAdaptiveQuantumGradientHybridStrategy"] = ( + OptimizedAdaptiveQuantumGradientHybridStrategy + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy" + ).set_name("LLAMAOptimizedAdaptiveQuantumGradientHybridStrategy", register=True) +except Exception as e: # OptimizedAdaptiveQuantumGradientHybridStrategy print("OptimizedAdaptiveQuantumGradientHybridStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedAdaptiveSimulatedAnnealingWithSmartMemory import OptimizedAdaptiveSimulatedAnnealingWithSmartMemory - - lama_register["OptimizedAdaptiveSimulatedAnnealingWithSmartMemory"] = OptimizedAdaptiveSimulatedAnnealingWithSmartMemory - res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) -except Exception as e: +try: # OptimizedAdaptiveSimulatedAnnealingWithSmartMemory + from nevergrad.optimization.lama.OptimizedAdaptiveSimulatedAnnealingWithSmartMemory import ( + OptimizedAdaptiveSimulatedAnnealingWithSmartMemory, + ) + + lama_register["OptimizedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( + OptimizedAdaptiveSimulatedAnnealingWithSmartMemory + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: # OptimizedAdaptiveSimulatedAnnealingWithSmartMemory print("OptimizedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedBalancedDualStrategyAdaptiveDE import OptimizedBalancedDualStrategyAdaptiveDE +try: # OptimizedBalancedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.OptimizedBalancedDualStrategyAdaptiveDE import ( + OptimizedBalancedDualStrategyAdaptiveDE, + ) lama_register["OptimizedBalancedDualStrategyAdaptiveDE"] = OptimizedBalancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedBalancedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # OptimizedBalancedDualStrategyAdaptiveDE print("OptimizedBalancedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedConvergenceIslandStrategy import OptimizedConvergenceIslandStrategy +try: # OptimizedConvergenceIslandStrategy + from nevergrad.optimization.lama.OptimizedConvergenceIslandStrategy import ( + OptimizedConvergenceIslandStrategy, + ) lama_register["OptimizedConvergenceIslandStrategy"] = OptimizedConvergenceIslandStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedConvergenceIslandStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedConvergenceIslandStrategy = NonObjectOptimizer(method="LLAMAOptimizedConvergenceIslandStrategy").set_name("LLAMAOptimizedConvergenceIslandStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedConvergenceIslandStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedConvergenceIslandStrategy = NonObjectOptimizer( + method="LLAMAOptimizedConvergenceIslandStrategy" + ).set_name("LLAMAOptimizedConvergenceIslandStrategy", register=True) +except Exception as e: # OptimizedConvergenceIslandStrategy print("OptimizedConvergenceIslandStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedConvergentAdaptiveEvolver import OptimizedConvergentAdaptiveEvolver +try: # OptimizedConvergentAdaptiveEvolver + from nevergrad.optimization.lama.OptimizedConvergentAdaptiveEvolver import ( + OptimizedConvergentAdaptiveEvolver, + ) lama_register["OptimizedConvergentAdaptiveEvolver"] = OptimizedConvergentAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAOptimizedConvergentAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedConvergentAdaptiveEvolver = NonObjectOptimizer(method="LLAMAOptimizedConvergentAdaptiveEvolver").set_name("LLAMAOptimizedConvergentAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedConvergentAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedConvergentAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAOptimizedConvergentAdaptiveEvolver" + ).set_name("LLAMAOptimizedConvergentAdaptiveEvolver", register=True) +except Exception as e: # OptimizedConvergentAdaptiveEvolver print("OptimizedConvergentAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedCrossoverElitistStrategyV8 import OptimizedCrossoverElitistStrategyV8 +try: # OptimizedCrossoverElitistStrategyV8 + from nevergrad.optimization.lama.OptimizedCrossoverElitistStrategyV8 import ( + OptimizedCrossoverElitistStrategyV8, + ) lama_register["OptimizedCrossoverElitistStrategyV8"] = OptimizedCrossoverElitistStrategyV8 - res = NonObjectOptimizer(method="LLAMAOptimizedCrossoverElitistStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedCrossoverElitistStrategyV8 = NonObjectOptimizer(method="LLAMAOptimizedCrossoverElitistStrategyV8").set_name("LLAMAOptimizedCrossoverElitistStrategyV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedCrossoverElitistStrategyV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedCrossoverElitistStrategyV8 = NonObjectOptimizer( + method="LLAMAOptimizedCrossoverElitistStrategyV8" + ).set_name("LLAMAOptimizedCrossoverElitistStrategyV8", register=True) +except Exception as e: # OptimizedCrossoverElitistStrategyV8 print("OptimizedCrossoverElitistStrategyV8 can not be imported: ", e) -try: +try: # OptimizedDifferentialEvolution from nevergrad.optimization.lama.OptimizedDifferentialEvolution import OptimizedDifferentialEvolution lama_register["OptimizedDifferentialEvolution"] = OptimizedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAOptimizedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDifferentialEvolution = NonObjectOptimizer(method="LLAMAOptimizedDifferentialEvolution").set_name("LLAMAOptimizedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOptimizedDifferentialEvolution" + ).set_name("LLAMAOptimizedDifferentialEvolution", register=True) +except Exception as e: # OptimizedDifferentialEvolution print("OptimizedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDualPhaseAdaptiveHybridOptimizationV4 import OptimizedDualPhaseAdaptiveHybridOptimizationV4 - - lama_register["OptimizedDualPhaseAdaptiveHybridOptimizationV4"] = OptimizedDualPhaseAdaptiveHybridOptimizationV4 - res = NonObjectOptimizer(method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4 = NonObjectOptimizer(method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4").set_name("LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4", register=True) -except Exception as e: +try: # OptimizedDualPhaseAdaptiveHybridOptimizationV4 + from nevergrad.optimization.lama.OptimizedDualPhaseAdaptiveHybridOptimizationV4 import ( + OptimizedDualPhaseAdaptiveHybridOptimizationV4, + ) + + lama_register["OptimizedDualPhaseAdaptiveHybridOptimizationV4"] = ( + OptimizedDualPhaseAdaptiveHybridOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4" + ).set_name("LLAMAOptimizedDualPhaseAdaptiveHybridOptimizationV4", register=True) +except Exception as e: # OptimizedDualPhaseAdaptiveHybridOptimizationV4 print("OptimizedDualPhaseAdaptiveHybridOptimizationV4 can not be imported: ", e) -try: +try: # OptimizedDualStrategyAdaptiveDE from nevergrad.optimization.lama.OptimizedDualStrategyAdaptiveDE import OptimizedDualStrategyAdaptiveDE lama_register["OptimizedDualStrategyAdaptiveDE"] = OptimizedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAOptimizedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedDualStrategyAdaptiveDE", register=True) +except Exception as e: # OptimizedDualStrategyAdaptiveDE print("OptimizedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicAdaptiveHybridOptimizer import OptimizedDynamicAdaptiveHybridOptimizer +try: # OptimizedDynamicAdaptiveHybridOptimizer + from nevergrad.optimization.lama.OptimizedDynamicAdaptiveHybridOptimizer import ( + OptimizedDynamicAdaptiveHybridOptimizer, + ) lama_register["OptimizedDynamicAdaptiveHybridOptimizer"] = OptimizedDynamicAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer").set_name("LLAMAOptimizedDynamicAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAOptimizedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMAOptimizedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: # OptimizedDynamicAdaptiveHybridOptimizer print("OptimizedDynamicAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicDualPhaseStrategyV13 import OptimizedDynamicDualPhaseStrategyV13 +try: # OptimizedDynamicDualPhaseStrategyV13 + from nevergrad.optimization.lama.OptimizedDynamicDualPhaseStrategyV13 import ( + OptimizedDynamicDualPhaseStrategyV13, + ) lama_register["OptimizedDynamicDualPhaseStrategyV13"] = OptimizedDynamicDualPhaseStrategyV13 - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicDualPhaseStrategyV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicDualPhaseStrategyV13 = NonObjectOptimizer(method="LLAMAOptimizedDynamicDualPhaseStrategyV13").set_name("LLAMAOptimizedDynamicDualPhaseStrategyV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicDualPhaseStrategyV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicDualPhaseStrategyV13 = NonObjectOptimizer( + method="LLAMAOptimizedDynamicDualPhaseStrategyV13" + ).set_name("LLAMAOptimizedDynamicDualPhaseStrategyV13", register=True) +except Exception as e: # OptimizedDynamicDualPhaseStrategyV13 print("OptimizedDynamicDualPhaseStrategyV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus import OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus - - lama_register["OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) -except Exception as e: +try: # OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus import ( + OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus"] = ( + OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: # OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus print("OptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedSimulatedAnnealing import OptimizedDynamicGradientBoostedSimulatedAnnealing - - lama_register["OptimizedDynamicGradientBoostedSimulatedAnnealing"] = OptimizedDynamicGradientBoostedSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing").set_name("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing", register=True) -except Exception as e: +try: # OptimizedDynamicGradientBoostedSimulatedAnnealing + from nevergrad.optimization.lama.OptimizedDynamicGradientBoostedSimulatedAnnealing import ( + OptimizedDynamicGradientBoostedSimulatedAnnealing, + ) + + lama_register["OptimizedDynamicGradientBoostedSimulatedAnnealing"] = ( + OptimizedDynamicGradientBoostedSimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing = NonObjectOptimizer( + method="LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing" + ).set_name("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing", register=True) +except Exception as e: # OptimizedDynamicGradientBoostedSimulatedAnnealing print("OptimizedDynamicGradientBoostedSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicQuantumSwarmOptimization import OptimizedDynamicQuantumSwarmOptimization +try: # OptimizedDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.OptimizedDynamicQuantumSwarmOptimization import ( + OptimizedDynamicQuantumSwarmOptimization, + ) lama_register["OptimizedDynamicQuantumSwarmOptimization"] = OptimizedDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAOptimizedDynamicQuantumSwarmOptimization").set_name("LLAMAOptimizedDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMAOptimizedDynamicQuantumSwarmOptimization" + ).set_name("LLAMAOptimizedDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # OptimizedDynamicQuantumSwarmOptimization print("OptimizedDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedDynamicRestartAdaptiveDE import OptimizedDynamicRestartAdaptiveDE +try: # OptimizedDynamicRestartAdaptiveDE + from nevergrad.optimization.lama.OptimizedDynamicRestartAdaptiveDE import ( + OptimizedDynamicRestartAdaptiveDE, + ) lama_register["OptimizedDynamicRestartAdaptiveDE"] = OptimizedDynamicRestartAdaptiveDE - res = NonObjectOptimizer(method="LLAMAOptimizedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedDynamicRestartAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedDynamicRestartAdaptiveDE").set_name("LLAMAOptimizedDynamicRestartAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedDynamicRestartAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedDynamicRestartAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedDynamicRestartAdaptiveDE" + ).set_name("LLAMAOptimizedDynamicRestartAdaptiveDE", register=True) +except Exception as e: # OptimizedDynamicRestartAdaptiveDE print("OptimizedDynamicRestartAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedEliteAdaptiveMemoryHybridOptimizer import OptimizedEliteAdaptiveMemoryHybridOptimizer +try: # OptimizedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.OptimizedEliteAdaptiveMemoryHybridOptimizer import ( + OptimizedEliteAdaptiveMemoryHybridOptimizer, + ) lama_register["OptimizedEliteAdaptiveMemoryHybridOptimizer"] = OptimizedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAOptimizedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # OptimizedEliteAdaptiveMemoryHybridOptimizer print("OptimizedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedEnhancedAdaptiveMetaNetAQAPSO import OptimizedEnhancedAdaptiveMetaNetAQAPSO +try: # OptimizedEnhancedAdaptiveMetaNetAQAPSO + from nevergrad.optimization.lama.OptimizedEnhancedAdaptiveMetaNetAQAPSO import ( + OptimizedEnhancedAdaptiveMetaNetAQAPSO, + ) lama_register["OptimizedEnhancedAdaptiveMetaNetAQAPSO"] = OptimizedEnhancedAdaptiveMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer(method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO").set_name("LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO" + ).set_name("LLAMAOptimizedEnhancedAdaptiveMetaNetAQAPSO", register=True) +except Exception as e: # OptimizedEnhancedAdaptiveMetaNetAQAPSO print("OptimizedEnhancedAdaptiveMetaNetAQAPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedEnhancedDualStrategyAdaptiveDE import OptimizedEnhancedDualStrategyAdaptiveDE +try: # OptimizedEnhancedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.OptimizedEnhancedDualStrategyAdaptiveDE import ( + OptimizedEnhancedDualStrategyAdaptiveDE, + ) lama_register["OptimizedEnhancedDualStrategyAdaptiveDE"] = OptimizedEnhancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE").set_name("LLAMAOptimizedEnhancedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedDualStrategyAdaptiveDE" + ).set_name("LLAMAOptimizedEnhancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # OptimizedEnhancedDualStrategyAdaptiveDE print("OptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedEnhancedDynamicFireworkAlgorithm import OptimizedEnhancedDynamicFireworkAlgorithm +try: # OptimizedEnhancedDynamicFireworkAlgorithm + from nevergrad.optimization.lama.OptimizedEnhancedDynamicFireworkAlgorithm import ( + OptimizedEnhancedDynamicFireworkAlgorithm, + ) lama_register["OptimizedEnhancedDynamicFireworkAlgorithm"] = OptimizedEnhancedDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm").set_name("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedEnhancedDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAOptimizedEnhancedDynamicFireworkAlgorithm" + ).set_name("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm", register=True) +except Exception as e: # OptimizedEnhancedDynamicFireworkAlgorithm print("OptimizedEnhancedDynamicFireworkAlgorithm can not be imported: ", e) -try: +try: # OptimizedEvolutiveStrategy from nevergrad.optimization.lama.OptimizedEvolutiveStrategy import OptimizedEvolutiveStrategy lama_register["OptimizedEvolutiveStrategy"] = OptimizedEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy").set_name("LLAMAOptimizedEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedEvolutiveStrategy").set_name( + "LLAMAOptimizedEvolutiveStrategy", register=True + ) +except Exception as e: # OptimizedEvolutiveStrategy print("OptimizedEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedExplorationConvergenceStrategy import OptimizedExplorationConvergenceStrategy +try: # OptimizedExplorationConvergenceStrategy + from nevergrad.optimization.lama.OptimizedExplorationConvergenceStrategy import ( + OptimizedExplorationConvergenceStrategy, + ) lama_register["OptimizedExplorationConvergenceStrategy"] = OptimizedExplorationConvergenceStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedExplorationConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedExplorationConvergenceStrategy = NonObjectOptimizer(method="LLAMAOptimizedExplorationConvergenceStrategy").set_name("LLAMAOptimizedExplorationConvergenceStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedExplorationConvergenceStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedExplorationConvergenceStrategy = NonObjectOptimizer( + method="LLAMAOptimizedExplorationConvergenceStrategy" + ).set_name("LLAMAOptimizedExplorationConvergenceStrategy", register=True) +except Exception as e: # OptimizedExplorationConvergenceStrategy print("OptimizedExplorationConvergenceStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedGlobalStructureAwareEvolver import OptimizedGlobalStructureAwareEvolver +try: # OptimizedGlobalStructureAwareEvolver + from nevergrad.optimization.lama.OptimizedGlobalStructureAwareEvolver import ( + OptimizedGlobalStructureAwareEvolver, + ) lama_register["OptimizedGlobalStructureAwareEvolver"] = OptimizedGlobalStructureAwareEvolver - res = NonObjectOptimizer(method="LLAMAOptimizedGlobalStructureAwareEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedGlobalStructureAwareEvolver = NonObjectOptimizer(method="LLAMAOptimizedGlobalStructureAwareEvolver").set_name("LLAMAOptimizedGlobalStructureAwareEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedGlobalStructureAwareEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedGlobalStructureAwareEvolver = NonObjectOptimizer( + method="LLAMAOptimizedGlobalStructureAwareEvolver" + ).set_name("LLAMAOptimizedGlobalStructureAwareEvolver", register=True) +except Exception as e: # OptimizedGlobalStructureAwareEvolver print("OptimizedGlobalStructureAwareEvolver can not be imported: ", e) -try: +try: # OptimizedGradientBalancedPSO from nevergrad.optimization.lama.OptimizedGradientBalancedPSO import OptimizedGradientBalancedPSO lama_register["OptimizedGradientBalancedPSO"] = OptimizedGradientBalancedPSO - res = NonObjectOptimizer(method="LLAMAOptimizedGradientBalancedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedGradientBalancedPSO = NonObjectOptimizer(method="LLAMAOptimizedGradientBalancedPSO").set_name("LLAMAOptimizedGradientBalancedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedGradientBalancedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedGradientBalancedPSO = NonObjectOptimizer( + method="LLAMAOptimizedGradientBalancedPSO" + ).set_name("LLAMAOptimizedGradientBalancedPSO", register=True) +except Exception as e: # OptimizedGradientBalancedPSO print("OptimizedGradientBalancedPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch import OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch - - lama_register["OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch"] = OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch = NonObjectOptimizer(method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch").set_name("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch", register=True) -except Exception as e: +try: # OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch + from nevergrad.optimization.lama.OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch import ( + OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch, + ) + + lama_register["OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch"] = ( + OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch = NonObjectOptimizer( + method="LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch" + ).set_name("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch", register=True) +except Exception as e: # OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch print("OptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedGradientMemorySimulatedAnnealing import OptimizedGradientMemorySimulatedAnnealing +try: # OptimizedGradientMemorySimulatedAnnealing + from nevergrad.optimization.lama.OptimizedGradientMemorySimulatedAnnealing import ( + OptimizedGradientMemorySimulatedAnnealing, + ) lama_register["OptimizedGradientMemorySimulatedAnnealing"] = OptimizedGradientMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAOptimizedGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedGradientMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMAOptimizedGradientMemorySimulatedAnnealing").set_name("LLAMAOptimizedGradientMemorySimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedGradientMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedGradientMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMAOptimizedGradientMemorySimulatedAnnealing" + ).set_name("LLAMAOptimizedGradientMemorySimulatedAnnealing", register=True) +except Exception as e: # OptimizedGradientMemorySimulatedAnnealing print("OptimizedGradientMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedHybridAdaptiveDualPhaseStrategyV7 import OptimizedHybridAdaptiveDualPhaseStrategyV7 +try: # OptimizedHybridAdaptiveDualPhaseStrategyV7 + from nevergrad.optimization.lama.OptimizedHybridAdaptiveDualPhaseStrategyV7 import ( + OptimizedHybridAdaptiveDualPhaseStrategyV7, + ) lama_register["OptimizedHybridAdaptiveDualPhaseStrategyV7"] = OptimizedHybridAdaptiveDualPhaseStrategyV7 - res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7 = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7").set_name("LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7 = NonObjectOptimizer( + method="LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7" + ).set_name("LLAMAOptimizedHybridAdaptiveDualPhaseStrategyV7", register=True) +except Exception as e: # OptimizedHybridAdaptiveDualPhaseStrategyV7 print("OptimizedHybridAdaptiveDualPhaseStrategyV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedHybridAdaptiveMultiStageOptimization import OptimizedHybridAdaptiveMultiStageOptimization - - lama_register["OptimizedHybridAdaptiveMultiStageOptimization"] = OptimizedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMAOptimizedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: +try: # OptimizedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.OptimizedHybridAdaptiveMultiStageOptimization import ( + OptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["OptimizedHybridAdaptiveMultiStageOptimization"] = ( + OptimizedHybridAdaptiveMultiStageOptimization + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMAOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMAOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # OptimizedHybridAdaptiveMultiStageOptimization print("OptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedHybridExplorationOptimization import OptimizedHybridExplorationOptimization +try: # OptimizedHybridExplorationOptimization + from nevergrad.optimization.lama.OptimizedHybridExplorationOptimization import ( + OptimizedHybridExplorationOptimization, + ) lama_register["OptimizedHybridExplorationOptimization"] = OptimizedHybridExplorationOptimization - res = NonObjectOptimizer(method="LLAMAOptimizedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMAOptimizedHybridExplorationOptimization").set_name("LLAMAOptimizedHybridExplorationOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMAOptimizedHybridExplorationOptimization" + ).set_name("LLAMAOptimizedHybridExplorationOptimization", register=True) +except Exception as e: # OptimizedHybridExplorationOptimization print("OptimizedHybridExplorationOptimization can not be imported: ", e) -try: +try: # OptimizedHybridSearch from nevergrad.optimization.lama.OptimizedHybridSearch import OptimizedHybridSearch lama_register["OptimizedHybridSearch"] = OptimizedHybridSearch - res = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch").set_name("LLAMAOptimizedHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedHybridSearch").set_name( + "LLAMAOptimizedHybridSearch", register=True + ) +except Exception as e: # OptimizedHybridSearch print("OptimizedHybridSearch can not be imported: ", e) -try: +try: # OptimizedHybridStrategyDE from nevergrad.optimization.lama.OptimizedHybridStrategyDE import OptimizedHybridStrategyDE lama_register["OptimizedHybridStrategyDE"] = OptimizedHybridStrategyDE - res = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHybridStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE").set_name("LLAMAOptimizedHybridStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHybridStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedHybridStrategyDE").set_name( + "LLAMAOptimizedHybridStrategyDE", register=True + ) +except Exception as e: # OptimizedHybridStrategyDE print("OptimizedHybridStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedHyperStrategicOptimizerV53 import OptimizedHyperStrategicOptimizerV53 +try: # OptimizedHyperStrategicOptimizerV53 + from nevergrad.optimization.lama.OptimizedHyperStrategicOptimizerV53 import ( + OptimizedHyperStrategicOptimizerV53, + ) lama_register["OptimizedHyperStrategicOptimizerV53"] = OptimizedHyperStrategicOptimizerV53 - res = NonObjectOptimizer(method="LLAMAOptimizedHyperStrategicOptimizerV53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedHyperStrategicOptimizerV53 = NonObjectOptimizer(method="LLAMAOptimizedHyperStrategicOptimizerV53").set_name("LLAMAOptimizedHyperStrategicOptimizerV53", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedHyperStrategicOptimizerV53")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedHyperStrategicOptimizerV53 = NonObjectOptimizer( + method="LLAMAOptimizedHyperStrategicOptimizerV53" + ).set_name("LLAMAOptimizedHyperStrategicOptimizerV53", register=True) +except Exception as e: # OptimizedHyperStrategicOptimizerV53 print("OptimizedHyperStrategicOptimizerV53 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedIslandEvolutionStrategyV4 import OptimizedIslandEvolutionStrategyV4 +try: # OptimizedIslandEvolutionStrategyV4 + from nevergrad.optimization.lama.OptimizedIslandEvolutionStrategyV4 import ( + OptimizedIslandEvolutionStrategyV4, + ) lama_register["OptimizedIslandEvolutionStrategyV4"] = OptimizedIslandEvolutionStrategyV4 - res = NonObjectOptimizer(method="LLAMAOptimizedIslandEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedIslandEvolutionStrategyV4 = NonObjectOptimizer(method="LLAMAOptimizedIslandEvolutionStrategyV4").set_name("LLAMAOptimizedIslandEvolutionStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedIslandEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedIslandEvolutionStrategyV4 = NonObjectOptimizer( + method="LLAMAOptimizedIslandEvolutionStrategyV4" + ).set_name("LLAMAOptimizedIslandEvolutionStrategyV4", register=True) +except Exception as e: # OptimizedIslandEvolutionStrategyV4 print("OptimizedIslandEvolutionStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedMemoryEnhancedAdaptiveStrategyV70 import OptimizedMemoryEnhancedAdaptiveStrategyV70 +try: # OptimizedMemoryEnhancedAdaptiveStrategyV70 + from nevergrad.optimization.lama.OptimizedMemoryEnhancedAdaptiveStrategyV70 import ( + OptimizedMemoryEnhancedAdaptiveStrategyV70, + ) lama_register["OptimizedMemoryEnhancedAdaptiveStrategyV70"] = OptimizedMemoryEnhancedAdaptiveStrategyV70 - res = NonObjectOptimizer(method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70 = NonObjectOptimizer(method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70").set_name("LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70" + ).set_name("LLAMAOptimizedMemoryEnhancedAdaptiveStrategyV70", register=True) +except Exception as e: # OptimizedMemoryEnhancedAdaptiveStrategyV70 print("OptimizedMemoryEnhancedAdaptiveStrategyV70 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedMemoryGuidedAdaptiveStrategyV81 import OptimizedMemoryGuidedAdaptiveStrategyV81 +try: # OptimizedMemoryGuidedAdaptiveStrategyV81 + from nevergrad.optimization.lama.OptimizedMemoryGuidedAdaptiveStrategyV81 import ( + OptimizedMemoryGuidedAdaptiveStrategyV81, + ) lama_register["OptimizedMemoryGuidedAdaptiveStrategyV81"] = OptimizedMemoryGuidedAdaptiveStrategyV81 - res = NonObjectOptimizer(method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81 = NonObjectOptimizer(method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81").set_name("LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81" + ).set_name("LLAMAOptimizedMemoryGuidedAdaptiveStrategyV81", register=True) +except Exception as e: # OptimizedMemoryGuidedAdaptiveStrategyV81 print("OptimizedMemoryGuidedAdaptiveStrategyV81 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedMemoryResponsiveAdaptiveStrategyV78 import OptimizedMemoryResponsiveAdaptiveStrategyV78 - - lama_register["OptimizedMemoryResponsiveAdaptiveStrategyV78"] = OptimizedMemoryResponsiveAdaptiveStrategyV78 - res = NonObjectOptimizer(method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78 = NonObjectOptimizer(method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78").set_name("LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78", register=True) -except Exception as e: +try: # OptimizedMemoryResponsiveAdaptiveStrategyV78 + from nevergrad.optimization.lama.OptimizedMemoryResponsiveAdaptiveStrategyV78 import ( + OptimizedMemoryResponsiveAdaptiveStrategyV78, + ) + + lama_register["OptimizedMemoryResponsiveAdaptiveStrategyV78"] = ( + OptimizedMemoryResponsiveAdaptiveStrategyV78 + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78 = NonObjectOptimizer( + method="LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78" + ).set_name("LLAMAOptimizedMemoryResponsiveAdaptiveStrategyV78", register=True) +except Exception as e: # OptimizedMemoryResponsiveAdaptiveStrategyV78 print("OptimizedMemoryResponsiveAdaptiveStrategyV78 can not be imported: ", e) -try: +try: # OptimizedParallelStrategyDE from nevergrad.optimization.lama.OptimizedParallelStrategyDE import OptimizedParallelStrategyDE lama_register["OptimizedParallelStrategyDE"] = OptimizedParallelStrategyDE - res = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedParallelStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE").set_name("LLAMAOptimizedParallelStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedParallelStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedParallelStrategyDE").set_name( + "LLAMAOptimizedParallelStrategyDE", register=True + ) +except Exception as e: # OptimizedParallelStrategyDE print("OptimizedParallelStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedPrecisionAdaptiveStrategy import OptimizedPrecisionAdaptiveStrategy +try: # OptimizedPrecisionAdaptiveStrategy + from nevergrad.optimization.lama.OptimizedPrecisionAdaptiveStrategy import ( + OptimizedPrecisionAdaptiveStrategy, + ) lama_register["OptimizedPrecisionAdaptiveStrategy"] = OptimizedPrecisionAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedPrecisionAdaptiveStrategy = NonObjectOptimizer(method="LLAMAOptimizedPrecisionAdaptiveStrategy").set_name("LLAMAOptimizedPrecisionAdaptiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedPrecisionAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAOptimizedPrecisionAdaptiveStrategy" + ).set_name("LLAMAOptimizedPrecisionAdaptiveStrategy", register=True) +except Exception as e: # OptimizedPrecisionAdaptiveStrategy print("OptimizedPrecisionAdaptiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedPrecisionTunedCrossoverElitistStrategyV13 import OptimizedPrecisionTunedCrossoverElitistStrategyV13 - - lama_register["OptimizedPrecisionTunedCrossoverElitistStrategyV13"] = OptimizedPrecisionTunedCrossoverElitistStrategyV13 - res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13 = NonObjectOptimizer(method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13").set_name("LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13", register=True) -except Exception as e: +try: # OptimizedPrecisionTunedCrossoverElitistStrategyV13 + from nevergrad.optimization.lama.OptimizedPrecisionTunedCrossoverElitistStrategyV13 import ( + OptimizedPrecisionTunedCrossoverElitistStrategyV13, + ) + + lama_register["OptimizedPrecisionTunedCrossoverElitistStrategyV13"] = ( + OptimizedPrecisionTunedCrossoverElitistStrategyV13 + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13 = NonObjectOptimizer( + method="LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13" + ).set_name("LLAMAOptimizedPrecisionTunedCrossoverElitistStrategyV13", register=True) +except Exception as e: # OptimizedPrecisionTunedCrossoverElitistStrategyV13 print("OptimizedPrecisionTunedCrossoverElitistStrategyV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 import OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 - - lama_register["OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3"] = OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3", register=True) -except Exception as e: +try: # OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 + from nevergrad.optimization.lama.OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 import ( + OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3"] = ( + OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMAOptimizedQuantumCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: # OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 print("OptimizedQuantumCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedQuantumFluxDifferentialSwarm import OptimizedQuantumFluxDifferentialSwarm +try: # OptimizedQuantumFluxDifferentialSwarm + from nevergrad.optimization.lama.OptimizedQuantumFluxDifferentialSwarm import ( + OptimizedQuantumFluxDifferentialSwarm, + ) lama_register["OptimizedQuantumFluxDifferentialSwarm"] = OptimizedQuantumFluxDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMAOptimizedQuantumFluxDifferentialSwarm").set_name("LLAMAOptimizedQuantumFluxDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMAOptimizedQuantumFluxDifferentialSwarm" + ).set_name("LLAMAOptimizedQuantumFluxDifferentialSwarm", register=True) +except Exception as e: # OptimizedQuantumFluxDifferentialSwarm print("OptimizedQuantumFluxDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedQuantumGradientExplorationOptimization import OptimizedQuantumGradientExplorationOptimization - - lama_register["OptimizedQuantumGradientExplorationOptimization"] = OptimizedQuantumGradientExplorationOptimization - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMAOptimizedQuantumGradientExplorationOptimization").set_name("LLAMAOptimizedQuantumGradientExplorationOptimization", register=True) -except Exception as e: +try: # OptimizedQuantumGradientExplorationOptimization + from nevergrad.optimization.lama.OptimizedQuantumGradientExplorationOptimization import ( + OptimizedQuantumGradientExplorationOptimization, + ) + + lama_register["OptimizedQuantumGradientExplorationOptimization"] = ( + OptimizedQuantumGradientExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMAOptimizedQuantumGradientExplorationOptimization" + ).set_name("LLAMAOptimizedQuantumGradientExplorationOptimization", register=True) +except Exception as e: # OptimizedQuantumGradientExplorationOptimization print("OptimizedQuantumGradientExplorationOptimization can not be imported: ", e) -try: +try: # OptimizedQuantumHarmonySearch from nevergrad.optimization.lama.OptimizedQuantumHarmonySearch import OptimizedQuantumHarmonySearch lama_register["OptimizedQuantumHarmonySearch"] = OptimizedQuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAOptimizedQuantumHarmonySearch").set_name("LLAMAOptimizedQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumHarmonySearch = NonObjectOptimizer( + method="LLAMAOptimizedQuantumHarmonySearch" + ).set_name("LLAMAOptimizedQuantumHarmonySearch", register=True) +except Exception as e: # OptimizedQuantumHarmonySearch print("OptimizedQuantumHarmonySearch can not be imported: ", e) -try: +try: # OptimizedQuantumHybridDEPSO from nevergrad.optimization.lama.OptimizedQuantumHybridDEPSO import OptimizedQuantumHybridDEPSO lama_register["OptimizedQuantumHybridDEPSO"] = OptimizedQuantumHybridDEPSO - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumHybridDEPSO = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO").set_name("LLAMAOptimizedQuantumHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumHybridDEPSO = NonObjectOptimizer(method="LLAMAOptimizedQuantumHybridDEPSO").set_name( + "LLAMAOptimizedQuantumHybridDEPSO", register=True + ) +except Exception as e: # OptimizedQuantumHybridDEPSO print("OptimizedQuantumHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedQuantumLevyDifferentialSearch import OptimizedQuantumLevyDifferentialSearch +try: # OptimizedQuantumLevyDifferentialSearch + from nevergrad.optimization.lama.OptimizedQuantumLevyDifferentialSearch import ( + OptimizedQuantumLevyDifferentialSearch, + ) lama_register["OptimizedQuantumLevyDifferentialSearch"] = OptimizedQuantumLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMAOptimizedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedQuantumLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAOptimizedQuantumLevyDifferentialSearch").set_name("LLAMAOptimizedQuantumLevyDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedQuantumLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedQuantumLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAOptimizedQuantumLevyDifferentialSearch" + ).set_name("LLAMAOptimizedQuantumLevyDifferentialSearch", register=True) +except Exception as e: # OptimizedQuantumLevyDifferentialSearch print("OptimizedQuantumLevyDifferentialSearch can not be imported: ", e) -try: +try: # OptimizedRAMEDS from nevergrad.optimization.lama.OptimizedRAMEDS import OptimizedRAMEDS lama_register["OptimizedRAMEDS"] = OptimizedRAMEDS - res = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS").set_name("LLAMAOptimizedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAOptimizedRAMEDS").set_name( + "LLAMAOptimizedRAMEDS", register=True + ) +except Exception as e: # OptimizedRAMEDS print("OptimizedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO import OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO - - lama_register["OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) -except Exception as e: +try: # OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( + OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO, + ) + + lama_register["OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( + OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMAOptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: # OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO print("OptimizedRefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveHybridSearch import OptimizedRefinedAdaptiveHybridSearch +try: # OptimizedRefinedAdaptiveHybridSearch + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveHybridSearch import ( + OptimizedRefinedAdaptiveHybridSearch, + ) lama_register["OptimizedRefinedAdaptiveHybridSearch"] = OptimizedRefinedAdaptiveHybridSearch - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveHybridSearch").set_name("LLAMAOptimizedRefinedAdaptiveHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveHybridSearch" + ).set_name("LLAMAOptimizedRefinedAdaptiveHybridSearch", register=True) +except Exception as e: # OptimizedRefinedAdaptiveHybridSearch print("OptimizedRefinedAdaptiveHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveMultiStrategyDE import OptimizedRefinedAdaptiveMultiStrategyDE +try: # OptimizedRefinedAdaptiveMultiStrategyDE + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveMultiStrategyDE import ( + OptimizedRefinedAdaptiveMultiStrategyDE, + ) lama_register["OptimizedRefinedAdaptiveMultiStrategyDE"] = OptimizedRefinedAdaptiveMultiStrategyDE - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE").set_name("LLAMAOptimizedRefinedAdaptiveMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveMultiStrategyDE" + ).set_name("LLAMAOptimizedRefinedAdaptiveMultiStrategyDE", register=True) +except Exception as e: # OptimizedRefinedAdaptiveMultiStrategyDE print("OptimizedRefinedAdaptiveMultiStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedRefinedAdaptiveRefinementPSO import OptimizedRefinedAdaptiveRefinementPSO +try: # OptimizedRefinedAdaptiveRefinementPSO + from nevergrad.optimization.lama.OptimizedRefinedAdaptiveRefinementPSO import ( + OptimizedRefinedAdaptiveRefinementPSO, + ) lama_register["OptimizedRefinedAdaptiveRefinementPSO"] = OptimizedRefinedAdaptiveRefinementPSO - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveRefinementPSO").set_name("LLAMAOptimizedRefinedAdaptiveRefinementPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedAdaptiveRefinementPSO = NonObjectOptimizer( + method="LLAMAOptimizedRefinedAdaptiveRefinementPSO" + ).set_name("LLAMAOptimizedRefinedAdaptiveRefinementPSO", register=True) +except Exception as e: # OptimizedRefinedAdaptiveRefinementPSO print("OptimizedRefinedAdaptiveRefinementPSO can not be imported: ", e) -try: +try: # OptimizedRefinedEnhancedRAMEDSv5 from nevergrad.optimization.lama.OptimizedRefinedEnhancedRAMEDSv5 import OptimizedRefinedEnhancedRAMEDSv5 lama_register["OptimizedRefinedEnhancedRAMEDSv5"] = OptimizedRefinedEnhancedRAMEDSv5 - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedEnhancedRAMEDSv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedEnhancedRAMEDSv5 = NonObjectOptimizer(method="LLAMAOptimizedRefinedEnhancedRAMEDSv5").set_name("LLAMAOptimizedRefinedEnhancedRAMEDSv5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedEnhancedRAMEDSv5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedEnhancedRAMEDSv5 = NonObjectOptimizer( + method="LLAMAOptimizedRefinedEnhancedRAMEDSv5" + ).set_name("LLAMAOptimizedRefinedEnhancedRAMEDSv5", register=True) +except Exception as e: # OptimizedRefinedEnhancedRAMEDSv5 print("OptimizedRefinedEnhancedRAMEDSv5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedRefinedMemoryDualPhaseStrategyV65 import OptimizedRefinedMemoryDualPhaseStrategyV65 +try: # OptimizedRefinedMemoryDualPhaseStrategyV65 + from nevergrad.optimization.lama.OptimizedRefinedMemoryDualPhaseStrategyV65 import ( + OptimizedRefinedMemoryDualPhaseStrategyV65, + ) lama_register["OptimizedRefinedMemoryDualPhaseStrategyV65"] = OptimizedRefinedMemoryDualPhaseStrategyV65 - res = NonObjectOptimizer(method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65 = NonObjectOptimizer(method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65").set_name("LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65 = NonObjectOptimizer( + method="LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65" + ).set_name("LLAMAOptimizedRefinedMemoryDualPhaseStrategyV65", register=True) +except Exception as e: # OptimizedRefinedMemoryDualPhaseStrategyV65 print("OptimizedRefinedMemoryDualPhaseStrategyV65 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 import OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 - - lama_register["OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45"] = OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 - res = NonObjectOptimizer(method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 = NonObjectOptimizer(method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45").set_name("LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45", register=True) -except Exception as e: +try: # OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 + from nevergrad.optimization.lama.OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 import ( + OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45, + ) + + lama_register["OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45"] = ( + OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 + ) + # res = NonObjectOptimizer(method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 = NonObjectOptimizer( + method="LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45" + ).set_name("LLAMAOptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45", register=True) +except Exception as e: # OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 print("OptimizedUltraRefinedPrecisionEvolutionaryOptimizerV45 can not be imported: ", e) -try: - from nevergrad.optimization.lama.OscillatoryCrossoverDifferentialEvolution import OscillatoryCrossoverDifferentialEvolution +try: # OscillatoryCrossoverDifferentialEvolution + from nevergrad.optimization.lama.OscillatoryCrossoverDifferentialEvolution import ( + OscillatoryCrossoverDifferentialEvolution, + ) lama_register["OscillatoryCrossoverDifferentialEvolution"] = OscillatoryCrossoverDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer(method="LLAMAOscillatoryCrossoverDifferentialEvolution").set_name("LLAMAOscillatoryCrossoverDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAOscillatoryCrossoverDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAOscillatoryCrossoverDifferentialEvolution = NonObjectOptimizer( + method="LLAMAOscillatoryCrossoverDifferentialEvolution" + ).set_name("LLAMAOscillatoryCrossoverDifferentialEvolution", register=True) +except Exception as e: # OscillatoryCrossoverDifferentialEvolution print("OscillatoryCrossoverDifferentialEvolution can not be imported: ", e) -try: +try: # PADE from nevergrad.optimization.lama.PADE import PADE lama_register["PADE"] = PADE - res = NonObjectOptimizer(method="LLAMAPADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPADE = NonObjectOptimizer(method="LLAMAPADE").set_name("LLAMAPADE", register=True) -except Exception as e: +except Exception as e: # PADE print("PADE can not be imported: ", e) -try: +try: # PAMDMDESM from nevergrad.optimization.lama.PAMDMDESM import PAMDMDESM lama_register["PAMDMDESM"] = PAMDMDESM - res = NonObjectOptimizer(method="LLAMAPAMDMDESM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPAMDMDESM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPAMDMDESM = NonObjectOptimizer(method="LLAMAPAMDMDESM").set_name("LLAMAPAMDMDESM", register=True) -except Exception as e: +except Exception as e: # PAMDMDESM print("PAMDMDESM can not be imported: ", e) -try: +try: # PDEAF from nevergrad.optimization.lama.PDEAF import PDEAF lama_register["PDEAF"] = PDEAF - res = NonObjectOptimizer(method="LLAMAPDEAF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPDEAF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPDEAF = NonObjectOptimizer(method="LLAMAPDEAF").set_name("LLAMAPDEAF", register=True) -except Exception as e: +except Exception as e: # PDEAF print("PDEAF can not be imported: ", e) -try: +try: # PGDE from nevergrad.optimization.lama.PGDE import PGDE lama_register["PGDE"] = PGDE - res = NonObjectOptimizer(method="LLAMAPGDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPGDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPGDE = NonObjectOptimizer(method="LLAMAPGDE").set_name("LLAMAPGDE", register=True) -except Exception as e: +except Exception as e: # PGDE print("PGDE can not be imported: ", e) -try: +try: # PMFSA from nevergrad.optimization.lama.PMFSA import PMFSA lama_register["PMFSA"] = PMFSA - res = NonObjectOptimizer(method="LLAMAPMFSA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPMFSA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPMFSA = NonObjectOptimizer(method="LLAMAPMFSA").set_name("LLAMAPMFSA", register=True) -except Exception as e: +except Exception as e: # PMFSA print("PMFSA can not be imported: ", e) -try: +try: # PPDE from nevergrad.optimization.lama.PPDE import PPDE lama_register["PPDE"] = PPDE - res = NonObjectOptimizer(method="LLAMAPPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPPDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPPDE = NonObjectOptimizer(method="LLAMAPPDE").set_name("LLAMAPPDE", register=True) -except Exception as e: +except Exception as e: # PPDE print("PPDE can not be imported: ", e) -try: +try: # PWDE from nevergrad.optimization.lama.PWDE import PWDE lama_register["PWDE"] = PWDE - res = NonObjectOptimizer(method="LLAMAPWDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAPWDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAPWDE = NonObjectOptimizer(method="LLAMAPWDE").set_name("LLAMAPWDE", register=True) -except Exception as e: +except Exception as e: # PWDE print("PWDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimization import PrecisionAdaptiveCohortOptimization +try: # PrecisionAdaptiveCohortOptimization + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimization import ( + PrecisionAdaptiveCohortOptimization, + ) lama_register["PrecisionAdaptiveCohortOptimization"] = PrecisionAdaptiveCohortOptimization - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveCohortOptimization = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimization").set_name("LLAMAPrecisionAdaptiveCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveCohortOptimization = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveCohortOptimization" + ).set_name("LLAMAPrecisionAdaptiveCohortOptimization", register=True) +except Exception as e: # PrecisionAdaptiveCohortOptimization print("PrecisionAdaptiveCohortOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimizationV2 import PrecisionAdaptiveCohortOptimizationV2 +try: # PrecisionAdaptiveCohortOptimizationV2 + from nevergrad.optimization.lama.PrecisionAdaptiveCohortOptimizationV2 import ( + PrecisionAdaptiveCohortOptimizationV2, + ) lama_register["PrecisionAdaptiveCohortOptimizationV2"] = PrecisionAdaptiveCohortOptimizationV2 - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveCohortOptimizationV2 = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimizationV2").set_name("LLAMAPrecisionAdaptiveCohortOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveCohortOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveCohortOptimizationV2 = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveCohortOptimizationV2" + ).set_name("LLAMAPrecisionAdaptiveCohortOptimizationV2", register=True) +except Exception as e: # PrecisionAdaptiveCohortOptimizationV2 print("PrecisionAdaptiveCohortOptimizationV2 can not be imported: ", e) -try: +try: # PrecisionAdaptiveDecayOptimizer from nevergrad.optimization.lama.PrecisionAdaptiveDecayOptimizer import PrecisionAdaptiveDecayOptimizer lama_register["PrecisionAdaptiveDecayOptimizer"] = PrecisionAdaptiveDecayOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveDecayOptimizer = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDecayOptimizer").set_name("LLAMAPrecisionAdaptiveDecayOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDecayOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveDecayOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDecayOptimizer" + ).set_name("LLAMAPrecisionAdaptiveDecayOptimizer", register=True) +except Exception as e: # PrecisionAdaptiveDecayOptimizer print("PrecisionAdaptiveDecayOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveDifferentialEvolutionPlus import PrecisionAdaptiveDifferentialEvolutionPlus +try: # PrecisionAdaptiveDifferentialEvolutionPlus + from nevergrad.optimization.lama.PrecisionAdaptiveDifferentialEvolutionPlus import ( + PrecisionAdaptiveDifferentialEvolutionPlus, + ) lama_register["PrecisionAdaptiveDifferentialEvolutionPlus"] = PrecisionAdaptiveDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus").set_name("LLAMAPrecisionAdaptiveDifferentialEvolutionPlus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDifferentialEvolutionPlus" + ).set_name("LLAMAPrecisionAdaptiveDifferentialEvolutionPlus", register=True) +except Exception as e: # PrecisionAdaptiveDifferentialEvolutionPlus print("PrecisionAdaptiveDifferentialEvolutionPlus can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveDynamicStrategyV33 import PrecisionAdaptiveDynamicStrategyV33 +try: # PrecisionAdaptiveDynamicStrategyV33 + from nevergrad.optimization.lama.PrecisionAdaptiveDynamicStrategyV33 import ( + PrecisionAdaptiveDynamicStrategyV33, + ) lama_register["PrecisionAdaptiveDynamicStrategyV33"] = PrecisionAdaptiveDynamicStrategyV33 - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDynamicStrategyV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveDynamicStrategyV33 = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDynamicStrategyV33").set_name("LLAMAPrecisionAdaptiveDynamicStrategyV33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveDynamicStrategyV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveDynamicStrategyV33 = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveDynamicStrategyV33" + ).set_name("LLAMAPrecisionAdaptiveDynamicStrategyV33", register=True) +except Exception as e: # PrecisionAdaptiveDynamicStrategyV33 print("PrecisionAdaptiveDynamicStrategyV33 can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveGlobalClimbingEnhancer import PrecisionAdaptiveGlobalClimbingEnhancer +try: # PrecisionAdaptiveGlobalClimbingEnhancer + from nevergrad.optimization.lama.PrecisionAdaptiveGlobalClimbingEnhancer import ( + PrecisionAdaptiveGlobalClimbingEnhancer, + ) lama_register["PrecisionAdaptiveGlobalClimbingEnhancer"] = PrecisionAdaptiveGlobalClimbingEnhancer - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveGlobalClimbingEnhancer = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer").set_name("LLAMAPrecisionAdaptiveGlobalClimbingEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveGlobalClimbingEnhancer = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveGlobalClimbingEnhancer" + ).set_name("LLAMAPrecisionAdaptiveGlobalClimbingEnhancer", register=True) +except Exception as e: # PrecisionAdaptiveGlobalClimbingEnhancer print("PrecisionAdaptiveGlobalClimbingEnhancer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionAdaptiveGradientClusteringPSO import PrecisionAdaptiveGradientClusteringPSO +try: # PrecisionAdaptiveGradientClusteringPSO + from nevergrad.optimization.lama.PrecisionAdaptiveGradientClusteringPSO import ( + PrecisionAdaptiveGradientClusteringPSO, + ) lama_register["PrecisionAdaptiveGradientClusteringPSO"] = PrecisionAdaptiveGradientClusteringPSO - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGradientClusteringPSO").set_name("LLAMAPrecisionAdaptiveGradientClusteringPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptiveGradientClusteringPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptiveGradientClusteringPSO = NonObjectOptimizer( + method="LLAMAPrecisionAdaptiveGradientClusteringPSO" + ).set_name("LLAMAPrecisionAdaptiveGradientClusteringPSO", register=True) +except Exception as e: # PrecisionAdaptiveGradientClusteringPSO print("PrecisionAdaptiveGradientClusteringPSO can not be imported: ", e) -try: +try: # PrecisionAdaptivePSO from nevergrad.optimization.lama.PrecisionAdaptivePSO import PrecisionAdaptivePSO lama_register["PrecisionAdaptivePSO"] = PrecisionAdaptivePSO - res = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO").set_name("LLAMAPrecisionAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionAdaptivePSO").set_name( + "LLAMAPrecisionAdaptivePSO", register=True + ) +except Exception as e: # PrecisionAdaptivePSO print("PrecisionAdaptivePSO can not be imported: ", e) -try: +try: # PrecisionBalancedAdaptivePSO from nevergrad.optimization.lama.PrecisionBalancedAdaptivePSO import PrecisionBalancedAdaptivePSO lama_register["PrecisionBalancedAdaptivePSO"] = PrecisionBalancedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAPrecisionBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionBalancedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionBalancedAdaptivePSO").set_name("LLAMAPrecisionBalancedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionBalancedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionBalancedAdaptivePSO = NonObjectOptimizer( + method="LLAMAPrecisionBalancedAdaptivePSO" + ).set_name("LLAMAPrecisionBalancedAdaptivePSO", register=True) +except Exception as e: # PrecisionBalancedAdaptivePSO print("PrecisionBalancedAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionBalancedEvolutionStrategy import PrecisionBalancedEvolutionStrategy +try: # PrecisionBalancedEvolutionStrategy + from nevergrad.optimization.lama.PrecisionBalancedEvolutionStrategy import ( + PrecisionBalancedEvolutionStrategy, + ) lama_register["PrecisionBalancedEvolutionStrategy"] = PrecisionBalancedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAPrecisionBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionBalancedEvolutionStrategy").set_name("LLAMAPrecisionBalancedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionBalancedEvolutionStrategy" + ).set_name("LLAMAPrecisionBalancedEvolutionStrategy", register=True) +except Exception as e: # PrecisionBalancedEvolutionStrategy print("PrecisionBalancedEvolutionStrategy can not be imported: ", e) -try: +try: # PrecisionBalancedOptimizer from nevergrad.optimization.lama.PrecisionBalancedOptimizer import PrecisionBalancedOptimizer lama_register["PrecisionBalancedOptimizer"] = PrecisionBalancedOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionBalancedOptimizer = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer").set_name("LLAMAPrecisionBalancedOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionBalancedOptimizer = NonObjectOptimizer(method="LLAMAPrecisionBalancedOptimizer").set_name( + "LLAMAPrecisionBalancedOptimizer", register=True + ) +except Exception as e: # PrecisionBalancedOptimizer print("PrecisionBalancedOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionBoostedDifferentialEvolution import PrecisionBoostedDifferentialEvolution +try: # PrecisionBoostedDifferentialEvolution + from nevergrad.optimization.lama.PrecisionBoostedDifferentialEvolution import ( + PrecisionBoostedDifferentialEvolution, + ) lama_register["PrecisionBoostedDifferentialEvolution"] = PrecisionBoostedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionBoostedDifferentialEvolution = NonObjectOptimizer(method="LLAMAPrecisionBoostedDifferentialEvolution").set_name("LLAMAPrecisionBoostedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionBoostedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionBoostedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAPrecisionBoostedDifferentialEvolution" + ).set_name("LLAMAPrecisionBoostedDifferentialEvolution", register=True) +except Exception as e: # PrecisionBoostedDifferentialEvolution print("PrecisionBoostedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionCosineAdaptiveDifferentialSwarm import PrecisionCosineAdaptiveDifferentialSwarm +try: # PrecisionCosineAdaptiveDifferentialSwarm + from nevergrad.optimization.lama.PrecisionCosineAdaptiveDifferentialSwarm import ( + PrecisionCosineAdaptiveDifferentialSwarm, + ) lama_register["PrecisionCosineAdaptiveDifferentialSwarm"] = PrecisionCosineAdaptiveDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm").set_name("LLAMAPrecisionCosineAdaptiveDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAPrecisionCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMAPrecisionCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: # PrecisionCosineAdaptiveDifferentialSwarm print("PrecisionCosineAdaptiveDifferentialSwarm can not be imported: ", e) -try: +try: # PrecisionDifferentialEvolution from nevergrad.optimization.lama.PrecisionDifferentialEvolution import PrecisionDifferentialEvolution lama_register["PrecisionDifferentialEvolution"] = PrecisionDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAPrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMAPrecisionDifferentialEvolution").set_name("LLAMAPrecisionDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMAPrecisionDifferentialEvolution" + ).set_name("LLAMAPrecisionDifferentialEvolution", register=True) +except Exception as e: # PrecisionDifferentialEvolution print("PrecisionDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionDynamicAdaptiveOptimizerV6 import PrecisionDynamicAdaptiveOptimizerV6 +try: # PrecisionDynamicAdaptiveOptimizerV6 + from nevergrad.optimization.lama.PrecisionDynamicAdaptiveOptimizerV6 import ( + PrecisionDynamicAdaptiveOptimizerV6, + ) lama_register["PrecisionDynamicAdaptiveOptimizerV6"] = PrecisionDynamicAdaptiveOptimizerV6 - res = NonObjectOptimizer(method="LLAMAPrecisionDynamicAdaptiveOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionDynamicAdaptiveOptimizerV6 = NonObjectOptimizer(method="LLAMAPrecisionDynamicAdaptiveOptimizerV6").set_name("LLAMAPrecisionDynamicAdaptiveOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionDynamicAdaptiveOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionDynamicAdaptiveOptimizerV6 = NonObjectOptimizer( + method="LLAMAPrecisionDynamicAdaptiveOptimizerV6" + ).set_name("LLAMAPrecisionDynamicAdaptiveOptimizerV6", register=True) +except Exception as e: # PrecisionDynamicAdaptiveOptimizerV6 print("PrecisionDynamicAdaptiveOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEnhancedDualStrategyOptimizer import PrecisionEnhancedDualStrategyOptimizer +try: # PrecisionEnhancedDualStrategyOptimizer + from nevergrad.optimization.lama.PrecisionEnhancedDualStrategyOptimizer import ( + PrecisionEnhancedDualStrategyOptimizer, + ) lama_register["PrecisionEnhancedDualStrategyOptimizer"] = PrecisionEnhancedDualStrategyOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDualStrategyOptimizer").set_name("LLAMAPrecisionEnhancedDualStrategyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedDualStrategyOptimizer" + ).set_name("LLAMAPrecisionEnhancedDualStrategyOptimizer", register=True) +except Exception as e: # PrecisionEnhancedDualStrategyOptimizer print("PrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEnhancedDynamicOptimizerV13 import PrecisionEnhancedDynamicOptimizerV13 +try: # PrecisionEnhancedDynamicOptimizerV13 + from nevergrad.optimization.lama.PrecisionEnhancedDynamicOptimizerV13 import ( + PrecisionEnhancedDynamicOptimizerV13, + ) lama_register["PrecisionEnhancedDynamicOptimizerV13"] = PrecisionEnhancedDynamicOptimizerV13 - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDynamicOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedDynamicOptimizerV13 = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDynamicOptimizerV13").set_name("LLAMAPrecisionEnhancedDynamicOptimizerV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedDynamicOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedDynamicOptimizerV13 = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedDynamicOptimizerV13" + ).set_name("LLAMAPrecisionEnhancedDynamicOptimizerV13", register=True) +except Exception as e: # PrecisionEnhancedDynamicOptimizerV13 print("PrecisionEnhancedDynamicOptimizerV13 can not be imported: ", e) -try: +try: # PrecisionEnhancedSearch from nevergrad.optimization.lama.PrecisionEnhancedSearch import PrecisionEnhancedSearch lama_register["PrecisionEnhancedSearch"] = PrecisionEnhancedSearch - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedSearch = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch").set_name("LLAMAPrecisionEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedSearch = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSearch").set_name( + "LLAMAPrecisionEnhancedSearch", register=True + ) +except Exception as e: # PrecisionEnhancedSearch print("PrecisionEnhancedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEnhancedSpatialAdaptiveEvolver import PrecisionEnhancedSpatialAdaptiveEvolver +try: # PrecisionEnhancedSpatialAdaptiveEvolver + from nevergrad.optimization.lama.PrecisionEnhancedSpatialAdaptiveEvolver import ( + PrecisionEnhancedSpatialAdaptiveEvolver, + ) lama_register["PrecisionEnhancedSpatialAdaptiveEvolver"] = PrecisionEnhancedSpatialAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver").set_name("LLAMAPrecisionEnhancedSpatialAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMAPrecisionEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: # PrecisionEnhancedSpatialAdaptiveEvolver print("PrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEnhancedSpiralDifferentialClimberV4 import PrecisionEnhancedSpiralDifferentialClimberV4 - - lama_register["PrecisionEnhancedSpiralDifferentialClimberV4"] = PrecisionEnhancedSpiralDifferentialClimberV4 - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedSpiralDifferentialClimberV4 = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4").set_name("LLAMAPrecisionEnhancedSpiralDifferentialClimberV4", register=True) -except Exception as e: +try: # PrecisionEnhancedSpiralDifferentialClimberV4 + from nevergrad.optimization.lama.PrecisionEnhancedSpiralDifferentialClimberV4 import ( + PrecisionEnhancedSpiralDifferentialClimberV4, + ) + + lama_register["PrecisionEnhancedSpiralDifferentialClimberV4"] = ( + PrecisionEnhancedSpiralDifferentialClimberV4 + ) + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedSpiralDifferentialClimberV4 = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedSpiralDifferentialClimberV4" + ).set_name("LLAMAPrecisionEnhancedSpiralDifferentialClimberV4", register=True) +except Exception as e: # PrecisionEnhancedSpiralDifferentialClimberV4 print("PrecisionEnhancedSpiralDifferentialClimberV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEnhancedStrategicOptimizer import PrecisionEnhancedStrategicOptimizer +try: # PrecisionEnhancedStrategicOptimizer + from nevergrad.optimization.lama.PrecisionEnhancedStrategicOptimizer import ( + PrecisionEnhancedStrategicOptimizer, + ) lama_register["PrecisionEnhancedStrategicOptimizer"] = PrecisionEnhancedStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEnhancedStrategicOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEnhancedStrategicOptimizer").set_name("LLAMAPrecisionEnhancedStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEnhancedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEnhancedStrategicOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEnhancedStrategicOptimizer" + ).set_name("LLAMAPrecisionEnhancedStrategicOptimizer", register=True) +except Exception as e: # PrecisionEnhancedStrategicOptimizer print("PrecisionEnhancedStrategicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionEvolutionaryThermalOptimizer import PrecisionEvolutionaryThermalOptimizer +try: # PrecisionEvolutionaryThermalOptimizer + from nevergrad.optimization.lama.PrecisionEvolutionaryThermalOptimizer import ( + PrecisionEvolutionaryThermalOptimizer, + ) lama_register["PrecisionEvolutionaryThermalOptimizer"] = PrecisionEvolutionaryThermalOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMAPrecisionEvolutionaryThermalOptimizer").set_name("LLAMAPrecisionEvolutionaryThermalOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMAPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: # PrecisionEvolutionaryThermalOptimizer print("PrecisionEvolutionaryThermalOptimizer can not be imported: ", e) -try: +try: # PrecisionFocusedAdaptivePSO from nevergrad.optimization.lama.PrecisionFocusedAdaptivePSO import PrecisionFocusedAdaptivePSO lama_register["PrecisionFocusedAdaptivePSO"] = PrecisionFocusedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionFocusedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO").set_name("LLAMAPrecisionFocusedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionFocusedAdaptivePSO = NonObjectOptimizer(method="LLAMAPrecisionFocusedAdaptivePSO").set_name( + "LLAMAPrecisionFocusedAdaptivePSO", register=True + ) +except Exception as e: # PrecisionFocusedAdaptivePSO print("PrecisionFocusedAdaptivePSO can not be imported: ", e) -try: +try: # PrecisionGuidedEvolutionStrategy from nevergrad.optimization.lama.PrecisionGuidedEvolutionStrategy import PrecisionGuidedEvolutionStrategy lama_register["PrecisionGuidedEvolutionStrategy"] = PrecisionGuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionStrategy").set_name("LLAMAPrecisionGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionGuidedEvolutionStrategy" + ).set_name("LLAMAPrecisionGuidedEvolutionStrategy", register=True) +except Exception as e: # PrecisionGuidedEvolutionStrategy print("PrecisionGuidedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionGuidedEvolutionaryAlgorithm import PrecisionGuidedEvolutionaryAlgorithm +try: # PrecisionGuidedEvolutionaryAlgorithm + from nevergrad.optimization.lama.PrecisionGuidedEvolutionaryAlgorithm import ( + PrecisionGuidedEvolutionaryAlgorithm, + ) lama_register["PrecisionGuidedEvolutionaryAlgorithm"] = PrecisionGuidedEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionGuidedEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionaryAlgorithm").set_name("LLAMAPrecisionGuidedEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionGuidedEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionGuidedEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAPrecisionGuidedEvolutionaryAlgorithm" + ).set_name("LLAMAPrecisionGuidedEvolutionaryAlgorithm", register=True) +except Exception as e: # PrecisionGuidedEvolutionaryAlgorithm print("PrecisionGuidedEvolutionaryAlgorithm can not be imported: ", e) -try: +try: # PrecisionGuidedQuantumStrategy from nevergrad.optimization.lama.PrecisionGuidedQuantumStrategy import PrecisionGuidedQuantumStrategy lama_register["PrecisionGuidedQuantumStrategy"] = PrecisionGuidedQuantumStrategy - res = NonObjectOptimizer(method="LLAMAPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionGuidedQuantumStrategy = NonObjectOptimizer(method="LLAMAPrecisionGuidedQuantumStrategy").set_name("LLAMAPrecisionGuidedQuantumStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionGuidedQuantumStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionGuidedQuantumStrategy = NonObjectOptimizer( + method="LLAMAPrecisionGuidedQuantumStrategy" + ).set_name("LLAMAPrecisionGuidedQuantumStrategy", register=True) +except Exception as e: # PrecisionGuidedQuantumStrategy print("PrecisionGuidedQuantumStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionIncrementalEvolutionStrategy import PrecisionIncrementalEvolutionStrategy +try: # PrecisionIncrementalEvolutionStrategy + from nevergrad.optimization.lama.PrecisionIncrementalEvolutionStrategy import ( + PrecisionIncrementalEvolutionStrategy, + ) lama_register["PrecisionIncrementalEvolutionStrategy"] = PrecisionIncrementalEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAPrecisionIncrementalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionIncrementalEvolutionStrategy = NonObjectOptimizer(method="LLAMAPrecisionIncrementalEvolutionStrategy").set_name("LLAMAPrecisionIncrementalEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionIncrementalEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionIncrementalEvolutionStrategy = NonObjectOptimizer( + method="LLAMAPrecisionIncrementalEvolutionStrategy" + ).set_name("LLAMAPrecisionIncrementalEvolutionStrategy", register=True) +except Exception as e: # PrecisionIncrementalEvolutionStrategy print("PrecisionIncrementalEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionOptimizedEvolutionaryOptimizerV22 import PrecisionOptimizedEvolutionaryOptimizerV22 +try: # PrecisionOptimizedEvolutionaryOptimizerV22 + from nevergrad.optimization.lama.PrecisionOptimizedEvolutionaryOptimizerV22 import ( + PrecisionOptimizedEvolutionaryOptimizerV22, + ) lama_register["PrecisionOptimizedEvolutionaryOptimizerV22"] = PrecisionOptimizedEvolutionaryOptimizerV22 - res = NonObjectOptimizer(method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionOptimizedEvolutionaryOptimizerV22 = NonObjectOptimizer(method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22").set_name("LLAMAPrecisionOptimizedEvolutionaryOptimizerV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionOptimizedEvolutionaryOptimizerV22 = NonObjectOptimizer( + method="LLAMAPrecisionOptimizedEvolutionaryOptimizerV22" + ).set_name("LLAMAPrecisionOptimizedEvolutionaryOptimizerV22", register=True) +except Exception as e: # PrecisionOptimizedEvolutionaryOptimizerV22 print("PrecisionOptimizedEvolutionaryOptimizerV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionRotationalClimbOptimizer import PrecisionRotationalClimbOptimizer +try: # PrecisionRotationalClimbOptimizer + from nevergrad.optimization.lama.PrecisionRotationalClimbOptimizer import ( + PrecisionRotationalClimbOptimizer, + ) lama_register["PrecisionRotationalClimbOptimizer"] = PrecisionRotationalClimbOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionRotationalClimbOptimizer = NonObjectOptimizer(method="LLAMAPrecisionRotationalClimbOptimizer").set_name("LLAMAPrecisionRotationalClimbOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionRotationalClimbOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionRotationalClimbOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionRotationalClimbOptimizer" + ).set_name("LLAMAPrecisionRotationalClimbOptimizer", register=True) +except Exception as e: # PrecisionRotationalClimbOptimizer print("PrecisionRotationalClimbOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionScaledEvolutionarySearch import PrecisionScaledEvolutionarySearch +try: # PrecisionScaledEvolutionarySearch + from nevergrad.optimization.lama.PrecisionScaledEvolutionarySearch import ( + PrecisionScaledEvolutionarySearch, + ) lama_register["PrecisionScaledEvolutionarySearch"] = PrecisionScaledEvolutionarySearch - res = NonObjectOptimizer(method="LLAMAPrecisionScaledEvolutionarySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionScaledEvolutionarySearch = NonObjectOptimizer(method="LLAMAPrecisionScaledEvolutionarySearch").set_name("LLAMAPrecisionScaledEvolutionarySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionScaledEvolutionarySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionScaledEvolutionarySearch = NonObjectOptimizer( + method="LLAMAPrecisionScaledEvolutionarySearch" + ).set_name("LLAMAPrecisionScaledEvolutionarySearch", register=True) +except Exception as e: # PrecisionScaledEvolutionarySearch print("PrecisionScaledEvolutionarySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionSpiralDifferentialOptimizerV6 import PrecisionSpiralDifferentialOptimizerV6 +try: # PrecisionSpiralDifferentialOptimizerV6 + from nevergrad.optimization.lama.PrecisionSpiralDifferentialOptimizerV6 import ( + PrecisionSpiralDifferentialOptimizerV6, + ) lama_register["PrecisionSpiralDifferentialOptimizerV6"] = PrecisionSpiralDifferentialOptimizerV6 - res = NonObjectOptimizer(method="LLAMAPrecisionSpiralDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionSpiralDifferentialOptimizerV6 = NonObjectOptimizer(method="LLAMAPrecisionSpiralDifferentialOptimizerV6").set_name("LLAMAPrecisionSpiralDifferentialOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionSpiralDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionSpiralDifferentialOptimizerV6 = NonObjectOptimizer( + method="LLAMAPrecisionSpiralDifferentialOptimizerV6" + ).set_name("LLAMAPrecisionSpiralDifferentialOptimizerV6", register=True) +except Exception as e: # PrecisionSpiralDifferentialOptimizerV6 print("PrecisionSpiralDifferentialOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionTunedCrossoverElitistStrategyV11 import PrecisionTunedCrossoverElitistStrategyV11 +try: # PrecisionTunedCrossoverElitistStrategyV11 + from nevergrad.optimization.lama.PrecisionTunedCrossoverElitistStrategyV11 import ( + PrecisionTunedCrossoverElitistStrategyV11, + ) lama_register["PrecisionTunedCrossoverElitistStrategyV11"] = PrecisionTunedCrossoverElitistStrategyV11 - res = NonObjectOptimizer(method="LLAMAPrecisionTunedCrossoverElitistStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionTunedCrossoverElitistStrategyV11 = NonObjectOptimizer(method="LLAMAPrecisionTunedCrossoverElitistStrategyV11").set_name("LLAMAPrecisionTunedCrossoverElitistStrategyV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionTunedCrossoverElitistStrategyV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionTunedCrossoverElitistStrategyV11 = NonObjectOptimizer( + method="LLAMAPrecisionTunedCrossoverElitistStrategyV11" + ).set_name("LLAMAPrecisionTunedCrossoverElitistStrategyV11", register=True) +except Exception as e: # PrecisionTunedCrossoverElitistStrategyV11 print("PrecisionTunedCrossoverElitistStrategyV11 can not be imported: ", e) -try: +try: # PrecisionTunedEvolver from nevergrad.optimization.lama.PrecisionTunedEvolver import PrecisionTunedEvolver lama_register["PrecisionTunedEvolver"] = PrecisionTunedEvolver - res = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionTunedEvolver = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver").set_name("LLAMAPrecisionTunedEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionTunedEvolver = NonObjectOptimizer(method="LLAMAPrecisionTunedEvolver").set_name( + "LLAMAPrecisionTunedEvolver", register=True + ) +except Exception as e: # PrecisionTunedEvolver print("PrecisionTunedEvolver can not be imported: ", e) -try: +try: # PrecisionTunedHybridSearch from nevergrad.optimization.lama.PrecisionTunedHybridSearch import PrecisionTunedHybridSearch lama_register["PrecisionTunedHybridSearch"] = PrecisionTunedHybridSearch - res = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionTunedHybridSearch = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch").set_name("LLAMAPrecisionTunedHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionTunedHybridSearch = NonObjectOptimizer(method="LLAMAPrecisionTunedHybridSearch").set_name( + "LLAMAPrecisionTunedHybridSearch", register=True + ) +except Exception as e: # PrecisionTunedHybridSearch print("PrecisionTunedHybridSearch can not be imported: ", e) -try: +try: # PrecisionTunedPSO from nevergrad.optimization.lama.PrecisionTunedPSO import PrecisionTunedPSO lama_register["PrecisionTunedPSO"] = PrecisionTunedPSO - res = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionTunedPSO = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO").set_name("LLAMAPrecisionTunedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionTunedPSO = NonObjectOptimizer(method="LLAMAPrecisionTunedPSO").set_name( + "LLAMAPrecisionTunedPSO", register=True + ) +except Exception as e: # PrecisionTunedPSO print("PrecisionTunedPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.PrecisionTunedQuantumHarmonicFeedbackOptimizer import PrecisionTunedQuantumHarmonicFeedbackOptimizer - - lama_register["PrecisionTunedQuantumHarmonicFeedbackOptimizer"] = PrecisionTunedQuantumHarmonicFeedbackOptimizer - res = NonObjectOptimizer(method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer").set_name("LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer", register=True) -except Exception as e: +try: # PrecisionTunedQuantumHarmonicFeedbackOptimizer + from nevergrad.optimization.lama.PrecisionTunedQuantumHarmonicFeedbackOptimizer import ( + PrecisionTunedQuantumHarmonicFeedbackOptimizer, + ) + + lama_register["PrecisionTunedQuantumHarmonicFeedbackOptimizer"] = ( + PrecisionTunedQuantumHarmonicFeedbackOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAPrecisionTunedQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: # PrecisionTunedQuantumHarmonicFeedbackOptimizer print("PrecisionTunedQuantumHarmonicFeedbackOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveAdaptiveDifferentialEvolution import ProgressiveAdaptiveDifferentialEvolution +try: # ProgressiveAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ProgressiveAdaptiveDifferentialEvolution import ( + ProgressiveAdaptiveDifferentialEvolution, + ) lama_register["ProgressiveAdaptiveDifferentialEvolution"] = ProgressiveAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveDifferentialEvolution").set_name("LLAMAProgressiveAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAProgressiveAdaptiveDifferentialEvolution" + ).set_name("LLAMAProgressiveAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ProgressiveAdaptiveDifferentialEvolution print("ProgressiveAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveAdaptiveGlobalLocalSearch import ProgressiveAdaptiveGlobalLocalSearch +try: # ProgressiveAdaptiveGlobalLocalSearch + from nevergrad.optimization.lama.ProgressiveAdaptiveGlobalLocalSearch import ( + ProgressiveAdaptiveGlobalLocalSearch, + ) lama_register["ProgressiveAdaptiveGlobalLocalSearch"] = ProgressiveAdaptiveGlobalLocalSearch - res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveAdaptiveGlobalLocalSearch = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveGlobalLocalSearch").set_name("LLAMAProgressiveAdaptiveGlobalLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveAdaptiveGlobalLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveAdaptiveGlobalLocalSearch = NonObjectOptimizer( + method="LLAMAProgressiveAdaptiveGlobalLocalSearch" + ).set_name("LLAMAProgressiveAdaptiveGlobalLocalSearch", register=True) +except Exception as e: # ProgressiveAdaptiveGlobalLocalSearch print("ProgressiveAdaptiveGlobalLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveCohortDiversityOptimization import ProgressiveCohortDiversityOptimization +try: # ProgressiveCohortDiversityOptimization + from nevergrad.optimization.lama.ProgressiveCohortDiversityOptimization import ( + ProgressiveCohortDiversityOptimization, + ) lama_register["ProgressiveCohortDiversityOptimization"] = ProgressiveCohortDiversityOptimization - res = NonObjectOptimizer(method="LLAMAProgressiveCohortDiversityOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveCohortDiversityOptimization = NonObjectOptimizer(method="LLAMAProgressiveCohortDiversityOptimization").set_name("LLAMAProgressiveCohortDiversityOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveCohortDiversityOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveCohortDiversityOptimization = NonObjectOptimizer( + method="LLAMAProgressiveCohortDiversityOptimization" + ).set_name("LLAMAProgressiveCohortDiversityOptimization", register=True) +except Exception as e: # ProgressiveCohortDiversityOptimization print("ProgressiveCohortDiversityOptimization can not be imported: ", e) -try: +try: # ProgressiveDimensionalOptimizer from nevergrad.optimization.lama.ProgressiveDimensionalOptimizer import ProgressiveDimensionalOptimizer lama_register["ProgressiveDimensionalOptimizer"] = ProgressiveDimensionalOptimizer - res = NonObjectOptimizer(method="LLAMAProgressiveDimensionalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveDimensionalOptimizer = NonObjectOptimizer(method="LLAMAProgressiveDimensionalOptimizer").set_name("LLAMAProgressiveDimensionalOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveDimensionalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveDimensionalOptimizer = NonObjectOptimizer( + method="LLAMAProgressiveDimensionalOptimizer" + ).set_name("LLAMAProgressiveDimensionalOptimizer", register=True) +except Exception as e: # ProgressiveDimensionalOptimizer print("ProgressiveDimensionalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveEvolutionaryFireworkAlgorithm import ProgressiveEvolutionaryFireworkAlgorithm +try: # ProgressiveEvolutionaryFireworkAlgorithm + from nevergrad.optimization.lama.ProgressiveEvolutionaryFireworkAlgorithm import ( + ProgressiveEvolutionaryFireworkAlgorithm, + ) lama_register["ProgressiveEvolutionaryFireworkAlgorithm"] = ProgressiveEvolutionaryFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAProgressiveEvolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveEvolutionaryFireworkAlgorithm = NonObjectOptimizer(method="LLAMAProgressiveEvolutionaryFireworkAlgorithm").set_name("LLAMAProgressiveEvolutionaryFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveEvolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveEvolutionaryFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAProgressiveEvolutionaryFireworkAlgorithm" + ).set_name("LLAMAProgressiveEvolutionaryFireworkAlgorithm", register=True) +except Exception as e: # ProgressiveEvolutionaryFireworkAlgorithm print("ProgressiveEvolutionaryFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveHybridAdaptiveDifferentialEvolution import ProgressiveHybridAdaptiveDifferentialEvolution - - lama_register["ProgressiveHybridAdaptiveDifferentialEvolution"] = ProgressiveHybridAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution").set_name("LLAMAProgressiveHybridAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # ProgressiveHybridAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.ProgressiveHybridAdaptiveDifferentialEvolution import ( + ProgressiveHybridAdaptiveDifferentialEvolution, + ) + + lama_register["ProgressiveHybridAdaptiveDifferentialEvolution"] = ( + ProgressiveHybridAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAProgressiveHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMAProgressiveHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: # ProgressiveHybridAdaptiveDifferentialEvolution print("ProgressiveHybridAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveParticleSwarmOptimization import ProgressiveParticleSwarmOptimization +try: # ProgressiveParticleSwarmOptimization + from nevergrad.optimization.lama.ProgressiveParticleSwarmOptimization import ( + ProgressiveParticleSwarmOptimization, + ) lama_register["ProgressiveParticleSwarmOptimization"] = ProgressiveParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAProgressiveParticleSwarmOptimization").set_name("LLAMAProgressiveParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAProgressiveParticleSwarmOptimization" + ).set_name("LLAMAProgressiveParticleSwarmOptimization", register=True) +except Exception as e: # ProgressiveParticleSwarmOptimization print("ProgressiveParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressivePopulationRefinementStrategy import ProgressivePopulationRefinementStrategy +try: # ProgressivePopulationRefinementStrategy + from nevergrad.optimization.lama.ProgressivePopulationRefinementStrategy import ( + ProgressivePopulationRefinementStrategy, + ) lama_register["ProgressivePopulationRefinementStrategy"] = ProgressivePopulationRefinementStrategy - res = NonObjectOptimizer(method="LLAMAProgressivePopulationRefinementStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressivePopulationRefinementStrategy = NonObjectOptimizer(method="LLAMAProgressivePopulationRefinementStrategy").set_name("LLAMAProgressivePopulationRefinementStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressivePopulationRefinementStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressivePopulationRefinementStrategy = NonObjectOptimizer( + method="LLAMAProgressivePopulationRefinementStrategy" + ).set_name("LLAMAProgressivePopulationRefinementStrategy", register=True) +except Exception as e: # ProgressivePopulationRefinementStrategy print("ProgressivePopulationRefinementStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.ProgressiveQuorumEvolutionStrategy import ProgressiveQuorumEvolutionStrategy +try: # ProgressiveQuorumEvolutionStrategy + from nevergrad.optimization.lama.ProgressiveQuorumEvolutionStrategy import ( + ProgressiveQuorumEvolutionStrategy, + ) lama_register["ProgressiveQuorumEvolutionStrategy"] = ProgressiveQuorumEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveQuorumEvolutionStrategy = NonObjectOptimizer(method="LLAMAProgressiveQuorumEvolutionStrategy").set_name("LLAMAProgressiveQuorumEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( + method="LLAMAProgressiveQuorumEvolutionStrategy" + ).set_name("LLAMAProgressiveQuorumEvolutionStrategy", register=True) +except Exception as e: # ProgressiveQuorumEvolutionStrategy print("ProgressiveQuorumEvolutionStrategy can not be imported: ", e) -try: +try: # ProgressiveRefinementSearch from nevergrad.optimization.lama.ProgressiveRefinementSearch import ProgressiveRefinementSearch lama_register["ProgressiveRefinementSearch"] = ProgressiveRefinementSearch - res = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAProgressiveRefinementSearch = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch").set_name("LLAMAProgressiveRefinementSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAProgressiveRefinementSearch = NonObjectOptimizer(method="LLAMAProgressiveRefinementSearch").set_name( + "LLAMAProgressiveRefinementSearch", register=True + ) +except Exception as e: # ProgressiveRefinementSearch print("ProgressiveRefinementSearch can not be imported: ", e) -try: +try: # QAPSO from nevergrad.optimization.lama.QAPSO import QAPSO lama_register["QAPSO"] = QAPSO - res = NonObjectOptimizer(method="LLAMAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQAPSO = NonObjectOptimizer(method="LLAMAQAPSO").set_name("LLAMAQAPSO", register=True) -except Exception as e: +except Exception as e: # QAPSO print("QAPSO can not be imported: ", e) -try: +try: # QAPSOAIR from nevergrad.optimization.lama.QAPSOAIR import QAPSOAIR lama_register["QAPSOAIR"] = QAPSOAIR - res = NonObjectOptimizer(method="LLAMAQAPSOAIR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQAPSOAIR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQAPSOAIR = NonObjectOptimizer(method="LLAMAQAPSOAIR").set_name("LLAMAQAPSOAIR", register=True) -except Exception as e: +except Exception as e: # QAPSOAIR print("QAPSOAIR can not be imported: ", e) -try: +try: # QAPSOAIRVC from nevergrad.optimization.lama.QAPSOAIRVC import QAPSOAIRVC lama_register["QAPSOAIRVC"] = QAPSOAIRVC - res = NonObjectOptimizer(method="LLAMAQAPSOAIRVC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQAPSOAIRVC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQAPSOAIRVC = NonObjectOptimizer(method="LLAMAQAPSOAIRVC").set_name("LLAMAQAPSOAIRVC", register=True) -except Exception as e: +except Exception as e: # QAPSOAIRVC print("QAPSOAIRVC can not be imported: ", e) -try: +try: # QAPSOAIRVCHR from nevergrad.optimization.lama.QAPSOAIRVCHR import QAPSOAIRVCHR lama_register["QAPSOAIRVCHR"] = QAPSOAIRVCHR - res = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR").set_name("LLAMAQAPSOAIRVCHR", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQAPSOAIRVCHR = NonObjectOptimizer(method="LLAMAQAPSOAIRVCHR").set_name( + "LLAMAQAPSOAIRVCHR", register=True + ) +except Exception as e: # QAPSOAIRVCHR print("QAPSOAIRVCHR can not be imported: ", e) -try: +try: # QAPSOAIW from nevergrad.optimization.lama.QAPSOAIW import QAPSOAIW lama_register["QAPSOAIW"] = QAPSOAIW - res = NonObjectOptimizer(method="LLAMAQAPSOAIW")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQAPSOAIW")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQAPSOAIW = NonObjectOptimizer(method="LLAMAQAPSOAIW").set_name("LLAMAQAPSOAIW", register=True) -except Exception as e: +except Exception as e: # QAPSOAIW print("QAPSOAIW can not be imported: ", e) -try: +try: # QAPSOAIWRR from nevergrad.optimization.lama.QAPSOAIWRR import QAPSOAIWRR lama_register["QAPSOAIWRR"] = QAPSOAIWRR - res = NonObjectOptimizer(method="LLAMAQAPSOAIWRR")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQAPSOAIWRR")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQAPSOAIWRR = NonObjectOptimizer(method="LLAMAQAPSOAIWRR").set_name("LLAMAQAPSOAIWRR", register=True) -except Exception as e: +except Exception as e: # QAPSOAIWRR print("QAPSOAIWRR can not be imported: ", e) -try: +try: # QPSO from nevergrad.optimization.lama.QPSO import QPSO lama_register["QPSO"] = QPSO - res = NonObjectOptimizer(method="LLAMAQPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAQPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAQPSO = NonObjectOptimizer(method="LLAMAQPSO").set_name("LLAMAQPSO", register=True) -except Exception as e: +except Exception as e: # QPSO print("QPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAcceleratedEvolutionStrategy import QuantumAcceleratedEvolutionStrategy +try: # QuantumAcceleratedEvolutionStrategy + from nevergrad.optimization.lama.QuantumAcceleratedEvolutionStrategy import ( + QuantumAcceleratedEvolutionStrategy, + ) lama_register["QuantumAcceleratedEvolutionStrategy"] = QuantumAcceleratedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAcceleratedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumAcceleratedEvolutionStrategy").set_name("LLAMAQuantumAcceleratedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAcceleratedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedEvolutionStrategy" + ).set_name("LLAMAQuantumAcceleratedEvolutionStrategy", register=True) +except Exception as e: # QuantumAcceleratedEvolutionStrategy print("QuantumAcceleratedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAcceleratedNesterovOptimizer import QuantumAcceleratedNesterovOptimizer +try: # QuantumAcceleratedNesterovOptimizer + from nevergrad.optimization.lama.QuantumAcceleratedNesterovOptimizer import ( + QuantumAcceleratedNesterovOptimizer, + ) lama_register["QuantumAcceleratedNesterovOptimizer"] = QuantumAcceleratedNesterovOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAcceleratedNesterovOptimizer = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovOptimizer").set_name("LLAMAQuantumAcceleratedNesterovOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAcceleratedNesterovOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedNesterovOptimizer" + ).set_name("LLAMAQuantumAcceleratedNesterovOptimizer", register=True) +except Exception as e: # QuantumAcceleratedNesterovOptimizer print("QuantumAcceleratedNesterovOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAcceleratedNesterovPlusOptimizer import QuantumAcceleratedNesterovPlusOptimizer +try: # QuantumAcceleratedNesterovPlusOptimizer + from nevergrad.optimization.lama.QuantumAcceleratedNesterovPlusOptimizer import ( + QuantumAcceleratedNesterovPlusOptimizer, + ) lama_register["QuantumAcceleratedNesterovPlusOptimizer"] = QuantumAcceleratedNesterovPlusOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovPlusOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAcceleratedNesterovPlusOptimizer = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovPlusOptimizer").set_name("LLAMAQuantumAcceleratedNesterovPlusOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAcceleratedNesterovPlusOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAcceleratedNesterovPlusOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAcceleratedNesterovPlusOptimizer" + ).set_name("LLAMAQuantumAcceleratedNesterovPlusOptimizer", register=True) +except Exception as e: # QuantumAcceleratedNesterovPlusOptimizer print("QuantumAcceleratedNesterovPlusOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV5 import QuantumAdaptiveCognitionOptimizerV5 +try: # QuantumAdaptiveCognitionOptimizerV5 + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV5 import ( + QuantumAdaptiveCognitionOptimizerV5, + ) lama_register["QuantumAdaptiveCognitionOptimizerV5"] = QuantumAdaptiveCognitionOptimizerV5 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveCognitionOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV5").set_name("LLAMAQuantumAdaptiveCognitionOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveCognitionOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCognitionOptimizerV5" + ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV5", register=True) +except Exception as e: # QuantumAdaptiveCognitionOptimizerV5 print("QuantumAdaptiveCognitionOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV6 import QuantumAdaptiveCognitionOptimizerV6 +try: # QuantumAdaptiveCognitionOptimizerV6 + from nevergrad.optimization.lama.QuantumAdaptiveCognitionOptimizerV6 import ( + QuantumAdaptiveCognitionOptimizerV6, + ) lama_register["QuantumAdaptiveCognitionOptimizerV6"] = QuantumAdaptiveCognitionOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveCognitionOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV6").set_name("LLAMAQuantumAdaptiveCognitionOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCognitionOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveCognitionOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCognitionOptimizerV6" + ).set_name("LLAMAQuantumAdaptiveCognitionOptimizerV6", register=True) +except Exception as e: # QuantumAdaptiveCognitionOptimizerV6 print("QuantumAdaptiveCognitionOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveConvergenceOptimizer import QuantumAdaptiveConvergenceOptimizer +try: # QuantumAdaptiveConvergenceOptimizer + from nevergrad.optimization.lama.QuantumAdaptiveConvergenceOptimizer import ( + QuantumAdaptiveConvergenceOptimizer, + ) lama_register["QuantumAdaptiveConvergenceOptimizer"] = QuantumAdaptiveConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveConvergenceOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveConvergenceOptimizer").set_name("LLAMAQuantumAdaptiveConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveConvergenceOptimizer" + ).set_name("LLAMAQuantumAdaptiveConvergenceOptimizer", register=True) +except Exception as e: # QuantumAdaptiveConvergenceOptimizer print("QuantumAdaptiveConvergenceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveCrossoverRefinement import QuantumAdaptiveCrossoverRefinement +try: # QuantumAdaptiveCrossoverRefinement + from nevergrad.optimization.lama.QuantumAdaptiveCrossoverRefinement import ( + QuantumAdaptiveCrossoverRefinement, + ) lama_register["QuantumAdaptiveCrossoverRefinement"] = QuantumAdaptiveCrossoverRefinement - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCrossoverRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveCrossoverRefinement = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCrossoverRefinement").set_name("LLAMAQuantumAdaptiveCrossoverRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveCrossoverRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveCrossoverRefinement = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveCrossoverRefinement" + ).set_name("LLAMAQuantumAdaptiveCrossoverRefinement", register=True) +except Exception as e: # QuantumAdaptiveCrossoverRefinement print("QuantumAdaptiveCrossoverRefinement can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - - lama_register["QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory").set_name("LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) -except Exception as e: +try: # QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + from nevergrad.optimization.lama.QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory import ( + QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory, + ) + + lama_register["QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory"] = ( + QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory" + ).set_name("LLAMAQuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory", register=True) +except Exception as e: # QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory print("QuantumAdaptiveDEWithElitistDynamicRestartAndDifferentialMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolution import QuantumAdaptiveDifferentialEvolution +try: # QuantumAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolution import ( + QuantumAdaptiveDifferentialEvolution, + ) lama_register["QuantumAdaptiveDifferentialEvolution"] = QuantumAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolution").set_name("LLAMAQuantumAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolution" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolution", register=True) +except Exception as e: # QuantumAdaptiveDifferentialEvolution print("QuantumAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV3 import QuantumAdaptiveDifferentialEvolutionV3 +try: # QuantumAdaptiveDifferentialEvolutionV3 + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV3 import ( + QuantumAdaptiveDifferentialEvolutionV3, + ) lama_register["QuantumAdaptiveDifferentialEvolutionV3"] = QuantumAdaptiveDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV3").set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolutionV3" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV3", register=True) +except Exception as e: # QuantumAdaptiveDifferentialEvolutionV3 print("QuantumAdaptiveDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV4 import QuantumAdaptiveDifferentialEvolutionV4 +try: # QuantumAdaptiveDifferentialEvolutionV4 + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialEvolutionV4 import ( + QuantumAdaptiveDifferentialEvolutionV4, + ) lama_register["QuantumAdaptiveDifferentialEvolutionV4"] = QuantumAdaptiveDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV4").set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialEvolutionV4" + ).set_name("LLAMAQuantumAdaptiveDifferentialEvolutionV4", register=True) +except Exception as e: # QuantumAdaptiveDifferentialEvolutionV4 print("QuantumAdaptiveDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV10 import QuantumAdaptiveDifferentialStrategyV10 +try: # QuantumAdaptiveDifferentialStrategyV10 + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV10 import ( + QuantumAdaptiveDifferentialStrategyV10, + ) lama_register["QuantumAdaptiveDifferentialStrategyV10"] = QuantumAdaptiveDifferentialStrategyV10 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialStrategyV10 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV10").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialStrategyV10 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV10" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV10", register=True) +except Exception as e: # QuantumAdaptiveDifferentialStrategyV10 print("QuantumAdaptiveDifferentialStrategyV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV11 import QuantumAdaptiveDifferentialStrategyV11 +try: # QuantumAdaptiveDifferentialStrategyV11 + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV11 import ( + QuantumAdaptiveDifferentialStrategyV11, + ) lama_register["QuantumAdaptiveDifferentialStrategyV11"] = QuantumAdaptiveDifferentialStrategyV11 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialStrategyV11 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV11").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialStrategyV11 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV11" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV11", register=True) +except Exception as e: # QuantumAdaptiveDifferentialStrategyV11 print("QuantumAdaptiveDifferentialStrategyV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV12 import QuantumAdaptiveDifferentialStrategyV12 +try: # QuantumAdaptiveDifferentialStrategyV12 + from nevergrad.optimization.lama.QuantumAdaptiveDifferentialStrategyV12 import ( + QuantumAdaptiveDifferentialStrategyV12, + ) lama_register["QuantumAdaptiveDifferentialStrategyV12"] = QuantumAdaptiveDifferentialStrategyV12 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDifferentialStrategyV12 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV12").set_name("LLAMAQuantumAdaptiveDifferentialStrategyV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDifferentialStrategyV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDifferentialStrategyV12 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDifferentialStrategyV12" + ).set_name("LLAMAQuantumAdaptiveDifferentialStrategyV12", register=True) +except Exception as e: # QuantumAdaptiveDifferentialStrategyV12 print("QuantumAdaptiveDifferentialStrategyV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV11 import QuantumAdaptiveDiversifiedDynamicHybridSearchV11 - - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV11"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV11 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11", register=True) -except Exception as e: +try: # QuantumAdaptiveDiversifiedDynamicHybridSearchV11 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV11 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV11, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV11"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV11 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV11", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedDynamicHybridSearchV11 print("QuantumAdaptiveDiversifiedDynamicHybridSearchV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV12 import QuantumAdaptiveDiversifiedDynamicHybridSearchV12 - - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV12"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV12 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12", register=True) -except Exception as e: +try: # QuantumAdaptiveDiversifiedDynamicHybridSearchV12 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV12 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV12, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV12"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV12 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV12", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedDynamicHybridSearchV12 print("QuantumAdaptiveDiversifiedDynamicHybridSearchV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV13 import QuantumAdaptiveDiversifiedDynamicHybridSearchV13 - - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV13"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV13 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13", register=True) -except Exception as e: +try: # QuantumAdaptiveDiversifiedDynamicHybridSearchV13 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV13 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV13, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV13"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV13 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV13", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedDynamicHybridSearchV13 print("QuantumAdaptiveDiversifiedDynamicHybridSearchV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV14 import QuantumAdaptiveDiversifiedDynamicHybridSearchV14 - - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV14"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV14 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14", register=True) -except Exception as e: +try: # QuantumAdaptiveDiversifiedDynamicHybridSearchV14 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV14 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV14, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV14"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV14 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV14", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedDynamicHybridSearchV14 print("QuantumAdaptiveDiversifiedDynamicHybridSearchV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV15 import QuantumAdaptiveDiversifiedDynamicHybridSearchV15 - - lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV15"] = QuantumAdaptiveDiversifiedDynamicHybridSearchV15 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15").set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15", register=True) -except Exception as e: +try: # QuantumAdaptiveDiversifiedDynamicHybridSearchV15 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedDynamicHybridSearchV15 import ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV15, + ) + + lama_register["QuantumAdaptiveDiversifiedDynamicHybridSearchV15"] = ( + QuantumAdaptiveDiversifiedDynamicHybridSearchV15 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15" + ).set_name("LLAMAQuantumAdaptiveDiversifiedDynamicHybridSearchV15", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedDynamicHybridSearchV15 print("QuantumAdaptiveDiversifiedDynamicHybridSearchV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedHybridSearchV10 import QuantumAdaptiveDiversifiedHybridSearchV10 +try: # QuantumAdaptiveDiversifiedHybridSearchV10 + from nevergrad.optimization.lama.QuantumAdaptiveDiversifiedHybridSearchV10 import ( + QuantumAdaptiveDiversifiedHybridSearchV10, + ) lama_register["QuantumAdaptiveDiversifiedHybridSearchV10"] = QuantumAdaptiveDiversifiedHybridSearchV10 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDiversifiedHybridSearchV10 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10").set_name("LLAMAQuantumAdaptiveDiversifiedHybridSearchV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDiversifiedHybridSearchV10 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDiversifiedHybridSearchV10" + ).set_name("LLAMAQuantumAdaptiveDiversifiedHybridSearchV10", register=True) +except Exception as e: # QuantumAdaptiveDiversifiedHybridSearchV10 print("QuantumAdaptiveDiversifiedHybridSearchV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExploration import QuantumAdaptiveDynamicExploration +try: # QuantumAdaptiveDynamicExploration + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExploration import ( + QuantumAdaptiveDynamicExploration, + ) lama_register["QuantumAdaptiveDynamicExploration"] = QuantumAdaptiveDynamicExploration - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExploration = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExploration").set_name("LLAMAQuantumAdaptiveDynamicExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExploration = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExploration" + ).set_name("LLAMAQuantumAdaptiveDynamicExploration", register=True) +except Exception as e: # QuantumAdaptiveDynamicExploration print("QuantumAdaptiveDynamicExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV2 import QuantumAdaptiveDynamicExplorationV2 +try: # QuantumAdaptiveDynamicExplorationV2 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV2 import ( + QuantumAdaptiveDynamicExplorationV2, + ) lama_register["QuantumAdaptiveDynamicExplorationV2"] = QuantumAdaptiveDynamicExplorationV2 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV2").set_name("LLAMAQuantumAdaptiveDynamicExplorationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV2" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV2", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV2 print("QuantumAdaptiveDynamicExplorationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV3 import QuantumAdaptiveDynamicExplorationV3 +try: # QuantumAdaptiveDynamicExplorationV3 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV3 import ( + QuantumAdaptiveDynamicExplorationV3, + ) lama_register["QuantumAdaptiveDynamicExplorationV3"] = QuantumAdaptiveDynamicExplorationV3 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV3").set_name("LLAMAQuantumAdaptiveDynamicExplorationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV3" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV3", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV3 print("QuantumAdaptiveDynamicExplorationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV4 import QuantumAdaptiveDynamicExplorationV4 +try: # QuantumAdaptiveDynamicExplorationV4 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV4 import ( + QuantumAdaptiveDynamicExplorationV4, + ) lama_register["QuantumAdaptiveDynamicExplorationV4"] = QuantumAdaptiveDynamicExplorationV4 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV4").set_name("LLAMAQuantumAdaptiveDynamicExplorationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV4" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV4", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV4 print("QuantumAdaptiveDynamicExplorationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV5 import QuantumAdaptiveDynamicExplorationV5 +try: # QuantumAdaptiveDynamicExplorationV5 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV5 import ( + QuantumAdaptiveDynamicExplorationV5, + ) lama_register["QuantumAdaptiveDynamicExplorationV5"] = QuantumAdaptiveDynamicExplorationV5 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV5 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV5").set_name("LLAMAQuantumAdaptiveDynamicExplorationV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV5 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV5" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV5", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV5 print("QuantumAdaptiveDynamicExplorationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV6 import QuantumAdaptiveDynamicExplorationV6 +try: # QuantumAdaptiveDynamicExplorationV6 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV6 import ( + QuantumAdaptiveDynamicExplorationV6, + ) lama_register["QuantumAdaptiveDynamicExplorationV6"] = QuantumAdaptiveDynamicExplorationV6 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV6").set_name("LLAMAQuantumAdaptiveDynamicExplorationV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV6" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV6", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV6 print("QuantumAdaptiveDynamicExplorationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV7 import QuantumAdaptiveDynamicExplorationV7 +try: # QuantumAdaptiveDynamicExplorationV7 + from nevergrad.optimization.lama.QuantumAdaptiveDynamicExplorationV7 import ( + QuantumAdaptiveDynamicExplorationV7, + ) lama_register["QuantumAdaptiveDynamicExplorationV7"] = QuantumAdaptiveDynamicExplorationV7 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicExplorationV7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV7").set_name("LLAMAQuantumAdaptiveDynamicExplorationV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicExplorationV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicExplorationV7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicExplorationV7" + ).set_name("LLAMAQuantumAdaptiveDynamicExplorationV7", register=True) +except Exception as e: # QuantumAdaptiveDynamicExplorationV7 print("QuantumAdaptiveDynamicExplorationV7 can not be imported: ", e) -try: +try: # QuantumAdaptiveDynamicStrategyV7 from nevergrad.optimization.lama.QuantumAdaptiveDynamicStrategyV7 import QuantumAdaptiveDynamicStrategyV7 lama_register["QuantumAdaptiveDynamicStrategyV7"] = QuantumAdaptiveDynamicStrategyV7 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveDynamicStrategyV7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicStrategyV7").set_name("LLAMAQuantumAdaptiveDynamicStrategyV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveDynamicStrategyV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveDynamicStrategyV7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveDynamicStrategyV7" + ).set_name("LLAMAQuantumAdaptiveDynamicStrategyV7", register=True) +except Exception as e: # QuantumAdaptiveDynamicStrategyV7 print("QuantumAdaptiveDynamicStrategyV7 can not be imported: ", e) -try: +try: # QuantumAdaptiveEliteGuidedSearch from nevergrad.optimization.lama.QuantumAdaptiveEliteGuidedSearch import QuantumAdaptiveEliteGuidedSearch lama_register["QuantumAdaptiveEliteGuidedSearch"] = QuantumAdaptiveEliteGuidedSearch - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveEliteGuidedSearch").set_name("LLAMAQuantumAdaptiveEliteGuidedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveEliteGuidedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveEliteGuidedSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveEliteGuidedSearch" + ).set_name("LLAMAQuantumAdaptiveEliteGuidedSearch", register=True) +except Exception as e: # QuantumAdaptiveEliteGuidedSearch print("QuantumAdaptiveEliteGuidedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveFireworksOptimizer import QuantumAdaptiveFireworksOptimizer +try: # QuantumAdaptiveFireworksOptimizer + from nevergrad.optimization.lama.QuantumAdaptiveFireworksOptimizer import ( + QuantumAdaptiveFireworksOptimizer, + ) lama_register["QuantumAdaptiveFireworksOptimizer"] = QuantumAdaptiveFireworksOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveFireworksOptimizer").set_name("LLAMAQuantumAdaptiveFireworksOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveFireworksOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveFireworksOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveFireworksOptimizer" + ).set_name("LLAMAQuantumAdaptiveFireworksOptimizer", register=True) +except Exception as e: # QuantumAdaptiveFireworksOptimizer print("QuantumAdaptiveFireworksOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveGradientDiversityExplorer import QuantumAdaptiveGradientDiversityExplorer +try: # QuantumAdaptiveGradientDiversityExplorer + from nevergrad.optimization.lama.QuantumAdaptiveGradientDiversityExplorer import ( + QuantumAdaptiveGradientDiversityExplorer, + ) lama_register["QuantumAdaptiveGradientDiversityExplorer"] = QuantumAdaptiveGradientDiversityExplorer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientDiversityExplorer").set_name("LLAMAQuantumAdaptiveGradientDiversityExplorer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientDiversityExplorer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveGradientDiversityExplorer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveGradientDiversityExplorer" + ).set_name("LLAMAQuantumAdaptiveGradientDiversityExplorer", register=True) +except Exception as e: # QuantumAdaptiveGradientDiversityExplorer print("QuantumAdaptiveGradientDiversityExplorer can not be imported: ", e) -try: +try: # QuantumAdaptiveGradientSearch from nevergrad.optimization.lama.QuantumAdaptiveGradientSearch import QuantumAdaptiveGradientSearch lama_register["QuantumAdaptiveGradientSearch"] = QuantumAdaptiveGradientSearch - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveGradientSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientSearch").set_name("LLAMAQuantumAdaptiveGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveGradientSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveGradientSearch" + ).set_name("LLAMAQuantumAdaptiveGradientSearch", register=True) +except Exception as e: # QuantumAdaptiveGradientSearch print("QuantumAdaptiveGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveHarmonicOptimizerV8 import QuantumAdaptiveHarmonicOptimizerV8 +try: # QuantumAdaptiveHarmonicOptimizerV8 + from nevergrad.optimization.lama.QuantumAdaptiveHarmonicOptimizerV8 import ( + QuantumAdaptiveHarmonicOptimizerV8, + ) lama_register["QuantumAdaptiveHarmonicOptimizerV8"] = QuantumAdaptiveHarmonicOptimizerV8 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHarmonicOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveHarmonicOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHarmonicOptimizerV8").set_name("LLAMAQuantumAdaptiveHarmonicOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHarmonicOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveHarmonicOptimizerV8 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHarmonicOptimizerV8" + ).set_name("LLAMAQuantumAdaptiveHarmonicOptimizerV8", register=True) +except Exception as e: # QuantumAdaptiveHarmonicOptimizerV8 print("QuantumAdaptiveHarmonicOptimizerV8 can not be imported: ", e) -try: +try: # QuantumAdaptiveHybridDEPSO_V7 from nevergrad.optimization.lama.QuantumAdaptiveHybridDEPSO_V7 import QuantumAdaptiveHybridDEPSO_V7 lama_register["QuantumAdaptiveHybridDEPSO_V7"] = QuantumAdaptiveHybridDEPSO_V7 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridDEPSO_V7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveHybridDEPSO_V7 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridDEPSO_V7").set_name("LLAMAQuantumAdaptiveHybridDEPSO_V7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridDEPSO_V7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveHybridDEPSO_V7 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridDEPSO_V7" + ).set_name("LLAMAQuantumAdaptiveHybridDEPSO_V7", register=True) +except Exception as e: # QuantumAdaptiveHybridDEPSO_V7 print("QuantumAdaptiveHybridDEPSO_V7 can not be imported: ", e) -try: +try: # QuantumAdaptiveHybridOptimizer from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizer import QuantumAdaptiveHybridOptimizer lama_register["QuantumAdaptiveHybridOptimizer"] = QuantumAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizer").set_name("LLAMAQuantumAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridOptimizer" + ).set_name("LLAMAQuantumAdaptiveHybridOptimizer", register=True) +except Exception as e: # QuantumAdaptiveHybridOptimizer print("QuantumAdaptiveHybridOptimizer can not be imported: ", e) -try: +try: # QuantumAdaptiveHybridOptimizerV3 from nevergrad.optimization.lama.QuantumAdaptiveHybridOptimizerV3 import QuantumAdaptiveHybridOptimizerV3 lama_register["QuantumAdaptiveHybridOptimizerV3"] = QuantumAdaptiveHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizerV3").set_name("LLAMAQuantumAdaptiveHybridOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridOptimizerV3" + ).set_name("LLAMAQuantumAdaptiveHybridOptimizerV3", register=True) +except Exception as e: # QuantumAdaptiveHybridOptimizerV3 print("QuantumAdaptiveHybridOptimizerV3 can not be imported: ", e) -try: +try: # QuantumAdaptiveHybridStrategyV4 from nevergrad.optimization.lama.QuantumAdaptiveHybridStrategyV4 import QuantumAdaptiveHybridStrategyV4 lama_register["QuantumAdaptiveHybridStrategyV4"] = QuantumAdaptiveHybridStrategyV4 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveHybridStrategyV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridStrategyV4").set_name("LLAMAQuantumAdaptiveHybridStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveHybridStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveHybridStrategyV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveHybridStrategyV4" + ).set_name("LLAMAQuantumAdaptiveHybridStrategyV4", register=True) +except Exception as e: # QuantumAdaptiveHybridStrategyV4 print("QuantumAdaptiveHybridStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveLevyDifferentialSearch import QuantumAdaptiveLevyDifferentialSearch +try: # QuantumAdaptiveLevyDifferentialSearch + from nevergrad.optimization.lama.QuantumAdaptiveLevyDifferentialSearch import ( + QuantumAdaptiveLevyDifferentialSearch, + ) lama_register["QuantumAdaptiveLevyDifferentialSearch"] = QuantumAdaptiveLevyDifferentialSearch - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveLevyDifferentialSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDifferentialSearch").set_name("LLAMAQuantumAdaptiveLevyDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveLevyDifferentialSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyDifferentialSearch" + ).set_name("LLAMAQuantumAdaptiveLevyDifferentialSearch", register=True) +except Exception as e: # QuantumAdaptiveLevyDifferentialSearch print("QuantumAdaptiveLevyDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveLevyDynamicDifferentialSwarmV4 import QuantumAdaptiveLevyDynamicDifferentialSwarmV4 - - lama_register["QuantumAdaptiveLevyDynamicDifferentialSwarmV4"] = QuantumAdaptiveLevyDynamicDifferentialSwarmV4 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4").set_name("LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4", register=True) -except Exception as e: +try: # QuantumAdaptiveLevyDynamicDifferentialSwarmV4 + from nevergrad.optimization.lama.QuantumAdaptiveLevyDynamicDifferentialSwarmV4 import ( + QuantumAdaptiveLevyDynamicDifferentialSwarmV4, + ) + + lama_register["QuantumAdaptiveLevyDynamicDifferentialSwarmV4"] = ( + QuantumAdaptiveLevyDynamicDifferentialSwarmV4 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4" + ).set_name("LLAMAQuantumAdaptiveLevyDynamicDifferentialSwarmV4", register=True) +except Exception as e: # QuantumAdaptiveLevyDynamicDifferentialSwarmV4 print("QuantumAdaptiveLevyDynamicDifferentialSwarmV4 can not be imported: ", e) -try: +try: # QuantumAdaptiveLevyMemeticSearch from nevergrad.optimization.lama.QuantumAdaptiveLevyMemeticSearch import QuantumAdaptiveLevyMemeticSearch lama_register["QuantumAdaptiveLevyMemeticSearch"] = QuantumAdaptiveLevyMemeticSearch - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveLevyMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyMemeticSearch").set_name("LLAMAQuantumAdaptiveLevyMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveLevyMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyMemeticSearch" + ).set_name("LLAMAQuantumAdaptiveLevyMemeticSearch", register=True) +except Exception as e: # QuantumAdaptiveLevyMemeticSearch print("QuantumAdaptiveLevyMemeticSearch can not be imported: ", e) -try: +try: # QuantumAdaptiveLevyOptimizer from nevergrad.optimization.lama.QuantumAdaptiveLevyOptimizer import QuantumAdaptiveLevyOptimizer lama_register["QuantumAdaptiveLevyOptimizer"] = QuantumAdaptiveLevyOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveLevyOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyOptimizer").set_name("LLAMAQuantumAdaptiveLevyOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveLevyOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevyOptimizer" + ).set_name("LLAMAQuantumAdaptiveLevyOptimizer", register=True) +except Exception as e: # QuantumAdaptiveLevyOptimizer print("QuantumAdaptiveLevyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveLevySwarmOptimizationV2 import QuantumAdaptiveLevySwarmOptimizationV2 +try: # QuantumAdaptiveLevySwarmOptimizationV2 + from nevergrad.optimization.lama.QuantumAdaptiveLevySwarmOptimizationV2 import ( + QuantumAdaptiveLevySwarmOptimizationV2, + ) lama_register["QuantumAdaptiveLevySwarmOptimizationV2"] = QuantumAdaptiveLevySwarmOptimizationV2 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveLevySwarmOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2").set_name("LLAMAQuantumAdaptiveLevySwarmOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveLevySwarmOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveLevySwarmOptimizationV2" + ).set_name("LLAMAQuantumAdaptiveLevySwarmOptimizationV2", register=True) +except Exception as e: # QuantumAdaptiveLevySwarmOptimizationV2 print("QuantumAdaptiveLevySwarmOptimizationV2 can not be imported: ", e) -try: +try: # QuantumAdaptiveMemeticAlgorithm from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithm import QuantumAdaptiveMemeticAlgorithm lama_register["QuantumAdaptiveMemeticAlgorithm"] = QuantumAdaptiveMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMemeticAlgorithm = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithm").set_name("LLAMAQuantumAdaptiveMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticAlgorithm" + ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithm", register=True) +except Exception as e: # QuantumAdaptiveMemeticAlgorithm print("QuantumAdaptiveMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithmV2 import QuantumAdaptiveMemeticAlgorithmV2 +try: # QuantumAdaptiveMemeticAlgorithmV2 + from nevergrad.optimization.lama.QuantumAdaptiveMemeticAlgorithmV2 import ( + QuantumAdaptiveMemeticAlgorithmV2, + ) lama_register["QuantumAdaptiveMemeticAlgorithmV2"] = QuantumAdaptiveMemeticAlgorithmV2 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithmV2").set_name("LLAMAQuantumAdaptiveMemeticAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticAlgorithmV2" + ).set_name("LLAMAQuantumAdaptiveMemeticAlgorithmV2", register=True) +except Exception as e: # QuantumAdaptiveMemeticAlgorithmV2 print("QuantumAdaptiveMemeticAlgorithmV2 can not be imported: ", e) -try: +try: # QuantumAdaptiveMemeticSearchV2 from nevergrad.optimization.lama.QuantumAdaptiveMemeticSearchV2 import QuantumAdaptiveMemeticSearchV2 lama_register["QuantumAdaptiveMemeticSearchV2"] = QuantumAdaptiveMemeticSearchV2 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMemeticSearchV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticSearchV2").set_name("LLAMAQuantumAdaptiveMemeticSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMemeticSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMemeticSearchV2" + ).set_name("LLAMAQuantumAdaptiveMemeticSearchV2", register=True) +except Exception as e: # QuantumAdaptiveMemeticSearchV2 print("QuantumAdaptiveMemeticSearchV2 can not be imported: ", e) -try: +try: # QuantumAdaptiveMultiPhaseDE_v6 from nevergrad.optimization.lama.QuantumAdaptiveMultiPhaseDE_v6 import QuantumAdaptiveMultiPhaseDE_v6 lama_register["QuantumAdaptiveMultiPhaseDE_v6"] = QuantumAdaptiveMultiPhaseDE_v6 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPhaseDE_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMultiPhaseDE_v6 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPhaseDE_v6").set_name("LLAMAQuantumAdaptiveMultiPhaseDE_v6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPhaseDE_v6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMultiPhaseDE_v6 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiPhaseDE_v6" + ).set_name("LLAMAQuantumAdaptiveMultiPhaseDE_v6", register=True) +except Exception as e: # QuantumAdaptiveMultiPhaseDE_v6 print("QuantumAdaptiveMultiPhaseDE_v6 can not be imported: ", e) -try: +try: # QuantumAdaptiveMultiPopulationDE from nevergrad.optimization.lama.QuantumAdaptiveMultiPopulationDE import QuantumAdaptiveMultiPopulationDE lama_register["QuantumAdaptiveMultiPopulationDE"] = QuantumAdaptiveMultiPopulationDE - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPopulationDE").set_name("LLAMAQuantumAdaptiveMultiPopulationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiPopulationDE" + ).set_name("LLAMAQuantumAdaptiveMultiPopulationDE", register=True) +except Exception as e: # QuantumAdaptiveMultiPopulationDE print("QuantumAdaptiveMultiPopulationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveMultiStrategyEvolution import QuantumAdaptiveMultiStrategyEvolution +try: # QuantumAdaptiveMultiStrategyEvolution + from nevergrad.optimization.lama.QuantumAdaptiveMultiStrategyEvolution import ( + QuantumAdaptiveMultiStrategyEvolution, + ) lama_register["QuantumAdaptiveMultiStrategyEvolution"] = QuantumAdaptiveMultiStrategyEvolution - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiStrategyEvolution").set_name("LLAMAQuantumAdaptiveMultiStrategyEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveMultiStrategyEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveMultiStrategyEvolution = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveMultiStrategyEvolution" + ).set_name("LLAMAQuantumAdaptiveMultiStrategyEvolution", register=True) +except Exception as e: # QuantumAdaptiveMultiStrategyEvolution print("QuantumAdaptiveMultiStrategyEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveNesterovGradientEnhancer import QuantumAdaptiveNesterovGradientEnhancer +try: # QuantumAdaptiveNesterovGradientEnhancer + from nevergrad.optimization.lama.QuantumAdaptiveNesterovGradientEnhancer import ( + QuantumAdaptiveNesterovGradientEnhancer, + ) lama_register["QuantumAdaptiveNesterovGradientEnhancer"] = QuantumAdaptiveNesterovGradientEnhancer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovGradientEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveNesterovGradientEnhancer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovGradientEnhancer").set_name("LLAMAQuantumAdaptiveNesterovGradientEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovGradientEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveNesterovGradientEnhancer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveNesterovGradientEnhancer" + ).set_name("LLAMAQuantumAdaptiveNesterovGradientEnhancer", register=True) +except Exception as e: # QuantumAdaptiveNesterovGradientEnhancer print("QuantumAdaptiveNesterovGradientEnhancer can not be imported: ", e) -try: +try: # QuantumAdaptiveNesterovSynergy from nevergrad.optimization.lama.QuantumAdaptiveNesterovSynergy import QuantumAdaptiveNesterovSynergy lama_register["QuantumAdaptiveNesterovSynergy"] = QuantumAdaptiveNesterovSynergy - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveNesterovSynergy = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovSynergy").set_name("LLAMAQuantumAdaptiveNesterovSynergy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveNesterovSynergy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveNesterovSynergy = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveNesterovSynergy" + ).set_name("LLAMAQuantumAdaptiveNesterovSynergy", register=True) +except Exception as e: # QuantumAdaptiveNesterovSynergy print("QuantumAdaptiveNesterovSynergy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementOptimizer import QuantumAdaptiveRefinementOptimizer +try: # QuantumAdaptiveRefinementOptimizer + from nevergrad.optimization.lama.QuantumAdaptiveRefinementOptimizer import ( + QuantumAdaptiveRefinementOptimizer, + ) lama_register["QuantumAdaptiveRefinementOptimizer"] = QuantumAdaptiveRefinementOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementOptimizer").set_name("LLAMAQuantumAdaptiveRefinementOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementOptimizer" + ).set_name("LLAMAQuantumAdaptiveRefinementOptimizer", register=True) +except Exception as e: # QuantumAdaptiveRefinementOptimizer print("QuantumAdaptiveRefinementOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategy import QuantumAdaptiveRefinementStrategy +try: # QuantumAdaptiveRefinementStrategy + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategy import ( + QuantumAdaptiveRefinementStrategy, + ) lama_register["QuantumAdaptiveRefinementStrategy"] = QuantumAdaptiveRefinementStrategy - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveRefinementStrategy = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategy").set_name("LLAMAQuantumAdaptiveRefinementStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveRefinementStrategy = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementStrategy" + ).set_name("LLAMAQuantumAdaptiveRefinementStrategy", register=True) +except Exception as e: # QuantumAdaptiveRefinementStrategy print("QuantumAdaptiveRefinementStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategyV2 import QuantumAdaptiveRefinementStrategyV2 +try: # QuantumAdaptiveRefinementStrategyV2 + from nevergrad.optimization.lama.QuantumAdaptiveRefinementStrategyV2 import ( + QuantumAdaptiveRefinementStrategyV2, + ) lama_register["QuantumAdaptiveRefinementStrategyV2"] = QuantumAdaptiveRefinementStrategyV2 - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveRefinementStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategyV2").set_name("LLAMAQuantumAdaptiveRefinementStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveRefinementStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveRefinementStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveRefinementStrategyV2" + ).set_name("LLAMAQuantumAdaptiveRefinementStrategyV2", register=True) +except Exception as e: # QuantumAdaptiveRefinementStrategyV2 print("QuantumAdaptiveRefinementStrategyV2 can not be imported: ", e) -try: +try: # QuantumAdaptiveStrategicEnhancer from nevergrad.optimization.lama.QuantumAdaptiveStrategicEnhancer import QuantumAdaptiveStrategicEnhancer lama_register["QuantumAdaptiveStrategicEnhancer"] = QuantumAdaptiveStrategicEnhancer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveStrategicEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveStrategicEnhancer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveStrategicEnhancer").set_name("LLAMAQuantumAdaptiveStrategicEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveStrategicEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveStrategicEnhancer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveStrategicEnhancer" + ).set_name("LLAMAQuantumAdaptiveStrategicEnhancer", register=True) +except Exception as e: # QuantumAdaptiveStrategicEnhancer print("QuantumAdaptiveStrategicEnhancer can not be imported: ", e) -try: +try: # QuantumAdaptiveVelocityOptimizer from nevergrad.optimization.lama.QuantumAdaptiveVelocityOptimizer import QuantumAdaptiveVelocityOptimizer lama_register["QuantumAdaptiveVelocityOptimizer"] = QuantumAdaptiveVelocityOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer(method="LLAMAQuantumAdaptiveVelocityOptimizer").set_name("LLAMAQuantumAdaptiveVelocityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( + method="LLAMAQuantumAdaptiveVelocityOptimizer" + ).set_name("LLAMAQuantumAdaptiveVelocityOptimizer", register=True) +except Exception as e: # QuantumAdaptiveVelocityOptimizer print("QuantumAdaptiveVelocityOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumAnnealingDifferentialEvolution import QuantumAnnealingDifferentialEvolution +try: # QuantumAnnealingDifferentialEvolution + from nevergrad.optimization.lama.QuantumAnnealingDifferentialEvolution import ( + QuantumAnnealingDifferentialEvolution, + ) lama_register["QuantumAnnealingDifferentialEvolution"] = QuantumAnnealingDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAnnealingDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumAnnealingDifferentialEvolution").set_name("LLAMAQuantumAnnealingDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAnnealingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAnnealingDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumAnnealingDifferentialEvolution" + ).set_name("LLAMAQuantumAnnealingDifferentialEvolution", register=True) +except Exception as e: # QuantumAnnealingDifferentialEvolution print("QuantumAnnealingDifferentialEvolution can not be imported: ", e) -try: +try: # QuantumAssistedHybridOptimizerV1 from nevergrad.optimization.lama.QuantumAssistedHybridOptimizerV1 import QuantumAssistedHybridOptimizerV1 lama_register["QuantumAssistedHybridOptimizerV1"] = QuantumAssistedHybridOptimizerV1 - res = NonObjectOptimizer(method="LLAMAQuantumAssistedHybridOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumAssistedHybridOptimizerV1 = NonObjectOptimizer(method="LLAMAQuantumAssistedHybridOptimizerV1").set_name("LLAMAQuantumAssistedHybridOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumAssistedHybridOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumAssistedHybridOptimizerV1 = NonObjectOptimizer( + method="LLAMAQuantumAssistedHybridOptimizerV1" + ).set_name("LLAMAQuantumAssistedHybridOptimizerV1", register=True) +except Exception as e: # QuantumAssistedHybridOptimizerV1 print("QuantumAssistedHybridOptimizerV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumBalancedAdaptiveNesterovStrategy import QuantumBalancedAdaptiveNesterovStrategy +try: # QuantumBalancedAdaptiveNesterovStrategy + from nevergrad.optimization.lama.QuantumBalancedAdaptiveNesterovStrategy import ( + QuantumBalancedAdaptiveNesterovStrategy, + ) lama_register["QuantumBalancedAdaptiveNesterovStrategy"] = QuantumBalancedAdaptiveNesterovStrategy - res = NonObjectOptimizer(method="LLAMAQuantumBalancedAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumBalancedAdaptiveNesterovStrategy = NonObjectOptimizer(method="LLAMAQuantumBalancedAdaptiveNesterovStrategy").set_name("LLAMAQuantumBalancedAdaptiveNesterovStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumBalancedAdaptiveNesterovStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumBalancedAdaptiveNesterovStrategy = NonObjectOptimizer( + method="LLAMAQuantumBalancedAdaptiveNesterovStrategy" + ).set_name("LLAMAQuantumBalancedAdaptiveNesterovStrategy", register=True) +except Exception as e: # QuantumBalancedAdaptiveNesterovStrategy print("QuantumBalancedAdaptiveNesterovStrategy can not be imported: ", e) -try: +try: # QuantumBalancedEvolutionStrategy from nevergrad.optimization.lama.QuantumBalancedEvolutionStrategy import QuantumBalancedEvolutionStrategy lama_register["QuantumBalancedEvolutionStrategy"] = QuantumBalancedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumBalancedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumBalancedEvolutionStrategy").set_name("LLAMAQuantumBalancedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumBalancedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumBalancedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumBalancedEvolutionStrategy" + ).set_name("LLAMAQuantumBalancedEvolutionStrategy", register=True) +except Exception as e: # QuantumBalancedEvolutionStrategy print("QuantumBalancedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancedOptimizerV16 import QuantumCognitionAdaptiveEnhancedOptimizerV16 - - lama_register["QuantumCognitionAdaptiveEnhancedOptimizerV16"] = QuantumCognitionAdaptiveEnhancedOptimizerV16 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16").set_name("LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16", register=True) -except Exception as e: +try: # QuantumCognitionAdaptiveEnhancedOptimizerV16 + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancedOptimizerV16 import ( + QuantumCognitionAdaptiveEnhancedOptimizerV16, + ) + + lama_register["QuantumCognitionAdaptiveEnhancedOptimizerV16"] = ( + QuantumCognitionAdaptiveEnhancedOptimizerV16 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16" + ).set_name("LLAMAQuantumCognitionAdaptiveEnhancedOptimizerV16", register=True) +except Exception as e: # QuantumCognitionAdaptiveEnhancedOptimizerV16 print("QuantumCognitionAdaptiveEnhancedOptimizerV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancerV8 import QuantumCognitionAdaptiveEnhancerV8 +try: # QuantumCognitionAdaptiveEnhancerV8 + from nevergrad.optimization.lama.QuantumCognitionAdaptiveEnhancerV8 import ( + QuantumCognitionAdaptiveEnhancerV8, + ) lama_register["QuantumCognitionAdaptiveEnhancerV8"] = QuantumCognitionAdaptiveEnhancerV8 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionAdaptiveEnhancerV8 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancerV8").set_name("LLAMAQuantumCognitionAdaptiveEnhancerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveEnhancerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionAdaptiveEnhancerV8 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveEnhancerV8" + ).set_name("LLAMAQuantumCognitionAdaptiveEnhancerV8", register=True) +except Exception as e: # QuantumCognitionAdaptiveEnhancerV8 print("QuantumCognitionAdaptiveEnhancerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionAdaptiveTuningOptimizerV14 import QuantumCognitionAdaptiveTuningOptimizerV14 +try: # QuantumCognitionAdaptiveTuningOptimizerV14 + from nevergrad.optimization.lama.QuantumCognitionAdaptiveTuningOptimizerV14 import ( + QuantumCognitionAdaptiveTuningOptimizerV14, + ) lama_register["QuantumCognitionAdaptiveTuningOptimizerV14"] = QuantumCognitionAdaptiveTuningOptimizerV14 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionAdaptiveTuningOptimizerV14 = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14").set_name("LLAMAQuantumCognitionAdaptiveTuningOptimizerV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionAdaptiveTuningOptimizerV14 = NonObjectOptimizer( + method="LLAMAQuantumCognitionAdaptiveTuningOptimizerV14" + ).set_name("LLAMAQuantumCognitionAdaptiveTuningOptimizerV14", register=True) +except Exception as e: # QuantumCognitionAdaptiveTuningOptimizerV14 print("QuantumCognitionAdaptiveTuningOptimizerV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionDynamicAdaptationOptimizerV30 import QuantumCognitionDynamicAdaptationOptimizerV30 - - lama_register["QuantumCognitionDynamicAdaptationOptimizerV30"] = QuantumCognitionDynamicAdaptationOptimizerV30 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionDynamicAdaptationOptimizerV30 = NonObjectOptimizer(method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30").set_name("LLAMAQuantumCognitionDynamicAdaptationOptimizerV30", register=True) -except Exception as e: +try: # QuantumCognitionDynamicAdaptationOptimizerV30 + from nevergrad.optimization.lama.QuantumCognitionDynamicAdaptationOptimizerV30 import ( + QuantumCognitionDynamicAdaptationOptimizerV30, + ) + + lama_register["QuantumCognitionDynamicAdaptationOptimizerV30"] = ( + QuantumCognitionDynamicAdaptationOptimizerV30 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionDynamicAdaptationOptimizerV30 = NonObjectOptimizer( + method="LLAMAQuantumCognitionDynamicAdaptationOptimizerV30" + ).set_name("LLAMAQuantumCognitionDynamicAdaptationOptimizerV30", register=True) +except Exception as e: # QuantumCognitionDynamicAdaptationOptimizerV30 print("QuantumCognitionDynamicAdaptationOptimizerV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionEnhancedOptimizerV7 import QuantumCognitionEnhancedOptimizerV7 +try: # QuantumCognitionEnhancedOptimizerV7 + from nevergrad.optimization.lama.QuantumCognitionEnhancedOptimizerV7 import ( + QuantumCognitionEnhancedOptimizerV7, + ) lama_register["QuantumCognitionEnhancedOptimizerV7"] = QuantumCognitionEnhancedOptimizerV7 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionEnhancedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionEnhancedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumCognitionEnhancedOptimizerV7").set_name("LLAMAQuantumCognitionEnhancedOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionEnhancedOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionEnhancedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumCognitionEnhancedOptimizerV7" + ).set_name("LLAMAQuantumCognitionEnhancedOptimizerV7", register=True) +except Exception as e: # QuantumCognitionEnhancedOptimizerV7 print("QuantumCognitionEnhancedOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionFocusedHybridOptimizerV21 import QuantumCognitionFocusedHybridOptimizerV21 +try: # QuantumCognitionFocusedHybridOptimizerV21 + from nevergrad.optimization.lama.QuantumCognitionFocusedHybridOptimizerV21 import ( + QuantumCognitionFocusedHybridOptimizerV21, + ) lama_register["QuantumCognitionFocusedHybridOptimizerV21"] = QuantumCognitionFocusedHybridOptimizerV21 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedHybridOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionFocusedHybridOptimizerV21 = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedHybridOptimizerV21").set_name("LLAMAQuantumCognitionFocusedHybridOptimizerV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedHybridOptimizerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionFocusedHybridOptimizerV21 = NonObjectOptimizer( + method="LLAMAQuantumCognitionFocusedHybridOptimizerV21" + ).set_name("LLAMAQuantumCognitionFocusedHybridOptimizerV21", register=True) +except Exception as e: # QuantumCognitionFocusedHybridOptimizerV21 print("QuantumCognitionFocusedHybridOptimizerV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionFocusedOptimizerV17 import QuantumCognitionFocusedOptimizerV17 +try: # QuantumCognitionFocusedOptimizerV17 + from nevergrad.optimization.lama.QuantumCognitionFocusedOptimizerV17 import ( + QuantumCognitionFocusedOptimizerV17, + ) lama_register["QuantumCognitionFocusedOptimizerV17"] = QuantumCognitionFocusedOptimizerV17 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionFocusedOptimizerV17 = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedOptimizerV17").set_name("LLAMAQuantumCognitionFocusedOptimizerV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionFocusedOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionFocusedOptimizerV17 = NonObjectOptimizer( + method="LLAMAQuantumCognitionFocusedOptimizerV17" + ).set_name("LLAMAQuantumCognitionFocusedOptimizerV17", register=True) +except Exception as e: # QuantumCognitionFocusedOptimizerV17 print("QuantumCognitionFocusedOptimizerV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV19 import QuantumCognitionHybridEvolutionaryOptimizerV19 - - lama_register["QuantumCognitionHybridEvolutionaryOptimizerV19"] = QuantumCognitionHybridEvolutionaryOptimizerV19 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19").set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19", register=True) -except Exception as e: +try: # QuantumCognitionHybridEvolutionaryOptimizerV19 + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV19 import ( + QuantumCognitionHybridEvolutionaryOptimizerV19, + ) + + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV19"] = ( + QuantumCognitionHybridEvolutionaryOptimizerV19 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19" + ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV19", register=True) +except Exception as e: # QuantumCognitionHybridEvolutionaryOptimizerV19 print("QuantumCognitionHybridEvolutionaryOptimizerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV20 import QuantumCognitionHybridEvolutionaryOptimizerV20 - - lama_register["QuantumCognitionHybridEvolutionaryOptimizerV20"] = QuantumCognitionHybridEvolutionaryOptimizerV20 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20").set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20", register=True) -except Exception as e: +try: # QuantumCognitionHybridEvolutionaryOptimizerV20 + from nevergrad.optimization.lama.QuantumCognitionHybridEvolutionaryOptimizerV20 import ( + QuantumCognitionHybridEvolutionaryOptimizerV20, + ) + + lama_register["QuantumCognitionHybridEvolutionaryOptimizerV20"] = ( + QuantumCognitionHybridEvolutionaryOptimizerV20 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20" + ).set_name("LLAMAQuantumCognitionHybridEvolutionaryOptimizerV20", register=True) +except Exception as e: # QuantumCognitionHybridEvolutionaryOptimizerV20 print("QuantumCognitionHybridEvolutionaryOptimizerV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV23 import QuantumCognitionHybridOptimizerV23 +try: # QuantumCognitionHybridOptimizerV23 + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV23 import ( + QuantumCognitionHybridOptimizerV23, + ) lama_register["QuantumCognitionHybridOptimizerV23"] = QuantumCognitionHybridOptimizerV23 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridOptimizerV23 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV23").set_name("LLAMAQuantumCognitionHybridOptimizerV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridOptimizerV23 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV23" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV23", register=True) +except Exception as e: # QuantumCognitionHybridOptimizerV23 print("QuantumCognitionHybridOptimizerV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV24 import QuantumCognitionHybridOptimizerV24 +try: # QuantumCognitionHybridOptimizerV24 + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV24 import ( + QuantumCognitionHybridOptimizerV24, + ) lama_register["QuantumCognitionHybridOptimizerV24"] = QuantumCognitionHybridOptimizerV24 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridOptimizerV24 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV24").set_name("LLAMAQuantumCognitionHybridOptimizerV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridOptimizerV24 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV24" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV24", register=True) +except Exception as e: # QuantumCognitionHybridOptimizerV24 print("QuantumCognitionHybridOptimizerV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV25 import QuantumCognitionHybridOptimizerV25 +try: # QuantumCognitionHybridOptimizerV25 + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV25 import ( + QuantumCognitionHybridOptimizerV25, + ) lama_register["QuantumCognitionHybridOptimizerV25"] = QuantumCognitionHybridOptimizerV25 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridOptimizerV25 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV25").set_name("LLAMAQuantumCognitionHybridOptimizerV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridOptimizerV25 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV25" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV25", register=True) +except Exception as e: # QuantumCognitionHybridOptimizerV25 print("QuantumCognitionHybridOptimizerV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV26 import QuantumCognitionHybridOptimizerV26 +try: # QuantumCognitionHybridOptimizerV26 + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV26 import ( + QuantumCognitionHybridOptimizerV26, + ) lama_register["QuantumCognitionHybridOptimizerV26"] = QuantumCognitionHybridOptimizerV26 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridOptimizerV26 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV26").set_name("LLAMAQuantumCognitionHybridOptimizerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridOptimizerV26 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV26" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV26", register=True) +except Exception as e: # QuantumCognitionHybridOptimizerV26 print("QuantumCognitionHybridOptimizerV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV27 import QuantumCognitionHybridOptimizerV27 +try: # QuantumCognitionHybridOptimizerV27 + from nevergrad.optimization.lama.QuantumCognitionHybridOptimizerV27 import ( + QuantumCognitionHybridOptimizerV27, + ) lama_register["QuantumCognitionHybridOptimizerV27"] = QuantumCognitionHybridOptimizerV27 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionHybridOptimizerV27 = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV27").set_name("LLAMAQuantumCognitionHybridOptimizerV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionHybridOptimizerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionHybridOptimizerV27 = NonObjectOptimizer( + method="LLAMAQuantumCognitionHybridOptimizerV27" + ).set_name("LLAMAQuantumCognitionHybridOptimizerV27", register=True) +except Exception as e: # QuantumCognitionHybridOptimizerV27 print("QuantumCognitionHybridOptimizerV27 can not be imported: ", e) -try: +try: # QuantumCognitionOptimizerV2 from nevergrad.optimization.lama.QuantumCognitionOptimizerV2 import QuantumCognitionOptimizerV2 lama_register["QuantumCognitionOptimizerV2"] = QuantumCognitionOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2").set_name("LLAMAQuantumCognitionOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumCognitionOptimizerV2").set_name( + "LLAMAQuantumCognitionOptimizerV2", register=True + ) +except Exception as e: # QuantumCognitionOptimizerV2 print("QuantumCognitionOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitionTrajectoryOptimizerV28 import QuantumCognitionTrajectoryOptimizerV28 +try: # QuantumCognitionTrajectoryOptimizerV28 + from nevergrad.optimization.lama.QuantumCognitionTrajectoryOptimizerV28 import ( + QuantumCognitionTrajectoryOptimizerV28, + ) lama_register["QuantumCognitionTrajectoryOptimizerV28"] = QuantumCognitionTrajectoryOptimizerV28 - res = NonObjectOptimizer(method="LLAMAQuantumCognitionTrajectoryOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitionTrajectoryOptimizerV28 = NonObjectOptimizer(method="LLAMAQuantumCognitionTrajectoryOptimizerV28").set_name("LLAMAQuantumCognitionTrajectoryOptimizerV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitionTrajectoryOptimizerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitionTrajectoryOptimizerV28 = NonObjectOptimizer( + method="LLAMAQuantumCognitionTrajectoryOptimizerV28" + ).set_name("LLAMAQuantumCognitionTrajectoryOptimizerV28", register=True) +except Exception as e: # QuantumCognitionTrajectoryOptimizerV28 print("QuantumCognitionTrajectoryOptimizerV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCognitiveAdaptiveOptimizer import QuantumCognitiveAdaptiveOptimizer +try: # QuantumCognitiveAdaptiveOptimizer + from nevergrad.optimization.lama.QuantumCognitiveAdaptiveOptimizer import ( + QuantumCognitiveAdaptiveOptimizer, + ) lama_register["QuantumCognitiveAdaptiveOptimizer"] = QuantumCognitiveAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumCognitiveAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCognitiveAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumCognitiveAdaptiveOptimizer").set_name("LLAMAQuantumCognitiveAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCognitiveAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCognitiveAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumCognitiveAdaptiveOptimizer" + ).set_name("LLAMAQuantumCognitiveAdaptiveOptimizer", register=True) +except Exception as e: # QuantumCognitiveAdaptiveOptimizer print("QuantumCognitiveAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumControlledDiversityStrategy import QuantumControlledDiversityStrategy +try: # QuantumControlledDiversityStrategy + from nevergrad.optimization.lama.QuantumControlledDiversityStrategy import ( + QuantumControlledDiversityStrategy, + ) lama_register["QuantumControlledDiversityStrategy"] = QuantumControlledDiversityStrategy - res = NonObjectOptimizer(method="LLAMAQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumControlledDiversityStrategy = NonObjectOptimizer(method="LLAMAQuantumControlledDiversityStrategy").set_name("LLAMAQuantumControlledDiversityStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumControlledDiversityStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumControlledDiversityStrategy = NonObjectOptimizer( + method="LLAMAQuantumControlledDiversityStrategy" + ).set_name("LLAMAQuantumControlledDiversityStrategy", register=True) +except Exception as e: # QuantumControlledDiversityStrategy print("QuantumControlledDiversityStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCooperativeCrossoverStrategy import QuantumCooperativeCrossoverStrategy +try: # QuantumCooperativeCrossoverStrategy + from nevergrad.optimization.lama.QuantumCooperativeCrossoverStrategy import ( + QuantumCooperativeCrossoverStrategy, + ) lama_register["QuantumCooperativeCrossoverStrategy"] = QuantumCooperativeCrossoverStrategy - res = NonObjectOptimizer(method="LLAMAQuantumCooperativeCrossoverStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCooperativeCrossoverStrategy = NonObjectOptimizer(method="LLAMAQuantumCooperativeCrossoverStrategy").set_name("LLAMAQuantumCooperativeCrossoverStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumCooperativeCrossoverStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCooperativeCrossoverStrategy = NonObjectOptimizer( + method="LLAMAQuantumCooperativeCrossoverStrategy" + ).set_name("LLAMAQuantumCooperativeCrossoverStrategy", register=True) +except Exception as e: # QuantumCooperativeCrossoverStrategy print("QuantumCooperativeCrossoverStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolution import QuantumCovarianceMatrixDifferentialEvolution - - lama_register["QuantumCovarianceMatrixDifferentialEvolution"] = QuantumCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolution").set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # QuantumCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolution import ( + QuantumCovarianceMatrixDifferentialEvolution, + ) + + lama_register["QuantumCovarianceMatrixDifferentialEvolution"] = ( + QuantumCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # QuantumCovarianceMatrixDifferentialEvolution print("QuantumCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - - lama_register["QuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 - res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2").set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) -except Exception as e: +try: # QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + from nevergrad.optimization.lama.QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 import ( + QuantumCovarianceMatrixDifferentialEvolutionRefinedV2, + ) + + lama_register["QuantumCovarianceMatrixDifferentialEvolutionRefinedV2"] = ( + QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2 = NonObjectOptimizer( + method="LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2" + ).set_name("LLAMAQuantumCovarianceMatrixDifferentialEvolutionRefinedV2", register=True) +except Exception as e: # QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 print("QuantumCovarianceMatrixDifferentialEvolutionRefinedV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch - - lama_register["QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch print("QuantumDifferentialEvolutionWithAdaptiveElitismAndEnhancedLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart import QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart - - lama_register["QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart"] = QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart import ( + QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart"] = ( + QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveLearningAndRestart", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart print("QuantumDifferentialEvolutionWithAdaptiveLearningAndRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch - - lama_register["QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch print("QuantumDifferentialEvolutionWithAdaptiveMemoryAndEnhancedLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning import QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning - - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning"] = QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning import ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndLearning", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning print("QuantumDifferentialEvolutionWithAdaptiveRestartAndLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement import QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement - - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement"] = QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement import ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement print("QuantumDifferentialEvolutionWithAdaptiveRestartAndMemoryRefinement can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning import QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning - - lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning"] = QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning import ( + QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning"] = ( + QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning print("QuantumDifferentialEvolutionWithAdaptiveRestartsAndElitistLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning import QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning - - lama_register["QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning"] = QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning").set_name("LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning", register=True) -except Exception as e: - print("QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts import QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts - - lama_register["QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts"] = QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning import ( + QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning"] = ( + QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning" + ).set_name( + "LLAMAQuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning", register=True + ) +except Exception as e: # QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning + print( + "QuantumDifferentialEvolutionWithAdvancedRestartsAndEnhancedElitistLearning can not be imported: ", e + ) +try: # QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts import ( + QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts"] = ( + QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts print("QuantumDifferentialEvolutionWithDiverseElitismAndAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch import QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch - - lama_register["QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch"] = QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch import ( + QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch"] = ( + QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch print("QuantumDifferentialEvolutionWithDynamicAdaptiveMemoryAndEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicElitismAndRestarts import QuantumDifferentialEvolutionWithDynamicElitismAndRestarts - - lama_register["QuantumDifferentialEvolutionWithDynamicElitismAndRestarts"] = QuantumDifferentialEvolutionWithDynamicElitismAndRestarts - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithDynamicElitismAndRestarts + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicElitismAndRestarts import ( + QuantumDifferentialEvolutionWithDynamicElitismAndRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicElitismAndRestarts"] = ( + QuantumDifferentialEvolutionWithDynamicElitismAndRestarts + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicElitismAndRestarts", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithDynamicElitismAndRestarts print("QuantumDifferentialEvolutionWithDynamicElitismAndRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart import QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart - - lama_register["QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart"] = QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart").set_name("LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart import ( + QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart, + ) + + lama_register["QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart"] = ( + QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart" + ).set_name("LLAMAQuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart print("QuantumDifferentialEvolutionWithDynamicMemoryAndAdaptiveRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEliteGuidance import QuantumDifferentialEvolutionWithEliteGuidance - - lama_register["QuantumDifferentialEvolutionWithEliteGuidance"] = QuantumDifferentialEvolutionWithEliteGuidance - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithEliteGuidance = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance").set_name("LLAMAQuantumDifferentialEvolutionWithEliteGuidance", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithEliteGuidance + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEliteGuidance import ( + QuantumDifferentialEvolutionWithEliteGuidance, + ) + + lama_register["QuantumDifferentialEvolutionWithEliteGuidance"] = ( + QuantumDifferentialEvolutionWithEliteGuidance + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithEliteGuidance = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEliteGuidance" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEliteGuidance", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithEliteGuidance print("QuantumDifferentialEvolutionWithEliteGuidance can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitism import QuantumDifferentialEvolutionWithElitism +try: # QuantumDifferentialEvolutionWithElitism + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitism import ( + QuantumDifferentialEvolutionWithElitism, + ) lama_register["QuantumDifferentialEvolutionWithElitism"] = QuantumDifferentialEvolutionWithElitism - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithElitism = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitism").set_name("LLAMAQuantumDifferentialEvolutionWithElitism", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithElitism = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithElitism" + ).set_name("LLAMAQuantumDifferentialEvolutionWithElitism", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithElitism print("QuantumDifferentialEvolutionWithElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch import QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch - - lama_register["QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch"] = QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch").set_name("LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch import ( + QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch"] = ( + QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch print("QuantumDifferentialEvolutionWithElitistMemoryAndEnhancedLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch import QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch - - lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch print("QuantumDifferentialEvolutionWithEnhancedAdaptiveMemoryAndHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch import QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch - - lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch", register=True) -except Exception as e: - print("QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch import QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch - - lama_register["QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch"] = QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch" + ).set_name( + "LLAMAQuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch", register=True + ) +except Exception as e: # QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch + print( + "QuantumDifferentialEvolutionWithEnhancedAdaptiveRestartsAndDynamicHybridSearch can not be imported: ", + e, + ) +try: # QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch import ( + QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch"] = ( + QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch print("QuantumDifferentialEvolutionWithEnhancedLearningAndAdaptiveHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts import QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts - - lama_register["QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts"] = QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts").set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts import ( + QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts"] = ( + QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts print("QuantumDifferentialEvolutionWithEnhancedLocalSearchAndAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch import QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch - - lama_register["QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch"] = QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch").set_name("LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch import ( + QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch, + ) + + lama_register["QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch"] = ( + QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch" + ).set_name("LLAMAQuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch print("QuantumDifferentialEvolutionWithLearningAndAdaptiveHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithMultiStrategyLearning import QuantumDifferentialEvolutionWithMultiStrategyLearning - - lama_register["QuantumDifferentialEvolutionWithMultiStrategyLearning"] = QuantumDifferentialEvolutionWithMultiStrategyLearning - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning").set_name("LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning", register=True) -except Exception as e: +try: # QuantumDifferentialEvolutionWithMultiStrategyLearning + from nevergrad.optimization.lama.QuantumDifferentialEvolutionWithMultiStrategyLearning import ( + QuantumDifferentialEvolutionWithMultiStrategyLearning, + ) + + lama_register["QuantumDifferentialEvolutionWithMultiStrategyLearning"] = ( + QuantumDifferentialEvolutionWithMultiStrategyLearning + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning = NonObjectOptimizer( + method="LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning" + ).set_name("LLAMAQuantumDifferentialEvolutionWithMultiStrategyLearning", register=True) +except Exception as e: # QuantumDifferentialEvolutionWithMultiStrategyLearning print("QuantumDifferentialEvolutionWithMultiStrategyLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithAdaptiveRestarts import QuantumDifferentialParticleOptimizerWithAdaptiveRestarts - - lama_register["QuantumDifferentialParticleOptimizerWithAdaptiveRestarts"] = QuantumDifferentialParticleOptimizerWithAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts").set_name("LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts", register=True) -except Exception as e: +try: # QuantumDifferentialParticleOptimizerWithAdaptiveRestarts + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithAdaptiveRestarts import ( + QuantumDifferentialParticleOptimizerWithAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithAdaptiveRestarts"] = ( + QuantumDifferentialParticleOptimizerWithAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithAdaptiveRestarts", register=True) +except Exception as e: # QuantumDifferentialParticleOptimizerWithAdaptiveRestarts print("QuantumDifferentialParticleOptimizerWithAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteGuidedMutation import QuantumDifferentialParticleOptimizerWithEliteGuidedMutation - - lama_register["QuantumDifferentialParticleOptimizerWithEliteGuidedMutation"] = QuantumDifferentialParticleOptimizerWithEliteGuidedMutation - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation", register=True) -except Exception as e: +try: # QuantumDifferentialParticleOptimizerWithEliteGuidedMutation + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteGuidedMutation import ( + QuantumDifferentialParticleOptimizerWithEliteGuidedMutation, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEliteGuidedMutation"] = ( + QuantumDifferentialParticleOptimizerWithEliteGuidedMutation + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteGuidedMutation", register=True) +except Exception as e: # QuantumDifferentialParticleOptimizerWithEliteGuidedMutation print("QuantumDifferentialParticleOptimizerWithEliteGuidedMutation can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteRefinement import QuantumDifferentialParticleOptimizerWithEliteRefinement - - lama_register["QuantumDifferentialParticleOptimizerWithEliteRefinement"] = QuantumDifferentialParticleOptimizerWithEliteRefinement - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement", register=True) -except Exception as e: +try: # QuantumDifferentialParticleOptimizerWithEliteRefinement + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEliteRefinement import ( + QuantumDifferentialParticleOptimizerWithEliteRefinement, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEliteRefinement"] = ( + QuantumDifferentialParticleOptimizerWithEliteRefinement + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEliteRefinement", register=True) +except Exception as e: # QuantumDifferentialParticleOptimizerWithEliteRefinement print("QuantumDifferentialParticleOptimizerWithEliteRefinement can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithElitism import QuantumDifferentialParticleOptimizerWithElitism - - lama_register["QuantumDifferentialParticleOptimizerWithElitism"] = QuantumDifferentialParticleOptimizerWithElitism - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithElitism").set_name("LLAMAQuantumDifferentialParticleOptimizerWithElitism", register=True) -except Exception as e: +try: # QuantumDifferentialParticleOptimizerWithElitism + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithElitism import ( + QuantumDifferentialParticleOptimizerWithElitism, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithElitism"] = ( + QuantumDifferentialParticleOptimizerWithElitism + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithElitism" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithElitism", register=True) +except Exception as e: # QuantumDifferentialParticleOptimizerWithElitism print("QuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts import QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts - - lama_register["QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts"] = QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts").set_name("LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts", register=True) -except Exception as e: +try: # QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts + from nevergrad.optimization.lama.QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts import ( + QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts, + ) + + lama_register["QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts"] = ( + QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts" + ).set_name("LLAMAQuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts", register=True) +except Exception as e: # QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts print("QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDifferentialParticleSwarmRefinement import QuantumDifferentialParticleSwarmRefinement +try: # QuantumDifferentialParticleSwarmRefinement + from nevergrad.optimization.lama.QuantumDifferentialParticleSwarmRefinement import ( + QuantumDifferentialParticleSwarmRefinement, + ) lama_register["QuantumDifferentialParticleSwarmRefinement"] = QuantumDifferentialParticleSwarmRefinement - res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleSwarmRefinement")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDifferentialParticleSwarmRefinement = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleSwarmRefinement").set_name("LLAMAQuantumDifferentialParticleSwarmRefinement", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDifferentialParticleSwarmRefinement")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDifferentialParticleSwarmRefinement = NonObjectOptimizer( + method="LLAMAQuantumDifferentialParticleSwarmRefinement" + ).set_name("LLAMAQuantumDifferentialParticleSwarmRefinement", register=True) +except Exception as e: # QuantumDifferentialParticleSwarmRefinement print("QuantumDifferentialParticleSwarmRefinement can not be imported: ", e) -try: +try: # QuantumDirectionalAcceleratorV19 from nevergrad.optimization.lama.QuantumDirectionalAcceleratorV19 import QuantumDirectionalAcceleratorV19 lama_register["QuantumDirectionalAcceleratorV19"] = QuantumDirectionalAcceleratorV19 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalAcceleratorV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalAcceleratorV19 = NonObjectOptimizer(method="LLAMAQuantumDirectionalAcceleratorV19").set_name("LLAMAQuantumDirectionalAcceleratorV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalAcceleratorV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalAcceleratorV19 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalAcceleratorV19" + ).set_name("LLAMAQuantumDirectionalAcceleratorV19", register=True) +except Exception as e: # QuantumDirectionalAcceleratorV19 print("QuantumDirectionalAcceleratorV19 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancer from nevergrad.optimization.lama.QuantumDirectionalEnhancer import QuantumDirectionalEnhancer lama_register["QuantumDirectionalEnhancer"] = QuantumDirectionalEnhancer - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancer = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer").set_name("LLAMAQuantumDirectionalEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancer = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancer").set_name( + "LLAMAQuantumDirectionalEnhancer", register=True + ) +except Exception as e: # QuantumDirectionalEnhancer print("QuantumDirectionalEnhancer can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV10 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV10 import QuantumDirectionalEnhancerV10 lama_register["QuantumDirectionalEnhancerV10"] = QuantumDirectionalEnhancerV10 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV10 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV10").set_name("LLAMAQuantumDirectionalEnhancerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV10 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV10" + ).set_name("LLAMAQuantumDirectionalEnhancerV10", register=True) +except Exception as e: # QuantumDirectionalEnhancerV10 print("QuantumDirectionalEnhancerV10 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV11 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV11 import QuantumDirectionalEnhancerV11 lama_register["QuantumDirectionalEnhancerV11"] = QuantumDirectionalEnhancerV11 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV11 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV11").set_name("LLAMAQuantumDirectionalEnhancerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV11 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV11" + ).set_name("LLAMAQuantumDirectionalEnhancerV11", register=True) +except Exception as e: # QuantumDirectionalEnhancerV11 print("QuantumDirectionalEnhancerV11 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV12 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV12 import QuantumDirectionalEnhancerV12 lama_register["QuantumDirectionalEnhancerV12"] = QuantumDirectionalEnhancerV12 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV12 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV12").set_name("LLAMAQuantumDirectionalEnhancerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV12 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV12" + ).set_name("LLAMAQuantumDirectionalEnhancerV12", register=True) +except Exception as e: # QuantumDirectionalEnhancerV12 print("QuantumDirectionalEnhancerV12 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV13 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV13 import QuantumDirectionalEnhancerV13 lama_register["QuantumDirectionalEnhancerV13"] = QuantumDirectionalEnhancerV13 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV13 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV13").set_name("LLAMAQuantumDirectionalEnhancerV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV13 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV13" + ).set_name("LLAMAQuantumDirectionalEnhancerV13", register=True) +except Exception as e: # QuantumDirectionalEnhancerV13 print("QuantumDirectionalEnhancerV13 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV14 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV14 import QuantumDirectionalEnhancerV14 lama_register["QuantumDirectionalEnhancerV14"] = QuantumDirectionalEnhancerV14 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV14 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV14").set_name("LLAMAQuantumDirectionalEnhancerV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV14 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV14" + ).set_name("LLAMAQuantumDirectionalEnhancerV14", register=True) +except Exception as e: # QuantumDirectionalEnhancerV14 print("QuantumDirectionalEnhancerV14 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV15 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV15 import QuantumDirectionalEnhancerV15 lama_register["QuantumDirectionalEnhancerV15"] = QuantumDirectionalEnhancerV15 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV15 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV15").set_name("LLAMAQuantumDirectionalEnhancerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV15 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV15" + ).set_name("LLAMAQuantumDirectionalEnhancerV15", register=True) +except Exception as e: # QuantumDirectionalEnhancerV15 print("QuantumDirectionalEnhancerV15 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV16 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV16 import QuantumDirectionalEnhancerV16 lama_register["QuantumDirectionalEnhancerV16"] = QuantumDirectionalEnhancerV16 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV16 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV16").set_name("LLAMAQuantumDirectionalEnhancerV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV16 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV16" + ).set_name("LLAMAQuantumDirectionalEnhancerV16", register=True) +except Exception as e: # QuantumDirectionalEnhancerV16 print("QuantumDirectionalEnhancerV16 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV17 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV17 import QuantumDirectionalEnhancerV17 lama_register["QuantumDirectionalEnhancerV17"] = QuantumDirectionalEnhancerV17 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV17 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV17").set_name("LLAMAQuantumDirectionalEnhancerV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV17 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV17" + ).set_name("LLAMAQuantumDirectionalEnhancerV17", register=True) +except Exception as e: # QuantumDirectionalEnhancerV17 print("QuantumDirectionalEnhancerV17 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV18 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV18 import QuantumDirectionalEnhancerV18 lama_register["QuantumDirectionalEnhancerV18"] = QuantumDirectionalEnhancerV18 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV18 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV18").set_name("LLAMAQuantumDirectionalEnhancerV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV18 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV18" + ).set_name("LLAMAQuantumDirectionalEnhancerV18", register=True) +except Exception as e: # QuantumDirectionalEnhancerV18 print("QuantumDirectionalEnhancerV18 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV2 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV2 import QuantumDirectionalEnhancerV2 lama_register["QuantumDirectionalEnhancerV2"] = QuantumDirectionalEnhancerV2 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV2 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV2").set_name("LLAMAQuantumDirectionalEnhancerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV2 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV2" + ).set_name("LLAMAQuantumDirectionalEnhancerV2", register=True) +except Exception as e: # QuantumDirectionalEnhancerV2 print("QuantumDirectionalEnhancerV2 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV3 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV3 import QuantumDirectionalEnhancerV3 lama_register["QuantumDirectionalEnhancerV3"] = QuantumDirectionalEnhancerV3 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV3 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV3").set_name("LLAMAQuantumDirectionalEnhancerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV3 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV3" + ).set_name("LLAMAQuantumDirectionalEnhancerV3", register=True) +except Exception as e: # QuantumDirectionalEnhancerV3 print("QuantumDirectionalEnhancerV3 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV4 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV4 import QuantumDirectionalEnhancerV4 lama_register["QuantumDirectionalEnhancerV4"] = QuantumDirectionalEnhancerV4 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV4 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV4").set_name("LLAMAQuantumDirectionalEnhancerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV4 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV4" + ).set_name("LLAMAQuantumDirectionalEnhancerV4", register=True) +except Exception as e: # QuantumDirectionalEnhancerV4 print("QuantumDirectionalEnhancerV4 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV5 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV5 import QuantumDirectionalEnhancerV5 lama_register["QuantumDirectionalEnhancerV5"] = QuantumDirectionalEnhancerV5 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV5 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV5").set_name("LLAMAQuantumDirectionalEnhancerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV5 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV5" + ).set_name("LLAMAQuantumDirectionalEnhancerV5", register=True) +except Exception as e: # QuantumDirectionalEnhancerV5 print("QuantumDirectionalEnhancerV5 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV6 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV6 import QuantumDirectionalEnhancerV6 lama_register["QuantumDirectionalEnhancerV6"] = QuantumDirectionalEnhancerV6 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV6 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV6").set_name("LLAMAQuantumDirectionalEnhancerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV6 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV6" + ).set_name("LLAMAQuantumDirectionalEnhancerV6", register=True) +except Exception as e: # QuantumDirectionalEnhancerV6 print("QuantumDirectionalEnhancerV6 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV7 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV7 import QuantumDirectionalEnhancerV7 lama_register["QuantumDirectionalEnhancerV7"] = QuantumDirectionalEnhancerV7 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV7 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV7").set_name("LLAMAQuantumDirectionalEnhancerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV7 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV7" + ).set_name("LLAMAQuantumDirectionalEnhancerV7", register=True) +except Exception as e: # QuantumDirectionalEnhancerV7 print("QuantumDirectionalEnhancerV7 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV8 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV8 import QuantumDirectionalEnhancerV8 lama_register["QuantumDirectionalEnhancerV8"] = QuantumDirectionalEnhancerV8 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV8 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV8").set_name("LLAMAQuantumDirectionalEnhancerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV8 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV8" + ).set_name("LLAMAQuantumDirectionalEnhancerV8", register=True) +except Exception as e: # QuantumDirectionalEnhancerV8 print("QuantumDirectionalEnhancerV8 can not be imported: ", e) -try: +try: # QuantumDirectionalEnhancerV9 from nevergrad.optimization.lama.QuantumDirectionalEnhancerV9 import QuantumDirectionalEnhancerV9 lama_register["QuantumDirectionalEnhancerV9"] = QuantumDirectionalEnhancerV9 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalEnhancerV9 = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV9").set_name("LLAMAQuantumDirectionalEnhancerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalEnhancerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalEnhancerV9 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalEnhancerV9" + ).set_name("LLAMAQuantumDirectionalEnhancerV9", register=True) +except Exception as e: # QuantumDirectionalEnhancerV9 print("QuantumDirectionalEnhancerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizer import QuantumDirectionalFusionOptimizer +try: # QuantumDirectionalFusionOptimizer + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizer import ( + QuantumDirectionalFusionOptimizer, + ) lama_register["QuantumDirectionalFusionOptimizer"] = QuantumDirectionalFusionOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalFusionOptimizer = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizer").set_name("LLAMAQuantumDirectionalFusionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalFusionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumDirectionalFusionOptimizer" + ).set_name("LLAMAQuantumDirectionalFusionOptimizer", register=True) +except Exception as e: # QuantumDirectionalFusionOptimizer print("QuantumDirectionalFusionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizerV2 import QuantumDirectionalFusionOptimizerV2 +try: # QuantumDirectionalFusionOptimizerV2 + from nevergrad.optimization.lama.QuantumDirectionalFusionOptimizerV2 import ( + QuantumDirectionalFusionOptimizerV2, + ) lama_register["QuantumDirectionalFusionOptimizerV2"] = QuantumDirectionalFusionOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalFusionOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizerV2").set_name("LLAMAQuantumDirectionalFusionOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalFusionOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalFusionOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalFusionOptimizerV2" + ).set_name("LLAMAQuantumDirectionalFusionOptimizerV2", register=True) +except Exception as e: # QuantumDirectionalFusionOptimizerV2 print("QuantumDirectionalFusionOptimizerV2 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV20 from nevergrad.optimization.lama.QuantumDirectionalRefinerV20 import QuantumDirectionalRefinerV20 lama_register["QuantumDirectionalRefinerV20"] = QuantumDirectionalRefinerV20 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV20 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV20").set_name("LLAMAQuantumDirectionalRefinerV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV20 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV20" + ).set_name("LLAMAQuantumDirectionalRefinerV20", register=True) +except Exception as e: # QuantumDirectionalRefinerV20 print("QuantumDirectionalRefinerV20 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV21 from nevergrad.optimization.lama.QuantumDirectionalRefinerV21 import QuantumDirectionalRefinerV21 lama_register["QuantumDirectionalRefinerV21"] = QuantumDirectionalRefinerV21 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV21 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV21").set_name("LLAMAQuantumDirectionalRefinerV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV21 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV21" + ).set_name("LLAMAQuantumDirectionalRefinerV21", register=True) +except Exception as e: # QuantumDirectionalRefinerV21 print("QuantumDirectionalRefinerV21 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV22 from nevergrad.optimization.lama.QuantumDirectionalRefinerV22 import QuantumDirectionalRefinerV22 lama_register["QuantumDirectionalRefinerV22"] = QuantumDirectionalRefinerV22 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV22 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV22").set_name("LLAMAQuantumDirectionalRefinerV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV22 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV22" + ).set_name("LLAMAQuantumDirectionalRefinerV22", register=True) +except Exception as e: # QuantumDirectionalRefinerV22 print("QuantumDirectionalRefinerV22 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV23 from nevergrad.optimization.lama.QuantumDirectionalRefinerV23 import QuantumDirectionalRefinerV23 lama_register["QuantumDirectionalRefinerV23"] = QuantumDirectionalRefinerV23 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV23 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV23").set_name("LLAMAQuantumDirectionalRefinerV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV23 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV23" + ).set_name("LLAMAQuantumDirectionalRefinerV23", register=True) +except Exception as e: # QuantumDirectionalRefinerV23 print("QuantumDirectionalRefinerV23 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV24 from nevergrad.optimization.lama.QuantumDirectionalRefinerV24 import QuantumDirectionalRefinerV24 lama_register["QuantumDirectionalRefinerV24"] = QuantumDirectionalRefinerV24 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV24 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV24").set_name("LLAMAQuantumDirectionalRefinerV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV24 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV24" + ).set_name("LLAMAQuantumDirectionalRefinerV24", register=True) +except Exception as e: # QuantumDirectionalRefinerV24 print("QuantumDirectionalRefinerV24 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV25 from nevergrad.optimization.lama.QuantumDirectionalRefinerV25 import QuantumDirectionalRefinerV25 lama_register["QuantumDirectionalRefinerV25"] = QuantumDirectionalRefinerV25 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV25 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV25").set_name("LLAMAQuantumDirectionalRefinerV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV25 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV25" + ).set_name("LLAMAQuantumDirectionalRefinerV25", register=True) +except Exception as e: # QuantumDirectionalRefinerV25 print("QuantumDirectionalRefinerV25 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV26 from nevergrad.optimization.lama.QuantumDirectionalRefinerV26 import QuantumDirectionalRefinerV26 lama_register["QuantumDirectionalRefinerV26"] = QuantumDirectionalRefinerV26 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV26 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV26").set_name("LLAMAQuantumDirectionalRefinerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV26 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV26" + ).set_name("LLAMAQuantumDirectionalRefinerV26", register=True) +except Exception as e: # QuantumDirectionalRefinerV26 print("QuantumDirectionalRefinerV26 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV27 from nevergrad.optimization.lama.QuantumDirectionalRefinerV27 import QuantumDirectionalRefinerV27 lama_register["QuantumDirectionalRefinerV27"] = QuantumDirectionalRefinerV27 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV27 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV27").set_name("LLAMAQuantumDirectionalRefinerV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV27 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV27" + ).set_name("LLAMAQuantumDirectionalRefinerV27", register=True) +except Exception as e: # QuantumDirectionalRefinerV27 print("QuantumDirectionalRefinerV27 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV28 from nevergrad.optimization.lama.QuantumDirectionalRefinerV28 import QuantumDirectionalRefinerV28 lama_register["QuantumDirectionalRefinerV28"] = QuantumDirectionalRefinerV28 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV28 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV28").set_name("LLAMAQuantumDirectionalRefinerV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV28 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV28" + ).set_name("LLAMAQuantumDirectionalRefinerV28", register=True) +except Exception as e: # QuantumDirectionalRefinerV28 print("QuantumDirectionalRefinerV28 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV29 from nevergrad.optimization.lama.QuantumDirectionalRefinerV29 import QuantumDirectionalRefinerV29 lama_register["QuantumDirectionalRefinerV29"] = QuantumDirectionalRefinerV29 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV29 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV29").set_name("LLAMAQuantumDirectionalRefinerV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV29 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV29" + ).set_name("LLAMAQuantumDirectionalRefinerV29", register=True) +except Exception as e: # QuantumDirectionalRefinerV29 print("QuantumDirectionalRefinerV29 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV30 from nevergrad.optimization.lama.QuantumDirectionalRefinerV30 import QuantumDirectionalRefinerV30 lama_register["QuantumDirectionalRefinerV30"] = QuantumDirectionalRefinerV30 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV30 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV30").set_name("LLAMAQuantumDirectionalRefinerV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV30 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV30" + ).set_name("LLAMAQuantumDirectionalRefinerV30", register=True) +except Exception as e: # QuantumDirectionalRefinerV30 print("QuantumDirectionalRefinerV30 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV31 from nevergrad.optimization.lama.QuantumDirectionalRefinerV31 import QuantumDirectionalRefinerV31 lama_register["QuantumDirectionalRefinerV31"] = QuantumDirectionalRefinerV31 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV31 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV31").set_name("LLAMAQuantumDirectionalRefinerV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV31 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV31" + ).set_name("LLAMAQuantumDirectionalRefinerV31", register=True) +except Exception as e: # QuantumDirectionalRefinerV31 print("QuantumDirectionalRefinerV31 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV32 from nevergrad.optimization.lama.QuantumDirectionalRefinerV32 import QuantumDirectionalRefinerV32 lama_register["QuantumDirectionalRefinerV32"] = QuantumDirectionalRefinerV32 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV32 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV32").set_name("LLAMAQuantumDirectionalRefinerV32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV32 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV32" + ).set_name("LLAMAQuantumDirectionalRefinerV32", register=True) +except Exception as e: # QuantumDirectionalRefinerV32 print("QuantumDirectionalRefinerV32 can not be imported: ", e) -try: +try: # QuantumDirectionalRefinerV33 from nevergrad.optimization.lama.QuantumDirectionalRefinerV33 import QuantumDirectionalRefinerV33 lama_register["QuantumDirectionalRefinerV33"] = QuantumDirectionalRefinerV33 - res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDirectionalRefinerV33 = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV33").set_name("LLAMAQuantumDirectionalRefinerV33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDirectionalRefinerV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDirectionalRefinerV33 = NonObjectOptimizer( + method="LLAMAQuantumDirectionalRefinerV33" + ).set_name("LLAMAQuantumDirectionalRefinerV33", register=True) +except Exception as e: # QuantumDirectionalRefinerV33 print("QuantumDirectionalRefinerV33 can not be imported: ", e) -try: +try: # QuantumDualStrategyAdaptiveDE from nevergrad.optimization.lama.QuantumDualStrategyAdaptiveDE import QuantumDualStrategyAdaptiveDE lama_register["QuantumDualStrategyAdaptiveDE"] = QuantumDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMAQuantumDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumDualStrategyAdaptiveDE").set_name("LLAMAQuantumDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumDualStrategyAdaptiveDE" + ).set_name("LLAMAQuantumDualStrategyAdaptiveDE", register=True) +except Exception as e: # QuantumDualStrategyAdaptiveDE print("QuantumDualStrategyAdaptiveDE can not be imported: ", e) -try: +try: # QuantumDynamicAdaptationStrategy from nevergrad.optimization.lama.QuantumDynamicAdaptationStrategy import QuantumDynamicAdaptationStrategy lama_register["QuantumDynamicAdaptationStrategy"] = QuantumDynamicAdaptationStrategy - res = NonObjectOptimizer(method="LLAMAQuantumDynamicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicAdaptationStrategy = NonObjectOptimizer(method="LLAMAQuantumDynamicAdaptationStrategy").set_name("LLAMAQuantumDynamicAdaptationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAQuantumDynamicAdaptationStrategy" + ).set_name("LLAMAQuantumDynamicAdaptationStrategy", register=True) +except Exception as e: # QuantumDynamicAdaptationStrategy print("QuantumDynamicAdaptationStrategy can not be imported: ", e) -try: +try: # QuantumDynamicBalanceOptimizer from nevergrad.optimization.lama.QuantumDynamicBalanceOptimizer import QuantumDynamicBalanceOptimizer lama_register["QuantumDynamicBalanceOptimizer"] = QuantumDynamicBalanceOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAQuantumDynamicBalanceOptimizer").set_name("LLAMAQuantumDynamicBalanceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumDynamicBalanceOptimizer" + ).set_name("LLAMAQuantumDynamicBalanceOptimizer", register=True) +except Exception as e: # QuantumDynamicBalanceOptimizer print("QuantumDynamicBalanceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDynamicBalancedOptimizerV7 import QuantumDynamicBalancedOptimizerV7 +try: # QuantumDynamicBalancedOptimizerV7 + from nevergrad.optimization.lama.QuantumDynamicBalancedOptimizerV7 import ( + QuantumDynamicBalancedOptimizerV7, + ) lama_register["QuantumDynamicBalancedOptimizerV7"] = QuantumDynamicBalancedOptimizerV7 - res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalancedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicBalancedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumDynamicBalancedOptimizerV7").set_name("LLAMAQuantumDynamicBalancedOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicBalancedOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicBalancedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumDynamicBalancedOptimizerV7" + ).set_name("LLAMAQuantumDynamicBalancedOptimizerV7", register=True) +except Exception as e: # QuantumDynamicBalancedOptimizerV7 print("QuantumDynamicBalancedOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDynamicExplorationOptimizerV6 import QuantumDynamicExplorationOptimizerV6 +try: # QuantumDynamicExplorationOptimizerV6 + from nevergrad.optimization.lama.QuantumDynamicExplorationOptimizerV6 import ( + QuantumDynamicExplorationOptimizerV6, + ) lama_register["QuantumDynamicExplorationOptimizerV6"] = QuantumDynamicExplorationOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumDynamicExplorationOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicExplorationOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumDynamicExplorationOptimizerV6").set_name("LLAMAQuantumDynamicExplorationOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicExplorationOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicExplorationOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumDynamicExplorationOptimizerV6" + ).set_name("LLAMAQuantumDynamicExplorationOptimizerV6", register=True) +except Exception as e: # QuantumDynamicExplorationOptimizerV6 print("QuantumDynamicExplorationOptimizerV6 can not be imported: ", e) -try: +try: # QuantumDynamicGradientClimberV2 from nevergrad.optimization.lama.QuantumDynamicGradientClimberV2 import QuantumDynamicGradientClimberV2 lama_register["QuantumDynamicGradientClimberV2"] = QuantumDynamicGradientClimberV2 - res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicGradientClimberV2 = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV2").set_name("LLAMAQuantumDynamicGradientClimberV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicGradientClimberV2 = NonObjectOptimizer( + method="LLAMAQuantumDynamicGradientClimberV2" + ).set_name("LLAMAQuantumDynamicGradientClimberV2", register=True) +except Exception as e: # QuantumDynamicGradientClimberV2 print("QuantumDynamicGradientClimberV2 can not be imported: ", e) -try: +try: # QuantumDynamicGradientClimberV3 from nevergrad.optimization.lama.QuantumDynamicGradientClimberV3 import QuantumDynamicGradientClimberV3 lama_register["QuantumDynamicGradientClimberV3"] = QuantumDynamicGradientClimberV3 - res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicGradientClimberV3 = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV3").set_name("LLAMAQuantumDynamicGradientClimberV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicGradientClimberV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicGradientClimberV3 = NonObjectOptimizer( + method="LLAMAQuantumDynamicGradientClimberV3" + ).set_name("LLAMAQuantumDynamicGradientClimberV3", register=True) +except Exception as e: # QuantumDynamicGradientClimberV3 print("QuantumDynamicGradientClimberV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumDynamicallyAdaptiveFireworksAlgorithm import QuantumDynamicallyAdaptiveFireworksAlgorithm - - lama_register["QuantumDynamicallyAdaptiveFireworksAlgorithm"] = QuantumDynamicallyAdaptiveFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm").set_name("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm", register=True) -except Exception as e: +try: # QuantumDynamicallyAdaptiveFireworksAlgorithm + from nevergrad.optimization.lama.QuantumDynamicallyAdaptiveFireworksAlgorithm import ( + QuantumDynamicallyAdaptiveFireworksAlgorithm, + ) + + lama_register["QuantumDynamicallyAdaptiveFireworksAlgorithm"] = ( + QuantumDynamicallyAdaptiveFireworksAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm" + ).set_name("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm", register=True) +except Exception as e: # QuantumDynamicallyAdaptiveFireworksAlgorithm print("QuantumDynamicallyAdaptiveFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEliteMemeticAdaptiveSearch import QuantumEliteMemeticAdaptiveSearch +try: # QuantumEliteMemeticAdaptiveSearch + from nevergrad.optimization.lama.QuantumEliteMemeticAdaptiveSearch import ( + QuantumEliteMemeticAdaptiveSearch, + ) lama_register["QuantumEliteMemeticAdaptiveSearch"] = QuantumEliteMemeticAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAQuantumEliteMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEliteMemeticAdaptiveSearch = NonObjectOptimizer(method="LLAMAQuantumEliteMemeticAdaptiveSearch").set_name("LLAMAQuantumEliteMemeticAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEliteMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEliteMemeticAdaptiveSearch = NonObjectOptimizer( + method="LLAMAQuantumEliteMemeticAdaptiveSearch" + ).set_name("LLAMAQuantumEliteMemeticAdaptiveSearch", register=True) +except Exception as e: # QuantumEliteMemeticAdaptiveSearch print("QuantumEliteMemeticAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v4 import QuantumEnhancedAdaptiveDifferentialEvolution_v4 - - lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v4"] = QuantumEnhancedAdaptiveDifferentialEvolution_v4 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4").set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4", register=True) -except Exception as e: +try: # QuantumEnhancedAdaptiveDifferentialEvolution_v4 + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v4 import ( + QuantumEnhancedAdaptiveDifferentialEvolution_v4, + ) + + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v4"] = ( + QuantumEnhancedAdaptiveDifferentialEvolution_v4 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4" + ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v4", register=True) +except Exception as e: # QuantumEnhancedAdaptiveDifferentialEvolution_v4 print("QuantumEnhancedAdaptiveDifferentialEvolution_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v5 import QuantumEnhancedAdaptiveDifferentialEvolution_v5 - - lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v5"] = QuantumEnhancedAdaptiveDifferentialEvolution_v5 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5").set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5", register=True) -except Exception as e: +try: # QuantumEnhancedAdaptiveDifferentialEvolution_v5 + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDifferentialEvolution_v5 import ( + QuantumEnhancedAdaptiveDifferentialEvolution_v5, + ) + + lama_register["QuantumEnhancedAdaptiveDifferentialEvolution_v5"] = ( + QuantumEnhancedAdaptiveDifferentialEvolution_v5 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5" + ).set_name("LLAMAQuantumEnhancedAdaptiveDifferentialEvolution_v5", register=True) +except Exception as e: # QuantumEnhancedAdaptiveDifferentialEvolution_v5 print("QuantumEnhancedAdaptiveDifferentialEvolution_v5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDiversityStrategyV6 import QuantumEnhancedAdaptiveDiversityStrategyV6 +try: # QuantumEnhancedAdaptiveDiversityStrategyV6 + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDiversityStrategyV6 import ( + QuantumEnhancedAdaptiveDiversityStrategyV6, + ) lama_register["QuantumEnhancedAdaptiveDiversityStrategyV6"] = QuantumEnhancedAdaptiveDiversityStrategyV6 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6").set_name("LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6" + ).set_name("LLAMAQuantumEnhancedAdaptiveDiversityStrategyV6", register=True) +except Exception as e: # QuantumEnhancedAdaptiveDiversityStrategyV6 print("QuantumEnhancedAdaptiveDiversityStrategyV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDualStrategyDE import QuantumEnhancedAdaptiveDualStrategyDE +try: # QuantumEnhancedAdaptiveDualStrategyDE + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveDualStrategyDE import ( + QuantumEnhancedAdaptiveDualStrategyDE, + ) lama_register["QuantumEnhancedAdaptiveDualStrategyDE"] = QuantumEnhancedAdaptiveDualStrategyDE - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveDualStrategyDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE").set_name("LLAMAQuantumEnhancedAdaptiveDualStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveDualStrategyDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveDualStrategyDE" + ).set_name("LLAMAQuantumEnhancedAdaptiveDualStrategyDE", register=True) +except Exception as e: # QuantumEnhancedAdaptiveDualStrategyDE print("QuantumEnhancedAdaptiveDualStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveExplorationOptimization import QuantumEnhancedAdaptiveExplorationOptimization - - lama_register["QuantumEnhancedAdaptiveExplorationOptimization"] = QuantumEnhancedAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization").set_name("LLAMAQuantumEnhancedAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # QuantumEnhancedAdaptiveExplorationOptimization + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveExplorationOptimization import ( + QuantumEnhancedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumEnhancedAdaptiveExplorationOptimization"] = ( + QuantumEnhancedAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumEnhancedAdaptiveExplorationOptimization", register=True) +except Exception as e: # QuantumEnhancedAdaptiveExplorationOptimization print("QuantumEnhancedAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE import QuantumEnhancedAdaptiveMultiPhaseDE +try: # QuantumEnhancedAdaptiveMultiPhaseDE + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE import ( + QuantumEnhancedAdaptiveMultiPhaseDE, + ) lama_register["QuantumEnhancedAdaptiveMultiPhaseDE"] = QuantumEnhancedAdaptiveMultiPhaseDE - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE").set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE" + ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE", register=True) +except Exception as e: # QuantumEnhancedAdaptiveMultiPhaseDE print("QuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE_v7 import QuantumEnhancedAdaptiveMultiPhaseDE_v7 +try: # QuantumEnhancedAdaptiveMultiPhaseDE_v7 + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveMultiPhaseDE_v7 import ( + QuantumEnhancedAdaptiveMultiPhaseDE_v7, + ) lama_register["QuantumEnhancedAdaptiveMultiPhaseDE_v7"] = QuantumEnhancedAdaptiveMultiPhaseDE_v7 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7 = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7").set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7" + ).set_name("LLAMAQuantumEnhancedAdaptiveMultiPhaseDE_v7", register=True) +except Exception as e: # QuantumEnhancedAdaptiveMultiPhaseDE_v7 print("QuantumEnhancedAdaptiveMultiPhaseDE_v7 can not be imported: ", e) -try: +try: # QuantumEnhancedAdaptiveOptimizer from nevergrad.optimization.lama.QuantumEnhancedAdaptiveOptimizer import QuantumEnhancedAdaptiveOptimizer lama_register["QuantumEnhancedAdaptiveOptimizer"] = QuantumEnhancedAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveOptimizer").set_name("LLAMAQuantumEnhancedAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveOptimizer" + ).set_name("LLAMAQuantumEnhancedAdaptiveOptimizer", register=True) +except Exception as e: # QuantumEnhancedAdaptiveOptimizer print("QuantumEnhancedAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedAdaptiveSwarmOptimization import QuantumEnhancedAdaptiveSwarmOptimization +try: # QuantumEnhancedAdaptiveSwarmOptimization + from nevergrad.optimization.lama.QuantumEnhancedAdaptiveSwarmOptimization import ( + QuantumEnhancedAdaptiveSwarmOptimization, + ) lama_register["QuantumEnhancedAdaptiveSwarmOptimization"] = QuantumEnhancedAdaptiveSwarmOptimization - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedAdaptiveSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization").set_name("LLAMAQuantumEnhancedAdaptiveSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedAdaptiveSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedAdaptiveSwarmOptimization" + ).set_name("LLAMAQuantumEnhancedAdaptiveSwarmOptimization", register=True) +except Exception as e: # QuantumEnhancedAdaptiveSwarmOptimization print("QuantumEnhancedAdaptiveSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolution import QuantumEnhancedDifferentialEvolution +try: # QuantumEnhancedDifferentialEvolution + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolution import ( + QuantumEnhancedDifferentialEvolution, + ) lama_register["QuantumEnhancedDifferentialEvolution"] = QuantumEnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolution").set_name("LLAMAQuantumEnhancedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDifferentialEvolution" + ).set_name("LLAMAQuantumEnhancedDifferentialEvolution", register=True) +except Exception as e: # QuantumEnhancedDifferentialEvolution print("QuantumEnhancedDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart import QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart - - lama_register["QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart"] = QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart").set_name("LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart", register=True) -except Exception as e: +try: # QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart + from nevergrad.optimization.lama.QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart import ( + QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart, + ) + + lama_register["QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart"] = ( + QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart" + ).set_name("LLAMAQuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart", register=True) +except Exception as e: # QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart print("QuantumEnhancedDifferentialEvolutionWithAdaptiveElitismAndDynamicRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDiversityExplorerV8 import QuantumEnhancedDiversityExplorerV8 +try: # QuantumEnhancedDiversityExplorerV8 + from nevergrad.optimization.lama.QuantumEnhancedDiversityExplorerV8 import ( + QuantumEnhancedDiversityExplorerV8, + ) lama_register["QuantumEnhancedDiversityExplorerV8"] = QuantumEnhancedDiversityExplorerV8 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDiversityExplorerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDiversityExplorerV8 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDiversityExplorerV8").set_name("LLAMAQuantumEnhancedDiversityExplorerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDiversityExplorerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDiversityExplorerV8 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDiversityExplorerV8" + ).set_name("LLAMAQuantumEnhancedDiversityExplorerV8", register=True) +except Exception as e: # QuantumEnhancedDiversityExplorerV8 print("QuantumEnhancedDiversityExplorerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO import QuantumEnhancedDynamicAdaptiveHybridDEPSO +try: # QuantumEnhancedDynamicAdaptiveHybridDEPSO + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO, + ) lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: # QuantumEnhancedDynamicAdaptiveHybridDEPSO print("QuantumEnhancedDynamicAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 - - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V2", register=True) +except Exception as e: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 - - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V3", register=True) +except Exception as e: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 - - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V4", register=True) +except Exception as e: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 import QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 - - lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5"] = QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5").set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 + from nevergrad.optimization.lama.QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 import ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5, + ) + + lama_register["QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5"] = ( + QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5" + ).set_name("LLAMAQuantumEnhancedDynamicAdaptiveHybridDEPSO_V5", register=True) +except Exception as e: # QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 print("QuantumEnhancedDynamicAdaptiveHybridDEPSO_V5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution import QuantumEnhancedDynamicDifferentialEvolution +try: # QuantumEnhancedDynamicDifferentialEvolution + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution import ( + QuantumEnhancedDynamicDifferentialEvolution, + ) lama_register["QuantumEnhancedDynamicDifferentialEvolution"] = QuantumEnhancedDynamicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution", register=True) +except Exception as e: # QuantumEnhancedDynamicDifferentialEvolution print("QuantumEnhancedDynamicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v2 import QuantumEnhancedDynamicDifferentialEvolution_v2 - - lama_register["QuantumEnhancedDynamicDifferentialEvolution_v2"] = QuantumEnhancedDynamicDifferentialEvolution_v2 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicDifferentialEvolution_v2 + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v2 import ( + QuantumEnhancedDynamicDifferentialEvolution_v2, + ) + + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v2"] = ( + QuantumEnhancedDynamicDifferentialEvolution_v2 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v2", register=True) +except Exception as e: # QuantumEnhancedDynamicDifferentialEvolution_v2 print("QuantumEnhancedDynamicDifferentialEvolution_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v3 import QuantumEnhancedDynamicDifferentialEvolution_v3 - - lama_register["QuantumEnhancedDynamicDifferentialEvolution_v3"] = QuantumEnhancedDynamicDifferentialEvolution_v3 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3").set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3", register=True) -except Exception as e: +try: # QuantumEnhancedDynamicDifferentialEvolution_v3 + from nevergrad.optimization.lama.QuantumEnhancedDynamicDifferentialEvolution_v3 import ( + QuantumEnhancedDynamicDifferentialEvolution_v3, + ) + + lama_register["QuantumEnhancedDynamicDifferentialEvolution_v3"] = ( + QuantumEnhancedDynamicDifferentialEvolution_v3 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3" + ).set_name("LLAMAQuantumEnhancedDynamicDifferentialEvolution_v3", register=True) +except Exception as e: # QuantumEnhancedDynamicDifferentialEvolution_v3 print("QuantumEnhancedDynamicDifferentialEvolution_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicHybridSearchV9 import QuantumEnhancedDynamicHybridSearchV9 +try: # QuantumEnhancedDynamicHybridSearchV9 + from nevergrad.optimization.lama.QuantumEnhancedDynamicHybridSearchV9 import ( + QuantumEnhancedDynamicHybridSearchV9, + ) lama_register["QuantumEnhancedDynamicHybridSearchV9"] = QuantumEnhancedDynamicHybridSearchV9 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicHybridSearchV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicHybridSearchV9 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicHybridSearchV9").set_name("LLAMAQuantumEnhancedDynamicHybridSearchV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicHybridSearchV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicHybridSearchV9 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicHybridSearchV9" + ).set_name("LLAMAQuantumEnhancedDynamicHybridSearchV9", register=True) +except Exception as e: # QuantumEnhancedDynamicHybridSearchV9 print("QuantumEnhancedDynamicHybridSearchV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE import QuantumEnhancedDynamicMultiStrategyDE +try: # QuantumEnhancedDynamicMultiStrategyDE + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE import ( + QuantumEnhancedDynamicMultiStrategyDE, + ) lama_register["QuantumEnhancedDynamicMultiStrategyDE"] = QuantumEnhancedDynamicMultiStrategyDE - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicMultiStrategyDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE").set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicMultiStrategyDE = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicMultiStrategyDE" + ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE", register=True) +except Exception as e: # QuantumEnhancedDynamicMultiStrategyDE print("QuantumEnhancedDynamicMultiStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE_v2 import QuantumEnhancedDynamicMultiStrategyDE_v2 +try: # QuantumEnhancedDynamicMultiStrategyDE_v2 + from nevergrad.optimization.lama.QuantumEnhancedDynamicMultiStrategyDE_v2 import ( + QuantumEnhancedDynamicMultiStrategyDE_v2, + ) lama_register["QuantumEnhancedDynamicMultiStrategyDE_v2"] = QuantumEnhancedDynamicMultiStrategyDE_v2 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2").set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2" + ).set_name("LLAMAQuantumEnhancedDynamicMultiStrategyDE_v2", register=True) +except Exception as e: # QuantumEnhancedDynamicMultiStrategyDE_v2 print("QuantumEnhancedDynamicMultiStrategyDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedGlobalTacticalOptimizer import QuantumEnhancedGlobalTacticalOptimizer +try: # QuantumEnhancedGlobalTacticalOptimizer + from nevergrad.optimization.lama.QuantumEnhancedGlobalTacticalOptimizer import ( + QuantumEnhancedGlobalTacticalOptimizer, + ) lama_register["QuantumEnhancedGlobalTacticalOptimizer"] = QuantumEnhancedGlobalTacticalOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGlobalTacticalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedGlobalTacticalOptimizer = NonObjectOptimizer(method="LLAMAQuantumEnhancedGlobalTacticalOptimizer").set_name("LLAMAQuantumEnhancedGlobalTacticalOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGlobalTacticalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedGlobalTacticalOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEnhancedGlobalTacticalOptimizer" + ).set_name("LLAMAQuantumEnhancedGlobalTacticalOptimizer", register=True) +except Exception as e: # QuantumEnhancedGlobalTacticalOptimizer print("QuantumEnhancedGlobalTacticalOptimizer can not be imported: ", e) -try: +try: # QuantumEnhancedGradientClimber from nevergrad.optimization.lama.QuantumEnhancedGradientClimber import QuantumEnhancedGradientClimber lama_register["QuantumEnhancedGradientClimber"] = QuantumEnhancedGradientClimber - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGradientClimber")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedGradientClimber = NonObjectOptimizer(method="LLAMAQuantumEnhancedGradientClimber").set_name("LLAMAQuantumEnhancedGradientClimber", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedGradientClimber")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedGradientClimber = NonObjectOptimizer( + method="LLAMAQuantumEnhancedGradientClimber" + ).set_name("LLAMAQuantumEnhancedGradientClimber", register=True) +except Exception as e: # QuantumEnhancedGradientClimber print("QuantumEnhancedGradientClimber can not be imported: ", e) -try: +try: # QuantumEnhancedHybridDEPSO from nevergrad.optimization.lama.QuantumEnhancedHybridDEPSO import QuantumEnhancedHybridDEPSO lama_register["QuantumEnhancedHybridDEPSO"] = QuantumEnhancedHybridDEPSO - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO").set_name("LLAMAQuantumEnhancedHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumEnhancedHybridDEPSO").set_name( + "LLAMAQuantumEnhancedHybridDEPSO", register=True + ) +except Exception as e: # QuantumEnhancedHybridDEPSO print("QuantumEnhancedHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedMemeticAdaptiveSearch import QuantumEnhancedMemeticAdaptiveSearch +try: # QuantumEnhancedMemeticAdaptiveSearch + from nevergrad.optimization.lama.QuantumEnhancedMemeticAdaptiveSearch import ( + QuantumEnhancedMemeticAdaptiveSearch, + ) lama_register["QuantumEnhancedMemeticAdaptiveSearch"] = QuantumEnhancedMemeticAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMemeticAdaptiveSearch = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticAdaptiveSearch").set_name("LLAMAQuantumEnhancedMemeticAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMemeticAdaptiveSearch = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMemeticAdaptiveSearch" + ).set_name("LLAMAQuantumEnhancedMemeticAdaptiveSearch", register=True) +except Exception as e: # QuantumEnhancedMemeticAdaptiveSearch print("QuantumEnhancedMemeticAdaptiveSearch can not be imported: ", e) -try: +try: # QuantumEnhancedMemeticSearch from nevergrad.optimization.lama.QuantumEnhancedMemeticSearch import QuantumEnhancedMemeticSearch lama_register["QuantumEnhancedMemeticSearch"] = QuantumEnhancedMemeticSearch - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticSearch").set_name("LLAMAQuantumEnhancedMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMemeticSearch" + ).set_name("LLAMAQuantumEnhancedMemeticSearch", register=True) +except Exception as e: # QuantumEnhancedMemeticSearch print("QuantumEnhancedMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v8 import QuantumEnhancedMultiPhaseAdaptiveDE_v8 +try: # QuantumEnhancedMultiPhaseAdaptiveDE_v8 + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v8 import ( + QuantumEnhancedMultiPhaseAdaptiveDE_v8, + ) lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v8"] = QuantumEnhancedMultiPhaseAdaptiveDE_v8 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8").set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8" + ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v8", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseAdaptiveDE_v8 print("QuantumEnhancedMultiPhaseAdaptiveDE_v8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v9 import QuantumEnhancedMultiPhaseAdaptiveDE_v9 +try: # QuantumEnhancedMultiPhaseAdaptiveDE_v9 + from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseAdaptiveDE_v9 import ( + QuantumEnhancedMultiPhaseAdaptiveDE_v9, + ) lama_register["QuantumEnhancedMultiPhaseAdaptiveDE_v9"] = QuantumEnhancedMultiPhaseAdaptiveDE_v9 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9").set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9" + ).set_name("LLAMAQuantumEnhancedMultiPhaseAdaptiveDE_v9", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseAdaptiveDE_v9 print("QuantumEnhancedMultiPhaseAdaptiveDE_v9 can not be imported: ", e) -try: +try: # QuantumEnhancedMultiPhaseDE from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE import QuantumEnhancedMultiPhaseDE lama_register["QuantumEnhancedMultiPhaseDE"] = QuantumEnhancedMultiPhaseDE - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE").set_name("LLAMAQuantumEnhancedMultiPhaseDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseDE = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE").set_name( + "LLAMAQuantumEnhancedMultiPhaseDE", register=True + ) +except Exception as e: # QuantumEnhancedMultiPhaseDE print("QuantumEnhancedMultiPhaseDE can not be imported: ", e) -try: +try: # QuantumEnhancedMultiPhaseDE_v2 from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v2 import QuantumEnhancedMultiPhaseDE_v2 lama_register["QuantumEnhancedMultiPhaseDE_v2"] = QuantumEnhancedMultiPhaseDE_v2 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseDE_v2 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v2").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v2" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v2", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseDE_v2 print("QuantumEnhancedMultiPhaseDE_v2 can not be imported: ", e) -try: +try: # QuantumEnhancedMultiPhaseDE_v3 from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v3 import QuantumEnhancedMultiPhaseDE_v3 lama_register["QuantumEnhancedMultiPhaseDE_v3"] = QuantumEnhancedMultiPhaseDE_v3 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseDE_v3 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v3").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v3" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v3", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseDE_v3 print("QuantumEnhancedMultiPhaseDE_v3 can not be imported: ", e) -try: +try: # QuantumEnhancedMultiPhaseDE_v4 from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v4 import QuantumEnhancedMultiPhaseDE_v4 lama_register["QuantumEnhancedMultiPhaseDE_v4"] = QuantumEnhancedMultiPhaseDE_v4 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseDE_v4 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v4").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseDE_v4 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v4" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v4", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseDE_v4 print("QuantumEnhancedMultiPhaseDE_v4 can not be imported: ", e) -try: +try: # QuantumEnhancedMultiPhaseDE_v5 from nevergrad.optimization.lama.QuantumEnhancedMultiPhaseDE_v5 import QuantumEnhancedMultiPhaseDE_v5 lama_register["QuantumEnhancedMultiPhaseDE_v5"] = QuantumEnhancedMultiPhaseDE_v5 - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedMultiPhaseDE_v5 = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v5").set_name("LLAMAQuantumEnhancedMultiPhaseDE_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedMultiPhaseDE_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedMultiPhaseDE_v5 = NonObjectOptimizer( + method="LLAMAQuantumEnhancedMultiPhaseDE_v5" + ).set_name("LLAMAQuantumEnhancedMultiPhaseDE_v5", register=True) +except Exception as e: # QuantumEnhancedMultiPhaseDE_v5 print("QuantumEnhancedMultiPhaseDE_v5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEnhancedRefinedAdaptiveExplorationOptimization import QuantumEnhancedRefinedAdaptiveExplorationOptimization - - lama_register["QuantumEnhancedRefinedAdaptiveExplorationOptimization"] = QuantumEnhancedRefinedAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization").set_name("LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # QuantumEnhancedRefinedAdaptiveExplorationOptimization + from nevergrad.optimization.lama.QuantumEnhancedRefinedAdaptiveExplorationOptimization import ( + QuantumEnhancedRefinedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumEnhancedRefinedAdaptiveExplorationOptimization"] = ( + QuantumEnhancedRefinedAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumEnhancedRefinedAdaptiveExplorationOptimization", register=True) +except Exception as e: # QuantumEnhancedRefinedAdaptiveExplorationOptimization print("QuantumEnhancedRefinedAdaptiveExplorationOptimization can not be imported: ", e) -try: +try: # QuantumEntropyEnhancedDE from nevergrad.optimization.lama.QuantumEntropyEnhancedDE import QuantumEntropyEnhancedDE lama_register["QuantumEntropyEnhancedDE"] = QuantumEntropyEnhancedDE - res = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEntropyEnhancedDE = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE").set_name("LLAMAQuantumEntropyEnhancedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEntropyEnhancedDE = NonObjectOptimizer(method="LLAMAQuantumEntropyEnhancedDE").set_name( + "LLAMAQuantumEntropyEnhancedDE", register=True + ) +except Exception as e: # QuantumEntropyEnhancedDE print("QuantumEntropyEnhancedDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolutionaryAdaptiveOptimizer import QuantumEvolutionaryAdaptiveOptimizer +try: # QuantumEvolutionaryAdaptiveOptimizer + from nevergrad.optimization.lama.QuantumEvolutionaryAdaptiveOptimizer import ( + QuantumEvolutionaryAdaptiveOptimizer, + ) lama_register["QuantumEvolutionaryAdaptiveOptimizer"] = QuantumEvolutionaryAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryAdaptiveOptimizer").set_name("LLAMAQuantumEvolutionaryAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryAdaptiveOptimizer" + ).set_name("LLAMAQuantumEvolutionaryAdaptiveOptimizer", register=True) +except Exception as e: # QuantumEvolutionaryAdaptiveOptimizer print("QuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategy import QuantumEvolutionaryConvergenceStrategy +try: # QuantumEvolutionaryConvergenceStrategy + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategy import ( + QuantumEvolutionaryConvergenceStrategy, + ) lama_register["QuantumEvolutionaryConvergenceStrategy"] = QuantumEvolutionaryConvergenceStrategy - res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolutionaryConvergenceStrategy = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategy").set_name("LLAMAQuantumEvolutionaryConvergenceStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolutionaryConvergenceStrategy = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryConvergenceStrategy" + ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategy", register=True) +except Exception as e: # QuantumEvolutionaryConvergenceStrategy print("QuantumEvolutionaryConvergenceStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategyV2 import QuantumEvolutionaryConvergenceStrategyV2 +try: # QuantumEvolutionaryConvergenceStrategyV2 + from nevergrad.optimization.lama.QuantumEvolutionaryConvergenceStrategyV2 import ( + QuantumEvolutionaryConvergenceStrategyV2, + ) lama_register["QuantumEvolutionaryConvergenceStrategyV2"] = QuantumEvolutionaryConvergenceStrategyV2 - res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolutionaryConvergenceStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategyV2").set_name("LLAMAQuantumEvolutionaryConvergenceStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryConvergenceStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolutionaryConvergenceStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryConvergenceStrategyV2" + ).set_name("LLAMAQuantumEvolutionaryConvergenceStrategyV2", register=True) +except Exception as e: # QuantumEvolutionaryConvergenceStrategyV2 print("QuantumEvolutionaryConvergenceStrategyV2 can not be imported: ", e) -try: +try: # QuantumEvolutionaryOptimization from nevergrad.optimization.lama.QuantumEvolutionaryOptimization import QuantumEvolutionaryOptimization lama_register["QuantumEvolutionaryOptimization"] = QuantumEvolutionaryOptimization - res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolutionaryOptimization = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryOptimization").set_name("LLAMAQuantumEvolutionaryOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolutionaryOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolutionaryOptimization = NonObjectOptimizer( + method="LLAMAQuantumEvolutionaryOptimization" + ).set_name("LLAMAQuantumEvolutionaryOptimization", register=True) +except Exception as e: # QuantumEvolutionaryOptimization print("QuantumEvolutionaryOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV10 import QuantumEvolvedDiversityExplorerV10 +try: # QuantumEvolvedDiversityExplorerV10 + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV10 import ( + QuantumEvolvedDiversityExplorerV10, + ) lama_register["QuantumEvolvedDiversityExplorerV10"] = QuantumEvolvedDiversityExplorerV10 - res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolvedDiversityExplorerV10 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV10").set_name("LLAMAQuantumEvolvedDiversityExplorerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolvedDiversityExplorerV10 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV10" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV10", register=True) +except Exception as e: # QuantumEvolvedDiversityExplorerV10 print("QuantumEvolvedDiversityExplorerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV11 import QuantumEvolvedDiversityExplorerV11 +try: # QuantumEvolvedDiversityExplorerV11 + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV11 import ( + QuantumEvolvedDiversityExplorerV11, + ) lama_register["QuantumEvolvedDiversityExplorerV11"] = QuantumEvolvedDiversityExplorerV11 - res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolvedDiversityExplorerV11 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV11").set_name("LLAMAQuantumEvolvedDiversityExplorerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolvedDiversityExplorerV11 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV11" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV11", register=True) +except Exception as e: # QuantumEvolvedDiversityExplorerV11 print("QuantumEvolvedDiversityExplorerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV12 import QuantumEvolvedDiversityExplorerV12 +try: # QuantumEvolvedDiversityExplorerV12 + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV12 import ( + QuantumEvolvedDiversityExplorerV12, + ) lama_register["QuantumEvolvedDiversityExplorerV12"] = QuantumEvolvedDiversityExplorerV12 - res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolvedDiversityExplorerV12 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV12").set_name("LLAMAQuantumEvolvedDiversityExplorerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolvedDiversityExplorerV12 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV12" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV12", register=True) +except Exception as e: # QuantumEvolvedDiversityExplorerV12 print("QuantumEvolvedDiversityExplorerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV9 import QuantumEvolvedDiversityExplorerV9 +try: # QuantumEvolvedDiversityExplorerV9 + from nevergrad.optimization.lama.QuantumEvolvedDiversityExplorerV9 import ( + QuantumEvolvedDiversityExplorerV9, + ) lama_register["QuantumEvolvedDiversityExplorerV9"] = QuantumEvolvedDiversityExplorerV9 - res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumEvolvedDiversityExplorerV9 = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV9").set_name("LLAMAQuantumEvolvedDiversityExplorerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumEvolvedDiversityExplorerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumEvolvedDiversityExplorerV9 = NonObjectOptimizer( + method="LLAMAQuantumEvolvedDiversityExplorerV9" + ).set_name("LLAMAQuantumEvolvedDiversityExplorerV9", register=True) +except Exception as e: # QuantumEvolvedDiversityExplorerV9 print("QuantumEvolvedDiversityExplorerV9 can not be imported: ", e) -try: +try: # QuantumFeedbackEvolutionStrategy from nevergrad.optimization.lama.QuantumFeedbackEvolutionStrategy import QuantumFeedbackEvolutionStrategy lama_register["QuantumFeedbackEvolutionStrategy"] = QuantumFeedbackEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumFeedbackEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumFeedbackEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumFeedbackEvolutionStrategy").set_name("LLAMAQuantumFeedbackEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumFeedbackEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumFeedbackEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumFeedbackEvolutionStrategy" + ).set_name("LLAMAQuantumFeedbackEvolutionStrategy", register=True) +except Exception as e: # QuantumFeedbackEvolutionStrategy print("QuantumFeedbackEvolutionStrategy can not be imported: ", e) -try: +try: # QuantumFireworksAlgorithm from nevergrad.optimization.lama.QuantumFireworksAlgorithm import QuantumFireworksAlgorithm lama_register["QuantumFireworksAlgorithm"] = QuantumFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm").set_name("LLAMAQuantumFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumFireworksAlgorithm").set_name( + "LLAMAQuantumFireworksAlgorithm", register=True + ) +except Exception as e: # QuantumFireworksAlgorithm print("QuantumFireworksAlgorithm can not be imported: ", e) -try: +try: # QuantumFluxDifferentialSwarm from nevergrad.optimization.lama.QuantumFluxDifferentialSwarm import QuantumFluxDifferentialSwarm lama_register["QuantumFluxDifferentialSwarm"] = QuantumFluxDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumFluxDifferentialSwarm").set_name("LLAMAQuantumFluxDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumFluxDifferentialSwarm" + ).set_name("LLAMAQuantumFluxDifferentialSwarm", register=True) +except Exception as e: # QuantumFluxDifferentialSwarm print("QuantumFluxDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGeneticDifferentialEvolution import QuantumGeneticDifferentialEvolution +try: # QuantumGeneticDifferentialEvolution + from nevergrad.optimization.lama.QuantumGeneticDifferentialEvolution import ( + QuantumGeneticDifferentialEvolution, + ) lama_register["QuantumGeneticDifferentialEvolution"] = QuantumGeneticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumGeneticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGeneticDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumGeneticDifferentialEvolution").set_name("LLAMAQuantumGeneticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGeneticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGeneticDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumGeneticDifferentialEvolution" + ).set_name("LLAMAQuantumGeneticDifferentialEvolution", register=True) +except Exception as e: # QuantumGeneticDifferentialEvolution print("QuantumGeneticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimization import QuantumGradientAdaptiveExplorationOptimization - - lama_register["QuantumGradientAdaptiveExplorationOptimization"] = QuantumGradientAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationOptimization + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimization import ( + QuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimization"] = ( + QuantumGradientAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationOptimization print("QuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV2 import QuantumGradientAdaptiveExplorationOptimizationV2 - - lama_register["QuantumGradientAdaptiveExplorationOptimizationV2"] = QuantumGradientAdaptiveExplorationOptimizationV2 - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV2", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationOptimizationV2 + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV2 import ( + QuantumGradientAdaptiveExplorationOptimizationV2, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV2"] = ( + QuantumGradientAdaptiveExplorationOptimizationV2 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV2" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV2", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationOptimizationV2 print("QuantumGradientAdaptiveExplorationOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV3 import QuantumGradientAdaptiveExplorationOptimizationV3 - - lama_register["QuantumGradientAdaptiveExplorationOptimizationV3"] = QuantumGradientAdaptiveExplorationOptimizationV3 - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV3", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationOptimizationV3 + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV3 import ( + QuantumGradientAdaptiveExplorationOptimizationV3, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV3"] = ( + QuantumGradientAdaptiveExplorationOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV3" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV3", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationOptimizationV3 print("QuantumGradientAdaptiveExplorationOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV4 import QuantumGradientAdaptiveExplorationOptimizationV4 - - lama_register["QuantumGradientAdaptiveExplorationOptimizationV4"] = QuantumGradientAdaptiveExplorationOptimizationV4 - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationOptimizationV4 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV4", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationOptimizationV4 + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV4 import ( + QuantumGradientAdaptiveExplorationOptimizationV4, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV4"] = ( + QuantumGradientAdaptiveExplorationOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV4 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV4" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV4", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationOptimizationV4 print("QuantumGradientAdaptiveExplorationOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV5 import QuantumGradientAdaptiveExplorationOptimizationV5 - - lama_register["QuantumGradientAdaptiveExplorationOptimizationV5"] = QuantumGradientAdaptiveExplorationOptimizationV5 - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5").set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV5", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationOptimizationV5 + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationOptimizationV5 import ( + QuantumGradientAdaptiveExplorationOptimizationV5, + ) + + lama_register["QuantumGradientAdaptiveExplorationOptimizationV5"] = ( + QuantumGradientAdaptiveExplorationOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationOptimizationV5 = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationOptimizationV5" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationOptimizationV5", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationOptimizationV5 print("QuantumGradientAdaptiveExplorationOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationRefinedOptimization import QuantumGradientAdaptiveExplorationRefinedOptimization - - lama_register["QuantumGradientAdaptiveExplorationRefinedOptimization"] = QuantumGradientAdaptiveExplorationRefinedOptimization - res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization").set_name("LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization", register=True) -except Exception as e: +try: # QuantumGradientAdaptiveExplorationRefinedOptimization + from nevergrad.optimization.lama.QuantumGradientAdaptiveExplorationRefinedOptimization import ( + QuantumGradientAdaptiveExplorationRefinedOptimization, + ) + + lama_register["QuantumGradientAdaptiveExplorationRefinedOptimization"] = ( + QuantumGradientAdaptiveExplorationRefinedOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization" + ).set_name("LLAMAQuantumGradientAdaptiveExplorationRefinedOptimization", register=True) +except Exception as e: # QuantumGradientAdaptiveExplorationRefinedOptimization print("QuantumGradientAdaptiveExplorationRefinedOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientBalancedOptimizerV6 import QuantumGradientBalancedOptimizerV6 +try: # QuantumGradientBalancedOptimizerV6 + from nevergrad.optimization.lama.QuantumGradientBalancedOptimizerV6 import ( + QuantumGradientBalancedOptimizerV6, + ) lama_register["QuantumGradientBalancedOptimizerV6"] = QuantumGradientBalancedOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumGradientBalancedOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientBalancedOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumGradientBalancedOptimizerV6").set_name("LLAMAQuantumGradientBalancedOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientBalancedOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientBalancedOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumGradientBalancedOptimizerV6" + ).set_name("LLAMAQuantumGradientBalancedOptimizerV6", register=True) +except Exception as e: # QuantumGradientBalancedOptimizerV6 print("QuantumGradientBalancedOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientBoostedMemeticSearch import QuantumGradientBoostedMemeticSearch +try: # QuantumGradientBoostedMemeticSearch + from nevergrad.optimization.lama.QuantumGradientBoostedMemeticSearch import ( + QuantumGradientBoostedMemeticSearch, + ) lama_register["QuantumGradientBoostedMemeticSearch"] = QuantumGradientBoostedMemeticSearch - res = NonObjectOptimizer(method="LLAMAQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumGradientBoostedMemeticSearch").set_name("LLAMAQuantumGradientBoostedMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMAQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: # QuantumGradientBoostedMemeticSearch print("QuantumGradientBoostedMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientEnhancedExplorationOptimization import QuantumGradientEnhancedExplorationOptimization - - lama_register["QuantumGradientEnhancedExplorationOptimization"] = QuantumGradientEnhancedExplorationOptimization - res = NonObjectOptimizer(method="LLAMAQuantumGradientEnhancedExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientEnhancedExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientEnhancedExplorationOptimization").set_name("LLAMAQuantumGradientEnhancedExplorationOptimization", register=True) -except Exception as e: +try: # QuantumGradientEnhancedExplorationOptimization + from nevergrad.optimization.lama.QuantumGradientEnhancedExplorationOptimization import ( + QuantumGradientEnhancedExplorationOptimization, + ) + + lama_register["QuantumGradientEnhancedExplorationOptimization"] = ( + QuantumGradientEnhancedExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumGradientEnhancedExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientEnhancedExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientEnhancedExplorationOptimization" + ).set_name("LLAMAQuantumGradientEnhancedExplorationOptimization", register=True) +except Exception as e: # QuantumGradientEnhancedExplorationOptimization print("QuantumGradientEnhancedExplorationOptimization can not be imported: ", e) -try: +try: # QuantumGradientFusionOptimizer from nevergrad.optimization.lama.QuantumGradientFusionOptimizer import QuantumGradientFusionOptimizer lama_register["QuantumGradientFusionOptimizer"] = QuantumGradientFusionOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumGradientFusionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientFusionOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientFusionOptimizer").set_name("LLAMAQuantumGradientFusionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientFusionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientFusionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientFusionOptimizer" + ).set_name("LLAMAQuantumGradientFusionOptimizer", register=True) +except Exception as e: # QuantumGradientFusionOptimizer print("QuantumGradientFusionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientGuidedFireworksAlgorithm import QuantumGradientGuidedFireworksAlgorithm +try: # QuantumGradientGuidedFireworksAlgorithm + from nevergrad.optimization.lama.QuantumGradientGuidedFireworksAlgorithm import ( + QuantumGradientGuidedFireworksAlgorithm, + ) lama_register["QuantumGradientGuidedFireworksAlgorithm"] = QuantumGradientGuidedFireworksAlgorithm - res = NonObjectOptimizer(method="LLAMAQuantumGradientGuidedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientGuidedFireworksAlgorithm = NonObjectOptimizer(method="LLAMAQuantumGradientGuidedFireworksAlgorithm").set_name("LLAMAQuantumGradientGuidedFireworksAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientGuidedFireworksAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientGuidedFireworksAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumGradientGuidedFireworksAlgorithm" + ).set_name("LLAMAQuantumGradientGuidedFireworksAlgorithm", register=True) +except Exception as e: # QuantumGradientGuidedFireworksAlgorithm print("QuantumGradientGuidedFireworksAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimization import QuantumGradientHybridOptimization +try: # QuantumGradientHybridOptimization + from nevergrad.optimization.lama.QuantumGradientHybridOptimization import ( + QuantumGradientHybridOptimization, + ) lama_register["QuantumGradientHybridOptimization"] = QuantumGradientHybridOptimization - res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientHybridOptimization = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimization").set_name("LLAMAQuantumGradientHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientHybridOptimization = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimization" + ).set_name("LLAMAQuantumGradientHybridOptimization", register=True) +except Exception as e: # QuantumGradientHybridOptimization print("QuantumGradientHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV2 import QuantumGradientHybridOptimizationV2 +try: # QuantumGradientHybridOptimizationV2 + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV2 import ( + QuantumGradientHybridOptimizationV2, + ) lama_register["QuantumGradientHybridOptimizationV2"] = QuantumGradientHybridOptimizationV2 - res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientHybridOptimizationV2 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV2").set_name("LLAMAQuantumGradientHybridOptimizationV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientHybridOptimizationV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV2" + ).set_name("LLAMAQuantumGradientHybridOptimizationV2", register=True) +except Exception as e: # QuantumGradientHybridOptimizationV2 print("QuantumGradientHybridOptimizationV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV3 import QuantumGradientHybridOptimizationV3 +try: # QuantumGradientHybridOptimizationV3 + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV3 import ( + QuantumGradientHybridOptimizationV3, + ) lama_register["QuantumGradientHybridOptimizationV3"] = QuantumGradientHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientHybridOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV3").set_name("LLAMAQuantumGradientHybridOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV3" + ).set_name("LLAMAQuantumGradientHybridOptimizationV3", register=True) +except Exception as e: # QuantumGradientHybridOptimizationV3 print("QuantumGradientHybridOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV4 import QuantumGradientHybridOptimizationV4 +try: # QuantumGradientHybridOptimizationV4 + from nevergrad.optimization.lama.QuantumGradientHybridOptimizationV4 import ( + QuantumGradientHybridOptimizationV4, + ) lama_register["QuantumGradientHybridOptimizationV4"] = QuantumGradientHybridOptimizationV4 - res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientHybridOptimizationV4 = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV4").set_name("LLAMAQuantumGradientHybridOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizationV4" + ).set_name("LLAMAQuantumGradientHybridOptimizationV4", register=True) +except Exception as e: # QuantumGradientHybridOptimizationV4 print("QuantumGradientHybridOptimizationV4 can not be imported: ", e) -try: +try: # QuantumGradientHybridOptimizer from nevergrad.optimization.lama.QuantumGradientHybridOptimizer import QuantumGradientHybridOptimizer lama_register["QuantumGradientHybridOptimizer"] = QuantumGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizer").set_name("LLAMAQuantumGradientHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientHybridOptimizer" + ).set_name("LLAMAQuantumGradientHybridOptimizer", register=True) +except Exception as e: # QuantumGradientHybridOptimizer print("QuantumGradientHybridOptimizer can not be imported: ", e) -try: +try: # QuantumGradientMemeticOptimizer from nevergrad.optimization.lama.QuantumGradientMemeticOptimizer import QuantumGradientMemeticOptimizer lama_register["QuantumGradientMemeticOptimizer"] = QuantumGradientMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticOptimizer").set_name("LLAMAQuantumGradientMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticOptimizer" + ).set_name("LLAMAQuantumGradientMemeticOptimizer", register=True) +except Exception as e: # QuantumGradientMemeticOptimizer print("QuantumGradientMemeticOptimizer can not be imported: ", e) -try: +try: # QuantumGradientMemeticSearch from nevergrad.optimization.lama.QuantumGradientMemeticSearch import QuantumGradientMemeticSearch lama_register["QuantumGradientMemeticSearch"] = QuantumGradientMemeticSearch - res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientMemeticSearch = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearch").set_name("LLAMAQuantumGradientMemeticSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientMemeticSearch = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearch" + ).set_name("LLAMAQuantumGradientMemeticSearch", register=True) +except Exception as e: # QuantumGradientMemeticSearch print("QuantumGradientMemeticSearch can not be imported: ", e) -try: +try: # QuantumGradientMemeticSearchV2 from nevergrad.optimization.lama.QuantumGradientMemeticSearchV2 import QuantumGradientMemeticSearchV2 lama_register["QuantumGradientMemeticSearchV2"] = QuantumGradientMemeticSearchV2 - res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientMemeticSearchV2 = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV2").set_name("LLAMAQuantumGradientMemeticSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientMemeticSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearchV2" + ).set_name("LLAMAQuantumGradientMemeticSearchV2", register=True) +except Exception as e: # QuantumGradientMemeticSearchV2 print("QuantumGradientMemeticSearchV2 can not be imported: ", e) -try: +try: # QuantumGradientMemeticSearchV3 from nevergrad.optimization.lama.QuantumGradientMemeticSearchV3 import QuantumGradientMemeticSearchV3 lama_register["QuantumGradientMemeticSearchV3"] = QuantumGradientMemeticSearchV3 - res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGradientMemeticSearchV3 = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV3").set_name("LLAMAQuantumGradientMemeticSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGradientMemeticSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGradientMemeticSearchV3 = NonObjectOptimizer( + method="LLAMAQuantumGradientMemeticSearchV3" + ).set_name("LLAMAQuantumGradientMemeticSearchV3", register=True) +except Exception as e: # QuantumGradientMemeticSearchV3 print("QuantumGradientMemeticSearchV3 can not be imported: ", e) -try: +try: # QuantumGuidedAdaptiveStrategy from nevergrad.optimization.lama.QuantumGuidedAdaptiveStrategy import QuantumGuidedAdaptiveStrategy lama_register["QuantumGuidedAdaptiveStrategy"] = QuantumGuidedAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAQuantumGuidedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGuidedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumGuidedAdaptiveStrategy").set_name("LLAMAQuantumGuidedAdaptiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGuidedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGuidedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumGuidedAdaptiveStrategy" + ).set_name("LLAMAQuantumGuidedAdaptiveStrategy", register=True) +except Exception as e: # QuantumGuidedAdaptiveStrategy print("QuantumGuidedAdaptiveStrategy can not be imported: ", e) -try: +try: # QuantumGuidedCrossoverAdaptation from nevergrad.optimization.lama.QuantumGuidedCrossoverAdaptation import QuantumGuidedCrossoverAdaptation lama_register["QuantumGuidedCrossoverAdaptation"] = QuantumGuidedCrossoverAdaptation - res = NonObjectOptimizer(method="LLAMAQuantumGuidedCrossoverAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGuidedCrossoverAdaptation = NonObjectOptimizer(method="LLAMAQuantumGuidedCrossoverAdaptation").set_name("LLAMAQuantumGuidedCrossoverAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGuidedCrossoverAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGuidedCrossoverAdaptation = NonObjectOptimizer( + method="LLAMAQuantumGuidedCrossoverAdaptation" + ).set_name("LLAMAQuantumGuidedCrossoverAdaptation", register=True) +except Exception as e: # QuantumGuidedCrossoverAdaptation print("QuantumGuidedCrossoverAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumGuidedHybridDifferentialSwarm import QuantumGuidedHybridDifferentialSwarm +try: # QuantumGuidedHybridDifferentialSwarm + from nevergrad.optimization.lama.QuantumGuidedHybridDifferentialSwarm import ( + QuantumGuidedHybridDifferentialSwarm, + ) lama_register["QuantumGuidedHybridDifferentialSwarm"] = QuantumGuidedHybridDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAQuantumGuidedHybridDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGuidedHybridDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumGuidedHybridDifferentialSwarm").set_name("LLAMAQuantumGuidedHybridDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGuidedHybridDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGuidedHybridDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumGuidedHybridDifferentialSwarm" + ).set_name("LLAMAQuantumGuidedHybridDifferentialSwarm", register=True) +except Exception as e: # QuantumGuidedHybridDifferentialSwarm print("QuantumGuidedHybridDifferentialSwarm can not be imported: ", e) -try: +try: # QuantumGuidedLevyAdaptiveSwarm from nevergrad.optimization.lama.QuantumGuidedLevyAdaptiveSwarm import QuantumGuidedLevyAdaptiveSwarm lama_register["QuantumGuidedLevyAdaptiveSwarm"] = QuantumGuidedLevyAdaptiveSwarm - res = NonObjectOptimizer(method="LLAMAQuantumGuidedLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumGuidedLevyAdaptiveSwarm = NonObjectOptimizer(method="LLAMAQuantumGuidedLevyAdaptiveSwarm").set_name("LLAMAQuantumGuidedLevyAdaptiveSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumGuidedLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumGuidedLevyAdaptiveSwarm = NonObjectOptimizer( + method="LLAMAQuantumGuidedLevyAdaptiveSwarm" + ).set_name("LLAMAQuantumGuidedLevyAdaptiveSwarm", register=True) +except Exception as e: # QuantumGuidedLevyAdaptiveSwarm print("QuantumGuidedLevyAdaptiveSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptationStrategy import QuantumHarmonicAdaptationStrategy +try: # QuantumHarmonicAdaptationStrategy + from nevergrad.optimization.lama.QuantumHarmonicAdaptationStrategy import ( + QuantumHarmonicAdaptationStrategy, + ) lama_register["QuantumHarmonicAdaptationStrategy"] = QuantumHarmonicAdaptationStrategy - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicAdaptationStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptationStrategy").set_name("LLAMAQuantumHarmonicAdaptationStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptationStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicAdaptationStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptationStrategy" + ).set_name("LLAMAQuantumHarmonicAdaptationStrategy", register=True) +except Exception as e: # QuantumHarmonicAdaptationStrategy print("QuantumHarmonicAdaptationStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptiveFeedbackOptimizer import QuantumHarmonicAdaptiveFeedbackOptimizer +try: # QuantumHarmonicAdaptiveFeedbackOptimizer + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveFeedbackOptimizer import ( + QuantumHarmonicAdaptiveFeedbackOptimizer, + ) lama_register["QuantumHarmonicAdaptiveFeedbackOptimizer"] = QuantumHarmonicAdaptiveFeedbackOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveFeedbackOptimizer", register=True) +except Exception as e: # QuantumHarmonicAdaptiveFeedbackOptimizer print("QuantumHarmonicAdaptiveFeedbackOptimizer can not be imported: ", e) -try: +try: # QuantumHarmonicAdaptiveOptimizer from nevergrad.optimization.lama.QuantumHarmonicAdaptiveOptimizer import QuantumHarmonicAdaptiveOptimizer lama_register["QuantumHarmonicAdaptiveOptimizer"] = QuantumHarmonicAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicAdaptiveOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveOptimizer", register=True) +except Exception as e: # QuantumHarmonicAdaptiveOptimizer print("QuantumHarmonicAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicAdaptiveRefinementOptimizer import QuantumHarmonicAdaptiveRefinementOptimizer +try: # QuantumHarmonicAdaptiveRefinementOptimizer + from nevergrad.optimization.lama.QuantumHarmonicAdaptiveRefinementOptimizer import ( + QuantumHarmonicAdaptiveRefinementOptimizer, + ) lama_register["QuantumHarmonicAdaptiveRefinementOptimizer"] = QuantumHarmonicAdaptiveRefinementOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicAdaptiveRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer").set_name("LLAMAQuantumHarmonicAdaptiveRefinementOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicAdaptiveRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicAdaptiveRefinementOptimizer" + ).set_name("LLAMAQuantumHarmonicAdaptiveRefinementOptimizer", register=True) +except Exception as e: # QuantumHarmonicAdaptiveRefinementOptimizer print("QuantumHarmonicAdaptiveRefinementOptimizer can not be imported: ", e) -try: +try: # QuantumHarmonicDynamicAdaptation from nevergrad.optimization.lama.QuantumHarmonicDynamicAdaptation import QuantumHarmonicDynamicAdaptation lama_register["QuantumHarmonicDynamicAdaptation"] = QuantumHarmonicDynamicAdaptation - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicDynamicAdaptation = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicAdaptation").set_name("LLAMAQuantumHarmonicDynamicAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicDynamicAdaptation = NonObjectOptimizer( + method="LLAMAQuantumHarmonicDynamicAdaptation" + ).set_name("LLAMAQuantumHarmonicDynamicAdaptation", register=True) +except Exception as e: # QuantumHarmonicDynamicAdaptation print("QuantumHarmonicDynamicAdaptation can not be imported: ", e) -try: +try: # QuantumHarmonicDynamicOptimizer from nevergrad.optimization.lama.QuantumHarmonicDynamicOptimizer import QuantumHarmonicDynamicOptimizer lama_register["QuantumHarmonicDynamicOptimizer"] = QuantumHarmonicDynamicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicOptimizer").set_name("LLAMAQuantumHarmonicDynamicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicDynamicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicDynamicOptimizer" + ).set_name("LLAMAQuantumHarmonicDynamicOptimizer", register=True) +except Exception as e: # QuantumHarmonicDynamicOptimizer print("QuantumHarmonicDynamicOptimizer can not be imported: ", e) -try: +try: # QuantumHarmonicEvolutionStrategy from nevergrad.optimization.lama.QuantumHarmonicEvolutionStrategy import QuantumHarmonicEvolutionStrategy lama_register["QuantumHarmonicEvolutionStrategy"] = QuantumHarmonicEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicEvolutionStrategy").set_name("LLAMAQuantumHarmonicEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicEvolutionStrategy" + ).set_name("LLAMAQuantumHarmonicEvolutionStrategy", register=True) +except Exception as e: # QuantumHarmonicEvolutionStrategy print("QuantumHarmonicEvolutionStrategy can not be imported: ", e) -try: +try: # QuantumHarmonicFeedbackOptimizer from nevergrad.optimization.lama.QuantumHarmonicFeedbackOptimizer import QuantumHarmonicFeedbackOptimizer lama_register["QuantumHarmonicFeedbackOptimizer"] = QuantumHarmonicFeedbackOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicFeedbackOptimizer").set_name("LLAMAQuantumHarmonicFeedbackOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFeedbackOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFeedbackOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFeedbackOptimizer" + ).set_name("LLAMAQuantumHarmonicFeedbackOptimizer", register=True) +except Exception as e: # QuantumHarmonicFeedbackOptimizer print("QuantumHarmonicFeedbackOptimizer can not be imported: ", e) -try: +try: # QuantumHarmonicFocusedOptimizer from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizer import QuantumHarmonicFocusedOptimizer lama_register["QuantumHarmonicFocusedOptimizer"] = QuantumHarmonicFocusedOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizer").set_name("LLAMAQuantumHarmonicFocusedOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizer" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizer", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizer print("QuantumHarmonicFocusedOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV2 import QuantumHarmonicFocusedOptimizerV2 +try: # QuantumHarmonicFocusedOptimizerV2 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV2 import ( + QuantumHarmonicFocusedOptimizerV2, + ) lama_register["QuantumHarmonicFocusedOptimizerV2"] = QuantumHarmonicFocusedOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV2").set_name("LLAMAQuantumHarmonicFocusedOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV2" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV2", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV2 print("QuantumHarmonicFocusedOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV3 import QuantumHarmonicFocusedOptimizerV3 +try: # QuantumHarmonicFocusedOptimizerV3 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV3 import ( + QuantumHarmonicFocusedOptimizerV3, + ) lama_register["QuantumHarmonicFocusedOptimizerV3"] = QuantumHarmonicFocusedOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV3").set_name("LLAMAQuantumHarmonicFocusedOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV3" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV3", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV3 print("QuantumHarmonicFocusedOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV4 import QuantumHarmonicFocusedOptimizerV4 +try: # QuantumHarmonicFocusedOptimizerV4 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV4 import ( + QuantumHarmonicFocusedOptimizerV4, + ) lama_register["QuantumHarmonicFocusedOptimizerV4"] = QuantumHarmonicFocusedOptimizerV4 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV4").set_name("LLAMAQuantumHarmonicFocusedOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV4" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV4", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV4 print("QuantumHarmonicFocusedOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV5 import QuantumHarmonicFocusedOptimizerV5 +try: # QuantumHarmonicFocusedOptimizerV5 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV5 import ( + QuantumHarmonicFocusedOptimizerV5, + ) lama_register["QuantumHarmonicFocusedOptimizerV5"] = QuantumHarmonicFocusedOptimizerV5 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV5").set_name("LLAMAQuantumHarmonicFocusedOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV5" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV5", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV5 print("QuantumHarmonicFocusedOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV6 import QuantumHarmonicFocusedOptimizerV6 +try: # QuantumHarmonicFocusedOptimizerV6 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV6 import ( + QuantumHarmonicFocusedOptimizerV6, + ) lama_register["QuantumHarmonicFocusedOptimizerV6"] = QuantumHarmonicFocusedOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV6").set_name("LLAMAQuantumHarmonicFocusedOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV6" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV6", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV6 print("QuantumHarmonicFocusedOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV7 import QuantumHarmonicFocusedOptimizerV7 +try: # QuantumHarmonicFocusedOptimizerV7 + from nevergrad.optimization.lama.QuantumHarmonicFocusedOptimizerV7 import ( + QuantumHarmonicFocusedOptimizerV7, + ) lama_register["QuantumHarmonicFocusedOptimizerV7"] = QuantumHarmonicFocusedOptimizerV7 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicFocusedOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV7").set_name("LLAMAQuantumHarmonicFocusedOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicFocusedOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicFocusedOptimizerV7 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicFocusedOptimizerV7" + ).set_name("LLAMAQuantumHarmonicFocusedOptimizerV7", register=True) +except Exception as e: # QuantumHarmonicFocusedOptimizerV7 print("QuantumHarmonicFocusedOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicImpulseOptimizerV9 import QuantumHarmonicImpulseOptimizerV9 +try: # QuantumHarmonicImpulseOptimizerV9 + from nevergrad.optimization.lama.QuantumHarmonicImpulseOptimizerV9 import ( + QuantumHarmonicImpulseOptimizerV9, + ) lama_register["QuantumHarmonicImpulseOptimizerV9"] = QuantumHarmonicImpulseOptimizerV9 - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicImpulseOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicImpulseOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumHarmonicImpulseOptimizerV9").set_name("LLAMAQuantumHarmonicImpulseOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicImpulseOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicImpulseOptimizerV9 = NonObjectOptimizer( + method="LLAMAQuantumHarmonicImpulseOptimizerV9" + ).set_name("LLAMAQuantumHarmonicImpulseOptimizerV9", register=True) +except Exception as e: # QuantumHarmonicImpulseOptimizerV9 print("QuantumHarmonicImpulseOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicPrecisionOptimizer import QuantumHarmonicPrecisionOptimizer +try: # QuantumHarmonicPrecisionOptimizer + from nevergrad.optimization.lama.QuantumHarmonicPrecisionOptimizer import ( + QuantumHarmonicPrecisionOptimizer, + ) lama_register["QuantumHarmonicPrecisionOptimizer"] = QuantumHarmonicPrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicPrecisionOptimizer = NonObjectOptimizer(method="LLAMAQuantumHarmonicPrecisionOptimizer").set_name("LLAMAQuantumHarmonicPrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMAQuantumHarmonicPrecisionOptimizer" + ).set_name("LLAMAQuantumHarmonicPrecisionOptimizer", register=True) +except Exception as e: # QuantumHarmonicPrecisionOptimizer print("QuantumHarmonicPrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonicResilientEvolutionStrategy import QuantumHarmonicResilientEvolutionStrategy +try: # QuantumHarmonicResilientEvolutionStrategy + from nevergrad.optimization.lama.QuantumHarmonicResilientEvolutionStrategy import ( + QuantumHarmonicResilientEvolutionStrategy, + ) lama_register["QuantumHarmonicResilientEvolutionStrategy"] = QuantumHarmonicResilientEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumHarmonicResilientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonicResilientEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumHarmonicResilientEvolutionStrategy").set_name("LLAMAQuantumHarmonicResilientEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonicResilientEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonicResilientEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumHarmonicResilientEvolutionStrategy" + ).set_name("LLAMAQuantumHarmonicResilientEvolutionStrategy", register=True) +except Exception as e: # QuantumHarmonicResilientEvolutionStrategy print("QuantumHarmonicResilientEvolutionStrategy can not be imported: ", e) -try: +try: # QuantumHarmonizedPSO from nevergrad.optimization.lama.QuantumHarmonizedPSO import QuantumHarmonizedPSO lama_register["QuantumHarmonizedPSO"] = QuantumHarmonizedPSO - res = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO").set_name("LLAMAQuantumHarmonizedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonizedPSO = NonObjectOptimizer(method="LLAMAQuantumHarmonizedPSO").set_name( + "LLAMAQuantumHarmonizedPSO", register=True + ) +except Exception as e: # QuantumHarmonizedPSO print("QuantumHarmonizedPSO can not be imported: ", e) -try: +try: # QuantumHarmonyMemeticAlgorithm from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithm import QuantumHarmonyMemeticAlgorithm lama_register["QuantumHarmonyMemeticAlgorithm"] = QuantumHarmonyMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithm").set_name("LLAMAQuantumHarmonyMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonyMemeticAlgorithm = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithm" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithm", register=True) +except Exception as e: # QuantumHarmonyMemeticAlgorithm print("QuantumHarmonyMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmImproved import QuantumHarmonyMemeticAlgorithmImproved +try: # QuantumHarmonyMemeticAlgorithmImproved + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmImproved import ( + QuantumHarmonyMemeticAlgorithmImproved, + ) lama_register["QuantumHarmonyMemeticAlgorithmImproved"] = QuantumHarmonyMemeticAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonyMemeticAlgorithmImproved = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmImproved").set_name("LLAMAQuantumHarmonyMemeticAlgorithmImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonyMemeticAlgorithmImproved = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithmImproved" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmImproved", register=True) +except Exception as e: # QuantumHarmonyMemeticAlgorithmImproved print("QuantumHarmonyMemeticAlgorithmImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmRefined import QuantumHarmonyMemeticAlgorithmRefined +try: # QuantumHarmonyMemeticAlgorithmRefined + from nevergrad.optimization.lama.QuantumHarmonyMemeticAlgorithmRefined import ( + QuantumHarmonyMemeticAlgorithmRefined, + ) lama_register["QuantumHarmonyMemeticAlgorithmRefined"] = QuantumHarmonyMemeticAlgorithmRefined - res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmRefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonyMemeticAlgorithmRefined = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmRefined").set_name("LLAMAQuantumHarmonyMemeticAlgorithmRefined", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonyMemeticAlgorithmRefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonyMemeticAlgorithmRefined = NonObjectOptimizer( + method="LLAMAQuantumHarmonyMemeticAlgorithmRefined" + ).set_name("LLAMAQuantumHarmonyMemeticAlgorithmRefined", register=True) +except Exception as e: # QuantumHarmonyMemeticAlgorithmRefined print("QuantumHarmonyMemeticAlgorithmRefined can not be imported: ", e) -try: +try: # QuantumHarmonySearch from nevergrad.optimization.lama.QuantumHarmonySearch import QuantumHarmonySearch lama_register["QuantumHarmonySearch"] = QuantumHarmonySearch - res = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch").set_name("LLAMAQuantumHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHarmonySearch = NonObjectOptimizer(method="LLAMAQuantumHarmonySearch").set_name( + "LLAMAQuantumHarmonySearch", register=True + ) +except Exception as e: # QuantumHarmonySearch print("QuantumHarmonySearch can not be imported: ", e) -try: +try: # QuantumHybridAdaptiveStrategy from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategy import QuantumHybridAdaptiveStrategy lama_register["QuantumHybridAdaptiveStrategy"] = QuantumHybridAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategy").set_name("LLAMAQuantumHybridAdaptiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategy" + ).set_name("LLAMAQuantumHybridAdaptiveStrategy", register=True) +except Exception as e: # QuantumHybridAdaptiveStrategy print("QuantumHybridAdaptiveStrategy can not be imported: ", e) -try: +try: # QuantumHybridAdaptiveStrategyV2 from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV2 import QuantumHybridAdaptiveStrategyV2 lama_register["QuantumHybridAdaptiveStrategyV2"] = QuantumHybridAdaptiveStrategyV2 - res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridAdaptiveStrategyV2 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV2").set_name("LLAMAQuantumHybridAdaptiveStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridAdaptiveStrategyV2 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV2" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV2", register=True) +except Exception as e: # QuantumHybridAdaptiveStrategyV2 print("QuantumHybridAdaptiveStrategyV2 can not be imported: ", e) -try: +try: # QuantumHybridAdaptiveStrategyV8 from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV8 import QuantumHybridAdaptiveStrategyV8 lama_register["QuantumHybridAdaptiveStrategyV8"] = QuantumHybridAdaptiveStrategyV8 - res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridAdaptiveStrategyV8 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV8").set_name("LLAMAQuantumHybridAdaptiveStrategyV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridAdaptiveStrategyV8 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV8" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV8", register=True) +except Exception as e: # QuantumHybridAdaptiveStrategyV8 print("QuantumHybridAdaptiveStrategyV8 can not be imported: ", e) -try: +try: # QuantumHybridAdaptiveStrategyV9 from nevergrad.optimization.lama.QuantumHybridAdaptiveStrategyV9 import QuantumHybridAdaptiveStrategyV9 lama_register["QuantumHybridAdaptiveStrategyV9"] = QuantumHybridAdaptiveStrategyV9 - res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridAdaptiveStrategyV9 = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV9").set_name("LLAMAQuantumHybridAdaptiveStrategyV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridAdaptiveStrategyV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridAdaptiveStrategyV9 = NonObjectOptimizer( + method="LLAMAQuantumHybridAdaptiveStrategyV9" + ).set_name("LLAMAQuantumHybridAdaptiveStrategyV9", register=True) +except Exception as e: # QuantumHybridAdaptiveStrategyV9 print("QuantumHybridAdaptiveStrategyV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHybridDifferentialEvolution import QuantumHybridDifferentialEvolution +try: # QuantumHybridDifferentialEvolution + from nevergrad.optimization.lama.QuantumHybridDifferentialEvolution import ( + QuantumHybridDifferentialEvolution, + ) lama_register["QuantumHybridDifferentialEvolution"] = QuantumHybridDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumHybridDifferentialEvolution").set_name("LLAMAQuantumHybridDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumHybridDifferentialEvolution" + ).set_name("LLAMAQuantumHybridDifferentialEvolution", register=True) +except Exception as e: # QuantumHybridDifferentialEvolution print("QuantumHybridDifferentialEvolution can not be imported: ", e) -try: +try: # QuantumHybridDynamicAdaptiveDE from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE import QuantumHybridDynamicAdaptiveDE lama_register["QuantumHybridDynamicAdaptiveDE"] = QuantumHybridDynamicAdaptiveDE - res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE").set_name("LLAMAQuantumHybridDynamicAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE", register=True) +except Exception as e: # QuantumHybridDynamicAdaptiveDE print("QuantumHybridDynamicAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v2 import QuantumHybridDynamicAdaptiveDE_v2 +try: # QuantumHybridDynamicAdaptiveDE_v2 + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v2 import ( + QuantumHybridDynamicAdaptiveDE_v2, + ) lama_register["QuantumHybridDynamicAdaptiveDE_v2"] = QuantumHybridDynamicAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridDynamicAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v2").set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridDynamicAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE_v2" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v2", register=True) +except Exception as e: # QuantumHybridDynamicAdaptiveDE_v2 print("QuantumHybridDynamicAdaptiveDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v3 import QuantumHybridDynamicAdaptiveDE_v3 +try: # QuantumHybridDynamicAdaptiveDE_v3 + from nevergrad.optimization.lama.QuantumHybridDynamicAdaptiveDE_v3 import ( + QuantumHybridDynamicAdaptiveDE_v3, + ) lama_register["QuantumHybridDynamicAdaptiveDE_v3"] = QuantumHybridDynamicAdaptiveDE_v3 - res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridDynamicAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v3").set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridDynamicAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridDynamicAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumHybridDynamicAdaptiveDE_v3" + ).set_name("LLAMAQuantumHybridDynamicAdaptiveDE_v3", register=True) +except Exception as e: # QuantumHybridDynamicAdaptiveDE_v3 print("QuantumHybridDynamicAdaptiveDE_v3 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE import QuantumHybridEliteAdaptiveDE lama_register["QuantumHybridEliteAdaptiveDE"] = QuantumHybridEliteAdaptiveDE - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE").set_name("LLAMAQuantumHybridEliteAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE print("QuantumHybridEliteAdaptiveDE can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v2 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v2 import QuantumHybridEliteAdaptiveDE_v2 lama_register["QuantumHybridEliteAdaptiveDE_v2"] = QuantumHybridEliteAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v2").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v2" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v2", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v2 print("QuantumHybridEliteAdaptiveDE_v2 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v3 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v3 import QuantumHybridEliteAdaptiveDE_v3 lama_register["QuantumHybridEliteAdaptiveDE_v3"] = QuantumHybridEliteAdaptiveDE_v3 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v3").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v3" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v3", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v3 print("QuantumHybridEliteAdaptiveDE_v3 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v4 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v4 import QuantumHybridEliteAdaptiveDE_v4 lama_register["QuantumHybridEliteAdaptiveDE_v4"] = QuantumHybridEliteAdaptiveDE_v4 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v4 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v4").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v4 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v4" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v4", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v4 print("QuantumHybridEliteAdaptiveDE_v4 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v5 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v5 import QuantumHybridEliteAdaptiveDE_v5 lama_register["QuantumHybridEliteAdaptiveDE_v5"] = QuantumHybridEliteAdaptiveDE_v5 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v5 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v5").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v5 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v5" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v5", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v5 print("QuantumHybridEliteAdaptiveDE_v5 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v6 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v6 import QuantumHybridEliteAdaptiveDE_v6 lama_register["QuantumHybridEliteAdaptiveDE_v6"] = QuantumHybridEliteAdaptiveDE_v6 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v6 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v6").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v6 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v6" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v6", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v6 print("QuantumHybridEliteAdaptiveDE_v6 can not be imported: ", e) -try: +try: # QuantumHybridEliteAdaptiveDE_v7 from nevergrad.optimization.lama.QuantumHybridEliteAdaptiveDE_v7 import QuantumHybridEliteAdaptiveDE_v7 lama_register["QuantumHybridEliteAdaptiveDE_v7"] = QuantumHybridEliteAdaptiveDE_v7 - res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridEliteAdaptiveDE_v7 = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v7").set_name("LLAMAQuantumHybridEliteAdaptiveDE_v7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridEliteAdaptiveDE_v7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridEliteAdaptiveDE_v7 = NonObjectOptimizer( + method="LLAMAQuantumHybridEliteAdaptiveDE_v7" + ).set_name("LLAMAQuantumHybridEliteAdaptiveDE_v7", register=True) +except Exception as e: # QuantumHybridEliteAdaptiveDE_v7 print("QuantumHybridEliteAdaptiveDE_v7 can not be imported: ", e) -try: +try: # QuantumHybridImprovedDE from nevergrad.optimization.lama.QuantumHybridImprovedDE import QuantumHybridImprovedDE lama_register["QuantumHybridImprovedDE"] = QuantumHybridImprovedDE - res = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridImprovedDE = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE").set_name("LLAMAQuantumHybridImprovedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridImprovedDE = NonObjectOptimizer(method="LLAMAQuantumHybridImprovedDE").set_name( + "LLAMAQuantumHybridImprovedDE", register=True + ) +except Exception as e: # QuantumHybridImprovedDE print("QuantumHybridImprovedDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumHybridParticleDifferentialSearch import QuantumHybridParticleDifferentialSearch +try: # QuantumHybridParticleDifferentialSearch + from nevergrad.optimization.lama.QuantumHybridParticleDifferentialSearch import ( + QuantumHybridParticleDifferentialSearch, + ) lama_register["QuantumHybridParticleDifferentialSearch"] = QuantumHybridParticleDifferentialSearch - res = NonObjectOptimizer(method="LLAMAQuantumHybridParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumHybridParticleDifferentialSearch = NonObjectOptimizer(method="LLAMAQuantumHybridParticleDifferentialSearch").set_name("LLAMAQuantumHybridParticleDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumHybridParticleDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumHybridParticleDifferentialSearch = NonObjectOptimizer( + method="LLAMAQuantumHybridParticleDifferentialSearch" + ).set_name("LLAMAQuantumHybridParticleDifferentialSearch", register=True) +except Exception as e: # QuantumHybridParticleDifferentialSearch print("QuantumHybridParticleDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInfluenceCrossoverOptimizer import QuantumInfluenceCrossoverOptimizer +try: # QuantumInfluenceCrossoverOptimizer + from nevergrad.optimization.lama.QuantumInfluenceCrossoverOptimizer import ( + QuantumInfluenceCrossoverOptimizer, + ) lama_register["QuantumInfluenceCrossoverOptimizer"] = QuantumInfluenceCrossoverOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInfluenceCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInfluenceCrossoverOptimizer = NonObjectOptimizer(method="LLAMAQuantumInfluenceCrossoverOptimizer").set_name("LLAMAQuantumInfluenceCrossoverOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInfluenceCrossoverOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInfluenceCrossoverOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInfluenceCrossoverOptimizer" + ).set_name("LLAMAQuantumInfluenceCrossoverOptimizer", register=True) +except Exception as e: # QuantumInfluenceCrossoverOptimizer print("QuantumInfluenceCrossoverOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInfluencedAdaptiveDifferentialSwarm import QuantumInfluencedAdaptiveDifferentialSwarm +try: # QuantumInfluencedAdaptiveDifferentialSwarm + from nevergrad.optimization.lama.QuantumInfluencedAdaptiveDifferentialSwarm import ( + QuantumInfluencedAdaptiveDifferentialSwarm, + ) lama_register["QuantumInfluencedAdaptiveDifferentialSwarm"] = QuantumInfluencedAdaptiveDifferentialSwarm - res = NonObjectOptimizer(method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInfluencedAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm").set_name("LLAMAQuantumInfluencedAdaptiveDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInfluencedAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMAQuantumInfluencedAdaptiveDifferentialSwarm" + ).set_name("LLAMAQuantumInfluencedAdaptiveDifferentialSwarm", register=True) +except Exception as e: # QuantumInfluencedAdaptiveDifferentialSwarm print("QuantumInfluencedAdaptiveDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearch import QuantumInformedAdaptiveHybridSearch +try: # QuantumInformedAdaptiveHybridSearch + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearch import ( + QuantumInformedAdaptiveHybridSearch, + ) lama_register["QuantumInformedAdaptiveHybridSearch"] = QuantumInformedAdaptiveHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveHybridSearch = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearch").set_name("LLAMAQuantumInformedAdaptiveHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveHybridSearch" + ).set_name("LLAMAQuantumInformedAdaptiveHybridSearch", register=True) +except Exception as e: # QuantumInformedAdaptiveHybridSearch print("QuantumInformedAdaptiveHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearchV4 import QuantumInformedAdaptiveHybridSearchV4 +try: # QuantumInformedAdaptiveHybridSearchV4 + from nevergrad.optimization.lama.QuantumInformedAdaptiveHybridSearchV4 import ( + QuantumInformedAdaptiveHybridSearchV4, + ) lama_register["QuantumInformedAdaptiveHybridSearchV4"] = QuantumInformedAdaptiveHybridSearchV4 - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveHybridSearchV4 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearchV4").set_name("LLAMAQuantumInformedAdaptiveHybridSearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveHybridSearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveHybridSearchV4 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveHybridSearchV4" + ).set_name("LLAMAQuantumInformedAdaptiveHybridSearchV4", register=True) +except Exception as e: # QuantumInformedAdaptiveHybridSearchV4 print("QuantumInformedAdaptiveHybridSearchV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedAdaptiveInertiaOptimizer import QuantumInformedAdaptiveInertiaOptimizer +try: # QuantumInformedAdaptiveInertiaOptimizer + from nevergrad.optimization.lama.QuantumInformedAdaptiveInertiaOptimizer import ( + QuantumInformedAdaptiveInertiaOptimizer, + ) lama_register["QuantumInformedAdaptiveInertiaOptimizer"] = QuantumInformedAdaptiveInertiaOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveInertiaOptimizer").set_name("LLAMAQuantumInformedAdaptiveInertiaOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveInertiaOptimizer" + ).set_name("LLAMAQuantumInformedAdaptiveInertiaOptimizer", register=True) +except Exception as e: # QuantumInformedAdaptiveInertiaOptimizer print("QuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) -try: +try: # QuantumInformedAdaptivePSO from nevergrad.optimization.lama.QuantumInformedAdaptivePSO import QuantumInformedAdaptivePSO lama_register["QuantumInformedAdaptivePSO"] = QuantumInformedAdaptivePSO - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO").set_name("LLAMAQuantumInformedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptivePSO").set_name( + "LLAMAQuantumInformedAdaptivePSO", register=True + ) +except Exception as e: # QuantumInformedAdaptivePSO print("QuantumInformedAdaptivePSO can not be imported: ", e) -try: +try: # QuantumInformedAdaptiveSearchV4 from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV4 import QuantumInformedAdaptiveSearchV4 lama_register["QuantumInformedAdaptiveSearchV4"] = QuantumInformedAdaptiveSearchV4 - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveSearchV4 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV4").set_name("LLAMAQuantumInformedAdaptiveSearchV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveSearchV4 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV4" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV4", register=True) +except Exception as e: # QuantumInformedAdaptiveSearchV4 print("QuantumInformedAdaptiveSearchV4 can not be imported: ", e) -try: +try: # QuantumInformedAdaptiveSearchV5 from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV5 import QuantumInformedAdaptiveSearchV5 lama_register["QuantumInformedAdaptiveSearchV5"] = QuantumInformedAdaptiveSearchV5 - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveSearchV5 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV5").set_name("LLAMAQuantumInformedAdaptiveSearchV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveSearchV5 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV5" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV5", register=True) +except Exception as e: # QuantumInformedAdaptiveSearchV5 print("QuantumInformedAdaptiveSearchV5 can not be imported: ", e) -try: +try: # QuantumInformedAdaptiveSearchV6 from nevergrad.optimization.lama.QuantumInformedAdaptiveSearchV6 import QuantumInformedAdaptiveSearchV6 lama_register["QuantumInformedAdaptiveSearchV6"] = QuantumInformedAdaptiveSearchV6 - res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedAdaptiveSearchV6 = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV6").set_name("LLAMAQuantumInformedAdaptiveSearchV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedAdaptiveSearchV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedAdaptiveSearchV6 = NonObjectOptimizer( + method="LLAMAQuantumInformedAdaptiveSearchV6" + ).set_name("LLAMAQuantumInformedAdaptiveSearchV6", register=True) +except Exception as e: # QuantumInformedAdaptiveSearchV6 print("QuantumInformedAdaptiveSearchV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedCooperativeSearchV1 import QuantumInformedCooperativeSearchV1 +try: # QuantumInformedCooperativeSearchV1 + from nevergrad.optimization.lama.QuantumInformedCooperativeSearchV1 import ( + QuantumInformedCooperativeSearchV1, + ) lama_register["QuantumInformedCooperativeSearchV1"] = QuantumInformedCooperativeSearchV1 - res = NonObjectOptimizer(method="LLAMAQuantumInformedCooperativeSearchV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedCooperativeSearchV1 = NonObjectOptimizer(method="LLAMAQuantumInformedCooperativeSearchV1").set_name("LLAMAQuantumInformedCooperativeSearchV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedCooperativeSearchV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedCooperativeSearchV1 = NonObjectOptimizer( + method="LLAMAQuantumInformedCooperativeSearchV1" + ).set_name("LLAMAQuantumInformedCooperativeSearchV1", register=True) +except Exception as e: # QuantumInformedCooperativeSearchV1 print("QuantumInformedCooperativeSearchV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedCrossoverEvolution import QuantumInformedCrossoverEvolution +try: # QuantumInformedCrossoverEvolution + from nevergrad.optimization.lama.QuantumInformedCrossoverEvolution import ( + QuantumInformedCrossoverEvolution, + ) lama_register["QuantumInformedCrossoverEvolution"] = QuantumInformedCrossoverEvolution - res = NonObjectOptimizer(method="LLAMAQuantumInformedCrossoverEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedCrossoverEvolution = NonObjectOptimizer(method="LLAMAQuantumInformedCrossoverEvolution").set_name("LLAMAQuantumInformedCrossoverEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedCrossoverEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedCrossoverEvolution = NonObjectOptimizer( + method="LLAMAQuantumInformedCrossoverEvolution" + ).set_name("LLAMAQuantumInformedCrossoverEvolution", register=True) +except Exception as e: # QuantumInformedCrossoverEvolution print("QuantumInformedCrossoverEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedDifferentialStrategy import QuantumInformedDifferentialStrategy +try: # QuantumInformedDifferentialStrategy + from nevergrad.optimization.lama.QuantumInformedDifferentialStrategy import ( + QuantumInformedDifferentialStrategy, + ) lama_register["QuantumInformedDifferentialStrategy"] = QuantumInformedDifferentialStrategy - res = NonObjectOptimizer(method="LLAMAQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedDifferentialStrategy = NonObjectOptimizer(method="LLAMAQuantumInformedDifferentialStrategy").set_name("LLAMAQuantumInformedDifferentialStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedDifferentialStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedDifferentialStrategy = NonObjectOptimizer( + method="LLAMAQuantumInformedDifferentialStrategy" + ).set_name("LLAMAQuantumInformedDifferentialStrategy", register=True) +except Exception as e: # QuantumInformedDifferentialStrategy print("QuantumInformedDifferentialStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedDynamicSwarmOptimizer import QuantumInformedDynamicSwarmOptimizer +try: # QuantumInformedDynamicSwarmOptimizer + from nevergrad.optimization.lama.QuantumInformedDynamicSwarmOptimizer import ( + QuantumInformedDynamicSwarmOptimizer, + ) lama_register["QuantumInformedDynamicSwarmOptimizer"] = QuantumInformedDynamicSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedDynamicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedDynamicSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedDynamicSwarmOptimizer").set_name("LLAMAQuantumInformedDynamicSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedDynamicSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedDynamicSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedDynamicSwarmOptimizer" + ).set_name("LLAMAQuantumInformedDynamicSwarmOptimizer", register=True) +except Exception as e: # QuantumInformedDynamicSwarmOptimizer print("QuantumInformedDynamicSwarmOptimizer can not be imported: ", e) -try: +try: # QuantumInformedEvolutionStrategy from nevergrad.optimization.lama.QuantumInformedEvolutionStrategy import QuantumInformedEvolutionStrategy lama_register["QuantumInformedEvolutionStrategy"] = QuantumInformedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMAQuantumInformedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedEvolutionStrategy = NonObjectOptimizer(method="LLAMAQuantumInformedEvolutionStrategy").set_name("LLAMAQuantumInformedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedEvolutionStrategy = NonObjectOptimizer( + method="LLAMAQuantumInformedEvolutionStrategy" + ).set_name("LLAMAQuantumInformedEvolutionStrategy", register=True) +except Exception as e: # QuantumInformedEvolutionStrategy print("QuantumInformedEvolutionStrategy can not be imported: ", e) -try: +try: # QuantumInformedGradientOptimizer from nevergrad.optimization.lama.QuantumInformedGradientOptimizer import QuantumInformedGradientOptimizer lama_register["QuantumInformedGradientOptimizer"] = QuantumInformedGradientOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedGradientOptimizer").set_name("LLAMAQuantumInformedGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedGradientOptimizer" + ).set_name("LLAMAQuantumInformedGradientOptimizer", register=True) +except Exception as e: # QuantumInformedGradientOptimizer print("QuantumInformedGradientOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedHyperStrategicOptimizer import QuantumInformedHyperStrategicOptimizer +try: # QuantumInformedHyperStrategicOptimizer + from nevergrad.optimization.lama.QuantumInformedHyperStrategicOptimizer import ( + QuantumInformedHyperStrategicOptimizer, + ) lama_register["QuantumInformedHyperStrategicOptimizer"] = QuantumInformedHyperStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedHyperStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedHyperStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedHyperStrategicOptimizer").set_name("LLAMAQuantumInformedHyperStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedHyperStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedHyperStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedHyperStrategicOptimizer" + ).set_name("LLAMAQuantumInformedHyperStrategicOptimizer", register=True) +except Exception as e: # QuantumInformedHyperStrategicOptimizer print("QuantumInformedHyperStrategicOptimizer can not be imported: ", e) -try: +try: # QuantumInformedOptimizer from nevergrad.optimization.lama.QuantumInformedOptimizer import QuantumInformedOptimizer lama_register["QuantumInformedOptimizer"] = QuantumInformedOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer").set_name("LLAMAQuantumInformedOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedOptimizer").set_name( + "LLAMAQuantumInformedOptimizer", register=True + ) +except Exception as e: # QuantumInformedOptimizer print("QuantumInformedOptimizer can not be imported: ", e) -try: +try: # QuantumInformedPSO from nevergrad.optimization.lama.QuantumInformedPSO import QuantumInformedPSO lama_register["QuantumInformedPSO"] = QuantumInformedPSO - res = NonObjectOptimizer(method="LLAMAQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedPSO = NonObjectOptimizer(method="LLAMAQuantumInformedPSO").set_name("LLAMAQuantumInformedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedPSO = NonObjectOptimizer(method="LLAMAQuantumInformedPSO").set_name( + "LLAMAQuantumInformedPSO", register=True + ) +except Exception as e: # QuantumInformedPSO print("QuantumInformedPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedParticleSwarmOptimizer import QuantumInformedParticleSwarmOptimizer +try: # QuantumInformedParticleSwarmOptimizer + from nevergrad.optimization.lama.QuantumInformedParticleSwarmOptimizer import ( + QuantumInformedParticleSwarmOptimizer, + ) lama_register["QuantumInformedParticleSwarmOptimizer"] = QuantumInformedParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedParticleSwarmOptimizer").set_name("LLAMAQuantumInformedParticleSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedParticleSwarmOptimizer" + ).set_name("LLAMAQuantumInformedParticleSwarmOptimizer", register=True) +except Exception as e: # QuantumInformedParticleSwarmOptimizer print("QuantumInformedParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInformedStrategicOptimizer import QuantumInformedStrategicOptimizer +try: # QuantumInformedStrategicOptimizer + from nevergrad.optimization.lama.QuantumInformedStrategicOptimizer import ( + QuantumInformedStrategicOptimizer, + ) lama_register["QuantumInformedStrategicOptimizer"] = QuantumInformedStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInformedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInformedStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumInformedStrategicOptimizer").set_name("LLAMAQuantumInformedStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInformedStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInformedStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInformedStrategicOptimizer" + ).set_name("LLAMAQuantumInformedStrategicOptimizer", register=True) +except Exception as e: # QuantumInformedStrategicOptimizer print("QuantumInformedStrategicOptimizer can not be imported: ", e) -try: +try: # QuantumInfusedAdaptiveStrategy from nevergrad.optimization.lama.QuantumInfusedAdaptiveStrategy import QuantumInfusedAdaptiveStrategy lama_register["QuantumInfusedAdaptiveStrategy"] = QuantumInfusedAdaptiveStrategy - res = NonObjectOptimizer(method="LLAMAQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInfusedAdaptiveStrategy = NonObjectOptimizer(method="LLAMAQuantumInfusedAdaptiveStrategy").set_name("LLAMAQuantumInfusedAdaptiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInfusedAdaptiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInfusedAdaptiveStrategy = NonObjectOptimizer( + method="LLAMAQuantumInfusedAdaptiveStrategy" + ).set_name("LLAMAQuantumInfusedAdaptiveStrategy", register=True) +except Exception as e: # QuantumInfusedAdaptiveStrategy print("QuantumInfusedAdaptiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEElitistLocalSearch import QuantumInspiredAdaptiveDEElitistLocalSearch +try: # QuantumInspiredAdaptiveDEElitistLocalSearch + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEElitistLocalSearch import ( + QuantumInspiredAdaptiveDEElitistLocalSearch, + ) lama_register["QuantumInspiredAdaptiveDEElitistLocalSearch"] = QuantumInspiredAdaptiveDEElitistLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch", register=True) +except Exception as e: # QuantumInspiredAdaptiveDEElitistLocalSearch print("QuantumInspiredAdaptiveDEElitistLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEHybridLocalSearch import QuantumInspiredAdaptiveDEHybridLocalSearch +try: # QuantumInspiredAdaptiveDEHybridLocalSearch + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDEHybridLocalSearch import ( + QuantumInspiredAdaptiveDEHybridLocalSearch, + ) lama_register["QuantumInspiredAdaptiveDEHybridLocalSearch"] = QuantumInspiredAdaptiveDEHybridLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch", register=True) +except Exception as e: # QuantumInspiredAdaptiveDEHybridLocalSearch print("QuantumInspiredAdaptiveDEHybridLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning import QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning - - lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning"] = QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning").set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning", register=True) -except Exception as e: +try: # QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning import ( + QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning, + ) + + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning"] = ( + QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning + ) + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning" + ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning", register=True) +except Exception as e: # QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning print("QuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch import QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch - - lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch"] = QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch").set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch", register=True) -except Exception as e: +try: # QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch + from nevergrad.optimization.lama.QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch import ( + QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch, + ) + + lama_register["QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch"] = ( + QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch" + ).set_name("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch", register=True) +except Exception as e: # QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch print("QuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridDEPSO import QuantumInspiredAdaptiveHybridDEPSO +try: # QuantumInspiredAdaptiveHybridDEPSO + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridDEPSO import ( + QuantumInspiredAdaptiveHybridDEPSO, + ) lama_register["QuantumInspiredAdaptiveHybridDEPSO"] = QuantumInspiredAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridDEPSO").set_name("LLAMAQuantumInspiredAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumInspiredAdaptiveHybridDEPSO", register=True) +except Exception as e: # QuantumInspiredAdaptiveHybridDEPSO print("QuantumInspiredAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridOptimizer import QuantumInspiredAdaptiveHybridOptimizer +try: # QuantumInspiredAdaptiveHybridOptimizer + from nevergrad.optimization.lama.QuantumInspiredAdaptiveHybridOptimizer import ( + QuantumInspiredAdaptiveHybridOptimizer, + ) lama_register["QuantumInspiredAdaptiveHybridOptimizer"] = QuantumInspiredAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridOptimizer").set_name("LLAMAQuantumInspiredAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveHybridOptimizer" + ).set_name("LLAMAQuantumInspiredAdaptiveHybridOptimizer", register=True) +except Exception as e: # QuantumInspiredAdaptiveHybridOptimizer print("QuantumInspiredAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredAdaptiveMemeticOptimizer import QuantumInspiredAdaptiveMemeticOptimizer +try: # QuantumInspiredAdaptiveMemeticOptimizer + from nevergrad.optimization.lama.QuantumInspiredAdaptiveMemeticOptimizer import ( + QuantumInspiredAdaptiveMemeticOptimizer, + ) lama_register["QuantumInspiredAdaptiveMemeticOptimizer"] = QuantumInspiredAdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer").set_name("LLAMAQuantumInspiredAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredAdaptiveMemeticOptimizer" + ).set_name("LLAMAQuantumInspiredAdaptiveMemeticOptimizer", register=True) +except Exception as e: # QuantumInspiredAdaptiveMemeticOptimizer print("QuantumInspiredAdaptiveMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredDifferentialEvolution import QuantumInspiredDifferentialEvolution +try: # QuantumInspiredDifferentialEvolution + from nevergrad.optimization.lama.QuantumInspiredDifferentialEvolution import ( + QuantumInspiredDifferentialEvolution, + ) lama_register["QuantumInspiredDifferentialEvolution"] = QuantumInspiredDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialEvolution").set_name("LLAMAQuantumInspiredDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumInspiredDifferentialEvolution" + ).set_name("LLAMAQuantumInspiredDifferentialEvolution", register=True) +except Exception as e: # QuantumInspiredDifferentialEvolution print("QuantumInspiredDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumInspiredDifferentialParticleSwarmOptimizer import QuantumInspiredDifferentialParticleSwarmOptimizer - - lama_register["QuantumInspiredDifferentialParticleSwarmOptimizer"] = QuantumInspiredDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer").set_name("LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # QuantumInspiredDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.QuantumInspiredDifferentialParticleSwarmOptimizer import ( + QuantumInspiredDifferentialParticleSwarmOptimizer, + ) + + lama_register["QuantumInspiredDifferentialParticleSwarmOptimizer"] = ( + QuantumInspiredDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer" + ).set_name("LLAMAQuantumInspiredDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # QuantumInspiredDifferentialParticleSwarmOptimizer print("QuantumInspiredDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: +try: # QuantumInspiredHybridOptimizer from nevergrad.optimization.lama.QuantumInspiredHybridOptimizer import QuantumInspiredHybridOptimizer lama_register["QuantumInspiredHybridOptimizer"] = QuantumInspiredHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredHybridOptimizer").set_name("LLAMAQuantumInspiredHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredHybridOptimizer" + ).set_name("LLAMAQuantumInspiredHybridOptimizer", register=True) +except Exception as e: # QuantumInspiredHybridOptimizer print("QuantumInspiredHybridOptimizer can not be imported: ", e) -try: +try: # QuantumInspiredMetaheuristic from nevergrad.optimization.lama.QuantumInspiredMetaheuristic import QuantumInspiredMetaheuristic lama_register["QuantumInspiredMetaheuristic"] = QuantumInspiredMetaheuristic - res = NonObjectOptimizer(method="LLAMAQuantumInspiredMetaheuristic")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredMetaheuristic = NonObjectOptimizer(method="LLAMAQuantumInspiredMetaheuristic").set_name("LLAMAQuantumInspiredMetaheuristic", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredMetaheuristic")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredMetaheuristic = NonObjectOptimizer( + method="LLAMAQuantumInspiredMetaheuristic" + ).set_name("LLAMAQuantumInspiredMetaheuristic", register=True) +except Exception as e: # QuantumInspiredMetaheuristic print("QuantumInspiredMetaheuristic can not be imported: ", e) -try: +try: # QuantumInspiredOptimization from nevergrad.optimization.lama.QuantumInspiredOptimization import QuantumInspiredOptimization lama_register["QuantumInspiredOptimization"] = QuantumInspiredOptimization - res = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredOptimization = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization").set_name("LLAMAQuantumInspiredOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredOptimization = NonObjectOptimizer(method="LLAMAQuantumInspiredOptimization").set_name( + "LLAMAQuantumInspiredOptimization", register=True + ) +except Exception as e: # QuantumInspiredOptimization print("QuantumInspiredOptimization can not be imported: ", e) -try: +try: # QuantumInspiredSpiralOptimizer from nevergrad.optimization.lama.QuantumInspiredSpiralOptimizer import QuantumInspiredSpiralOptimizer lama_register["QuantumInspiredSpiralOptimizer"] = QuantumInspiredSpiralOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumInspiredSpiralOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumInspiredSpiralOptimizer = NonObjectOptimizer(method="LLAMAQuantumInspiredSpiralOptimizer").set_name("LLAMAQuantumInspiredSpiralOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumInspiredSpiralOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumInspiredSpiralOptimizer = NonObjectOptimizer( + method="LLAMAQuantumInspiredSpiralOptimizer" + ).set_name("LLAMAQuantumInspiredSpiralOptimizer", register=True) +except Exception as e: # QuantumInspiredSpiralOptimizer print("QuantumInspiredSpiralOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumIterativeDeepeningHybridSearch import QuantumIterativeDeepeningHybridSearch +try: # QuantumIterativeDeepeningHybridSearch + from nevergrad.optimization.lama.QuantumIterativeDeepeningHybridSearch import ( + QuantumIterativeDeepeningHybridSearch, + ) lama_register["QuantumIterativeDeepeningHybridSearch"] = QuantumIterativeDeepeningHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumIterativeDeepeningHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumIterativeDeepeningHybridSearch = NonObjectOptimizer(method="LLAMAQuantumIterativeDeepeningHybridSearch").set_name("LLAMAQuantumIterativeDeepeningHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumIterativeDeepeningHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumIterativeDeepeningHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumIterativeDeepeningHybridSearch" + ).set_name("LLAMAQuantumIterativeDeepeningHybridSearch", register=True) +except Exception as e: # QuantumIterativeDeepeningHybridSearch print("QuantumIterativeDeepeningHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumIterativeRefinementOptimizer import QuantumIterativeRefinementOptimizer +try: # QuantumIterativeRefinementOptimizer + from nevergrad.optimization.lama.QuantumIterativeRefinementOptimizer import ( + QuantumIterativeRefinementOptimizer, + ) lama_register["QuantumIterativeRefinementOptimizer"] = QuantumIterativeRefinementOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumIterativeRefinementOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumIterativeRefinementOptimizer = NonObjectOptimizer(method="LLAMAQuantumIterativeRefinementOptimizer").set_name("LLAMAQuantumIterativeRefinementOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumIterativeRefinementOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumIterativeRefinementOptimizer = NonObjectOptimizer( + method="LLAMAQuantumIterativeRefinementOptimizer" + ).set_name("LLAMAQuantumIterativeRefinementOptimizer", register=True) +except Exception as e: # QuantumIterativeRefinementOptimizer print("QuantumIterativeRefinementOptimizer can not be imported: ", e) -try: +try: # QuantumLeapOptimizer from nevergrad.optimization.lama.QuantumLeapOptimizer import QuantumLeapOptimizer lama_register["QuantumLeapOptimizer"] = QuantumLeapOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer").set_name("LLAMAQuantumLeapOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLeapOptimizer = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizer").set_name( + "LLAMAQuantumLeapOptimizer", register=True + ) +except Exception as e: # QuantumLeapOptimizer print("QuantumLeapOptimizer can not be imported: ", e) -try: +try: # QuantumLeapOptimizerV2 from nevergrad.optimization.lama.QuantumLeapOptimizerV2 import QuantumLeapOptimizerV2 lama_register["QuantumLeapOptimizerV2"] = QuantumLeapOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLeapOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2").set_name("LLAMAQuantumLeapOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLeapOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLeapOptimizerV2").set_name( + "LLAMAQuantumLeapOptimizerV2", register=True + ) +except Exception as e: # QuantumLeapOptimizerV2 print("QuantumLeapOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDEHybridLocalSearch import QuantumLevyAdaptiveDEHybridLocalSearch +try: # QuantumLevyAdaptiveDEHybridLocalSearch + from nevergrad.optimization.lama.QuantumLevyAdaptiveDEHybridLocalSearch import ( + QuantumLevyAdaptiveDEHybridLocalSearch, + ) lama_register["QuantumLevyAdaptiveDEHybridLocalSearch"] = QuantumLevyAdaptiveDEHybridLocalSearch - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDEHybridLocalSearch = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch").set_name("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDEHybridLocalSearch = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDEHybridLocalSearch" + ).set_name("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch", register=True) +except Exception as e: # QuantumLevyAdaptiveDEHybridLocalSearch print("QuantumLevyAdaptiveDEHybridLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV2 import QuantumLevyAdaptiveDifferentialOptimizerV2 +try: # QuantumLevyAdaptiveDifferentialOptimizerV2 + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV2 import ( + QuantumLevyAdaptiveDifferentialOptimizerV2, + ) lama_register["QuantumLevyAdaptiveDifferentialOptimizerV2"] = QuantumLevyAdaptiveDifferentialOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV2", register=True) +except Exception as e: # QuantumLevyAdaptiveDifferentialOptimizerV2 print("QuantumLevyAdaptiveDifferentialOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV3 import QuantumLevyAdaptiveDifferentialOptimizerV3 +try: # QuantumLevyAdaptiveDifferentialOptimizerV3 + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV3 import ( + QuantumLevyAdaptiveDifferentialOptimizerV3, + ) lama_register["QuantumLevyAdaptiveDifferentialOptimizerV3"] = QuantumLevyAdaptiveDifferentialOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV3", register=True) +except Exception as e: # QuantumLevyAdaptiveDifferentialOptimizerV3 print("QuantumLevyAdaptiveDifferentialOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV4 import QuantumLevyAdaptiveDifferentialOptimizerV4 +try: # QuantumLevyAdaptiveDifferentialOptimizerV4 + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV4 import ( + QuantumLevyAdaptiveDifferentialOptimizerV4, + ) lama_register["QuantumLevyAdaptiveDifferentialOptimizerV4"] = QuantumLevyAdaptiveDifferentialOptimizerV4 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV4", register=True) +except Exception as e: # QuantumLevyAdaptiveDifferentialOptimizerV4 print("QuantumLevyAdaptiveDifferentialOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV5 import QuantumLevyAdaptiveDifferentialOptimizerV5 +try: # QuantumLevyAdaptiveDifferentialOptimizerV5 + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV5 import ( + QuantumLevyAdaptiveDifferentialOptimizerV5, + ) lama_register["QuantumLevyAdaptiveDifferentialOptimizerV5"] = QuantumLevyAdaptiveDifferentialOptimizerV5 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV5", register=True) +except Exception as e: # QuantumLevyAdaptiveDifferentialOptimizerV5 print("QuantumLevyAdaptiveDifferentialOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV6 import QuantumLevyAdaptiveDifferentialOptimizerV6 +try: # QuantumLevyAdaptiveDifferentialOptimizerV6 + from nevergrad.optimization.lama.QuantumLevyAdaptiveDifferentialOptimizerV6 import ( + QuantumLevyAdaptiveDifferentialOptimizerV6, + ) lama_register["QuantumLevyAdaptiveDifferentialOptimizerV6"] = QuantumLevyAdaptiveDifferentialOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6").set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6" + ).set_name("LLAMAQuantumLevyAdaptiveDifferentialOptimizerV6", register=True) +except Exception as e: # QuantumLevyAdaptiveDifferentialOptimizerV6 print("QuantumLevyAdaptiveDifferentialOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyAdaptiveMemeticOptimizerV3 import QuantumLevyAdaptiveMemeticOptimizerV3 +try: # QuantumLevyAdaptiveMemeticOptimizerV3 + from nevergrad.optimization.lama.QuantumLevyAdaptiveMemeticOptimizerV3 import ( + QuantumLevyAdaptiveMemeticOptimizerV3, + ) lama_register["QuantumLevyAdaptiveMemeticOptimizerV3"] = QuantumLevyAdaptiveMemeticOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyAdaptiveMemeticOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3").set_name("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyAdaptiveMemeticOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyAdaptiveMemeticOptimizerV3" + ).set_name("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3", register=True) +except Exception as e: # QuantumLevyAdaptiveMemeticOptimizerV3 print("QuantumLevyAdaptiveMemeticOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizer import QuantumLevyDifferentialDynamicOptimizer +try: # QuantumLevyDifferentialDynamicOptimizer + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizer import ( + QuantumLevyDifferentialDynamicOptimizer, + ) lama_register["QuantumLevyDifferentialDynamicOptimizer"] = QuantumLevyDifferentialDynamicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizer").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizer" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizer", register=True) +except Exception as e: # QuantumLevyDifferentialDynamicOptimizer print("QuantumLevyDifferentialDynamicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV2 import QuantumLevyDifferentialDynamicOptimizerV2 +try: # QuantumLevyDifferentialDynamicOptimizerV2 + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV2 import ( + QuantumLevyDifferentialDynamicOptimizerV2, + ) lama_register["QuantumLevyDifferentialDynamicOptimizerV2"] = QuantumLevyDifferentialDynamicOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialDynamicOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialDynamicOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizerV2" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV2", register=True) +except Exception as e: # QuantumLevyDifferentialDynamicOptimizerV2 print("QuantumLevyDifferentialDynamicOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV3 import QuantumLevyDifferentialDynamicOptimizerV3 +try: # QuantumLevyDifferentialDynamicOptimizerV3 + from nevergrad.optimization.lama.QuantumLevyDifferentialDynamicOptimizerV3 import ( + QuantumLevyDifferentialDynamicOptimizerV3, + ) lama_register["QuantumLevyDifferentialDynamicOptimizerV3"] = QuantumLevyDifferentialDynamicOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialDynamicOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3").set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialDynamicOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialDynamicOptimizerV3" + ).set_name("LLAMAQuantumLevyDifferentialDynamicOptimizerV3", register=True) +except Exception as e: # QuantumLevyDifferentialDynamicOptimizerV3 print("QuantumLevyDifferentialDynamicOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizer import QuantumLevyDifferentialHybridOptimizer +try: # QuantumLevyDifferentialHybridOptimizer + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizer import ( + QuantumLevyDifferentialHybridOptimizer, + ) lama_register["QuantumLevyDifferentialHybridOptimizer"] = QuantumLevyDifferentialHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizer").set_name("LLAMAQuantumLevyDifferentialHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridOptimizer" + ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizer", register=True) +except Exception as e: # QuantumLevyDifferentialHybridOptimizer print("QuantumLevyDifferentialHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizerV2 import QuantumLevyDifferentialHybridOptimizerV2 +try: # QuantumLevyDifferentialHybridOptimizerV2 + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridOptimizerV2 import ( + QuantumLevyDifferentialHybridOptimizerV2, + ) lama_register["QuantumLevyDifferentialHybridOptimizerV2"] = QuantumLevyDifferentialHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizerV2").set_name("LLAMAQuantumLevyDifferentialHybridOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridOptimizerV2" + ).set_name("LLAMAQuantumLevyDifferentialHybridOptimizerV2", register=True) +except Exception as e: # QuantumLevyDifferentialHybridOptimizerV2 print("QuantumLevyDifferentialHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDifferentialHybridSearch import QuantumLevyDifferentialHybridSearch +try: # QuantumLevyDifferentialHybridSearch + from nevergrad.optimization.lama.QuantumLevyDifferentialHybridSearch import ( + QuantumLevyDifferentialHybridSearch, + ) lama_register["QuantumLevyDifferentialHybridSearch"] = QuantumLevyDifferentialHybridSearch - res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDifferentialHybridSearch = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridSearch").set_name("LLAMAQuantumLevyDifferentialHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDifferentialHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDifferentialHybridSearch = NonObjectOptimizer( + method="LLAMAQuantumLevyDifferentialHybridSearch" + ).set_name("LLAMAQuantumLevyDifferentialHybridSearch", register=True) +except Exception as e: # QuantumLevyDifferentialHybridSearch print("QuantumLevyDifferentialHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmOptimizerV3 import QuantumLevyDynamicDifferentialSwarmOptimizerV3 - - lama_register["QuantumLevyDynamicDifferentialSwarmOptimizerV3"] = QuantumLevyDynamicDifferentialSwarmOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3").set_name("LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3", register=True) -except Exception as e: +try: # QuantumLevyDynamicDifferentialSwarmOptimizerV3 + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmOptimizerV3 import ( + QuantumLevyDynamicDifferentialSwarmOptimizerV3, + ) + + lama_register["QuantumLevyDynamicDifferentialSwarmOptimizerV3"] = ( + QuantumLevyDynamicDifferentialSwarmOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3" + ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmOptimizerV3", register=True) +except Exception as e: # QuantumLevyDynamicDifferentialSwarmOptimizerV3 print("QuantumLevyDynamicDifferentialSwarmOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmV5 import QuantumLevyDynamicDifferentialSwarmV5 +try: # QuantumLevyDynamicDifferentialSwarmV5 + from nevergrad.optimization.lama.QuantumLevyDynamicDifferentialSwarmV5 import ( + QuantumLevyDynamicDifferentialSwarmV5, + ) lama_register["QuantumLevyDynamicDifferentialSwarmV5"] = QuantumLevyDynamicDifferentialSwarmV5 - res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDynamicDifferentialSwarmV5 = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmV5").set_name("LLAMAQuantumLevyDynamicDifferentialSwarmV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicDifferentialSwarmV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDynamicDifferentialSwarmV5 = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicDifferentialSwarmV5" + ).set_name("LLAMAQuantumLevyDynamicDifferentialSwarmV5", register=True) +except Exception as e: # QuantumLevyDynamicDifferentialSwarmV5 print("QuantumLevyDynamicDifferentialSwarmV5 can not be imported: ", e) -try: +try: # QuantumLevyDynamicParticleSwarm from nevergrad.optimization.lama.QuantumLevyDynamicParticleSwarm import QuantumLevyDynamicParticleSwarm lama_register["QuantumLevyDynamicParticleSwarm"] = QuantumLevyDynamicParticleSwarm - res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicParticleSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDynamicParticleSwarm = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicParticleSwarm").set_name("LLAMAQuantumLevyDynamicParticleSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicParticleSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDynamicParticleSwarm = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicParticleSwarm" + ).set_name("LLAMAQuantumLevyDynamicParticleSwarm", register=True) +except Exception as e: # QuantumLevyDynamicParticleSwarm print("QuantumLevyDynamicParticleSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyDynamicSwarmOptimization import QuantumLevyDynamicSwarmOptimization +try: # QuantumLevyDynamicSwarmOptimization + from nevergrad.optimization.lama.QuantumLevyDynamicSwarmOptimization import ( + QuantumLevyDynamicSwarmOptimization, + ) lama_register["QuantumLevyDynamicSwarmOptimization"] = QuantumLevyDynamicSwarmOptimization - res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicSwarmOptimization").set_name("LLAMAQuantumLevyDynamicSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyDynamicSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyDynamicSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyDynamicSwarmOptimization" + ).set_name("LLAMAQuantumLevyDynamicSwarmOptimization", register=True) +except Exception as e: # QuantumLevyDynamicSwarmOptimization print("QuantumLevyDynamicSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyEliteMemeticDEHybridOptimizer import QuantumLevyEliteMemeticDEHybridOptimizer +try: # QuantumLevyEliteMemeticDEHybridOptimizer + from nevergrad.optimization.lama.QuantumLevyEliteMemeticDEHybridOptimizer import ( + QuantumLevyEliteMemeticDEHybridOptimizer, + ) lama_register["QuantumLevyEliteMemeticDEHybridOptimizer"] = QuantumLevyEliteMemeticDEHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEliteMemeticDEHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer").set_name("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEliteMemeticDEHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEliteMemeticDEHybridOptimizer" + ).set_name("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer", register=True) +except Exception as e: # QuantumLevyEliteMemeticDEHybridOptimizer print("QuantumLevyEliteMemeticDEHybridOptimizer can not be imported: ", e) -try: +try: # QuantumLevyEliteMemeticOptimizer from nevergrad.optimization.lama.QuantumLevyEliteMemeticOptimizer import QuantumLevyEliteMemeticOptimizer lama_register["QuantumLevyEliteMemeticOptimizer"] = QuantumLevyEliteMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEliteMemeticOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticOptimizer").set_name("LLAMAQuantumLevyEliteMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEliteMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEliteMemeticOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEliteMemeticOptimizer" + ).set_name("LLAMAQuantumLevyEliteMemeticOptimizer", register=True) +except Exception as e: # QuantumLevyEliteMemeticOptimizer print("QuantumLevyEliteMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveDifferentialOptimizer import QuantumLevyEnhancedAdaptiveDifferentialOptimizer - - lama_register["QuantumLevyEnhancedAdaptiveDifferentialOptimizer"] = QuantumLevyEnhancedAdaptiveDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer").set_name("LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer", register=True) -except Exception as e: +try: # QuantumLevyEnhancedAdaptiveDifferentialOptimizer + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveDifferentialOptimizer import ( + QuantumLevyEnhancedAdaptiveDifferentialOptimizer, + ) + + lama_register["QuantumLevyEnhancedAdaptiveDifferentialOptimizer"] = ( + QuantumLevyEnhancedAdaptiveDifferentialOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer" + ).set_name("LLAMAQuantumLevyEnhancedAdaptiveDifferentialOptimizer", register=True) +except Exception as e: # QuantumLevyEnhancedAdaptiveDifferentialOptimizer print("QuantumLevyEnhancedAdaptiveDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveOptimizerV2 import QuantumLevyEnhancedAdaptiveOptimizerV2 +try: # QuantumLevyEnhancedAdaptiveOptimizerV2 + from nevergrad.optimization.lama.QuantumLevyEnhancedAdaptiveOptimizerV2 import ( + QuantumLevyEnhancedAdaptiveOptimizerV2, + ) lama_register["QuantumLevyEnhancedAdaptiveOptimizerV2"] = QuantumLevyEnhancedAdaptiveOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2").set_name("LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2" + ).set_name("LLAMAQuantumLevyEnhancedAdaptiveOptimizerV2", register=True) +except Exception as e: # QuantumLevyEnhancedAdaptiveOptimizerV2 print("QuantumLevyEnhancedAdaptiveOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyEnhancedDifferentialOptimizer import QuantumLevyEnhancedDifferentialOptimizer +try: # QuantumLevyEnhancedDifferentialOptimizer + from nevergrad.optimization.lama.QuantumLevyEnhancedDifferentialOptimizer import ( + QuantumLevyEnhancedDifferentialOptimizer, + ) lama_register["QuantumLevyEnhancedDifferentialOptimizer"] = QuantumLevyEnhancedDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedDifferentialOptimizer").set_name("LLAMAQuantumLevyEnhancedDifferentialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEnhancedDifferentialOptimizer = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedDifferentialOptimizer" + ).set_name("LLAMAQuantumLevyEnhancedDifferentialOptimizer", register=True) +except Exception as e: # QuantumLevyEnhancedDifferentialOptimizer print("QuantumLevyEnhancedDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyEnhancedMemeticOptimizerV2 import QuantumLevyEnhancedMemeticOptimizerV2 +try: # QuantumLevyEnhancedMemeticOptimizerV2 + from nevergrad.optimization.lama.QuantumLevyEnhancedMemeticOptimizerV2 import ( + QuantumLevyEnhancedMemeticOptimizerV2, + ) lama_register["QuantumLevyEnhancedMemeticOptimizerV2"] = QuantumLevyEnhancedMemeticOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyEnhancedMemeticOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2").set_name("LLAMAQuantumLevyEnhancedMemeticOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyEnhancedMemeticOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumLevyEnhancedMemeticOptimizerV2" + ).set_name("LLAMAQuantumLevyEnhancedMemeticOptimizerV2", register=True) +except Exception as e: # QuantumLevyEnhancedMemeticOptimizerV2 print("QuantumLevyEnhancedMemeticOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyImprovedDifferentialSwarmOptimization import QuantumLevyImprovedDifferentialSwarmOptimization - - lama_register["QuantumLevyImprovedDifferentialSwarmOptimization"] = QuantumLevyImprovedDifferentialSwarmOptimization - res = NonObjectOptimizer(method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyImprovedDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization").set_name("LLAMAQuantumLevyImprovedDifferentialSwarmOptimization", register=True) -except Exception as e: +try: # QuantumLevyImprovedDifferentialSwarmOptimization + from nevergrad.optimization.lama.QuantumLevyImprovedDifferentialSwarmOptimization import ( + QuantumLevyImprovedDifferentialSwarmOptimization, + ) + + lama_register["QuantumLevyImprovedDifferentialSwarmOptimization"] = ( + QuantumLevyImprovedDifferentialSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyImprovedDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyImprovedDifferentialSwarmOptimization" + ).set_name("LLAMAQuantumLevyImprovedDifferentialSwarmOptimization", register=True) +except Exception as e: # QuantumLevyImprovedDifferentialSwarmOptimization print("QuantumLevyImprovedDifferentialSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumLevyParticleAdaptiveOptimization import QuantumLevyParticleAdaptiveOptimization +try: # QuantumLevyParticleAdaptiveOptimization + from nevergrad.optimization.lama.QuantumLevyParticleAdaptiveOptimization import ( + QuantumLevyParticleAdaptiveOptimization, + ) lama_register["QuantumLevyParticleAdaptiveOptimization"] = QuantumLevyParticleAdaptiveOptimization - res = NonObjectOptimizer(method="LLAMAQuantumLevyParticleAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevyParticleAdaptiveOptimization = NonObjectOptimizer(method="LLAMAQuantumLevyParticleAdaptiveOptimization").set_name("LLAMAQuantumLevyParticleAdaptiveOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevyParticleAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevyParticleAdaptiveOptimization = NonObjectOptimizer( + method="LLAMAQuantumLevyParticleAdaptiveOptimization" + ).set_name("LLAMAQuantumLevyParticleAdaptiveOptimization", register=True) +except Exception as e: # QuantumLevyParticleAdaptiveOptimization print("QuantumLevyParticleAdaptiveOptimization can not be imported: ", e) -try: +try: # QuantumLevySwarmOptimizationV3 from nevergrad.optimization.lama.QuantumLevySwarmOptimizationV3 import QuantumLevySwarmOptimizationV3 lama_register["QuantumLevySwarmOptimizationV3"] = QuantumLevySwarmOptimizationV3 - res = NonObjectOptimizer(method="LLAMAQuantumLevySwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLevySwarmOptimizationV3 = NonObjectOptimizer(method="LLAMAQuantumLevySwarmOptimizationV3").set_name("LLAMAQuantumLevySwarmOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLevySwarmOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLevySwarmOptimizationV3 = NonObjectOptimizer( + method="LLAMAQuantumLevySwarmOptimizationV3" + ).set_name("LLAMAQuantumLevySwarmOptimizationV3", register=True) +except Exception as e: # QuantumLevySwarmOptimizationV3 print("QuantumLevySwarmOptimizationV3 can not be imported: ", e) -try: +try: # QuantumLocustSearch from nevergrad.optimization.lama.QuantumLocustSearch import QuantumLocustSearch lama_register["QuantumLocustSearch"] = QuantumLocustSearch - res = NonObjectOptimizer(method="LLAMAQuantumLocustSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLocustSearch = NonObjectOptimizer(method="LLAMAQuantumLocustSearch").set_name("LLAMAQuantumLocustSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLocustSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLocustSearch = NonObjectOptimizer(method="LLAMAQuantumLocustSearch").set_name( + "LLAMAQuantumLocustSearch", register=True + ) +except Exception as e: # QuantumLocustSearch print("QuantumLocustSearch can not be imported: ", e) -try: +try: # QuantumLocustSearchV2 from nevergrad.optimization.lama.QuantumLocustSearchV2 import QuantumLocustSearchV2 lama_register["QuantumLocustSearchV2"] = QuantumLocustSearchV2 - res = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumLocustSearchV2 = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2").set_name("LLAMAQuantumLocustSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumLocustSearchV2 = NonObjectOptimizer(method="LLAMAQuantumLocustSearchV2").set_name( + "LLAMAQuantumLocustSearchV2", register=True + ) +except Exception as e: # QuantumLocustSearchV2 print("QuantumLocustSearchV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalAdaptiveCrossoverOptimizerV20 import QuantumOrbitalAdaptiveCrossoverOptimizerV20 +try: # QuantumOrbitalAdaptiveCrossoverOptimizerV20 + from nevergrad.optimization.lama.QuantumOrbitalAdaptiveCrossoverOptimizerV20 import ( + QuantumOrbitalAdaptiveCrossoverOptimizerV20, + ) lama_register["QuantumOrbitalAdaptiveCrossoverOptimizerV20"] = QuantumOrbitalAdaptiveCrossoverOptimizerV20 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20 = NonObjectOptimizer(method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20").set_name("LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20" + ).set_name("LLAMAQuantumOrbitalAdaptiveCrossoverOptimizerV20", register=True) +except Exception as e: # QuantumOrbitalAdaptiveCrossoverOptimizerV20 print("QuantumOrbitalAdaptiveCrossoverOptimizerV20 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV12 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV12 import QuantumOrbitalDynamicEnhancerV12 lama_register["QuantumOrbitalDynamicEnhancerV12"] = QuantumOrbitalDynamicEnhancerV12 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV12 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV12").set_name("LLAMAQuantumOrbitalDynamicEnhancerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV12 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV12" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV12", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV12 print("QuantumOrbitalDynamicEnhancerV12 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV13 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV13 import QuantumOrbitalDynamicEnhancerV13 lama_register["QuantumOrbitalDynamicEnhancerV13"] = QuantumOrbitalDynamicEnhancerV13 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV13 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV13").set_name("LLAMAQuantumOrbitalDynamicEnhancerV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV13 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV13" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV13", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV13 print("QuantumOrbitalDynamicEnhancerV13 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV14 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV14 import QuantumOrbitalDynamicEnhancerV14 lama_register["QuantumOrbitalDynamicEnhancerV14"] = QuantumOrbitalDynamicEnhancerV14 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV14 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV14").set_name("LLAMAQuantumOrbitalDynamicEnhancerV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV14 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV14" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV14", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV14 print("QuantumOrbitalDynamicEnhancerV14 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV15 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV15 import QuantumOrbitalDynamicEnhancerV15 lama_register["QuantumOrbitalDynamicEnhancerV15"] = QuantumOrbitalDynamicEnhancerV15 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV15 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV15").set_name("LLAMAQuantumOrbitalDynamicEnhancerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV15 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV15" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV15", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV15 print("QuantumOrbitalDynamicEnhancerV15 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV16 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV16 import QuantumOrbitalDynamicEnhancerV16 lama_register["QuantumOrbitalDynamicEnhancerV16"] = QuantumOrbitalDynamicEnhancerV16 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV16 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV16").set_name("LLAMAQuantumOrbitalDynamicEnhancerV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV16 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV16" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV16", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV16 print("QuantumOrbitalDynamicEnhancerV16 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV17 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV17 import QuantumOrbitalDynamicEnhancerV17 lama_register["QuantumOrbitalDynamicEnhancerV17"] = QuantumOrbitalDynamicEnhancerV17 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV17 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV17").set_name("LLAMAQuantumOrbitalDynamicEnhancerV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV17 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV17" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV17", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV17 print("QuantumOrbitalDynamicEnhancerV17 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV18 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV18 import QuantumOrbitalDynamicEnhancerV18 lama_register["QuantumOrbitalDynamicEnhancerV18"] = QuantumOrbitalDynamicEnhancerV18 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV18 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV18").set_name("LLAMAQuantumOrbitalDynamicEnhancerV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV18 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV18" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV18", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV18 print("QuantumOrbitalDynamicEnhancerV18 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV24 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV24 import QuantumOrbitalDynamicEnhancerV24 lama_register["QuantumOrbitalDynamicEnhancerV24"] = QuantumOrbitalDynamicEnhancerV24 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV24 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV24").set_name("LLAMAQuantumOrbitalDynamicEnhancerV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV24 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV24" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV24", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV24 print("QuantumOrbitalDynamicEnhancerV24 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV25 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV25 import QuantumOrbitalDynamicEnhancerV25 lama_register["QuantumOrbitalDynamicEnhancerV25"] = QuantumOrbitalDynamicEnhancerV25 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV25 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV25").set_name("LLAMAQuantumOrbitalDynamicEnhancerV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV25 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV25" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV25", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV25 print("QuantumOrbitalDynamicEnhancerV25 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV26 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV26 import QuantumOrbitalDynamicEnhancerV26 lama_register["QuantumOrbitalDynamicEnhancerV26"] = QuantumOrbitalDynamicEnhancerV26 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV26 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV26").set_name("LLAMAQuantumOrbitalDynamicEnhancerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV26 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV26" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV26", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV26 print("QuantumOrbitalDynamicEnhancerV26 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV27 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV27 import QuantumOrbitalDynamicEnhancerV27 lama_register["QuantumOrbitalDynamicEnhancerV27"] = QuantumOrbitalDynamicEnhancerV27 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV27 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV27").set_name("LLAMAQuantumOrbitalDynamicEnhancerV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV27 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV27" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV27", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV27 print("QuantumOrbitalDynamicEnhancerV27 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV28 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV28 import QuantumOrbitalDynamicEnhancerV28 lama_register["QuantumOrbitalDynamicEnhancerV28"] = QuantumOrbitalDynamicEnhancerV28 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV28 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV28").set_name("LLAMAQuantumOrbitalDynamicEnhancerV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV28 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV28" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV28", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV28 print("QuantumOrbitalDynamicEnhancerV28 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV29 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV29 import QuantumOrbitalDynamicEnhancerV29 lama_register["QuantumOrbitalDynamicEnhancerV29"] = QuantumOrbitalDynamicEnhancerV29 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV29 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV29").set_name("LLAMAQuantumOrbitalDynamicEnhancerV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV29 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV29" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV29", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV29 print("QuantumOrbitalDynamicEnhancerV29 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV30 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV30 import QuantumOrbitalDynamicEnhancerV30 lama_register["QuantumOrbitalDynamicEnhancerV30"] = QuantumOrbitalDynamicEnhancerV30 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV30 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV30").set_name("LLAMAQuantumOrbitalDynamicEnhancerV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV30 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV30" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV30", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV30 print("QuantumOrbitalDynamicEnhancerV30 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV31 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV31 import QuantumOrbitalDynamicEnhancerV31 lama_register["QuantumOrbitalDynamicEnhancerV31"] = QuantumOrbitalDynamicEnhancerV31 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV31 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV31").set_name("LLAMAQuantumOrbitalDynamicEnhancerV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV31 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV31" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV31", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV31 print("QuantumOrbitalDynamicEnhancerV31 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV32 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV32 import QuantumOrbitalDynamicEnhancerV32 lama_register["QuantumOrbitalDynamicEnhancerV32"] = QuantumOrbitalDynamicEnhancerV32 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV32 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV32").set_name("LLAMAQuantumOrbitalDynamicEnhancerV32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV32 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV32" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV32", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV32 print("QuantumOrbitalDynamicEnhancerV32 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV33 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV33 import QuantumOrbitalDynamicEnhancerV33 lama_register["QuantumOrbitalDynamicEnhancerV33"] = QuantumOrbitalDynamicEnhancerV33 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV33 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV33").set_name("LLAMAQuantumOrbitalDynamicEnhancerV33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV33 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV33" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV33", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV33 print("QuantumOrbitalDynamicEnhancerV33 can not be imported: ", e) -try: +try: # QuantumOrbitalDynamicEnhancerV34 from nevergrad.optimization.lama.QuantumOrbitalDynamicEnhancerV34 import QuantumOrbitalDynamicEnhancerV34 lama_register["QuantumOrbitalDynamicEnhancerV34"] = QuantumOrbitalDynamicEnhancerV34 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicEnhancerV34 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV34").set_name("LLAMAQuantumOrbitalDynamicEnhancerV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicEnhancerV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicEnhancerV34 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicEnhancerV34" + ).set_name("LLAMAQuantumOrbitalDynamicEnhancerV34", register=True) +except Exception as e: # QuantumOrbitalDynamicEnhancerV34 print("QuantumOrbitalDynamicEnhancerV34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalDynamicOptimizerV11 import QuantumOrbitalDynamicOptimizerV11 +try: # QuantumOrbitalDynamicOptimizerV11 + from nevergrad.optimization.lama.QuantumOrbitalDynamicOptimizerV11 import ( + QuantumOrbitalDynamicOptimizerV11, + ) lama_register["QuantumOrbitalDynamicOptimizerV11"] = QuantumOrbitalDynamicOptimizerV11 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalDynamicOptimizerV11 = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicOptimizerV11").set_name("LLAMAQuantumOrbitalDynamicOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalDynamicOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalDynamicOptimizerV11 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalDynamicOptimizerV11" + ).set_name("LLAMAQuantumOrbitalDynamicOptimizerV11", register=True) +except Exception as e: # QuantumOrbitalDynamicOptimizerV11 print("QuantumOrbitalDynamicOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalEnhancedCrossoverOptimizerV22 import QuantumOrbitalEnhancedCrossoverOptimizerV22 +try: # QuantumOrbitalEnhancedCrossoverOptimizerV22 + from nevergrad.optimization.lama.QuantumOrbitalEnhancedCrossoverOptimizerV22 import ( + QuantumOrbitalEnhancedCrossoverOptimizerV22, + ) lama_register["QuantumOrbitalEnhancedCrossoverOptimizerV22"] = QuantumOrbitalEnhancedCrossoverOptimizerV22 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22 = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22").set_name("LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22" + ).set_name("LLAMAQuantumOrbitalEnhancedCrossoverOptimizerV22", register=True) +except Exception as e: # QuantumOrbitalEnhancedCrossoverOptimizerV22 print("QuantumOrbitalEnhancedCrossoverOptimizerV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalEnhancedDynamicEnhancerV19 import QuantumOrbitalEnhancedDynamicEnhancerV19 +try: # QuantumOrbitalEnhancedDynamicEnhancerV19 + from nevergrad.optimization.lama.QuantumOrbitalEnhancedDynamicEnhancerV19 import ( + QuantumOrbitalEnhancedDynamicEnhancerV19, + ) lama_register["QuantumOrbitalEnhancedDynamicEnhancerV19"] = QuantumOrbitalEnhancedDynamicEnhancerV19 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19 = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19").set_name("LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19" + ).set_name("LLAMAQuantumOrbitalEnhancedDynamicEnhancerV19", register=True) +except Exception as e: # QuantumOrbitalEnhancedDynamicEnhancerV19 print("QuantumOrbitalEnhancedDynamicEnhancerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalHarmonicOptimizerV10 import QuantumOrbitalHarmonicOptimizerV10 +try: # QuantumOrbitalHarmonicOptimizerV10 + from nevergrad.optimization.lama.QuantumOrbitalHarmonicOptimizerV10 import ( + QuantumOrbitalHarmonicOptimizerV10, + ) lama_register["QuantumOrbitalHarmonicOptimizerV10"] = QuantumOrbitalHarmonicOptimizerV10 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalHarmonicOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalHarmonicOptimizerV10 = NonObjectOptimizer(method="LLAMAQuantumOrbitalHarmonicOptimizerV10").set_name("LLAMAQuantumOrbitalHarmonicOptimizerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalHarmonicOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalHarmonicOptimizerV10 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalHarmonicOptimizerV10" + ).set_name("LLAMAQuantumOrbitalHarmonicOptimizerV10", register=True) +except Exception as e: # QuantumOrbitalHarmonicOptimizerV10 print("QuantumOrbitalHarmonicOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalPrecisionOptimizerV34 import QuantumOrbitalPrecisionOptimizerV34 +try: # QuantumOrbitalPrecisionOptimizerV34 + from nevergrad.optimization.lama.QuantumOrbitalPrecisionOptimizerV34 import ( + QuantumOrbitalPrecisionOptimizerV34, + ) lama_register["QuantumOrbitalPrecisionOptimizerV34"] = QuantumOrbitalPrecisionOptimizerV34 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalPrecisionOptimizerV34 = NonObjectOptimizer(method="LLAMAQuantumOrbitalPrecisionOptimizerV34").set_name("LLAMAQuantumOrbitalPrecisionOptimizerV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalPrecisionOptimizerV34 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalPrecisionOptimizerV34" + ).set_name("LLAMAQuantumOrbitalPrecisionOptimizerV34", register=True) +except Exception as e: # QuantumOrbitalPrecisionOptimizerV34 print("QuantumOrbitalPrecisionOptimizerV34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV21 import QuantumOrbitalRefinedCrossoverOptimizerV21 +try: # QuantumOrbitalRefinedCrossoverOptimizerV21 + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV21 import ( + QuantumOrbitalRefinedCrossoverOptimizerV21, + ) lama_register["QuantumOrbitalRefinedCrossoverOptimizerV21"] = QuantumOrbitalRefinedCrossoverOptimizerV21 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21 = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21").set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21" + ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV21", register=True) +except Exception as e: # QuantumOrbitalRefinedCrossoverOptimizerV21 print("QuantumOrbitalRefinedCrossoverOptimizerV21 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV23 import QuantumOrbitalRefinedCrossoverOptimizerV23 +try: # QuantumOrbitalRefinedCrossoverOptimizerV23 + from nevergrad.optimization.lama.QuantumOrbitalRefinedCrossoverOptimizerV23 import ( + QuantumOrbitalRefinedCrossoverOptimizerV23, + ) lama_register["QuantumOrbitalRefinedCrossoverOptimizerV23"] = QuantumOrbitalRefinedCrossoverOptimizerV23 - res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23 = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23").set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23 = NonObjectOptimizer( + method="LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23" + ).set_name("LLAMAQuantumOrbitalRefinedCrossoverOptimizerV23", register=True) +except Exception as e: # QuantumOrbitalRefinedCrossoverOptimizerV23 print("QuantumOrbitalRefinedCrossoverOptimizerV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumParticleSwarmDifferentialEvolution import QuantumParticleSwarmDifferentialEvolution +try: # QuantumParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.QuantumParticleSwarmDifferentialEvolution import ( + QuantumParticleSwarmDifferentialEvolution, + ) lama_register["QuantumParticleSwarmDifferentialEvolution"] = QuantumParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmDifferentialEvolution").set_name("LLAMAQuantumParticleSwarmDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMAQuantumParticleSwarmDifferentialEvolution" + ).set_name("LLAMAQuantumParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # QuantumParticleSwarmDifferentialEvolution print("QuantumParticleSwarmDifferentialEvolution can not be imported: ", e) -try: +try: # QuantumParticleSwarmOptimization from nevergrad.optimization.lama.QuantumParticleSwarmOptimization import QuantumParticleSwarmOptimization lama_register["QuantumParticleSwarmOptimization"] = QuantumParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumParticleSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmOptimization").set_name("LLAMAQuantumParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMAQuantumParticleSwarmOptimization" + ).set_name("LLAMAQuantumParticleSwarmOptimization", register=True) +except Exception as e: # QuantumParticleSwarmOptimization print("QuantumParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumReactiveCooperativeStrategy import QuantumReactiveCooperativeStrategy +try: # QuantumReactiveCooperativeStrategy + from nevergrad.optimization.lama.QuantumReactiveCooperativeStrategy import ( + QuantumReactiveCooperativeStrategy, + ) lama_register["QuantumReactiveCooperativeStrategy"] = QuantumReactiveCooperativeStrategy - res = NonObjectOptimizer(method="LLAMAQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumReactiveCooperativeStrategy = NonObjectOptimizer(method="LLAMAQuantumReactiveCooperativeStrategy").set_name("LLAMAQuantumReactiveCooperativeStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumReactiveCooperativeStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumReactiveCooperativeStrategy = NonObjectOptimizer( + method="LLAMAQuantumReactiveCooperativeStrategy" + ).set_name("LLAMAQuantumReactiveCooperativeStrategy", register=True) +except Exception as e: # QuantumReactiveCooperativeStrategy print("QuantumReactiveCooperativeStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveExplorationOptimization import QuantumRefinedAdaptiveExplorationOptimization - - lama_register["QuantumRefinedAdaptiveExplorationOptimization"] = QuantumRefinedAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumRefinedAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveExplorationOptimization").set_name("LLAMAQuantumRefinedAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # QuantumRefinedAdaptiveExplorationOptimization + from nevergrad.optimization.lama.QuantumRefinedAdaptiveExplorationOptimization import ( + QuantumRefinedAdaptiveExplorationOptimization, + ) + + lama_register["QuantumRefinedAdaptiveExplorationOptimization"] = ( + QuantumRefinedAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumRefinedAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveExplorationOptimization" + ).set_name("LLAMAQuantumRefinedAdaptiveExplorationOptimization", register=True) +except Exception as e: # QuantumRefinedAdaptiveExplorationOptimization print("QuantumRefinedAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveHybridStrategyV5 import QuantumRefinedAdaptiveHybridStrategyV5 +try: # QuantumRefinedAdaptiveHybridStrategyV5 + from nevergrad.optimization.lama.QuantumRefinedAdaptiveHybridStrategyV5 import ( + QuantumRefinedAdaptiveHybridStrategyV5, + ) lama_register["QuantumRefinedAdaptiveHybridStrategyV5"] = QuantumRefinedAdaptiveHybridStrategyV5 - res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumRefinedAdaptiveHybridStrategyV5 = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5").set_name("LLAMAQuantumRefinedAdaptiveHybridStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumRefinedAdaptiveHybridStrategyV5 = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveHybridStrategyV5" + ).set_name("LLAMAQuantumRefinedAdaptiveHybridStrategyV5", register=True) +except Exception as e: # QuantumRefinedAdaptiveHybridStrategyV5 print("QuantumRefinedAdaptiveHybridStrategyV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumRefinedAdaptiveStrategicOptimizer import QuantumRefinedAdaptiveStrategicOptimizer +try: # QuantumRefinedAdaptiveStrategicOptimizer + from nevergrad.optimization.lama.QuantumRefinedAdaptiveStrategicOptimizer import ( + QuantumRefinedAdaptiveStrategicOptimizer, + ) lama_register["QuantumRefinedAdaptiveStrategicOptimizer"] = QuantumRefinedAdaptiveStrategicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumRefinedAdaptiveStrategicOptimizer = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer").set_name("LLAMAQuantumRefinedAdaptiveStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumRefinedAdaptiveStrategicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumRefinedAdaptiveStrategicOptimizer" + ).set_name("LLAMAQuantumRefinedAdaptiveStrategicOptimizer", register=True) +except Exception as e: # QuantumRefinedAdaptiveStrategicOptimizer print("QuantumRefinedAdaptiveStrategicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumRefinedDynamicAdaptiveHybridDEPSO import QuantumRefinedDynamicAdaptiveHybridDEPSO +try: # QuantumRefinedDynamicAdaptiveHybridDEPSO + from nevergrad.optimization.lama.QuantumRefinedDynamicAdaptiveHybridDEPSO import ( + QuantumRefinedDynamicAdaptiveHybridDEPSO, + ) lama_register["QuantumRefinedDynamicAdaptiveHybridDEPSO"] = QuantumRefinedDynamicAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO").set_name("LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO" + ).set_name("LLAMAQuantumRefinedDynamicAdaptiveHybridDEPSO", register=True) +except Exception as e: # QuantumRefinedDynamicAdaptiveHybridDEPSO print("QuantumRefinedDynamicAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumReinforcedNesterovAccelerator import QuantumReinforcedNesterovAccelerator +try: # QuantumReinforcedNesterovAccelerator + from nevergrad.optimization.lama.QuantumReinforcedNesterovAccelerator import ( + QuantumReinforcedNesterovAccelerator, + ) lama_register["QuantumReinforcedNesterovAccelerator"] = QuantumReinforcedNesterovAccelerator - res = NonObjectOptimizer(method="LLAMAQuantumReinforcedNesterovAccelerator")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumReinforcedNesterovAccelerator = NonObjectOptimizer(method="LLAMAQuantumReinforcedNesterovAccelerator").set_name("LLAMAQuantumReinforcedNesterovAccelerator", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumReinforcedNesterovAccelerator")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumReinforcedNesterovAccelerator = NonObjectOptimizer( + method="LLAMAQuantumReinforcedNesterovAccelerator" + ).set_name("LLAMAQuantumReinforcedNesterovAccelerator", register=True) +except Exception as e: # QuantumReinforcedNesterovAccelerator print("QuantumReinforcedNesterovAccelerator can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumResonanceEvolutionaryStrategy import QuantumResonanceEvolutionaryStrategy +try: # QuantumResonanceEvolutionaryStrategy + from nevergrad.optimization.lama.QuantumResonanceEvolutionaryStrategy import ( + QuantumResonanceEvolutionaryStrategy, + ) lama_register["QuantumResonanceEvolutionaryStrategy"] = QuantumResonanceEvolutionaryStrategy - res = NonObjectOptimizer(method="LLAMAQuantumResonanceEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumResonanceEvolutionaryStrategy = NonObjectOptimizer(method="LLAMAQuantumResonanceEvolutionaryStrategy").set_name("LLAMAQuantumResonanceEvolutionaryStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumResonanceEvolutionaryStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumResonanceEvolutionaryStrategy = NonObjectOptimizer( + method="LLAMAQuantumResonanceEvolutionaryStrategy" + ).set_name("LLAMAQuantumResonanceEvolutionaryStrategy", register=True) +except Exception as e: # QuantumResonanceEvolutionaryStrategy print("QuantumResonanceEvolutionaryStrategy can not be imported: ", e) -try: +try: # QuantumSearch from nevergrad.optimization.lama.QuantumSearch import QuantumSearch lama_register["QuantumSearch"] = QuantumSearch - res = NonObjectOptimizer(method="LLAMAQuantumSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSearch = NonObjectOptimizer(method="LLAMAQuantumSearch").set_name("LLAMAQuantumSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSearch = NonObjectOptimizer(method="LLAMAQuantumSearch").set_name( + "LLAMAQuantumSearch", register=True + ) +except Exception as e: # QuantumSearch print("QuantumSearch can not be imported: ", e) -try: +try: # QuantumSimulatedAnnealing from nevergrad.optimization.lama.QuantumSimulatedAnnealing import QuantumSimulatedAnnealing lama_register["QuantumSimulatedAnnealing"] = QuantumSimulatedAnnealing - res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing").set_name("LLAMAQuantumSimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSimulatedAnnealing = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealing").set_name( + "LLAMAQuantumSimulatedAnnealing", register=True + ) +except Exception as e: # QuantumSimulatedAnnealing print("QuantumSimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSimulatedAnnealingHybridOptimizer import QuantumSimulatedAnnealingHybridOptimizer +try: # QuantumSimulatedAnnealingHybridOptimizer + from nevergrad.optimization.lama.QuantumSimulatedAnnealingHybridOptimizer import ( + QuantumSimulatedAnnealingHybridOptimizer, + ) lama_register["QuantumSimulatedAnnealingHybridOptimizer"] = QuantumSimulatedAnnealingHybridOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSimulatedAnnealingHybridOptimizer = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingHybridOptimizer").set_name("LLAMAQuantumSimulatedAnnealingHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSimulatedAnnealingHybridOptimizer = NonObjectOptimizer( + method="LLAMAQuantumSimulatedAnnealingHybridOptimizer" + ).set_name("LLAMAQuantumSimulatedAnnealingHybridOptimizer", register=True) +except Exception as e: # QuantumSimulatedAnnealingHybridOptimizer print("QuantumSimulatedAnnealingHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSimulatedAnnealingImproved import QuantumSimulatedAnnealingImproved +try: # QuantumSimulatedAnnealingImproved + from nevergrad.optimization.lama.QuantumSimulatedAnnealingImproved import ( + QuantumSimulatedAnnealingImproved, + ) lama_register["QuantumSimulatedAnnealingImproved"] = QuantumSimulatedAnnealingImproved - res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSimulatedAnnealingImproved = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingImproved").set_name("LLAMAQuantumSimulatedAnnealingImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSimulatedAnnealingImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSimulatedAnnealingImproved = NonObjectOptimizer( + method="LLAMAQuantumSimulatedAnnealingImproved" + ).set_name("LLAMAQuantumSimulatedAnnealingImproved", register=True) +except Exception as e: # QuantumSimulatedAnnealingImproved print("QuantumSimulatedAnnealingImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveHybridStrategy import QuantumSpectralAdaptiveHybridStrategy +try: # QuantumSpectralAdaptiveHybridStrategy + from nevergrad.optimization.lama.QuantumSpectralAdaptiveHybridStrategy import ( + QuantumSpectralAdaptiveHybridStrategy, + ) lama_register["QuantumSpectralAdaptiveHybridStrategy"] = QuantumSpectralAdaptiveHybridStrategy - res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralAdaptiveHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveHybridStrategy").set_name("LLAMAQuantumSpectralAdaptiveHybridStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveHybridStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralAdaptiveHybridStrategy = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveHybridStrategy" + ).set_name("LLAMAQuantumSpectralAdaptiveHybridStrategy", register=True) +except Exception as e: # QuantumSpectralAdaptiveHybridStrategy print("QuantumSpectralAdaptiveHybridStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV2 import QuantumSpectralAdaptiveOptimizerV2 +try: # QuantumSpectralAdaptiveOptimizerV2 + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV2 import ( + QuantumSpectralAdaptiveOptimizerV2, + ) lama_register["QuantumSpectralAdaptiveOptimizerV2"] = QuantumSpectralAdaptiveOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV2").set_name("LLAMAQuantumSpectralAdaptiveOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveOptimizerV2" + ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV2", register=True) +except Exception as e: # QuantumSpectralAdaptiveOptimizerV2 print("QuantumSpectralAdaptiveOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV3 import QuantumSpectralAdaptiveOptimizerV3 +try: # QuantumSpectralAdaptiveOptimizerV3 + from nevergrad.optimization.lama.QuantumSpectralAdaptiveOptimizerV3 import ( + QuantumSpectralAdaptiveOptimizerV3, + ) lama_register["QuantumSpectralAdaptiveOptimizerV3"] = QuantumSpectralAdaptiveOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralAdaptiveOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV3").set_name("LLAMAQuantumSpectralAdaptiveOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralAdaptiveOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralAdaptiveOptimizerV3 = NonObjectOptimizer( + method="LLAMAQuantumSpectralAdaptiveOptimizerV3" + ).set_name("LLAMAQuantumSpectralAdaptiveOptimizerV3", register=True) +except Exception as e: # QuantumSpectralAdaptiveOptimizerV3 print("QuantumSpectralAdaptiveOptimizerV3 can not be imported: ", e) -try: +try: # QuantumSpectralDynamicOptimizer from nevergrad.optimization.lama.QuantumSpectralDynamicOptimizer import QuantumSpectralDynamicOptimizer lama_register["QuantumSpectralDynamicOptimizer"] = QuantumSpectralDynamicOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumSpectralDynamicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralDynamicOptimizer = NonObjectOptimizer(method="LLAMAQuantumSpectralDynamicOptimizer").set_name("LLAMAQuantumSpectralDynamicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralDynamicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralDynamicOptimizer = NonObjectOptimizer( + method="LLAMAQuantumSpectralDynamicOptimizer" + ).set_name("LLAMAQuantumSpectralDynamicOptimizer", register=True) +except Exception as e: # QuantumSpectralDynamicOptimizer print("QuantumSpectralDynamicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSpectralEnhancedOptimizerV5 import QuantumSpectralEnhancedOptimizerV5 +try: # QuantumSpectralEnhancedOptimizerV5 + from nevergrad.optimization.lama.QuantumSpectralEnhancedOptimizerV5 import ( + QuantumSpectralEnhancedOptimizerV5, + ) lama_register["QuantumSpectralEnhancedOptimizerV5"] = QuantumSpectralEnhancedOptimizerV5 - res = NonObjectOptimizer(method="LLAMAQuantumSpectralEnhancedOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralEnhancedOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumSpectralEnhancedOptimizerV5").set_name("LLAMAQuantumSpectralEnhancedOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralEnhancedOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralEnhancedOptimizerV5 = NonObjectOptimizer( + method="LLAMAQuantumSpectralEnhancedOptimizerV5" + ).set_name("LLAMAQuantumSpectralEnhancedOptimizerV5", register=True) +except Exception as e: # QuantumSpectralEnhancedOptimizerV5 print("QuantumSpectralEnhancedOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSpectralRefinedOptimizerV4 import QuantumSpectralRefinedOptimizerV4 +try: # QuantumSpectralRefinedOptimizerV4 + from nevergrad.optimization.lama.QuantumSpectralRefinedOptimizerV4 import ( + QuantumSpectralRefinedOptimizerV4, + ) lama_register["QuantumSpectralRefinedOptimizerV4"] = QuantumSpectralRefinedOptimizerV4 - res = NonObjectOptimizer(method="LLAMAQuantumSpectralRefinedOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSpectralRefinedOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumSpectralRefinedOptimizerV4").set_name("LLAMAQuantumSpectralRefinedOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSpectralRefinedOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSpectralRefinedOptimizerV4 = NonObjectOptimizer( + method="LLAMAQuantumSpectralRefinedOptimizerV4" + ).set_name("LLAMAQuantumSpectralRefinedOptimizerV4", register=True) +except Exception as e: # QuantumSpectralRefinedOptimizerV4 print("QuantumSpectralRefinedOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumStabilizedDynamicBalanceOptimizer import QuantumStabilizedDynamicBalanceOptimizer +try: # QuantumStabilizedDynamicBalanceOptimizer + from nevergrad.optimization.lama.QuantumStabilizedDynamicBalanceOptimizer import ( + QuantumStabilizedDynamicBalanceOptimizer, + ) lama_register["QuantumStabilizedDynamicBalanceOptimizer"] = QuantumStabilizedDynamicBalanceOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumStabilizedDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStabilizedDynamicBalanceOptimizer = NonObjectOptimizer(method="LLAMAQuantumStabilizedDynamicBalanceOptimizer").set_name("LLAMAQuantumStabilizedDynamicBalanceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStabilizedDynamicBalanceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStabilizedDynamicBalanceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStabilizedDynamicBalanceOptimizer" + ).set_name("LLAMAQuantumStabilizedDynamicBalanceOptimizer", register=True) +except Exception as e: # QuantumStabilizedDynamicBalanceOptimizer print("QuantumStabilizedDynamicBalanceOptimizer can not be imported: ", e) -try: +try: # QuantumStateConvergenceOptimizer from nevergrad.optimization.lama.QuantumStateConvergenceOptimizer import QuantumStateConvergenceOptimizer lama_register["QuantumStateConvergenceOptimizer"] = QuantumStateConvergenceOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStateConvergenceOptimizer = NonObjectOptimizer(method="LLAMAQuantumStateConvergenceOptimizer").set_name("LLAMAQuantumStateConvergenceOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStateConvergenceOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStateConvergenceOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStateConvergenceOptimizer" + ).set_name("LLAMAQuantumStateConvergenceOptimizer", register=True) +except Exception as e: # QuantumStateConvergenceOptimizer print("QuantumStateConvergenceOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumStateCrossoverOptimization import QuantumStateCrossoverOptimization +try: # QuantumStateCrossoverOptimization + from nevergrad.optimization.lama.QuantumStateCrossoverOptimization import ( + QuantumStateCrossoverOptimization, + ) lama_register["QuantumStateCrossoverOptimization"] = QuantumStateCrossoverOptimization - res = NonObjectOptimizer(method="LLAMAQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStateCrossoverOptimization = NonObjectOptimizer(method="LLAMAQuantumStateCrossoverOptimization").set_name("LLAMAQuantumStateCrossoverOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStateCrossoverOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStateCrossoverOptimization = NonObjectOptimizer( + method="LLAMAQuantumStateCrossoverOptimization" + ).set_name("LLAMAQuantumStateCrossoverOptimization", register=True) +except Exception as e: # QuantumStateCrossoverOptimization print("QuantumStateCrossoverOptimization can not be imported: ", e) -try: +try: # QuantumStateHybridStrategy from nevergrad.optimization.lama.QuantumStateHybridStrategy import QuantumStateHybridStrategy lama_register["QuantumStateHybridStrategy"] = QuantumStateHybridStrategy - res = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStateHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy").set_name("LLAMAQuantumStateHybridStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStateHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateHybridStrategy").set_name( + "LLAMAQuantumStateHybridStrategy", register=True + ) +except Exception as e: # QuantumStateHybridStrategy print("QuantumStateHybridStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumStateRefinedHybridStrategy import QuantumStateRefinedHybridStrategy +try: # QuantumStateRefinedHybridStrategy + from nevergrad.optimization.lama.QuantumStateRefinedHybridStrategy import ( + QuantumStateRefinedHybridStrategy, + ) lama_register["QuantumStateRefinedHybridStrategy"] = QuantumStateRefinedHybridStrategy - res = NonObjectOptimizer(method="LLAMAQuantumStateRefinedHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStateRefinedHybridStrategy = NonObjectOptimizer(method="LLAMAQuantumStateRefinedHybridStrategy").set_name("LLAMAQuantumStateRefinedHybridStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStateRefinedHybridStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStateRefinedHybridStrategy = NonObjectOptimizer( + method="LLAMAQuantumStateRefinedHybridStrategy" + ).set_name("LLAMAQuantumStateRefinedHybridStrategy", register=True) +except Exception as e: # QuantumStateRefinedHybridStrategy print("QuantumStateRefinedHybridStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumStochasticGradientDescentFireworks import QuantumStochasticGradientDescentFireworks +try: # QuantumStochasticGradientDescentFireworks + from nevergrad.optimization.lama.QuantumStochasticGradientDescentFireworks import ( + QuantumStochasticGradientDescentFireworks, + ) lama_register["QuantumStochasticGradientDescentFireworks"] = QuantumStochasticGradientDescentFireworks - res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientDescentFireworks")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStochasticGradientDescentFireworks = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientDescentFireworks").set_name("LLAMAQuantumStochasticGradientDescentFireworks", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientDescentFireworks")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStochasticGradientDescentFireworks = NonObjectOptimizer( + method="LLAMAQuantumStochasticGradientDescentFireworks" + ).set_name("LLAMAQuantumStochasticGradientDescentFireworks", register=True) +except Exception as e: # QuantumStochasticGradientDescentFireworks print("QuantumStochasticGradientDescentFireworks can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumStochasticGradientOptimizer import QuantumStochasticGradientOptimizer +try: # QuantumStochasticGradientOptimizer + from nevergrad.optimization.lama.QuantumStochasticGradientOptimizer import ( + QuantumStochasticGradientOptimizer, + ) lama_register["QuantumStochasticGradientOptimizer"] = QuantumStochasticGradientOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumStochasticGradientOptimizer = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientOptimizer").set_name("LLAMAQuantumStochasticGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumStochasticGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumStochasticGradientOptimizer = NonObjectOptimizer( + method="LLAMAQuantumStochasticGradientOptimizer" + ).set_name("LLAMAQuantumStochasticGradientOptimizer", register=True) +except Exception as e: # QuantumStochasticGradientOptimizer print("QuantumStochasticGradientOptimizer can not be imported: ", e) -try: +try: # QuantumSwarmOptimization from nevergrad.optimization.lama.QuantumSwarmOptimization import QuantumSwarmOptimization lama_register["QuantumSwarmOptimization"] = QuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization").set_name("LLAMAQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimization").set_name( + "LLAMAQuantumSwarmOptimization", register=True + ) +except Exception as e: # QuantumSwarmOptimization print("QuantumSwarmOptimization can not be imported: ", e) -try: +try: # QuantumSwarmOptimizationImproved from nevergrad.optimization.lama.QuantumSwarmOptimizationImproved import QuantumSwarmOptimizationImproved lama_register["QuantumSwarmOptimizationImproved"] = QuantumSwarmOptimizationImproved - res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimizationImproved").set_name("LLAMAQuantumSwarmOptimizationImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMAQuantumSwarmOptimizationImproved" + ).set_name("LLAMAQuantumSwarmOptimizationImproved", register=True) +except Exception as e: # QuantumSwarmOptimizationImproved print("QuantumSwarmOptimizationImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.QuantumSymbioticEnhancedStrategyV3 import QuantumSymbioticEnhancedStrategyV3 +try: # QuantumSymbioticEnhancedStrategyV3 + from nevergrad.optimization.lama.QuantumSymbioticEnhancedStrategyV3 import ( + QuantumSymbioticEnhancedStrategyV3, + ) lama_register["QuantumSymbioticEnhancedStrategyV3"] = QuantumSymbioticEnhancedStrategyV3 - res = NonObjectOptimizer(method="LLAMAQuantumSymbioticEnhancedStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumSymbioticEnhancedStrategyV3 = NonObjectOptimizer(method="LLAMAQuantumSymbioticEnhancedStrategyV3").set_name("LLAMAQuantumSymbioticEnhancedStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumSymbioticEnhancedStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumSymbioticEnhancedStrategyV3 = NonObjectOptimizer( + method="LLAMAQuantumSymbioticEnhancedStrategyV3" + ).set_name("LLAMAQuantumSymbioticEnhancedStrategyV3", register=True) +except Exception as e: # QuantumSymbioticEnhancedStrategyV3 print("QuantumSymbioticEnhancedStrategyV3 can not be imported: ", e) -try: +try: # QuantumTunedGradientSearchV2 from nevergrad.optimization.lama.QuantumTunedGradientSearchV2 import QuantumTunedGradientSearchV2 lama_register["QuantumTunedGradientSearchV2"] = QuantumTunedGradientSearchV2 - res = NonObjectOptimizer(method="LLAMAQuantumTunedGradientSearchV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunedGradientSearchV2 = NonObjectOptimizer(method="LLAMAQuantumTunedGradientSearchV2").set_name("LLAMAQuantumTunedGradientSearchV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunedGradientSearchV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunedGradientSearchV2 = NonObjectOptimizer( + method="LLAMAQuantumTunedGradientSearchV2" + ).set_name("LLAMAQuantumTunedGradientSearchV2", register=True) +except Exception as e: # QuantumTunedGradientSearchV2 print("QuantumTunedGradientSearchV2 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizer from nevergrad.optimization.lama.QuantumTunnelingOptimizer import QuantumTunnelingOptimizer lama_register["QuantumTunnelingOptimizer"] = QuantumTunnelingOptimizer - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer").set_name("LLAMAQuantumTunnelingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizer = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizer").set_name( + "LLAMAQuantumTunnelingOptimizer", register=True + ) +except Exception as e: # QuantumTunnelingOptimizer print("QuantumTunnelingOptimizer can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV10 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV10 import QuantumTunnelingOptimizerV10 lama_register["QuantumTunnelingOptimizerV10"] = QuantumTunnelingOptimizerV10 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV10 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV10").set_name("LLAMAQuantumTunnelingOptimizerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV10 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV10" + ).set_name("LLAMAQuantumTunnelingOptimizerV10", register=True) +except Exception as e: # QuantumTunnelingOptimizerV10 print("QuantumTunnelingOptimizerV10 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV11 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV11 import QuantumTunnelingOptimizerV11 lama_register["QuantumTunnelingOptimizerV11"] = QuantumTunnelingOptimizerV11 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV11 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV11").set_name("LLAMAQuantumTunnelingOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV11 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV11" + ).set_name("LLAMAQuantumTunnelingOptimizerV11", register=True) +except Exception as e: # QuantumTunnelingOptimizerV11 print("QuantumTunnelingOptimizerV11 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV12 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV12 import QuantumTunnelingOptimizerV12 lama_register["QuantumTunnelingOptimizerV12"] = QuantumTunnelingOptimizerV12 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV12 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV12").set_name("LLAMAQuantumTunnelingOptimizerV12", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV12 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV12" + ).set_name("LLAMAQuantumTunnelingOptimizerV12", register=True) +except Exception as e: # QuantumTunnelingOptimizerV12 print("QuantumTunnelingOptimizerV12 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV13 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV13 import QuantumTunnelingOptimizerV13 lama_register["QuantumTunnelingOptimizerV13"] = QuantumTunnelingOptimizerV13 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV13 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV13").set_name("LLAMAQuantumTunnelingOptimizerV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV13 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV13" + ).set_name("LLAMAQuantumTunnelingOptimizerV13", register=True) +except Exception as e: # QuantumTunnelingOptimizerV13 print("QuantumTunnelingOptimizerV13 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV14 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV14 import QuantumTunnelingOptimizerV14 lama_register["QuantumTunnelingOptimizerV14"] = QuantumTunnelingOptimizerV14 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV14 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV14").set_name("LLAMAQuantumTunnelingOptimizerV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV14 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV14" + ).set_name("LLAMAQuantumTunnelingOptimizerV14", register=True) +except Exception as e: # QuantumTunnelingOptimizerV14 print("QuantumTunnelingOptimizerV14 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV15 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV15 import QuantumTunnelingOptimizerV15 lama_register["QuantumTunnelingOptimizerV15"] = QuantumTunnelingOptimizerV15 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV15 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV15").set_name("LLAMAQuantumTunnelingOptimizerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV15 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV15" + ).set_name("LLAMAQuantumTunnelingOptimizerV15", register=True) +except Exception as e: # QuantumTunnelingOptimizerV15 print("QuantumTunnelingOptimizerV15 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV16 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV16 import QuantumTunnelingOptimizerV16 lama_register["QuantumTunnelingOptimizerV16"] = QuantumTunnelingOptimizerV16 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV16 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV16").set_name("LLAMAQuantumTunnelingOptimizerV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV16 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV16" + ).set_name("LLAMAQuantumTunnelingOptimizerV16", register=True) +except Exception as e: # QuantumTunnelingOptimizerV16 print("QuantumTunnelingOptimizerV16 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV17 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV17 import QuantumTunnelingOptimizerV17 lama_register["QuantumTunnelingOptimizerV17"] = QuantumTunnelingOptimizerV17 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV17 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV17").set_name("LLAMAQuantumTunnelingOptimizerV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV17 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV17" + ).set_name("LLAMAQuantumTunnelingOptimizerV17", register=True) +except Exception as e: # QuantumTunnelingOptimizerV17 print("QuantumTunnelingOptimizerV17 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV18 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV18 import QuantumTunnelingOptimizerV18 lama_register["QuantumTunnelingOptimizerV18"] = QuantumTunnelingOptimizerV18 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV18 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV18").set_name("LLAMAQuantumTunnelingOptimizerV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV18 = NonObjectOptimizer( + method="LLAMAQuantumTunnelingOptimizerV18" + ).set_name("LLAMAQuantumTunnelingOptimizerV18", register=True) +except Exception as e: # QuantumTunnelingOptimizerV18 print("QuantumTunnelingOptimizerV18 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV2 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV2 import QuantumTunnelingOptimizerV2 lama_register["QuantumTunnelingOptimizerV2"] = QuantumTunnelingOptimizerV2 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2").set_name("LLAMAQuantumTunnelingOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV2 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV2").set_name( + "LLAMAQuantumTunnelingOptimizerV2", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV2 print("QuantumTunnelingOptimizerV2 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV3 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV3 import QuantumTunnelingOptimizerV3 lama_register["QuantumTunnelingOptimizerV3"] = QuantumTunnelingOptimizerV3 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3").set_name("LLAMAQuantumTunnelingOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV3 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV3").set_name( + "LLAMAQuantumTunnelingOptimizerV3", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV3 print("QuantumTunnelingOptimizerV3 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV4 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV4 import QuantumTunnelingOptimizerV4 lama_register["QuantumTunnelingOptimizerV4"] = QuantumTunnelingOptimizerV4 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4").set_name("LLAMAQuantumTunnelingOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV4 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV4").set_name( + "LLAMAQuantumTunnelingOptimizerV4", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV4 print("QuantumTunnelingOptimizerV4 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV5 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV5 import QuantumTunnelingOptimizerV5 lama_register["QuantumTunnelingOptimizerV5"] = QuantumTunnelingOptimizerV5 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5").set_name("LLAMAQuantumTunnelingOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV5 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV5").set_name( + "LLAMAQuantumTunnelingOptimizerV5", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV5 print("QuantumTunnelingOptimizerV5 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV6 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV6 import QuantumTunnelingOptimizerV6 lama_register["QuantumTunnelingOptimizerV6"] = QuantumTunnelingOptimizerV6 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6").set_name("LLAMAQuantumTunnelingOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV6 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV6").set_name( + "LLAMAQuantumTunnelingOptimizerV6", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV6 print("QuantumTunnelingOptimizerV6 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV7 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV7 import QuantumTunnelingOptimizerV7 lama_register["QuantumTunnelingOptimizerV7"] = QuantumTunnelingOptimizerV7 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7").set_name("LLAMAQuantumTunnelingOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV7 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV7").set_name( + "LLAMAQuantumTunnelingOptimizerV7", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV7 print("QuantumTunnelingOptimizerV7 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV8 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV8 import QuantumTunnelingOptimizerV8 lama_register["QuantumTunnelingOptimizerV8"] = QuantumTunnelingOptimizerV8 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8").set_name("LLAMAQuantumTunnelingOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV8 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV8").set_name( + "LLAMAQuantumTunnelingOptimizerV8", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV8 print("QuantumTunnelingOptimizerV8 can not be imported: ", e) -try: +try: # QuantumTunnelingOptimizerV9 from nevergrad.optimization.lama.QuantumTunnelingOptimizerV9 import QuantumTunnelingOptimizerV9 lama_register["QuantumTunnelingOptimizerV9"] = QuantumTunnelingOptimizerV9 - res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAQuantumTunnelingOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9").set_name("LLAMAQuantumTunnelingOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAQuantumTunnelingOptimizerV9 = NonObjectOptimizer(method="LLAMAQuantumTunnelingOptimizerV9").set_name( + "LLAMAQuantumTunnelingOptimizerV9", register=True + ) +except Exception as e: # QuantumTunnelingOptimizerV9 print("QuantumTunnelingOptimizerV9 can not be imported: ", e) -try: +try: # RADE from nevergrad.optimization.lama.RADE import RADE lama_register["RADE"] = RADE - res = NonObjectOptimizer(method="LLAMARADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADE = NonObjectOptimizer(method="LLAMARADE").set_name("LLAMARADE", register=True) -except Exception as e: +except Exception as e: # RADE print("RADE can not be imported: ", e) -try: +try: # RADEA from nevergrad.optimization.lama.RADEA import RADEA lama_register["RADEA"] = RADEA - res = NonObjectOptimizer(method="LLAMARADEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADEA = NonObjectOptimizer(method="LLAMARADEA").set_name("LLAMARADEA", register=True) -except Exception as e: +except Exception as e: # RADEA print("RADEA can not be imported: ", e) -try: +try: # RADECM from nevergrad.optimization.lama.RADECM import RADECM lama_register["RADECM"] = RADECM - res = NonObjectOptimizer(method="LLAMARADECM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADECM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADECM = NonObjectOptimizer(method="LLAMARADECM").set_name("LLAMARADECM", register=True) -except Exception as e: +except Exception as e: # RADECM print("RADECM can not be imported: ", e) -try: +try: # RADEDM from nevergrad.optimization.lama.RADEDM import RADEDM lama_register["RADEDM"] = RADEDM - res = NonObjectOptimizer(method="LLAMARADEDM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADEDM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADEDM = NonObjectOptimizer(method="LLAMARADEDM").set_name("LLAMARADEDM", register=True) -except Exception as e: +except Exception as e: # RADEDM print("RADEDM can not be imported: ", e) -try: +try: # RADEEM from nevergrad.optimization.lama.RADEEM import RADEEM lama_register["RADEEM"] = RADEEM - res = NonObjectOptimizer(method="LLAMARADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADEEM = NonObjectOptimizer(method="LLAMARADEEM").set_name("LLAMARADEEM", register=True) -except Exception as e: +except Exception as e: # RADEEM print("RADEEM can not be imported: ", e) -try: +try: # RADEPM from nevergrad.optimization.lama.RADEPM import RADEPM lama_register["RADEPM"] = RADEPM - res = NonObjectOptimizer(method="LLAMARADEPM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARADEPM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARADEPM = NonObjectOptimizer(method="LLAMARADEPM").set_name("LLAMARADEPM", register=True) -except Exception as e: +except Exception as e: # RADEPM print("RADEPM can not be imported: ", e) -try: +try: # RADSDiffEvo from nevergrad.optimization.lama.RADSDiffEvo import RADSDiffEvo lama_register["RADSDiffEvo"] = RADSDiffEvo - res = NonObjectOptimizer(method="LLAMARADSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARADSDiffEvo = NonObjectOptimizer(method="LLAMARADSDiffEvo").set_name("LLAMARADSDiffEvo", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARADSDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARADSDiffEvo = NonObjectOptimizer(method="LLAMARADSDiffEvo").set_name( + "LLAMARADSDiffEvo", register=True + ) +except Exception as e: # RADSDiffEvo print("RADSDiffEvo can not be imported: ", e) -try: +try: # RAGCES from nevergrad.optimization.lama.RAGCES import RAGCES lama_register["RAGCES"] = RAGCES - res = NonObjectOptimizer(method="LLAMARAGCES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAGCES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAGCES = NonObjectOptimizer(method="LLAMARAGCES").set_name("LLAMARAGCES", register=True) -except Exception as e: +except Exception as e: # RAGCES print("RAGCES can not be imported: ", e) -try: +try: # RAGEA from nevergrad.optimization.lama.RAGEA import RAGEA lama_register["RAGEA"] = RAGEA - res = NonObjectOptimizer(method="LLAMARAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAGEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAGEA = NonObjectOptimizer(method="LLAMARAGEA").set_name("LLAMARAGEA", register=True) -except Exception as e: +except Exception as e: # RAGEA print("RAGEA can not be imported: ", e) -try: +try: # RAHDEMI from nevergrad.optimization.lama.RAHDEMI import RAHDEMI lama_register["RAHDEMI"] = RAHDEMI - res = NonObjectOptimizer(method="LLAMARAHDEMI")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAHDEMI")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAHDEMI = NonObjectOptimizer(method="LLAMARAHDEMI").set_name("LLAMARAHDEMI", register=True) -except Exception as e: +except Exception as e: # RAHDEMI print("RAHDEMI can not be imported: ", e) -try: +try: # RALES from nevergrad.optimization.lama.RALES import RALES lama_register["RALES"] = RALES - res = NonObjectOptimizer(method="LLAMARALES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARALES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARALES = NonObjectOptimizer(method="LLAMARALES").set_name("LLAMARALES", register=True) -except Exception as e: +except Exception as e: # RALES print("RALES can not be imported: ", e) -try: +try: # RAMDE from nevergrad.optimization.lama.RAMDE import RAMDE lama_register["RAMDE"] = RAMDE - res = NonObjectOptimizer(method="LLAMARAMDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAMDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAMDE = NonObjectOptimizer(method="LLAMARAMDE").set_name("LLAMARAMDE", register=True) -except Exception as e: +except Exception as e: # RAMDE print("RAMDE can not be imported: ", e) -try: +try: # RAMEDS from nevergrad.optimization.lama.RAMEDS import RAMEDS lama_register["RAMEDS"] = RAMEDS - res = NonObjectOptimizer(method="LLAMARAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAMEDS = NonObjectOptimizer(method="LLAMARAMEDS").set_name("LLAMARAMEDS", register=True) -except Exception as e: +except Exception as e: # RAMEDS print("RAMEDS can not be imported: ", e) -try: +try: # RAMEDSPlus from nevergrad.optimization.lama.RAMEDSPlus import RAMEDSPlus lama_register["RAMEDSPlus"] = RAMEDSPlus - res = NonObjectOptimizer(method="LLAMARAMEDSPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAMEDSPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAMEDSPlus = NonObjectOptimizer(method="LLAMARAMEDSPlus").set_name("LLAMARAMEDSPlus", register=True) -except Exception as e: +except Exception as e: # RAMEDSPlus print("RAMEDSPlus can not be imported: ", e) -try: +try: # RAMEDSPro from nevergrad.optimization.lama.RAMEDSPro import RAMEDSPro lama_register["RAMEDSPro"] = RAMEDSPro - res = NonObjectOptimizer(method="LLAMARAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAMEDSPro")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAMEDSPro = NonObjectOptimizer(method="LLAMARAMEDSPro").set_name("LLAMARAMEDSPro", register=True) -except Exception as e: +except Exception as e: # RAMEDSPro print("RAMEDSPro can not be imported: ", e) -try: +try: # RAMSDiffEvo from nevergrad.optimization.lama.RAMSDiffEvo import RAMSDiffEvo lama_register["RAMSDiffEvo"] = RAMSDiffEvo - res = NonObjectOptimizer(method="LLAMARAMSDiffEvo")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARAMSDiffEvo = NonObjectOptimizer(method="LLAMARAMSDiffEvo").set_name("LLAMARAMSDiffEvo", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARAMSDiffEvo")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARAMSDiffEvo = NonObjectOptimizer(method="LLAMARAMSDiffEvo").set_name( + "LLAMARAMSDiffEvo", register=True + ) +except Exception as e: # RAMSDiffEvo print("RAMSDiffEvo can not be imported: ", e) -try: +try: # RAPDE from nevergrad.optimization.lama.RAPDE import RAPDE lama_register["RAPDE"] = RAPDE - res = NonObjectOptimizer(method="LLAMARAPDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAPDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAPDE = NonObjectOptimizer(method="LLAMARAPDE").set_name("LLAMARAPDE", register=True) -except Exception as e: +except Exception as e: # RAPDE print("RAPDE can not be imported: ", e) -try: +try: # RASES from nevergrad.optimization.lama.RASES import RASES lama_register["RASES"] = RASES - res = NonObjectOptimizer(method="LLAMARASES")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARASES")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARASES = NonObjectOptimizer(method="LLAMARASES").set_name("LLAMARASES", register=True) -except Exception as e: +except Exception as e: # RASES print("RASES can not be imported: ", e) -try: +try: # RAVDE from nevergrad.optimization.lama.RAVDE import RAVDE lama_register["RAVDE"] = RAVDE - res = NonObjectOptimizer(method="LLAMARAVDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARAVDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARAVDE = NonObjectOptimizer(method="LLAMARAVDE").set_name("LLAMARAVDE", register=True) -except Exception as e: +except Exception as e: # RAVDE print("RAVDE can not be imported: ", e) -try: +try: # RDACE from nevergrad.optimization.lama.RDACE import RDACE lama_register["RDACE"] = RDACE - res = NonObjectOptimizer(method="LLAMARDACE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARDACE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARDACE = NonObjectOptimizer(method="LLAMARDACE").set_name("LLAMARDACE", register=True) -except Exception as e: +except Exception as e: # RDACE print("RDACE can not be imported: ", e) -try: +try: # RDSAS from nevergrad.optimization.lama.RDSAS import RDSAS lama_register["RDSAS"] = RDSAS - res = NonObjectOptimizer(method="LLAMARDSAS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARDSAS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARDSAS = NonObjectOptimizer(method="LLAMARDSAS").set_name("LLAMARDSAS", register=True) -except Exception as e: +except Exception as e: # RDSAS print("RDSAS can not be imported: ", e) -try: +try: # READEPMC from nevergrad.optimization.lama.READEPMC import READEPMC lama_register["READEPMC"] = READEPMC - res = NonObjectOptimizer(method="LLAMAREADEPMC")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAREADEPMC")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAREADEPMC = NonObjectOptimizer(method="LLAMAREADEPMC").set_name("LLAMAREADEPMC", register=True) -except Exception as e: +except Exception as e: # READEPMC print("READEPMC can not be imported: ", e) -try: +try: # REAMSEA from nevergrad.optimization.lama.REAMSEA import REAMSEA lama_register["REAMSEA"] = REAMSEA - res = NonObjectOptimizer(method="LLAMAREAMSEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAREAMSEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAREAMSEA = NonObjectOptimizer(method="LLAMAREAMSEA").set_name("LLAMAREAMSEA", register=True) -except Exception as e: +except Exception as e: # REAMSEA print("REAMSEA can not be imported: ", e) -try: +try: # RE_ADMMMS from nevergrad.optimization.lama.RE_ADMMMS import RE_ADMMMS lama_register["RE_ADMMMS"] = RE_ADMMMS - res = NonObjectOptimizer(method="LLAMARE_ADMMMS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARE_ADMMMS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARE_ADMMMS = NonObjectOptimizer(method="LLAMARE_ADMMMS").set_name("LLAMARE_ADMMMS", register=True) -except Exception as e: +except Exception as e: # RE_ADMMMS print("RE_ADMMMS can not be imported: ", e) -try: +try: # RPWDE from nevergrad.optimization.lama.RPWDE import RPWDE lama_register["RPWDE"] = RPWDE - res = NonObjectOptimizer(method="LLAMARPWDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMARPWDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMARPWDE = NonObjectOptimizer(method="LLAMARPWDE").set_name("LLAMARPWDE", register=True) -except Exception as e: +except Exception as e: # RPWDE print("RPWDE can not be imported: ", e) -try: +try: # RankingDifferentialEvolution from nevergrad.optimization.lama.RankingDifferentialEvolution import RankingDifferentialEvolution lama_register["RankingDifferentialEvolution"] = RankingDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARankingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARankingDifferentialEvolution = NonObjectOptimizer(method="LLAMARankingDifferentialEvolution").set_name("LLAMARankingDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARankingDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARankingDifferentialEvolution = NonObjectOptimizer( + method="LLAMARankingDifferentialEvolution" + ).set_name("LLAMARankingDifferentialEvolution", register=True) +except Exception as e: # RankingDifferentialEvolution print("RankingDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveClusteredDifferentialEvolution import RefinedAdaptiveClusteredDifferentialEvolution - - lama_register["RefinedAdaptiveClusteredDifferentialEvolution"] = RefinedAdaptiveClusteredDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveClusteredDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveClusteredDifferentialEvolution").set_name("LLAMARefinedAdaptiveClusteredDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptiveClusteredDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveClusteredDifferentialEvolution import ( + RefinedAdaptiveClusteredDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveClusteredDifferentialEvolution"] = ( + RefinedAdaptiveClusteredDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveClusteredDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveClusteredDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveClusteredDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveClusteredDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveClusteredDifferentialEvolution print("RefinedAdaptiveClusteredDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixAdaptation import RefinedAdaptiveCovarianceMatrixAdaptation +try: # RefinedAdaptiveCovarianceMatrixAdaptation + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixAdaptation import ( + RefinedAdaptiveCovarianceMatrixAdaptation, + ) lama_register["RefinedAdaptiveCovarianceMatrixAdaptation"] = RefinedAdaptiveCovarianceMatrixAdaptation - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation").set_name("LLAMARefinedAdaptiveCovarianceMatrixAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveCovarianceMatrixAdaptation = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCovarianceMatrixAdaptation" + ).set_name("LLAMARefinedAdaptiveCovarianceMatrixAdaptation", register=True) +except Exception as e: # RefinedAdaptiveCovarianceMatrixAdaptation print("RefinedAdaptiveCovarianceMatrixAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixEvolution import RefinedAdaptiveCovarianceMatrixEvolution +try: # RefinedAdaptiveCovarianceMatrixEvolution + from nevergrad.optimization.lama.RefinedAdaptiveCovarianceMatrixEvolution import ( + RefinedAdaptiveCovarianceMatrixEvolution, + ) lama_register["RefinedAdaptiveCovarianceMatrixEvolution"] = RefinedAdaptiveCovarianceMatrixEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixEvolution").set_name("LLAMARefinedAdaptiveCovarianceMatrixEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCovarianceMatrixEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveCovarianceMatrixEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCovarianceMatrixEvolution" + ).set_name("LLAMARefinedAdaptiveCovarianceMatrixEvolution", register=True) +except Exception as e: # RefinedAdaptiveCovarianceMatrixEvolution print("RefinedAdaptiveCovarianceMatrixEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveCrossoverElitistStrategyV7 import RefinedAdaptiveCrossoverElitistStrategyV7 +try: # RefinedAdaptiveCrossoverElitistStrategyV7 + from nevergrad.optimization.lama.RefinedAdaptiveCrossoverElitistStrategyV7 import ( + RefinedAdaptiveCrossoverElitistStrategyV7, + ) lama_register["RefinedAdaptiveCrossoverElitistStrategyV7"] = RefinedAdaptiveCrossoverElitistStrategyV7 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveCrossoverElitistStrategyV7 = NonObjectOptimizer(method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7").set_name("LLAMARefinedAdaptiveCrossoverElitistStrategyV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveCrossoverElitistStrategyV7 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveCrossoverElitistStrategyV7" + ).set_name("LLAMARefinedAdaptiveCrossoverElitistStrategyV7", register=True) +except Exception as e: # RefinedAdaptiveCrossoverElitistStrategyV7 print("RefinedAdaptiveCrossoverElitistStrategyV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolution import RefinedAdaptiveDifferentialEvolution +try: # RefinedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolution import ( + RefinedAdaptiveDifferentialEvolution, + ) lama_register["RefinedAdaptiveDifferentialEvolution"] = RefinedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolution").set_name("LLAMARefinedAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveDifferentialEvolution print("RefinedAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionStrategy import RefinedAdaptiveDifferentialEvolutionStrategy - - lama_register["RefinedAdaptiveDifferentialEvolutionStrategy"] = RefinedAdaptiveDifferentialEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy").set_name("LLAMARefinedAdaptiveDifferentialEvolutionStrategy", register=True) -except Exception as e: +try: # RefinedAdaptiveDifferentialEvolutionStrategy + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionStrategy import ( + RefinedAdaptiveDifferentialEvolutionStrategy, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionStrategy"] = ( + RefinedAdaptiveDifferentialEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionStrategy", register=True) +except Exception as e: # RefinedAdaptiveDifferentialEvolutionStrategy print("RefinedAdaptiveDifferentialEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation import RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation - - lama_register["RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation").set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) -except Exception as e: +try: # RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation import ( + RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation"] = ( + RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation", register=True) +except Exception as e: # RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation print("RefinedAdaptiveDifferentialEvolutionWithAdaptivePerturbation can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithGradientBoost import RefinedAdaptiveDifferentialEvolutionWithGradientBoost - - lama_register["RefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = RefinedAdaptiveDifferentialEvolutionWithGradientBoost - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) -except Exception as e: +try: # RefinedAdaptiveDifferentialEvolutionWithGradientBoost + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialEvolutionWithGradientBoost import ( + RefinedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["RefinedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + RefinedAdaptiveDifferentialEvolutionWithGradientBoost + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMARefinedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: # RefinedAdaptiveDifferentialEvolutionWithGradientBoost print("RefinedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSearch import RefinedAdaptiveDifferentialSearch +try: # RefinedAdaptiveDifferentialSearch + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSearch import ( + RefinedAdaptiveDifferentialSearch, + ) lama_register["RefinedAdaptiveDifferentialSearch"] = RefinedAdaptiveDifferentialSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSearch").set_name("LLAMARefinedAdaptiveDifferentialSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialSearch" + ).set_name("LLAMARefinedAdaptiveDifferentialSearch", register=True) +except Exception as e: # RefinedAdaptiveDifferentialSearch print("RefinedAdaptiveDifferentialSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSpiralSearch import RefinedAdaptiveDifferentialSpiralSearch +try: # RefinedAdaptiveDifferentialSpiralSearch + from nevergrad.optimization.lama.RefinedAdaptiveDifferentialSpiralSearch import ( + RefinedAdaptiveDifferentialSpiralSearch, + ) lama_register["RefinedAdaptiveDifferentialSpiralSearch"] = RefinedAdaptiveDifferentialSpiralSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSpiralSearch").set_name("LLAMARefinedAdaptiveDifferentialSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDifferentialSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDifferentialSpiralSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDifferentialSpiralSearch" + ).set_name("LLAMARefinedAdaptiveDifferentialSpiralSearch", register=True) +except Exception as e: # RefinedAdaptiveDifferentialSpiralSearch print("RefinedAdaptiveDifferentialSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDimensionalClimbingStrategy import RefinedAdaptiveDimensionalClimbingStrategy +try: # RefinedAdaptiveDimensionalClimbingStrategy + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalClimbingStrategy import ( + RefinedAdaptiveDimensionalClimbingStrategy, + ) lama_register["RefinedAdaptiveDimensionalClimbingStrategy"] = RefinedAdaptiveDimensionalClimbingStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDimensionalClimbingStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalClimbingStrategy").set_name("LLAMARefinedAdaptiveDimensionalClimbingStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalClimbingStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDimensionalClimbingStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDimensionalClimbingStrategy" + ).set_name("LLAMARefinedAdaptiveDimensionalClimbingStrategy", register=True) +except Exception as e: # RefinedAdaptiveDimensionalClimbingStrategy print("RefinedAdaptiveDimensionalClimbingStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDimensionalCrossoverEvolver import RefinedAdaptiveDimensionalCrossoverEvolver +try: # RefinedAdaptiveDimensionalCrossoverEvolver + from nevergrad.optimization.lama.RefinedAdaptiveDimensionalCrossoverEvolver import ( + RefinedAdaptiveDimensionalCrossoverEvolver, + ) lama_register["RefinedAdaptiveDimensionalCrossoverEvolver"] = RefinedAdaptiveDimensionalCrossoverEvolver - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver").set_name("LLAMARefinedAdaptiveDimensionalCrossoverEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDimensionalCrossoverEvolver = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDimensionalCrossoverEvolver" + ).set_name("LLAMARefinedAdaptiveDimensionalCrossoverEvolver", register=True) +except Exception as e: # RefinedAdaptiveDimensionalCrossoverEvolver print("RefinedAdaptiveDimensionalCrossoverEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDirectionalBiasQuorumOptimization import RefinedAdaptiveDirectionalBiasQuorumOptimization - - lama_register["RefinedAdaptiveDirectionalBiasQuorumOptimization"] = RefinedAdaptiveDirectionalBiasQuorumOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization").set_name("LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization", register=True) -except Exception as e: +try: # RefinedAdaptiveDirectionalBiasQuorumOptimization + from nevergrad.optimization.lama.RefinedAdaptiveDirectionalBiasQuorumOptimization import ( + RefinedAdaptiveDirectionalBiasQuorumOptimization, + ) + + lama_register["RefinedAdaptiveDirectionalBiasQuorumOptimization"] = ( + RefinedAdaptiveDirectionalBiasQuorumOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization" + ).set_name("LLAMARefinedAdaptiveDirectionalBiasQuorumOptimization", register=True) +except Exception as e: # RefinedAdaptiveDirectionalBiasQuorumOptimization print("RefinedAdaptiveDirectionalBiasQuorumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDivergenceClusteringSearch import RefinedAdaptiveDivergenceClusteringSearch +try: # RefinedAdaptiveDivergenceClusteringSearch + from nevergrad.optimization.lama.RefinedAdaptiveDivergenceClusteringSearch import ( + RefinedAdaptiveDivergenceClusteringSearch, + ) lama_register["RefinedAdaptiveDivergenceClusteringSearch"] = RefinedAdaptiveDivergenceClusteringSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDivergenceClusteringSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveDivergenceClusteringSearch").set_name("LLAMARefinedAdaptiveDivergenceClusteringSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDivergenceClusteringSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDivergenceClusteringSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDivergenceClusteringSearch" + ).set_name("LLAMARefinedAdaptiveDivergenceClusteringSearch", register=True) +except Exception as e: # RefinedAdaptiveDivergenceClusteringSearch print("RefinedAdaptiveDivergenceClusteringSearch can not be imported: ", e) -try: +try: # RefinedAdaptiveDiversityPSO from nevergrad.optimization.lama.RefinedAdaptiveDiversityPSO import RefinedAdaptiveDiversityPSO lama_register["RefinedAdaptiveDiversityPSO"] = RefinedAdaptiveDiversityPSO - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO").set_name("LLAMARefinedAdaptiveDiversityPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDiversityPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveDiversityPSO").set_name( + "LLAMARefinedAdaptiveDiversityPSO", register=True + ) +except Exception as e: # RefinedAdaptiveDiversityPSO print("RefinedAdaptiveDiversityPSO can not be imported: ", e) -try: +try: # RefinedAdaptiveDualPhaseStrategy from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategy import RefinedAdaptiveDualPhaseStrategy lama_register["RefinedAdaptiveDualPhaseStrategy"] = RefinedAdaptiveDualPhaseStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDualPhaseStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategy").set_name("LLAMARefinedAdaptiveDualPhaseStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDualPhaseStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDualPhaseStrategy" + ).set_name("LLAMARefinedAdaptiveDualPhaseStrategy", register=True) +except Exception as e: # RefinedAdaptiveDualPhaseStrategy print("RefinedAdaptiveDualPhaseStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategyV3 import RefinedAdaptiveDualPhaseStrategyV3 +try: # RefinedAdaptiveDualPhaseStrategyV3 + from nevergrad.optimization.lama.RefinedAdaptiveDualPhaseStrategyV3 import ( + RefinedAdaptiveDualPhaseStrategyV3, + ) lama_register["RefinedAdaptiveDualPhaseStrategyV3"] = RefinedAdaptiveDualPhaseStrategyV3 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDualPhaseStrategyV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategyV3").set_name("LLAMARefinedAdaptiveDualPhaseStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDualPhaseStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDualPhaseStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDualPhaseStrategyV3" + ).set_name("LLAMARefinedAdaptiveDualPhaseStrategyV3", register=True) +except Exception as e: # RefinedAdaptiveDualPhaseStrategyV3 print("RefinedAdaptiveDualPhaseStrategyV3 can not be imported: ", e) -try: +try: # RefinedAdaptiveDynamicDE from nevergrad.optimization.lama.RefinedAdaptiveDynamicDE import RefinedAdaptiveDynamicDE lama_register["RefinedAdaptiveDynamicDE"] = RefinedAdaptiveDynamicDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE").set_name("LLAMARefinedAdaptiveDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDE").set_name( + "LLAMARefinedAdaptiveDynamicDE", register=True + ) +except Exception as e: # RefinedAdaptiveDynamicDE print("RefinedAdaptiveDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV14 import RefinedAdaptiveDynamicDualPhaseStrategyV14 +try: # RefinedAdaptiveDynamicDualPhaseStrategyV14 + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV14 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV14, + ) lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV14"] = RefinedAdaptiveDynamicDualPhaseStrategyV14 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV14", register=True) +except Exception as e: # RefinedAdaptiveDynamicDualPhaseStrategyV14 print("RefinedAdaptiveDynamicDualPhaseStrategyV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV17 import RefinedAdaptiveDynamicDualPhaseStrategyV17 +try: # RefinedAdaptiveDynamicDualPhaseStrategyV17 + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV17 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV17, + ) lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV17"] = RefinedAdaptiveDynamicDualPhaseStrategyV17 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV17", register=True) +except Exception as e: # RefinedAdaptiveDynamicDualPhaseStrategyV17 print("RefinedAdaptiveDynamicDualPhaseStrategyV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV20 import RefinedAdaptiveDynamicDualPhaseStrategyV20 +try: # RefinedAdaptiveDynamicDualPhaseStrategyV20 + from nevergrad.optimization.lama.RefinedAdaptiveDynamicDualPhaseStrategyV20 import ( + RefinedAdaptiveDynamicDualPhaseStrategyV20, + ) lama_register["RefinedAdaptiveDynamicDualPhaseStrategyV20"] = RefinedAdaptiveDynamicDualPhaseStrategyV20 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20").set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20" + ).set_name("LLAMARefinedAdaptiveDynamicDualPhaseStrategyV20", register=True) +except Exception as e: # RefinedAdaptiveDynamicDualPhaseStrategyV20 print("RefinedAdaptiveDynamicDualPhaseStrategyV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicExplorationOptimization import RefinedAdaptiveDynamicExplorationOptimization - - lama_register["RefinedAdaptiveDynamicExplorationOptimization"] = RefinedAdaptiveDynamicExplorationOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicExplorationOptimization").set_name("LLAMARefinedAdaptiveDynamicExplorationOptimization", register=True) -except Exception as e: +try: # RefinedAdaptiveDynamicExplorationOptimization + from nevergrad.optimization.lama.RefinedAdaptiveDynamicExplorationOptimization import ( + RefinedAdaptiveDynamicExplorationOptimization, + ) + + lama_register["RefinedAdaptiveDynamicExplorationOptimization"] = ( + RefinedAdaptiveDynamicExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicExplorationOptimization" + ).set_name("LLAMARefinedAdaptiveDynamicExplorationOptimization", register=True) +except Exception as e: # RefinedAdaptiveDynamicExplorationOptimization print("RefinedAdaptiveDynamicExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm import RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm - - lama_register["RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm").set_name("LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: +try: # RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm import ( + RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm, + ) + + lama_register["RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm"] = ( + RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm" + ).set_name("LLAMARefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm print("RefinedAdaptiveDynamicMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveDynamicStrategyV25 import RefinedAdaptiveDynamicStrategyV25 +try: # RefinedAdaptiveDynamicStrategyV25 + from nevergrad.optimization.lama.RefinedAdaptiveDynamicStrategyV25 import ( + RefinedAdaptiveDynamicStrategyV25, + ) lama_register["RefinedAdaptiveDynamicStrategyV25"] = RefinedAdaptiveDynamicStrategyV25 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicStrategyV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveDynamicStrategyV25 = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicStrategyV25").set_name("LLAMARefinedAdaptiveDynamicStrategyV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveDynamicStrategyV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveDynamicStrategyV25 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveDynamicStrategyV25" + ).set_name("LLAMARefinedAdaptiveDynamicStrategyV25", register=True) +except Exception as e: # RefinedAdaptiveDynamicStrategyV25 print("RefinedAdaptiveDynamicStrategyV25 can not be imported: ", e) -try: +try: # RefinedAdaptiveEliteGuidedDE from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedDE import RefinedAdaptiveEliteGuidedDE lama_register["RefinedAdaptiveEliteGuidedDE"] = RefinedAdaptiveEliteGuidedDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEliteGuidedDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedDE").set_name("LLAMARefinedAdaptiveEliteGuidedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEliteGuidedDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedDE" + ).set_name("LLAMARefinedAdaptiveEliteGuidedDE", register=True) +except Exception as e: # RefinedAdaptiveEliteGuidedDE print("RefinedAdaptiveEliteGuidedDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE import RefinedAdaptiveEliteGuidedMutationDE +try: # RefinedAdaptiveEliteGuidedMutationDE + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE import ( + RefinedAdaptiveEliteGuidedMutationDE, + ) lama_register["RefinedAdaptiveEliteGuidedMutationDE"] = RefinedAdaptiveEliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE").set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedMutationDE" + ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE", register=True) +except Exception as e: # RefinedAdaptiveEliteGuidedMutationDE print("RefinedAdaptiveEliteGuidedMutationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE_v5 import RefinedAdaptiveEliteGuidedMutationDE_v5 +try: # RefinedAdaptiveEliteGuidedMutationDE_v5 + from nevergrad.optimization.lama.RefinedAdaptiveEliteGuidedMutationDE_v5 import ( + RefinedAdaptiveEliteGuidedMutationDE_v5, + ) lama_register["RefinedAdaptiveEliteGuidedMutationDE_v5"] = RefinedAdaptiveEliteGuidedMutationDE_v5 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEliteGuidedMutationDE_v5 = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5").set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE_v5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEliteGuidedMutationDE_v5 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEliteGuidedMutationDE_v5" + ).set_name("LLAMARefinedAdaptiveEliteGuidedMutationDE_v5", register=True) +except Exception as e: # RefinedAdaptiveEliteGuidedMutationDE_v5 print("RefinedAdaptiveEliteGuidedMutationDE_v5 can not be imported: ", e) -try: +try: # RefinedAdaptiveElitistDE_v4 from nevergrad.optimization.lama.RefinedAdaptiveElitistDE_v4 import RefinedAdaptiveElitistDE_v4 lama_register["RefinedAdaptiveElitistDE_v4"] = RefinedAdaptiveElitistDE_v4 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveElitistDE_v4 = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4").set_name("LLAMARefinedAdaptiveElitistDE_v4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveElitistDE_v4 = NonObjectOptimizer(method="LLAMARefinedAdaptiveElitistDE_v4").set_name( + "LLAMARefinedAdaptiveElitistDE_v4", register=True + ) +except Exception as e: # RefinedAdaptiveElitistDE_v4 print("RefinedAdaptiveElitistDE_v4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch import RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch - - lama_register["RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch").set_name("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) -except Exception as e: +try: # RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch import ( + RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch, + ) + + lama_register["RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch"] = ( + RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch" + ).set_name("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch", register=True) +except Exception as e: # RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch print("RefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedGradientGuidedHybridPSO import RefinedAdaptiveEnhancedGradientGuidedHybridPSO - - lama_register["RefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = RefinedAdaptiveEnhancedGradientGuidedHybridPSO - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO").set_name("LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) -except Exception as e: +try: # RefinedAdaptiveEnhancedGradientGuidedHybridPSO + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedGradientGuidedHybridPSO import ( + RefinedAdaptiveEnhancedGradientGuidedHybridPSO, + ) + + lama_register["RefinedAdaptiveEnhancedGradientGuidedHybridPSO"] = ( + RefinedAdaptiveEnhancedGradientGuidedHybridPSO + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO" + ).set_name("LLAMARefinedAdaptiveEnhancedGradientGuidedHybridPSO", register=True) +except Exception as e: # RefinedAdaptiveEnhancedGradientGuidedHybridPSO print("RefinedAdaptiveEnhancedGradientGuidedHybridPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 import RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 - - lama_register["RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2"] = RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2").set_name("LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2", register=True) -except Exception as e: +try: # RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 + from nevergrad.optimization.lama.RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 import ( + RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2, + ) + + lama_register["RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2"] = ( + RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2" + ).set_name("LLAMARefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2", register=True) +except Exception as e: # RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 print("RefinedAdaptiveEnhancedSuperchargedAQAPSO_LS_DIW_AP_V2 can not be imported: ", e) -try: +try: # RefinedAdaptiveEvolutionStrategy from nevergrad.optimization.lama.RefinedAdaptiveEvolutionStrategy import RefinedAdaptiveEvolutionStrategy lama_register["RefinedAdaptiveEvolutionStrategy"] = RefinedAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveEvolutionStrategy").set_name("LLAMARefinedAdaptiveEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveEvolutionStrategy", register=True) +except Exception as e: # RefinedAdaptiveEvolutionStrategy print("RefinedAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveExplorationOptimizer import RefinedAdaptiveExplorationOptimizer +try: # RefinedAdaptiveExplorationOptimizer + from nevergrad.optimization.lama.RefinedAdaptiveExplorationOptimizer import ( + RefinedAdaptiveExplorationOptimizer, + ) lama_register["RefinedAdaptiveExplorationOptimizer"] = RefinedAdaptiveExplorationOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveExplorationOptimizer").set_name("LLAMARefinedAdaptiveExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveExplorationOptimizer" + ).set_name("LLAMARefinedAdaptiveExplorationOptimizer", register=True) +except Exception as e: # RefinedAdaptiveExplorationOptimizer print("RefinedAdaptiveExplorationOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingOptimizerV5 import RefinedAdaptiveGlobalClimbingOptimizerV5 +try: # RefinedAdaptiveGlobalClimbingOptimizerV5 + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingOptimizerV5 import ( + RefinedAdaptiveGlobalClimbingOptimizerV5, + ) lama_register["RefinedAdaptiveGlobalClimbingOptimizerV5"] = RefinedAdaptiveGlobalClimbingOptimizerV5 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGlobalClimbingOptimizerV5 = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5").set_name("LLAMARefinedAdaptiveGlobalClimbingOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGlobalClimbingOptimizerV5 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGlobalClimbingOptimizerV5" + ).set_name("LLAMARefinedAdaptiveGlobalClimbingOptimizerV5", register=True) +except Exception as e: # RefinedAdaptiveGlobalClimbingOptimizerV5 print("RefinedAdaptiveGlobalClimbingOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingStrategy import RefinedAdaptiveGlobalClimbingStrategy +try: # RefinedAdaptiveGlobalClimbingStrategy + from nevergrad.optimization.lama.RefinedAdaptiveGlobalClimbingStrategy import ( + RefinedAdaptiveGlobalClimbingStrategy, + ) lama_register["RefinedAdaptiveGlobalClimbingStrategy"] = RefinedAdaptiveGlobalClimbingStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGlobalClimbingStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingStrategy").set_name("LLAMARefinedAdaptiveGlobalClimbingStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGlobalClimbingStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGlobalClimbingStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGlobalClimbingStrategy" + ).set_name("LLAMARefinedAdaptiveGlobalClimbingStrategy", register=True) +except Exception as e: # RefinedAdaptiveGlobalClimbingStrategy print("RefinedAdaptiveGlobalClimbingStrategy can not be imported: ", e) -try: +try: # RefinedAdaptiveGradientCrossover from nevergrad.optimization.lama.RefinedAdaptiveGradientCrossover import RefinedAdaptiveGradientCrossover lama_register["RefinedAdaptiveGradientCrossover"] = RefinedAdaptiveGradientCrossover - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientCrossover = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientCrossover").set_name("LLAMARefinedAdaptiveGradientCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientCrossover = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientCrossover" + ).set_name("LLAMARefinedAdaptiveGradientCrossover", register=True) +except Exception as e: # RefinedAdaptiveGradientCrossover print("RefinedAdaptiveGradientCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientDifferentialEvolution import RefinedAdaptiveGradientDifferentialEvolution - - lama_register["RefinedAdaptiveGradientDifferentialEvolution"] = RefinedAdaptiveGradientDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientDifferentialEvolution").set_name("LLAMARefinedAdaptiveGradientDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptiveGradientDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveGradientDifferentialEvolution import ( + RefinedAdaptiveGradientDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveGradientDifferentialEvolution"] = ( + RefinedAdaptiveGradientDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveGradientDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveGradientDifferentialEvolution print("RefinedAdaptiveGradientDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientEnhancedRAMEDS import RefinedAdaptiveGradientEnhancedRAMEDS +try: # RefinedAdaptiveGradientEnhancedRAMEDS + from nevergrad.optimization.lama.RefinedAdaptiveGradientEnhancedRAMEDS import ( + RefinedAdaptiveGradientEnhancedRAMEDS, + ) lama_register["RefinedAdaptiveGradientEnhancedRAMEDS"] = RefinedAdaptiveGradientEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS").set_name("LLAMARefinedAdaptiveGradientEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientEnhancedRAMEDS" + ).set_name("LLAMARefinedAdaptiveGradientEnhancedRAMEDS", register=True) +except Exception as e: # RefinedAdaptiveGradientEnhancedRAMEDS print("RefinedAdaptiveGradientEnhancedRAMEDS can not be imported: ", e) -try: +try: # RefinedAdaptiveGradientEvolverV2 from nevergrad.optimization.lama.RefinedAdaptiveGradientEvolverV2 import RefinedAdaptiveGradientEvolverV2 lama_register["RefinedAdaptiveGradientEvolverV2"] = RefinedAdaptiveGradientEvolverV2 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientEvolverV2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEvolverV2").set_name("LLAMARefinedAdaptiveGradientEvolverV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientEvolverV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientEvolverV2" + ).set_name("LLAMARefinedAdaptiveGradientEvolverV2", register=True) +except Exception as e: # RefinedAdaptiveGradientEvolverV2 print("RefinedAdaptiveGradientEvolverV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientGuidedEvolution import RefinedAdaptiveGradientGuidedEvolution +try: # RefinedAdaptiveGradientGuidedEvolution + from nevergrad.optimization.lama.RefinedAdaptiveGradientGuidedEvolution import ( + RefinedAdaptiveGradientGuidedEvolution, + ) lama_register["RefinedAdaptiveGradientGuidedEvolution"] = RefinedAdaptiveGradientGuidedEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientGuidedEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientGuidedEvolution").set_name("LLAMARefinedAdaptiveGradientGuidedEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientGuidedEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientGuidedEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientGuidedEvolution" + ).set_name("LLAMARefinedAdaptiveGradientGuidedEvolution", register=True) +except Exception as e: # RefinedAdaptiveGradientGuidedEvolution print("RefinedAdaptiveGradientGuidedEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGradientHybridOptimizer import RefinedAdaptiveGradientHybridOptimizer +try: # RefinedAdaptiveGradientHybridOptimizer + from nevergrad.optimization.lama.RefinedAdaptiveGradientHybridOptimizer import ( + RefinedAdaptiveGradientHybridOptimizer, + ) lama_register["RefinedAdaptiveGradientHybridOptimizer"] = RefinedAdaptiveGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGradientHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientHybridOptimizer").set_name("LLAMARefinedAdaptiveGradientHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGradientHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveGradientHybridOptimizer", register=True) +except Exception as e: # RefinedAdaptiveGradientHybridOptimizer print("RefinedAdaptiveGradientHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveGuidedEvolutionStrategy import RefinedAdaptiveGuidedEvolutionStrategy +try: # RefinedAdaptiveGuidedEvolutionStrategy + from nevergrad.optimization.lama.RefinedAdaptiveGuidedEvolutionStrategy import ( + RefinedAdaptiveGuidedEvolutionStrategy, + ) lama_register["RefinedAdaptiveGuidedEvolutionStrategy"] = RefinedAdaptiveGuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveGuidedEvolutionStrategy").set_name("LLAMARefinedAdaptiveGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveGuidedEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveGuidedEvolutionStrategy", register=True) +except Exception as e: # RefinedAdaptiveGuidedEvolutionStrategy print("RefinedAdaptiveGuidedEvolutionStrategy can not be imported: ", e) -try: +try: # RefinedAdaptiveHybridDE from nevergrad.optimization.lama.RefinedAdaptiveHybridDE import RefinedAdaptiveHybridDE lama_register["RefinedAdaptiveHybridDE"] = RefinedAdaptiveHybridDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE").set_name("LLAMARefinedAdaptiveHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridDE").set_name( + "LLAMARefinedAdaptiveHybridDE", register=True + ) +except Exception as e: # RefinedAdaptiveHybridDE print("RefinedAdaptiveHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridEvolutionStrategyV6 import RefinedAdaptiveHybridEvolutionStrategyV6 +try: # RefinedAdaptiveHybridEvolutionStrategyV6 + from nevergrad.optimization.lama.RefinedAdaptiveHybridEvolutionStrategyV6 import ( + RefinedAdaptiveHybridEvolutionStrategyV6, + ) lama_register["RefinedAdaptiveHybridEvolutionStrategyV6"] = RefinedAdaptiveHybridEvolutionStrategyV6 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridEvolutionStrategyV6 = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6").set_name("LLAMARefinedAdaptiveHybridEvolutionStrategyV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridEvolutionStrategyV6 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridEvolutionStrategyV6" + ).set_name("LLAMARefinedAdaptiveHybridEvolutionStrategyV6", register=True) +except Exception as e: # RefinedAdaptiveHybridEvolutionStrategyV6 print("RefinedAdaptiveHybridEvolutionStrategyV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimization import RefinedAdaptiveHybridOptimization +try: # RefinedAdaptiveHybridOptimization + from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimization import ( + RefinedAdaptiveHybridOptimization, + ) lama_register["RefinedAdaptiveHybridOptimization"] = RefinedAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimization").set_name("LLAMARefinedAdaptiveHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridOptimization" + ).set_name("LLAMARefinedAdaptiveHybridOptimization", register=True) +except Exception as e: # RefinedAdaptiveHybridOptimization print("RefinedAdaptiveHybridOptimization can not be imported: ", e) -try: +try: # RefinedAdaptiveHybridOptimizer from nevergrad.optimization.lama.RefinedAdaptiveHybridOptimizer import RefinedAdaptiveHybridOptimizer lama_register["RefinedAdaptiveHybridOptimizer"] = RefinedAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimizer").set_name("LLAMARefinedAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveHybridOptimizer", register=True) +except Exception as e: # RefinedAdaptiveHybridOptimizer print("RefinedAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridParticleSwarmDifferentialEvolution import RefinedAdaptiveHybridParticleSwarmDifferentialEvolution - - lama_register["RefinedAdaptiveHybridParticleSwarmDifferentialEvolution"] = RefinedAdaptiveHybridParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution").set_name("LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptiveHybridParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveHybridParticleSwarmDifferentialEvolution import ( + RefinedAdaptiveHybridParticleSwarmDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveHybridParticleSwarmDifferentialEvolution"] = ( + RefinedAdaptiveHybridParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveHybridParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveHybridParticleSwarmDifferentialEvolution print("RefinedAdaptiveHybridParticleSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridQuasiRandomGradientDE import RefinedAdaptiveHybridQuasiRandomGradientDE +try: # RefinedAdaptiveHybridQuasiRandomGradientDE + from nevergrad.optimization.lama.RefinedAdaptiveHybridQuasiRandomGradientDE import ( + RefinedAdaptiveHybridQuasiRandomGradientDE, + ) lama_register["RefinedAdaptiveHybridQuasiRandomGradientDE"] = RefinedAdaptiveHybridQuasiRandomGradientDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE").set_name("LLAMARefinedAdaptiveHybridQuasiRandomGradientDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridQuasiRandomGradientDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridQuasiRandomGradientDE" + ).set_name("LLAMARefinedAdaptiveHybridQuasiRandomGradientDE", register=True) +except Exception as e: # RefinedAdaptiveHybridQuasiRandomGradientDE print("RefinedAdaptiveHybridQuasiRandomGradientDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveHybridSwarmEvolutionOptimization import RefinedAdaptiveHybridSwarmEvolutionOptimization - - lama_register["RefinedAdaptiveHybridSwarmEvolutionOptimization"] = RefinedAdaptiveHybridSwarmEvolutionOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization").set_name("LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization", register=True) -except Exception as e: +try: # RefinedAdaptiveHybridSwarmEvolutionOptimization + from nevergrad.optimization.lama.RefinedAdaptiveHybridSwarmEvolutionOptimization import ( + RefinedAdaptiveHybridSwarmEvolutionOptimization, + ) + + lama_register["RefinedAdaptiveHybridSwarmEvolutionOptimization"] = ( + RefinedAdaptiveHybridSwarmEvolutionOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization" + ).set_name("LLAMARefinedAdaptiveHybridSwarmEvolutionOptimization", register=True) +except Exception as e: # RefinedAdaptiveHybridSwarmEvolutionOptimization print("RefinedAdaptiveHybridSwarmEvolutionOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveIncrementalCrossover import RefinedAdaptiveIncrementalCrossover +try: # RefinedAdaptiveIncrementalCrossover + from nevergrad.optimization.lama.RefinedAdaptiveIncrementalCrossover import ( + RefinedAdaptiveIncrementalCrossover, + ) lama_register["RefinedAdaptiveIncrementalCrossover"] = RefinedAdaptiveIncrementalCrossover - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIncrementalCrossover")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveIncrementalCrossover = NonObjectOptimizer(method="LLAMARefinedAdaptiveIncrementalCrossover").set_name("LLAMARefinedAdaptiveIncrementalCrossover", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIncrementalCrossover")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveIncrementalCrossover = NonObjectOptimizer( + method="LLAMARefinedAdaptiveIncrementalCrossover" + ).set_name("LLAMARefinedAdaptiveIncrementalCrossover", register=True) +except Exception as e: # RefinedAdaptiveIncrementalCrossover print("RefinedAdaptiveIncrementalCrossover can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveIslandEvolutionStrategy import RefinedAdaptiveIslandEvolutionStrategy +try: # RefinedAdaptiveIslandEvolutionStrategy + from nevergrad.optimization.lama.RefinedAdaptiveIslandEvolutionStrategy import ( + RefinedAdaptiveIslandEvolutionStrategy, + ) lama_register["RefinedAdaptiveIslandEvolutionStrategy"] = RefinedAdaptiveIslandEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveIslandEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptiveIslandEvolutionStrategy").set_name("LLAMARefinedAdaptiveIslandEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveIslandEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveIslandEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptiveIslandEvolutionStrategy" + ).set_name("LLAMARefinedAdaptiveIslandEvolutionStrategy", register=True) +except Exception as e: # RefinedAdaptiveIslandEvolutionStrategy print("RefinedAdaptiveIslandEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMemeticDifferentialEvolution import RefinedAdaptiveMemeticDifferentialEvolution +try: # RefinedAdaptiveMemeticDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDifferentialEvolution import ( + RefinedAdaptiveMemeticDifferentialEvolution, + ) lama_register["RefinedAdaptiveMemeticDifferentialEvolution"] = RefinedAdaptiveMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDifferentialEvolution").set_name("LLAMARefinedAdaptiveMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemeticDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveMemeticDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveMemeticDifferentialEvolution print("RefinedAdaptiveMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMemeticDiverseOptimizer import RefinedAdaptiveMemeticDiverseOptimizer +try: # RefinedAdaptiveMemeticDiverseOptimizer + from nevergrad.optimization.lama.RefinedAdaptiveMemeticDiverseOptimizer import ( + RefinedAdaptiveMemeticDiverseOptimizer, + ) lama_register["RefinedAdaptiveMemeticDiverseOptimizer"] = RefinedAdaptiveMemeticDiverseOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDiverseOptimizer").set_name("LLAMARefinedAdaptiveMemeticDiverseOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemeticDiverseOptimizer" + ).set_name("LLAMARefinedAdaptiveMemeticDiverseOptimizer", register=True) +except Exception as e: # RefinedAdaptiveMemeticDiverseOptimizer print("RefinedAdaptiveMemeticDiverseOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedSearch import RefinedAdaptiveMemoryEnhancedSearch +try: # RefinedAdaptiveMemoryEnhancedSearch + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedSearch import ( + RefinedAdaptiveMemoryEnhancedSearch, + ) lama_register["RefinedAdaptiveMemoryEnhancedSearch"] = RefinedAdaptiveMemoryEnhancedSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedSearch").set_name("LLAMARefinedAdaptiveMemoryEnhancedSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMemoryEnhancedSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryEnhancedSearch" + ).set_name("LLAMARefinedAdaptiveMemoryEnhancedSearch", register=True) +except Exception as e: # RefinedAdaptiveMemoryEnhancedSearch print("RefinedAdaptiveMemoryEnhancedSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedStrategyV55 import RefinedAdaptiveMemoryEnhancedStrategyV55 +try: # RefinedAdaptiveMemoryEnhancedStrategyV55 + from nevergrad.optimization.lama.RefinedAdaptiveMemoryEnhancedStrategyV55 import ( + RefinedAdaptiveMemoryEnhancedStrategyV55, + ) lama_register["RefinedAdaptiveMemoryEnhancedStrategyV55"] = RefinedAdaptiveMemoryEnhancedStrategyV55 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMemoryEnhancedStrategyV55 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55").set_name("LLAMARefinedAdaptiveMemoryEnhancedStrategyV55", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMemoryEnhancedStrategyV55 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryEnhancedStrategyV55" + ).set_name("LLAMARefinedAdaptiveMemoryEnhancedStrategyV55", register=True) +except Exception as e: # RefinedAdaptiveMemoryEnhancedStrategyV55 print("RefinedAdaptiveMemoryEnhancedStrategyV55 can not be imported: ", e) -try: +try: # RefinedAdaptiveMemoryStrategyV67 from nevergrad.optimization.lama.RefinedAdaptiveMemoryStrategyV67 import RefinedAdaptiveMemoryStrategyV67 lama_register["RefinedAdaptiveMemoryStrategyV67"] = RefinedAdaptiveMemoryStrategyV67 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryStrategyV67")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMemoryStrategyV67 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryStrategyV67").set_name("LLAMARefinedAdaptiveMemoryStrategyV67", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMemoryStrategyV67")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMemoryStrategyV67 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMemoryStrategyV67" + ).set_name("LLAMARefinedAdaptiveMemoryStrategyV67", register=True) +except Exception as e: # RefinedAdaptiveMemoryStrategyV67 print("RefinedAdaptiveMemoryStrategyV67 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiOperatorSearch import RefinedAdaptiveMultiOperatorSearch +try: # RefinedAdaptiveMultiOperatorSearch + from nevergrad.optimization.lama.RefinedAdaptiveMultiOperatorSearch import ( + RefinedAdaptiveMultiOperatorSearch, + ) lama_register["RefinedAdaptiveMultiOperatorSearch"] = RefinedAdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiOperatorSearch").set_name("LLAMARefinedAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiOperatorSearch" + ).set_name("LLAMARefinedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: # RefinedAdaptiveMultiOperatorSearch print("RefinedAdaptiveMultiOperatorSearch can not be imported: ", e) -try: +try: # RefinedAdaptiveMultiStrategyDE from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE import RefinedAdaptiveMultiStrategyDE lama_register["RefinedAdaptiveMultiStrategyDE"] = RefinedAdaptiveMultiStrategyDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE").set_name("LLAMARefinedAdaptiveMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDE" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDE", register=True) +except Exception as e: # RefinedAdaptiveMultiStrategyDE print("RefinedAdaptiveMultiStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE_v2 import RefinedAdaptiveMultiStrategyDE_v2 +try: # RefinedAdaptiveMultiStrategyDE_v2 + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDE_v2 import ( + RefinedAdaptiveMultiStrategyDE_v2, + ) lama_register["RefinedAdaptiveMultiStrategyDE_v2"] = RefinedAdaptiveMultiStrategyDE_v2 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMultiStrategyDE_v2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE_v2").set_name("LLAMARefinedAdaptiveMultiStrategyDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMultiStrategyDE_v2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDE_v2" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDE_v2", register=True) +except Exception as e: # RefinedAdaptiveMultiStrategyDE_v2 print("RefinedAdaptiveMultiStrategyDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolution import RefinedAdaptiveMultiStrategyDifferentialEvolution - - lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolution"] = RefinedAdaptiveMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptiveMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolution import ( + RefinedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolution"] = ( + RefinedAdaptiveMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveMultiStrategyDifferentialEvolution print("RefinedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 import RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 - - lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolutionV2"] = RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2").set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2", register=True) -except Exception as e: +try: # RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 + from nevergrad.optimization.lama.RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 import ( + RefinedAdaptiveMultiStrategyDifferentialEvolutionV2, + ) + + lama_register["RefinedAdaptiveMultiStrategyDifferentialEvolutionV2"] = ( + RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2" + ).set_name("LLAMARefinedAdaptiveMultiStrategyDifferentialEvolutionV2", register=True) +except Exception as e: # RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 print("RefinedAdaptiveMultiStrategyDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveParameterStrategyV38 import RefinedAdaptiveParameterStrategyV38 +try: # RefinedAdaptiveParameterStrategyV38 + from nevergrad.optimization.lama.RefinedAdaptiveParameterStrategyV38 import ( + RefinedAdaptiveParameterStrategyV38, + ) lama_register["RefinedAdaptiveParameterStrategyV38"] = RefinedAdaptiveParameterStrategyV38 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveParameterStrategyV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveParameterStrategyV38 = NonObjectOptimizer(method="LLAMARefinedAdaptiveParameterStrategyV38").set_name("LLAMARefinedAdaptiveParameterStrategyV38", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveParameterStrategyV38")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveParameterStrategyV38 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveParameterStrategyV38" + ).set_name("LLAMARefinedAdaptiveParameterStrategyV38", register=True) +except Exception as e: # RefinedAdaptiveParameterStrategyV38 print("RefinedAdaptiveParameterStrategyV38 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - - lama_register["RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True) -except Exception as e: - print("RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - - lama_register["RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch").set_name("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) -except Exception as e: +try: # RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + from nevergrad.optimization.lama.RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch import ( + RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch"] = ( + RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch" + ).set_name( + "LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch", register=True + ) +except Exception as e: # RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch + print( + "RefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e + ) +try: # RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + from nevergrad.optimization.lama.RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch import ( + RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch, + ) + + lama_register["RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch"] = ( + RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch" + ).set_name("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch", register=True) +except Exception as e: # RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch print("RefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionBalanceStrategy import RefinedAdaptivePrecisionBalanceStrategy +try: # RefinedAdaptivePrecisionBalanceStrategy + from nevergrad.optimization.lama.RefinedAdaptivePrecisionBalanceStrategy import ( + RefinedAdaptivePrecisionBalanceStrategy, + ) lama_register["RefinedAdaptivePrecisionBalanceStrategy"] = RefinedAdaptivePrecisionBalanceStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionBalanceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionBalanceStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionBalanceStrategy").set_name("LLAMARefinedAdaptivePrecisionBalanceStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionBalanceStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionBalanceStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionBalanceStrategy" + ).set_name("LLAMARefinedAdaptivePrecisionBalanceStrategy", register=True) +except Exception as e: # RefinedAdaptivePrecisionBalanceStrategy print("RefinedAdaptivePrecisionBalanceStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV4 import RefinedAdaptivePrecisionCohortOptimizationV4 - - lama_register["RefinedAdaptivePrecisionCohortOptimizationV4"] = RefinedAdaptivePrecisionCohortOptimizationV4 - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionCohortOptimizationV4 = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4").set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV4", register=True) -except Exception as e: +try: # RefinedAdaptivePrecisionCohortOptimizationV4 + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV4 import ( + RefinedAdaptivePrecisionCohortOptimizationV4, + ) + + lama_register["RefinedAdaptivePrecisionCohortOptimizationV4"] = ( + RefinedAdaptivePrecisionCohortOptimizationV4 + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionCohortOptimizationV4 = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionCohortOptimizationV4" + ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV4", register=True) +except Exception as e: # RefinedAdaptivePrecisionCohortOptimizationV4 print("RefinedAdaptivePrecisionCohortOptimizationV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV6 import RefinedAdaptivePrecisionCohortOptimizationV6 - - lama_register["RefinedAdaptivePrecisionCohortOptimizationV6"] = RefinedAdaptivePrecisionCohortOptimizationV6 - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionCohortOptimizationV6 = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6").set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV6", register=True) -except Exception as e: +try: # RefinedAdaptivePrecisionCohortOptimizationV6 + from nevergrad.optimization.lama.RefinedAdaptivePrecisionCohortOptimizationV6 import ( + RefinedAdaptivePrecisionCohortOptimizationV6, + ) + + lama_register["RefinedAdaptivePrecisionCohortOptimizationV6"] = ( + RefinedAdaptivePrecisionCohortOptimizationV6 + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionCohortOptimizationV6 = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionCohortOptimizationV6" + ).set_name("LLAMARefinedAdaptivePrecisionCohortOptimizationV6", register=True) +except Exception as e: # RefinedAdaptivePrecisionCohortOptimizationV6 print("RefinedAdaptivePrecisionCohortOptimizationV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionDifferentialEvolution import RefinedAdaptivePrecisionDifferentialEvolution - - lama_register["RefinedAdaptivePrecisionDifferentialEvolution"] = RefinedAdaptivePrecisionDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDifferentialEvolution").set_name("LLAMARefinedAdaptivePrecisionDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptivePrecisionDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDifferentialEvolution import ( + RefinedAdaptivePrecisionDifferentialEvolution, + ) + + lama_register["RefinedAdaptivePrecisionDifferentialEvolution"] = ( + RefinedAdaptivePrecisionDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionDifferentialEvolution" + ).set_name("LLAMARefinedAdaptivePrecisionDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptivePrecisionDifferentialEvolution print("RefinedAdaptivePrecisionDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionDivideSearch import RefinedAdaptivePrecisionDivideSearch +try: # RefinedAdaptivePrecisionDivideSearch + from nevergrad.optimization.lama.RefinedAdaptivePrecisionDivideSearch import ( + RefinedAdaptivePrecisionDivideSearch, + ) lama_register["RefinedAdaptivePrecisionDivideSearch"] = RefinedAdaptivePrecisionDivideSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionDivideSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDivideSearch").set_name("LLAMARefinedAdaptivePrecisionDivideSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionDivideSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionDivideSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionDivideSearch" + ).set_name("LLAMARefinedAdaptivePrecisionDivideSearch", register=True) +except Exception as e: # RefinedAdaptivePrecisionDivideSearch print("RefinedAdaptivePrecisionDivideSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionEvolutionStrategy import RefinedAdaptivePrecisionEvolutionStrategy +try: # RefinedAdaptivePrecisionEvolutionStrategy + from nevergrad.optimization.lama.RefinedAdaptivePrecisionEvolutionStrategy import ( + RefinedAdaptivePrecisionEvolutionStrategy, + ) lama_register["RefinedAdaptivePrecisionEvolutionStrategy"] = RefinedAdaptivePrecisionEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionEvolutionStrategy").set_name("LLAMARefinedAdaptivePrecisionEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionEvolutionStrategy" + ).set_name("LLAMARefinedAdaptivePrecisionEvolutionStrategy", register=True) +except Exception as e: # RefinedAdaptivePrecisionEvolutionStrategy print("RefinedAdaptivePrecisionEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionFocalHybrid import RefinedAdaptivePrecisionFocalHybrid +try: # RefinedAdaptivePrecisionFocalHybrid + from nevergrad.optimization.lama.RefinedAdaptivePrecisionFocalHybrid import ( + RefinedAdaptivePrecisionFocalHybrid, + ) lama_register["RefinedAdaptivePrecisionFocalHybrid"] = RefinedAdaptivePrecisionFocalHybrid - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionFocalHybrid")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionFocalHybrid = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionFocalHybrid").set_name("LLAMARefinedAdaptivePrecisionFocalHybrid", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionFocalHybrid")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionFocalHybrid = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionFocalHybrid" + ).set_name("LLAMARefinedAdaptivePrecisionFocalHybrid", register=True) +except Exception as e: # RefinedAdaptivePrecisionFocalHybrid print("RefinedAdaptivePrecisionFocalHybrid can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionHybridSearch import RefinedAdaptivePrecisionHybridSearch +try: # RefinedAdaptivePrecisionHybridSearch + from nevergrad.optimization.lama.RefinedAdaptivePrecisionHybridSearch import ( + RefinedAdaptivePrecisionHybridSearch, + ) lama_register["RefinedAdaptivePrecisionHybridSearch"] = RefinedAdaptivePrecisionHybridSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionHybridSearch = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionHybridSearch").set_name("LLAMARefinedAdaptivePrecisionHybridSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionHybridSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionHybridSearch" + ).set_name("LLAMARefinedAdaptivePrecisionHybridSearch", register=True) +except Exception as e: # RefinedAdaptivePrecisionHybridSearch print("RefinedAdaptivePrecisionHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptivePrecisionStrategicOptimizer import RefinedAdaptivePrecisionStrategicOptimizer +try: # RefinedAdaptivePrecisionStrategicOptimizer + from nevergrad.optimization.lama.RefinedAdaptivePrecisionStrategicOptimizer import ( + RefinedAdaptivePrecisionStrategicOptimizer, + ) lama_register["RefinedAdaptivePrecisionStrategicOptimizer"] = RefinedAdaptivePrecisionStrategicOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionStrategicOptimizer").set_name("LLAMARefinedAdaptivePrecisionStrategicOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptivePrecisionStrategicOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptivePrecisionStrategicOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptivePrecisionStrategicOptimizer" + ).set_name("LLAMARefinedAdaptivePrecisionStrategicOptimizer", register=True) +except Exception as e: # RefinedAdaptivePrecisionStrategicOptimizer print("RefinedAdaptivePrecisionStrategicOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumCrossoverStrategyV3 import RefinedAdaptiveQuantumCrossoverStrategyV3 +try: # RefinedAdaptiveQuantumCrossoverStrategyV3 + from nevergrad.optimization.lama.RefinedAdaptiveQuantumCrossoverStrategyV3 import ( + RefinedAdaptiveQuantumCrossoverStrategyV3, + ) lama_register["RefinedAdaptiveQuantumCrossoverStrategyV3"] = RefinedAdaptiveQuantumCrossoverStrategyV3 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumCrossoverStrategyV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3").set_name("LLAMARefinedAdaptiveQuantumCrossoverStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumCrossoverStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumCrossoverStrategyV3" + ).set_name("LLAMARefinedAdaptiveQuantumCrossoverStrategyV3", register=True) +except Exception as e: # RefinedAdaptiveQuantumCrossoverStrategyV3 print("RefinedAdaptiveQuantumCrossoverStrategyV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolution import RefinedAdaptiveQuantumDifferentialEvolution +try: # RefinedAdaptiveQuantumDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolution import ( + RefinedAdaptiveQuantumDifferentialEvolution, + ) lama_register["RefinedAdaptiveQuantumDifferentialEvolution"] = RefinedAdaptiveQuantumDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolution").set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveQuantumDifferentialEvolution print("RefinedAdaptiveQuantumDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolutionPlus import RefinedAdaptiveQuantumDifferentialEvolutionPlus - - lama_register["RefinedAdaptiveQuantumDifferentialEvolutionPlus"] = RefinedAdaptiveQuantumDifferentialEvolutionPlus - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus").set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus", register=True) -except Exception as e: +try: # RefinedAdaptiveQuantumDifferentialEvolutionPlus + from nevergrad.optimization.lama.RefinedAdaptiveQuantumDifferentialEvolutionPlus import ( + RefinedAdaptiveQuantumDifferentialEvolutionPlus, + ) + + lama_register["RefinedAdaptiveQuantumDifferentialEvolutionPlus"] = ( + RefinedAdaptiveQuantumDifferentialEvolutionPlus + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus" + ).set_name("LLAMARefinedAdaptiveQuantumDifferentialEvolutionPlus", register=True) +except Exception as e: # RefinedAdaptiveQuantumDifferentialEvolutionPlus print("RefinedAdaptiveQuantumDifferentialEvolutionPlus can not be imported: ", e) -try: +try: # RefinedAdaptiveQuantumEliteDE from nevergrad.optimization.lama.RefinedAdaptiveQuantumEliteDE import RefinedAdaptiveQuantumEliteDE lama_register["RefinedAdaptiveQuantumEliteDE"] = RefinedAdaptiveQuantumEliteDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEliteDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumEliteDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEliteDE").set_name("LLAMARefinedAdaptiveQuantumEliteDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEliteDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumEliteDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumEliteDE" + ).set_name("LLAMARefinedAdaptiveQuantumEliteDE", register=True) +except Exception as e: # RefinedAdaptiveQuantumEliteDE print("RefinedAdaptiveQuantumEliteDE can not be imported: ", e) -try: +try: # RefinedAdaptiveQuantumEntropyDE from nevergrad.optimization.lama.RefinedAdaptiveQuantumEntropyDE import RefinedAdaptiveQuantumEntropyDE lama_register["RefinedAdaptiveQuantumEntropyDE"] = RefinedAdaptiveQuantumEntropyDE - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumEntropyDE = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEntropyDE").set_name("LLAMARefinedAdaptiveQuantumEntropyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumEntropyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumEntropyDE = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumEntropyDE" + ).set_name("LLAMARefinedAdaptiveQuantumEntropyDE", register=True) +except Exception as e: # RefinedAdaptiveQuantumEntropyDE print("RefinedAdaptiveQuantumEntropyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientBoostedMemeticSearch import RefinedAdaptiveQuantumGradientBoostedMemeticSearch - - lama_register["RefinedAdaptiveQuantumGradientBoostedMemeticSearch"] = RefinedAdaptiveQuantumGradientBoostedMemeticSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch").set_name("LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch", register=True) -except Exception as e: +try: # RefinedAdaptiveQuantumGradientBoostedMemeticSearch + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientBoostedMemeticSearch import ( + RefinedAdaptiveQuantumGradientBoostedMemeticSearch, + ) + + lama_register["RefinedAdaptiveQuantumGradientBoostedMemeticSearch"] = ( + RefinedAdaptiveQuantumGradientBoostedMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch" + ).set_name("LLAMARefinedAdaptiveQuantumGradientBoostedMemeticSearch", register=True) +except Exception as e: # RefinedAdaptiveQuantumGradientBoostedMemeticSearch print("RefinedAdaptiveQuantumGradientBoostedMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientExplorationOptimization import RefinedAdaptiveQuantumGradientExplorationOptimization - - lama_register["RefinedAdaptiveQuantumGradientExplorationOptimization"] = RefinedAdaptiveQuantumGradientExplorationOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization").set_name("LLAMARefinedAdaptiveQuantumGradientExplorationOptimization", register=True) -except Exception as e: +try: # RefinedAdaptiveQuantumGradientExplorationOptimization + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientExplorationOptimization import ( + RefinedAdaptiveQuantumGradientExplorationOptimization, + ) + + lama_register["RefinedAdaptiveQuantumGradientExplorationOptimization"] = ( + RefinedAdaptiveQuantumGradientExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumGradientExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientExplorationOptimization" + ).set_name("LLAMARefinedAdaptiveQuantumGradientExplorationOptimization", register=True) +except Exception as e: # RefinedAdaptiveQuantumGradientExplorationOptimization print("RefinedAdaptiveQuantumGradientExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientHybridOptimizer import RefinedAdaptiveQuantumGradientHybridOptimizer - - lama_register["RefinedAdaptiveQuantumGradientHybridOptimizer"] = RefinedAdaptiveQuantumGradientHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer").set_name("LLAMARefinedAdaptiveQuantumGradientHybridOptimizer", register=True) -except Exception as e: +try: # RefinedAdaptiveQuantumGradientHybridOptimizer + from nevergrad.optimization.lama.RefinedAdaptiveQuantumGradientHybridOptimizer import ( + RefinedAdaptiveQuantumGradientHybridOptimizer, + ) + + lama_register["RefinedAdaptiveQuantumGradientHybridOptimizer"] = ( + RefinedAdaptiveQuantumGradientHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumGradientHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumGradientHybridOptimizer" + ).set_name("LLAMARefinedAdaptiveQuantumGradientHybridOptimizer", register=True) +except Exception as e: # RefinedAdaptiveQuantumGradientHybridOptimizer print("RefinedAdaptiveQuantumGradientHybridOptimizer can not be imported: ", e) -try: +try: # RefinedAdaptiveQuantumPSO from nevergrad.optimization.lama.RefinedAdaptiveQuantumPSO import RefinedAdaptiveQuantumPSO lama_register["RefinedAdaptiveQuantumPSO"] = RefinedAdaptiveQuantumPSO - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO").set_name("LLAMARefinedAdaptiveQuantumPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumPSO").set_name( + "LLAMARefinedAdaptiveQuantumPSO", register=True + ) +except Exception as e: # RefinedAdaptiveQuantumPSO print("RefinedAdaptiveQuantumPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuantumSwarmOptimizerV3 import RefinedAdaptiveQuantumSwarmOptimizerV3 +try: # RefinedAdaptiveQuantumSwarmOptimizerV3 + from nevergrad.optimization.lama.RefinedAdaptiveQuantumSwarmOptimizerV3 import ( + RefinedAdaptiveQuantumSwarmOptimizerV3, + ) lama_register["RefinedAdaptiveQuantumSwarmOptimizerV3"] = RefinedAdaptiveQuantumSwarmOptimizerV3 - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuantumSwarmOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3").set_name("LLAMARefinedAdaptiveQuantumSwarmOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuantumSwarmOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuantumSwarmOptimizerV3" + ).set_name("LLAMARefinedAdaptiveQuantumSwarmOptimizerV3", register=True) +except Exception as e: # RefinedAdaptiveQuantumSwarmOptimizerV3 print("RefinedAdaptiveQuantumSwarmOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomDEGradientAnnealing import RefinedAdaptiveQuasiRandomDEGradientAnnealing - - lama_register["RefinedAdaptiveQuasiRandomDEGradientAnnealing"] = RefinedAdaptiveQuasiRandomDEGradientAnnealing - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing").set_name("LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing", register=True) -except Exception as e: +try: # RefinedAdaptiveQuasiRandomDEGradientAnnealing + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomDEGradientAnnealing import ( + RefinedAdaptiveQuasiRandomDEGradientAnnealing, + ) + + lama_register["RefinedAdaptiveQuasiRandomDEGradientAnnealing"] = ( + RefinedAdaptiveQuasiRandomDEGradientAnnealing + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing" + ).set_name("LLAMARefinedAdaptiveQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: # RefinedAdaptiveQuasiRandomDEGradientAnnealing print("RefinedAdaptiveQuasiRandomDEGradientAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution import RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution - - lama_register["RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution"] = RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution").set_name("LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution import ( + RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution, + ) + + lama_register["RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution"] = ( + RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution print("RefinedAdaptiveQuasiRandomEnhancedDifferentialEvolution can not be imported: ", e) -try: +try: # RefinedAdaptiveRefinementPSO from nevergrad.optimization.lama.RefinedAdaptiveRefinementPSO import RefinedAdaptiveRefinementPSO lama_register["RefinedAdaptiveRefinementPSO"] = RefinedAdaptiveRefinementPSO - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveRefinementPSO = NonObjectOptimizer(method="LLAMARefinedAdaptiveRefinementPSO").set_name("LLAMARefinedAdaptiveRefinementPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveRefinementPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveRefinementPSO = NonObjectOptimizer( + method="LLAMARefinedAdaptiveRefinementPSO" + ).set_name("LLAMARefinedAdaptiveRefinementPSO", register=True) +except Exception as e: # RefinedAdaptiveRefinementPSO print("RefinedAdaptiveRefinementPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveSimulatedAnnealingWithSmartMemory import RefinedAdaptiveSimulatedAnnealingWithSmartMemory - - lama_register["RefinedAdaptiveSimulatedAnnealingWithSmartMemory"] = RefinedAdaptiveSimulatedAnnealingWithSmartMemory - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer(method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory").set_name("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) -except Exception as e: +try: # RefinedAdaptiveSimulatedAnnealingWithSmartMemory + from nevergrad.optimization.lama.RefinedAdaptiveSimulatedAnnealingWithSmartMemory import ( + RefinedAdaptiveSimulatedAnnealingWithSmartMemory, + ) + + lama_register["RefinedAdaptiveSimulatedAnnealingWithSmartMemory"] = ( + RefinedAdaptiveSimulatedAnnealingWithSmartMemory + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory" + ).set_name("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory", register=True) +except Exception as e: # RefinedAdaptiveSimulatedAnnealingWithSmartMemory print("RefinedAdaptiveSimulatedAnnealingWithSmartMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveSpatialExplorationOptimizer import RefinedAdaptiveSpatialExplorationOptimizer +try: # RefinedAdaptiveSpatialExplorationOptimizer + from nevergrad.optimization.lama.RefinedAdaptiveSpatialExplorationOptimizer import ( + RefinedAdaptiveSpatialExplorationOptimizer, + ) lama_register["RefinedAdaptiveSpatialExplorationOptimizer"] = RefinedAdaptiveSpatialExplorationOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialExplorationOptimizer").set_name("LLAMARefinedAdaptiveSpatialExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSpatialExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpatialExplorationOptimizer" + ).set_name("LLAMARefinedAdaptiveSpatialExplorationOptimizer", register=True) +except Exception as e: # RefinedAdaptiveSpatialExplorationOptimizer print("RefinedAdaptiveSpatialExplorationOptimizer can not be imported: ", e) -try: +try: # RefinedAdaptiveSpatialOptimizer from nevergrad.optimization.lama.RefinedAdaptiveSpatialOptimizer import RefinedAdaptiveSpatialOptimizer lama_register["RefinedAdaptiveSpatialOptimizer"] = RefinedAdaptiveSpatialOptimizer - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSpatialOptimizer = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialOptimizer").set_name("LLAMARefinedAdaptiveSpatialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpatialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSpatialOptimizer = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpatialOptimizer" + ).set_name("LLAMARefinedAdaptiveSpatialOptimizer", register=True) +except Exception as e: # RefinedAdaptiveSpatialOptimizer print("RefinedAdaptiveSpatialOptimizer can not be imported: ", e) -try: +try: # RefinedAdaptiveSpectralEvolution from nevergrad.optimization.lama.RefinedAdaptiveSpectralEvolution import RefinedAdaptiveSpectralEvolution lama_register["RefinedAdaptiveSpectralEvolution"] = RefinedAdaptiveSpectralEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpectralEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSpectralEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpectralEvolution").set_name("LLAMARefinedAdaptiveSpectralEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpectralEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSpectralEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpectralEvolution" + ).set_name("LLAMARefinedAdaptiveSpectralEvolution", register=True) +except Exception as e: # RefinedAdaptiveSpectralEvolution print("RefinedAdaptiveSpectralEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveSpiralGradientSearch import RefinedAdaptiveSpiralGradientSearch +try: # RefinedAdaptiveSpiralGradientSearch + from nevergrad.optimization.lama.RefinedAdaptiveSpiralGradientSearch import ( + RefinedAdaptiveSpiralGradientSearch, + ) lama_register["RefinedAdaptiveSpiralGradientSearch"] = RefinedAdaptiveSpiralGradientSearch - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpiralGradientSearch").set_name("LLAMARefinedAdaptiveSpiralGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSpiralGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSpiralGradientSearch = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSpiralGradientSearch" + ).set_name("LLAMARefinedAdaptiveSpiralGradientSearch", register=True) +except Exception as e: # RefinedAdaptiveSpiralGradientSearch print("RefinedAdaptiveSpiralGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveStochasticGradientQuorumOptimization import RefinedAdaptiveStochasticGradientQuorumOptimization - - lama_register["RefinedAdaptiveStochasticGradientQuorumOptimization"] = RefinedAdaptiveStochasticGradientQuorumOptimization - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization").set_name("LLAMARefinedAdaptiveStochasticGradientQuorumOptimization", register=True) -except Exception as e: +try: # RefinedAdaptiveStochasticGradientQuorumOptimization + from nevergrad.optimization.lama.RefinedAdaptiveStochasticGradientQuorumOptimization import ( + RefinedAdaptiveStochasticGradientQuorumOptimization, + ) + + lama_register["RefinedAdaptiveStochasticGradientQuorumOptimization"] = ( + RefinedAdaptiveStochasticGradientQuorumOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMARefinedAdaptiveStochasticGradientQuorumOptimization" + ).set_name("LLAMARefinedAdaptiveStochasticGradientQuorumOptimization", register=True) +except Exception as e: # RefinedAdaptiveStochasticGradientQuorumOptimization print("RefinedAdaptiveStochasticGradientQuorumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveStochasticHybridEvolution import RefinedAdaptiveStochasticHybridEvolution +try: # RefinedAdaptiveStochasticHybridEvolution + from nevergrad.optimization.lama.RefinedAdaptiveStochasticHybridEvolution import ( + RefinedAdaptiveStochasticHybridEvolution, + ) lama_register["RefinedAdaptiveStochasticHybridEvolution"] = RefinedAdaptiveStochasticHybridEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveStochasticHybridEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticHybridEvolution").set_name("LLAMARefinedAdaptiveStochasticHybridEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveStochasticHybridEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveStochasticHybridEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveStochasticHybridEvolution" + ).set_name("LLAMARefinedAdaptiveStochasticHybridEvolution", register=True) +except Exception as e: # RefinedAdaptiveStochasticHybridEvolution print("RefinedAdaptiveStochasticHybridEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdaptiveSwarmDifferentialEvolution import RefinedAdaptiveSwarmDifferentialEvolution +try: # RefinedAdaptiveSwarmDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdaptiveSwarmDifferentialEvolution import ( + RefinedAdaptiveSwarmDifferentialEvolution, + ) lama_register["RefinedAdaptiveSwarmDifferentialEvolution"] = RefinedAdaptiveSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdaptiveSwarmDifferentialEvolution").set_name("LLAMARefinedAdaptiveSwarmDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAdaptiveSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdaptiveSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdaptiveSwarmDifferentialEvolution" + ).set_name("LLAMARefinedAdaptiveSwarmDifferentialEvolution", register=True) +except Exception as e: # RefinedAdaptiveSwarmDifferentialEvolution print("RefinedAdaptiveSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - - lama_register["RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution").set_name("LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution import ( + RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution"] = ( + RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution print("RefinedAdvancedAdaptiveDynamicMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - - lama_register["RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) -except Exception as e: +try: # RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + # res = NonObjectOptimizer(method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMARefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory print("RefinedAdvancedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedArchiveEnhancedAdaptiveDifferentialEvolution import RefinedArchiveEnhancedAdaptiveDifferentialEvolution - - lama_register["RefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = RefinedArchiveEnhancedAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution").set_name("LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedArchiveEnhancedAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RefinedArchiveEnhancedAdaptiveDifferentialEvolution import ( + RefinedArchiveEnhancedAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedArchiveEnhancedAdaptiveDifferentialEvolution"] = ( + RefinedArchiveEnhancedAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedArchiveEnhancedAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RefinedArchiveEnhancedAdaptiveDifferentialEvolution print("RefinedArchiveEnhancedAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # RefinedAttenuatedAdaptiveEvolver from nevergrad.optimization.lama.RefinedAttenuatedAdaptiveEvolver import RefinedAttenuatedAdaptiveEvolver lama_register["RefinedAttenuatedAdaptiveEvolver"] = RefinedAttenuatedAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMARefinedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedAttenuatedAdaptiveEvolver = NonObjectOptimizer(method="LLAMARefinedAttenuatedAdaptiveEvolver").set_name("LLAMARefinedAttenuatedAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedAttenuatedAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedAttenuatedAdaptiveEvolver = NonObjectOptimizer( + method="LLAMARefinedAttenuatedAdaptiveEvolver" + ).set_name("LLAMARefinedAttenuatedAdaptiveEvolver", register=True) +except Exception as e: # RefinedAttenuatedAdaptiveEvolver print("RefinedAttenuatedAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedBalancedAdaptiveElitistStrategy import RefinedBalancedAdaptiveElitistStrategy +try: # RefinedBalancedAdaptiveElitistStrategy + from nevergrad.optimization.lama.RefinedBalancedAdaptiveElitistStrategy import ( + RefinedBalancedAdaptiveElitistStrategy, + ) lama_register["RefinedBalancedAdaptiveElitistStrategy"] = RefinedBalancedAdaptiveElitistStrategy - res = NonObjectOptimizer(method="LLAMARefinedBalancedAdaptiveElitistStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedBalancedAdaptiveElitistStrategy = NonObjectOptimizer(method="LLAMARefinedBalancedAdaptiveElitistStrategy").set_name("LLAMARefinedBalancedAdaptiveElitistStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedBalancedAdaptiveElitistStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedBalancedAdaptiveElitistStrategy = NonObjectOptimizer( + method="LLAMARefinedBalancedAdaptiveElitistStrategy" + ).set_name("LLAMARefinedBalancedAdaptiveElitistStrategy", register=True) +except Exception as e: # RefinedBalancedAdaptiveElitistStrategy print("RefinedBalancedAdaptiveElitistStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedBalancedExplorationOptimizer import RefinedBalancedExplorationOptimizer +try: # RefinedBalancedExplorationOptimizer + from nevergrad.optimization.lama.RefinedBalancedExplorationOptimizer import ( + RefinedBalancedExplorationOptimizer, + ) lama_register["RefinedBalancedExplorationOptimizer"] = RefinedBalancedExplorationOptimizer - res = NonObjectOptimizer(method="LLAMARefinedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedBalancedExplorationOptimizer = NonObjectOptimizer(method="LLAMARefinedBalancedExplorationOptimizer").set_name("LLAMARefinedBalancedExplorationOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedBalancedExplorationOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedBalancedExplorationOptimizer = NonObjectOptimizer( + method="LLAMARefinedBalancedExplorationOptimizer" + ).set_name("LLAMARefinedBalancedExplorationOptimizer", register=True) +except Exception as e: # RefinedBalancedExplorationOptimizer print("RefinedBalancedExplorationOptimizer can not be imported: ", e) -try: +try: # RefinedCMADiffEvoPSO from nevergrad.optimization.lama.RefinedCMADiffEvoPSO import RefinedCMADiffEvoPSO lama_register["RefinedCMADiffEvoPSO"] = RefinedCMADiffEvoPSO - res = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedCMADiffEvoPSO = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO").set_name("LLAMARefinedCMADiffEvoPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedCMADiffEvoPSO = NonObjectOptimizer(method="LLAMARefinedCMADiffEvoPSO").set_name( + "LLAMARefinedCMADiffEvoPSO", register=True + ) +except Exception as e: # RefinedCMADiffEvoPSO print("RefinedCMADiffEvoPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedConcentricDiversityStrategy import RefinedConcentricDiversityStrategy +try: # RefinedConcentricDiversityStrategy + from nevergrad.optimization.lama.RefinedConcentricDiversityStrategy import ( + RefinedConcentricDiversityStrategy, + ) lama_register["RefinedConcentricDiversityStrategy"] = RefinedConcentricDiversityStrategy - res = NonObjectOptimizer(method="LLAMARefinedConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedConcentricDiversityStrategy = NonObjectOptimizer(method="LLAMARefinedConcentricDiversityStrategy").set_name("LLAMARefinedConcentricDiversityStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedConcentricDiversityStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedConcentricDiversityStrategy = NonObjectOptimizer( + method="LLAMARefinedConcentricDiversityStrategy" + ).set_name("LLAMARefinedConcentricDiversityStrategy", register=True) +except Exception as e: # RefinedConcentricDiversityStrategy print("RefinedConcentricDiversityStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedConcentricQuantumCrossoverStrategyV5 import RefinedConcentricQuantumCrossoverStrategyV5 +try: # RefinedConcentricQuantumCrossoverStrategyV5 + from nevergrad.optimization.lama.RefinedConcentricQuantumCrossoverStrategyV5 import ( + RefinedConcentricQuantumCrossoverStrategyV5, + ) lama_register["RefinedConcentricQuantumCrossoverStrategyV5"] = RefinedConcentricQuantumCrossoverStrategyV5 - res = NonObjectOptimizer(method="LLAMARefinedConcentricQuantumCrossoverStrategyV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedConcentricQuantumCrossoverStrategyV5 = NonObjectOptimizer(method="LLAMARefinedConcentricQuantumCrossoverStrategyV5").set_name("LLAMARefinedConcentricQuantumCrossoverStrategyV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedConcentricQuantumCrossoverStrategyV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedConcentricQuantumCrossoverStrategyV5 = NonObjectOptimizer( + method="LLAMARefinedConcentricQuantumCrossoverStrategyV5" + ).set_name("LLAMARefinedConcentricQuantumCrossoverStrategyV5", register=True) +except Exception as e: # RefinedConcentricQuantumCrossoverStrategyV5 print("RefinedConcentricQuantumCrossoverStrategyV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedConvergenceAdaptiveOptimizer import RefinedConvergenceAdaptiveOptimizer +try: # RefinedConvergenceAdaptiveOptimizer + from nevergrad.optimization.lama.RefinedConvergenceAdaptiveOptimizer import ( + RefinedConvergenceAdaptiveOptimizer, + ) lama_register["RefinedConvergenceAdaptiveOptimizer"] = RefinedConvergenceAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMARefinedConvergenceAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedConvergenceAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedConvergenceAdaptiveOptimizer").set_name("LLAMARefinedConvergenceAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedConvergenceAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedConvergenceAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedConvergenceAdaptiveOptimizer" + ).set_name("LLAMARefinedConvergenceAdaptiveOptimizer", register=True) +except Exception as e: # RefinedConvergenceAdaptiveOptimizer print("RefinedConvergenceAdaptiveOptimizer can not be imported: ", e) -try: +try: # RefinedConvergenceDE from nevergrad.optimization.lama.RefinedConvergenceDE import RefinedConvergenceDE lama_register["RefinedConvergenceDE"] = RefinedConvergenceDE - res = NonObjectOptimizer(method="LLAMARefinedConvergenceDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedConvergenceDE = NonObjectOptimizer(method="LLAMARefinedConvergenceDE").set_name("LLAMARefinedConvergenceDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedConvergenceDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedConvergenceDE = NonObjectOptimizer(method="LLAMARefinedConvergenceDE").set_name( + "LLAMARefinedConvergenceDE", register=True + ) +except Exception as e: # RefinedConvergenceDE print("RefinedConvergenceDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedConvergentAdaptiveEvolutionStrategy import RefinedConvergentAdaptiveEvolutionStrategy +try: # RefinedConvergentAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.RefinedConvergentAdaptiveEvolutionStrategy import ( + RefinedConvergentAdaptiveEvolutionStrategy, + ) lama_register["RefinedConvergentAdaptiveEvolutionStrategy"] = RefinedConvergentAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedConvergentAdaptiveEvolutionStrategy").set_name("LLAMARefinedConvergentAdaptiveEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedConvergentAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedConvergentAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedConvergentAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedConvergentAdaptiveEvolutionStrategy", register=True) +except Exception as e: # RefinedConvergentAdaptiveEvolutionStrategy print("RefinedConvergentAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedCooperativeDifferentialEvolution import RefinedCooperativeDifferentialEvolution +try: # RefinedCooperativeDifferentialEvolution + from nevergrad.optimization.lama.RefinedCooperativeDifferentialEvolution import ( + RefinedCooperativeDifferentialEvolution, + ) lama_register["RefinedCooperativeDifferentialEvolution"] = RefinedCooperativeDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedCooperativeDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedCooperativeDifferentialEvolution").set_name("LLAMARefinedCooperativeDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedCooperativeDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedCooperativeDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedCooperativeDifferentialEvolution" + ).set_name("LLAMARefinedCooperativeDifferentialEvolution", register=True) +except Exception as e: # RefinedCooperativeDifferentialEvolution print("RefinedCooperativeDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedCosineAdaptiveDifferentialSwarm import RefinedCosineAdaptiveDifferentialSwarm +try: # RefinedCosineAdaptiveDifferentialSwarm + from nevergrad.optimization.lama.RefinedCosineAdaptiveDifferentialSwarm import ( + RefinedCosineAdaptiveDifferentialSwarm, + ) lama_register["RefinedCosineAdaptiveDifferentialSwarm"] = RefinedCosineAdaptiveDifferentialSwarm - res = NonObjectOptimizer(method="LLAMARefinedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer(method="LLAMARefinedCosineAdaptiveDifferentialSwarm").set_name("LLAMARefinedCosineAdaptiveDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedCosineAdaptiveDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedCosineAdaptiveDifferentialSwarm = NonObjectOptimizer( + method="LLAMARefinedCosineAdaptiveDifferentialSwarm" + ).set_name("LLAMARefinedCosineAdaptiveDifferentialSwarm", register=True) +except Exception as e: # RefinedCosineAdaptiveDifferentialSwarm print("RefinedCosineAdaptiveDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDifferentialEvolutionWithAdaptiveLearningRate import RefinedDifferentialEvolutionWithAdaptiveLearningRate - - lama_register["RefinedDifferentialEvolutionWithAdaptiveLearningRate"] = RefinedDifferentialEvolutionWithAdaptiveLearningRate - res = NonObjectOptimizer(method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer(method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate").set_name("LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate", register=True) -except Exception as e: +try: # RefinedDifferentialEvolutionWithAdaptiveLearningRate + from nevergrad.optimization.lama.RefinedDifferentialEvolutionWithAdaptiveLearningRate import ( + RefinedDifferentialEvolutionWithAdaptiveLearningRate, + ) + + lama_register["RefinedDifferentialEvolutionWithAdaptiveLearningRate"] = ( + RefinedDifferentialEvolutionWithAdaptiveLearningRate + ) + # res = NonObjectOptimizer(method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate = NonObjectOptimizer( + method="LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate" + ).set_name("LLAMARefinedDifferentialEvolutionWithAdaptiveLearningRate", register=True) +except Exception as e: # RefinedDifferentialEvolutionWithAdaptiveLearningRate print("RefinedDifferentialEvolutionWithAdaptiveLearningRate can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDifferentialParticleSwarmOptimization import RefinedDifferentialParticleSwarmOptimization - - lama_register["RefinedDifferentialParticleSwarmOptimization"] = RefinedDifferentialParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMARefinedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDifferentialParticleSwarmOptimization = NonObjectOptimizer(method="LLAMARefinedDifferentialParticleSwarmOptimization").set_name("LLAMARefinedDifferentialParticleSwarmOptimization", register=True) -except Exception as e: +try: # RefinedDifferentialParticleSwarmOptimization + from nevergrad.optimization.lama.RefinedDifferentialParticleSwarmOptimization import ( + RefinedDifferentialParticleSwarmOptimization, + ) + + lama_register["RefinedDifferentialParticleSwarmOptimization"] = ( + RefinedDifferentialParticleSwarmOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedDifferentialParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDifferentialParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedDifferentialParticleSwarmOptimization" + ).set_name("LLAMARefinedDifferentialParticleSwarmOptimization", register=True) +except Exception as e: # RefinedDifferentialParticleSwarmOptimization print("RefinedDifferentialParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDimensionalCyclicCrossoverEvolver import RefinedDimensionalCyclicCrossoverEvolver +try: # RefinedDimensionalCyclicCrossoverEvolver + from nevergrad.optimization.lama.RefinedDimensionalCyclicCrossoverEvolver import ( + RefinedDimensionalCyclicCrossoverEvolver, + ) lama_register["RefinedDimensionalCyclicCrossoverEvolver"] = RefinedDimensionalCyclicCrossoverEvolver - res = NonObjectOptimizer(method="LLAMARefinedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer(method="LLAMARefinedDimensionalCyclicCrossoverEvolver").set_name("LLAMARefinedDimensionalCyclicCrossoverEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDimensionalCyclicCrossoverEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDimensionalCyclicCrossoverEvolver = NonObjectOptimizer( + method="LLAMARefinedDimensionalCyclicCrossoverEvolver" + ).set_name("LLAMARefinedDimensionalCyclicCrossoverEvolver", register=True) +except Exception as e: # RefinedDimensionalCyclicCrossoverEvolver print("RefinedDimensionalCyclicCrossoverEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV2 import RefinedDimensionalFeedbackEvolverV2 +try: # RefinedDimensionalFeedbackEvolverV2 + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV2 import ( + RefinedDimensionalFeedbackEvolverV2, + ) lama_register["RefinedDimensionalFeedbackEvolverV2"] = RefinedDimensionalFeedbackEvolverV2 - res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDimensionalFeedbackEvolverV2 = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV2").set_name("LLAMARefinedDimensionalFeedbackEvolverV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDimensionalFeedbackEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedDimensionalFeedbackEvolverV2" + ).set_name("LLAMARefinedDimensionalFeedbackEvolverV2", register=True) +except Exception as e: # RefinedDimensionalFeedbackEvolverV2 print("RefinedDimensionalFeedbackEvolverV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV4 import RefinedDimensionalFeedbackEvolverV4 +try: # RefinedDimensionalFeedbackEvolverV4 + from nevergrad.optimization.lama.RefinedDimensionalFeedbackEvolverV4 import ( + RefinedDimensionalFeedbackEvolverV4, + ) lama_register["RefinedDimensionalFeedbackEvolverV4"] = RefinedDimensionalFeedbackEvolverV4 - res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDimensionalFeedbackEvolverV4 = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV4").set_name("LLAMARefinedDimensionalFeedbackEvolverV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDimensionalFeedbackEvolverV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDimensionalFeedbackEvolverV4 = NonObjectOptimizer( + method="LLAMARefinedDimensionalFeedbackEvolverV4" + ).set_name("LLAMARefinedDimensionalFeedbackEvolverV4", register=True) +except Exception as e: # RefinedDimensionalFeedbackEvolverV4 print("RefinedDimensionalFeedbackEvolverV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDualConvergenceEvolutiveStrategy import RefinedDualConvergenceEvolutiveStrategy +try: # RefinedDualConvergenceEvolutiveStrategy + from nevergrad.optimization.lama.RefinedDualConvergenceEvolutiveStrategy import ( + RefinedDualConvergenceEvolutiveStrategy, + ) lama_register["RefinedDualConvergenceEvolutiveStrategy"] = RefinedDualConvergenceEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMARefinedDualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDualConvergenceEvolutiveStrategy = NonObjectOptimizer(method="LLAMARefinedDualConvergenceEvolutiveStrategy").set_name("LLAMARefinedDualConvergenceEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDualConvergenceEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDualConvergenceEvolutiveStrategy = NonObjectOptimizer( + method="LLAMARefinedDualConvergenceEvolutiveStrategy" + ).set_name("LLAMARefinedDualConvergenceEvolutiveStrategy", register=True) +except Exception as e: # RefinedDualConvergenceEvolutiveStrategy print("RefinedDualConvergenceEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDualPhaseADPSO_DE_V3_Enhanced import RefinedDualPhaseADPSO_DE_V3_Enhanced +try: # RefinedDualPhaseADPSO_DE_V3_Enhanced + from nevergrad.optimization.lama.RefinedDualPhaseADPSO_DE_V3_Enhanced import ( + RefinedDualPhaseADPSO_DE_V3_Enhanced, + ) lama_register["RefinedDualPhaseADPSO_DE_V3_Enhanced"] = RefinedDualPhaseADPSO_DE_V3_Enhanced - res = NonObjectOptimizer(method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced = NonObjectOptimizer(method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced").set_name("LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced = NonObjectOptimizer( + method="LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced" + ).set_name("LLAMARefinedDualPhaseADPSO_DE_V3_Enhanced", register=True) +except Exception as e: # RefinedDualPhaseADPSO_DE_V3_Enhanced print("RefinedDualPhaseADPSO_DE_V3_Enhanced can not be imported: ", e) -try: +try: # RefinedDualPhaseOptimization from nevergrad.optimization.lama.RefinedDualPhaseOptimization import RefinedDualPhaseOptimization lama_register["RefinedDualPhaseOptimization"] = RefinedDualPhaseOptimization - res = NonObjectOptimizer(method="LLAMARefinedDualPhaseOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDualPhaseOptimization = NonObjectOptimizer(method="LLAMARefinedDualPhaseOptimization").set_name("LLAMARefinedDualPhaseOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDualPhaseOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDualPhaseOptimization = NonObjectOptimizer( + method="LLAMARefinedDualPhaseOptimization" + ).set_name("LLAMARefinedDualPhaseOptimization", register=True) +except Exception as e: # RefinedDualPhaseOptimization print("RefinedDualPhaseOptimization can not be imported: ", e) -try: +try: # RefinedDualStrategyAdaptiveDE from nevergrad.optimization.lama.RefinedDualStrategyAdaptiveDE import RefinedDualStrategyAdaptiveDE lama_register["RefinedDualStrategyAdaptiveDE"] = RefinedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDualStrategyAdaptiveDE").set_name("LLAMARefinedDualStrategyAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedDualStrategyAdaptiveDE", register=True) +except Exception as e: # RefinedDualStrategyAdaptiveDE print("RefinedDualStrategyAdaptiveDE can not be imported: ", e) -try: +try: # RefinedDynamicAdaptiveDE from nevergrad.optimization.lama.RefinedDynamicAdaptiveDE import RefinedDynamicAdaptiveDE lama_register["RefinedDynamicAdaptiveDE"] = RefinedDynamicAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE").set_name("LLAMARefinedDynamicAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveDE").set_name( + "LLAMARefinedDynamicAdaptiveDE", register=True + ) +except Exception as e: # RefinedDynamicAdaptiveDE print("RefinedDynamicAdaptiveDE can not be imported: ", e) -try: +try: # RefinedDynamicAdaptiveHybridDE from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDE import RefinedDynamicAdaptiveHybridDE lama_register["RefinedDynamicAdaptiveHybridDE"] = RefinedDynamicAdaptiveHybridDE - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveHybridDE = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDE").set_name("LLAMARefinedDynamicAdaptiveHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveHybridDE = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridDE" + ).set_name("LLAMARefinedDynamicAdaptiveHybridDE", register=True) +except Exception as e: # RefinedDynamicAdaptiveHybridDE print("RefinedDynamicAdaptiveHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory import RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory - - lama_register["RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory").set_name("LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) -except Exception as e: +try: # RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory import ( + RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory, + ) + + lama_register["RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory"] = ( + RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory + ) + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory" + ).set_name("LLAMARefinedDynamicAdaptiveHybridDEPSOWithEliteMemory", register=True) +except Exception as e: # RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory print("RefinedDynamicAdaptiveHybridDEPSOWithEliteMemory can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizer import RefinedDynamicAdaptiveHybridOptimizer +try: # RefinedDynamicAdaptiveHybridOptimizer + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizer import ( + RefinedDynamicAdaptiveHybridOptimizer, + ) lama_register["RefinedDynamicAdaptiveHybridOptimizer"] = RefinedDynamicAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizer").set_name("LLAMARefinedDynamicAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizer", register=True) +except Exception as e: # RefinedDynamicAdaptiveHybridOptimizer print("RefinedDynamicAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizerV2 import RefinedDynamicAdaptiveHybridOptimizerV2 +try: # RefinedDynamicAdaptiveHybridOptimizerV2 + from nevergrad.optimization.lama.RefinedDynamicAdaptiveHybridOptimizerV2 import ( + RefinedDynamicAdaptiveHybridOptimizerV2, + ) lama_register["RefinedDynamicAdaptiveHybridOptimizerV2"] = RefinedDynamicAdaptiveHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveHybridOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2").set_name("LLAMARefinedDynamicAdaptiveHybridOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveHybridOptimizerV2" + ).set_name("LLAMARefinedDynamicAdaptiveHybridOptimizerV2", register=True) +except Exception as e: # RefinedDynamicAdaptiveHybridOptimizerV2 print("RefinedDynamicAdaptiveHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicAdaptiveStrategyV23 import RefinedDynamicAdaptiveStrategyV23 +try: # RefinedDynamicAdaptiveStrategyV23 + from nevergrad.optimization.lama.RefinedDynamicAdaptiveStrategyV23 import ( + RefinedDynamicAdaptiveStrategyV23, + ) lama_register["RefinedDynamicAdaptiveStrategyV23"] = RefinedDynamicAdaptiveStrategyV23 - res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveStrategyV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicAdaptiveStrategyV23 = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveStrategyV23").set_name("LLAMARefinedDynamicAdaptiveStrategyV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicAdaptiveStrategyV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicAdaptiveStrategyV23 = NonObjectOptimizer( + method="LLAMARefinedDynamicAdaptiveStrategyV23" + ).set_name("LLAMARefinedDynamicAdaptiveStrategyV23", register=True) +except Exception as e: # RefinedDynamicAdaptiveStrategyV23 print("RefinedDynamicAdaptiveStrategyV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV3 import RefinedDynamicClusterHybridOptimizationV3 +try: # RefinedDynamicClusterHybridOptimizationV3 + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV3 import ( + RefinedDynamicClusterHybridOptimizationV3, + ) lama_register["RefinedDynamicClusterHybridOptimizationV3"] = RefinedDynamicClusterHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicClusterHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV3").set_name("LLAMARefinedDynamicClusterHybridOptimizationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicClusterHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedDynamicClusterHybridOptimizationV3" + ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV3", register=True) +except Exception as e: # RefinedDynamicClusterHybridOptimizationV3 print("RefinedDynamicClusterHybridOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV4 import RefinedDynamicClusterHybridOptimizationV4 +try: # RefinedDynamicClusterHybridOptimizationV4 + from nevergrad.optimization.lama.RefinedDynamicClusterHybridOptimizationV4 import ( + RefinedDynamicClusterHybridOptimizationV4, + ) lama_register["RefinedDynamicClusterHybridOptimizationV4"] = RefinedDynamicClusterHybridOptimizationV4 - res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicClusterHybridOptimizationV4 = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV4").set_name("LLAMARefinedDynamicClusterHybridOptimizationV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicClusterHybridOptimizationV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicClusterHybridOptimizationV4 = NonObjectOptimizer( + method="LLAMARefinedDynamicClusterHybridOptimizationV4" + ).set_name("LLAMARefinedDynamicClusterHybridOptimizationV4", register=True) +except Exception as e: # RefinedDynamicClusterHybridOptimizationV4 print("RefinedDynamicClusterHybridOptimizationV4 can not be imported: ", e) -try: +try: # RefinedDynamicClusteringPSO from nevergrad.optimization.lama.RefinedDynamicClusteringPSO import RefinedDynamicClusteringPSO lama_register["RefinedDynamicClusteringPSO"] = RefinedDynamicClusteringPSO - res = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicClusteringPSO = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO").set_name("LLAMARefinedDynamicClusteringPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicClusteringPSO = NonObjectOptimizer(method="LLAMARefinedDynamicClusteringPSO").set_name( + "LLAMARefinedDynamicClusteringPSO", register=True + ) +except Exception as e: # RefinedDynamicClusteringPSO print("RefinedDynamicClusteringPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicCrowdingHybridOptimizer import RefinedDynamicCrowdingHybridOptimizer +try: # RefinedDynamicCrowdingHybridOptimizer + from nevergrad.optimization.lama.RefinedDynamicCrowdingHybridOptimizer import ( + RefinedDynamicCrowdingHybridOptimizer, + ) lama_register["RefinedDynamicCrowdingHybridOptimizer"] = RefinedDynamicCrowdingHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedDynamicCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicCrowdingHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicCrowdingHybridOptimizer").set_name("LLAMARefinedDynamicCrowdingHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicCrowdingHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicCrowdingHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicCrowdingHybridOptimizer" + ).set_name("LLAMARefinedDynamicCrowdingHybridOptimizer", register=True) +except Exception as e: # RefinedDynamicCrowdingHybridOptimizer print("RefinedDynamicCrowdingHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicEliteAdaptiveHybridOptimizer import RefinedDynamicEliteAdaptiveHybridOptimizer +try: # RefinedDynamicEliteAdaptiveHybridOptimizer + from nevergrad.optimization.lama.RefinedDynamicEliteAdaptiveHybridOptimizer import ( + RefinedDynamicEliteAdaptiveHybridOptimizer, + ) lama_register["RefinedDynamicEliteAdaptiveHybridOptimizer"] = RefinedDynamicEliteAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicEliteAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer").set_name("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicEliteAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicEliteAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer", register=True) +except Exception as e: # RefinedDynamicEliteAdaptiveHybridOptimizer print("RefinedDynamicEliteAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicEnhancedHybridOptimizer import RefinedDynamicEnhancedHybridOptimizer +try: # RefinedDynamicEnhancedHybridOptimizer + from nevergrad.optimization.lama.RefinedDynamicEnhancedHybridOptimizer import ( + RefinedDynamicEnhancedHybridOptimizer, + ) lama_register["RefinedDynamicEnhancedHybridOptimizer"] = RefinedDynamicEnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedDynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicEnhancedHybridOptimizer").set_name("LLAMARefinedDynamicEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicEnhancedHybridOptimizer" + ).set_name("LLAMARefinedDynamicEnhancedHybridOptimizer", register=True) +except Exception as e: # RefinedDynamicEnhancedHybridOptimizer print("RefinedDynamicEnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicGradientBoostedMemorySimulatedAnnealing import RefinedDynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["RefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = RefinedDynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # RefinedDynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.RefinedDynamicGradientBoostedMemorySimulatedAnnealing import ( + RefinedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedDynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # RefinedDynamicGradientBoostedMemorySimulatedAnnealing print("RefinedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedDynamicHybridDEPSOWithEliteMemoryV2 import RefinedDynamicHybridDEPSOWithEliteMemoryV2 +try: # RefinedDynamicHybridDEPSOWithEliteMemoryV2 + from nevergrad.optimization.lama.RefinedDynamicHybridDEPSOWithEliteMemoryV2 import ( + RefinedDynamicHybridDEPSOWithEliteMemoryV2, + ) lama_register["RefinedDynamicHybridDEPSOWithEliteMemoryV2"] = RefinedDynamicHybridDEPSOWithEliteMemoryV2 - res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2 = NonObjectOptimizer(method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2").set_name("LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2 = NonObjectOptimizer( + method="LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2" + ).set_name("LLAMARefinedDynamicHybridDEPSOWithEliteMemoryV2", register=True) +except Exception as e: # RefinedDynamicHybridDEPSOWithEliteMemoryV2 print("RefinedDynamicHybridDEPSOWithEliteMemoryV2 can not be imported: ", e) -try: +try: # RefinedDynamicHybridOptimizer from nevergrad.optimization.lama.RefinedDynamicHybridOptimizer import RefinedDynamicHybridOptimizer lama_register["RefinedDynamicHybridOptimizer"] = RefinedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedDynamicHybridOptimizer").set_name("LLAMARefinedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedDynamicHybridOptimizer" + ).set_name("LLAMARefinedDynamicHybridOptimizer", register=True) +except Exception as e: # RefinedDynamicHybridOptimizer print("RefinedDynamicHybridOptimizer can not be imported: ", e) -try: +try: # RefinedDynamicQuantumEvolution from nevergrad.optimization.lama.RefinedDynamicQuantumEvolution import RefinedDynamicQuantumEvolution lama_register["RefinedDynamicQuantumEvolution"] = RefinedDynamicQuantumEvolution - res = NonObjectOptimizer(method="LLAMARefinedDynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedDynamicQuantumEvolution = NonObjectOptimizer(method="LLAMARefinedDynamicQuantumEvolution").set_name("LLAMARefinedDynamicQuantumEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedDynamicQuantumEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedDynamicQuantumEvolution = NonObjectOptimizer( + method="LLAMARefinedDynamicQuantumEvolution" + ).set_name("LLAMARefinedDynamicQuantumEvolution", register=True) +except Exception as e: # RefinedDynamicQuantumEvolution print("RefinedDynamicQuantumEvolution can not be imported: ", e) -try: +try: # RefinedEliteAdaptiveHybridDEPSO from nevergrad.optimization.lama.RefinedEliteAdaptiveHybridDEPSO import RefinedEliteAdaptiveHybridDEPSO lama_register["RefinedEliteAdaptiveHybridDEPSO"] = RefinedEliteAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveHybridDEPSO").set_name("LLAMARefinedEliteAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedEliteAdaptiveHybridDEPSO", register=True) +except Exception as e: # RefinedEliteAdaptiveHybridDEPSO print("RefinedEliteAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - - lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) -except Exception as e: +try: # RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 import RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 - - lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3"] = RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3").set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3", register=True) -except Exception as e: +try: # RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 import ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3, + ) + + lama_register["RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3"] = ( + RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3" + ).set_name("LLAMARefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 print("RefinedEliteAdaptiveMemoryDynamicCrowdingOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizer import RefinedEliteAdaptiveMemoryHybridOptimizer +try: # RefinedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizer import ( + RefinedEliteAdaptiveMemoryHybridOptimizer, + ) lama_register["RefinedEliteAdaptiveMemoryHybridOptimizer"] = RefinedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryHybridOptimizer print("RefinedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV3 import RefinedEliteAdaptiveMemoryHybridOptimizerV3 +try: # RefinedEliteAdaptiveMemoryHybridOptimizerV3 + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV3 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV3, + ) lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV3"] = RefinedEliteAdaptiveMemoryHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV3", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryHybridOptimizerV3 print("RefinedEliteAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV4 import RefinedEliteAdaptiveMemoryHybridOptimizerV4 +try: # RefinedEliteAdaptiveMemoryHybridOptimizerV4 + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV4 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV4, + ) lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV4"] = RefinedEliteAdaptiveMemoryHybridOptimizerV4 - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV4", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryHybridOptimizerV4 print("RefinedEliteAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV5 import RefinedEliteAdaptiveMemoryHybridOptimizerV5 +try: # RefinedEliteAdaptiveMemoryHybridOptimizerV5 + from nevergrad.optimization.lama.RefinedEliteAdaptiveMemoryHybridOptimizerV5 import ( + RefinedEliteAdaptiveMemoryHybridOptimizerV5, + ) lama_register["RefinedEliteAdaptiveMemoryHybridOptimizerV5"] = RefinedEliteAdaptiveMemoryHybridOptimizerV5 - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5").set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5" + ).set_name("LLAMARefinedEliteAdaptiveMemoryHybridOptimizerV5", register=True) +except Exception as e: # RefinedEliteAdaptiveMemoryHybridOptimizerV5 print("RefinedEliteAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch import RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch - - lama_register["RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch"] = RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch - res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch").set_name("LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch", register=True) -except Exception as e: +try: # RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch + from nevergrad.optimization.lama.RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch import ( + RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch, + ) + + lama_register["RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch"] = ( + RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch = NonObjectOptimizer( + method="LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch" + ).set_name("LLAMARefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch", register=True) +except Exception as e: # RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch print("RefinedEliteAdaptiveQuantumDEWithEnhancedHybridSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteDynamicHybridOptimizer import RefinedEliteDynamicHybridOptimizer +try: # RefinedEliteDynamicHybridOptimizer + from nevergrad.optimization.lama.RefinedEliteDynamicHybridOptimizer import ( + RefinedEliteDynamicHybridOptimizer, + ) lama_register["RefinedEliteDynamicHybridOptimizer"] = RefinedEliteDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteDynamicHybridOptimizer").set_name("LLAMARefinedEliteDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteDynamicHybridOptimizer" + ).set_name("LLAMARefinedEliteDynamicHybridOptimizer", register=True) +except Exception as e: # RefinedEliteDynamicHybridOptimizer print("RefinedEliteDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEliteDynamicMemoryHybridOptimizer import RefinedEliteDynamicMemoryHybridOptimizer +try: # RefinedEliteDynamicMemoryHybridOptimizer + from nevergrad.optimization.lama.RefinedEliteDynamicMemoryHybridOptimizer import ( + RefinedEliteDynamicMemoryHybridOptimizer, + ) lama_register["RefinedEliteDynamicMemoryHybridOptimizer"] = RefinedEliteDynamicMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedEliteDynamicMemoryHybridOptimizer").set_name("LLAMARefinedEliteDynamicMemoryHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteDynamicMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteDynamicMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedEliteDynamicMemoryHybridOptimizer" + ).set_name("LLAMARefinedEliteDynamicMemoryHybridOptimizer", register=True) +except Exception as e: # RefinedEliteDynamicMemoryHybridOptimizer print("RefinedEliteDynamicMemoryHybridOptimizer can not be imported: ", e) -try: +try: # RefinedEliteGuidedAdaptiveDE from nevergrad.optimization.lama.RefinedEliteGuidedAdaptiveDE import RefinedEliteGuidedAdaptiveDE lama_register["RefinedEliteGuidedAdaptiveDE"] = RefinedEliteGuidedAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteGuidedAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedEliteGuidedAdaptiveDE").set_name("LLAMARefinedEliteGuidedAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteGuidedAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedAdaptiveDE" + ).set_name("LLAMARefinedEliteGuidedAdaptiveDE", register=True) +except Exception as e: # RefinedEliteGuidedAdaptiveDE print("RefinedEliteGuidedAdaptiveDE can not be imported: ", e) -try: +try: # RefinedEliteGuidedMutationDE from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE import RefinedEliteGuidedMutationDE lama_register["RefinedEliteGuidedMutationDE"] = RefinedEliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE").set_name("LLAMARefinedEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedMutationDE" + ).set_name("LLAMARefinedEliteGuidedMutationDE", register=True) +except Exception as e: # RefinedEliteGuidedMutationDE print("RefinedEliteGuidedMutationDE can not be imported: ", e) -try: +try: # RefinedEliteGuidedMutationDE_v3 from nevergrad.optimization.lama.RefinedEliteGuidedMutationDE_v3 import RefinedEliteGuidedMutationDE_v3 lama_register["RefinedEliteGuidedMutationDE_v3"] = RefinedEliteGuidedMutationDE_v3 - res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE_v3").set_name("LLAMARefinedEliteGuidedMutationDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMARefinedEliteGuidedMutationDE_v3" + ).set_name("LLAMARefinedEliteGuidedMutationDE_v3", register=True) +except Exception as e: # RefinedEliteGuidedMutationDE_v3 print("RefinedEliteGuidedMutationDE_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - - lama_register["RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer(method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined").set_name("LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) -except Exception as e: +try: # RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + from nevergrad.optimization.lama.RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined import ( + RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined, + ) + + lama_register["RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined"] = ( + RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined = NonObjectOptimizer( + method="LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined" + ).set_name("LLAMARefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined", register=True) +except Exception as e: # RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined print("RefinedEnhancedAQAPSO_LS_DIW_AP_Ultimate_Redefined can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 import RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 - - lama_register["RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5"] = RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5").set_name("LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 import ( + RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5, + ) + + lama_register["RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5"] = ( + RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5" + ).set_name("LLAMARefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5", register=True) +except Exception as e: # RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 print("RefinedEnhancedAdaptiveCovarianceMatrixDifferentialEvolutionV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost import RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost - - lama_register["RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost"] = RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost").set_name("LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost import ( + RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost, + ) + + lama_register["RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost"] = ( + RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost" + ).set_name("LLAMARefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost", register=True) +except Exception as e: # RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost print("RefinedEnhancedAdaptiveDifferentialEvolutionWithGradientBoost can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDualPhaseStrategyV9 import RefinedEnhancedAdaptiveDualPhaseStrategyV9 +try: # RefinedEnhancedAdaptiveDualPhaseStrategyV9 + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveDualPhaseStrategyV9 import ( + RefinedEnhancedAdaptiveDualPhaseStrategyV9, + ) lama_register["RefinedEnhancedAdaptiveDualPhaseStrategyV9"] = RefinedEnhancedAdaptiveDualPhaseStrategyV9 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9").set_name("LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9" + ).set_name("LLAMARefinedEnhancedAdaptiveDualPhaseStrategyV9", register=True) +except Exception as e: # RefinedEnhancedAdaptiveDualPhaseStrategyV9 print("RefinedEnhancedAdaptiveDualPhaseStrategyV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO import RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO - - lama_register["RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO"] = RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO").set_name("LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO import ( + RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO, + ) + + lama_register["RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO"] = ( + RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO" + ).set_name("LLAMARefinedEnhancedAdaptiveGradientBalancedCrossoverPSO", register=True) +except Exception as e: # RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO print("RefinedEnhancedAdaptiveGradientBalancedCrossoverPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 import RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 - - lama_register["RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9"] = RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9").set_name("LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 import ( + RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9, + ) + + lama_register["RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9"] = ( + RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9" + ).set_name("LLAMARefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9", register=True) +except Exception as e: # RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 print("RefinedEnhancedAdaptiveHarmonyMemeticOptimizationV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonySearch import RefinedEnhancedAdaptiveHarmonySearch +try: # RefinedEnhancedAdaptiveHarmonySearch + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHarmonySearch import ( + RefinedEnhancedAdaptiveHarmonySearch, + ) lama_register["RefinedEnhancedAdaptiveHarmonySearch"] = RefinedEnhancedAdaptiveHarmonySearch - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveHarmonySearch = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonySearch").set_name("LLAMARefinedEnhancedAdaptiveHarmonySearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHarmonySearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveHarmonySearch = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHarmonySearch" + ).set_name("LLAMARefinedEnhancedAdaptiveHarmonySearch", register=True) +except Exception as e: # RefinedEnhancedAdaptiveHarmonySearch print("RefinedEnhancedAdaptiveHarmonySearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 import RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 - - lama_register["RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2"] = RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2").set_name("LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 import ( + RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2, + ) + + lama_register["RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2"] = ( + RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2" + ).set_name("LLAMARefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2", register=True) +except Exception as e: # RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 print("RefinedEnhancedAdaptiveHybridParticleSwarmDifferentialEvolutionPlusV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm import RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm - - lama_register["RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm"] = RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: +try: # RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm import ( + RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm, + ) + + lama_register["RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm"] = ( + RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMARefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm print("RefinedEnhancedAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiOperatorSearch import RefinedEnhancedAdaptiveMultiOperatorSearch +try: # RefinedEnhancedAdaptiveMultiOperatorSearch + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiOperatorSearch import ( + RefinedEnhancedAdaptiveMultiOperatorSearch, + ) lama_register["RefinedEnhancedAdaptiveMultiOperatorSearch"] = RefinedEnhancedAdaptiveMultiOperatorSearch - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch").set_name("LLAMARefinedEnhancedAdaptiveMultiOperatorSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveMultiOperatorSearch = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMultiOperatorSearch" + ).set_name("LLAMARefinedEnhancedAdaptiveMultiOperatorSearch", register=True) +except Exception as e: # RefinedEnhancedAdaptiveMultiOperatorSearch print("RefinedEnhancedAdaptiveMultiOperatorSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiStrategyDE import RefinedEnhancedAdaptiveMultiStrategyDE +try: # RefinedEnhancedAdaptiveMultiStrategyDE + from nevergrad.optimization.lama.RefinedEnhancedAdaptiveMultiStrategyDE import ( + RefinedEnhancedAdaptiveMultiStrategyDE, + ) lama_register["RefinedEnhancedAdaptiveMultiStrategyDE"] = RefinedEnhancedAdaptiveMultiStrategyDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE").set_name("LLAMARefinedEnhancedAdaptiveMultiStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveMultiStrategyDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveMultiStrategyDE" + ).set_name("LLAMARefinedEnhancedAdaptiveMultiStrategyDE", register=True) +except Exception as e: # RefinedEnhancedAdaptiveMultiStrategyDE print("RefinedEnhancedAdaptiveMultiStrategyDE can not be imported: ", e) -try: +try: # RefinedEnhancedAdaptiveQGSA_v45 from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v45 import RefinedEnhancedAdaptiveQGSA_v45 lama_register["RefinedEnhancedAdaptiveQGSA_v45"] = RefinedEnhancedAdaptiveQGSA_v45 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveQGSA_v45 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v45").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v45", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v45")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveQGSA_v45 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v45" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v45", register=True) +except Exception as e: # RefinedEnhancedAdaptiveQGSA_v45 print("RefinedEnhancedAdaptiveQGSA_v45 can not be imported: ", e) -try: +try: # RefinedEnhancedAdaptiveQGSA_v46 from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v46 import RefinedEnhancedAdaptiveQGSA_v46 lama_register["RefinedEnhancedAdaptiveQGSA_v46"] = RefinedEnhancedAdaptiveQGSA_v46 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveQGSA_v46 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v46").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v46", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v46")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveQGSA_v46 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v46" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v46", register=True) +except Exception as e: # RefinedEnhancedAdaptiveQGSA_v46 print("RefinedEnhancedAdaptiveQGSA_v46 can not be imported: ", e) -try: +try: # RefinedEnhancedAdaptiveQGSA_v48 from nevergrad.optimization.lama.RefinedEnhancedAdaptiveQGSA_v48 import RefinedEnhancedAdaptiveQGSA_v48 lama_register["RefinedEnhancedAdaptiveQGSA_v48"] = RefinedEnhancedAdaptiveQGSA_v48 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v48")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedAdaptiveQGSA_v48 = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v48").set_name("LLAMARefinedEnhancedAdaptiveQGSA_v48", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedAdaptiveQGSA_v48")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedAdaptiveQGSA_v48 = NonObjectOptimizer( + method="LLAMARefinedEnhancedAdaptiveQGSA_v48" + ).set_name("LLAMARefinedEnhancedAdaptiveQGSA_v48", register=True) +except Exception as e: # RefinedEnhancedAdaptiveQGSA_v48 print("RefinedEnhancedAdaptiveQGSA_v48 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedBalancedDualStrategyAdaptiveDE import RefinedEnhancedBalancedDualStrategyAdaptiveDE - - lama_register["RefinedEnhancedBalancedDualStrategyAdaptiveDE"] = RefinedEnhancedBalancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE").set_name("LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE", register=True) -except Exception as e: +try: # RefinedEnhancedBalancedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.RefinedEnhancedBalancedDualStrategyAdaptiveDE import ( + RefinedEnhancedBalancedDualStrategyAdaptiveDE, + ) + + lama_register["RefinedEnhancedBalancedDualStrategyAdaptiveDE"] = ( + RefinedEnhancedBalancedDualStrategyAdaptiveDE + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedEnhancedBalancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # RefinedEnhancedBalancedDualStrategyAdaptiveDE print("RefinedEnhancedBalancedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedCovarianceMatrixDifferentialEvolution import RefinedEnhancedCovarianceMatrixDifferentialEvolution - - lama_register["RefinedEnhancedCovarianceMatrixDifferentialEvolution"] = RefinedEnhancedCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedEnhancedCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.RefinedEnhancedCovarianceMatrixDifferentialEvolution import ( + RefinedEnhancedCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedEnhancedCovarianceMatrixDifferentialEvolution"] = ( + RefinedEnhancedCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedEnhancedCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # RefinedEnhancedCovarianceMatrixDifferentialEvolution print("RefinedEnhancedCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDifferentialEvolutionLocalSearch_v42 import RefinedEnhancedDifferentialEvolutionLocalSearch_v42 - - lama_register["RefinedEnhancedDifferentialEvolutionLocalSearch_v42"] = RefinedEnhancedDifferentialEvolutionLocalSearch_v42 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42 = NonObjectOptimizer(method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42").set_name("LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42", register=True) -except Exception as e: +try: # RefinedEnhancedDifferentialEvolutionLocalSearch_v42 + from nevergrad.optimization.lama.RefinedEnhancedDifferentialEvolutionLocalSearch_v42 import ( + RefinedEnhancedDifferentialEvolutionLocalSearch_v42, + ) + + lama_register["RefinedEnhancedDifferentialEvolutionLocalSearch_v42"] = ( + RefinedEnhancedDifferentialEvolutionLocalSearch_v42 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42" + ).set_name("LLAMARefinedEnhancedDifferentialEvolutionLocalSearch_v42", register=True) +except Exception as e: # RefinedEnhancedDifferentialEvolutionLocalSearch_v42 print("RefinedEnhancedDifferentialEvolutionLocalSearch_v42 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 import RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 - - lama_register["RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3"] = RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3").set_name("LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) -except Exception as e: +try: # RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 import ( + RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3, + ) + + lama_register["RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3"] = ( + RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3" + ).set_name("LLAMARefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3", register=True) +except Exception as e: # RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 print("RefinedEnhancedDualPhaseAdaptiveHybridOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimization import RefinedEnhancedDualPhaseHybridOptimization +try: # RefinedEnhancedDualPhaseHybridOptimization + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimization import ( + RefinedEnhancedDualPhaseHybridOptimization, + ) lama_register["RefinedEnhancedDualPhaseHybridOptimization"] = RefinedEnhancedDualPhaseHybridOptimization - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualPhaseHybridOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimization").set_name("LLAMARefinedEnhancedDualPhaseHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualPhaseHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseHybridOptimization" + ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimization", register=True) +except Exception as e: # RefinedEnhancedDualPhaseHybridOptimization print("RefinedEnhancedDualPhaseHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimizationV3 import RefinedEnhancedDualPhaseHybridOptimizationV3 - - lama_register["RefinedEnhancedDualPhaseHybridOptimizationV3"] = RefinedEnhancedDualPhaseHybridOptimizationV3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualPhaseHybridOptimizationV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3").set_name("LLAMARefinedEnhancedDualPhaseHybridOptimizationV3", register=True) -except Exception as e: +try: # RefinedEnhancedDualPhaseHybridOptimizationV3 + from nevergrad.optimization.lama.RefinedEnhancedDualPhaseHybridOptimizationV3 import ( + RefinedEnhancedDualPhaseHybridOptimizationV3, + ) + + lama_register["RefinedEnhancedDualPhaseHybridOptimizationV3"] = ( + RefinedEnhancedDualPhaseHybridOptimizationV3 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualPhaseHybridOptimizationV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualPhaseHybridOptimizationV3" + ).set_name("LLAMARefinedEnhancedDualPhaseHybridOptimizationV3", register=True) +except Exception as e: # RefinedEnhancedDualPhaseHybridOptimizationV3 print("RefinedEnhancedDualPhaseHybridOptimizationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v2 import RefinedEnhancedDualStrategyAdaptiveDE_v2 +try: # RefinedEnhancedDualStrategyAdaptiveDE_v2 + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v2 import ( + RefinedEnhancedDualStrategyAdaptiveDE_v2, + ) lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v2"] = RefinedEnhancedDualStrategyAdaptiveDE_v2 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2").set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2" + ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v2", register=True) +except Exception as e: # RefinedEnhancedDualStrategyAdaptiveDE_v2 print("RefinedEnhancedDualStrategyAdaptiveDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v3 import RefinedEnhancedDualStrategyAdaptiveDE_v3 +try: # RefinedEnhancedDualStrategyAdaptiveDE_v3 + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyAdaptiveDE_v3 import ( + RefinedEnhancedDualStrategyAdaptiveDE_v3, + ) lama_register["RefinedEnhancedDualStrategyAdaptiveDE_v3"] = RefinedEnhancedDualStrategyAdaptiveDE_v3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3").set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3" + ).set_name("LLAMARefinedEnhancedDualStrategyAdaptiveDE_v3", register=True) +except Exception as e: # RefinedEnhancedDualStrategyAdaptiveDE_v3 print("RefinedEnhancedDualStrategyAdaptiveDE_v3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyDynamicDE import RefinedEnhancedDualStrategyDynamicDE +try: # RefinedEnhancedDualStrategyDynamicDE + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyDynamicDE import ( + RefinedEnhancedDualStrategyDynamicDE, + ) lama_register["RefinedEnhancedDualStrategyDynamicDE"] = RefinedEnhancedDualStrategyDynamicDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualStrategyDynamicDE = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyDynamicDE").set_name("LLAMARefinedEnhancedDualStrategyDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualStrategyDynamicDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyDynamicDE" + ).set_name("LLAMARefinedEnhancedDualStrategyDynamicDE", register=True) +except Exception as e: # RefinedEnhancedDualStrategyDynamicDE print("RefinedEnhancedDualStrategyDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDualStrategyElitistDE_v2 import RefinedEnhancedDualStrategyElitistDE_v2 +try: # RefinedEnhancedDualStrategyElitistDE_v2 + from nevergrad.optimization.lama.RefinedEnhancedDualStrategyElitistDE_v2 import ( + RefinedEnhancedDualStrategyElitistDE_v2, + ) lama_register["RefinedEnhancedDualStrategyElitistDE_v2"] = RefinedEnhancedDualStrategyElitistDE_v2 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyElitistDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDualStrategyElitistDE_v2 = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyElitistDE_v2").set_name("LLAMARefinedEnhancedDualStrategyElitistDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDualStrategyElitistDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDualStrategyElitistDE_v2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedDualStrategyElitistDE_v2" + ).set_name("LLAMARefinedEnhancedDualStrategyElitistDE_v2", register=True) +except Exception as e: # RefinedEnhancedDualStrategyElitistDE_v2 print("RefinedEnhancedDualStrategyElitistDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDynamicAdaptiveHybridOptimization import RefinedEnhancedDynamicAdaptiveHybridOptimization - - lama_register["RefinedEnhancedDynamicAdaptiveHybridOptimization"] = RefinedEnhancedDynamicAdaptiveHybridOptimization - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization").set_name("LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) -except Exception as e: +try: # RefinedEnhancedDynamicAdaptiveHybridOptimization + from nevergrad.optimization.lama.RefinedEnhancedDynamicAdaptiveHybridOptimization import ( + RefinedEnhancedDynamicAdaptiveHybridOptimization, + ) + + lama_register["RefinedEnhancedDynamicAdaptiveHybridOptimization"] = ( + RefinedEnhancedDynamicAdaptiveHybridOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization" + ).set_name("LLAMARefinedEnhancedDynamicAdaptiveHybridOptimization", register=True) +except Exception as e: # RefinedEnhancedDynamicAdaptiveHybridOptimization print("RefinedEnhancedDynamicAdaptiveHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedDynamicDualStrategyHybridDE import RefinedEnhancedDynamicDualStrategyHybridDE +try: # RefinedEnhancedDynamicDualStrategyHybridDE + from nevergrad.optimization.lama.RefinedEnhancedDynamicDualStrategyHybridDE import ( + RefinedEnhancedDynamicDualStrategyHybridDE, + ) lama_register["RefinedEnhancedDynamicDualStrategyHybridDE"] = RefinedEnhancedDynamicDualStrategyHybridDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedDynamicDualStrategyHybridDE = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE").set_name("LLAMARefinedEnhancedDynamicDualStrategyHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedDynamicDualStrategyHybridDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedDynamicDualStrategyHybridDE" + ).set_name("LLAMARefinedEnhancedDynamicDualStrategyHybridDE", register=True) +except Exception as e: # RefinedEnhancedDynamicDualStrategyHybridDE print("RefinedEnhancedDynamicDualStrategyHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedAdaptiveRestartDE import RefinedEnhancedEliteGuidedAdaptiveRestartDE +try: # RefinedEnhancedEliteGuidedAdaptiveRestartDE + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedAdaptiveRestartDE import ( + RefinedEnhancedEliteGuidedAdaptiveRestartDE, + ) lama_register["RefinedEnhancedEliteGuidedAdaptiveRestartDE"] = RefinedEnhancedEliteGuidedAdaptiveRestartDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE").set_name("LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE = NonObjectOptimizer( + method="LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE" + ).set_name("LLAMARefinedEnhancedEliteGuidedAdaptiveRestartDE", register=True) +except Exception as e: # RefinedEnhancedEliteGuidedAdaptiveRestartDE print("RefinedEnhancedEliteGuidedAdaptiveRestartDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedMassQGSA_v87 import RefinedEnhancedEliteGuidedMassQGSA_v87 +try: # RefinedEnhancedEliteGuidedMassQGSA_v87 + from nevergrad.optimization.lama.RefinedEnhancedEliteGuidedMassQGSA_v87 import ( + RefinedEnhancedEliteGuidedMassQGSA_v87, + ) lama_register["RefinedEnhancedEliteGuidedMassQGSA_v87"] = RefinedEnhancedEliteGuidedMassQGSA_v87 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedEliteGuidedMassQGSA_v87 = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87").set_name("LLAMARefinedEnhancedEliteGuidedMassQGSA_v87", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedEliteGuidedMassQGSA_v87 = NonObjectOptimizer( + method="LLAMARefinedEnhancedEliteGuidedMassQGSA_v87" + ).set_name("LLAMARefinedEnhancedEliteGuidedMassQGSA_v87", register=True) +except Exception as e: # RefinedEnhancedEliteGuidedMassQGSA_v87 print("RefinedEnhancedEliteGuidedMassQGSA_v87 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHybridAdaptiveMultiStageOptimization import RefinedEnhancedHybridAdaptiveMultiStageOptimization - - lama_register["RefinedEnhancedHybridAdaptiveMultiStageOptimization"] = RefinedEnhancedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: +try: # RefinedEnhancedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.RefinedEnhancedHybridAdaptiveMultiStageOptimization import ( + RefinedEnhancedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["RefinedEnhancedHybridAdaptiveMultiStageOptimization"] = ( + RefinedEnhancedHybridAdaptiveMultiStageOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedEnhancedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # RefinedEnhancedHybridAdaptiveMultiStageOptimization print("RefinedEnhancedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 import RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 - - lama_register["RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3"] = RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3").set_name("LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) -except Exception as e: +try: # RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 + from nevergrad.optimization.lama.RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 import ( + RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3, + ) + + lama_register["RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3"] = ( + RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3" + ).set_name("LLAMARefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3", register=True) +except Exception as e: # RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 print("RefinedEnhancedHybridCovarianceMatrixDifferentialEvolutionV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 import RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 - - lama_register["RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2"] = RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2").set_name("LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2", register=True) -except Exception as e: +try: # RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 + from nevergrad.optimization.lama.RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 import ( + RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2, + ) + + lama_register["RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2"] = ( + RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2" + ).set_name("LLAMARefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2", register=True) +except Exception as e: # RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 print("RefinedEnhancedHybridDEPSOWithQuantumLevyFlightV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHybridExplorationOptimization import RefinedEnhancedHybridExplorationOptimization - - lama_register["RefinedEnhancedHybridExplorationOptimization"] = RefinedEnhancedHybridExplorationOptimization - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHybridExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridExplorationOptimization").set_name("LLAMARefinedEnhancedHybridExplorationOptimization", register=True) -except Exception as e: +try: # RefinedEnhancedHybridExplorationOptimization + from nevergrad.optimization.lama.RefinedEnhancedHybridExplorationOptimization import ( + RefinedEnhancedHybridExplorationOptimization, + ) + + lama_register["RefinedEnhancedHybridExplorationOptimization"] = ( + RefinedEnhancedHybridExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHybridExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHybridExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedEnhancedHybridExplorationOptimization" + ).set_name("LLAMARefinedEnhancedHybridExplorationOptimization", register=True) +except Exception as e: # RefinedEnhancedHybridExplorationOptimization print("RefinedEnhancedHybridExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHyperAdaptiveHybridDEPSO import RefinedEnhancedHyperAdaptiveHybridDEPSO +try: # RefinedEnhancedHyperAdaptiveHybridDEPSO + from nevergrad.optimization.lama.RefinedEnhancedHyperAdaptiveHybridDEPSO import ( + RefinedEnhancedHyperAdaptiveHybridDEPSO, + ) lama_register["RefinedEnhancedHyperAdaptiveHybridDEPSO"] = RefinedEnhancedHyperAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO").set_name("LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedEnhancedHyperAdaptiveHybridDEPSO", register=True) +except Exception as e: # RefinedEnhancedHyperAdaptiveHybridDEPSO print("RefinedEnhancedHyperAdaptiveHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 import RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 - - lama_register["RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63"] = RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63").set_name("LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63", register=True) -except Exception as e: +try: # RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 + from nevergrad.optimization.lama.RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 import ( + RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63, + ) + + lama_register["RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63"] = ( + RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63" + ).set_name("LLAMARefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63", register=True) +except Exception as e: # RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 print("RefinedEnhancedHyperOptimizedEvolutionaryGradientOptimizerV63 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedHyperStrategicOptimizerV57 import RefinedEnhancedHyperStrategicOptimizerV57 +try: # RefinedEnhancedHyperStrategicOptimizerV57 + from nevergrad.optimization.lama.RefinedEnhancedHyperStrategicOptimizerV57 import ( + RefinedEnhancedHyperStrategicOptimizerV57, + ) lama_register["RefinedEnhancedHyperStrategicOptimizerV57"] = RefinedEnhancedHyperStrategicOptimizerV57 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperStrategicOptimizerV57")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedHyperStrategicOptimizerV57 = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperStrategicOptimizerV57").set_name("LLAMARefinedEnhancedHyperStrategicOptimizerV57", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedHyperStrategicOptimizerV57")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedHyperStrategicOptimizerV57 = NonObjectOptimizer( + method="LLAMARefinedEnhancedHyperStrategicOptimizerV57" + ).set_name("LLAMARefinedEnhancedHyperStrategicOptimizerV57", register=True) +except Exception as e: # RefinedEnhancedHyperStrategicOptimizerV57 print("RefinedEnhancedHyperStrategicOptimizerV57 can not be imported: ", e) -try: +try: # RefinedEnhancedMetaNetAQAPSOv7 from nevergrad.optimization.lama.RefinedEnhancedMetaNetAQAPSOv7 import RefinedEnhancedMetaNetAQAPSOv7 lama_register["RefinedEnhancedMetaNetAQAPSOv7"] = RefinedEnhancedMetaNetAQAPSOv7 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedMetaNetAQAPSOv7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedMetaNetAQAPSOv7 = NonObjectOptimizer(method="LLAMARefinedEnhancedMetaNetAQAPSOv7").set_name("LLAMARefinedEnhancedMetaNetAQAPSOv7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedMetaNetAQAPSOv7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedMetaNetAQAPSOv7 = NonObjectOptimizer( + method="LLAMARefinedEnhancedMetaNetAQAPSOv7" + ).set_name("LLAMARefinedEnhancedMetaNetAQAPSOv7", register=True) +except Exception as e: # RefinedEnhancedMetaNetAQAPSOv7 print("RefinedEnhancedMetaNetAQAPSOv7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedOptimizedEvolutiveStrategy import RefinedEnhancedOptimizedEvolutiveStrategy +try: # RefinedEnhancedOptimizedEvolutiveStrategy + from nevergrad.optimization.lama.RefinedEnhancedOptimizedEvolutiveStrategy import ( + RefinedEnhancedOptimizedEvolutiveStrategy, + ) lama_register["RefinedEnhancedOptimizedEvolutiveStrategy"] = RefinedEnhancedOptimizedEvolutiveStrategy - res = NonObjectOptimizer(method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer(method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy").set_name("LLAMARefinedEnhancedOptimizedEvolutiveStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedOptimizedEvolutiveStrategy = NonObjectOptimizer( + method="LLAMARefinedEnhancedOptimizedEvolutiveStrategy" + ).set_name("LLAMARefinedEnhancedOptimizedEvolutiveStrategy", register=True) +except Exception as e: # RefinedEnhancedOptimizedEvolutiveStrategy print("RefinedEnhancedOptimizedEvolutiveStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedPrecisionEvolutionaryOptimizerV40 import RefinedEnhancedPrecisionEvolutionaryOptimizerV40 - - lama_register["RefinedEnhancedPrecisionEvolutionaryOptimizerV40"] = RefinedEnhancedPrecisionEvolutionaryOptimizerV40 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40 = NonObjectOptimizer(method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40").set_name("LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40", register=True) -except Exception as e: +try: # RefinedEnhancedPrecisionEvolutionaryOptimizerV40 + from nevergrad.optimization.lama.RefinedEnhancedPrecisionEvolutionaryOptimizerV40 import ( + RefinedEnhancedPrecisionEvolutionaryOptimizerV40, + ) + + lama_register["RefinedEnhancedPrecisionEvolutionaryOptimizerV40"] = ( + RefinedEnhancedPrecisionEvolutionaryOptimizerV40 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40 = NonObjectOptimizer( + method="LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40" + ).set_name("LLAMARefinedEnhancedPrecisionEvolutionaryOptimizerV40", register=True) +except Exception as e: # RefinedEnhancedPrecisionEvolutionaryOptimizerV40 print("RefinedEnhancedPrecisionEvolutionaryOptimizerV40 can not be imported: ", e) -try: +try: # RefinedEnhancedQAPSOAIRVCHRLS from nevergrad.optimization.lama.RefinedEnhancedQAPSOAIRVCHRLS import RefinedEnhancedQAPSOAIRVCHRLS lama_register["RefinedEnhancedQAPSOAIRVCHRLS"] = RefinedEnhancedQAPSOAIRVCHRLS - res = NonObjectOptimizer(method="LLAMARefinedEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer(method="LLAMARefinedEnhancedQAPSOAIRVCHRLS").set_name("LLAMARefinedEnhancedQAPSOAIRVCHRLS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedQAPSOAIRVCHRLS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedQAPSOAIRVCHRLS = NonObjectOptimizer( + method="LLAMARefinedEnhancedQAPSOAIRVCHRLS" + ).set_name("LLAMARefinedEnhancedQAPSOAIRVCHRLS", register=True) +except Exception as e: # RefinedEnhancedQAPSOAIRVCHRLS print("RefinedEnhancedQAPSOAIRVCHRLS can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 import RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 - - lama_register["RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2"] = RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2").set_name("LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2", register=True) -except Exception as e: +try: # RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 + from nevergrad.optimization.lama.RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 import ( + RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2, + ) + + lama_register["RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2"] = ( + RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2" + ).set_name("LLAMARefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2", register=True) +except Exception as e: # RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 print("RefinedEnhancedQuantumCovarianceMatrixDifferentialEvolutionV2 can not be imported: ", e) -try: +try: # RefinedEnhancedRAMEDSProV3 from nevergrad.optimization.lama.RefinedEnhancedRAMEDSProV3 import RefinedEnhancedRAMEDSProV3 lama_register["RefinedEnhancedRAMEDSProV3"] = RefinedEnhancedRAMEDSProV3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedRAMEDSProV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3").set_name("LLAMARefinedEnhancedRAMEDSProV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedRAMEDSProV3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSProV3").set_name( + "LLAMARefinedEnhancedRAMEDSProV3", register=True + ) +except Exception as e: # RefinedEnhancedRAMEDSProV3 print("RefinedEnhancedRAMEDSProV3 can not be imported: ", e) -try: +try: # RefinedEnhancedRAMEDSv3 from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv3 import RefinedEnhancedRAMEDSv3 lama_register["RefinedEnhancedRAMEDSv3"] = RefinedEnhancedRAMEDSv3 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3").set_name("LLAMARefinedEnhancedRAMEDSv3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedRAMEDSv3 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv3").set_name( + "LLAMARefinedEnhancedRAMEDSv3", register=True + ) +except Exception as e: # RefinedEnhancedRAMEDSv3 print("RefinedEnhancedRAMEDSv3 can not be imported: ", e) -try: +try: # RefinedEnhancedRAMEDSv4 from nevergrad.optimization.lama.RefinedEnhancedRAMEDSv4 import RefinedEnhancedRAMEDSv4 lama_register["RefinedEnhancedRAMEDSv4"] = RefinedEnhancedRAMEDSv4 - res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4").set_name("LLAMARefinedEnhancedRAMEDSv4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedRAMEDSv4 = NonObjectOptimizer(method="LLAMARefinedEnhancedRAMEDSv4").set_name( + "LLAMARefinedEnhancedRAMEDSv4", register=True + ) +except Exception as e: # RefinedEnhancedRAMEDSv4 print("RefinedEnhancedRAMEDSv4 can not be imported: ", e) -try: +try: # RefinedEnhancedStrategyDE from nevergrad.optimization.lama.RefinedEnhancedStrategyDE import RefinedEnhancedStrategyDE lama_register["RefinedEnhancedStrategyDE"] = RefinedEnhancedStrategyDE - res = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE").set_name("LLAMARefinedEnhancedStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedStrategyDE = NonObjectOptimizer(method="LLAMARefinedEnhancedStrategyDE").set_name( + "LLAMARefinedEnhancedStrategyDE", register=True + ) +except Exception as e: # RefinedEnhancedStrategyDE print("RefinedEnhancedStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEnhancedUltraRefinedRAMEDS import RefinedEnhancedUltraRefinedRAMEDS +try: # RefinedEnhancedUltraRefinedRAMEDS + from nevergrad.optimization.lama.RefinedEnhancedUltraRefinedRAMEDS import ( + RefinedEnhancedUltraRefinedRAMEDS, + ) lama_register["RefinedEnhancedUltraRefinedRAMEDS"] = RefinedEnhancedUltraRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMARefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedEnhancedUltraRefinedRAMEDS").set_name("LLAMARefinedEnhancedUltraRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnhancedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnhancedUltraRefinedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedEnhancedUltraRefinedRAMEDS" + ).set_name("LLAMARefinedEnhancedUltraRefinedRAMEDS", register=True) +except Exception as e: # RefinedEnhancedUltraRefinedRAMEDS print("RefinedEnhancedUltraRefinedRAMEDS can not be imported: ", e) -try: +try: # RefinedEnsembleAdaptiveQuantumDE from nevergrad.optimization.lama.RefinedEnsembleAdaptiveQuantumDE import RefinedEnsembleAdaptiveQuantumDE lama_register["RefinedEnsembleAdaptiveQuantumDE"] = RefinedEnsembleAdaptiveQuantumDE - res = NonObjectOptimizer(method="LLAMARefinedEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEnsembleAdaptiveQuantumDE = NonObjectOptimizer(method="LLAMARefinedEnsembleAdaptiveQuantumDE").set_name("LLAMARefinedEnsembleAdaptiveQuantumDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEnsembleAdaptiveQuantumDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEnsembleAdaptiveQuantumDE = NonObjectOptimizer( + method="LLAMARefinedEnsembleAdaptiveQuantumDE" + ).set_name("LLAMARefinedEnsembleAdaptiveQuantumDE", register=True) +except Exception as e: # RefinedEnsembleAdaptiveQuantumDE print("RefinedEnsembleAdaptiveQuantumDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEvolutionaryGradientHybridOptimizerV3 import RefinedEvolutionaryGradientHybridOptimizerV3 - - lama_register["RefinedEvolutionaryGradientHybridOptimizerV3"] = RefinedEvolutionaryGradientHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEvolutionaryGradientHybridOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3").set_name("LLAMARefinedEvolutionaryGradientHybridOptimizerV3", register=True) -except Exception as e: +try: # RefinedEvolutionaryGradientHybridOptimizerV3 + from nevergrad.optimization.lama.RefinedEvolutionaryGradientHybridOptimizerV3 import ( + RefinedEvolutionaryGradientHybridOptimizerV3, + ) + + lama_register["RefinedEvolutionaryGradientHybridOptimizerV3"] = ( + RefinedEvolutionaryGradientHybridOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEvolutionaryGradientHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedEvolutionaryGradientHybridOptimizerV3" + ).set_name("LLAMARefinedEvolutionaryGradientHybridOptimizerV3", register=True) +except Exception as e: # RefinedEvolutionaryGradientHybridOptimizerV3 print("RefinedEvolutionaryGradientHybridOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedEvolutionaryTuningStrategy import RefinedEvolutionaryTuningStrategy +try: # RefinedEvolutionaryTuningStrategy + from nevergrad.optimization.lama.RefinedEvolutionaryTuningStrategy import ( + RefinedEvolutionaryTuningStrategy, + ) lama_register["RefinedEvolutionaryTuningStrategy"] = RefinedEvolutionaryTuningStrategy - res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryTuningStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedEvolutionaryTuningStrategy = NonObjectOptimizer(method="LLAMARefinedEvolutionaryTuningStrategy").set_name("LLAMARefinedEvolutionaryTuningStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedEvolutionaryTuningStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedEvolutionaryTuningStrategy = NonObjectOptimizer( + method="LLAMARefinedEvolutionaryTuningStrategy" + ).set_name("LLAMARefinedEvolutionaryTuningStrategy", register=True) +except Exception as e: # RefinedEvolutionaryTuningStrategy print("RefinedEvolutionaryTuningStrategy can not be imported: ", e) -try: +try: # RefinedGlobalClimbingOptimizerV2 from nevergrad.optimization.lama.RefinedGlobalClimbingOptimizerV2 import RefinedGlobalClimbingOptimizerV2 lama_register["RefinedGlobalClimbingOptimizerV2"] = RefinedGlobalClimbingOptimizerV2 - res = NonObjectOptimizer(method="LLAMARefinedGlobalClimbingOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGlobalClimbingOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedGlobalClimbingOptimizerV2").set_name("LLAMARefinedGlobalClimbingOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGlobalClimbingOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGlobalClimbingOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalClimbingOptimizerV2" + ).set_name("LLAMARefinedGlobalClimbingOptimizerV2", register=True) +except Exception as e: # RefinedGlobalClimbingOptimizerV2 print("RefinedGlobalClimbingOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGlobalLocalBalancingOptimizer import RefinedGlobalLocalBalancingOptimizer +try: # RefinedGlobalLocalBalancingOptimizer + from nevergrad.optimization.lama.RefinedGlobalLocalBalancingOptimizer import ( + RefinedGlobalLocalBalancingOptimizer, + ) lama_register["RefinedGlobalLocalBalancingOptimizer"] = RefinedGlobalLocalBalancingOptimizer - res = NonObjectOptimizer(method="LLAMARefinedGlobalLocalBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGlobalLocalBalancingOptimizer = NonObjectOptimizer(method="LLAMARefinedGlobalLocalBalancingOptimizer").set_name("LLAMARefinedGlobalLocalBalancingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGlobalLocalBalancingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGlobalLocalBalancingOptimizer = NonObjectOptimizer( + method="LLAMARefinedGlobalLocalBalancingOptimizer" + ).set_name("LLAMARefinedGlobalLocalBalancingOptimizer", register=True) +except Exception as e: # RefinedGlobalLocalBalancingOptimizer print("RefinedGlobalLocalBalancingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGlobalStructureAdaptiveEvolverV2 import RefinedGlobalStructureAdaptiveEvolverV2 +try: # RefinedGlobalStructureAdaptiveEvolverV2 + from nevergrad.optimization.lama.RefinedGlobalStructureAdaptiveEvolverV2 import ( + RefinedGlobalStructureAdaptiveEvolverV2, + ) lama_register["RefinedGlobalStructureAdaptiveEvolverV2"] = RefinedGlobalStructureAdaptiveEvolverV2 - res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAdaptiveEvolverV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGlobalStructureAdaptiveEvolverV2 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAdaptiveEvolverV2").set_name("LLAMARefinedGlobalStructureAdaptiveEvolverV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAdaptiveEvolverV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGlobalStructureAdaptiveEvolverV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAdaptiveEvolverV2" + ).set_name("LLAMARefinedGlobalStructureAdaptiveEvolverV2", register=True) +except Exception as e: # RefinedGlobalStructureAdaptiveEvolverV2 print("RefinedGlobalStructureAdaptiveEvolverV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV2 import RefinedGlobalStructureAwareOptimizerV2 +try: # RefinedGlobalStructureAwareOptimizerV2 + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV2 import ( + RefinedGlobalStructureAwareOptimizerV2, + ) lama_register["RefinedGlobalStructureAwareOptimizerV2"] = RefinedGlobalStructureAwareOptimizerV2 - res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGlobalStructureAwareOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV2").set_name("LLAMARefinedGlobalStructureAwareOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGlobalStructureAwareOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAwareOptimizerV2" + ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV2", register=True) +except Exception as e: # RefinedGlobalStructureAwareOptimizerV2 print("RefinedGlobalStructureAwareOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV3 import RefinedGlobalStructureAwareOptimizerV3 +try: # RefinedGlobalStructureAwareOptimizerV3 + from nevergrad.optimization.lama.RefinedGlobalStructureAwareOptimizerV3 import ( + RefinedGlobalStructureAwareOptimizerV3, + ) lama_register["RefinedGlobalStructureAwareOptimizerV3"] = RefinedGlobalStructureAwareOptimizerV3 - res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV3").set_name("LLAMARefinedGlobalStructureAwareOptimizerV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGlobalStructureAwareOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGlobalStructureAwareOptimizerV3 = NonObjectOptimizer( + method="LLAMARefinedGlobalStructureAwareOptimizerV3" + ).set_name("LLAMARefinedGlobalStructureAwareOptimizerV3", register=True) +except Exception as e: # RefinedGlobalStructureAwareOptimizerV3 print("RefinedGlobalStructureAwareOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientBalancedExplorationPSO import RefinedGradientBalancedExplorationPSO +try: # RefinedGradientBalancedExplorationPSO + from nevergrad.optimization.lama.RefinedGradientBalancedExplorationPSO import ( + RefinedGradientBalancedExplorationPSO, + ) lama_register["RefinedGradientBalancedExplorationPSO"] = RefinedGradientBalancedExplorationPSO - res = NonObjectOptimizer(method="LLAMARefinedGradientBalancedExplorationPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBalancedExplorationPSO = NonObjectOptimizer(method="LLAMARefinedGradientBalancedExplorationPSO").set_name("LLAMARefinedGradientBalancedExplorationPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGradientBalancedExplorationPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBalancedExplorationPSO = NonObjectOptimizer( + method="LLAMARefinedGradientBalancedExplorationPSO" + ).set_name("LLAMARefinedGradientBalancedExplorationPSO", register=True) +except Exception as e: # RefinedGradientBalancedExplorationPSO print("RefinedGradientBalancedExplorationPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration import RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration - - lama_register["RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration"] = RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration - res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration = NonObjectOptimizer(method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration").set_name("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration", register=True) -except Exception as e: +try: # RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration + from nevergrad.optimization.lama.RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration import ( + RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration, + ) + + lama_register["RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration"] = ( + RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration + ) + # res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration" + ).set_name("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration", register=True) +except Exception as e: # RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration print("RefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemoryAnnealing import RefinedGradientBoostedMemoryAnnealing +try: # RefinedGradientBoostedMemoryAnnealing + from nevergrad.optimization.lama.RefinedGradientBoostedMemoryAnnealing import ( + RefinedGradientBoostedMemoryAnnealing, + ) lama_register["RefinedGradientBoostedMemoryAnnealing"] = RefinedGradientBoostedMemoryAnnealing - res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemoryAnnealing").set_name("LLAMARefinedGradientBoostedMemoryAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemoryAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBoostedMemoryAnnealing = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemoryAnnealing" + ).set_name("LLAMARefinedGradientBoostedMemoryAnnealing", register=True) +except Exception as e: # RefinedGradientBoostedMemoryAnnealing print("RefinedGradientBoostedMemoryAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealing import RefinedGradientBoostedMemorySimulatedAnnealing - - lama_register["RefinedGradientBoostedMemorySimulatedAnnealing"] = RefinedGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # RefinedGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealing import ( + RefinedGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # RefinedGradientBoostedMemorySimulatedAnnealing print("RefinedGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealingPlus import RefinedGradientBoostedMemorySimulatedAnnealingPlus - - lama_register["RefinedGradientBoostedMemorySimulatedAnnealingPlus"] = RefinedGradientBoostedMemorySimulatedAnnealingPlus - res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus").set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus", register=True) -except Exception as e: +try: # RefinedGradientBoostedMemorySimulatedAnnealingPlus + from nevergrad.optimization.lama.RefinedGradientBoostedMemorySimulatedAnnealingPlus import ( + RefinedGradientBoostedMemorySimulatedAnnealingPlus, + ) + + lama_register["RefinedGradientBoostedMemorySimulatedAnnealingPlus"] = ( + RefinedGradientBoostedMemorySimulatedAnnealingPlus + ) + # res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus" + ).set_name("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus", register=True) +except Exception as e: # RefinedGradientBoostedMemorySimulatedAnnealingPlus print("RefinedGradientBoostedMemorySimulatedAnnealingPlus can not be imported: ", e) -try: +try: # RefinedGradientBoostedOptimizer from nevergrad.optimization.lama.RefinedGradientBoostedOptimizer import RefinedGradientBoostedOptimizer lama_register["RefinedGradientBoostedOptimizer"] = RefinedGradientBoostedOptimizer - res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientBoostedOptimizer = NonObjectOptimizer(method="LLAMARefinedGradientBoostedOptimizer").set_name("LLAMARefinedGradientBoostedOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGradientBoostedOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientBoostedOptimizer = NonObjectOptimizer( + method="LLAMARefinedGradientBoostedOptimizer" + ).set_name("LLAMARefinedGradientBoostedOptimizer", register=True) +except Exception as e: # RefinedGradientBoostedOptimizer print("RefinedGradientBoostedOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedGradientGuidedEvolutionStrategy import RefinedGradientGuidedEvolutionStrategy +try: # RefinedGradientGuidedEvolutionStrategy + from nevergrad.optimization.lama.RefinedGradientGuidedEvolutionStrategy import ( + RefinedGradientGuidedEvolutionStrategy, + ) lama_register["RefinedGradientGuidedEvolutionStrategy"] = RefinedGradientGuidedEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedGradientGuidedEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedGradientGuidedEvolutionStrategy").set_name("LLAMARefinedGradientGuidedEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedGradientGuidedEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedGradientGuidedEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedGradientGuidedEvolutionStrategy" + ).set_name("LLAMARefinedGradientGuidedEvolutionStrategy", register=True) +except Exception as e: # RefinedGradientGuidedEvolutionStrategy print("RefinedGradientGuidedEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution import RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution - - lama_register["RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution"] = RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution import ( + RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution print("RefinedHybridAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveDifferentialEvolution import RefinedHybridAdaptiveDifferentialEvolution +try: # RefinedHybridAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RefinedHybridAdaptiveDifferentialEvolution import ( + RefinedHybridAdaptiveDifferentialEvolution, + ) lama_register["RefinedHybridAdaptiveDifferentialEvolution"] = RefinedHybridAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveDifferentialEvolution").set_name("LLAMARefinedHybridAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedHybridAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RefinedHybridAdaptiveDifferentialEvolution print("RefinedHybridAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # RefinedHybridAdaptiveGradientPSO from nevergrad.optimization.lama.RefinedHybridAdaptiveGradientPSO import RefinedHybridAdaptiveGradientPSO lama_register["RefinedHybridAdaptiveGradientPSO"] = RefinedHybridAdaptiveGradientPSO - res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridAdaptiveGradientPSO = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveGradientPSO").set_name("LLAMARefinedHybridAdaptiveGradientPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveGradientPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridAdaptiveGradientPSO = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveGradientPSO" + ).set_name("LLAMARefinedHybridAdaptiveGradientPSO", register=True) +except Exception as e: # RefinedHybridAdaptiveGradientPSO print("RefinedHybridAdaptiveGradientPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridAdaptiveMultiStageOptimization import RefinedHybridAdaptiveMultiStageOptimization +try: # RefinedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.RefinedHybridAdaptiveMultiStageOptimization import ( + RefinedHybridAdaptiveMultiStageOptimization, + ) lama_register["RefinedHybridAdaptiveMultiStageOptimization"] = RefinedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # RefinedHybridAdaptiveMultiStageOptimization print("RefinedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridCovarianceMatrixDifferentialEvolution import RefinedHybridCovarianceMatrixDifferentialEvolution - - lama_register["RefinedHybridCovarianceMatrixDifferentialEvolution"] = RefinedHybridCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution").set_name("LLAMARefinedHybridCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedHybridCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.RefinedHybridCovarianceMatrixDifferentialEvolution import ( + RefinedHybridCovarianceMatrixDifferentialEvolution, + ) + + lama_register["RefinedHybridCovarianceMatrixDifferentialEvolution"] = ( + RefinedHybridCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMARefinedHybridCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # RefinedHybridCovarianceMatrixDifferentialEvolution print("RefinedHybridCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: +try: # RefinedHybridDEPSO from nevergrad.optimization.lama.RefinedHybridDEPSO import RefinedHybridDEPSO lama_register["RefinedHybridDEPSO"] = RefinedHybridDEPSO - res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO").set_name("LLAMARefinedHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedHybridDEPSO").set_name( + "LLAMARefinedHybridDEPSO", register=True + ) +except Exception as e: # RefinedHybridDEPSO print("RefinedHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridDEPSOWithAdaptiveMemoryV4 import RefinedHybridDEPSOWithAdaptiveMemoryV4 +try: # RefinedHybridDEPSOWithAdaptiveMemoryV4 + from nevergrad.optimization.lama.RefinedHybridDEPSOWithAdaptiveMemoryV4 import ( + RefinedHybridDEPSOWithAdaptiveMemoryV4, + ) lama_register["RefinedHybridDEPSOWithAdaptiveMemoryV4"] = RefinedHybridDEPSOWithAdaptiveMemoryV4 - res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4 = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4").set_name("LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4 = NonObjectOptimizer( + method="LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4" + ).set_name("LLAMARefinedHybridDEPSOWithAdaptiveMemoryV4", register=True) +except Exception as e: # RefinedHybridDEPSOWithAdaptiveMemoryV4 print("RefinedHybridDEPSOWithAdaptiveMemoryV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridDEPSOWithDynamicAdaptationV3 import RefinedHybridDEPSOWithDynamicAdaptationV3 +try: # RefinedHybridDEPSOWithDynamicAdaptationV3 + from nevergrad.optimization.lama.RefinedHybridDEPSOWithDynamicAdaptationV3 import ( + RefinedHybridDEPSOWithDynamicAdaptationV3, + ) lama_register["RefinedHybridDEPSOWithDynamicAdaptationV3"] = RefinedHybridDEPSOWithDynamicAdaptationV3 - res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridDEPSOWithDynamicAdaptationV3 = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3").set_name("LLAMARefinedHybridDEPSOWithDynamicAdaptationV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridDEPSOWithDynamicAdaptationV3 = NonObjectOptimizer( + method="LLAMARefinedHybridDEPSOWithDynamicAdaptationV3" + ).set_name("LLAMARefinedHybridDEPSOWithDynamicAdaptationV3", register=True) +except Exception as e: # RefinedHybridDEPSOWithDynamicAdaptationV3 print("RefinedHybridDEPSOWithDynamicAdaptationV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridDualPhaseParticleSwarmDifferentialEvolution import RefinedHybridDualPhaseParticleSwarmDifferentialEvolution - - lama_register["RefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = RefinedHybridDualPhaseParticleSwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution").set_name("LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedHybridDualPhaseParticleSwarmDifferentialEvolution + from nevergrad.optimization.lama.RefinedHybridDualPhaseParticleSwarmDifferentialEvolution import ( + RefinedHybridDualPhaseParticleSwarmDifferentialEvolution, + ) + + lama_register["RefinedHybridDualPhaseParticleSwarmDifferentialEvolution"] = ( + RefinedHybridDualPhaseParticleSwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution" + ).set_name("LLAMARefinedHybridDualPhaseParticleSwarmDifferentialEvolution", register=True) +except Exception as e: # RefinedHybridDualPhaseParticleSwarmDifferentialEvolution print("RefinedHybridDualPhaseParticleSwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridDynamicClusterOptimization import RefinedHybridDynamicClusterOptimization +try: # RefinedHybridDynamicClusterOptimization + from nevergrad.optimization.lama.RefinedHybridDynamicClusterOptimization import ( + RefinedHybridDynamicClusterOptimization, + ) lama_register["RefinedHybridDynamicClusterOptimization"] = RefinedHybridDynamicClusterOptimization - res = NonObjectOptimizer(method="LLAMARefinedHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridDynamicClusterOptimization = NonObjectOptimizer(method="LLAMARefinedHybridDynamicClusterOptimization").set_name("LLAMARefinedHybridDynamicClusterOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridDynamicClusterOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridDynamicClusterOptimization = NonObjectOptimizer( + method="LLAMARefinedHybridDynamicClusterOptimization" + ).set_name("LLAMARefinedHybridDynamicClusterOptimization", register=True) +except Exception as e: # RefinedHybridDynamicClusterOptimization print("RefinedHybridDynamicClusterOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE import RefinedHybridEliteGuidedMutationDE +try: # RefinedHybridEliteGuidedMutationDE + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE import ( + RefinedHybridEliteGuidedMutationDE, + ) lama_register["RefinedHybridEliteGuidedMutationDE"] = RefinedHybridEliteGuidedMutationDE - res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridEliteGuidedMutationDE = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE").set_name("LLAMARefinedHybridEliteGuidedMutationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridEliteGuidedMutationDE = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE", register=True) +except Exception as e: # RefinedHybridEliteGuidedMutationDE print("RefinedHybridEliteGuidedMutationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v2 import RefinedHybridEliteGuidedMutationDE_v2 +try: # RefinedHybridEliteGuidedMutationDE_v2 + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v2 import ( + RefinedHybridEliteGuidedMutationDE_v2, + ) lama_register["RefinedHybridEliteGuidedMutationDE_v2"] = RefinedHybridEliteGuidedMutationDE_v2 - res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridEliteGuidedMutationDE_v2 = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v2").set_name("LLAMARefinedHybridEliteGuidedMutationDE_v2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridEliteGuidedMutationDE_v2 = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE_v2" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v2", register=True) +except Exception as e: # RefinedHybridEliteGuidedMutationDE_v2 print("RefinedHybridEliteGuidedMutationDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v3 import RefinedHybridEliteGuidedMutationDE_v3 +try: # RefinedHybridEliteGuidedMutationDE_v3 + from nevergrad.optimization.lama.RefinedHybridEliteGuidedMutationDE_v3 import ( + RefinedHybridEliteGuidedMutationDE_v3, + ) lama_register["RefinedHybridEliteGuidedMutationDE_v3"] = RefinedHybridEliteGuidedMutationDE_v3 - res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridEliteGuidedMutationDE_v3 = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v3").set_name("LLAMARefinedHybridEliteGuidedMutationDE_v3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridEliteGuidedMutationDE_v3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridEliteGuidedMutationDE_v3 = NonObjectOptimizer( + method="LLAMARefinedHybridEliteGuidedMutationDE_v3" + ).set_name("LLAMARefinedHybridEliteGuidedMutationDE_v3", register=True) +except Exception as e: # RefinedHybridEliteGuidedMutationDE_v3 print("RefinedHybridEliteGuidedMutationDE_v3 can not be imported: ", e) -try: +try: # RefinedHybridEvolutionStrategyV4 from nevergrad.optimization.lama.RefinedHybridEvolutionStrategyV4 import RefinedHybridEvolutionStrategyV4 lama_register["RefinedHybridEvolutionStrategyV4"] = RefinedHybridEvolutionStrategyV4 - res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridEvolutionStrategyV4 = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionStrategyV4").set_name("LLAMARefinedHybridEvolutionStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridEvolutionStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedHybridEvolutionStrategyV4" + ).set_name("LLAMARefinedHybridEvolutionStrategyV4", register=True) +except Exception as e: # RefinedHybridEvolutionStrategyV4 print("RefinedHybridEvolutionStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridEvolutionaryAnnealingOptimizer import RefinedHybridEvolutionaryAnnealingOptimizer +try: # RefinedHybridEvolutionaryAnnealingOptimizer + from nevergrad.optimization.lama.RefinedHybridEvolutionaryAnnealingOptimizer import ( + RefinedHybridEvolutionaryAnnealingOptimizer, + ) lama_register["RefinedHybridEvolutionaryAnnealingOptimizer"] = RefinedHybridEvolutionaryAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMARefinedHybridEvolutionaryAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMARefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMARefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: # RefinedHybridEvolutionaryAnnealingOptimizer print("RefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) -try: +try: # RefinedHybridOptimizer from nevergrad.optimization.lama.RefinedHybridOptimizer import RefinedHybridOptimizer lama_register["RefinedHybridOptimizer"] = RefinedHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer").set_name("LLAMARefinedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridOptimizer").set_name( + "LLAMARefinedHybridOptimizer", register=True + ) +except Exception as e: # RefinedHybridOptimizer print("RefinedHybridOptimizer can not be imported: ", e) -try: +try: # RefinedHybridPSODEOptimizer from nevergrad.optimization.lama.RefinedHybridPSODEOptimizer import RefinedHybridPSODEOptimizer lama_register["RefinedHybridPSODEOptimizer"] = RefinedHybridPSODEOptimizer - res = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer").set_name("LLAMARefinedHybridPSODEOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridPSODEOptimizer = NonObjectOptimizer(method="LLAMARefinedHybridPSODEOptimizer").set_name( + "LLAMARefinedHybridPSODEOptimizer", register=True + ) +except Exception as e: # RefinedHybridPSODEOptimizer print("RefinedHybridPSODEOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridPSODESimulatedAnnealing import RefinedHybridPSODESimulatedAnnealing +try: # RefinedHybridPSODESimulatedAnnealing + from nevergrad.optimization.lama.RefinedHybridPSODESimulatedAnnealing import ( + RefinedHybridPSODESimulatedAnnealing, + ) lama_register["RefinedHybridPSODESimulatedAnnealing"] = RefinedHybridPSODESimulatedAnnealing - res = NonObjectOptimizer(method="LLAMARefinedHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridPSODESimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedHybridPSODESimulatedAnnealing").set_name("LLAMARefinedHybridPSODESimulatedAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridPSODESimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridPSODESimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedHybridPSODESimulatedAnnealing" + ).set_name("LLAMARefinedHybridPSODESimulatedAnnealing", register=True) +except Exception as e: # RefinedHybridPSODESimulatedAnnealing print("RefinedHybridPSODESimulatedAnnealing can not be imported: ", e) -try: +try: # RefinedHybridPSO_DE from nevergrad.optimization.lama.RefinedHybridPSO_DE import RefinedHybridPSO_DE lama_register["RefinedHybridPSO_DE"] = RefinedHybridPSO_DE - res = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE").set_name("LLAMARefinedHybridPSO_DE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridPSO_DE = NonObjectOptimizer(method="LLAMARefinedHybridPSO_DE").set_name( + "LLAMARefinedHybridPSO_DE", register=True + ) +except Exception as e: # RefinedHybridPSO_DE print("RefinedHybridPSO_DE can not be imported: ", e) -try: +try: # RefinedHybridPrecisionSearch from nevergrad.optimization.lama.RefinedHybridPrecisionSearch import RefinedHybridPrecisionSearch lama_register["RefinedHybridPrecisionSearch"] = RefinedHybridPrecisionSearch - res = NonObjectOptimizer(method="LLAMARefinedHybridPrecisionSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridPrecisionSearch = NonObjectOptimizer(method="LLAMARefinedHybridPrecisionSearch").set_name("LLAMARefinedHybridPrecisionSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridPrecisionSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridPrecisionSearch = NonObjectOptimizer( + method="LLAMARefinedHybridPrecisionSearch" + ).set_name("LLAMARefinedHybridPrecisionSearch", register=True) +except Exception as e: # RefinedHybridPrecisionSearch print("RefinedHybridPrecisionSearch can not be imported: ", e) -try: +try: # RefinedHybridQuantumAdaptiveDE from nevergrad.optimization.lama.RefinedHybridQuantumAdaptiveDE import RefinedHybridQuantumAdaptiveDE lama_register["RefinedHybridQuantumAdaptiveDE"] = RefinedHybridQuantumAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridQuantumAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedHybridQuantumAdaptiveDE").set_name("LLAMARefinedHybridQuantumAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridQuantumAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedHybridQuantumAdaptiveDE" + ).set_name("LLAMARefinedHybridQuantumAdaptiveDE", register=True) +except Exception as e: # RefinedHybridQuantumAdaptiveDE print("RefinedHybridQuantumAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridQuantumLevyAdaptiveSwarm import RefinedHybridQuantumLevyAdaptiveSwarm +try: # RefinedHybridQuantumLevyAdaptiveSwarm + from nevergrad.optimization.lama.RefinedHybridQuantumLevyAdaptiveSwarm import ( + RefinedHybridQuantumLevyAdaptiveSwarm, + ) lama_register["RefinedHybridQuantumLevyAdaptiveSwarm"] = RefinedHybridQuantumLevyAdaptiveSwarm - res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridQuantumLevyAdaptiveSwarm = NonObjectOptimizer(method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm").set_name("LLAMARefinedHybridQuantumLevyAdaptiveSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridQuantumLevyAdaptiveSwarm = NonObjectOptimizer( + method="LLAMARefinedHybridQuantumLevyAdaptiveSwarm" + ).set_name("LLAMARefinedHybridQuantumLevyAdaptiveSwarm", register=True) +except Exception as e: # RefinedHybridQuantumLevyAdaptiveSwarm print("RefinedHybridQuantumLevyAdaptiveSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHybridQuasiRandomDEGradientAnnealing import RefinedHybridQuasiRandomDEGradientAnnealing +try: # RefinedHybridQuasiRandomDEGradientAnnealing + from nevergrad.optimization.lama.RefinedHybridQuasiRandomDEGradientAnnealing import ( + RefinedHybridQuasiRandomDEGradientAnnealing, + ) lama_register["RefinedHybridQuasiRandomDEGradientAnnealing"] = RefinedHybridQuasiRandomDEGradientAnnealing - res = NonObjectOptimizer(method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer(method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing").set_name("LLAMARefinedHybridQuasiRandomDEGradientAnnealing", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHybridQuasiRandomDEGradientAnnealing = NonObjectOptimizer( + method="LLAMARefinedHybridQuasiRandomDEGradientAnnealing" + ).set_name("LLAMARefinedHybridQuasiRandomDEGradientAnnealing", register=True) +except Exception as e: # RefinedHybridQuasiRandomDEGradientAnnealing print("RefinedHybridQuasiRandomDEGradientAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 import RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 - - lama_register["RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2"] = RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 - res = NonObjectOptimizer(method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 = NonObjectOptimizer(method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2").set_name("LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2", register=True) -except Exception as e: +try: # RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 + from nevergrad.optimization.lama.RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 import ( + RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2, + ) + + lama_register["RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2"] = ( + RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 = NonObjectOptimizer( + method="LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2" + ).set_name("LLAMARefinedHyperAdaptiveSinusoidalDifferentialSwarmV2", register=True) +except Exception as e: # RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 print("RefinedHyperAdaptiveSinusoidalDifferentialSwarmV2 can not be imported: ", e) -try: +try: # RefinedHyperEvolvedDynamicRAMEDS from nevergrad.optimization.lama.RefinedHyperEvolvedDynamicRAMEDS import RefinedHyperEvolvedDynamicRAMEDS lama_register["RefinedHyperEvolvedDynamicRAMEDS"] = RefinedHyperEvolvedDynamicRAMEDS - res = NonObjectOptimizer(method="LLAMARefinedHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperEvolvedDynamicRAMEDS = NonObjectOptimizer(method="LLAMARefinedHyperEvolvedDynamicRAMEDS").set_name("LLAMARefinedHyperEvolvedDynamicRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHyperEvolvedDynamicRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperEvolvedDynamicRAMEDS = NonObjectOptimizer( + method="LLAMARefinedHyperEvolvedDynamicRAMEDS" + ).set_name("LLAMARefinedHyperEvolvedDynamicRAMEDS", register=True) +except Exception as e: # RefinedHyperEvolvedDynamicRAMEDS print("RefinedHyperEvolvedDynamicRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperOptimizedDynamicPrecisionOptimizer import RefinedHyperOptimizedDynamicPrecisionOptimizer - - lama_register["RefinedHyperOptimizedDynamicPrecisionOptimizer"] = RefinedHyperOptimizedDynamicPrecisionOptimizer - res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer").set_name("LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer", register=True) -except Exception as e: +try: # RefinedHyperOptimizedDynamicPrecisionOptimizer + from nevergrad.optimization.lama.RefinedHyperOptimizedDynamicPrecisionOptimizer import ( + RefinedHyperOptimizedDynamicPrecisionOptimizer, + ) + + lama_register["RefinedHyperOptimizedDynamicPrecisionOptimizer"] = ( + RefinedHyperOptimizedDynamicPrecisionOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer = NonObjectOptimizer( + method="LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer" + ).set_name("LLAMARefinedHyperOptimizedDynamicPrecisionOptimizer", register=True) +except Exception as e: # RefinedHyperOptimizedDynamicPrecisionOptimizer print("RefinedHyperOptimizedDynamicPrecisionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperOptimizedThermalEvolutionaryOptimizer import RefinedHyperOptimizedThermalEvolutionaryOptimizer - - lama_register["RefinedHyperOptimizedThermalEvolutionaryOptimizer"] = RefinedHyperOptimizedThermalEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer").set_name("LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) -except Exception as e: +try: # RefinedHyperOptimizedThermalEvolutionaryOptimizer + from nevergrad.optimization.lama.RefinedHyperOptimizedThermalEvolutionaryOptimizer import ( + RefinedHyperOptimizedThermalEvolutionaryOptimizer, + ) + + lama_register["RefinedHyperOptimizedThermalEvolutionaryOptimizer"] = ( + RefinedHyperOptimizedThermalEvolutionaryOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer" + ).set_name("LLAMARefinedHyperOptimizedThermalEvolutionaryOptimizer", register=True) +except Exception as e: # RefinedHyperOptimizedThermalEvolutionaryOptimizer print("RefinedHyperOptimizedThermalEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperRefinedDynamicPrecisionOptimizerV50 import RefinedHyperRefinedDynamicPrecisionOptimizerV50 - - lama_register["RefinedHyperRefinedDynamicPrecisionOptimizerV50"] = RefinedHyperRefinedDynamicPrecisionOptimizerV50 - res = NonObjectOptimizer(method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50 = NonObjectOptimizer(method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50").set_name("LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50", register=True) -except Exception as e: +try: # RefinedHyperRefinedDynamicPrecisionOptimizerV50 + from nevergrad.optimization.lama.RefinedHyperRefinedDynamicPrecisionOptimizerV50 import ( + RefinedHyperRefinedDynamicPrecisionOptimizerV50, + ) + + lama_register["RefinedHyperRefinedDynamicPrecisionOptimizerV50"] = ( + RefinedHyperRefinedDynamicPrecisionOptimizerV50 + ) + # res = NonObjectOptimizer(method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50 = NonObjectOptimizer( + method="LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50" + ).set_name("LLAMARefinedHyperRefinedDynamicPrecisionOptimizerV50", register=True) +except Exception as e: # RefinedHyperRefinedDynamicPrecisionOptimizerV50 print("RefinedHyperRefinedDynamicPrecisionOptimizerV50 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV52 import RefinedHyperStrategicOptimizerV52 +try: # RefinedHyperStrategicOptimizerV52 + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV52 import ( + RefinedHyperStrategicOptimizerV52, + ) lama_register["RefinedHyperStrategicOptimizerV52"] = RefinedHyperStrategicOptimizerV52 - res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperStrategicOptimizerV52 = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV52").set_name("LLAMARefinedHyperStrategicOptimizerV52", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperStrategicOptimizerV52 = NonObjectOptimizer( + method="LLAMARefinedHyperStrategicOptimizerV52" + ).set_name("LLAMARefinedHyperStrategicOptimizerV52", register=True) +except Exception as e: # RefinedHyperStrategicOptimizerV52 print("RefinedHyperStrategicOptimizerV52 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV55 import RefinedHyperStrategicOptimizerV55 +try: # RefinedHyperStrategicOptimizerV55 + from nevergrad.optimization.lama.RefinedHyperStrategicOptimizerV55 import ( + RefinedHyperStrategicOptimizerV55, + ) lama_register["RefinedHyperStrategicOptimizerV55"] = RefinedHyperStrategicOptimizerV55 - res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedHyperStrategicOptimizerV55 = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV55").set_name("LLAMARefinedHyperStrategicOptimizerV55", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedHyperStrategicOptimizerV55")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedHyperStrategicOptimizerV55 = NonObjectOptimizer( + method="LLAMARefinedHyperStrategicOptimizerV55" + ).set_name("LLAMARefinedHyperStrategicOptimizerV55", register=True) +except Exception as e: # RefinedHyperStrategicOptimizerV55 print("RefinedHyperStrategicOptimizerV55 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution import RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution - - lama_register["RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution"] = RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution").set_name("LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution import ( + RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution, + ) + + lama_register["RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution"] = ( + RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedImprovedAdaptiveMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution print("RefinedImprovedAdaptiveMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 import RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 - - lama_register["RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2"] = RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 - res = NonObjectOptimizer(method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 = NonObjectOptimizer(method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2").set_name("LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2", register=True) -except Exception as e: +try: # RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 + from nevergrad.optimization.lama.RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 import ( + RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2, + ) + + lama_register["RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2"] = ( + RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 = NonObjectOptimizer( + method="LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2" + ).set_name("LLAMARefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2", register=True) +except Exception as e: # RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 print("RefinedImprovedDualPhaseAdaptiveParticleSwarmDifferentialEvolutionV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 import RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 - - lama_register["RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4"] = RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 - res = NonObjectOptimizer(method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 = NonObjectOptimizer(method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4").set_name("LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4", register=True) -except Exception as e: +try: # RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 + from nevergrad.optimization.lama.RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 import ( + RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4, + ) + + lama_register["RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4"] = ( + RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 + ) + # res = NonObjectOptimizer(method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 = NonObjectOptimizer( + method="LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4" + ).set_name("LLAMARefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4", register=True) +except Exception as e: # RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 print("RefinedImprovedDynamicHybridDEPSOWithEliteMemoryV4 can not be imported: ", e) -try: +try: # RefinedInertiaFocalOptimizer from nevergrad.optimization.lama.RefinedInertiaFocalOptimizer import RefinedInertiaFocalOptimizer lama_register["RefinedInertiaFocalOptimizer"] = RefinedInertiaFocalOptimizer - res = NonObjectOptimizer(method="LLAMARefinedInertiaFocalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedInertiaFocalOptimizer = NonObjectOptimizer(method="LLAMARefinedInertiaFocalOptimizer").set_name("LLAMARefinedInertiaFocalOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedInertiaFocalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedInertiaFocalOptimizer = NonObjectOptimizer( + method="LLAMARefinedInertiaFocalOptimizer" + ).set_name("LLAMARefinedInertiaFocalOptimizer", register=True) +except Exception as e: # RefinedInertiaFocalOptimizer print("RefinedInertiaFocalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedIntelligentEvolvingAdaptiveStrategyV35 import RefinedIntelligentEvolvingAdaptiveStrategyV35 - - lama_register["RefinedIntelligentEvolvingAdaptiveStrategyV35"] = RefinedIntelligentEvolvingAdaptiveStrategyV35 - res = NonObjectOptimizer(method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35 = NonObjectOptimizer(method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35").set_name("LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35", register=True) -except Exception as e: +try: # RefinedIntelligentEvolvingAdaptiveStrategyV35 + from nevergrad.optimization.lama.RefinedIntelligentEvolvingAdaptiveStrategyV35 import ( + RefinedIntelligentEvolvingAdaptiveStrategyV35, + ) + + lama_register["RefinedIntelligentEvolvingAdaptiveStrategyV35"] = ( + RefinedIntelligentEvolvingAdaptiveStrategyV35 + ) + # res = NonObjectOptimizer(method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35 = NonObjectOptimizer( + method="LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35" + ).set_name("LLAMARefinedIntelligentEvolvingAdaptiveStrategyV35", register=True) +except Exception as e: # RefinedIntelligentEvolvingAdaptiveStrategyV35 print("RefinedIntelligentEvolvingAdaptiveStrategyV35 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV10Plus import RefinedIslandEvolutionStrategyV10Plus +try: # RefinedIslandEvolutionStrategyV10Plus + from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV10Plus import ( + RefinedIslandEvolutionStrategyV10Plus, + ) lama_register["RefinedIslandEvolutionStrategyV10Plus"] = RefinedIslandEvolutionStrategyV10Plus - res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV10Plus")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedIslandEvolutionStrategyV10Plus = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV10Plus").set_name("LLAMARefinedIslandEvolutionStrategyV10Plus", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV10Plus")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedIslandEvolutionStrategyV10Plus = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV10Plus" + ).set_name("LLAMARefinedIslandEvolutionStrategyV10Plus", register=True) +except Exception as e: # RefinedIslandEvolutionStrategyV10Plus print("RefinedIslandEvolutionStrategyV10Plus can not be imported: ", e) -try: +try: # RefinedIslandEvolutionStrategyV2 from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV2 import RefinedIslandEvolutionStrategyV2 lama_register["RefinedIslandEvolutionStrategyV2"] = RefinedIslandEvolutionStrategyV2 - res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedIslandEvolutionStrategyV2 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV2").set_name("LLAMARefinedIslandEvolutionStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedIslandEvolutionStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV2" + ).set_name("LLAMARefinedIslandEvolutionStrategyV2", register=True) +except Exception as e: # RefinedIslandEvolutionStrategyV2 print("RefinedIslandEvolutionStrategyV2 can not be imported: ", e) -try: +try: # RefinedIslandEvolutionStrategyV6 from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV6 import RefinedIslandEvolutionStrategyV6 lama_register["RefinedIslandEvolutionStrategyV6"] = RefinedIslandEvolutionStrategyV6 - res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedIslandEvolutionStrategyV6 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV6").set_name("LLAMARefinedIslandEvolutionStrategyV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedIslandEvolutionStrategyV6 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV6" + ).set_name("LLAMARefinedIslandEvolutionStrategyV6", register=True) +except Exception as e: # RefinedIslandEvolutionStrategyV6 print("RefinedIslandEvolutionStrategyV6 can not be imported: ", e) -try: +try: # RefinedIslandEvolutionStrategyV9 from nevergrad.optimization.lama.RefinedIslandEvolutionStrategyV9 import RefinedIslandEvolutionStrategyV9 lama_register["RefinedIslandEvolutionStrategyV9"] = RefinedIslandEvolutionStrategyV9 - res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedIslandEvolutionStrategyV9 = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV9").set_name("LLAMARefinedIslandEvolutionStrategyV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedIslandEvolutionStrategyV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedIslandEvolutionStrategyV9 = NonObjectOptimizer( + method="LLAMARefinedIslandEvolutionStrategyV9" + ).set_name("LLAMARefinedIslandEvolutionStrategyV9", register=True) +except Exception as e: # RefinedIslandEvolutionStrategyV9 print("RefinedIslandEvolutionStrategyV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemeticDifferentialEvolution import RefinedMemeticDifferentialEvolution +try: # RefinedMemeticDifferentialEvolution + from nevergrad.optimization.lama.RefinedMemeticDifferentialEvolution import ( + RefinedMemeticDifferentialEvolution, + ) lama_register["RefinedMemeticDifferentialEvolution"] = RefinedMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMemeticDifferentialEvolution").set_name("LLAMARefinedMemeticDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMemeticDifferentialEvolution" + ).set_name("LLAMARefinedMemeticDifferentialEvolution", register=True) +except Exception as e: # RefinedMemeticDifferentialEvolution print("RefinedMemeticDifferentialEvolution can not be imported: ", e) -try: +try: # RefinedMemeticDiverseOptimizer from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizer import RefinedMemeticDiverseOptimizer lama_register["RefinedMemeticDiverseOptimizer"] = RefinedMemeticDiverseOptimizer - res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemeticDiverseOptimizer = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizer").set_name("LLAMARefinedMemeticDiverseOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemeticDiverseOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemeticDiverseOptimizer" + ).set_name("LLAMARefinedMemeticDiverseOptimizer", register=True) +except Exception as e: # RefinedMemeticDiverseOptimizer print("RefinedMemeticDiverseOptimizer can not be imported: ", e) -try: +try: # RefinedMemeticDiverseOptimizerV4 from nevergrad.optimization.lama.RefinedMemeticDiverseOptimizerV4 import RefinedMemeticDiverseOptimizerV4 lama_register["RefinedMemeticDiverseOptimizerV4"] = RefinedMemeticDiverseOptimizerV4 - res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemeticDiverseOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizerV4").set_name("LLAMARefinedMemeticDiverseOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemeticDiverseOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemeticDiverseOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedMemeticDiverseOptimizerV4" + ).set_name("LLAMARefinedMemeticDiverseOptimizerV4", register=True) +except Exception as e: # RefinedMemeticDiverseOptimizerV4 print("RefinedMemeticDiverseOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemeticQuantumDifferentialOptimizer import RefinedMemeticQuantumDifferentialOptimizer +try: # RefinedMemeticQuantumDifferentialOptimizer + from nevergrad.optimization.lama.RefinedMemeticQuantumDifferentialOptimizer import ( + RefinedMemeticQuantumDifferentialOptimizer, + ) lama_register["RefinedMemeticQuantumDifferentialOptimizer"] = RefinedMemeticQuantumDifferentialOptimizer - res = NonObjectOptimizer(method="LLAMARefinedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer(method="LLAMARefinedMemeticQuantumDifferentialOptimizer").set_name("LLAMARefinedMemeticQuantumDifferentialOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemeticQuantumDifferentialOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemeticQuantumDifferentialOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemeticQuantumDifferentialOptimizer" + ).set_name("LLAMARefinedMemeticQuantumDifferentialOptimizer", register=True) +except Exception as e: # RefinedMemeticQuantumDifferentialOptimizer print("RefinedMemeticQuantumDifferentialOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryAdaptiveDynamicHybridOptimizer import RefinedMemoryAdaptiveDynamicHybridOptimizer +try: # RefinedMemoryAdaptiveDynamicHybridOptimizer + from nevergrad.optimization.lama.RefinedMemoryAdaptiveDynamicHybridOptimizer import ( + RefinedMemoryAdaptiveDynamicHybridOptimizer, + ) lama_register["RefinedMemoryAdaptiveDynamicHybridOptimizer"] = RefinedMemoryAdaptiveDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer").set_name("LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer" + ).set_name("LLAMARefinedMemoryAdaptiveDynamicHybridOptimizer", register=True) +except Exception as e: # RefinedMemoryAdaptiveDynamicHybridOptimizer print("RefinedMemoryAdaptiveDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryAdaptiveHybridOptimizer import RefinedMemoryAdaptiveHybridOptimizer +try: # RefinedMemoryAdaptiveHybridOptimizer + from nevergrad.optimization.lama.RefinedMemoryAdaptiveHybridOptimizer import ( + RefinedMemoryAdaptiveHybridOptimizer, + ) lama_register["RefinedMemoryAdaptiveHybridOptimizer"] = RefinedMemoryAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveHybridOptimizer").set_name("LLAMARefinedMemoryAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemoryAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryAdaptiveHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryAdaptiveHybridOptimizer" + ).set_name("LLAMARefinedMemoryAdaptiveHybridOptimizer", register=True) +except Exception as e: # RefinedMemoryAdaptiveHybridOptimizer print("RefinedMemoryAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryEnhancedDynamicHybridOptimizer import RefinedMemoryEnhancedDynamicHybridOptimizer +try: # RefinedMemoryEnhancedDynamicHybridOptimizer + from nevergrad.optimization.lama.RefinedMemoryEnhancedDynamicHybridOptimizer import ( + RefinedMemoryEnhancedDynamicHybridOptimizer, + ) lama_register["RefinedMemoryEnhancedDynamicHybridOptimizer"] = RefinedMemoryEnhancedDynamicHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer").set_name("LLAMARefinedMemoryEnhancedDynamicHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryEnhancedDynamicHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedMemoryEnhancedDynamicHybridOptimizer" + ).set_name("LLAMARefinedMemoryEnhancedDynamicHybridOptimizer", register=True) +except Exception as e: # RefinedMemoryEnhancedDynamicHybridOptimizer print("RefinedMemoryEnhancedDynamicHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryEnhancedHybridOptimizerV2 import RefinedMemoryEnhancedHybridOptimizerV2 +try: # RefinedMemoryEnhancedHybridOptimizerV2 + from nevergrad.optimization.lama.RefinedMemoryEnhancedHybridOptimizerV2 import ( + RefinedMemoryEnhancedHybridOptimizerV2, + ) lama_register["RefinedMemoryEnhancedHybridOptimizerV2"] = RefinedMemoryEnhancedHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryEnhancedHybridOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedHybridOptimizerV2").set_name("LLAMARefinedMemoryEnhancedHybridOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemoryEnhancedHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryEnhancedHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedMemoryEnhancedHybridOptimizerV2" + ).set_name("LLAMARefinedMemoryEnhancedHybridOptimizerV2", register=True) +except Exception as e: # RefinedMemoryEnhancedHybridOptimizerV2 print("RefinedMemoryEnhancedHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 import RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 - - lama_register["RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72"] = RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 - res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72").set_name("LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72", register=True) -except Exception as e: +try: # RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 + from nevergrad.optimization.lama.RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 import ( + RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72, + ) + + lama_register["RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72"] = ( + RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 + ) + # res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 = NonObjectOptimizer( + method="LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72" + ).set_name("LLAMARefinedMemoryGuidedAdaptiveDualPhaseStrategyV72", register=True) +except Exception as e: # RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 print("RefinedMemoryGuidedAdaptiveDualPhaseStrategyV72 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMemoryGuidedHybridStrategyV63 import RefinedMemoryGuidedHybridStrategyV63 +try: # RefinedMemoryGuidedHybridStrategyV63 + from nevergrad.optimization.lama.RefinedMemoryGuidedHybridStrategyV63 import ( + RefinedMemoryGuidedHybridStrategyV63, + ) lama_register["RefinedMemoryGuidedHybridStrategyV63"] = RefinedMemoryGuidedHybridStrategyV63 - res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedHybridStrategyV63")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMemoryGuidedHybridStrategyV63 = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedHybridStrategyV63").set_name("LLAMARefinedMemoryGuidedHybridStrategyV63", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMemoryGuidedHybridStrategyV63")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMemoryGuidedHybridStrategyV63 = NonObjectOptimizer( + method="LLAMARefinedMemoryGuidedHybridStrategyV63" + ).set_name("LLAMARefinedMemoryGuidedHybridStrategyV63", register=True) +except Exception as e: # RefinedMemoryGuidedHybridStrategyV63 print("RefinedMemoryGuidedHybridStrategyV63 can not be imported: ", e) -try: +try: # RefinedMetaNetAQAPSO from nevergrad.optimization.lama.RefinedMetaNetAQAPSO import RefinedMetaNetAQAPSO lama_register["RefinedMetaNetAQAPSO"] = RefinedMetaNetAQAPSO - res = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO").set_name("LLAMARefinedMetaNetAQAPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMetaNetAQAPSO = NonObjectOptimizer(method="LLAMARefinedMetaNetAQAPSO").set_name( + "LLAMARefinedMetaNetAQAPSO", register=True + ) +except Exception as e: # RefinedMetaNetAQAPSO print("RefinedMetaNetAQAPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiFocalAdaptiveElitistStrategyV4 import RefinedMultiFocalAdaptiveElitistStrategyV4 +try: # RefinedMultiFocalAdaptiveElitistStrategyV4 + from nevergrad.optimization.lama.RefinedMultiFocalAdaptiveElitistStrategyV4 import ( + RefinedMultiFocalAdaptiveElitistStrategyV4, + ) lama_register["RefinedMultiFocalAdaptiveElitistStrategyV4"] = RefinedMultiFocalAdaptiveElitistStrategyV4 - res = NonObjectOptimizer(method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiFocalAdaptiveElitistStrategyV4 = NonObjectOptimizer(method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4").set_name("LLAMARefinedMultiFocalAdaptiveElitistStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiFocalAdaptiveElitistStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedMultiFocalAdaptiveElitistStrategyV4" + ).set_name("LLAMARefinedMultiFocalAdaptiveElitistStrategyV4", register=True) +except Exception as e: # RefinedMultiFocalAdaptiveElitistStrategyV4 print("RefinedMultiFocalAdaptiveElitistStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiOperatorAdaptiveOptimization import RefinedMultiOperatorAdaptiveOptimization +try: # RefinedMultiOperatorAdaptiveOptimization + from nevergrad.optimization.lama.RefinedMultiOperatorAdaptiveOptimization import ( + RefinedMultiOperatorAdaptiveOptimization, + ) lama_register["RefinedMultiOperatorAdaptiveOptimization"] = RefinedMultiOperatorAdaptiveOptimization - res = NonObjectOptimizer(method="LLAMARefinedMultiOperatorAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiOperatorAdaptiveOptimization = NonObjectOptimizer(method="LLAMARefinedMultiOperatorAdaptiveOptimization").set_name("LLAMARefinedMultiOperatorAdaptiveOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiOperatorAdaptiveOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiOperatorAdaptiveOptimization = NonObjectOptimizer( + method="LLAMARefinedMultiOperatorAdaptiveOptimization" + ).set_name("LLAMARefinedMultiOperatorAdaptiveOptimization", register=True) +except Exception as e: # RefinedMultiOperatorAdaptiveOptimization print("RefinedMultiOperatorAdaptiveOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiPhaseAdaptiveHybridDEPSO import RefinedMultiPhaseAdaptiveHybridDEPSO +try: # RefinedMultiPhaseAdaptiveHybridDEPSO + from nevergrad.optimization.lama.RefinedMultiPhaseAdaptiveHybridDEPSO import ( + RefinedMultiPhaseAdaptiveHybridDEPSO, + ) lama_register["RefinedMultiPhaseAdaptiveHybridDEPSO"] = RefinedMultiPhaseAdaptiveHybridDEPSO - res = NonObjectOptimizer(method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO").set_name("LLAMARefinedMultiPhaseAdaptiveHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiPhaseAdaptiveHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedMultiPhaseAdaptiveHybridDEPSO" + ).set_name("LLAMARefinedMultiPhaseAdaptiveHybridDEPSO", register=True) +except Exception as e: # RefinedMultiPhaseAdaptiveHybridDEPSO print("RefinedMultiPhaseAdaptiveHybridDEPSO can not be imported: ", e) -try: +try: # RefinedMultiStageAdaptiveSearch from nevergrad.optimization.lama.RefinedMultiStageAdaptiveSearch import RefinedMultiStageAdaptiveSearch lama_register["RefinedMultiStageAdaptiveSearch"] = RefinedMultiStageAdaptiveSearch - res = NonObjectOptimizer(method="LLAMARefinedMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiStageAdaptiveSearch = NonObjectOptimizer(method="LLAMARefinedMultiStageAdaptiveSearch").set_name("LLAMARefinedMultiStageAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiStageAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiStageAdaptiveSearch = NonObjectOptimizer( + method="LLAMARefinedMultiStageAdaptiveSearch" + ).set_name("LLAMARefinedMultiStageAdaptiveSearch", register=True) +except Exception as e: # RefinedMultiStageAdaptiveSearch print("RefinedMultiStageAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiStrategyDifferentialEvolution import RefinedMultiStrategyDifferentialEvolution +try: # RefinedMultiStrategyDifferentialEvolution + from nevergrad.optimization.lama.RefinedMultiStrategyDifferentialEvolution import ( + RefinedMultiStrategyDifferentialEvolution, + ) lama_register["RefinedMultiStrategyDifferentialEvolution"] = RefinedMultiStrategyDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiStrategyDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMultiStrategyDifferentialEvolution").set_name("LLAMARefinedMultiStrategyDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiStrategyDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiStrategyDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMultiStrategyDifferentialEvolution" + ).set_name("LLAMARefinedMultiStrategyDifferentialEvolution", register=True) +except Exception as e: # RefinedMultiStrategyDifferentialEvolution print("RefinedMultiStrategyDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiStrategySelfAdaptiveDE import RefinedMultiStrategySelfAdaptiveDE +try: # RefinedMultiStrategySelfAdaptiveDE + from nevergrad.optimization.lama.RefinedMultiStrategySelfAdaptiveDE import ( + RefinedMultiStrategySelfAdaptiveDE, + ) lama_register["RefinedMultiStrategySelfAdaptiveDE"] = RefinedMultiStrategySelfAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiStrategySelfAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedMultiStrategySelfAdaptiveDE").set_name("LLAMARefinedMultiStrategySelfAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySelfAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiStrategySelfAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedMultiStrategySelfAdaptiveDE" + ).set_name("LLAMARefinedMultiStrategySelfAdaptiveDE", register=True) +except Exception as e: # RefinedMultiStrategySelfAdaptiveDE print("RefinedMultiStrategySelfAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedMultiStrategySwarmDifferentialEvolution import RefinedMultiStrategySwarmDifferentialEvolution - - lama_register["RefinedMultiStrategySwarmDifferentialEvolution"] = RefinedMultiStrategySwarmDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedMultiStrategySwarmDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedMultiStrategySwarmDifferentialEvolution").set_name("LLAMARefinedMultiStrategySwarmDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedMultiStrategySwarmDifferentialEvolution + from nevergrad.optimization.lama.RefinedMultiStrategySwarmDifferentialEvolution import ( + RefinedMultiStrategySwarmDifferentialEvolution, + ) + + lama_register["RefinedMultiStrategySwarmDifferentialEvolution"] = ( + RefinedMultiStrategySwarmDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedMultiStrategySwarmDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedMultiStrategySwarmDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedMultiStrategySwarmDifferentialEvolution" + ).set_name("LLAMARefinedMultiStrategySwarmDifferentialEvolution", register=True) +except Exception as e: # RefinedMultiStrategySwarmDifferentialEvolution print("RefinedMultiStrategySwarmDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedNicheDifferentialParticleSwarmOptimizer import RefinedNicheDifferentialParticleSwarmOptimizer - - lama_register["RefinedNicheDifferentialParticleSwarmOptimizer"] = RefinedNicheDifferentialParticleSwarmOptimizer - res = NonObjectOptimizer(method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer(method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer").set_name("LLAMARefinedNicheDifferentialParticleSwarmOptimizer", register=True) -except Exception as e: +try: # RefinedNicheDifferentialParticleSwarmOptimizer + from nevergrad.optimization.lama.RefinedNicheDifferentialParticleSwarmOptimizer import ( + RefinedNicheDifferentialParticleSwarmOptimizer, + ) + + lama_register["RefinedNicheDifferentialParticleSwarmOptimizer"] = ( + RefinedNicheDifferentialParticleSwarmOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedNicheDifferentialParticleSwarmOptimizer = NonObjectOptimizer( + method="LLAMARefinedNicheDifferentialParticleSwarmOptimizer" + ).set_name("LLAMARefinedNicheDifferentialParticleSwarmOptimizer", register=True) +except Exception as e: # RefinedNicheDifferentialParticleSwarmOptimizer print("RefinedNicheDifferentialParticleSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimalDynamicPrecisionOptimizerV15 import RefinedOptimalDynamicPrecisionOptimizerV15 +try: # RefinedOptimalDynamicPrecisionOptimizerV15 + from nevergrad.optimization.lama.RefinedOptimalDynamicPrecisionOptimizerV15 import ( + RefinedOptimalDynamicPrecisionOptimizerV15, + ) lama_register["RefinedOptimalDynamicPrecisionOptimizerV15"] = RefinedOptimalDynamicPrecisionOptimizerV15 - res = NonObjectOptimizer(method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimalDynamicPrecisionOptimizerV15 = NonObjectOptimizer(method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15").set_name("LLAMARefinedOptimalDynamicPrecisionOptimizerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimalDynamicPrecisionOptimizerV15 = NonObjectOptimizer( + method="LLAMARefinedOptimalDynamicPrecisionOptimizerV15" + ).set_name("LLAMARefinedOptimalDynamicPrecisionOptimizerV15", register=True) +except Exception as e: # RefinedOptimalDynamicPrecisionOptimizerV15 print("RefinedOptimalDynamicPrecisionOptimizerV15 can not be imported: ", e) -try: +try: # RefinedOptimalEnhancedRAMEDS from nevergrad.optimization.lama.RefinedOptimalEnhancedRAMEDS import RefinedOptimalEnhancedRAMEDS lama_register["RefinedOptimalEnhancedRAMEDS"] = RefinedOptimalEnhancedRAMEDS - res = NonObjectOptimizer(method="LLAMARefinedOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimalEnhancedRAMEDS = NonObjectOptimizer(method="LLAMARefinedOptimalEnhancedRAMEDS").set_name("LLAMARefinedOptimalEnhancedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedOptimalEnhancedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimalEnhancedRAMEDS = NonObjectOptimizer( + method="LLAMARefinedOptimalEnhancedRAMEDS" + ).set_name("LLAMARefinedOptimalEnhancedRAMEDS", register=True) +except Exception as e: # RefinedOptimalEnhancedRAMEDS print("RefinedOptimalEnhancedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimalEvolutionaryGradientOptimizerV12 import RefinedOptimalEvolutionaryGradientOptimizerV12 - - lama_register["RefinedOptimalEvolutionaryGradientOptimizerV12"] = RefinedOptimalEvolutionaryGradientOptimizerV12 - res = NonObjectOptimizer(method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimalEvolutionaryGradientOptimizerV12 = NonObjectOptimizer(method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12").set_name("LLAMARefinedOptimalEvolutionaryGradientOptimizerV12", register=True) -except Exception as e: +try: # RefinedOptimalEvolutionaryGradientOptimizerV12 + from nevergrad.optimization.lama.RefinedOptimalEvolutionaryGradientOptimizerV12 import ( + RefinedOptimalEvolutionaryGradientOptimizerV12, + ) + + lama_register["RefinedOptimalEvolutionaryGradientOptimizerV12"] = ( + RefinedOptimalEvolutionaryGradientOptimizerV12 + ) + # res = NonObjectOptimizer(method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimalEvolutionaryGradientOptimizerV12 = NonObjectOptimizer( + method="LLAMARefinedOptimalEvolutionaryGradientOptimizerV12" + ).set_name("LLAMARefinedOptimalEvolutionaryGradientOptimizerV12", register=True) +except Exception as e: # RefinedOptimalEvolutionaryGradientOptimizerV12 print("RefinedOptimalEvolutionaryGradientOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 import RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 - - lama_register["RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5"] = RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 - res = NonObjectOptimizer(method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 = NonObjectOptimizer(method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5").set_name("LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5", register=True) -except Exception as e: +try: # RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 + from nevergrad.optimization.lama.RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 import ( + RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5, + ) + + lama_register["RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5"] = ( + RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 + ) + # res = NonObjectOptimizer(method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 = NonObjectOptimizer( + method="LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5" + ).set_name("LLAMARefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5", register=True) +except Exception as e: # RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 print("RefinedOptimizedDualPhaseAdaptiveHybridOptimizationV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing import RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing - - lama_register["RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing"] = RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing - res = NonObjectOptimizer(method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer(method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing").set_name("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) -except Exception as e: +try: # RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing + from nevergrad.optimization.lama.RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing import ( + RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing, + ) + + lama_register["RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing"] = ( + RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing + ) + # res = NonObjectOptimizer(method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing = NonObjectOptimizer( + method="LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing" + ).set_name("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing", register=True) +except Exception as e: # RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing print("RefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimizedEnhancedDualStrategyAdaptiveDE import RefinedOptimizedEnhancedDualStrategyAdaptiveDE - - lama_register["RefinedOptimizedEnhancedDualStrategyAdaptiveDE"] = RefinedOptimizedEnhancedDualStrategyAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE").set_name("LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE", register=True) -except Exception as e: +try: # RefinedOptimizedEnhancedDualStrategyAdaptiveDE + from nevergrad.optimization.lama.RefinedOptimizedEnhancedDualStrategyAdaptiveDE import ( + RefinedOptimizedEnhancedDualStrategyAdaptiveDE, + ) + + lama_register["RefinedOptimizedEnhancedDualStrategyAdaptiveDE"] = ( + RefinedOptimizedEnhancedDualStrategyAdaptiveDE + ) + # res = NonObjectOptimizer(method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE" + ).set_name("LLAMARefinedOptimizedEnhancedDualStrategyAdaptiveDE", register=True) +except Exception as e: # RefinedOptimizedEnhancedDualStrategyAdaptiveDE print("RefinedOptimizedEnhancedDualStrategyAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedOptimizedHybridAdaptiveMultiStageOptimization import RefinedOptimizedHybridAdaptiveMultiStageOptimization - - lama_register["RefinedOptimizedHybridAdaptiveMultiStageOptimization"] = RefinedOptimizedHybridAdaptiveMultiStageOptimization - res = NonObjectOptimizer(method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer(method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization").set_name("LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) -except Exception as e: +try: # RefinedOptimizedHybridAdaptiveMultiStageOptimization + from nevergrad.optimization.lama.RefinedOptimizedHybridAdaptiveMultiStageOptimization import ( + RefinedOptimizedHybridAdaptiveMultiStageOptimization, + ) + + lama_register["RefinedOptimizedHybridAdaptiveMultiStageOptimization"] = ( + RefinedOptimizedHybridAdaptiveMultiStageOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization = NonObjectOptimizer( + method="LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization" + ).set_name("LLAMARefinedOptimizedHybridAdaptiveMultiStageOptimization", register=True) +except Exception as e: # RefinedOptimizedHybridAdaptiveMultiStageOptimization print("RefinedOptimizedHybridAdaptiveMultiStageOptimization can not be imported: ", e) -try: +try: # RefinedPrecisionAdaptivePSO from nevergrad.optimization.lama.RefinedPrecisionAdaptivePSO import RefinedPrecisionAdaptivePSO lama_register["RefinedPrecisionAdaptivePSO"] = RefinedPrecisionAdaptivePSO - res = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO").set_name("LLAMARefinedPrecisionAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedPrecisionAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedPrecisionAdaptivePSO").set_name( + "LLAMARefinedPrecisionAdaptivePSO", register=True + ) +except Exception as e: # RefinedPrecisionAdaptivePSO print("RefinedPrecisionAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedPrecisionEnhancedDualStrategyOptimizer import RefinedPrecisionEnhancedDualStrategyOptimizer - - lama_register["RefinedPrecisionEnhancedDualStrategyOptimizer"] = RefinedPrecisionEnhancedDualStrategyOptimizer - res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer").set_name("LLAMARefinedPrecisionEnhancedDualStrategyOptimizer", register=True) -except Exception as e: +try: # RefinedPrecisionEnhancedDualStrategyOptimizer + from nevergrad.optimization.lama.RefinedPrecisionEnhancedDualStrategyOptimizer import ( + RefinedPrecisionEnhancedDualStrategyOptimizer, + ) + + lama_register["RefinedPrecisionEnhancedDualStrategyOptimizer"] = ( + RefinedPrecisionEnhancedDualStrategyOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedPrecisionEnhancedDualStrategyOptimizer = NonObjectOptimizer( + method="LLAMARefinedPrecisionEnhancedDualStrategyOptimizer" + ).set_name("LLAMARefinedPrecisionEnhancedDualStrategyOptimizer", register=True) +except Exception as e: # RefinedPrecisionEnhancedDualStrategyOptimizer print("RefinedPrecisionEnhancedDualStrategyOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedPrecisionEnhancedSpatialAdaptiveEvolver import RefinedPrecisionEnhancedSpatialAdaptiveEvolver - - lama_register["RefinedPrecisionEnhancedSpatialAdaptiveEvolver"] = RefinedPrecisionEnhancedSpatialAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver").set_name("LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver", register=True) -except Exception as e: +try: # RefinedPrecisionEnhancedSpatialAdaptiveEvolver + from nevergrad.optimization.lama.RefinedPrecisionEnhancedSpatialAdaptiveEvolver import ( + RefinedPrecisionEnhancedSpatialAdaptiveEvolver, + ) + + lama_register["RefinedPrecisionEnhancedSpatialAdaptiveEvolver"] = ( + RefinedPrecisionEnhancedSpatialAdaptiveEvolver + ) + # res = NonObjectOptimizer(method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver = NonObjectOptimizer( + method="LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver" + ).set_name("LLAMARefinedPrecisionEnhancedSpatialAdaptiveEvolver", register=True) +except Exception as e: # RefinedPrecisionEnhancedSpatialAdaptiveEvolver print("RefinedPrecisionEnhancedSpatialAdaptiveEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedPrecisionEvolutionaryThermalOptimizer import RefinedPrecisionEvolutionaryThermalOptimizer - - lama_register["RefinedPrecisionEvolutionaryThermalOptimizer"] = RefinedPrecisionEvolutionaryThermalOptimizer - res = NonObjectOptimizer(method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer").set_name("LLAMARefinedPrecisionEvolutionaryThermalOptimizer", register=True) -except Exception as e: +try: # RefinedPrecisionEvolutionaryThermalOptimizer + from nevergrad.optimization.lama.RefinedPrecisionEvolutionaryThermalOptimizer import ( + RefinedPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["RefinedPrecisionEvolutionaryThermalOptimizer"] = ( + RefinedPrecisionEvolutionaryThermalOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMARefinedPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMARefinedPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: # RefinedPrecisionEvolutionaryThermalOptimizer print("RefinedPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedPrecisionTunedCrossoverElitistStrategyV12 import RefinedPrecisionTunedCrossoverElitistStrategyV12 - - lama_register["RefinedPrecisionTunedCrossoverElitistStrategyV12"] = RefinedPrecisionTunedCrossoverElitistStrategyV12 - res = NonObjectOptimizer(method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12 = NonObjectOptimizer(method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12").set_name("LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12", register=True) -except Exception as e: +try: # RefinedPrecisionTunedCrossoverElitistStrategyV12 + from nevergrad.optimization.lama.RefinedPrecisionTunedCrossoverElitistStrategyV12 import ( + RefinedPrecisionTunedCrossoverElitistStrategyV12, + ) + + lama_register["RefinedPrecisionTunedCrossoverElitistStrategyV12"] = ( + RefinedPrecisionTunedCrossoverElitistStrategyV12 + ) + # res = NonObjectOptimizer(method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12 = NonObjectOptimizer( + method="LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12" + ).set_name("LLAMARefinedPrecisionTunedCrossoverElitistStrategyV12", register=True) +except Exception as e: # RefinedPrecisionTunedCrossoverElitistStrategyV12 print("RefinedPrecisionTunedCrossoverElitistStrategyV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedProgressiveParticleSwarmOptimization import RefinedProgressiveParticleSwarmOptimization +try: # RefinedProgressiveParticleSwarmOptimization + from nevergrad.optimization.lama.RefinedProgressiveParticleSwarmOptimization import ( + RefinedProgressiveParticleSwarmOptimization, + ) lama_register["RefinedProgressiveParticleSwarmOptimization"] = RefinedProgressiveParticleSwarmOptimization - res = NonObjectOptimizer(method="LLAMARefinedProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedProgressiveParticleSwarmOptimization = NonObjectOptimizer(method="LLAMARefinedProgressiveParticleSwarmOptimization").set_name("LLAMARefinedProgressiveParticleSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedProgressiveParticleSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedProgressiveParticleSwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedProgressiveParticleSwarmOptimization" + ).set_name("LLAMARefinedProgressiveParticleSwarmOptimization", register=True) +except Exception as e: # RefinedProgressiveParticleSwarmOptimization print("RefinedProgressiveParticleSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedProgressiveQuorumEvolutionStrategy import RefinedProgressiveQuorumEvolutionStrategy +try: # RefinedProgressiveQuorumEvolutionStrategy + from nevergrad.optimization.lama.RefinedProgressiveQuorumEvolutionStrategy import ( + RefinedProgressiveQuorumEvolutionStrategy, + ) lama_register["RefinedProgressiveQuorumEvolutionStrategy"] = RefinedProgressiveQuorumEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedProgressiveQuorumEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedProgressiveQuorumEvolutionStrategy").set_name("LLAMARefinedProgressiveQuorumEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedProgressiveQuorumEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedProgressiveQuorumEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedProgressiveQuorumEvolutionStrategy" + ).set_name("LLAMARefinedProgressiveQuorumEvolutionStrategy", register=True) +except Exception as e: # RefinedProgressiveQuorumEvolutionStrategy print("RefinedProgressiveQuorumEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuadraticAdaptiveEvolutionStrategy import RefinedQuadraticAdaptiveEvolutionStrategy +try: # RefinedQuadraticAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.RefinedQuadraticAdaptiveEvolutionStrategy import ( + RefinedQuadraticAdaptiveEvolutionStrategy, + ) lama_register["RefinedQuadraticAdaptiveEvolutionStrategy"] = RefinedQuadraticAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy").set_name("LLAMARefinedQuadraticAdaptiveEvolutionStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMARefinedQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMARefinedQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: # RefinedQuadraticAdaptiveEvolutionStrategy print("RefinedQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveExplorationOptimization import RefinedQuantumAdaptiveExplorationOptimization - - lama_register["RefinedQuantumAdaptiveExplorationOptimization"] = RefinedQuantumAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveExplorationOptimization").set_name("LLAMARefinedQuantumAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # RefinedQuantumAdaptiveExplorationOptimization + from nevergrad.optimization.lama.RefinedQuantumAdaptiveExplorationOptimization import ( + RefinedQuantumAdaptiveExplorationOptimization, + ) + + lama_register["RefinedQuantumAdaptiveExplorationOptimization"] = ( + RefinedQuantumAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveExplorationOptimization" + ).set_name("LLAMARefinedQuantumAdaptiveExplorationOptimization", register=True) +except Exception as e: # RefinedQuantumAdaptiveExplorationOptimization print("RefinedQuantumAdaptiveExplorationOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridOptimizerV4 import RefinedQuantumAdaptiveHybridOptimizerV4 +try: # RefinedQuantumAdaptiveHybridOptimizerV4 + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridOptimizerV4 import ( + RefinedQuantumAdaptiveHybridOptimizerV4, + ) lama_register["RefinedQuantumAdaptiveHybridOptimizerV4"] = RefinedQuantumAdaptiveHybridOptimizerV4 - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveHybridOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4").set_name("LLAMARefinedQuantumAdaptiveHybridOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveHybridOptimizerV4" + ).set_name("LLAMARefinedQuantumAdaptiveHybridOptimizerV4", register=True) +except Exception as e: # RefinedQuantumAdaptiveHybridOptimizerV4 print("RefinedQuantumAdaptiveHybridOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridSearchV3 import RefinedQuantumAdaptiveHybridSearchV3 +try: # RefinedQuantumAdaptiveHybridSearchV3 + from nevergrad.optimization.lama.RefinedQuantumAdaptiveHybridSearchV3 import ( + RefinedQuantumAdaptiveHybridSearchV3, + ) lama_register["RefinedQuantumAdaptiveHybridSearchV3"] = RefinedQuantumAdaptiveHybridSearchV3 - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridSearchV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveHybridSearchV3 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridSearchV3").set_name("LLAMARefinedQuantumAdaptiveHybridSearchV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveHybridSearchV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveHybridSearchV3 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveHybridSearchV3" + ).set_name("LLAMARefinedQuantumAdaptiveHybridSearchV3", register=True) +except Exception as e: # RefinedQuantumAdaptiveHybridSearchV3 print("RefinedQuantumAdaptiveHybridSearchV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveLevySwarmOptimization import RefinedQuantumAdaptiveLevySwarmOptimization +try: # RefinedQuantumAdaptiveLevySwarmOptimization + from nevergrad.optimization.lama.RefinedQuantumAdaptiveLevySwarmOptimization import ( + RefinedQuantumAdaptiveLevySwarmOptimization, + ) lama_register["RefinedQuantumAdaptiveLevySwarmOptimization"] = RefinedQuantumAdaptiveLevySwarmOptimization - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization").set_name("LLAMARefinedQuantumAdaptiveLevySwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveLevySwarmOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveLevySwarmOptimization" + ).set_name("LLAMARefinedQuantumAdaptiveLevySwarmOptimization", register=True) +except Exception as e: # RefinedQuantumAdaptiveLevySwarmOptimization print("RefinedQuantumAdaptiveLevySwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveMultiPopulationDE import RefinedQuantumAdaptiveMultiPopulationDE +try: # RefinedQuantumAdaptiveMultiPopulationDE + from nevergrad.optimization.lama.RefinedQuantumAdaptiveMultiPopulationDE import ( + RefinedQuantumAdaptiveMultiPopulationDE, + ) lama_register["RefinedQuantumAdaptiveMultiPopulationDE"] = RefinedQuantumAdaptiveMultiPopulationDE - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveMultiPopulationDE").set_name("LLAMARefinedQuantumAdaptiveMultiPopulationDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveMultiPopulationDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveMultiPopulationDE = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveMultiPopulationDE" + ).set_name("LLAMARefinedQuantumAdaptiveMultiPopulationDE", register=True) +except Exception as e: # RefinedQuantumAdaptiveMultiPopulationDE print("RefinedQuantumAdaptiveMultiPopulationDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveOptimizerV2 import RefinedQuantumAdaptiveOptimizerV2 +try: # RefinedQuantumAdaptiveOptimizerV2 + from nevergrad.optimization.lama.RefinedQuantumAdaptiveOptimizerV2 import ( + RefinedQuantumAdaptiveOptimizerV2, + ) lama_register["RefinedQuantumAdaptiveOptimizerV2"] = RefinedQuantumAdaptiveOptimizerV2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveOptimizerV2 = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveOptimizerV2").set_name("LLAMARefinedQuantumAdaptiveOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveOptimizerV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveOptimizerV2" + ).set_name("LLAMARefinedQuantumAdaptiveOptimizerV2", register=True) +except Exception as e: # RefinedQuantumAdaptiveOptimizerV2 print("RefinedQuantumAdaptiveOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumAdaptiveVelocityOptimizer import RefinedQuantumAdaptiveVelocityOptimizer +try: # RefinedQuantumAdaptiveVelocityOptimizer + from nevergrad.optimization.lama.RefinedQuantumAdaptiveVelocityOptimizer import ( + RefinedQuantumAdaptiveVelocityOptimizer, + ) lama_register["RefinedQuantumAdaptiveVelocityOptimizer"] = RefinedQuantumAdaptiveVelocityOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveVelocityOptimizer").set_name("LLAMARefinedQuantumAdaptiveVelocityOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumAdaptiveVelocityOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumAdaptiveVelocityOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumAdaptiveVelocityOptimizer" + ).set_name("LLAMARefinedQuantumAdaptiveVelocityOptimizer", register=True) +except Exception as e: # RefinedQuantumAdaptiveVelocityOptimizer print("RefinedQuantumAdaptiveVelocityOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumCognitionAdaptiveTuningOptimizerV15 import RefinedQuantumCognitionAdaptiveTuningOptimizerV15 - - lama_register["RefinedQuantumCognitionAdaptiveTuningOptimizerV15"] = RefinedQuantumCognitionAdaptiveTuningOptimizerV15 - res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15").set_name("LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15", register=True) -except Exception as e: +try: # RefinedQuantumCognitionAdaptiveTuningOptimizerV15 + from nevergrad.optimization.lama.RefinedQuantumCognitionAdaptiveTuningOptimizerV15 import ( + RefinedQuantumCognitionAdaptiveTuningOptimizerV15, + ) + + lama_register["RefinedQuantumCognitionAdaptiveTuningOptimizerV15"] = ( + RefinedQuantumCognitionAdaptiveTuningOptimizerV15 + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15" + ).set_name("LLAMARefinedQuantumCognitionAdaptiveTuningOptimizerV15", register=True) +except Exception as e: # RefinedQuantumCognitionAdaptiveTuningOptimizerV15 print("RefinedQuantumCognitionAdaptiveTuningOptimizerV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumCognitionHybridOptimizerV22 import RefinedQuantumCognitionHybridOptimizerV22 +try: # RefinedQuantumCognitionHybridOptimizerV22 + from nevergrad.optimization.lama.RefinedQuantumCognitionHybridOptimizerV22 import ( + RefinedQuantumCognitionHybridOptimizerV22, + ) lama_register["RefinedQuantumCognitionHybridOptimizerV22"] = RefinedQuantumCognitionHybridOptimizerV22 - res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionHybridOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumCognitionHybridOptimizerV22 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionHybridOptimizerV22").set_name("LLAMARefinedQuantumCognitionHybridOptimizerV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionHybridOptimizerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumCognitionHybridOptimizerV22 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionHybridOptimizerV22" + ).set_name("LLAMARefinedQuantumCognitionHybridOptimizerV22", register=True) +except Exception as e: # RefinedQuantumCognitionHybridOptimizerV22 print("RefinedQuantumCognitionHybridOptimizerV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV13 import RefinedQuantumCognitionOptimizerV13 +try: # RefinedQuantumCognitionOptimizerV13 + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV13 import ( + RefinedQuantumCognitionOptimizerV13, + ) lama_register["RefinedQuantumCognitionOptimizerV13"] = RefinedQuantumCognitionOptimizerV13 - res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumCognitionOptimizerV13 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV13").set_name("LLAMARefinedQuantumCognitionOptimizerV13", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumCognitionOptimizerV13 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionOptimizerV13" + ).set_name("LLAMARefinedQuantumCognitionOptimizerV13", register=True) +except Exception as e: # RefinedQuantumCognitionOptimizerV13 print("RefinedQuantumCognitionOptimizerV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV4 import RefinedQuantumCognitionOptimizerV4 +try: # RefinedQuantumCognitionOptimizerV4 + from nevergrad.optimization.lama.RefinedQuantumCognitionOptimizerV4 import ( + RefinedQuantumCognitionOptimizerV4, + ) lama_register["RefinedQuantumCognitionOptimizerV4"] = RefinedQuantumCognitionOptimizerV4 - res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumCognitionOptimizerV4 = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV4").set_name("LLAMARefinedQuantumCognitionOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumCognitionOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumCognitionOptimizerV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumCognitionOptimizerV4" + ).set_name("LLAMARefinedQuantumCognitionOptimizerV4", register=True) +except Exception as e: # RefinedQuantumCognitionOptimizerV4 print("RefinedQuantumCognitionOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 import RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 - - lama_register["RefinedQuantumCovarianceMatrixDifferentialEvolutionV4"] = RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 - res = NonObjectOptimizer(method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer(method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4").set_name("LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4", register=True) -except Exception as e: +try: # RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 + from nevergrad.optimization.lama.RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 import ( + RefinedQuantumCovarianceMatrixDifferentialEvolutionV4, + ) + + lama_register["RefinedQuantumCovarianceMatrixDifferentialEvolutionV4"] = ( + RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4" + ).set_name("LLAMARefinedQuantumCovarianceMatrixDifferentialEvolutionV4", register=True) +except Exception as e: # RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 print("RefinedQuantumCovarianceMatrixDifferentialEvolutionV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism import RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism - - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism"] = RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism", register=True) -except Exception as e: +try: # RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism", register=True) +except Exception as e: # RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism print("RefinedQuantumDifferentialEvolutionWithAdaptiveHybridSearchAndElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveLearning import RefinedQuantumDifferentialEvolutionWithAdaptiveLearning - - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveLearning"] = RefinedQuantumDifferentialEvolutionWithAdaptiveLearning - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) -except Exception as e: +try: # RefinedQuantumDifferentialEvolutionWithAdaptiveLearning + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveLearning import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveLearning, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveLearning"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveLearning + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveLearning", register=True) +except Exception as e: # RefinedQuantumDifferentialEvolutionWithAdaptiveLearning print("RefinedQuantumDifferentialEvolutionWithAdaptiveLearning can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True) -except Exception as e: - print("RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism import RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism - - lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism"] = RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism").set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism", register=True) -except Exception as e: +try: # RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch" + ).set_name( + "LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch", register=True + ) +except Exception as e: # RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch + print( + "RefinedQuantumDifferentialEvolutionWithAdaptiveMemoryAndHybridLocalSearch can not be imported: ", e + ) +try: # RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism + from nevergrad.optimization.lama.RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism import ( + RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism, + ) + + lama_register["RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism"] = ( + RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism" + ).set_name("LLAMARefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism", register=True) +except Exception as e: # RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism print("RefinedQuantumDifferentialEvolutionWithAdaptiveRestartsAndElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialMemeticOptimizer import RefinedQuantumDifferentialMemeticOptimizer +try: # RefinedQuantumDifferentialMemeticOptimizer + from nevergrad.optimization.lama.RefinedQuantumDifferentialMemeticOptimizer import ( + RefinedQuantumDifferentialMemeticOptimizer, + ) lama_register["RefinedQuantumDifferentialMemeticOptimizer"] = RefinedQuantumDifferentialMemeticOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialMemeticOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialMemeticOptimizer").set_name("LLAMARefinedQuantumDifferentialMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialMemeticOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialMemeticOptimizer" + ).set_name("LLAMARefinedQuantumDifferentialMemeticOptimizer", register=True) +except Exception as e: # RefinedQuantumDifferentialMemeticOptimizer print("RefinedQuantumDifferentialMemeticOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumDifferentialParticleOptimizerWithElitism import RefinedQuantumDifferentialParticleOptimizerWithElitism - - lama_register["RefinedQuantumDifferentialParticleOptimizerWithElitism"] = RefinedQuantumDifferentialParticleOptimizerWithElitism - res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism").set_name("LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism", register=True) -except Exception as e: +try: # RefinedQuantumDifferentialParticleOptimizerWithElitism + from nevergrad.optimization.lama.RefinedQuantumDifferentialParticleOptimizerWithElitism import ( + RefinedQuantumDifferentialParticleOptimizerWithElitism, + ) + + lama_register["RefinedQuantumDifferentialParticleOptimizerWithElitism"] = ( + RefinedQuantumDifferentialParticleOptimizerWithElitism + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism = NonObjectOptimizer( + method="LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism" + ).set_name("LLAMARefinedQuantumDifferentialParticleOptimizerWithElitism", register=True) +except Exception as e: # RefinedQuantumDifferentialParticleOptimizerWithElitism print("RefinedQuantumDifferentialParticleOptimizerWithElitism can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE import RefinedQuantumEnhancedAdaptiveMultiPhaseDE +try: # RefinedQuantumEnhancedAdaptiveMultiPhaseDE + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE import ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE, + ) lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE"] = RefinedQuantumEnhancedAdaptiveMultiPhaseDE - res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE").set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE" + ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE", register=True) +except Exception as e: # RefinedQuantumEnhancedAdaptiveMultiPhaseDE print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 import RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 - - lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2"] = RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2").set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2", register=True) -except Exception as e: +try: # RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 + from nevergrad.optimization.lama.RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 import ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2, + ) + + lama_register["RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2"] = ( + RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2" + ).set_name("LLAMARefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2", register=True) +except Exception as e: # RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 print("RefinedQuantumEnhancedAdaptiveMultiPhaseDE_v2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 import RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 - - lama_register["RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6"] = RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 - res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6").set_name("LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6", register=True) -except Exception as e: +try: # RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 + from nevergrad.optimization.lama.RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 import ( + RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6, + ) + + lama_register["RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6"] = ( + RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6" + ).set_name("LLAMARefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6", register=True) +except Exception as e: # RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 print("RefinedQuantumEnhancedDynamicAdaptiveHybridDEPSO_V6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEnhancedHybridDEPSO import RefinedQuantumEnhancedHybridDEPSO +try: # RefinedQuantumEnhancedHybridDEPSO + from nevergrad.optimization.lama.RefinedQuantumEnhancedHybridDEPSO import ( + RefinedQuantumEnhancedHybridDEPSO, + ) lama_register["RefinedQuantumEnhancedHybridDEPSO"] = RefinedQuantumEnhancedHybridDEPSO - res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEnhancedHybridDEPSO = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedHybridDEPSO").set_name("LLAMARefinedQuantumEnhancedHybridDEPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEnhancedHybridDEPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEnhancedHybridDEPSO = NonObjectOptimizer( + method="LLAMARefinedQuantumEnhancedHybridDEPSO" + ).set_name("LLAMARefinedQuantumEnhancedHybridDEPSO", register=True) +except Exception as e: # RefinedQuantumEnhancedHybridDEPSO print("RefinedQuantumEnhancedHybridDEPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptation import RefinedQuantumEvolutionaryAdaptation +try: # RefinedQuantumEvolutionaryAdaptation + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptation import ( + RefinedQuantumEvolutionaryAdaptation, + ) lama_register["RefinedQuantumEvolutionaryAdaptation"] = RefinedQuantumEvolutionaryAdaptation - res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptation")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEvolutionaryAdaptation = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptation").set_name("LLAMARefinedQuantumEvolutionaryAdaptation", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptation")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEvolutionaryAdaptation = NonObjectOptimizer( + method="LLAMARefinedQuantumEvolutionaryAdaptation" + ).set_name("LLAMARefinedQuantumEvolutionaryAdaptation", register=True) +except Exception as e: # RefinedQuantumEvolutionaryAdaptation print("RefinedQuantumEvolutionaryAdaptation can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptiveOptimizer import RefinedQuantumEvolutionaryAdaptiveOptimizer +try: # RefinedQuantumEvolutionaryAdaptiveOptimizer + from nevergrad.optimization.lama.RefinedQuantumEvolutionaryAdaptiveOptimizer import ( + RefinedQuantumEvolutionaryAdaptiveOptimizer, + ) lama_register["RefinedQuantumEvolutionaryAdaptiveOptimizer"] = RefinedQuantumEvolutionaryAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer").set_name("LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer" + ).set_name("LLAMARefinedQuantumEvolutionaryAdaptiveOptimizer", register=True) +except Exception as e: # RefinedQuantumEvolutionaryAdaptiveOptimizer print("RefinedQuantumEvolutionaryAdaptiveOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumFluxDifferentialSwarm import RefinedQuantumFluxDifferentialSwarm +try: # RefinedQuantumFluxDifferentialSwarm + from nevergrad.optimization.lama.RefinedQuantumFluxDifferentialSwarm import ( + RefinedQuantumFluxDifferentialSwarm, + ) lama_register["RefinedQuantumFluxDifferentialSwarm"] = RefinedQuantumFluxDifferentialSwarm - res = NonObjectOptimizer(method="LLAMARefinedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumFluxDifferentialSwarm = NonObjectOptimizer(method="LLAMARefinedQuantumFluxDifferentialSwarm").set_name("LLAMARefinedQuantumFluxDifferentialSwarm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumFluxDifferentialSwarm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumFluxDifferentialSwarm = NonObjectOptimizer( + method="LLAMARefinedQuantumFluxDifferentialSwarm" + ).set_name("LLAMARefinedQuantumFluxDifferentialSwarm", register=True) +except Exception as e: # RefinedQuantumFluxDifferentialSwarm print("RefinedQuantumFluxDifferentialSwarm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumGradientAdaptiveExplorationOptimization import RefinedQuantumGradientAdaptiveExplorationOptimization - - lama_register["RefinedQuantumGradientAdaptiveExplorationOptimization"] = RefinedQuantumGradientAdaptiveExplorationOptimization - res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization").set_name("LLAMARefinedQuantumGradientAdaptiveExplorationOptimization", register=True) -except Exception as e: +try: # RefinedQuantumGradientAdaptiveExplorationOptimization + from nevergrad.optimization.lama.RefinedQuantumGradientAdaptiveExplorationOptimization import ( + RefinedQuantumGradientAdaptiveExplorationOptimization, + ) + + lama_register["RefinedQuantumGradientAdaptiveExplorationOptimization"] = ( + RefinedQuantumGradientAdaptiveExplorationOptimization + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumGradientAdaptiveExplorationOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumGradientAdaptiveExplorationOptimization" + ).set_name("LLAMARefinedQuantumGradientAdaptiveExplorationOptimization", register=True) +except Exception as e: # RefinedQuantumGradientAdaptiveExplorationOptimization print("RefinedQuantumGradientAdaptiveExplorationOptimization can not be imported: ", e) -try: +try: # RefinedQuantumGradientSearch from nevergrad.optimization.lama.RefinedQuantumGradientSearch import RefinedQuantumGradientSearch lama_register["RefinedQuantumGradientSearch"] = RefinedQuantumGradientSearch - res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumGradientSearch = NonObjectOptimizer(method="LLAMARefinedQuantumGradientSearch").set_name("LLAMARefinedQuantumGradientSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumGradientSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumGradientSearch = NonObjectOptimizer( + method="LLAMARefinedQuantumGradientSearch" + ).set_name("LLAMARefinedQuantumGradientSearch", register=True) +except Exception as e: # RefinedQuantumGradientSearch print("RefinedQuantumGradientSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV6 import RefinedQuantumGuidedHybridSearchV6 +try: # RefinedQuantumGuidedHybridSearchV6 + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV6 import ( + RefinedQuantumGuidedHybridSearchV6, + ) lama_register["RefinedQuantumGuidedHybridSearchV6"] = RefinedQuantumGuidedHybridSearchV6 - res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumGuidedHybridSearchV6 = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV6").set_name("LLAMARefinedQuantumGuidedHybridSearchV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumGuidedHybridSearchV6 = NonObjectOptimizer( + method="LLAMARefinedQuantumGuidedHybridSearchV6" + ).set_name("LLAMARefinedQuantumGuidedHybridSearchV6", register=True) +except Exception as e: # RefinedQuantumGuidedHybridSearchV6 print("RefinedQuantumGuidedHybridSearchV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV8 import RefinedQuantumGuidedHybridSearchV8 +try: # RefinedQuantumGuidedHybridSearchV8 + from nevergrad.optimization.lama.RefinedQuantumGuidedHybridSearchV8 import ( + RefinedQuantumGuidedHybridSearchV8, + ) lama_register["RefinedQuantumGuidedHybridSearchV8"] = RefinedQuantumGuidedHybridSearchV8 - res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumGuidedHybridSearchV8 = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV8").set_name("LLAMARefinedQuantumGuidedHybridSearchV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumGuidedHybridSearchV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumGuidedHybridSearchV8 = NonObjectOptimizer( + method="LLAMARefinedQuantumGuidedHybridSearchV8" + ).set_name("LLAMARefinedQuantumGuidedHybridSearchV8", register=True) +except Exception as e: # RefinedQuantumGuidedHybridSearchV8 print("RefinedQuantumGuidedHybridSearchV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumHybridAdaptiveStrategyV3 import RefinedQuantumHybridAdaptiveStrategyV3 +try: # RefinedQuantumHybridAdaptiveStrategyV3 + from nevergrad.optimization.lama.RefinedQuantumHybridAdaptiveStrategyV3 import ( + RefinedQuantumHybridAdaptiveStrategyV3, + ) lama_register["RefinedQuantumHybridAdaptiveStrategyV3"] = RefinedQuantumHybridAdaptiveStrategyV3 - res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumHybridAdaptiveStrategyV3 = NonObjectOptimizer(method="LLAMARefinedQuantumHybridAdaptiveStrategyV3").set_name("LLAMARefinedQuantumHybridAdaptiveStrategyV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridAdaptiveStrategyV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumHybridAdaptiveStrategyV3 = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridAdaptiveStrategyV3" + ).set_name("LLAMARefinedQuantumHybridAdaptiveStrategyV3", register=True) +except Exception as e: # RefinedQuantumHybridAdaptiveStrategyV3 print("RefinedQuantumHybridAdaptiveStrategyV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumHybridDynamicAdaptiveDE import RefinedQuantumHybridDynamicAdaptiveDE +try: # RefinedQuantumHybridDynamicAdaptiveDE + from nevergrad.optimization.lama.RefinedQuantumHybridDynamicAdaptiveDE import ( + RefinedQuantumHybridDynamicAdaptiveDE, + ) lama_register["RefinedQuantumHybridDynamicAdaptiveDE"] = RefinedQuantumHybridDynamicAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedQuantumHybridDynamicAdaptiveDE").set_name("LLAMARefinedQuantumHybridDynamicAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridDynamicAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumHybridDynamicAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridDynamicAdaptiveDE" + ).set_name("LLAMARefinedQuantumHybridDynamicAdaptiveDE", register=True) +except Exception as e: # RefinedQuantumHybridDynamicAdaptiveDE print("RefinedQuantumHybridDynamicAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumHybridEliteAdaptiveDE import RefinedQuantumHybridEliteAdaptiveDE +try: # RefinedQuantumHybridEliteAdaptiveDE + from nevergrad.optimization.lama.RefinedQuantumHybridEliteAdaptiveDE import ( + RefinedQuantumHybridEliteAdaptiveDE, + ) lama_register["RefinedQuantumHybridEliteAdaptiveDE"] = RefinedQuantumHybridEliteAdaptiveDE - res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumHybridEliteAdaptiveDE = NonObjectOptimizer(method="LLAMARefinedQuantumHybridEliteAdaptiveDE").set_name("LLAMARefinedQuantumHybridEliteAdaptiveDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumHybridEliteAdaptiveDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumHybridEliteAdaptiveDE = NonObjectOptimizer( + method="LLAMARefinedQuantumHybridEliteAdaptiveDE" + ).set_name("LLAMARefinedQuantumHybridEliteAdaptiveDE", register=True) +except Exception as e: # RefinedQuantumHybridEliteAdaptiveDE print("RefinedQuantumHybridEliteAdaptiveDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInfluenceLocalSearchOptimizer import RefinedQuantumInfluenceLocalSearchOptimizer +try: # RefinedQuantumInfluenceLocalSearchOptimizer + from nevergrad.optimization.lama.RefinedQuantumInfluenceLocalSearchOptimizer import ( + RefinedQuantumInfluenceLocalSearchOptimizer, + ) lama_register["RefinedQuantumInfluenceLocalSearchOptimizer"] = RefinedQuantumInfluenceLocalSearchOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInfluenceLocalSearchOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer").set_name("LLAMARefinedQuantumInfluenceLocalSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInfluenceLocalSearchOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInfluenceLocalSearchOptimizer" + ).set_name("LLAMARefinedQuantumInfluenceLocalSearchOptimizer", register=True) +except Exception as e: # RefinedQuantumInfluenceLocalSearchOptimizer print("RefinedQuantumInfluenceLocalSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInformedAdaptiveInertiaOptimizer import RefinedQuantumInformedAdaptiveInertiaOptimizer - - lama_register["RefinedQuantumInformedAdaptiveInertiaOptimizer"] = RefinedQuantumInformedAdaptiveInertiaOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer").set_name("LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer", register=True) -except Exception as e: +try: # RefinedQuantumInformedAdaptiveInertiaOptimizer + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptiveInertiaOptimizer import ( + RefinedQuantumInformedAdaptiveInertiaOptimizer, + ) + + lama_register["RefinedQuantumInformedAdaptiveInertiaOptimizer"] = ( + RefinedQuantumInformedAdaptiveInertiaOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer" + ).set_name("LLAMARefinedQuantumInformedAdaptiveInertiaOptimizer", register=True) +except Exception as e: # RefinedQuantumInformedAdaptiveInertiaOptimizer print("RefinedQuantumInformedAdaptiveInertiaOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInformedAdaptivePSO import RefinedQuantumInformedAdaptivePSO +try: # RefinedQuantumInformedAdaptivePSO + from nevergrad.optimization.lama.RefinedQuantumInformedAdaptivePSO import ( + RefinedQuantumInformedAdaptivePSO, + ) lama_register["RefinedQuantumInformedAdaptivePSO"] = RefinedQuantumInformedAdaptivePSO - res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInformedAdaptivePSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptivePSO").set_name("LLAMARefinedQuantumInformedAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInformedAdaptivePSO = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedAdaptivePSO" + ).set_name("LLAMARefinedQuantumInformedAdaptivePSO", register=True) +except Exception as e: # RefinedQuantumInformedAdaptivePSO print("RefinedQuantumInformedAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInformedDifferentialStrategyV2 import RefinedQuantumInformedDifferentialStrategyV2 - - lama_register["RefinedQuantumInformedDifferentialStrategyV2"] = RefinedQuantumInformedDifferentialStrategyV2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedDifferentialStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInformedDifferentialStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumInformedDifferentialStrategyV2").set_name("LLAMARefinedQuantumInformedDifferentialStrategyV2", register=True) -except Exception as e: +try: # RefinedQuantumInformedDifferentialStrategyV2 + from nevergrad.optimization.lama.RefinedQuantumInformedDifferentialStrategyV2 import ( + RefinedQuantumInformedDifferentialStrategyV2, + ) + + lama_register["RefinedQuantumInformedDifferentialStrategyV2"] = ( + RefinedQuantumInformedDifferentialStrategyV2 + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedDifferentialStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInformedDifferentialStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedDifferentialStrategyV2" + ).set_name("LLAMARefinedQuantumInformedDifferentialStrategyV2", register=True) +except Exception as e: # RefinedQuantumInformedDifferentialStrategyV2 print("RefinedQuantumInformedDifferentialStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInformedGradientOptimizer import RefinedQuantumInformedGradientOptimizer +try: # RefinedQuantumInformedGradientOptimizer + from nevergrad.optimization.lama.RefinedQuantumInformedGradientOptimizer import ( + RefinedQuantumInformedGradientOptimizer, + ) lama_register["RefinedQuantumInformedGradientOptimizer"] = RefinedQuantumInformedGradientOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInformedGradientOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumInformedGradientOptimizer").set_name("LLAMARefinedQuantumInformedGradientOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedGradientOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInformedGradientOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumInformedGradientOptimizer" + ).set_name("LLAMARefinedQuantumInformedGradientOptimizer", register=True) +except Exception as e: # RefinedQuantumInformedGradientOptimizer print("RefinedQuantumInformedGradientOptimizer can not be imported: ", e) -try: +try: # RefinedQuantumInformedPSO from nevergrad.optimization.lama.RefinedQuantumInformedPSO import RefinedQuantumInformedPSO lama_register["RefinedQuantumInformedPSO"] = RefinedQuantumInformedPSO - res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInformedPSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO").set_name("LLAMARefinedQuantumInformedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInformedPSO = NonObjectOptimizer(method="LLAMARefinedQuantumInformedPSO").set_name( + "LLAMARefinedQuantumInformedPSO", register=True + ) +except Exception as e: # RefinedQuantumInformedPSO print("RefinedQuantumInformedPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumInfusedAdaptiveStrategyV2 import RefinedQuantumInfusedAdaptiveStrategyV2 +try: # RefinedQuantumInfusedAdaptiveStrategyV2 + from nevergrad.optimization.lama.RefinedQuantumInfusedAdaptiveStrategyV2 import ( + RefinedQuantumInfusedAdaptiveStrategyV2, + ) lama_register["RefinedQuantumInfusedAdaptiveStrategyV2"] = RefinedQuantumInfusedAdaptiveStrategyV2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumInfusedAdaptiveStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2").set_name("LLAMARefinedQuantumInfusedAdaptiveStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumInfusedAdaptiveStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumInfusedAdaptiveStrategyV2" + ).set_name("LLAMARefinedQuantumInfusedAdaptiveStrategyV2", register=True) +except Exception as e: # RefinedQuantumInfusedAdaptiveStrategyV2 print("RefinedQuantumInfusedAdaptiveStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumLevyMemeticDifferentialEvolution import RefinedQuantumLevyMemeticDifferentialEvolution - - lama_register["RefinedQuantumLevyMemeticDifferentialEvolution"] = RefinedQuantumLevyMemeticDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumLevyMemeticDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution").set_name("LLAMARefinedQuantumLevyMemeticDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedQuantumLevyMemeticDifferentialEvolution + from nevergrad.optimization.lama.RefinedQuantumLevyMemeticDifferentialEvolution import ( + RefinedQuantumLevyMemeticDifferentialEvolution, + ) + + lama_register["RefinedQuantumLevyMemeticDifferentialEvolution"] = ( + RefinedQuantumLevyMemeticDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumLevyMemeticDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedQuantumLevyMemeticDifferentialEvolution" + ).set_name("LLAMARefinedQuantumLevyMemeticDifferentialEvolution", register=True) +except Exception as e: # RefinedQuantumLevyMemeticDifferentialEvolution print("RefinedQuantumLevyMemeticDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumMultiStrategyOptimization import RefinedQuantumMultiStrategyOptimization +try: # RefinedQuantumMultiStrategyOptimization + from nevergrad.optimization.lama.RefinedQuantumMultiStrategyOptimization import ( + RefinedQuantumMultiStrategyOptimization, + ) lama_register["RefinedQuantumMultiStrategyOptimization"] = RefinedQuantumMultiStrategyOptimization - res = NonObjectOptimizer(method="LLAMARefinedQuantumMultiStrategyOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumMultiStrategyOptimization = NonObjectOptimizer(method="LLAMARefinedQuantumMultiStrategyOptimization").set_name("LLAMARefinedQuantumMultiStrategyOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumMultiStrategyOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumMultiStrategyOptimization = NonObjectOptimizer( + method="LLAMARefinedQuantumMultiStrategyOptimization" + ).set_name("LLAMARefinedQuantumMultiStrategyOptimization", register=True) +except Exception as e: # RefinedQuantumMultiStrategyOptimization print("RefinedQuantumMultiStrategyOptimization can not be imported: ", e) -try: +try: # RefinedQuantumNesterovSynergyV2 from nevergrad.optimization.lama.RefinedQuantumNesterovSynergyV2 import RefinedQuantumNesterovSynergyV2 lama_register["RefinedQuantumNesterovSynergyV2"] = RefinedQuantumNesterovSynergyV2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumNesterovSynergyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumNesterovSynergyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumNesterovSynergyV2").set_name("LLAMARefinedQuantumNesterovSynergyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumNesterovSynergyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumNesterovSynergyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumNesterovSynergyV2" + ).set_name("LLAMARefinedQuantumNesterovSynergyV2", register=True) +except Exception as e: # RefinedQuantumNesterovSynergyV2 print("RefinedQuantumNesterovSynergyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumResilientCrossoverEnhancer import RefinedQuantumResilientCrossoverEnhancer +try: # RefinedQuantumResilientCrossoverEnhancer + from nevergrad.optimization.lama.RefinedQuantumResilientCrossoverEnhancer import ( + RefinedQuantumResilientCrossoverEnhancer, + ) lama_register["RefinedQuantumResilientCrossoverEnhancer"] = RefinedQuantumResilientCrossoverEnhancer - res = NonObjectOptimizer(method="LLAMARefinedQuantumResilientCrossoverEnhancer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumResilientCrossoverEnhancer = NonObjectOptimizer(method="LLAMARefinedQuantumResilientCrossoverEnhancer").set_name("LLAMARefinedQuantumResilientCrossoverEnhancer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumResilientCrossoverEnhancer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumResilientCrossoverEnhancer = NonObjectOptimizer( + method="LLAMARefinedQuantumResilientCrossoverEnhancer" + ).set_name("LLAMARefinedQuantumResilientCrossoverEnhancer", register=True) +except Exception as e: # RefinedQuantumResilientCrossoverEnhancer print("RefinedQuantumResilientCrossoverEnhancer can not be imported: ", e) -try: +try: # RefinedQuantumSwarmOptimizer from nevergrad.optimization.lama.RefinedQuantumSwarmOptimizer import RefinedQuantumSwarmOptimizer lama_register["RefinedQuantumSwarmOptimizer"] = RefinedQuantumSwarmOptimizer - res = NonObjectOptimizer(method="LLAMARefinedQuantumSwarmOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumSwarmOptimizer = NonObjectOptimizer(method="LLAMARefinedQuantumSwarmOptimizer").set_name("LLAMARefinedQuantumSwarmOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumSwarmOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumSwarmOptimizer = NonObjectOptimizer( + method="LLAMARefinedQuantumSwarmOptimizer" + ).set_name("LLAMARefinedQuantumSwarmOptimizer", register=True) +except Exception as e: # RefinedQuantumSwarmOptimizer print("RefinedQuantumSwarmOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV2 import RefinedQuantumSymbioticStrategyV2 +try: # RefinedQuantumSymbioticStrategyV2 + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV2 import ( + RefinedQuantumSymbioticStrategyV2, + ) lama_register["RefinedQuantumSymbioticStrategyV2"] = RefinedQuantumSymbioticStrategyV2 - res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumSymbioticStrategyV2 = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV2").set_name("LLAMARefinedQuantumSymbioticStrategyV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumSymbioticStrategyV2 = NonObjectOptimizer( + method="LLAMARefinedQuantumSymbioticStrategyV2" + ).set_name("LLAMARefinedQuantumSymbioticStrategyV2", register=True) +except Exception as e: # RefinedQuantumSymbioticStrategyV2 print("RefinedQuantumSymbioticStrategyV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV4 import RefinedQuantumSymbioticStrategyV4 +try: # RefinedQuantumSymbioticStrategyV4 + from nevergrad.optimization.lama.RefinedQuantumSymbioticStrategyV4 import ( + RefinedQuantumSymbioticStrategyV4, + ) lama_register["RefinedQuantumSymbioticStrategyV4"] = RefinedQuantumSymbioticStrategyV4 - res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumSymbioticStrategyV4 = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV4").set_name("LLAMARefinedQuantumSymbioticStrategyV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumSymbioticStrategyV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumSymbioticStrategyV4 = NonObjectOptimizer( + method="LLAMARefinedQuantumSymbioticStrategyV4" + ).set_name("LLAMARefinedQuantumSymbioticStrategyV4", register=True) +except Exception as e: # RefinedQuantumSymbioticStrategyV4 print("RefinedQuantumSymbioticStrategyV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedQuantumTunnelingOptimizerV19 import RefinedQuantumTunnelingOptimizerV19 +try: # RefinedQuantumTunnelingOptimizerV19 + from nevergrad.optimization.lama.RefinedQuantumTunnelingOptimizerV19 import ( + RefinedQuantumTunnelingOptimizerV19, + ) lama_register["RefinedQuantumTunnelingOptimizerV19"] = RefinedQuantumTunnelingOptimizerV19 - res = NonObjectOptimizer(method="LLAMARefinedQuantumTunnelingOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedQuantumTunnelingOptimizerV19 = NonObjectOptimizer(method="LLAMARefinedQuantumTunnelingOptimizerV19").set_name("LLAMARefinedQuantumTunnelingOptimizerV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedQuantumTunnelingOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedQuantumTunnelingOptimizerV19 = NonObjectOptimizer( + method="LLAMARefinedQuantumTunnelingOptimizerV19" + ).set_name("LLAMARefinedQuantumTunnelingOptimizerV19", register=True) +except Exception as e: # RefinedQuantumTunnelingOptimizerV19 print("RefinedQuantumTunnelingOptimizerV19 can not be imported: ", e) -try: +try: # RefinedRAMEDSPro from nevergrad.optimization.lama.RefinedRAMEDSPro import RefinedRAMEDSPro lama_register["RefinedRAMEDSPro"] = RefinedRAMEDSPro - res = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedRAMEDSPro = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro").set_name("LLAMARefinedRAMEDSPro", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedRAMEDSPro = NonObjectOptimizer(method="LLAMARefinedRAMEDSPro").set_name( + "LLAMARefinedRAMEDSPro", register=True + ) +except Exception as e: # RefinedRAMEDSPro print("RefinedRAMEDSPro can not be imported: ", e) -try: +try: # RefinedRAMEDSv2 from nevergrad.optimization.lama.RefinedRAMEDSv2 import RefinedRAMEDSv2 lama_register["RefinedRAMEDSv2"] = RefinedRAMEDSv2 - res = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedRAMEDSv2 = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2").set_name("LLAMARefinedRAMEDSv2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedRAMEDSv2 = NonObjectOptimizer(method="LLAMARefinedRAMEDSv2").set_name( + "LLAMARefinedRAMEDSv2", register=True + ) +except Exception as e: # RefinedRAMEDSv2 print("RefinedRAMEDSv2 can not be imported: ", e) -try: +try: # RefinedSpatialAdaptiveOptimizer from nevergrad.optimization.lama.RefinedSpatialAdaptiveOptimizer import RefinedSpatialAdaptiveOptimizer lama_register["RefinedSpatialAdaptiveOptimizer"] = RefinedSpatialAdaptiveOptimizer - res = NonObjectOptimizer(method="LLAMARefinedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedSpatialAdaptiveOptimizer = NonObjectOptimizer(method="LLAMARefinedSpatialAdaptiveOptimizer").set_name("LLAMARefinedSpatialAdaptiveOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedSpatialAdaptiveOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedSpatialAdaptiveOptimizer = NonObjectOptimizer( + method="LLAMARefinedSpatialAdaptiveOptimizer" + ).set_name("LLAMARefinedSpatialAdaptiveOptimizer", register=True) +except Exception as e: # RefinedSpatialAdaptiveOptimizer print("RefinedSpatialAdaptiveOptimizer can not be imported: ", e) -try: +try: # RefinedSpiralSearchOptimizer from nevergrad.optimization.lama.RefinedSpiralSearchOptimizer import RefinedSpiralSearchOptimizer lama_register["RefinedSpiralSearchOptimizer"] = RefinedSpiralSearchOptimizer - res = NonObjectOptimizer(method="LLAMARefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedSpiralSearchOptimizer = NonObjectOptimizer(method="LLAMARefinedSpiralSearchOptimizer").set_name("LLAMARefinedSpiralSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedSpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedSpiralSearchOptimizer = NonObjectOptimizer( + method="LLAMARefinedSpiralSearchOptimizer" + ).set_name("LLAMARefinedSpiralSearchOptimizer", register=True) +except Exception as e: # RefinedSpiralSearchOptimizer print("RefinedSpiralSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedStochasticBalancingOptimizer import RefinedStochasticBalancingOptimizer +try: # RefinedStochasticBalancingOptimizer + from nevergrad.optimization.lama.RefinedStochasticBalancingOptimizer import ( + RefinedStochasticBalancingOptimizer, + ) lama_register["RefinedStochasticBalancingOptimizer"] = RefinedStochasticBalancingOptimizer - res = NonObjectOptimizer(method="LLAMARefinedStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedStochasticBalancingOptimizer = NonObjectOptimizer(method="LLAMARefinedStochasticBalancingOptimizer").set_name("LLAMARefinedStochasticBalancingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedStochasticBalancingOptimizer = NonObjectOptimizer( + method="LLAMARefinedStochasticBalancingOptimizer" + ).set_name("LLAMARefinedStochasticBalancingOptimizer", register=True) +except Exception as e: # RefinedStochasticBalancingOptimizer print("RefinedStochasticBalancingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedStrategicAdaptiveDifferentialEvolution import RefinedStrategicAdaptiveDifferentialEvolution - - lama_register["RefinedStrategicAdaptiveDifferentialEvolution"] = RefinedStrategicAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedStrategicAdaptiveDifferentialEvolution").set_name("LLAMARefinedStrategicAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedStrategicAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RefinedStrategicAdaptiveDifferentialEvolution import ( + RefinedStrategicAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedStrategicAdaptiveDifferentialEvolution"] = ( + RefinedStrategicAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedStrategicAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedStrategicAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RefinedStrategicAdaptiveDifferentialEvolution print("RefinedStrategicAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedStrategicDiminishingEvolver import RefinedStrategicDiminishingEvolver +try: # RefinedStrategicDiminishingEvolver + from nevergrad.optimization.lama.RefinedStrategicDiminishingEvolver import ( + RefinedStrategicDiminishingEvolver, + ) lama_register["RefinedStrategicDiminishingEvolver"] = RefinedStrategicDiminishingEvolver - res = NonObjectOptimizer(method="LLAMARefinedStrategicDiminishingEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedStrategicDiminishingEvolver = NonObjectOptimizer(method="LLAMARefinedStrategicDiminishingEvolver").set_name("LLAMARefinedStrategicDiminishingEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedStrategicDiminishingEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedStrategicDiminishingEvolver = NonObjectOptimizer( + method="LLAMARefinedStrategicDiminishingEvolver" + ).set_name("LLAMARefinedStrategicDiminishingEvolver", register=True) +except Exception as e: # RefinedStrategicDiminishingEvolver print("RefinedStrategicDiminishingEvolver can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedStrategicQuorumWithDirectionalBias import RefinedStrategicQuorumWithDirectionalBias +try: # RefinedStrategicQuorumWithDirectionalBias + from nevergrad.optimization.lama.RefinedStrategicQuorumWithDirectionalBias import ( + RefinedStrategicQuorumWithDirectionalBias, + ) lama_register["RefinedStrategicQuorumWithDirectionalBias"] = RefinedStrategicQuorumWithDirectionalBias - res = NonObjectOptimizer(method="LLAMARefinedStrategicQuorumWithDirectionalBias")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedStrategicQuorumWithDirectionalBias = NonObjectOptimizer(method="LLAMARefinedStrategicQuorumWithDirectionalBias").set_name("LLAMARefinedStrategicQuorumWithDirectionalBias", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedStrategicQuorumWithDirectionalBias")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedStrategicQuorumWithDirectionalBias = NonObjectOptimizer( + method="LLAMARefinedStrategicQuorumWithDirectionalBias" + ).set_name("LLAMARefinedStrategicQuorumWithDirectionalBias", register=True) +except Exception as e: # RefinedStrategicQuorumWithDirectionalBias print("RefinedStrategicQuorumWithDirectionalBias can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedSuperiorAdaptiveStrategyDE import RefinedSuperiorAdaptiveStrategyDE +try: # RefinedSuperiorAdaptiveStrategyDE + from nevergrad.optimization.lama.RefinedSuperiorAdaptiveStrategyDE import ( + RefinedSuperiorAdaptiveStrategyDE, + ) lama_register["RefinedSuperiorAdaptiveStrategyDE"] = RefinedSuperiorAdaptiveStrategyDE - res = NonObjectOptimizer(method="LLAMARefinedSuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedSuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMARefinedSuperiorAdaptiveStrategyDE").set_name("LLAMARefinedSuperiorAdaptiveStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedSuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedSuperiorAdaptiveStrategyDE = NonObjectOptimizer( + method="LLAMARefinedSuperiorAdaptiveStrategyDE" + ).set_name("LLAMARefinedSuperiorAdaptiveStrategyDE", register=True) +except Exception as e: # RefinedSuperiorAdaptiveStrategyDE print("RefinedSuperiorAdaptiveStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedTemporalAdaptiveDifferentialEvolution import RefinedTemporalAdaptiveDifferentialEvolution - - lama_register["RefinedTemporalAdaptiveDifferentialEvolution"] = RefinedTemporalAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARefinedTemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedTemporalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARefinedTemporalAdaptiveDifferentialEvolution").set_name("LLAMARefinedTemporalAdaptiveDifferentialEvolution", register=True) -except Exception as e: +try: # RefinedTemporalAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RefinedTemporalAdaptiveDifferentialEvolution import ( + RefinedTemporalAdaptiveDifferentialEvolution, + ) + + lama_register["RefinedTemporalAdaptiveDifferentialEvolution"] = ( + RefinedTemporalAdaptiveDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMARefinedTemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedTemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARefinedTemporalAdaptiveDifferentialEvolution" + ).set_name("LLAMARefinedTemporalAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RefinedTemporalAdaptiveDifferentialEvolution print("RefinedTemporalAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimateEnhancedGuidedMassQGSA_v71 import RefinedUltimateEnhancedGuidedMassQGSA_v71 +try: # RefinedUltimateEnhancedGuidedMassQGSA_v71 + from nevergrad.optimization.lama.RefinedUltimateEnhancedGuidedMassQGSA_v71 import ( + RefinedUltimateEnhancedGuidedMassQGSA_v71, + ) lama_register["RefinedUltimateEnhancedGuidedMassQGSA_v71"] = RefinedUltimateEnhancedGuidedMassQGSA_v71 - res = NonObjectOptimizer(method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71 = NonObjectOptimizer(method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71").set_name("LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71 = NonObjectOptimizer( + method="LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71" + ).set_name("LLAMARefinedUltimateEnhancedGuidedMassQGSA_v71", register=True) +except Exception as e: # RefinedUltimateEnhancedGuidedMassQGSA_v71 print("RefinedUltimateEnhancedGuidedMassQGSA_v71 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV16 import RefinedUltimateEvolutionaryGradientOptimizerV16 - - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV16"] = RefinedUltimateEvolutionaryGradientOptimizerV16 - res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimateEvolutionaryGradientOptimizerV16 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV16", register=True) -except Exception as e: +try: # RefinedUltimateEvolutionaryGradientOptimizerV16 + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV16 import ( + RefinedUltimateEvolutionaryGradientOptimizerV16, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV16"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV16 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV16 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV16" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV16", register=True) +except Exception as e: # RefinedUltimateEvolutionaryGradientOptimizerV16 print("RefinedUltimateEvolutionaryGradientOptimizerV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV17 import RefinedUltimateEvolutionaryGradientOptimizerV17 - - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV17"] = RefinedUltimateEvolutionaryGradientOptimizerV17 - res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimateEvolutionaryGradientOptimizerV17 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV17", register=True) -except Exception as e: +try: # RefinedUltimateEvolutionaryGradientOptimizerV17 + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV17 import ( + RefinedUltimateEvolutionaryGradientOptimizerV17, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV17"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV17 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV17 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV17" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV17", register=True) +except Exception as e: # RefinedUltimateEvolutionaryGradientOptimizerV17 print("RefinedUltimateEvolutionaryGradientOptimizerV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV34 import RefinedUltimateEvolutionaryGradientOptimizerV34 - - lama_register["RefinedUltimateEvolutionaryGradientOptimizerV34"] = RefinedUltimateEvolutionaryGradientOptimizerV34 - res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimateEvolutionaryGradientOptimizerV34 = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34").set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV34", register=True) -except Exception as e: +try: # RefinedUltimateEvolutionaryGradientOptimizerV34 + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryGradientOptimizerV34 import ( + RefinedUltimateEvolutionaryGradientOptimizerV34, + ) + + lama_register["RefinedUltimateEvolutionaryGradientOptimizerV34"] = ( + RefinedUltimateEvolutionaryGradientOptimizerV34 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimateEvolutionaryGradientOptimizerV34 = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryGradientOptimizerV34" + ).set_name("LLAMARefinedUltimateEvolutionaryGradientOptimizerV34", register=True) +except Exception as e: # RefinedUltimateEvolutionaryGradientOptimizerV34 print("RefinedUltimateEvolutionaryGradientOptimizerV34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimateEvolutionaryOptimizer import RefinedUltimateEvolutionaryOptimizer +try: # RefinedUltimateEvolutionaryOptimizer + from nevergrad.optimization.lama.RefinedUltimateEvolutionaryOptimizer import ( + RefinedUltimateEvolutionaryOptimizer, + ) lama_register["RefinedUltimateEvolutionaryOptimizer"] = RefinedUltimateEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimateEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryOptimizer").set_name("LLAMARefinedUltimateEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimateEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMARefinedUltimateEvolutionaryOptimizer" + ).set_name("LLAMARefinedUltimateEvolutionaryOptimizer", register=True) +except Exception as e: # RefinedUltimateEvolutionaryOptimizer print("RefinedUltimateEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltimatePrecisionEvolutionaryOptimizerV42 import RefinedUltimatePrecisionEvolutionaryOptimizerV42 - - lama_register["RefinedUltimatePrecisionEvolutionaryOptimizerV42"] = RefinedUltimatePrecisionEvolutionaryOptimizerV42 - res = NonObjectOptimizer(method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42 = NonObjectOptimizer(method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42").set_name("LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42", register=True) -except Exception as e: +try: # RefinedUltimatePrecisionEvolutionaryOptimizerV42 + from nevergrad.optimization.lama.RefinedUltimatePrecisionEvolutionaryOptimizerV42 import ( + RefinedUltimatePrecisionEvolutionaryOptimizerV42, + ) + + lama_register["RefinedUltimatePrecisionEvolutionaryOptimizerV42"] = ( + RefinedUltimatePrecisionEvolutionaryOptimizerV42 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42 = NonObjectOptimizer( + method="LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42" + ).set_name("LLAMARefinedUltimatePrecisionEvolutionaryOptimizerV42", register=True) +except Exception as e: # RefinedUltimatePrecisionEvolutionaryOptimizerV42 print("RefinedUltimatePrecisionEvolutionaryOptimizerV42 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer import RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer - - lama_register["RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: +try: # RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( + RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( + RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMARefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer print("RefinedUltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltraEvolutionaryGradientOptimizerV28 import RefinedUltraEvolutionaryGradientOptimizerV28 - - lama_register["RefinedUltraEvolutionaryGradientOptimizerV28"] = RefinedUltraEvolutionaryGradientOptimizerV28 - res = NonObjectOptimizer(method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltraEvolutionaryGradientOptimizerV28 = NonObjectOptimizer(method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28").set_name("LLAMARefinedUltraEvolutionaryGradientOptimizerV28", register=True) -except Exception as e: +try: # RefinedUltraEvolutionaryGradientOptimizerV28 + from nevergrad.optimization.lama.RefinedUltraEvolutionaryGradientOptimizerV28 import ( + RefinedUltraEvolutionaryGradientOptimizerV28, + ) + + lama_register["RefinedUltraEvolutionaryGradientOptimizerV28"] = ( + RefinedUltraEvolutionaryGradientOptimizerV28 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltraEvolutionaryGradientOptimizerV28 = NonObjectOptimizer( + method="LLAMARefinedUltraEvolutionaryGradientOptimizerV28" + ).set_name("LLAMARefinedUltraEvolutionaryGradientOptimizerV28", register=True) +except Exception as e: # RefinedUltraEvolutionaryGradientOptimizerV28 print("RefinedUltraEvolutionaryGradientOptimizerV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltraOptimizedDynamicPrecisionOptimizerV20 import RefinedUltraOptimizedDynamicPrecisionOptimizerV20 - - lama_register["RefinedUltraOptimizedDynamicPrecisionOptimizerV20"] = RefinedUltraOptimizedDynamicPrecisionOptimizerV20 - res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20 = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20").set_name("LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20", register=True) -except Exception as e: +try: # RefinedUltraOptimizedDynamicPrecisionOptimizerV20 + from nevergrad.optimization.lama.RefinedUltraOptimizedDynamicPrecisionOptimizerV20 import ( + RefinedUltraOptimizedDynamicPrecisionOptimizerV20, + ) + + lama_register["RefinedUltraOptimizedDynamicPrecisionOptimizerV20"] = ( + RefinedUltraOptimizedDynamicPrecisionOptimizerV20 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20 = NonObjectOptimizer( + method="LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20" + ).set_name("LLAMARefinedUltraOptimizedDynamicPrecisionOptimizerV20", register=True) +except Exception as e: # RefinedUltraOptimizedDynamicPrecisionOptimizerV20 print("RefinedUltraOptimizedDynamicPrecisionOptimizerV20 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 import RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 - - lama_register["RefinedUltraOptimizedEvolutionaryGradientOptimizerV31"] = RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 - res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31 = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31").set_name("LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31", register=True) -except Exception as e: +try: # RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 + from nevergrad.optimization.lama.RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 import ( + RefinedUltraOptimizedEvolutionaryGradientOptimizerV31, + ) + + lama_register["RefinedUltraOptimizedEvolutionaryGradientOptimizerV31"] = ( + RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 + ) + # res = NonObjectOptimizer(method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31 = NonObjectOptimizer( + method="LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31" + ).set_name("LLAMARefinedUltraOptimizedEvolutionaryGradientOptimizerV31", register=True) +except Exception as e: # RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 print("RefinedUltraOptimizedEvolutionaryGradientOptimizerV31 can not be imported: ", e) -try: +try: # RefinedUltraRefinedRAMEDS from nevergrad.optimization.lama.RefinedUltraRefinedRAMEDS import RefinedUltraRefinedRAMEDS lama_register["RefinedUltraRefinedRAMEDS"] = RefinedUltraRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS").set_name("LLAMARefinedUltraRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinedUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMARefinedUltraRefinedRAMEDS").set_name( + "LLAMARefinedUltraRefinedRAMEDS", register=True + ) +except Exception as e: # RefinedUltraRefinedRAMEDS print("RefinedUltraRefinedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinementEnhancedHybridOptimizer import RefinementEnhancedHybridOptimizer +try: # RefinementEnhancedHybridOptimizer + from nevergrad.optimization.lama.RefinementEnhancedHybridOptimizer import ( + RefinementEnhancedHybridOptimizer, + ) lama_register["RefinementEnhancedHybridOptimizer"] = RefinementEnhancedHybridOptimizer - res = NonObjectOptimizer(method="LLAMARefinementEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinementEnhancedHybridOptimizer = NonObjectOptimizer(method="LLAMARefinementEnhancedHybridOptimizer").set_name("LLAMARefinementEnhancedHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinementEnhancedHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinementEnhancedHybridOptimizer = NonObjectOptimizer( + method="LLAMARefinementEnhancedHybridOptimizer" + ).set_name("LLAMARefinementEnhancedHybridOptimizer", register=True) +except Exception as e: # RefinementEnhancedHybridOptimizer print("RefinementEnhancedHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.RefinementSelectiveCohortOptimization import RefinementSelectiveCohortOptimization +try: # RefinementSelectiveCohortOptimization + from nevergrad.optimization.lama.RefinementSelectiveCohortOptimization import ( + RefinementSelectiveCohortOptimization, + ) lama_register["RefinementSelectiveCohortOptimization"] = RefinementSelectiveCohortOptimization - res = NonObjectOptimizer(method="LLAMARefinementSelectiveCohortOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinementSelectiveCohortOptimization = NonObjectOptimizer(method="LLAMARefinementSelectiveCohortOptimization").set_name("LLAMARefinementSelectiveCohortOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinementSelectiveCohortOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinementSelectiveCohortOptimization = NonObjectOptimizer( + method="LLAMARefinementSelectiveCohortOptimization" + ).set_name("LLAMARefinementSelectiveCohortOptimization", register=True) +except Exception as e: # RefinementSelectiveCohortOptimization print("RefinementSelectiveCohortOptimization can not be imported: ", e) -try: +try: # RefinementTunedPSO from nevergrad.optimization.lama.RefinementTunedPSO import RefinementTunedPSO lama_register["RefinementTunedPSO"] = RefinementTunedPSO - res = NonObjectOptimizer(method="LLAMARefinementTunedPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARefinementTunedPSO = NonObjectOptimizer(method="LLAMARefinementTunedPSO").set_name("LLAMARefinementTunedPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARefinementTunedPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARefinementTunedPSO = NonObjectOptimizer(method="LLAMARefinementTunedPSO").set_name( + "LLAMARefinementTunedPSO", register=True + ) +except Exception as e: # RefinementTunedPSO print("RefinementTunedPSO can not be imported: ", e) -try: +try: # ResilientAdaptivePSO from nevergrad.optimization.lama.ResilientAdaptivePSO import ResilientAdaptivePSO lama_register["ResilientAdaptivePSO"] = ResilientAdaptivePSO - res = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO").set_name("LLAMAResilientAdaptivePSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAResilientAdaptivePSO = NonObjectOptimizer(method="LLAMAResilientAdaptivePSO").set_name( + "LLAMAResilientAdaptivePSO", register=True + ) +except Exception as e: # ResilientAdaptivePSO print("ResilientAdaptivePSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.ResponsiveAdaptiveMemoryStrategyV52 import ResponsiveAdaptiveMemoryStrategyV52 +try: # ResponsiveAdaptiveMemoryStrategyV52 + from nevergrad.optimization.lama.ResponsiveAdaptiveMemoryStrategyV52 import ( + ResponsiveAdaptiveMemoryStrategyV52, + ) lama_register["ResponsiveAdaptiveMemoryStrategyV52"] = ResponsiveAdaptiveMemoryStrategyV52 - res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveMemoryStrategyV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAResponsiveAdaptiveMemoryStrategyV52 = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveMemoryStrategyV52").set_name("LLAMAResponsiveAdaptiveMemoryStrategyV52", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveMemoryStrategyV52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAResponsiveAdaptiveMemoryStrategyV52 = NonObjectOptimizer( + method="LLAMAResponsiveAdaptiveMemoryStrategyV52" + ).set_name("LLAMAResponsiveAdaptiveMemoryStrategyV52", register=True) +except Exception as e: # ResponsiveAdaptiveMemoryStrategyV52 print("ResponsiveAdaptiveMemoryStrategyV52 can not be imported: ", e) -try: +try: # ResponsiveAdaptiveStrategyV27 from nevergrad.optimization.lama.ResponsiveAdaptiveStrategyV27 import ResponsiveAdaptiveStrategyV27 lama_register["ResponsiveAdaptiveStrategyV27"] = ResponsiveAdaptiveStrategyV27 - res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveStrategyV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAResponsiveAdaptiveStrategyV27 = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveStrategyV27").set_name("LLAMAResponsiveAdaptiveStrategyV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAResponsiveAdaptiveStrategyV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAResponsiveAdaptiveStrategyV27 = NonObjectOptimizer( + method="LLAMAResponsiveAdaptiveStrategyV27" + ).set_name("LLAMAResponsiveAdaptiveStrategyV27", register=True) +except Exception as e: # ResponsiveAdaptiveStrategyV27 print("ResponsiveAdaptiveStrategyV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RestartAdaptiveDifferentialEvolutionPSO import RestartAdaptiveDifferentialEvolutionPSO +try: # RestartAdaptiveDifferentialEvolutionPSO + from nevergrad.optimization.lama.RestartAdaptiveDifferentialEvolutionPSO import ( + RestartAdaptiveDifferentialEvolutionPSO, + ) lama_register["RestartAdaptiveDifferentialEvolutionPSO"] = RestartAdaptiveDifferentialEvolutionPSO - res = NonObjectOptimizer(method="LLAMARestartAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARestartAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer(method="LLAMARestartAdaptiveDifferentialEvolutionPSO").set_name("LLAMARestartAdaptiveDifferentialEvolutionPSO", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARestartAdaptiveDifferentialEvolutionPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARestartAdaptiveDifferentialEvolutionPSO = NonObjectOptimizer( + method="LLAMARestartAdaptiveDifferentialEvolutionPSO" + ).set_name("LLAMARestartAdaptiveDifferentialEvolutionPSO", register=True) +except Exception as e: # RestartAdaptiveDifferentialEvolutionPSO print("RestartAdaptiveDifferentialEvolutionPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.RevisedEnhancedDifferentialEvolutionLSRefinement_v20 import RevisedEnhancedDifferentialEvolutionLSRefinement_v20 - - lama_register["RevisedEnhancedDifferentialEvolutionLSRefinement_v20"] = RevisedEnhancedDifferentialEvolutionLSRefinement_v20 - res = NonObjectOptimizer(method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20 = NonObjectOptimizer(method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20").set_name("LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20", register=True) -except Exception as e: +try: # RevisedEnhancedDifferentialEvolutionLSRefinement_v20 + from nevergrad.optimization.lama.RevisedEnhancedDifferentialEvolutionLSRefinement_v20 import ( + RevisedEnhancedDifferentialEvolutionLSRefinement_v20, + ) + + lama_register["RevisedEnhancedDifferentialEvolutionLSRefinement_v20"] = ( + RevisedEnhancedDifferentialEvolutionLSRefinement_v20 + ) + # res = NonObjectOptimizer(method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20 = NonObjectOptimizer( + method="LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20" + ).set_name("LLAMARevisedEnhancedDifferentialEvolutionLSRefinement_v20", register=True) +except Exception as e: # RevisedEnhancedDifferentialEvolutionLSRefinement_v20 print("RevisedEnhancedDifferentialEvolutionLSRefinement_v20 can not be imported: ", e) -try: +try: # RevolutionaryFireworkAlgorithm from nevergrad.optimization.lama.RevolutionaryFireworkAlgorithm import RevolutionaryFireworkAlgorithm lama_register["RevolutionaryFireworkAlgorithm"] = RevolutionaryFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMARevolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARevolutionaryFireworkAlgorithm = NonObjectOptimizer(method="LLAMARevolutionaryFireworkAlgorithm").set_name("LLAMARevolutionaryFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARevolutionaryFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARevolutionaryFireworkAlgorithm = NonObjectOptimizer( + method="LLAMARevolutionaryFireworkAlgorithm" + ).set_name("LLAMARevolutionaryFireworkAlgorithm", register=True) +except Exception as e: # RevolutionaryFireworkAlgorithm print("RevolutionaryFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.RobustAdaptiveDifferentialEvolution import RobustAdaptiveDifferentialEvolution +try: # RobustAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.RobustAdaptiveDifferentialEvolution import ( + RobustAdaptiveDifferentialEvolution, + ) lama_register["RobustAdaptiveDifferentialEvolution"] = RobustAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMARobustAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARobustAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMARobustAdaptiveDifferentialEvolution").set_name("LLAMARobustAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARobustAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARobustAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMARobustAdaptiveDifferentialEvolution" + ).set_name("LLAMARobustAdaptiveDifferentialEvolution", register=True) +except Exception as e: # RobustAdaptiveDifferentialEvolution print("RobustAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.RobustAdaptiveMemoryLeveragedStrategyV43 import RobustAdaptiveMemoryLeveragedStrategyV43 +try: # RobustAdaptiveMemoryLeveragedStrategyV43 + from nevergrad.optimization.lama.RobustAdaptiveMemoryLeveragedStrategyV43 import ( + RobustAdaptiveMemoryLeveragedStrategyV43, + ) lama_register["RobustAdaptiveMemoryLeveragedStrategyV43"] = RobustAdaptiveMemoryLeveragedStrategyV43 - res = NonObjectOptimizer(method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARobustAdaptiveMemoryLeveragedStrategyV43 = NonObjectOptimizer(method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43").set_name("LLAMARobustAdaptiveMemoryLeveragedStrategyV43", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARobustAdaptiveMemoryLeveragedStrategyV43 = NonObjectOptimizer( + method="LLAMARobustAdaptiveMemoryLeveragedStrategyV43" + ).set_name("LLAMARobustAdaptiveMemoryLeveragedStrategyV43", register=True) +except Exception as e: # RobustAdaptiveMemoryLeveragedStrategyV43 print("RobustAdaptiveMemoryLeveragedStrategyV43 can not be imported: ", e) -try: - from nevergrad.optimization.lama.RobustCovarianceMatrixAdaptationMemeticSearch import RobustCovarianceMatrixAdaptationMemeticSearch - - lama_register["RobustCovarianceMatrixAdaptationMemeticSearch"] = RobustCovarianceMatrixAdaptationMemeticSearch - res = NonObjectOptimizer(method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMARobustCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer(method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch").set_name("LLAMARobustCovarianceMatrixAdaptationMemeticSearch", register=True) -except Exception as e: +try: # RobustCovarianceMatrixAdaptationMemeticSearch + from nevergrad.optimization.lama.RobustCovarianceMatrixAdaptationMemeticSearch import ( + RobustCovarianceMatrixAdaptationMemeticSearch, + ) + + lama_register["RobustCovarianceMatrixAdaptationMemeticSearch"] = ( + RobustCovarianceMatrixAdaptationMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMARobustCovarianceMatrixAdaptationMemeticSearch = NonObjectOptimizer( + method="LLAMARobustCovarianceMatrixAdaptationMemeticSearch" + ).set_name("LLAMARobustCovarianceMatrixAdaptationMemeticSearch", register=True) +except Exception as e: # RobustCovarianceMatrixAdaptationMemeticSearch print("RobustCovarianceMatrixAdaptationMemeticSearch can not be imported: ", e) -try: +try: # SADE from nevergrad.optimization.lama.SADE import SADE lama_register["SADE"] = SADE - res = NonObjectOptimizer(method="LLAMASADE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASADE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASADE = NonObjectOptimizer(method="LLAMASADE").set_name("LLAMASADE", register=True) -except Exception as e: +except Exception as e: # SADE print("SADE can not be imported: ", e) -try: +try: # SADEEM from nevergrad.optimization.lama.SADEEM import SADEEM lama_register["SADEEM"] = SADEEM - res = NonObjectOptimizer(method="LLAMASADEEM")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASADEEM")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASADEEM = NonObjectOptimizer(method="LLAMASADEEM").set_name("LLAMASADEEM", register=True) -except Exception as e: +except Exception as e: # SADEEM print("SADEEM can not be imported: ", e) -try: +try: # SADEIOL from nevergrad.optimization.lama.SADEIOL import SADEIOL lama_register["SADEIOL"] = SADEIOL - res = NonObjectOptimizer(method="LLAMASADEIOL")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASADEIOL")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASADEIOL = NonObjectOptimizer(method="LLAMASADEIOL").set_name("LLAMASADEIOL", register=True) -except Exception as e: +except Exception as e: # SADEIOL print("SADEIOL can not be imported: ", e) -try: +try: # SADEPF from nevergrad.optimization.lama.SADEPF import SADEPF lama_register["SADEPF"] = SADEPF - res = NonObjectOptimizer(method="LLAMASADEPF")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASADEPF")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASADEPF = NonObjectOptimizer(method="LLAMASADEPF").set_name("LLAMASADEPF", register=True) -except Exception as e: +except Exception as e: # SADEPF print("SADEPF can not be imported: ", e) -try: +try: # SAGEA from nevergrad.optimization.lama.SAGEA import SAGEA lama_register["SAGEA"] = SAGEA - res = NonObjectOptimizer(method="LLAMASAGEA")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASAGEA")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASAGEA = NonObjectOptimizer(method="LLAMASAGEA").set_name("LLAMASAGEA", register=True) -except Exception as e: +except Exception as e: # SAGEA print("SAGEA can not be imported: ", e) -try: +try: # SGAE from nevergrad.optimization.lama.SGAE import SGAE lama_register["SGAE"] = SGAE - res = NonObjectOptimizer(method="LLAMASGAE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASGAE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASGAE = NonObjectOptimizer(method="LLAMASGAE").set_name("LLAMASGAE", register=True) -except Exception as e: +except Exception as e: # SGAE print("SGAE can not be imported: ", e) -try: +try: # SGE from nevergrad.optimization.lama.SGE import SGE lama_register["SGE"] = SGE - res = NonObjectOptimizer(method="LLAMASGE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASGE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASGE = NonObjectOptimizer(method="LLAMASGE").set_name("LLAMASGE", register=True) -except Exception as e: +except Exception as e: # SGE print("SGE can not be imported: ", e) -try: +try: # SORAMED from nevergrad.optimization.lama.SORAMED import SORAMED lama_register["SORAMED"] = SORAMED - res = NonObjectOptimizer(method="LLAMASORAMED")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMASORAMED")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMASORAMED = NonObjectOptimizer(method="LLAMASORAMED").set_name("LLAMASORAMED", register=True) -except Exception as e: +except Exception as e: # SORAMED print("SORAMED can not be imported: ", e) -try: - from nevergrad.optimization.lama.ScaledHybridDifferentialEvolution import ScaledHybridDifferentialEvolution +try: # ScaledHybridDifferentialEvolution + from nevergrad.optimization.lama.ScaledHybridDifferentialEvolution import ( + ScaledHybridDifferentialEvolution, + ) lama_register["ScaledHybridDifferentialEvolution"] = ScaledHybridDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAScaledHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAScaledHybridDifferentialEvolution = NonObjectOptimizer(method="LLAMAScaledHybridDifferentialEvolution").set_name("LLAMAScaledHybridDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAScaledHybridDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAScaledHybridDifferentialEvolution = NonObjectOptimizer( + method="LLAMAScaledHybridDifferentialEvolution" + ).set_name("LLAMAScaledHybridDifferentialEvolution", register=True) +except Exception as e: # ScaledHybridDifferentialEvolution print("ScaledHybridDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptingDifferentialEvolutionOptimizer import SelfAdaptingDifferentialEvolutionOptimizer +try: # SelfAdaptingDifferentialEvolutionOptimizer + from nevergrad.optimization.lama.SelfAdaptingDifferentialEvolutionOptimizer import ( + SelfAdaptingDifferentialEvolutionOptimizer, + ) lama_register["SelfAdaptingDifferentialEvolutionOptimizer"] = SelfAdaptingDifferentialEvolutionOptimizer - res = NonObjectOptimizer(method="LLAMASelfAdaptingDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptingDifferentialEvolutionOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptingDifferentialEvolutionOptimizer").set_name("LLAMASelfAdaptingDifferentialEvolutionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptingDifferentialEvolutionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptingDifferentialEvolutionOptimizer = NonObjectOptimizer( + method="LLAMASelfAdaptingDifferentialEvolutionOptimizer" + ).set_name("LLAMASelfAdaptingDifferentialEvolutionOptimizer", register=True) +except Exception as e: # SelfAdaptingDifferentialEvolutionOptimizer print("SelfAdaptingDifferentialEvolutionOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveCovarianceMatrixDifferentialEvolution import SelfAdaptiveCovarianceMatrixDifferentialEvolution - - lama_register["SelfAdaptiveCovarianceMatrixDifferentialEvolution"] = SelfAdaptiveCovarianceMatrixDifferentialEvolution - res = NonObjectOptimizer(method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer(method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution").set_name("LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) -except Exception as e: +try: # SelfAdaptiveCovarianceMatrixDifferentialEvolution + from nevergrad.optimization.lama.SelfAdaptiveCovarianceMatrixDifferentialEvolution import ( + SelfAdaptiveCovarianceMatrixDifferentialEvolution, + ) + + lama_register["SelfAdaptiveCovarianceMatrixDifferentialEvolution"] = ( + SelfAdaptiveCovarianceMatrixDifferentialEvolution + ) + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution = NonObjectOptimizer( + method="LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution" + ).set_name("LLAMASelfAdaptiveCovarianceMatrixDifferentialEvolution", register=True) +except Exception as e: # SelfAdaptiveCovarianceMatrixDifferentialEvolution print("SelfAdaptiveCovarianceMatrixDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolution import SelfAdaptiveDifferentialEvolution +try: # SelfAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolution import ( + SelfAdaptiveDifferentialEvolution, + ) lama_register["SelfAdaptiveDifferentialEvolution"] = SelfAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolution").set_name("LLAMASelfAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolution" + ).set_name("LLAMASelfAdaptiveDifferentialEvolution", register=True) +except Exception as e: # SelfAdaptiveDifferentialEvolution print("SelfAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithLocalRestart import SelfAdaptiveDifferentialEvolutionWithLocalRestart - - lama_register["SelfAdaptiveDifferentialEvolutionWithLocalRestart"] = SelfAdaptiveDifferentialEvolutionWithLocalRestart - res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart", register=True) -except Exception as e: +try: # SelfAdaptiveDifferentialEvolutionWithLocalRestart + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithLocalRestart import ( + SelfAdaptiveDifferentialEvolutionWithLocalRestart, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithLocalRestart"] = ( + SelfAdaptiveDifferentialEvolutionWithLocalRestart + ) + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithLocalRestart", register=True) +except Exception as e: # SelfAdaptiveDifferentialEvolutionWithLocalRestart print("SelfAdaptiveDifferentialEvolutionWithLocalRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithMemeticSearch import SelfAdaptiveDifferentialEvolutionWithMemeticSearch - - lama_register["SelfAdaptiveDifferentialEvolutionWithMemeticSearch"] = SelfAdaptiveDifferentialEvolutionWithMemeticSearch - res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) -except Exception as e: +try: # SelfAdaptiveDifferentialEvolutionWithMemeticSearch + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithMemeticSearch import ( + SelfAdaptiveDifferentialEvolutionWithMemeticSearch, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithMemeticSearch"] = ( + SelfAdaptiveDifferentialEvolutionWithMemeticSearch + ) + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithMemeticSearch", register=True) +except Exception as e: # SelfAdaptiveDifferentialEvolutionWithMemeticSearch print("SelfAdaptiveDifferentialEvolutionWithMemeticSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithRestart import SelfAdaptiveDifferentialEvolutionWithRestart - - lama_register["SelfAdaptiveDifferentialEvolutionWithRestart"] = SelfAdaptiveDifferentialEvolutionWithRestart - res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveDifferentialEvolutionWithRestart = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart").set_name("LLAMASelfAdaptiveDifferentialEvolutionWithRestart", register=True) -except Exception as e: +try: # SelfAdaptiveDifferentialEvolutionWithRestart + from nevergrad.optimization.lama.SelfAdaptiveDifferentialEvolutionWithRestart import ( + SelfAdaptiveDifferentialEvolutionWithRestart, + ) + + lama_register["SelfAdaptiveDifferentialEvolutionWithRestart"] = ( + SelfAdaptiveDifferentialEvolutionWithRestart + ) + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveDifferentialEvolutionWithRestart = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialEvolutionWithRestart" + ).set_name("LLAMASelfAdaptiveDifferentialEvolutionWithRestart", register=True) +except Exception as e: # SelfAdaptiveDifferentialEvolutionWithRestart print("SelfAdaptiveDifferentialEvolutionWithRestart can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveDifferentialSwarmOptimization import SelfAdaptiveDifferentialSwarmOptimization +try: # SelfAdaptiveDifferentialSwarmOptimization + from nevergrad.optimization.lama.SelfAdaptiveDifferentialSwarmOptimization import ( + SelfAdaptiveDifferentialSwarmOptimization, + ) lama_register["SelfAdaptiveDifferentialSwarmOptimization"] = SelfAdaptiveDifferentialSwarmOptimization - res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveDifferentialSwarmOptimization = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialSwarmOptimization").set_name("LLAMASelfAdaptiveDifferentialSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveDifferentialSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveDifferentialSwarmOptimization = NonObjectOptimizer( + method="LLAMASelfAdaptiveDifferentialSwarmOptimization" + ).set_name("LLAMASelfAdaptiveDifferentialSwarmOptimization", register=True) +except Exception as e: # SelfAdaptiveDifferentialSwarmOptimization print("SelfAdaptiveDifferentialSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveEvolutionaryAlgorithm import SelfAdaptiveEvolutionaryAlgorithm +try: # SelfAdaptiveEvolutionaryAlgorithm + from nevergrad.optimization.lama.SelfAdaptiveEvolutionaryAlgorithm import ( + SelfAdaptiveEvolutionaryAlgorithm, + ) lama_register["SelfAdaptiveEvolutionaryAlgorithm"] = SelfAdaptiveEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMASelfAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveEvolutionaryAlgorithm").set_name("LLAMASelfAdaptiveEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveEvolutionaryAlgorithm" + ).set_name("LLAMASelfAdaptiveEvolutionaryAlgorithm", register=True) +except Exception as e: # SelfAdaptiveEvolutionaryAlgorithm print("SelfAdaptiveEvolutionaryAlgorithm can not be imported: ", e) -try: +try: # SelfAdaptiveHybridOptimizer from nevergrad.optimization.lama.SelfAdaptiveHybridOptimizer import SelfAdaptiveHybridOptimizer lama_register["SelfAdaptiveHybridOptimizer"] = SelfAdaptiveHybridOptimizer - res = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer").set_name("LLAMASelfAdaptiveHybridOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveHybridOptimizer = NonObjectOptimizer(method="LLAMASelfAdaptiveHybridOptimizer").set_name( + "LLAMASelfAdaptiveHybridOptimizer", register=True + ) +except Exception as e: # SelfAdaptiveHybridOptimizer print("SelfAdaptiveHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveInterleavedOptimization import SelfAdaptiveInterleavedOptimization +try: # SelfAdaptiveInterleavedOptimization + from nevergrad.optimization.lama.SelfAdaptiveInterleavedOptimization import ( + SelfAdaptiveInterleavedOptimization, + ) lama_register["SelfAdaptiveInterleavedOptimization"] = SelfAdaptiveInterleavedOptimization - res = NonObjectOptimizer(method="LLAMASelfAdaptiveInterleavedOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveInterleavedOptimization = NonObjectOptimizer(method="LLAMASelfAdaptiveInterleavedOptimization").set_name("LLAMASelfAdaptiveInterleavedOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveInterleavedOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveInterleavedOptimization = NonObjectOptimizer( + method="LLAMASelfAdaptiveInterleavedOptimization" + ).set_name("LLAMASelfAdaptiveInterleavedOptimization", register=True) +except Exception as e: # SelfAdaptiveInterleavedOptimization print("SelfAdaptiveInterleavedOptimization can not be imported: ", e) -try: +try: # SelfAdaptiveMemeticAlgorithmV2 from nevergrad.optimization.lama.SelfAdaptiveMemeticAlgorithmV2 import SelfAdaptiveMemeticAlgorithmV2 lama_register["SelfAdaptiveMemeticAlgorithmV2"] = SelfAdaptiveMemeticAlgorithmV2 - res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticAlgorithmV2").set_name("LLAMASelfAdaptiveMemeticAlgorithmV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticAlgorithmV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveMemeticAlgorithmV2 = NonObjectOptimizer( + method="LLAMASelfAdaptiveMemeticAlgorithmV2" + ).set_name("LLAMASelfAdaptiveMemeticAlgorithmV2", register=True) +except Exception as e: # SelfAdaptiveMemeticAlgorithmV2 print("SelfAdaptiveMemeticAlgorithmV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveMemeticEvolutionaryAlgorithm import SelfAdaptiveMemeticEvolutionaryAlgorithm +try: # SelfAdaptiveMemeticEvolutionaryAlgorithm + from nevergrad.optimization.lama.SelfAdaptiveMemeticEvolutionaryAlgorithm import ( + SelfAdaptiveMemeticEvolutionaryAlgorithm, + ) lama_register["SelfAdaptiveMemeticEvolutionaryAlgorithm"] = SelfAdaptiveMemeticEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm").set_name("LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm" + ).set_name("LLAMASelfAdaptiveMemeticEvolutionaryAlgorithm", register=True) +except Exception as e: # SelfAdaptiveMemeticEvolutionaryAlgorithm print("SelfAdaptiveMemeticEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveOppositionBasedHarmonySearchDE import SelfAdaptiveOppositionBasedHarmonySearchDE +try: # SelfAdaptiveOppositionBasedHarmonySearchDE + from nevergrad.optimization.lama.SelfAdaptiveOppositionBasedHarmonySearchDE import ( + SelfAdaptiveOppositionBasedHarmonySearchDE, + ) lama_register["SelfAdaptiveOppositionBasedHarmonySearchDE"] = SelfAdaptiveOppositionBasedHarmonySearchDE - res = NonObjectOptimizer(method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveOppositionBasedHarmonySearchDE = NonObjectOptimizer(method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE").set_name("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveOppositionBasedHarmonySearchDE = NonObjectOptimizer( + method="LLAMASelfAdaptiveOppositionBasedHarmonySearchDE" + ).set_name("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE", register=True) +except Exception as e: # SelfAdaptiveOppositionBasedHarmonySearchDE print("SelfAdaptiveOppositionBasedHarmonySearchDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.SelfAdaptiveQuantumMemeticAlgorithm import SelfAdaptiveQuantumMemeticAlgorithm +try: # SelfAdaptiveQuantumMemeticAlgorithm + from nevergrad.optimization.lama.SelfAdaptiveQuantumMemeticAlgorithm import ( + SelfAdaptiveQuantumMemeticAlgorithm, + ) lama_register["SelfAdaptiveQuantumMemeticAlgorithm"] = SelfAdaptiveQuantumMemeticAlgorithm - res = NonObjectOptimizer(method="LLAMASelfAdaptiveQuantumMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASelfAdaptiveQuantumMemeticAlgorithm = NonObjectOptimizer(method="LLAMASelfAdaptiveQuantumMemeticAlgorithm").set_name("LLAMASelfAdaptiveQuantumMemeticAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASelfAdaptiveQuantumMemeticAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASelfAdaptiveQuantumMemeticAlgorithm = NonObjectOptimizer( + method="LLAMASelfAdaptiveQuantumMemeticAlgorithm" + ).set_name("LLAMASelfAdaptiveQuantumMemeticAlgorithm", register=True) +except Exception as e: # SelfAdaptiveQuantumMemeticAlgorithm print("SelfAdaptiveQuantumMemeticAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.SequentialAdaptiveDifferentialEvolution import SequentialAdaptiveDifferentialEvolution +try: # SequentialAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.SequentialAdaptiveDifferentialEvolution import ( + SequentialAdaptiveDifferentialEvolution, + ) lama_register["SequentialAdaptiveDifferentialEvolution"] = SequentialAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMASequentialAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASequentialAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMASequentialAdaptiveDifferentialEvolution").set_name("LLAMASequentialAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASequentialAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASequentialAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMASequentialAdaptiveDifferentialEvolution" + ).set_name("LLAMASequentialAdaptiveDifferentialEvolution", register=True) +except Exception as e: # SequentialAdaptiveDifferentialEvolution print("SequentialAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.SequentialQuadraticAdaptiveEvolutionStrategy import SequentialQuadraticAdaptiveEvolutionStrategy - - lama_register["SequentialQuadraticAdaptiveEvolutionStrategy"] = SequentialQuadraticAdaptiveEvolutionStrategy - res = NonObjectOptimizer(method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer(method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy").set_name("LLAMASequentialQuadraticAdaptiveEvolutionStrategy", register=True) -except Exception as e: +try: # SequentialQuadraticAdaptiveEvolutionStrategy + from nevergrad.optimization.lama.SequentialQuadraticAdaptiveEvolutionStrategy import ( + SequentialQuadraticAdaptiveEvolutionStrategy, + ) + + lama_register["SequentialQuadraticAdaptiveEvolutionStrategy"] = ( + SequentialQuadraticAdaptiveEvolutionStrategy + ) + # res = NonObjectOptimizer(method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASequentialQuadraticAdaptiveEvolutionStrategy = NonObjectOptimizer( + method="LLAMASequentialQuadraticAdaptiveEvolutionStrategy" + ).set_name("LLAMASequentialQuadraticAdaptiveEvolutionStrategy", register=True) +except Exception as e: # SequentialQuadraticAdaptiveEvolutionStrategy print("SequentialQuadraticAdaptiveEvolutionStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.SequentialQuadraticExploitationSearch import SequentialQuadraticExploitationSearch +try: # SequentialQuadraticExploitationSearch + from nevergrad.optimization.lama.SequentialQuadraticExploitationSearch import ( + SequentialQuadraticExploitationSearch, + ) lama_register["SequentialQuadraticExploitationSearch"] = SequentialQuadraticExploitationSearch - res = NonObjectOptimizer(method="LLAMASequentialQuadraticExploitationSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASequentialQuadraticExploitationSearch = NonObjectOptimizer(method="LLAMASequentialQuadraticExploitationSearch").set_name("LLAMASequentialQuadraticExploitationSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASequentialQuadraticExploitationSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASequentialQuadraticExploitationSearch = NonObjectOptimizer( + method="LLAMASequentialQuadraticExploitationSearch" + ).set_name("LLAMASequentialQuadraticExploitationSearch", register=True) +except Exception as e: # SequentialQuadraticExploitationSearch print("SequentialQuadraticExploitationSearch can not be imported: ", e) -try: +try: # SimpleHybridDE from nevergrad.optimization.lama.SimpleHybridDE import SimpleHybridDE lama_register["SimpleHybridDE"] = SimpleHybridDE - res = NonObjectOptimizer(method="LLAMASimpleHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASimpleHybridDE = NonObjectOptimizer(method="LLAMASimpleHybridDE").set_name("LLAMASimpleHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASimpleHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASimpleHybridDE = NonObjectOptimizer(method="LLAMASimpleHybridDE").set_name( + "LLAMASimpleHybridDE", register=True + ) +except Exception as e: # SimpleHybridDE print("SimpleHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.SimplifiedAdaptiveDynamicDualPhaseStrategyV18 import SimplifiedAdaptiveDynamicDualPhaseStrategyV18 - - lama_register["SimplifiedAdaptiveDynamicDualPhaseStrategyV18"] = SimplifiedAdaptiveDynamicDualPhaseStrategyV18 - res = NonObjectOptimizer(method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18 = NonObjectOptimizer(method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18").set_name("LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18", register=True) -except Exception as e: +try: # SimplifiedAdaptiveDynamicDualPhaseStrategyV18 + from nevergrad.optimization.lama.SimplifiedAdaptiveDynamicDualPhaseStrategyV18 import ( + SimplifiedAdaptiveDynamicDualPhaseStrategyV18, + ) + + lama_register["SimplifiedAdaptiveDynamicDualPhaseStrategyV18"] = ( + SimplifiedAdaptiveDynamicDualPhaseStrategyV18 + ) + # res = NonObjectOptimizer(method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18 = NonObjectOptimizer( + method="LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18" + ).set_name("LLAMASimplifiedAdaptiveDynamicDualPhaseStrategyV18", register=True) +except Exception as e: # SimplifiedAdaptiveDynamicDualPhaseStrategyV18 print("SimplifiedAdaptiveDynamicDualPhaseStrategyV18 can not be imported: ", e) -try: +try: # SimulatedAnnealingOptimizer from nevergrad.optimization.lama.SimulatedAnnealingOptimizer import SimulatedAnnealingOptimizer lama_register["SimulatedAnnealingOptimizer"] = SimulatedAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer").set_name("LLAMASimulatedAnnealingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASimulatedAnnealingOptimizer = NonObjectOptimizer(method="LLAMASimulatedAnnealingOptimizer").set_name( + "LLAMASimulatedAnnealingOptimizer", register=True + ) +except Exception as e: # SimulatedAnnealingOptimizer print("SimulatedAnnealingOptimizer can not be imported: ", e) -try: +try: # SpiralSearchOptimizer from nevergrad.optimization.lama.SpiralSearchOptimizer import SpiralSearchOptimizer lama_register["SpiralSearchOptimizer"] = SpiralSearchOptimizer - res = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASpiralSearchOptimizer = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer").set_name("LLAMASpiralSearchOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASpiralSearchOptimizer = NonObjectOptimizer(method="LLAMASpiralSearchOptimizer").set_name( + "LLAMASpiralSearchOptimizer", register=True + ) +except Exception as e: # SpiralSearchOptimizer print("SpiralSearchOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.StabilizedQuantumCognitionOptimizerV11 import StabilizedQuantumCognitionOptimizerV11 +try: # StabilizedQuantumCognitionOptimizerV11 + from nevergrad.optimization.lama.StabilizedQuantumCognitionOptimizerV11 import ( + StabilizedQuantumCognitionOptimizerV11, + ) lama_register["StabilizedQuantumCognitionOptimizerV11"] = StabilizedQuantumCognitionOptimizerV11 - res = NonObjectOptimizer(method="LLAMAStabilizedQuantumCognitionOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStabilizedQuantumCognitionOptimizerV11 = NonObjectOptimizer(method="LLAMAStabilizedQuantumCognitionOptimizerV11").set_name("LLAMAStabilizedQuantumCognitionOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStabilizedQuantumCognitionOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStabilizedQuantumCognitionOptimizerV11 = NonObjectOptimizer( + method="LLAMAStabilizedQuantumCognitionOptimizerV11" + ).set_name("LLAMAStabilizedQuantumCognitionOptimizerV11", register=True) +except Exception as e: # StabilizedQuantumCognitionOptimizerV11 print("StabilizedQuantumCognitionOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.StabilizedQuantumConcentricOptimizer import StabilizedQuantumConcentricOptimizer +try: # StabilizedQuantumConcentricOptimizer + from nevergrad.optimization.lama.StabilizedQuantumConcentricOptimizer import ( + StabilizedQuantumConcentricOptimizer, + ) lama_register["StabilizedQuantumConcentricOptimizer"] = StabilizedQuantumConcentricOptimizer - res = NonObjectOptimizer(method="LLAMAStabilizedQuantumConcentricOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStabilizedQuantumConcentricOptimizer = NonObjectOptimizer(method="LLAMAStabilizedQuantumConcentricOptimizer").set_name("LLAMAStabilizedQuantumConcentricOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStabilizedQuantumConcentricOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStabilizedQuantumConcentricOptimizer = NonObjectOptimizer( + method="LLAMAStabilizedQuantumConcentricOptimizer" + ).set_name("LLAMAStabilizedQuantumConcentricOptimizer", register=True) +except Exception as e: # StabilizedQuantumConcentricOptimizer print("StabilizedQuantumConcentricOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.StabilizedRefinedEnhancedDynamicBalancingPSO import StabilizedRefinedEnhancedDynamicBalancingPSO - - lama_register["StabilizedRefinedEnhancedDynamicBalancingPSO"] = StabilizedRefinedEnhancedDynamicBalancingPSO - res = NonObjectOptimizer(method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO = NonObjectOptimizer(method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO").set_name("LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO", register=True) -except Exception as e: +try: # StabilizedRefinedEnhancedDynamicBalancingPSO + from nevergrad.optimization.lama.StabilizedRefinedEnhancedDynamicBalancingPSO import ( + StabilizedRefinedEnhancedDynamicBalancingPSO, + ) + + lama_register["StabilizedRefinedEnhancedDynamicBalancingPSO"] = ( + StabilizedRefinedEnhancedDynamicBalancingPSO + ) + # res = NonObjectOptimizer(method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO = NonObjectOptimizer( + method="LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO" + ).set_name("LLAMAStabilizedRefinedEnhancedDynamicBalancingPSO", register=True) +except Exception as e: # StabilizedRefinedEnhancedDynamicBalancingPSO print("StabilizedRefinedEnhancedDynamicBalancingPSO can not be imported: ", e) -try: - from nevergrad.optimization.lama.StochasticAdaptiveEvolutionaryOptimizer import StochasticAdaptiveEvolutionaryOptimizer +try: # StochasticAdaptiveEvolutionaryOptimizer + from nevergrad.optimization.lama.StochasticAdaptiveEvolutionaryOptimizer import ( + StochasticAdaptiveEvolutionaryOptimizer, + ) lama_register["StochasticAdaptiveEvolutionaryOptimizer"] = StochasticAdaptiveEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAStochasticAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAStochasticAdaptiveEvolutionaryOptimizer").set_name("LLAMAStochasticAdaptiveEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAStochasticAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAStochasticAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: # StochasticAdaptiveEvolutionaryOptimizer print("StochasticAdaptiveEvolutionaryOptimizer can not be imported: ", e) -try: +try: # StochasticBalancingOptimizer from nevergrad.optimization.lama.StochasticBalancingOptimizer import StochasticBalancingOptimizer lama_register["StochasticBalancingOptimizer"] = StochasticBalancingOptimizer - res = NonObjectOptimizer(method="LLAMAStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticBalancingOptimizer = NonObjectOptimizer(method="LLAMAStochasticBalancingOptimizer").set_name("LLAMAStochasticBalancingOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticBalancingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticBalancingOptimizer = NonObjectOptimizer( + method="LLAMAStochasticBalancingOptimizer" + ).set_name("LLAMAStochasticBalancingOptimizer", register=True) +except Exception as e: # StochasticBalancingOptimizer print("StochasticBalancingOptimizer can not be imported: ", e) -try: +try: # StochasticGradientEnhancedDE from nevergrad.optimization.lama.StochasticGradientEnhancedDE import StochasticGradientEnhancedDE lama_register["StochasticGradientEnhancedDE"] = StochasticGradientEnhancedDE - res = NonObjectOptimizer(method="LLAMAStochasticGradientEnhancedDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticGradientEnhancedDE = NonObjectOptimizer(method="LLAMAStochasticGradientEnhancedDE").set_name("LLAMAStochasticGradientEnhancedDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticGradientEnhancedDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticGradientEnhancedDE = NonObjectOptimizer( + method="LLAMAStochasticGradientEnhancedDE" + ).set_name("LLAMAStochasticGradientEnhancedDE", register=True) +except Exception as e: # StochasticGradientEnhancedDE print("StochasticGradientEnhancedDE can not be imported: ", e) -try: +try: # StochasticGradientExploration from nevergrad.optimization.lama.StochasticGradientExploration import StochasticGradientExploration lama_register["StochasticGradientExploration"] = StochasticGradientExploration - res = NonObjectOptimizer(method="LLAMAStochasticGradientExploration")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticGradientExploration = NonObjectOptimizer(method="LLAMAStochasticGradientExploration").set_name("LLAMAStochasticGradientExploration", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticGradientExploration")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticGradientExploration = NonObjectOptimizer( + method="LLAMAStochasticGradientExploration" + ).set_name("LLAMAStochasticGradientExploration", register=True) +except Exception as e: # StochasticGradientExploration print("StochasticGradientExploration can not be imported: ", e) -try: - from nevergrad.optimization.lama.StochasticGradientHybridOptimization import StochasticGradientHybridOptimization +try: # StochasticGradientHybridOptimization + from nevergrad.optimization.lama.StochasticGradientHybridOptimization import ( + StochasticGradientHybridOptimization, + ) lama_register["StochasticGradientHybridOptimization"] = StochasticGradientHybridOptimization - res = NonObjectOptimizer(method="LLAMAStochasticGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticGradientHybridOptimization = NonObjectOptimizer(method="LLAMAStochasticGradientHybridOptimization").set_name("LLAMAStochasticGradientHybridOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticGradientHybridOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticGradientHybridOptimization = NonObjectOptimizer( + method="LLAMAStochasticGradientHybridOptimization" + ).set_name("LLAMAStochasticGradientHybridOptimization", register=True) +except Exception as e: # StochasticGradientHybridOptimization print("StochasticGradientHybridOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.StochasticGradientQuorumOptimization import StochasticGradientQuorumOptimization +try: # StochasticGradientQuorumOptimization + from nevergrad.optimization.lama.StochasticGradientQuorumOptimization import ( + StochasticGradientQuorumOptimization, + ) lama_register["StochasticGradientQuorumOptimization"] = StochasticGradientQuorumOptimization - res = NonObjectOptimizer(method="LLAMAStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStochasticGradientQuorumOptimization = NonObjectOptimizer(method="LLAMAStochasticGradientQuorumOptimization").set_name("LLAMAStochasticGradientQuorumOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStochasticGradientQuorumOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStochasticGradientQuorumOptimization = NonObjectOptimizer( + method="LLAMAStochasticGradientQuorumOptimization" + ).set_name("LLAMAStochasticGradientQuorumOptimization", register=True) +except Exception as e: # StochasticGradientQuorumOptimization print("StochasticGradientQuorumOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.StrategicAdaptiveDifferentialEvolution import StrategicAdaptiveDifferentialEvolution +try: # StrategicAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.StrategicAdaptiveDifferentialEvolution import ( + StrategicAdaptiveDifferentialEvolution, + ) lama_register["StrategicAdaptiveDifferentialEvolution"] = StrategicAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMAStrategicAdaptiveDifferentialEvolution").set_name("LLAMAStrategicAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMAStrategicAdaptiveDifferentialEvolution" + ).set_name("LLAMAStrategicAdaptiveDifferentialEvolution", register=True) +except Exception as e: # StrategicAdaptiveDifferentialEvolution print("StrategicAdaptiveDifferentialEvolution can not be imported: ", e) -try: +try: # StrategicDifferentialEvolution from nevergrad.optimization.lama.StrategicDifferentialEvolution import StrategicDifferentialEvolution lama_register["StrategicDifferentialEvolution"] = StrategicDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicDifferentialEvolution = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution").set_name("LLAMAStrategicDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicDifferentialEvolution = NonObjectOptimizer( + method="LLAMAStrategicDifferentialEvolution" + ).set_name("LLAMAStrategicDifferentialEvolution", register=True) +except Exception as e: # StrategicDifferentialEvolution print("StrategicDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.StrategicDiminishingAdaptiveEvolver import StrategicDiminishingAdaptiveEvolver +try: # StrategicDiminishingAdaptiveEvolver + from nevergrad.optimization.lama.StrategicDiminishingAdaptiveEvolver import ( + StrategicDiminishingAdaptiveEvolver, + ) lama_register["StrategicDiminishingAdaptiveEvolver"] = StrategicDiminishingAdaptiveEvolver - res = NonObjectOptimizer(method="LLAMAStrategicDiminishingAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicDiminishingAdaptiveEvolver = NonObjectOptimizer(method="LLAMAStrategicDiminishingAdaptiveEvolver").set_name("LLAMAStrategicDiminishingAdaptiveEvolver", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicDiminishingAdaptiveEvolver")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicDiminishingAdaptiveEvolver = NonObjectOptimizer( + method="LLAMAStrategicDiminishingAdaptiveEvolver" + ).set_name("LLAMAStrategicDiminishingAdaptiveEvolver", register=True) +except Exception as e: # StrategicDiminishingAdaptiveEvolver print("StrategicDiminishingAdaptiveEvolver can not be imported: ", e) -try: +try: # StrategicHybridDE from nevergrad.optimization.lama.StrategicHybridDE import StrategicHybridDE lama_register["StrategicHybridDE"] = StrategicHybridDE - res = NonObjectOptimizer(method="LLAMAStrategicHybridDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicHybridDE = NonObjectOptimizer(method="LLAMAStrategicHybridDE").set_name("LLAMAStrategicHybridDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicHybridDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicHybridDE = NonObjectOptimizer(method="LLAMAStrategicHybridDE").set_name( + "LLAMAStrategicHybridDE", register=True + ) +except Exception as e: # StrategicHybridDE print("StrategicHybridDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.StrategicMultiPhaseEvolutionaryAlgorithm import StrategicMultiPhaseEvolutionaryAlgorithm +try: # StrategicMultiPhaseEvolutionaryAlgorithm + from nevergrad.optimization.lama.StrategicMultiPhaseEvolutionaryAlgorithm import ( + StrategicMultiPhaseEvolutionaryAlgorithm, + ) lama_register["StrategicMultiPhaseEvolutionaryAlgorithm"] = StrategicMultiPhaseEvolutionaryAlgorithm - res = NonObjectOptimizer(method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicMultiPhaseEvolutionaryAlgorithm = NonObjectOptimizer(method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm").set_name("LLAMAStrategicMultiPhaseEvolutionaryAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicMultiPhaseEvolutionaryAlgorithm = NonObjectOptimizer( + method="LLAMAStrategicMultiPhaseEvolutionaryAlgorithm" + ).set_name("LLAMAStrategicMultiPhaseEvolutionaryAlgorithm", register=True) +except Exception as e: # StrategicMultiPhaseEvolutionaryAlgorithm print("StrategicMultiPhaseEvolutionaryAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.StrategicQuorumMutationWithAdaptiveElites import StrategicQuorumMutationWithAdaptiveElites +try: # StrategicQuorumMutationWithAdaptiveElites + from nevergrad.optimization.lama.StrategicQuorumMutationWithAdaptiveElites import ( + StrategicQuorumMutationWithAdaptiveElites, + ) lama_register["StrategicQuorumMutationWithAdaptiveElites"] = StrategicQuorumMutationWithAdaptiveElites - res = NonObjectOptimizer(method="LLAMAStrategicQuorumMutationWithAdaptiveElites")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicQuorumMutationWithAdaptiveElites = NonObjectOptimizer(method="LLAMAStrategicQuorumMutationWithAdaptiveElites").set_name("LLAMAStrategicQuorumMutationWithAdaptiveElites", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicQuorumMutationWithAdaptiveElites")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicQuorumMutationWithAdaptiveElites = NonObjectOptimizer( + method="LLAMAStrategicQuorumMutationWithAdaptiveElites" + ).set_name("LLAMAStrategicQuorumMutationWithAdaptiveElites", register=True) +except Exception as e: # StrategicQuorumMutationWithAdaptiveElites print("StrategicQuorumMutationWithAdaptiveElites can not be imported: ", e) -try: - from nevergrad.optimization.lama.StrategicResilienceAdaptiveSearch import StrategicResilienceAdaptiveSearch +try: # StrategicResilienceAdaptiveSearch + from nevergrad.optimization.lama.StrategicResilienceAdaptiveSearch import ( + StrategicResilienceAdaptiveSearch, + ) lama_register["StrategicResilienceAdaptiveSearch"] = StrategicResilienceAdaptiveSearch - res = NonObjectOptimizer(method="LLAMAStrategicResilienceAdaptiveSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAStrategicResilienceAdaptiveSearch = NonObjectOptimizer(method="LLAMAStrategicResilienceAdaptiveSearch").set_name("LLAMAStrategicResilienceAdaptiveSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAStrategicResilienceAdaptiveSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAStrategicResilienceAdaptiveSearch = NonObjectOptimizer( + method="LLAMAStrategicResilienceAdaptiveSearch" + ).set_name("LLAMAStrategicResilienceAdaptiveSearch", register=True) +except Exception as e: # StrategicResilienceAdaptiveSearch print("StrategicResilienceAdaptiveSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimization import SuperDynamicQuantumSwarmOptimization +try: # SuperDynamicQuantumSwarmOptimization + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimization import ( + SuperDynamicQuantumSwarmOptimization, + ) lama_register["SuperDynamicQuantumSwarmOptimization"] = SuperDynamicQuantumSwarmOptimization - res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperDynamicQuantumSwarmOptimization = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimization").set_name("LLAMASuperDynamicQuantumSwarmOptimization", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimization")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperDynamicQuantumSwarmOptimization = NonObjectOptimizer( + method="LLAMASuperDynamicQuantumSwarmOptimization" + ).set_name("LLAMASuperDynamicQuantumSwarmOptimization", register=True) +except Exception as e: # SuperDynamicQuantumSwarmOptimization print("SuperDynamicQuantumSwarmOptimization can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimizationImproved import SuperDynamicQuantumSwarmOptimizationImproved - - lama_register["SuperDynamicQuantumSwarmOptimizationImproved"] = SuperDynamicQuantumSwarmOptimizationImproved - res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimizationImproved").set_name("LLAMASuperDynamicQuantumSwarmOptimizationImproved", register=True) -except Exception as e: +try: # SuperDynamicQuantumSwarmOptimizationImproved + from nevergrad.optimization.lama.SuperDynamicQuantumSwarmOptimizationImproved import ( + SuperDynamicQuantumSwarmOptimizationImproved, + ) + + lama_register["SuperDynamicQuantumSwarmOptimizationImproved"] = ( + SuperDynamicQuantumSwarmOptimizationImproved + ) + # res = NonObjectOptimizer(method="LLAMASuperDynamicQuantumSwarmOptimizationImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperDynamicQuantumSwarmOptimizationImproved = NonObjectOptimizer( + method="LLAMASuperDynamicQuantumSwarmOptimizationImproved" + ).set_name("LLAMASuperDynamicQuantumSwarmOptimizationImproved", register=True) +except Exception as e: # SuperDynamicQuantumSwarmOptimizationImproved print("SuperDynamicQuantumSwarmOptimizationImproved can not be imported: ", e) -try: +try: # SuperOptimizedRAMEDS from nevergrad.optimization.lama.SuperOptimizedRAMEDS import SuperOptimizedRAMEDS lama_register["SuperOptimizedRAMEDS"] = SuperOptimizedRAMEDS - res = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS").set_name("LLAMASuperOptimizedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperOptimizedRAMEDS = NonObjectOptimizer(method="LLAMASuperOptimizedRAMEDS").set_name( + "LLAMASuperOptimizedRAMEDS", register=True + ) +except Exception as e: # SuperOptimizedRAMEDS print("SuperOptimizedRAMEDS can not be imported: ", e) -try: +try: # SuperRefinedRAMEDSv5 from nevergrad.optimization.lama.SuperRefinedRAMEDSv5 import SuperRefinedRAMEDSv5 lama_register["SuperRefinedRAMEDSv5"] = SuperRefinedRAMEDSv5 - res = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperRefinedRAMEDSv5 = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5").set_name("LLAMASuperRefinedRAMEDSv5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperRefinedRAMEDSv5 = NonObjectOptimizer(method="LLAMASuperRefinedRAMEDSv5").set_name( + "LLAMASuperRefinedRAMEDSv5", register=True + ) +except Exception as e: # SuperRefinedRAMEDSv5 print("SuperRefinedRAMEDSv5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 import SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 - - lama_register["SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5"] = SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 - res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5").set_name("LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5", register=True) -except Exception as e: +try: # SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 + from nevergrad.optimization.lama.SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 import ( + SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5, + ) + + lama_register["SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5"] = ( + SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 + ) + # res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 = NonObjectOptimizer( + method="LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5" + ).set_name("LLAMASuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5", register=True) +except Exception as e: # SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 print("SuperchargedEnhancedAQAPSO_LS_DIW_AP_Refined_V5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 import SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 - - lama_register["SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16"] = SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 - res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16").set_name("LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16", register=True) -except Exception as e: +try: # SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 + from nevergrad.optimization.lama.SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 import ( + SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16, + ) + + lama_register["SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16"] = ( + SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 + ) + # res = NonObjectOptimizer(method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 = NonObjectOptimizer( + method="LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16" + ).set_name("LLAMASuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16", register=True) +except Exception as e: # SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 print("SuperchargedEnhancedAdvancedQuantumSwarmOptimizationV16 can not be imported: ", e) -try: +try: # SuperiorAdaptiveStrategyDE from nevergrad.optimization.lama.SuperiorAdaptiveStrategyDE import SuperiorAdaptiveStrategyDE lama_register["SuperiorAdaptiveStrategyDE"] = SuperiorAdaptiveStrategyDE - res = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE").set_name("LLAMASuperiorAdaptiveStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperiorAdaptiveStrategyDE = NonObjectOptimizer(method="LLAMASuperiorAdaptiveStrategyDE").set_name( + "LLAMASuperiorAdaptiveStrategyDE", register=True + ) +except Exception as e: # SuperiorAdaptiveStrategyDE print("SuperiorAdaptiveStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperiorEnhancedDynamicPrecisionOptimizerV1 import SuperiorEnhancedDynamicPrecisionOptimizerV1 +try: # SuperiorEnhancedDynamicPrecisionOptimizerV1 + from nevergrad.optimization.lama.SuperiorEnhancedDynamicPrecisionOptimizerV1 import ( + SuperiorEnhancedDynamicPrecisionOptimizerV1, + ) lama_register["SuperiorEnhancedDynamicPrecisionOptimizerV1"] = SuperiorEnhancedDynamicPrecisionOptimizerV1 - res = NonObjectOptimizer(method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1").set_name("LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1" + ).set_name("LLAMASuperiorEnhancedDynamicPrecisionOptimizerV1", register=True) +except Exception as e: # SuperiorEnhancedDynamicPrecisionOptimizerV1 print("SuperiorEnhancedDynamicPrecisionOptimizerV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperiorHybridEvolutionaryAnnealingOptimizer import SuperiorHybridEvolutionaryAnnealingOptimizer - - lama_register["SuperiorHybridEvolutionaryAnnealingOptimizer"] = SuperiorHybridEvolutionaryAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperiorHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer").set_name("LLAMASuperiorHybridEvolutionaryAnnealingOptimizer", register=True) -except Exception as e: +try: # SuperiorHybridEvolutionaryAnnealingOptimizer + from nevergrad.optimization.lama.SuperiorHybridEvolutionaryAnnealingOptimizer import ( + SuperiorHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["SuperiorHybridEvolutionaryAnnealingOptimizer"] = ( + SuperiorHybridEvolutionaryAnnealingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperiorHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMASuperiorHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMASuperiorHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: # SuperiorHybridEvolutionaryAnnealingOptimizer print("SuperiorHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperiorOptimalEnhancedStrategyDE import SuperiorOptimalEnhancedStrategyDE +try: # SuperiorOptimalEnhancedStrategyDE + from nevergrad.optimization.lama.SuperiorOptimalEnhancedStrategyDE import ( + SuperiorOptimalEnhancedStrategyDE, + ) lama_register["SuperiorOptimalEnhancedStrategyDE"] = SuperiorOptimalEnhancedStrategyDE - res = NonObjectOptimizer(method="LLAMASuperiorOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperiorOptimalEnhancedStrategyDE = NonObjectOptimizer(method="LLAMASuperiorOptimalEnhancedStrategyDE").set_name("LLAMASuperiorOptimalEnhancedStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASuperiorOptimalEnhancedStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperiorOptimalEnhancedStrategyDE = NonObjectOptimizer( + method="LLAMASuperiorOptimalEnhancedStrategyDE" + ).set_name("LLAMASuperiorOptimalEnhancedStrategyDE", register=True) +except Exception as e: # SuperiorOptimalEnhancedStrategyDE print("SuperiorOptimalEnhancedStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.SuperiorRefinedEvolutionaryGradientOptimizerV13 import SuperiorRefinedEvolutionaryGradientOptimizerV13 - - lama_register["SuperiorRefinedEvolutionaryGradientOptimizerV13"] = SuperiorRefinedEvolutionaryGradientOptimizerV13 - res = NonObjectOptimizer(method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13 = NonObjectOptimizer(method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13").set_name("LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13", register=True) -except Exception as e: +try: # SuperiorRefinedEvolutionaryGradientOptimizerV13 + from nevergrad.optimization.lama.SuperiorRefinedEvolutionaryGradientOptimizerV13 import ( + SuperiorRefinedEvolutionaryGradientOptimizerV13, + ) + + lama_register["SuperiorRefinedEvolutionaryGradientOptimizerV13"] = ( + SuperiorRefinedEvolutionaryGradientOptimizerV13 + ) + # res = NonObjectOptimizer(method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13 = NonObjectOptimizer( + method="LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13" + ).set_name("LLAMASuperiorRefinedEvolutionaryGradientOptimizerV13", register=True) +except Exception as e: # SuperiorRefinedEvolutionaryGradientOptimizerV13 print("SuperiorRefinedEvolutionaryGradientOptimizerV13 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeDynamicAdaptiveOptimizerV5 import SupremeDynamicAdaptiveOptimizerV5 +try: # SupremeDynamicAdaptiveOptimizerV5 + from nevergrad.optimization.lama.SupremeDynamicAdaptiveOptimizerV5 import ( + SupremeDynamicAdaptiveOptimizerV5, + ) lama_register["SupremeDynamicAdaptiveOptimizerV5"] = SupremeDynamicAdaptiveOptimizerV5 - res = NonObjectOptimizer(method="LLAMASupremeDynamicAdaptiveOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeDynamicAdaptiveOptimizerV5 = NonObjectOptimizer(method="LLAMASupremeDynamicAdaptiveOptimizerV5").set_name("LLAMASupremeDynamicAdaptiveOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASupremeDynamicAdaptiveOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeDynamicAdaptiveOptimizerV5 = NonObjectOptimizer( + method="LLAMASupremeDynamicAdaptiveOptimizerV5" + ).set_name("LLAMASupremeDynamicAdaptiveOptimizerV5", register=True) +except Exception as e: # SupremeDynamicAdaptiveOptimizerV5 print("SupremeDynamicAdaptiveOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV1 import SupremeDynamicPrecisionOptimizerV1 +try: # SupremeDynamicPrecisionOptimizerV1 + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV1 import ( + SupremeDynamicPrecisionOptimizerV1, + ) lama_register["SupremeDynamicPrecisionOptimizerV1"] = SupremeDynamicPrecisionOptimizerV1 - res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV1").set_name("LLAMASupremeDynamicPrecisionOptimizerV1", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV1")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeDynamicPrecisionOptimizerV1 = NonObjectOptimizer( + method="LLAMASupremeDynamicPrecisionOptimizerV1" + ).set_name("LLAMASupremeDynamicPrecisionOptimizerV1", register=True) +except Exception as e: # SupremeDynamicPrecisionOptimizerV1 print("SupremeDynamicPrecisionOptimizerV1 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV2 import SupremeDynamicPrecisionOptimizerV2 +try: # SupremeDynamicPrecisionOptimizerV2 + from nevergrad.optimization.lama.SupremeDynamicPrecisionOptimizerV2 import ( + SupremeDynamicPrecisionOptimizerV2, + ) lama_register["SupremeDynamicPrecisionOptimizerV2"] = SupremeDynamicPrecisionOptimizerV2 - res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeDynamicPrecisionOptimizerV2 = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV2").set_name("LLAMASupremeDynamicPrecisionOptimizerV2", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASupremeDynamicPrecisionOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeDynamicPrecisionOptimizerV2 = NonObjectOptimizer( + method="LLAMASupremeDynamicPrecisionOptimizerV2" + ).set_name("LLAMASupremeDynamicPrecisionOptimizerV2", register=True) +except Exception as e: # SupremeDynamicPrecisionOptimizerV2 print("SupremeDynamicPrecisionOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeEvolutionaryGradientHybridOptimizerV6 import SupremeEvolutionaryGradientHybridOptimizerV6 - - lama_register["SupremeEvolutionaryGradientHybridOptimizerV6"] = SupremeEvolutionaryGradientHybridOptimizerV6 - res = NonObjectOptimizer(method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeEvolutionaryGradientHybridOptimizerV6 = NonObjectOptimizer(method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6").set_name("LLAMASupremeEvolutionaryGradientHybridOptimizerV6", register=True) -except Exception as e: +try: # SupremeEvolutionaryGradientHybridOptimizerV6 + from nevergrad.optimization.lama.SupremeEvolutionaryGradientHybridOptimizerV6 import ( + SupremeEvolutionaryGradientHybridOptimizerV6, + ) + + lama_register["SupremeEvolutionaryGradientHybridOptimizerV6"] = ( + SupremeEvolutionaryGradientHybridOptimizerV6 + ) + # res = NonObjectOptimizer(method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeEvolutionaryGradientHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMASupremeEvolutionaryGradientHybridOptimizerV6" + ).set_name("LLAMASupremeEvolutionaryGradientHybridOptimizerV6", register=True) +except Exception as e: # SupremeEvolutionaryGradientHybridOptimizerV6 print("SupremeEvolutionaryGradientHybridOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeOptimalPrecisionEvolutionaryThermalOptimizer import SupremeOptimalPrecisionEvolutionaryThermalOptimizer - - lama_register["SupremeOptimalPrecisionEvolutionaryThermalOptimizer"] = SupremeOptimalPrecisionEvolutionaryThermalOptimizer - res = NonObjectOptimizer(method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer(method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer").set_name("LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer", register=True) -except Exception as e: +try: # SupremeOptimalPrecisionEvolutionaryThermalOptimizer + from nevergrad.optimization.lama.SupremeOptimalPrecisionEvolutionaryThermalOptimizer import ( + SupremeOptimalPrecisionEvolutionaryThermalOptimizer, + ) + + lama_register["SupremeOptimalPrecisionEvolutionaryThermalOptimizer"] = ( + SupremeOptimalPrecisionEvolutionaryThermalOptimizer + ) + # res = NonObjectOptimizer(method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer = NonObjectOptimizer( + method="LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer" + ).set_name("LLAMASupremeOptimalPrecisionEvolutionaryThermalOptimizer", register=True) +except Exception as e: # SupremeOptimalPrecisionEvolutionaryThermalOptimizer print("SupremeOptimalPrecisionEvolutionaryThermalOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.SupremeUltraEnhancedEvolutionaryOptimizer import SupremeUltraEnhancedEvolutionaryOptimizer +try: # SupremeUltraEnhancedEvolutionaryOptimizer + from nevergrad.optimization.lama.SupremeUltraEnhancedEvolutionaryOptimizer import ( + SupremeUltraEnhancedEvolutionaryOptimizer, + ) lama_register["SupremeUltraEnhancedEvolutionaryOptimizer"] = SupremeUltraEnhancedEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMASupremeUltraEnhancedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer").set_name("LLAMASupremeUltraEnhancedEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMASupremeUltraEnhancedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMASupremeUltraEnhancedEvolutionaryOptimizer" + ).set_name("LLAMASupremeUltraEnhancedEvolutionaryOptimizer", register=True) +except Exception as e: # SupremeUltraEnhancedEvolutionaryOptimizer print("SupremeUltraEnhancedEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.TemporalAdaptiveDifferentialEvolution import TemporalAdaptiveDifferentialEvolution +try: # TemporalAdaptiveDifferentialEvolution + from nevergrad.optimization.lama.TemporalAdaptiveDifferentialEvolution import ( + TemporalAdaptiveDifferentialEvolution, + ) lama_register["TemporalAdaptiveDifferentialEvolution"] = TemporalAdaptiveDifferentialEvolution - res = NonObjectOptimizer(method="LLAMATemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMATemporalAdaptiveDifferentialEvolution = NonObjectOptimizer(method="LLAMATemporalAdaptiveDifferentialEvolution").set_name("LLAMATemporalAdaptiveDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMATemporalAdaptiveDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMATemporalAdaptiveDifferentialEvolution = NonObjectOptimizer( + method="LLAMATemporalAdaptiveDifferentialEvolution" + ).set_name("LLAMATemporalAdaptiveDifferentialEvolution", register=True) +except Exception as e: # TemporalAdaptiveDifferentialEvolution print("TemporalAdaptiveDifferentialEvolution can not be imported: ", e) -try: - from nevergrad.optimization.lama.TurbochargedDifferentialEvolution import TurbochargedDifferentialEvolution +try: # TurbochargedDifferentialEvolution + from nevergrad.optimization.lama.TurbochargedDifferentialEvolution import ( + TurbochargedDifferentialEvolution, + ) lama_register["TurbochargedDifferentialEvolution"] = TurbochargedDifferentialEvolution - res = NonObjectOptimizer(method="LLAMATurbochargedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMATurbochargedDifferentialEvolution = NonObjectOptimizer(method="LLAMATurbochargedDifferentialEvolution").set_name("LLAMATurbochargedDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMATurbochargedDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMATurbochargedDifferentialEvolution = NonObjectOptimizer( + method="LLAMATurbochargedDifferentialEvolution" + ).set_name("LLAMATurbochargedDifferentialEvolution", register=True) +except Exception as e: # TurbochargedDifferentialEvolution print("TurbochargedDifferentialEvolution can not be imported: ", e) -try: +try: # UltimateDynamicFireworkAlgorithm from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithm import UltimateDynamicFireworkAlgorithm lama_register["UltimateDynamicFireworkAlgorithm"] = UltimateDynamicFireworkAlgorithm - res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateDynamicFireworkAlgorithm = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithm").set_name("LLAMAUltimateDynamicFireworkAlgorithm", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithm")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateDynamicFireworkAlgorithm = NonObjectOptimizer( + method="LLAMAUltimateDynamicFireworkAlgorithm" + ).set_name("LLAMAUltimateDynamicFireworkAlgorithm", register=True) +except Exception as e: # UltimateDynamicFireworkAlgorithm print("UltimateDynamicFireworkAlgorithm can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithmImproved import UltimateDynamicFireworkAlgorithmImproved +try: # UltimateDynamicFireworkAlgorithmImproved + from nevergrad.optimization.lama.UltimateDynamicFireworkAlgorithmImproved import ( + UltimateDynamicFireworkAlgorithmImproved, + ) lama_register["UltimateDynamicFireworkAlgorithmImproved"] = UltimateDynamicFireworkAlgorithmImproved - res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithmImproved").set_name("LLAMAUltimateDynamicFireworkAlgorithmImproved", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateDynamicFireworkAlgorithmImproved")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateDynamicFireworkAlgorithmImproved = NonObjectOptimizer( + method="LLAMAUltimateDynamicFireworkAlgorithmImproved" + ).set_name("LLAMAUltimateDynamicFireworkAlgorithmImproved", register=True) +except Exception as e: # UltimateDynamicFireworkAlgorithmImproved print("UltimateDynamicFireworkAlgorithmImproved can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 import UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 - - lama_register["UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19"] = UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 - res = NonObjectOptimizer(method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 = NonObjectOptimizer(method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19").set_name("LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19", register=True) -except Exception as e: +try: # UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 + from nevergrad.optimization.lama.UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 import ( + UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19, + ) + + lama_register["UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19"] = ( + UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 + ) + # res = NonObjectOptimizer(method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 = NonObjectOptimizer( + method="LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19" + ).set_name("LLAMAUltimateEnhancedRefinedEvolutionaryGradientOptimizerV19", register=True) +except Exception as e: # UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 print("UltimateEnhancedRefinedEvolutionaryGradientOptimizerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV15 import UltimateEvolutionaryGradientOptimizerV15 +try: # UltimateEvolutionaryGradientOptimizerV15 + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV15 import ( + UltimateEvolutionaryGradientOptimizerV15, + ) lama_register["UltimateEvolutionaryGradientOptimizerV15"] = UltimateEvolutionaryGradientOptimizerV15 - res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV15")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateEvolutionaryGradientOptimizerV15 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV15").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV15", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV15")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateEvolutionaryGradientOptimizerV15 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV15" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV15", register=True) +except Exception as e: # UltimateEvolutionaryGradientOptimizerV15 print("UltimateEvolutionaryGradientOptimizerV15 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV26 import UltimateEvolutionaryGradientOptimizerV26 +try: # UltimateEvolutionaryGradientOptimizerV26 + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV26 import ( + UltimateEvolutionaryGradientOptimizerV26, + ) lama_register["UltimateEvolutionaryGradientOptimizerV26"] = UltimateEvolutionaryGradientOptimizerV26 - res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateEvolutionaryGradientOptimizerV26 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV26").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateEvolutionaryGradientOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV26" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV26", register=True) +except Exception as e: # UltimateEvolutionaryGradientOptimizerV26 print("UltimateEvolutionaryGradientOptimizerV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV33 import UltimateEvolutionaryGradientOptimizerV33 +try: # UltimateEvolutionaryGradientOptimizerV33 + from nevergrad.optimization.lama.UltimateEvolutionaryGradientOptimizerV33 import ( + UltimateEvolutionaryGradientOptimizerV33, + ) lama_register["UltimateEvolutionaryGradientOptimizerV33"] = UltimateEvolutionaryGradientOptimizerV33 - res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateEvolutionaryGradientOptimizerV33 = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV33").set_name("LLAMAUltimateEvolutionaryGradientOptimizerV33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryGradientOptimizerV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateEvolutionaryGradientOptimizerV33 = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryGradientOptimizerV33" + ).set_name("LLAMAUltimateEvolutionaryGradientOptimizerV33", register=True) +except Exception as e: # UltimateEvolutionaryGradientOptimizerV33 print("UltimateEvolutionaryGradientOptimizerV33 can not be imported: ", e) -try: +try: # UltimateEvolutionaryOptimizer from nevergrad.optimization.lama.UltimateEvolutionaryOptimizer import UltimateEvolutionaryOptimizer lama_register["UltimateEvolutionaryOptimizer"] = UltimateEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryOptimizer").set_name("LLAMAUltimateEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltimateEvolutionaryOptimizer" + ).set_name("LLAMAUltimateEvolutionaryOptimizer", register=True) +except Exception as e: # UltimateEvolutionaryOptimizer print("UltimateEvolutionaryOptimizer can not be imported: ", e) -try: +try: # UltimateRefinedAQAPSO_LS_DIW_AP from nevergrad.optimization.lama.UltimateRefinedAQAPSO_LS_DIW_AP import UltimateRefinedAQAPSO_LS_DIW_AP lama_register["UltimateRefinedAQAPSO_LS_DIW_AP"] = UltimateRefinedAQAPSO_LS_DIW_AP - res = NonObjectOptimizer(method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer(method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP").set_name("LLAMAUltimateRefinedAQAPSO_LS_DIW_AP", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateRefinedAQAPSO_LS_DIW_AP = NonObjectOptimizer( + method="LLAMAUltimateRefinedAQAPSO_LS_DIW_AP" + ).set_name("LLAMAUltimateRefinedAQAPSO_LS_DIW_AP", register=True) +except Exception as e: # UltimateRefinedAQAPSO_LS_DIW_AP print("UltimateRefinedAQAPSO_LS_DIW_AP can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateRefinedPrecisionEvolutionaryOptimizerV41 import UltimateRefinedPrecisionEvolutionaryOptimizerV41 - - lama_register["UltimateRefinedPrecisionEvolutionaryOptimizerV41"] = UltimateRefinedPrecisionEvolutionaryOptimizerV41 - res = NonObjectOptimizer(method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41 = NonObjectOptimizer(method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41").set_name("LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41", register=True) -except Exception as e: +try: # UltimateRefinedPrecisionEvolutionaryOptimizerV41 + from nevergrad.optimization.lama.UltimateRefinedPrecisionEvolutionaryOptimizerV41 import ( + UltimateRefinedPrecisionEvolutionaryOptimizerV41, + ) + + lama_register["UltimateRefinedPrecisionEvolutionaryOptimizerV41"] = ( + UltimateRefinedPrecisionEvolutionaryOptimizerV41 + ) + # res = NonObjectOptimizer(method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41 = NonObjectOptimizer( + method="LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41" + ).set_name("LLAMAUltimateRefinedPrecisionEvolutionaryOptimizerV41", register=True) +except Exception as e: # UltimateRefinedPrecisionEvolutionaryOptimizerV41 print("UltimateRefinedPrecisionEvolutionaryOptimizerV41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 import UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 - - lama_register["UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18"] = UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 - res = NonObjectOptimizer(method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 = NonObjectOptimizer(method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18").set_name("LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18", register=True) -except Exception as e: +try: # UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 + from nevergrad.optimization.lama.UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 import ( + UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18, + ) + + lama_register["UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18"] = ( + UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 + ) + # res = NonObjectOptimizer(method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 = NonObjectOptimizer( + method="LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18" + ).set_name("LLAMAUltimateSuperiorRefinedEvolutionaryGradientOptimizerV18", register=True) +except Exception as e: # UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 print("UltimateSuperiorRefinedEvolutionaryGradientOptimizerV18 can not be imported: ", e) -try: +try: # UltraDynamicAdaptiveRAMEDS from nevergrad.optimization.lama.UltraDynamicAdaptiveRAMEDS import UltraDynamicAdaptiveRAMEDS lama_register["UltraDynamicAdaptiveRAMEDS"] = UltraDynamicAdaptiveRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraDynamicAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS").set_name("LLAMAUltraDynamicAdaptiveRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraDynamicAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraDynamicAdaptiveRAMEDS").set_name( + "LLAMAUltraDynamicAdaptiveRAMEDS", register=True + ) +except Exception as e: # UltraDynamicAdaptiveRAMEDS print("UltraDynamicAdaptiveRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraDynamicDualPhaseOptimizedStrategyV16 import UltraDynamicDualPhaseOptimizedStrategyV16 +try: # UltraDynamicDualPhaseOptimizedStrategyV16 + from nevergrad.optimization.lama.UltraDynamicDualPhaseOptimizedStrategyV16 import ( + UltraDynamicDualPhaseOptimizedStrategyV16, + ) lama_register["UltraDynamicDualPhaseOptimizedStrategyV16"] = UltraDynamicDualPhaseOptimizedStrategyV16 - res = NonObjectOptimizer(method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraDynamicDualPhaseOptimizedStrategyV16 = NonObjectOptimizer(method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16").set_name("LLAMAUltraDynamicDualPhaseOptimizedStrategyV16", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraDynamicDualPhaseOptimizedStrategyV16 = NonObjectOptimizer( + method="LLAMAUltraDynamicDualPhaseOptimizedStrategyV16" + ).set_name("LLAMAUltraDynamicDualPhaseOptimizedStrategyV16", register=True) +except Exception as e: # UltraDynamicDualPhaseOptimizedStrategyV16 print("UltraDynamicDualPhaseOptimizedStrategyV16 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV10 import UltraEnhancedAdaptiveMemoryHybridOptimizerV10 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV10"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV10 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV10 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV10 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV10, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV10"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV10 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV10", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV10 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV11 import UltraEnhancedAdaptiveMemoryHybridOptimizerV11 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV11"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV11 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV11 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV11 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV11, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV11"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV11 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV11", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV11 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV12 import UltraEnhancedAdaptiveMemoryHybridOptimizerV12 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV12"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV12 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV12 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV12 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV12, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV12"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV12 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV12", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV12 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV12 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV2 import UltraEnhancedAdaptiveMemoryHybridOptimizerV2 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV2"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV2 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV2 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV2 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV2, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV2"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV2 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV2", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV2 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV2 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV3 import UltraEnhancedAdaptiveMemoryHybridOptimizerV3 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV3"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV3 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV3 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV3 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV3, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV3"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV3 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV3", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV3 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV4 import UltraEnhancedAdaptiveMemoryHybridOptimizerV4 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV4"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV4 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV4 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV4 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV4, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV4"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV4 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV4", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV4 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV7 import UltraEnhancedAdaptiveMemoryHybridOptimizerV7 - - lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV7"] = UltraEnhancedAdaptiveMemoryHybridOptimizerV7 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7").set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7", register=True) -except Exception as e: +try: # UltraEnhancedAdaptiveMemoryHybridOptimizerV7 + from nevergrad.optimization.lama.UltraEnhancedAdaptiveMemoryHybridOptimizerV7 import ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV7, + ) + + lama_register["UltraEnhancedAdaptiveMemoryHybridOptimizerV7"] = ( + UltraEnhancedAdaptiveMemoryHybridOptimizerV7 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7" + ).set_name("LLAMAUltraEnhancedAdaptiveMemoryHybridOptimizerV7", register=True) +except Exception as e: # UltraEnhancedAdaptiveMemoryHybridOptimizerV7 print("UltraEnhancedAdaptiveMemoryHybridOptimizerV7 can not be imported: ", e) -try: +try: # UltraEnhancedAdaptiveRAMEDS from nevergrad.optimization.lama.UltraEnhancedAdaptiveRAMEDS import UltraEnhancedAdaptiveRAMEDS lama_register["UltraEnhancedAdaptiveRAMEDS"] = UltraEnhancedAdaptiveRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS").set_name("LLAMAUltraEnhancedAdaptiveRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraEnhancedAdaptiveRAMEDS").set_name( + "LLAMAUltraEnhancedAdaptiveRAMEDS", register=True + ) +except Exception as e: # UltraEnhancedAdaptiveRAMEDS print("UltraEnhancedAdaptiveRAMEDS can not be imported: ", e) -try: +try: # UltraEnhancedDynamicDE from nevergrad.optimization.lama.UltraEnhancedDynamicDE import UltraEnhancedDynamicDE lama_register["UltraEnhancedDynamicDE"] = UltraEnhancedDynamicDE - res = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedDynamicDE = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE").set_name("LLAMAUltraEnhancedDynamicDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedDynamicDE = NonObjectOptimizer(method="LLAMAUltraEnhancedDynamicDE").set_name( + "LLAMAUltraEnhancedDynamicDE", register=True + ) +except Exception as e: # UltraEnhancedDynamicDE print("UltraEnhancedDynamicDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedEliteAdaptiveMemoryHybridOptimizer import UltraEnhancedEliteAdaptiveMemoryHybridOptimizer - - lama_register["UltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = UltraEnhancedEliteAdaptiveMemoryHybridOptimizer - res = NonObjectOptimizer(method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer(method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer").set_name("LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) -except Exception as e: +try: # UltraEnhancedEliteAdaptiveMemoryHybridOptimizer + from nevergrad.optimization.lama.UltraEnhancedEliteAdaptiveMemoryHybridOptimizer import ( + UltraEnhancedEliteAdaptiveMemoryHybridOptimizer, + ) + + lama_register["UltraEnhancedEliteAdaptiveMemoryHybridOptimizer"] = ( + UltraEnhancedEliteAdaptiveMemoryHybridOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer = NonObjectOptimizer( + method="LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer" + ).set_name("LLAMAUltraEnhancedEliteAdaptiveMemoryHybridOptimizer", register=True) +except Exception as e: # UltraEnhancedEliteAdaptiveMemoryHybridOptimizer print("UltraEnhancedEliteAdaptiveMemoryHybridOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedEvolutionaryGradientOptimizerV14 import UltraEnhancedEvolutionaryGradientOptimizerV14 - - lama_register["UltraEnhancedEvolutionaryGradientOptimizerV14"] = UltraEnhancedEvolutionaryGradientOptimizerV14 - res = NonObjectOptimizer(method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14 = NonObjectOptimizer(method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14").set_name("LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14", register=True) -except Exception as e: +try: # UltraEnhancedEvolutionaryGradientOptimizerV14 + from nevergrad.optimization.lama.UltraEnhancedEvolutionaryGradientOptimizerV14 import ( + UltraEnhancedEvolutionaryGradientOptimizerV14, + ) + + lama_register["UltraEnhancedEvolutionaryGradientOptimizerV14"] = ( + UltraEnhancedEvolutionaryGradientOptimizerV14 + ) + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14 = NonObjectOptimizer( + method="LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14" + ).set_name("LLAMAUltraEnhancedEvolutionaryGradientOptimizerV14", register=True) +except Exception as e: # UltraEnhancedEvolutionaryGradientOptimizerV14 print("UltraEnhancedEvolutionaryGradientOptimizerV14 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEnhancedPrecisionEvolutionaryOptimizer import UltraEnhancedPrecisionEvolutionaryOptimizer +try: # UltraEnhancedPrecisionEvolutionaryOptimizer + from nevergrad.optimization.lama.UltraEnhancedPrecisionEvolutionaryOptimizer import ( + UltraEnhancedPrecisionEvolutionaryOptimizer, + ) lama_register["UltraEnhancedPrecisionEvolutionaryOptimizer"] = UltraEnhancedPrecisionEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer").set_name("LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer" + ).set_name("LLAMAUltraEnhancedPrecisionEvolutionaryOptimizer", register=True) +except Exception as e: # UltraEnhancedPrecisionEvolutionaryOptimizer print("UltraEnhancedPrecisionEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraEvolutionaryGradientOptimizerV27 import UltraEvolutionaryGradientOptimizerV27 +try: # UltraEvolutionaryGradientOptimizerV27 + from nevergrad.optimization.lama.UltraEvolutionaryGradientOptimizerV27 import ( + UltraEvolutionaryGradientOptimizerV27, + ) lama_register["UltraEvolutionaryGradientOptimizerV27"] = UltraEvolutionaryGradientOptimizerV27 - res = NonObjectOptimizer(method="LLAMAUltraEvolutionaryGradientOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraEvolutionaryGradientOptimizerV27 = NonObjectOptimizer(method="LLAMAUltraEvolutionaryGradientOptimizerV27").set_name("LLAMAUltraEvolutionaryGradientOptimizerV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraEvolutionaryGradientOptimizerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraEvolutionaryGradientOptimizerV27 = NonObjectOptimizer( + method="LLAMAUltraEvolutionaryGradientOptimizerV27" + ).set_name("LLAMAUltraEvolutionaryGradientOptimizerV27", register=True) +except Exception as e: # UltraEvolutionaryGradientOptimizerV27 print("UltraEvolutionaryGradientOptimizerV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraFineSpiralDifferentialOptimizerV7 import UltraFineSpiralDifferentialOptimizerV7 +try: # UltraFineSpiralDifferentialOptimizerV7 + from nevergrad.optimization.lama.UltraFineSpiralDifferentialOptimizerV7 import ( + UltraFineSpiralDifferentialOptimizerV7, + ) lama_register["UltraFineSpiralDifferentialOptimizerV7"] = UltraFineSpiralDifferentialOptimizerV7 - res = NonObjectOptimizer(method="LLAMAUltraFineSpiralDifferentialOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraFineSpiralDifferentialOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraFineSpiralDifferentialOptimizerV7").set_name("LLAMAUltraFineSpiralDifferentialOptimizerV7", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraFineSpiralDifferentialOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraFineSpiralDifferentialOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraFineSpiralDifferentialOptimizerV7" + ).set_name("LLAMAUltraFineSpiralDifferentialOptimizerV7", register=True) +except Exception as e: # UltraFineSpiralDifferentialOptimizerV7 print("UltraFineSpiralDifferentialOptimizerV7 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizer import UltraFineTunedEvolutionaryOptimizer +try: # UltraFineTunedEvolutionaryOptimizer + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizer import ( + UltraFineTunedEvolutionaryOptimizer, + ) lama_register["UltraFineTunedEvolutionaryOptimizer"] = UltraFineTunedEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraFineTunedEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizer").set_name("LLAMAUltraFineTunedEvolutionaryOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraFineTunedEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraFineTunedEvolutionaryOptimizer" + ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizer", register=True) +except Exception as e: # UltraFineTunedEvolutionaryOptimizer print("UltraFineTunedEvolutionaryOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizerV24 import UltraFineTunedEvolutionaryOptimizerV24 +try: # UltraFineTunedEvolutionaryOptimizerV24 + from nevergrad.optimization.lama.UltraFineTunedEvolutionaryOptimizerV24 import ( + UltraFineTunedEvolutionaryOptimizerV24, + ) lama_register["UltraFineTunedEvolutionaryOptimizerV24"] = UltraFineTunedEvolutionaryOptimizerV24 - res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraFineTunedEvolutionaryOptimizerV24 = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizerV24").set_name("LLAMAUltraFineTunedEvolutionaryOptimizerV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraFineTunedEvolutionaryOptimizerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraFineTunedEvolutionaryOptimizerV24 = NonObjectOptimizer( + method="LLAMAUltraFineTunedEvolutionaryOptimizerV24" + ).set_name("LLAMAUltraFineTunedEvolutionaryOptimizerV24", register=True) +except Exception as e: # UltraFineTunedEvolutionaryOptimizerV24 print("UltraFineTunedEvolutionaryOptimizerV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV18 import UltraOptimizedDynamicPrecisionOptimizerV18 +try: # UltraOptimizedDynamicPrecisionOptimizerV18 + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV18 import ( + UltraOptimizedDynamicPrecisionOptimizerV18, + ) lama_register["UltraOptimizedDynamicPrecisionOptimizerV18"] = UltraOptimizedDynamicPrecisionOptimizerV18 - res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedDynamicPrecisionOptimizerV18 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV18", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV18 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV18" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV18", register=True) +except Exception as e: # UltraOptimizedDynamicPrecisionOptimizerV18 print("UltraOptimizedDynamicPrecisionOptimizerV18 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV19 import UltraOptimizedDynamicPrecisionOptimizerV19 +try: # UltraOptimizedDynamicPrecisionOptimizerV19 + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV19 import ( + UltraOptimizedDynamicPrecisionOptimizerV19, + ) lama_register["UltraOptimizedDynamicPrecisionOptimizerV19"] = UltraOptimizedDynamicPrecisionOptimizerV19 - res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedDynamicPrecisionOptimizerV19 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV19", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV19 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV19" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV19", register=True) +except Exception as e: # UltraOptimizedDynamicPrecisionOptimizerV19 print("UltraOptimizedDynamicPrecisionOptimizerV19 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV52 import UltraOptimizedDynamicPrecisionOptimizerV52 +try: # UltraOptimizedDynamicPrecisionOptimizerV52 + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV52 import ( + UltraOptimizedDynamicPrecisionOptimizerV52, + ) lama_register["UltraOptimizedDynamicPrecisionOptimizerV52"] = UltraOptimizedDynamicPrecisionOptimizerV52 - res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedDynamicPrecisionOptimizerV52 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV52", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV52 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV52" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV52", register=True) +except Exception as e: # UltraOptimizedDynamicPrecisionOptimizerV52 print("UltraOptimizedDynamicPrecisionOptimizerV52 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV53 import UltraOptimizedDynamicPrecisionOptimizerV53 +try: # UltraOptimizedDynamicPrecisionOptimizerV53 + from nevergrad.optimization.lama.UltraOptimizedDynamicPrecisionOptimizerV53 import ( + UltraOptimizedDynamicPrecisionOptimizerV53, + ) lama_register["UltraOptimizedDynamicPrecisionOptimizerV53"] = UltraOptimizedDynamicPrecisionOptimizerV53 - res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedDynamicPrecisionOptimizerV53 = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53").set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV53", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedDynamicPrecisionOptimizerV53 = NonObjectOptimizer( + method="LLAMAUltraOptimizedDynamicPrecisionOptimizerV53" + ).set_name("LLAMAUltraOptimizedDynamicPrecisionOptimizerV53", register=True) +except Exception as e: # UltraOptimizedDynamicPrecisionOptimizerV53 print("UltraOptimizedDynamicPrecisionOptimizerV53 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedEvolutionaryGradientOptimizerV30 import UltraOptimizedEvolutionaryGradientOptimizerV30 - - lama_register["UltraOptimizedEvolutionaryGradientOptimizerV30"] = UltraOptimizedEvolutionaryGradientOptimizerV30 - res = NonObjectOptimizer(method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30 = NonObjectOptimizer(method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30").set_name("LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30", register=True) -except Exception as e: +try: # UltraOptimizedEvolutionaryGradientOptimizerV30 + from nevergrad.optimization.lama.UltraOptimizedEvolutionaryGradientOptimizerV30 import ( + UltraOptimizedEvolutionaryGradientOptimizerV30, + ) + + lama_register["UltraOptimizedEvolutionaryGradientOptimizerV30"] = ( + UltraOptimizedEvolutionaryGradientOptimizerV30 + ) + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30 = NonObjectOptimizer( + method="LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30" + ).set_name("LLAMAUltraOptimizedEvolutionaryGradientOptimizerV30", register=True) +except Exception as e: # UltraOptimizedEvolutionaryGradientOptimizerV30 print("UltraOptimizedEvolutionaryGradientOptimizerV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer import UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer - - lama_register["UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer"] = UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer - res = NonObjectOptimizer(method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer = NonObjectOptimizer(method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer").set_name("LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer", register=True) -except Exception as e: +try: # UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer + from nevergrad.optimization.lama.UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer import ( + UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer, + ) + + lama_register["UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer"] = ( + UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer = NonObjectOptimizer( + method="LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer" + ).set_name("LLAMAUltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer", register=True) +except Exception as e: # UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer print("UltraOptimizedPrecisionAdaptiveEvolutionaryOptimizer can not be imported: ", e) -try: +try: # UltraOptimizedRAMEDS from nevergrad.optimization.lama.UltraOptimizedRAMEDS import UltraOptimizedRAMEDS lama_register["UltraOptimizedRAMEDS"] = UltraOptimizedRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS").set_name("LLAMAUltraOptimizedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedRAMEDS = NonObjectOptimizer(method="LLAMAUltraOptimizedRAMEDS").set_name( + "LLAMAUltraOptimizedRAMEDS", register=True + ) +except Exception as e: # UltraOptimizedRAMEDS print("UltraOptimizedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraOptimizedSpiralDifferentialEvolution import UltraOptimizedSpiralDifferentialEvolution +try: # UltraOptimizedSpiralDifferentialEvolution + from nevergrad.optimization.lama.UltraOptimizedSpiralDifferentialEvolution import ( + UltraOptimizedSpiralDifferentialEvolution, + ) lama_register["UltraOptimizedSpiralDifferentialEvolution"] = UltraOptimizedSpiralDifferentialEvolution - res = NonObjectOptimizer(method="LLAMAUltraOptimizedSpiralDifferentialEvolution")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraOptimizedSpiralDifferentialEvolution = NonObjectOptimizer(method="LLAMAUltraOptimizedSpiralDifferentialEvolution").set_name("LLAMAUltraOptimizedSpiralDifferentialEvolution", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraOptimizedSpiralDifferentialEvolution")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraOptimizedSpiralDifferentialEvolution = NonObjectOptimizer( + method="LLAMAUltraOptimizedSpiralDifferentialEvolution" + ).set_name("LLAMAUltraOptimizedSpiralDifferentialEvolution", register=True) +except Exception as e: # UltraOptimizedSpiralDifferentialEvolution print("UltraOptimizedSpiralDifferentialEvolution can not be imported: ", e) -try: +try: # UltraPreciseDynamicOptimizerV26 from nevergrad.optimization.lama.UltraPreciseDynamicOptimizerV26 import UltraPreciseDynamicOptimizerV26 lama_register["UltraPreciseDynamicOptimizerV26"] = UltraPreciseDynamicOptimizerV26 - res = NonObjectOptimizer(method="LLAMAUltraPreciseDynamicOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraPreciseDynamicOptimizerV26 = NonObjectOptimizer(method="LLAMAUltraPreciseDynamicOptimizerV26").set_name("LLAMAUltraPreciseDynamicOptimizerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraPreciseDynamicOptimizerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraPreciseDynamicOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltraPreciseDynamicOptimizerV26" + ).set_name("LLAMAUltraPreciseDynamicOptimizerV26", register=True) +except Exception as e: # UltraPreciseDynamicOptimizerV26 print("UltraPreciseDynamicOptimizerV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraPrecisionSpiralDifferentialOptimizerV9 import UltraPrecisionSpiralDifferentialOptimizerV9 +try: # UltraPrecisionSpiralDifferentialOptimizerV9 + from nevergrad.optimization.lama.UltraPrecisionSpiralDifferentialOptimizerV9 import ( + UltraPrecisionSpiralDifferentialOptimizerV9, + ) lama_register["UltraPrecisionSpiralDifferentialOptimizerV9"] = UltraPrecisionSpiralDifferentialOptimizerV9 - res = NonObjectOptimizer(method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraPrecisionSpiralDifferentialOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9").set_name("LLAMAUltraPrecisionSpiralDifferentialOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraPrecisionSpiralDifferentialOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraPrecisionSpiralDifferentialOptimizerV9" + ).set_name("LLAMAUltraPrecisionSpiralDifferentialOptimizerV9", register=True) +except Exception as e: # UltraPrecisionSpiralDifferentialOptimizerV9 print("UltraPrecisionSpiralDifferentialOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraQuantumReactiveHybridStrategy import UltraQuantumReactiveHybridStrategy +try: # UltraQuantumReactiveHybridStrategy + from nevergrad.optimization.lama.UltraQuantumReactiveHybridStrategy import ( + UltraQuantumReactiveHybridStrategy, + ) lama_register["UltraQuantumReactiveHybridStrategy"] = UltraQuantumReactiveHybridStrategy - res = NonObjectOptimizer(method="LLAMAUltraQuantumReactiveHybridStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraQuantumReactiveHybridStrategy = NonObjectOptimizer(method="LLAMAUltraQuantumReactiveHybridStrategy").set_name("LLAMAUltraQuantumReactiveHybridStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraQuantumReactiveHybridStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraQuantumReactiveHybridStrategy = NonObjectOptimizer( + method="LLAMAUltraQuantumReactiveHybridStrategy" + ).set_name("LLAMAUltraQuantumReactiveHybridStrategy", register=True) +except Exception as e: # UltraQuantumReactiveHybridStrategy print("UltraQuantumReactiveHybridStrategy can not be imported: ", e) -try: +try: # UltraRAMEDS from nevergrad.optimization.lama.UltraRAMEDS import UltraRAMEDS lama_register["UltraRAMEDS"] = UltraRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRAMEDS = NonObjectOptimizer(method="LLAMAUltraRAMEDS").set_name("LLAMAUltraRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRAMEDS = NonObjectOptimizer(method="LLAMAUltraRAMEDS").set_name( + "LLAMAUltraRAMEDS", register=True + ) +except Exception as e: # UltraRAMEDS print("UltraRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveConvergenceStrategy import UltraRefinedAdaptiveConvergenceStrategy +try: # UltraRefinedAdaptiveConvergenceStrategy + from nevergrad.optimization.lama.UltraRefinedAdaptiveConvergenceStrategy import ( + UltraRefinedAdaptiveConvergenceStrategy, + ) lama_register["UltraRefinedAdaptiveConvergenceStrategy"] = UltraRefinedAdaptiveConvergenceStrategy - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveConvergenceStrategy = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveConvergenceStrategy").set_name("LLAMAUltraRefinedAdaptiveConvergenceStrategy", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveConvergenceStrategy")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveConvergenceStrategy = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveConvergenceStrategy" + ).set_name("LLAMAUltraRefinedAdaptiveConvergenceStrategy", register=True) +except Exception as e: # UltraRefinedAdaptiveConvergenceStrategy print("UltraRefinedAdaptiveConvergenceStrategy can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV5 import UltraRefinedAdaptiveMemoryHybridOptimizerV5 +try: # UltraRefinedAdaptiveMemoryHybridOptimizerV5 + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV5 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV5, + ) lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV5"] = UltraRefinedAdaptiveMemoryHybridOptimizerV5 - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV5", register=True) +except Exception as e: # UltraRefinedAdaptiveMemoryHybridOptimizerV5 print("UltraRefinedAdaptiveMemoryHybridOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV6 import UltraRefinedAdaptiveMemoryHybridOptimizerV6 +try: # UltraRefinedAdaptiveMemoryHybridOptimizerV6 + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV6 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV6, + ) lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV6"] = UltraRefinedAdaptiveMemoryHybridOptimizerV6 - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV6", register=True) +except Exception as e: # UltraRefinedAdaptiveMemoryHybridOptimizerV6 print("UltraRefinedAdaptiveMemoryHybridOptimizerV6 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV8 import UltraRefinedAdaptiveMemoryHybridOptimizerV8 +try: # UltraRefinedAdaptiveMemoryHybridOptimizerV8 + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV8 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV8, + ) lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV8"] = UltraRefinedAdaptiveMemoryHybridOptimizerV8 - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV8", register=True) +except Exception as e: # UltraRefinedAdaptiveMemoryHybridOptimizerV8 print("UltraRefinedAdaptiveMemoryHybridOptimizerV8 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV9 import UltraRefinedAdaptiveMemoryHybridOptimizerV9 +try: # UltraRefinedAdaptiveMemoryHybridOptimizerV9 + from nevergrad.optimization.lama.UltraRefinedAdaptiveMemoryHybridOptimizerV9 import ( + UltraRefinedAdaptiveMemoryHybridOptimizerV9, + ) lama_register["UltraRefinedAdaptiveMemoryHybridOptimizerV9"] = UltraRefinedAdaptiveMemoryHybridOptimizerV9 - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9").set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9" + ).set_name("LLAMAUltraRefinedAdaptiveMemoryHybridOptimizerV9", register=True) +except Exception as e: # UltraRefinedAdaptiveMemoryHybridOptimizerV9 print("UltraRefinedAdaptiveMemoryHybridOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedAdaptivePrecisionOptimizer import UltraRefinedAdaptivePrecisionOptimizer +try: # UltraRefinedAdaptivePrecisionOptimizer + from nevergrad.optimization.lama.UltraRefinedAdaptivePrecisionOptimizer import ( + UltraRefinedAdaptivePrecisionOptimizer, + ) lama_register["UltraRefinedAdaptivePrecisionOptimizer"] = UltraRefinedAdaptivePrecisionOptimizer - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptivePrecisionOptimizer").set_name("LLAMAUltraRefinedAdaptivePrecisionOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptivePrecisionOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptivePrecisionOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedAdaptivePrecisionOptimizer" + ).set_name("LLAMAUltraRefinedAdaptivePrecisionOptimizer", register=True) +except Exception as e: # UltraRefinedAdaptivePrecisionOptimizer print("UltraRefinedAdaptivePrecisionOptimizer can not be imported: ", e) -try: +try: # UltraRefinedAdaptiveRAMEDS from nevergrad.optimization.lama.UltraRefinedAdaptiveRAMEDS import UltraRefinedAdaptiveRAMEDS lama_register["UltraRefinedAdaptiveRAMEDS"] = UltraRefinedAdaptiveRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS").set_name("LLAMAUltraRefinedAdaptiveRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedAdaptiveRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedAdaptiveRAMEDS").set_name( + "LLAMAUltraRefinedAdaptiveRAMEDS", register=True + ) +except Exception as e: # UltraRefinedAdaptiveRAMEDS print("UltraRefinedAdaptiveRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedConvergenceSpiralSearch import UltraRefinedConvergenceSpiralSearch +try: # UltraRefinedConvergenceSpiralSearch + from nevergrad.optimization.lama.UltraRefinedConvergenceSpiralSearch import ( + UltraRefinedConvergenceSpiralSearch, + ) lama_register["UltraRefinedConvergenceSpiralSearch"] = UltraRefinedConvergenceSpiralSearch - res = NonObjectOptimizer(method="LLAMAUltraRefinedConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedConvergenceSpiralSearch = NonObjectOptimizer(method="LLAMAUltraRefinedConvergenceSpiralSearch").set_name("LLAMAUltraRefinedConvergenceSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedConvergenceSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedConvergenceSpiralSearch = NonObjectOptimizer( + method="LLAMAUltraRefinedConvergenceSpiralSearch" + ).set_name("LLAMAUltraRefinedConvergenceSpiralSearch", register=True) +except Exception as e: # UltraRefinedConvergenceSpiralSearch print("UltraRefinedConvergenceSpiralSearch can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV10 import UltraRefinedDynamicPrecisionOptimizerV10 +try: # UltraRefinedDynamicPrecisionOptimizerV10 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV10 import ( + UltraRefinedDynamicPrecisionOptimizerV10, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV10"] = UltraRefinedDynamicPrecisionOptimizerV10 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV10", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV10" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV10", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV10 print("UltraRefinedDynamicPrecisionOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV11 import UltraRefinedDynamicPrecisionOptimizerV11 +try: # UltraRefinedDynamicPrecisionOptimizerV11 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV11 import ( + UltraRefinedDynamicPrecisionOptimizerV11, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV11"] = UltraRefinedDynamicPrecisionOptimizerV11 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV11 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV11", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV11 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV11" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV11", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV11 print("UltraRefinedDynamicPrecisionOptimizerV11 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV17 import UltraRefinedDynamicPrecisionOptimizerV17 +try: # UltraRefinedDynamicPrecisionOptimizerV17 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV17 import ( + UltraRefinedDynamicPrecisionOptimizerV17, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV17"] = UltraRefinedDynamicPrecisionOptimizerV17 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV17 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV17", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV17 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV17" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV17", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV17 print("UltraRefinedDynamicPrecisionOptimizerV17 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV22 import UltraRefinedDynamicPrecisionOptimizerV22 +try: # UltraRefinedDynamicPrecisionOptimizerV22 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV22 import ( + UltraRefinedDynamicPrecisionOptimizerV22, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV22"] = UltraRefinedDynamicPrecisionOptimizerV22 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV22 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV22", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV22 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV22" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV22", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV22 print("UltraRefinedDynamicPrecisionOptimizerV22 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV23 import UltraRefinedDynamicPrecisionOptimizerV23 +try: # UltraRefinedDynamicPrecisionOptimizerV23 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV23 import ( + UltraRefinedDynamicPrecisionOptimizerV23, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV23"] = UltraRefinedDynamicPrecisionOptimizerV23 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV23 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV23", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV23 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV23" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV23", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV23 print("UltraRefinedDynamicPrecisionOptimizerV23 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV24 import UltraRefinedDynamicPrecisionOptimizerV24 +try: # UltraRefinedDynamicPrecisionOptimizerV24 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV24 import ( + UltraRefinedDynamicPrecisionOptimizerV24, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV24"] = UltraRefinedDynamicPrecisionOptimizerV24 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV24 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV24", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV24 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV24" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV24", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV24 print("UltraRefinedDynamicPrecisionOptimizerV24 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV25 import UltraRefinedDynamicPrecisionOptimizerV25 +try: # UltraRefinedDynamicPrecisionOptimizerV25 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV25 import ( + UltraRefinedDynamicPrecisionOptimizerV25, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV25"] = UltraRefinedDynamicPrecisionOptimizerV25 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV25 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV25", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV25 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV25" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV25", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV25 print("UltraRefinedDynamicPrecisionOptimizerV25 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV26 import UltraRefinedDynamicPrecisionOptimizerV26 +try: # UltraRefinedDynamicPrecisionOptimizerV26 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV26 import ( + UltraRefinedDynamicPrecisionOptimizerV26, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV26"] = UltraRefinedDynamicPrecisionOptimizerV26 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV26 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV26", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV26 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV26" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV26", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV26 print("UltraRefinedDynamicPrecisionOptimizerV26 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV27 import UltraRefinedDynamicPrecisionOptimizerV27 +try: # UltraRefinedDynamicPrecisionOptimizerV27 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV27 import ( + UltraRefinedDynamicPrecisionOptimizerV27, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV27"] = UltraRefinedDynamicPrecisionOptimizerV27 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV27 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV27", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV27 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV27" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV27", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV27 print("UltraRefinedDynamicPrecisionOptimizerV27 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV28 import UltraRefinedDynamicPrecisionOptimizerV28 +try: # UltraRefinedDynamicPrecisionOptimizerV28 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV28 import ( + UltraRefinedDynamicPrecisionOptimizerV28, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV28"] = UltraRefinedDynamicPrecisionOptimizerV28 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV28 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV28", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV28 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV28" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV28", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV28 print("UltraRefinedDynamicPrecisionOptimizerV28 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV29 import UltraRefinedDynamicPrecisionOptimizerV29 +try: # UltraRefinedDynamicPrecisionOptimizerV29 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV29 import ( + UltraRefinedDynamicPrecisionOptimizerV29, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV29"] = UltraRefinedDynamicPrecisionOptimizerV29 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV29 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV29", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV29 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV29" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV29", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV29 print("UltraRefinedDynamicPrecisionOptimizerV29 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV30 import UltraRefinedDynamicPrecisionOptimizerV30 +try: # UltraRefinedDynamicPrecisionOptimizerV30 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV30 import ( + UltraRefinedDynamicPrecisionOptimizerV30, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV30"] = UltraRefinedDynamicPrecisionOptimizerV30 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV30 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV30", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV30 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV30" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV30", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV30 print("UltraRefinedDynamicPrecisionOptimizerV30 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV31 import UltraRefinedDynamicPrecisionOptimizerV31 +try: # UltraRefinedDynamicPrecisionOptimizerV31 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV31 import ( + UltraRefinedDynamicPrecisionOptimizerV31, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV31"] = UltraRefinedDynamicPrecisionOptimizerV31 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV31 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV31", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV31 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV31" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV31", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV31 print("UltraRefinedDynamicPrecisionOptimizerV31 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV32 import UltraRefinedDynamicPrecisionOptimizerV32 +try: # UltraRefinedDynamicPrecisionOptimizerV32 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV32 import ( + UltraRefinedDynamicPrecisionOptimizerV32, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV32"] = UltraRefinedDynamicPrecisionOptimizerV32 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV32 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV32", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV32 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV32" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV32", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV32 print("UltraRefinedDynamicPrecisionOptimizerV32 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV33 import UltraRefinedDynamicPrecisionOptimizerV33 +try: # UltraRefinedDynamicPrecisionOptimizerV33 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV33 import ( + UltraRefinedDynamicPrecisionOptimizerV33, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV33"] = UltraRefinedDynamicPrecisionOptimizerV33 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV33 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV33", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV33 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV33" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV33", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV33 print("UltraRefinedDynamicPrecisionOptimizerV33 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV34 import UltraRefinedDynamicPrecisionOptimizerV34 +try: # UltraRefinedDynamicPrecisionOptimizerV34 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV34 import ( + UltraRefinedDynamicPrecisionOptimizerV34, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV34"] = UltraRefinedDynamicPrecisionOptimizerV34 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV34 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV34", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV34 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV34" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV34", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV34 print("UltraRefinedDynamicPrecisionOptimizerV34 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV35 import UltraRefinedDynamicPrecisionOptimizerV35 +try: # UltraRefinedDynamicPrecisionOptimizerV35 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV35 import ( + UltraRefinedDynamicPrecisionOptimizerV35, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV35"] = UltraRefinedDynamicPrecisionOptimizerV35 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV35 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV35", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV35 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV35" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV35", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV35 print("UltraRefinedDynamicPrecisionOptimizerV35 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV36 import UltraRefinedDynamicPrecisionOptimizerV36 +try: # UltraRefinedDynamicPrecisionOptimizerV36 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV36 import ( + UltraRefinedDynamicPrecisionOptimizerV36, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV36"] = UltraRefinedDynamicPrecisionOptimizerV36 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV36 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV36", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV36 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV36" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV36", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV36 print("UltraRefinedDynamicPrecisionOptimizerV36 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV37 import UltraRefinedDynamicPrecisionOptimizerV37 +try: # UltraRefinedDynamicPrecisionOptimizerV37 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV37 import ( + UltraRefinedDynamicPrecisionOptimizerV37, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV37"] = UltraRefinedDynamicPrecisionOptimizerV37 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV37 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV37", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV37 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV37" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV37", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV37 print("UltraRefinedDynamicPrecisionOptimizerV37 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV38 import UltraRefinedDynamicPrecisionOptimizerV38 +try: # UltraRefinedDynamicPrecisionOptimizerV38 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV38 import ( + UltraRefinedDynamicPrecisionOptimizerV38, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV38"] = UltraRefinedDynamicPrecisionOptimizerV38 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV38 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV38", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV38 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV38" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV38", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV38 print("UltraRefinedDynamicPrecisionOptimizerV38 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV39 import UltraRefinedDynamicPrecisionOptimizerV39 +try: # UltraRefinedDynamicPrecisionOptimizerV39 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV39 import ( + UltraRefinedDynamicPrecisionOptimizerV39, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV39"] = UltraRefinedDynamicPrecisionOptimizerV39 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV39 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV39", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV39 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV39" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV39", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV39 print("UltraRefinedDynamicPrecisionOptimizerV39 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV4 import UltraRefinedDynamicPrecisionOptimizerV4 +try: # UltraRefinedDynamicPrecisionOptimizerV4 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV4 import ( + UltraRefinedDynamicPrecisionOptimizerV4, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV4"] = UltraRefinedDynamicPrecisionOptimizerV4 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV4 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV4", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV4 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV4" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV4", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV4 print("UltraRefinedDynamicPrecisionOptimizerV4 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV40 import UltraRefinedDynamicPrecisionOptimizerV40 +try: # UltraRefinedDynamicPrecisionOptimizerV40 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV40 import ( + UltraRefinedDynamicPrecisionOptimizerV40, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV40"] = UltraRefinedDynamicPrecisionOptimizerV40 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV40 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV40", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV40 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV40" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV40", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV40 print("UltraRefinedDynamicPrecisionOptimizerV40 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV41 import UltraRefinedDynamicPrecisionOptimizerV41 +try: # UltraRefinedDynamicPrecisionOptimizerV41 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV41 import ( + UltraRefinedDynamicPrecisionOptimizerV41, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV41"] = UltraRefinedDynamicPrecisionOptimizerV41 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV41 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV41", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV41 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV41" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV41", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV41 print("UltraRefinedDynamicPrecisionOptimizerV41 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV44 import UltraRefinedDynamicPrecisionOptimizerV44 +try: # UltraRefinedDynamicPrecisionOptimizerV44 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV44 import ( + UltraRefinedDynamicPrecisionOptimizerV44, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV44"] = UltraRefinedDynamicPrecisionOptimizerV44 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV44 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV44", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV44 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV44" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV44", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV44 print("UltraRefinedDynamicPrecisionOptimizerV44 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV45 import UltraRefinedDynamicPrecisionOptimizerV45 +try: # UltraRefinedDynamicPrecisionOptimizerV45 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV45 import ( + UltraRefinedDynamicPrecisionOptimizerV45, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV45"] = UltraRefinedDynamicPrecisionOptimizerV45 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV45 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV45", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV45 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV45" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV45", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV45 print("UltraRefinedDynamicPrecisionOptimizerV45 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV46 import UltraRefinedDynamicPrecisionOptimizerV46 +try: # UltraRefinedDynamicPrecisionOptimizerV46 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV46 import ( + UltraRefinedDynamicPrecisionOptimizerV46, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV46"] = UltraRefinedDynamicPrecisionOptimizerV46 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV46 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV46", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV46 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV46" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV46", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV46 print("UltraRefinedDynamicPrecisionOptimizerV46 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV47 import UltraRefinedDynamicPrecisionOptimizerV47 +try: # UltraRefinedDynamicPrecisionOptimizerV47 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV47 import ( + UltraRefinedDynamicPrecisionOptimizerV47, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV47"] = UltraRefinedDynamicPrecisionOptimizerV47 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV47 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV47", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV47 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV47" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV47", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV47 print("UltraRefinedDynamicPrecisionOptimizerV47 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV5 import UltraRefinedDynamicPrecisionOptimizerV5 +try: # UltraRefinedDynamicPrecisionOptimizerV5 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV5 import ( + UltraRefinedDynamicPrecisionOptimizerV5, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV5"] = UltraRefinedDynamicPrecisionOptimizerV5 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV5", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV5" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV5", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV5 print("UltraRefinedDynamicPrecisionOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV54 import UltraRefinedDynamicPrecisionOptimizerV54 +try: # UltraRefinedDynamicPrecisionOptimizerV54 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV54 import ( + UltraRefinedDynamicPrecisionOptimizerV54, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV54"] = UltraRefinedDynamicPrecisionOptimizerV54 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV54 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV54", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV54 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV54" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV54", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV54 print("UltraRefinedDynamicPrecisionOptimizerV54 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV55 import UltraRefinedDynamicPrecisionOptimizerV55 +try: # UltraRefinedDynamicPrecisionOptimizerV55 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV55 import ( + UltraRefinedDynamicPrecisionOptimizerV55, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV55"] = UltraRefinedDynamicPrecisionOptimizerV55 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV55 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV55", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV55 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV55" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV55", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV55 print("UltraRefinedDynamicPrecisionOptimizerV55 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV56 import UltraRefinedDynamicPrecisionOptimizerV56 +try: # UltraRefinedDynamicPrecisionOptimizerV56 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV56 import ( + UltraRefinedDynamicPrecisionOptimizerV56, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV56"] = UltraRefinedDynamicPrecisionOptimizerV56 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV56 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV56", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV56 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV56" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV56", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV56 print("UltraRefinedDynamicPrecisionOptimizerV56 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV9 import UltraRefinedDynamicPrecisionOptimizerV9 +try: # UltraRefinedDynamicPrecisionOptimizerV9 + from nevergrad.optimization.lama.UltraRefinedDynamicPrecisionOptimizerV9 import ( + UltraRefinedDynamicPrecisionOptimizerV9, + ) lama_register["UltraRefinedDynamicPrecisionOptimizerV9"] = UltraRefinedDynamicPrecisionOptimizerV9 - res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedDynamicPrecisionOptimizerV9 = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9").set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV9", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedDynamicPrecisionOptimizerV9 = NonObjectOptimizer( + method="LLAMAUltraRefinedDynamicPrecisionOptimizerV9" + ).set_name("LLAMAUltraRefinedDynamicPrecisionOptimizerV9", register=True) +except Exception as e: # UltraRefinedDynamicPrecisionOptimizerV9 print("UltraRefinedDynamicPrecisionOptimizerV9 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - - lama_register["UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer - res = NonObjectOptimizer(method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer").set_name("LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) -except Exception as e: +try: # UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + from nevergrad.optimization.lama.UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer import ( + UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer, + ) + + lama_register["UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer"] = ( + UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer" + ).set_name("LLAMAUltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer", register=True) +except Exception as e: # UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer print("UltraRefinedEliteAdaptiveMemoryDynamicCrowdingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientHybridOptimizerV5 import UltraRefinedEvolutionaryGradientHybridOptimizerV5 - - lama_register["UltraRefinedEvolutionaryGradientHybridOptimizerV5"] = UltraRefinedEvolutionaryGradientHybridOptimizerV5 - res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5").set_name("LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5", register=True) -except Exception as e: +try: # UltraRefinedEvolutionaryGradientHybridOptimizerV5 + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientHybridOptimizerV5 import ( + UltraRefinedEvolutionaryGradientHybridOptimizerV5, + ) + + lama_register["UltraRefinedEvolutionaryGradientHybridOptimizerV5"] = ( + UltraRefinedEvolutionaryGradientHybridOptimizerV5 + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientHybridOptimizerV5", register=True) +except Exception as e: # UltraRefinedEvolutionaryGradientHybridOptimizerV5 print("UltraRefinedEvolutionaryGradientHybridOptimizerV5 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV10 import UltraRefinedEvolutionaryGradientOptimizerV10 - - lama_register["UltraRefinedEvolutionaryGradientOptimizerV10"] = UltraRefinedEvolutionaryGradientOptimizerV10 - res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedEvolutionaryGradientOptimizerV10 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10").set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV10", register=True) -except Exception as e: +try: # UltraRefinedEvolutionaryGradientOptimizerV10 + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV10 import ( + UltraRefinedEvolutionaryGradientOptimizerV10, + ) + + lama_register["UltraRefinedEvolutionaryGradientOptimizerV10"] = ( + UltraRefinedEvolutionaryGradientOptimizerV10 + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedEvolutionaryGradientOptimizerV10 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV10" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV10", register=True) +except Exception as e: # UltraRefinedEvolutionaryGradientOptimizerV10 print("UltraRefinedEvolutionaryGradientOptimizerV10 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV32 import UltraRefinedEvolutionaryGradientOptimizerV32 - - lama_register["UltraRefinedEvolutionaryGradientOptimizerV32"] = UltraRefinedEvolutionaryGradientOptimizerV32 - res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedEvolutionaryGradientOptimizerV32 = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32").set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV32", register=True) -except Exception as e: +try: # UltraRefinedEvolutionaryGradientOptimizerV32 + from nevergrad.optimization.lama.UltraRefinedEvolutionaryGradientOptimizerV32 import ( + UltraRefinedEvolutionaryGradientOptimizerV32, + ) + + lama_register["UltraRefinedEvolutionaryGradientOptimizerV32"] = ( + UltraRefinedEvolutionaryGradientOptimizerV32 + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedEvolutionaryGradientOptimizerV32 = NonObjectOptimizer( + method="LLAMAUltraRefinedEvolutionaryGradientOptimizerV32" + ).set_name("LLAMAUltraRefinedEvolutionaryGradientOptimizerV32", register=True) +except Exception as e: # UltraRefinedEvolutionaryGradientOptimizerV32 print("UltraRefinedEvolutionaryGradientOptimizerV32 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedHybridEvolutionaryAnnealingOptimizer import UltraRefinedHybridEvolutionaryAnnealingOptimizer - - lama_register["UltraRefinedHybridEvolutionaryAnnealingOptimizer"] = UltraRefinedHybridEvolutionaryAnnealingOptimizer - res = NonObjectOptimizer(method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer(method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer").set_name("LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer", register=True) -except Exception as e: +try: # UltraRefinedHybridEvolutionaryAnnealingOptimizer + from nevergrad.optimization.lama.UltraRefinedHybridEvolutionaryAnnealingOptimizer import ( + UltraRefinedHybridEvolutionaryAnnealingOptimizer, + ) + + lama_register["UltraRefinedHybridEvolutionaryAnnealingOptimizer"] = ( + UltraRefinedHybridEvolutionaryAnnealingOptimizer + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer = NonObjectOptimizer( + method="LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer" + ).set_name("LLAMAUltraRefinedHybridEvolutionaryAnnealingOptimizer", register=True) +except Exception as e: # UltraRefinedHybridEvolutionaryAnnealingOptimizer print("UltraRefinedHybridEvolutionaryAnnealingOptimizer can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV50 import UltraRefinedHyperStrategicOptimizerV50 +try: # UltraRefinedHyperStrategicOptimizerV50 + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV50 import ( + UltraRefinedHyperStrategicOptimizerV50, + ) lama_register["UltraRefinedHyperStrategicOptimizerV50"] = UltraRefinedHyperStrategicOptimizerV50 - res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV50")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedHyperStrategicOptimizerV50 = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV50").set_name("LLAMAUltraRefinedHyperStrategicOptimizerV50", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV50")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedHyperStrategicOptimizerV50 = NonObjectOptimizer( + method="LLAMAUltraRefinedHyperStrategicOptimizerV50" + ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV50", register=True) +except Exception as e: # UltraRefinedHyperStrategicOptimizerV50 print("UltraRefinedHyperStrategicOptimizerV50 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV54 import UltraRefinedHyperStrategicOptimizerV54 +try: # UltraRefinedHyperStrategicOptimizerV54 + from nevergrad.optimization.lama.UltraRefinedHyperStrategicOptimizerV54 import ( + UltraRefinedHyperStrategicOptimizerV54, + ) lama_register["UltraRefinedHyperStrategicOptimizerV54"] = UltraRefinedHyperStrategicOptimizerV54 - res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV54")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedHyperStrategicOptimizerV54 = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV54").set_name("LLAMAUltraRefinedHyperStrategicOptimizerV54", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedHyperStrategicOptimizerV54")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedHyperStrategicOptimizerV54 = NonObjectOptimizer( + method="LLAMAUltraRefinedHyperStrategicOptimizerV54" + ).set_name("LLAMAUltraRefinedHyperStrategicOptimizerV54", register=True) +except Exception as e: # UltraRefinedHyperStrategicOptimizerV54 print("UltraRefinedHyperStrategicOptimizerV54 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedPrecisionEvolutionaryOptimizerV43 import UltraRefinedPrecisionEvolutionaryOptimizerV43 - - lama_register["UltraRefinedPrecisionEvolutionaryOptimizerV43"] = UltraRefinedPrecisionEvolutionaryOptimizerV43 - res = NonObjectOptimizer(method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer(method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43").set_name("LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43", register=True) -except Exception as e: +try: # UltraRefinedPrecisionEvolutionaryOptimizerV43 + from nevergrad.optimization.lama.UltraRefinedPrecisionEvolutionaryOptimizerV43 import ( + UltraRefinedPrecisionEvolutionaryOptimizerV43, + ) + + lama_register["UltraRefinedPrecisionEvolutionaryOptimizerV43"] = ( + UltraRefinedPrecisionEvolutionaryOptimizerV43 + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43 = NonObjectOptimizer( + method="LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43" + ).set_name("LLAMAUltraRefinedPrecisionEvolutionaryOptimizerV43", register=True) +except Exception as e: # UltraRefinedPrecisionEvolutionaryOptimizerV43 print("UltraRefinedPrecisionEvolutionaryOptimizerV43 can not be imported: ", e) -try: +try: # UltraRefinedRAMEDS from nevergrad.optimization.lama.UltraRefinedRAMEDS import UltraRefinedRAMEDS lama_register["UltraRefinedRAMEDS"] = UltraRefinedRAMEDS - res = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS").set_name("LLAMAUltraRefinedRAMEDS", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedRAMEDS = NonObjectOptimizer(method="LLAMAUltraRefinedRAMEDS").set_name( + "LLAMAUltraRefinedRAMEDS", register=True + ) +except Exception as e: # UltraRefinedRAMEDS print("UltraRefinedRAMEDS can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedSpiralDifferentialClimberV3 import UltraRefinedSpiralDifferentialClimberV3 +try: # UltraRefinedSpiralDifferentialClimberV3 + from nevergrad.optimization.lama.UltraRefinedSpiralDifferentialClimberV3 import ( + UltraRefinedSpiralDifferentialClimberV3, + ) lama_register["UltraRefinedSpiralDifferentialClimberV3"] = UltraRefinedSpiralDifferentialClimberV3 - res = NonObjectOptimizer(method="LLAMAUltraRefinedSpiralDifferentialClimberV3")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedSpiralDifferentialClimberV3 = NonObjectOptimizer(method="LLAMAUltraRefinedSpiralDifferentialClimberV3").set_name("LLAMAUltraRefinedSpiralDifferentialClimberV3", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedSpiralDifferentialClimberV3")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedSpiralDifferentialClimberV3 = NonObjectOptimizer( + method="LLAMAUltraRefinedSpiralDifferentialClimberV3" + ).set_name("LLAMAUltraRefinedSpiralDifferentialClimberV3", register=True) +except Exception as e: # UltraRefinedSpiralDifferentialClimberV3 print("UltraRefinedSpiralDifferentialClimberV3 can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraRefinedStrategicEvolutionaryOptimizerV60 import UltraRefinedStrategicEvolutionaryOptimizerV60 - - lama_register["UltraRefinedStrategicEvolutionaryOptimizerV60"] = UltraRefinedStrategicEvolutionaryOptimizerV60 - res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60 = NonObjectOptimizer(method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60").set_name("LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60", register=True) -except Exception as e: +try: # UltraRefinedStrategicEvolutionaryOptimizerV60 + from nevergrad.optimization.lama.UltraRefinedStrategicEvolutionaryOptimizerV60 import ( + UltraRefinedStrategicEvolutionaryOptimizerV60, + ) + + lama_register["UltraRefinedStrategicEvolutionaryOptimizerV60"] = ( + UltraRefinedStrategicEvolutionaryOptimizerV60 + ) + # res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60 = NonObjectOptimizer( + method="LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60" + ).set_name("LLAMAUltraRefinedStrategicEvolutionaryOptimizerV60", register=True) +except Exception as e: # UltraRefinedStrategicEvolutionaryOptimizerV60 print("UltraRefinedStrategicEvolutionaryOptimizerV60 can not be imported: ", e) -try: +try: # UltraRefinedStrategyDE from nevergrad.optimization.lama.UltraRefinedStrategyDE import UltraRefinedStrategyDE lama_register["UltraRefinedStrategyDE"] = UltraRefinedStrategyDE - res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraRefinedStrategyDE = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE").set_name("LLAMAUltraRefinedStrategyDE", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraRefinedStrategyDE = NonObjectOptimizer(method="LLAMAUltraRefinedStrategyDE").set_name( + "LLAMAUltraRefinedStrategyDE", register=True + ) +except Exception as e: # UltraRefinedStrategyDE print("UltraRefinedStrategyDE can not be imported: ", e) -try: - from nevergrad.optimization.lama.UltraSupremeEvolutionaryGradientHybridOptimizerV7 import UltraSupremeEvolutionaryGradientHybridOptimizerV7 - - lama_register["UltraSupremeEvolutionaryGradientHybridOptimizerV7"] = UltraSupremeEvolutionaryGradientHybridOptimizerV7 - res = NonObjectOptimizer(method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7 = NonObjectOptimizer(method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7").set_name("LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7", register=True) -except Exception as e: +try: # UltraSupremeEvolutionaryGradientHybridOptimizerV7 + from nevergrad.optimization.lama.UltraSupremeEvolutionaryGradientHybridOptimizerV7 import ( + UltraSupremeEvolutionaryGradientHybridOptimizerV7, + ) + + lama_register["UltraSupremeEvolutionaryGradientHybridOptimizerV7"] = ( + UltraSupremeEvolutionaryGradientHybridOptimizerV7 + ) + # res = NonObjectOptimizer(method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7 = NonObjectOptimizer( + method="LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7" + ).set_name("LLAMAUltraSupremeEvolutionaryGradientHybridOptimizerV7", register=True) +except Exception as e: # UltraSupremeEvolutionaryGradientHybridOptimizerV7 print("UltraSupremeEvolutionaryGradientHybridOptimizerV7 can not be imported: ", e) -try: +try: # UnifiedAdaptiveMemeticOptimizer from nevergrad.optimization.lama.UnifiedAdaptiveMemeticOptimizer import UnifiedAdaptiveMemeticOptimizer lama_register["UnifiedAdaptiveMemeticOptimizer"] = UnifiedAdaptiveMemeticOptimizer - res = NonObjectOptimizer(method="LLAMAUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer(method="LLAMAUnifiedAdaptiveMemeticOptimizer").set_name("LLAMAUnifiedAdaptiveMemeticOptimizer", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAUnifiedAdaptiveMemeticOptimizer")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAUnifiedAdaptiveMemeticOptimizer = NonObjectOptimizer( + method="LLAMAUnifiedAdaptiveMemeticOptimizer" + ).set_name("LLAMAUnifiedAdaptiveMemeticOptimizer", register=True) +except Exception as e: # UnifiedAdaptiveMemeticOptimizer print("UnifiedAdaptiveMemeticOptimizer can not be imported: ", e) -try: +try: # VectorizedRefinedSpiralSearch from nevergrad.optimization.lama.VectorizedRefinedSpiralSearch import VectorizedRefinedSpiralSearch lama_register["VectorizedRefinedSpiralSearch"] = VectorizedRefinedSpiralSearch - res = NonObjectOptimizer(method="LLAMAVectorizedRefinedSpiralSearch")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value - LLAMAVectorizedRefinedSpiralSearch = NonObjectOptimizer(method="LLAMAVectorizedRefinedSpiralSearch").set_name("LLAMAVectorizedRefinedSpiralSearch", register=True) -except Exception as e: + # res = NonObjectOptimizer(method="LLAMAVectorizedRefinedSpiralSearch")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value + LLAMAVectorizedRefinedSpiralSearch = NonObjectOptimizer( + method="LLAMAVectorizedRefinedSpiralSearch" + ).set_name("LLAMAVectorizedRefinedSpiralSearch", register=True) +except Exception as e: # VectorizedRefinedSpiralSearch print("VectorizedRefinedSpiralSearch can not be imported: ", e) -try: +try: # eQGSA_v2 from nevergrad.optimization.lama.eQGSA_v2 import eQGSA_v2 lama_register["eQGSA_v2"] = eQGSA_v2 - res = NonObjectOptimizer(method="LLAMAeQGSA_v2")(5, 15).minimize(lambda x: sum((x-.7)**2.)).value + # res = NonObjectOptimizer(method="LLAMAeQGSA_v2")(5, 15).minimize(lambda x: sum((x - 0.7) ** 2.0)).value LLAMAeQGSA_v2 = NonObjectOptimizer(method="LLAMAeQGSA_v2").set_name("LLAMAeQGSA_v2", register=True) -except Exception as e: +except Exception as e: # eQGSA_v2 print("eQGSA_v2 can not be imported: ", e) From 343d3c32e44747905ff49bbfec14df7ba2ec30e8 Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Thu, 27 Jun 2024 19:26:57 +0200 Subject: [PATCH 5/6] multidim --- nevergrad/benchmark/experiments.py | 45 +- nevergrad/benchmark/plotting.py | 16 +- ...ncedMemeticQuantumDifferentialOptimizer.py | 13 +- ...eOptimizerWithAdaptiveElitismAndRestart.py | 11 +- ...liteQuantumDifferentialMemeticOptimizer.py | 13 +- ...ialParticleOptimizerWithAdaptiveElitism.py | 11 +- ...liteQuantumDifferentialMemeticOptimizer.py | 13 +- ...alParticleOptimizerWithAdaptiveRestarts.py | 11 +- ...articleOptimizerWithEliteGuidedMutation.py | 13 +- ...ialParticleOptimizerWithEliteRefinement.py | 11 +- ...leOptimizerWithEnhancedAdaptiveRestarts.py | 13 +- ...inedMemeticQuantumDifferentialOptimizer.py | 13 +- ...inedQuantumDifferentialMemeticOptimizer.py | 13 +- nevergrad/optimization/optimizerlib.py | 768 ++++++++++++++++++ nevergrad/optimization/recastlib.py | 4 +- scripts/plot_dagstuhloid.sh | 8 +- 16 files changed, 914 insertions(+), 62 deletions(-) diff --git a/nevergrad/benchmark/experiments.py b/nevergrad/benchmark/experiments.py index 64728f773..9673180d9 100644 --- a/nevergrad/benchmark/experiments.py +++ b/nevergrad/benchmark/experiments.py @@ -61,21 +61,26 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore # "SmallLognormalDiscreteOnePlusOne", # "XLognormalDiscreteOnePlusOne", # ])] - return [ - # "BigLognormalDiscreteOnePlusOne", - # "DiscreteLenglerOnePlusOne", - # "NgLn", - # "SmallLognormalDiscreteOnePlusOne", - # "XLognormalDiscreteOnePlusOne", - "XSmallLognormalDiscreteOnePlusOne", - "MultiLN", - "NgRS", - "NgIohRS", - "NgIohMLn", - "NgIohLn", - # "LognormalDiscreteOnePlusOne", - # "HugeLognormalDiscreteOnePlusOne", - ] + lama = ["NgIohTuned"] + [o for o in list(ng.optimizers.registry.keys()) if "LAMA" in o] + optims = [o for o in ng.optimizers.registry.keys() if "LAMA" in o] + lama = ["NgIohTuned"] * 10 + [o for o in optims if any([(x in o) for x in ["ADEM", "ptiveHarmonySearch", "CMAESDE","bridDEPSOWithDyn", "CMA","ERADS_Q","EnhancedDynamicPrec","hancedFirew","QPSO","QuantumDifferentialPart"]])] + return list(np.random.choice(lama, 55)) + # "BigLognormalDiscreteOnePlusOne", + # "DiscreteLenglerOnePlusOne", + # "NgLn", + # "SmallLognormalDiscreteOnePlusOne", + # "XLognormalDiscreteOnePlusOne", + if False: + return [ + "XSmallLognormalDiscreteOnePlusOne", + "MultiLN", + "NgRS", + "NgIohRS", + "NgIohMLn", + "NgIohLn", + # "LognormalDiscreteOnePlusOne", + # "HugeLognormalDiscreteOnePlusOne", + ] # return ["CSEC11"] # return [np.random.choice(["CSEC11", "SQOPSODCMA", "NgIoh4", "NGOpt"])] # return ["LPCMA"] #return [np.random.choice(["CSEC10", "DSproba", "NgIoh4", "DSbase", "DS3p", "DSsubspace"])] @@ -1691,15 +1696,15 @@ def yabbob( for name in names for rotation in [True, False] for num_blocks in ([1] if not split else [7, 12]) - for d in ( + for d in ( [100, 1000, 3000] if hd else ( [2, 5, 10, 15] if tuning - else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 10, 50])) + else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 5, 10, 50])) # added 5 for lama stuff ) - ) + ) ] assert reduction_factor in [1, 7, 13, 17] # needs to be a cofactor @@ -1767,6 +1772,8 @@ def f(x): if bounded: budgets = [10, 20, 40, 100, 300] optims = refactor_optims(optims) + if hd or big: + optims = [np.random.choice(optims)] for optim in optims: for function in functions: for budget in budgets: @@ -2121,6 +2128,7 @@ def zp_ms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]: yield Experiment(function, optim, budget=budget, num_workers=nw, seed=next(seedg)) +@registry.register def nozp_noms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]: """Testing optimizers on exponentiated problems. Cigar, Ellipsoid. @@ -3852,6 +3860,7 @@ def lsgo() -> tp.Iterator[Experiment]: optims = ["DiagonalCMA", "TinyQODE", "OpoDE", "OpoTinyDE"] optims = ["TinyQODE", "OpoDE", "OpoTinyDE"] optims = refactor_optims(optims) + optims = [np.random.choice(optims)] for i in range(1, 16): # [np.random.choice(list(range(1, 16)))]: for optim in optims: for budget in [120000, 600000, 3000000]: diff --git a/nevergrad/benchmark/plotting.py b/nevergrad/benchmark/plotting.py index 300d9e8fb..deaf6d77c 100644 --- a/nevergrad/benchmark/plotting.py +++ b/nevergrad/benchmark/plotting.py @@ -136,6 +136,17 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector: df = utils.Selector(df) if "error" not in df.columns: # backward compatibility return df # type: ignore + dropped = [] + non_dropped = 0 + for index, row in df.iterrows(): + try: + if np.isnan(row["loss"]): + pass + non_dropped += 1 + except: + dropped += [index] + print(f"Dropped: {len(dropped)}, Non-dropped: {non_dropped}") + df.drop(dropped, inplace=True) # errors with no recommendation nandf = df.select(loss=np.isnan) for row in nandf.itertuples(): @@ -153,7 +164,10 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector: err_inds = set(nandf.index) output = df.loc[[i for i in df.index if i not in err_inds], [c for c in df.columns if c != "error"]] # cast nans in loss to infinity - df.loc[np.isnan(df.loss), "loss"] = float("inf") + try: + df.loc[np.isnan(df.loss), "loss"] = float("inf") + except Exception as e: + print("pb with isnan(loss): {e}") # assert ( not output.loc[:, "loss"].isnull().values.any() diff --git a/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py b/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py index d24ce299a..97d7cac04 100644 --- a/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py +++ b/nevergrad/optimization/lama/AdvancedMemeticQuantumDifferentialOptimizer.py @@ -128,10 +128,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py b/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py index 71f202ec6..5604ea1be 100644 --- a/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py +++ b/nevergrad/optimization/lama/AdvancedQuantumDifferentialParticleOptimizerWithAdaptiveElitismAndRestart.py @@ -126,9 +126,14 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) if evaluations % (self.swarm_size * 10) == 0: diversity = np.std(fitness) diff --git a/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py index 01484305b..c3c1db999 100644 --- a/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py +++ b/nevergrad/optimization/lama/EliteQuantumDifferentialMemeticOptimizer.py @@ -127,10 +127,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py index 9019763be..7f63604b2 100644 --- a/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py +++ b/nevergrad/optimization/lama/EnhancedQuantumDifferentialParticleOptimizerWithAdaptiveElitism.py @@ -126,9 +126,14 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) if evaluations % (self.swarm_size * 10) == 0: diversity = np.std(fitness) diff --git a/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py index e07b666f4..0e2a5975f 100644 --- a/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py +++ b/nevergrad/optimization/lama/ImprovedEliteQuantumDifferentialMemeticOptimizer.py @@ -127,10 +127,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py index d9d79e6fa..f85ed4312 100644 --- a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithAdaptiveRestarts.py @@ -130,9 +130,14 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) if evaluations % (self.swarm_size * 10) == 0: diversity = np.std(fitness) diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py index c41925986..be03414d9 100644 --- a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteGuidedMutation.py @@ -127,10 +127,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py index 65d46cb37..4e9e92ab1 100644 --- a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEliteRefinement.py @@ -129,9 +129,14 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.adaptive_restart(particles, fitness, personal_bests, personal_best_fits, func) if evaluations % (self.swarm_size * 10) == 0: diversity = np.std(fitness) diff --git a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py index dd4ce61d1..d19fd0bd1 100644 --- a/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py +++ b/nevergrad/optimization/lama/QuantumDifferentialParticleOptimizerWithEnhancedAdaptiveRestarts.py @@ -127,10 +127,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py b/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py index c75f081bb..c626525ff 100644 --- a/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py +++ b/nevergrad/optimization/lama/RefinedMemeticQuantumDifferentialOptimizer.py @@ -128,10 +128,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py b/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py index d22497b19..ef64efd85 100644 --- a/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py +++ b/nevergrad/optimization/lama/RefinedQuantumDifferentialMemeticOptimizer.py @@ -127,10 +127,15 @@ def __call__(self, func): else: self.local_search_prob = max(0.1, self.local_search_prob - 0.1) - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit = ( - self.enhanced_adaptive_restart( - particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func - ) + ( + particles, + fitness, + personal_bests, + personal_best_fits, + global_best, + global_best_fit, + ) = self.enhanced_adaptive_restart( + particles, fitness, personal_bests, personal_best_fits, global_best, global_best_fit, func ) if evaluations % (self.swarm_size * 10) == 0: diff --git a/nevergrad/optimization/optimizerlib.py b/nevergrad/optimization/optimizerlib.py index fedb33b97..4b0f3596d 100644 --- a/nevergrad/optimization/optimizerlib.py +++ b/nevergrad/optimization/optimizerlib.py @@ -7482,3 +7482,771 @@ class NgIohTuned(CSEC11): warmup_ratio=0.5, ).set_name("MultiLN", register=True) NgIohMLn = Chaining([MultiLN, CSEC11], ["tenth"]).set_name("NgIohMLn", register=True) + + +# Below a dirty hack for removing buggy stuff. +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV25") +registry.unregister("LLAMARefinedEnhancedOptimizedEvolutiveStrategy") +registry.unregister("LLAMAEnhancedAdaptiveHarmonyTabuOptimization") +registry.unregister("LLAMAEnhancedDualStrategyHybridOptimizer") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV8") +registry.unregister("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlight") +registry.unregister("LLAMAEnhancedRefinedDynamicFireworkAlgorithm") +registry.unregister("LLAMAQuantumIterativeRefinementOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV3") +registry.unregister("LLAMAEnhancedQuantumAnnealingOptimizer") +registry.unregister("LLAMAAdaptiveEnhancedExplorationGravitationalSwarmOptimization") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionRefined") +registry.unregister("LLAMAImprovedEnhancedDynamicLocalSearchFireworkAlgorithm") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV19") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV10") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV23") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutation") +registry.unregister("LLAMAAdaptiveEnhancedHarmonySearchWithLevyFlightInspiration") +registry.unregister("LLAMAEnhancedRefinedGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligenceV2") +registry.unregister("LLAMASelfAdaptiveOppositionBasedHarmonySearchDE") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV3") +registry.unregister("LLAMAHybridAdaptiveMultiPhaseEvolutionV2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV15") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV15") +registry.unregister("LLAMAAdaptiveOppositionBasedDifferentialEvolution") +registry.unregister("LLAMAHybridGradientMemoryAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV26") +registry.unregister("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV3") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV2") +registry.unregister("LLAMAAdaptiveOppositionBasedHarmonySearchDynamicBandwidthDE") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV13") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV2") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV5") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV12") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV13") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV25") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV26") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParameters") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV31") +registry.unregister("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV4") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV4") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationRefined") +registry.unregister("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV4") +registry.unregister("LLAMAHarmonyTabuOptimization") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV29") +registry.unregister("LLAMADynamicRefinedGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV4") +registry.unregister("LLAMAEnhancedDiversifiedAdaptiveHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV16") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV3") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearch") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV9") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV15") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV24") +registry.unregister("LLAMAAdaptivePrecisionDivideSearch") +registry.unregister("LLAMAAdvancedEliteDynamicHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV4") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinal") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV5") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV8") +registry.unregister("LLAMAAdaptiveEvolutionaryDifferentialOptimization") +registry.unregister("LLAMACMADifferentialEvolutionPSO") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV27") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV4") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionImproved") +registry.unregister("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithmRefined") +registry.unregister("LLAMAQuantumParticleSwarmDifferentialEvolution") +registry.unregister("LLAMAEnhancedHarmonyTabuSearchV2") +registry.unregister("LLAMAAdaptiveHarmonySearchWithSimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveLevyHarmonySearch") +registry.unregister("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealingWithGradient") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV3") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchOptimization") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV21") +registry.unregister("LLAMAAdvancedFireworkAlgorithmWithAdaptiveMutation") +registry.unregister("LLAMAHybridAdaptiveOrthogonalDifferentialEvolution") +registry.unregister("LLAMADynamicElitistHybridOptimizer") +registry.unregister("LLAMAHybridAdaptiveDifferentialEvolutionWithMemoryAndEliteSearch") +registry.unregister("LLAMAHybridAdaptiveMemoryAnnealing") +registry.unregister("LLAMAHybridGradientAnnealingWithMemory") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV7") +registry.unregister("LLAMAEnhancedQuantumFireworksAlgorithmV2") +registry.unregister("LLAMADualConvergenceEvolutiveStrategy") +registry.unregister("LLAMAAdvancedAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAAdaptiveRestartHybridOptimizer") +registry.unregister("LLAMAStochasticGradientHybridOptimization") +registry.unregister("LLAMADynamicGradientBoostedRefinementAnnealing") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV14") +registry.unregister("LLAMAMultiScaleGradientSearch") +registry.unregister("LLAMADynamicHybridAnnealing") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV3") +registry.unregister("LLAMAAdaptiveHarmonySearchWithLocalOptimization") +registry.unregister("LLAMAEnhancedAdvancedAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedEnhancedHarmonySearchWithImprovedAdaptiveLevyFlightInspiration") +registry.unregister("LLAMAAdaptiveHarmonicSwarmOptimizationV3") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV12") +registry.unregister("LLAMAEnhancedAdaptiveSwarmHarmonicOptimization") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV4") +registry.unregister("LLAMAMemoryEnhancedAdaptiveMultiPhaseAnnealing") +registry.unregister("LLAMAMemoryEnhancedAdaptiveAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV7") +registry.unregister("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV19") +registry.unregister("LLAMAEnhancedConvergentDifferentialEvolutionV2") +registry.unregister("LLAMADifferentialEvolutionPSOHybrid") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithAdaptiveLevyFlightInspiration") +registry.unregister("LLAMAEnhancedQuantumAdaptiveFireworksOptimizer") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV5") +registry.unregister("LLAMAEnhancedOrthogonalDifferentialEvolutionV4") +registry.unregister("LLAMAHybridQuantumDifferentialEvolution") +registry.unregister("LLAMAAdvancedGradientBoostedMemorySimulatedAnnealingWithAdaptiveExploration") +registry.unregister("LLAMAAdaptiveMultiOperatorDifferentialEvolution") +registry.unregister("LLAMAEnhancedDynamicLevyHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV21") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchFinal") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV9") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV8") +registry.unregister("LLAMAEnhancedAdaptiveMemeticOptimizerV7") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV19") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV7") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV11") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV23") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV17") +registry.unregister("LLAMAImprovedEnhancedAdaptiveLevyHarmonySearchV4") +registry.unregister("LLAMARefinedDualConvergenceEvolutiveStrategy") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV22") +registry.unregister("LLAMAEnhancedHarmonicSwarmOptimizationV3") +registry.unregister("LLAMAImprovedAdaptiveMemeticHybridOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmOptimizationWithDiversityPreservation") +registry.unregister("LLAMAAdaptiveEliteDiverseHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV3") +registry.unregister("LLAMAEnhancedHybridGradientAnnealingWithMemory") +registry.unregister("LLAMAGradientInformedAdaptiveSearch") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV4") +registry.unregister("LLAMAAdvancedDynamicHybridOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV31") +registry.unregister("LLAMAFinalOptimizedEnhancedDynamicFireworkAlgorithm") +registry.unregister("LLAMAHybridGradientMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedStochasticMetaHeuristicOptimizer") +registry.unregister("LLAMAQuantumEnhancedDifferentialEvolution") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV3") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV2") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmImproved") +registry.unregister("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthABC") +registry.unregister("LLAMAQuantumInspiredAdaptiveHybridOptimizer") +registry.unregister("LLAMAEnhancedHarmonyTabuOptimization") +registry.unregister("LLAMAImprovedOppositionBasedDifferentialEvolution") +registry.unregister("LLAMAImprovedEnhancedFireworkAlgorithmWithAdaptiveLocalSearch") +registry.unregister("LLAMAAdaptiveHarmonicSwarmOptimization") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV14") +registry.unregister("LLAMAAdaptiveMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedDynamicHybridHarmonySearchWithAdaptiveMutationV21") +registry.unregister("LLAMAEnhancedHarmonySearchOB") +registry.unregister("LLAMARefinedGradientBoostedAnnealingWithAdaptiveMemoryAndExploration") +registry.unregister("LLAMAQuantumAdaptiveRefinementStrategyV2") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV30") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV18") +registry.unregister("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV17") +registry.unregister("LLAMAEnhancedDifferentiatedAdaptiveEvolution") +registry.unregister("LLAMAEnhancedOppositionBasedHarmonySearch") +registry.unregister("LLAMAEnhancedQuantumHarmonySearch") +registry.unregister("LLAMAAdvancedHybridSimulatedAnnealingWithGuidedExploration") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV6") +registry.unregister("LLAMAEnhancedDynamicRefinementGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithBetterAdaptiveLocalSearchOptimization") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV6") +registry.unregister("LLAMAAdaptiveMemeticHybridOptimizer") +registry.unregister("LLAMAAdaptiveEliteHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlightInspiration") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV13") +registry.unregister("LLAMAAdaptiveEvolutionaryDifferentialPopulationStrategy") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV3") +registry.unregister("LLAMAAdvancedRefinedGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV3") +registry.unregister("LLAMAAdvancedRefinedGradientBoostedAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV4") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSizeRefined") +registry.unregister("LLAMAEnhancedConvergentDifferentialEvolution") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligenceV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV20") +registry.unregister("LLAMAImprovedSelfAdaptiveHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV19") +registry.unregister("LLAMAEnhancedHarmonyTabuSearchV4") +registry.unregister("LLAMAEnhancedDynamicAdaptiveMemoryAnnealing") +registry.unregister("LLAMAAdaptiveHarmonyTabuOptimization") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV10") +registry.unregister("LLAMAEnhancedOrthogonalDifferentialEvolutionV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV25") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV11") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV27") +registry.unregister("LLAMAAdaptiveEnhancedFireworkAlgorithmWithLocalSearch") +registry.unregister("LLAMAEnhancedDynamicRefinedGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMADynamicGradientEnhancedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution_v2") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV2") +registry.unregister("LLAMAAdaptiveDiversifiedHarmonySearch") +registry.unregister("LLAMAQuantumSimulatedAnnealingHybridOptimizer") +registry.unregister("LLAMAMetaHarmonicSearch") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithAdaptiveMutation") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV3") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchOptimized") +registry.unregister("LLAMAEnhancedAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV29") +registry.unregister("LLAMAEnhancedAdaptiveHarmonyTabuSearchV2") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV18") +registry.unregister("LLAMAEnhancedEvolutionaryParticleSwarmOptimizer") +registry.unregister("LLAMAEnhancedDynamicHarmonySearchV5") +registry.unregister("LLAMAQuantumLevyAdaptiveMemeticOptimizerV3") +registry.unregister("LLAMAAdaptiveHybridAnnealingWithGradientBoost") +registry.unregister("LLAMAAdaptiveHarmonySearchWithLevyFlightImprovement") +registry.unregister("LLAMAFinalEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV2") +registry.unregister("LLAMAEnhancedEvolutionaryGradientSearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulationV2") +registry.unregister("LLAMAAdaptiveOppositionBasedDifferentialEvolutionImproved") +registry.unregister("LLAMAAdaptiveMemoryGradientAnnealingPlus") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV12") +registry.unregister("LLAMAEnhancedHybridHarmonySearchWithAdaptiveMutationV20") +registry.unregister("LLAMAGradientInformedAdaptiveDirectionSearch") +registry.unregister("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizerV2") +registry.unregister("LLAMAAdvancedRefinedGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchImprovedRefined") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV3") +registry.unregister("LLAMAImprovedEnhancedStochasticMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV22") +registry.unregister("LLAMAQuantumGeneticDifferentialEvolution") +registry.unregister("LLAMAImprovedQuantumHarmonySearch") +registry.unregister("LLAMAEnhancedExplorationGravitationalSwarmOptimization") +registry.unregister("LLAMAQuantumAnnealingDifferentialEvolution") +registry.unregister("LLAMADynamicallyAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV23") +registry.unregister("LLAMAAdvancedQuantumMemeticDifferentialEvolution") +registry.unregister("LLAMAMetaHarmonicSearch2") +registry.unregister("LLAMAOptimizedGradientBoostedMemoryAnnealingWithAdaptiveSearch") +registry.unregister("LLAMAAdaptiveHybridFireworkAlgorithm") +registry.unregister("LLAMAAdvancedDynamicFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV6") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV17") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV3") +registry.unregister("LLAMAEnhancedAdaptiveGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV23") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV28") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV30") +registry.unregister("LLAMARefinedEliteDynamicHybridOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV25") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV14") +registry.unregister("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionV2") +registry.unregister("LLAMARefinedAdaptiveEnhancedFireworkAlgorithmWithLocalSearch") +registry.unregister("LLAMAImprovedAdaptiveHarmonySearchWithCuckooInspiration") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV18") +registry.unregister("LLAMAEnhancedHarmonicSwarmOptimizationV4") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV17") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithHybridLocalSearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonyTabuSearchV4") +registry.unregister("LLAMAEnhancedAdaptiveMultiPhaseAnnealing") +registry.unregister("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV9") +registry.unregister("LLAMAOptimizedDynamicGradientBoostedSimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV5") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV22") +registry.unregister("LLAMAAdaptiveCMADiffEvoPSO") +registry.unregister("LLAMAEnhancedAdaptiveDolphinPodOptimization") +registry.unregister("LLAMAHybridGradientBoostedMemoryAnnealingPlus") +registry.unregister("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV3") +registry.unregister("LLAMAEnhancedAdaptiveControlledMemoryAnnealing") +registry.unregister("LLAMADolphinPodOptimization") +registry.unregister("LLAMAEnhancedEnhancedDynamicAdaptiveGravitationalSwarmIntelligence") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV10") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchRefined") +registry.unregister("LLAMANovelDynamicFireworkAlgorithm") +registry.unregister("LLAMAEnhancedOppositionBasedDifferentialEvolution") +registry.unregister("LLAMAEnhancedParticleSwarmOptimizerV6") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV15") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligenceV26") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV7") +registry.unregister("LLAMAEnhancedGravitationSwarmOptimizationV2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV16") +registry.unregister("LLAMAAdaptiveDifferentialCrossover") +registry.unregister("LLAMAAdaptiveDolphinPodOptimization") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV8") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV10") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionWithSelfAdaptiveParametersAndCrossover") +registry.unregister("LLAMAHybridGradientMemoryAnnealingV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV22") +registry.unregister("LLAMADynamicEliteAdaptiveHybridOptimizerV2") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV18") +registry.unregister("LLAMAEnhancedHarmonyTabuOptimizationV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV4") +registry.unregister("LLAMADynamicAdaptiveGravitationalSwarmIntelligenceV2") +registry.unregister("LLAMAHybridAdaptiveMultiPhaseEvolution") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchImprovedWithLocalOptimization") +registry.unregister("LLAMAAdaptiveDynamicFireworkAlgorithm") +registry.unregister("LLAMAAdaptiveLevyHarmonySearch") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV4") +registry.unregister("LLAMAEnhancedHarmonicLevyDolphinOptimization") +registry.unregister("LLAMAEnhancedQuantumHarmonySearchABGBRefined") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV12") +registry.unregister("LLAMAImprovedEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV11") +registry.unregister("LLAMAImprovedEnhancedQuantumHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveMultiPhaseAnnealingWithGradient") +registry.unregister("LLAMAAdaptiveDynamicHarmonySearch") +registry.unregister("LLAMAAdaptiveEliteMemeticOptimizerV6") +registry.unregister("LLAMAAdaptiveDirectionalSearch") +registry.unregister("LLAMAEnhancedUltimateDynamicFireworkAlgorithm") +registry.unregister("LLAMANovelHarmonyTabuSearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV5") +registry.unregister("LLAMAEnhancedDynamicLevyHarmonySearchV2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV11") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearch") +registry.unregister("LLAMARefinedEnhancedAdaptiveHarmonySearch") +registry.unregister("LLAMAImprovedAdaptiveLevyHarmonySearch") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV6") +registry.unregister("LLAMAQuantumInspiredAdaptiveDEHybridLocalSearch") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV15") +registry.unregister("LLAMAQuantumLevyEliteMemeticDEHybridOptimizer") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizer") +registry.unregister("LLAMADynamicAdaptiveGravitationalSwarmIntelligence") +registry.unregister("LLAMAEvolutionaryParticleSwarmOptimizer") +registry.unregister("LLAMARefinedAdaptiveSimulatedAnnealingWithSmartMemory") +registry.unregister("LLAMADynamicLevyHarmonySearch") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV16") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV6") +registry.unregister("LLAMAEnhancedParticleSwarmOptimizerV4") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV9") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBImproved") +registry.unregister("LLAMAGradientEnhancedAdaptiveAnnealing") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV6") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV4") +registry.unregister("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidth") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV13") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithHybridSearch") +registry.unregister("LLAMADifferentialHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV8") +registry.unregister("LLAMAImprovedEnhancedDynamicDifferentialEvolution") +registry.unregister("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimizationV2") +registry.unregister("LLAMAEnhancedHarmonyTabuSearch") +registry.unregister("LLAMAEnhancedUltimateDynamicFireworkAlgorithmImproved") +registry.unregister("LLAMADynamicAdaptiveEliteHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV5") +registry.unregister("LLAMAImprovedSelfAdaptiveOppositionBasedDifferentialEvolution") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV23") +registry.unregister("LLAMAAdaptiveHarmonySearchWithLocalOptimizationImproved") +registry.unregister("LLAMAEnhancedQuantumHarmonySearchAB") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV29") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearchRefined") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV5") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionV2") +registry.unregister("LLAMAEnhancedHybridAdaptiveHarmonicFireworksTabuSearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicOptimizationV2") +registry.unregister("LLAMAAdaptiveHybridAnnealingWithMemoryRefinement") +registry.unregister("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV6") +registry.unregister("LLAMAOptimizedQuantumHarmonySearch") +registry.unregister("LLAMARefinementEnhancedHybridOptimizer") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV15") +registry.unregister("LLAMARefinedAdaptiveQuantumDifferentialEvolution") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV3") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV10") +registry.unregister("LLAMAEnhancedDynamicAdaptiveDifferentialEvolution") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLocalOptimizationAndDiversificationV2") +registry.unregister("LLAMAAdaptiveSimulatedAnnealingWithSmartMemory") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV33") +registry.unregister("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV5") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithRefinedLevyFlight") +registry.unregister("LLAMAEnhancedAdaptiveOppositionBasedDifferentialEvolution") +registry.unregister("LLAMARefinedOptimizedDynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV21") +registry.unregister("LLAMAEnhancedRobustDifferentialEvolutionWithMemoryAndEliteSearch") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV28") +registry.unregister("LLAMAQuantumAdaptiveFireworksOptimizer") +registry.unregister("LLAMAAdaptiveHarmonicTabuSearchV17") +registry.unregister("LLAMAAdaptiveGradientBoostedMemoryAnnealingWithExplorationControl") +registry.unregister("LLAMAAdvancedDynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMADynamicGradientBoostedMemorySimulatedAnnealingV2") +registry.unregister("LLAMAEnhancedEnhancedMetaHeuristicOptimizerV3") +registry.unregister("LLAMAEnhancedHyperParameterTunedMetaHeuristicOptimizerV4") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV27") +registry.unregister("LLAMARefinedGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV10") +registry.unregister("LLAMAAdaptiveMemoryGradientAnnealing") +registry.unregister("LLAMAQuantumHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamicImproved") +registry.unregister("LLAMAEnhancedGradientBoostedAnnealingWithAdaptiveMemory") +registry.unregister("LLAMAEnhancedConvergentDifferentialEvolutionV3") +registry.unregister("LLAMAAdaptiveSwarmHarmonicOptimizationV4") +registry.unregister("LLAMAEnhancedAdaptiveDynamicHarmonySearchV3") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV5") +registry.unregister("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithEliteLearning") +registry.unregister("LLAMAEnhancedHarmonyTabuSearchV6") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithAdaptiveLocalSearch") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutationImproved") +registry.unregister("LLAMARefinedGradientBoostedMemorySimulatedAnnealingPlus") +registry.unregister("LLAMAEnhancedDynamicEliteAnnealingDE") +registry.unregister("LLAMAEnhancedDynamicAdaptiveGravitationalSwarmIntelligence") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionV3") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithBeeColonyOptimization") +registry.unregister("LLAMAEnhancedGravitationSwarmOptimization") +registry.unregister("LLAMAFinalEnhancedDynamicLocalSearchFireworkAlgorithm") +registry.unregister("LLAMAOptimizedAdaptiveSimulatedAnnealingWithSmartMemory") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV14") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverRefined") +registry.unregister("LLAMAEnhancedHybridAdaptiveMemoryAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmFinal") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV20") +registry.unregister("LLAMAEnhancedEvolutionaryParticleSwarmOptimizerV2") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV13") +registry.unregister("LLAMAAdaptiveGradientBoostedMemoryExploration") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithHybridInspirationV16") +registry.unregister("LLAMAEnhancedEnhancedAdaptiveHarmonicTabuSearchV24") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV6") +registry.unregister("LLAMAEnhancedOrthogonalDifferentialEvolutionImproved") +registry.unregister("LLAMAEnhancedQuantumHarmonySearchABGB") +registry.unregister("LLAMAAdaptiveQuantumMemeticEvolutionaryOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV2") +registry.unregister("LLAMAGravitationalSwarmIntelligence") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV14") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV11") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV7") +registry.unregister("LLAMAAdaptiveHarmonicTabuSearchV12") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmRedesigned") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV5") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV4") +registry.unregister("LLAMAAdvancedAdaptiveMemorySimulatedAnnealing") +registry.unregister("LLAMAAdaptiveGravitationalSwarmOptimizationWithDynamicDiversityPreservation") +registry.unregister("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmRefined") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV5") +registry.unregister("LLAMAAdvancedHybridSimulatedAnnealingWithAdaptiveMemory") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinal") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovement") +registry.unregister("LLAMAEnhancedEnhancedAdaptiveOppositionBasedDifferentialEvolution") +registry.unregister("LLAMAEnhancedAdaptiveDynamicHarmonySearchV2") +registry.unregister("LLAMAImprovedEnhancedDiversifiedGravitationalSwarmOptimization") +registry.unregister("LLAMAImprovedEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV6") +registry.unregister("LLAMARefinedGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV8") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV8") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV24") +registry.unregister("LLAMAEnhancedDynamicAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV4") +registry.unregister("LLAMAOctopusSwarmAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicPopulation") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV20") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionDynamic") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchImproved") +registry.unregister("LLAMAAdvancedEnhancedAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV8") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV14") +registry.unregister("LLAMAAdvancedOppositionBasedHarmonySearchDynamicBandwidthSADE") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmRefined") +registry.unregister("LLAMAQuantumLevyEnhancedMemeticOptimizerV2") +registry.unregister("LLAMADynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedParticleSwarmOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV9") +registry.unregister("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV3") +registry.unregister("LLAMAAdaptiveControlledMemoryAnnealing") +registry.unregister("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV7") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV24") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV18") +registry.unregister("LLAMAQuantumGradientGuidedFireworksAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlight") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV8") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV8") +registry.unregister("LLAMAEnhancedMetaHeuristicOptimizerV2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV27") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV28") +registry.unregister("LLAMAQuantumInspiredAdaptiveDEElitistLocalSearch") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV12") +registry.unregister("LLAMAEnhancedAdaptiveMemoryGradientAnnealingWithExplorationBoost") +registry.unregister("LLAMAHybridGradientMemoryAnnealingV2") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV22") +registry.unregister("LLAMAHybridAdaptiveMemeticOptimizerV4") +registry.unregister("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightV2") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV4") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV18") +registry.unregister("LLAMAEnhancedMultiStageGradientBoostedAnnealing") +registry.unregister("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV2") +registry.unregister("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedDynamicHarmonyTabuSearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV12") +registry.unregister("LLAMAEnhancedDualPhaseAdaptiveMemeticDifferentialEvolution") +registry.unregister("LLAMARefinedQuantumLevyMemeticDifferentialEvolution") +registry.unregister("LLAMAEnhancedStochasticDifferentialEvolutionWithAdaptiveParametersAndCrossover") +registry.unregister("LLAMAUltimateDynamicFireworkAlgorithm") +registry.unregister("LLAMAAdaptiveGradientEnhancedMultiPhaseAnnealing") +registry.unregister("LLAMAEnhancedDifferentialEvolutionParticleSwarmOptimizer") +registry.unregister("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithm") +registry.unregister("LLAMAAdaptiveDifferentialEvolutionHarmonySearch") +registry.unregister("LLAMAAdaptiveHarmonicSwarmOptimizationV2") +registry.unregister("LLAMAHybridEnhancedGravitationalSwarmIntelligence") +registry.unregister("LLAMAAdaptiveRefinedGradientBoostedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV2") +registry.unregister("LLAMAQuantumEvolutionaryOptimization") +registry.unregister("LLAMAAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV7") +registry.unregister("LLAMAEnhancedHarmonicSwarmOptimization") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedGravitationalSwarmOptimization") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV12") +registry.unregister("LLAMAEnhancedDynamicallyAdaptiveFireworkAlgorithmImproved") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV15") +registry.unregister("LLAMAEnhancedHarmonyTabuSearchV7") +registry.unregister("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionHyperMutation") +registry.unregister("LLAMAHybridAdaptiveHarmonicFireworksTabuSearch") +registry.unregister("LLAMAAdaptiveDifferentialEvolutionWithDynamicPopulationV2") +registry.unregister("LLAMARefinedAdaptiveMemeticDifferentialEvolution") +registry.unregister("LLAMAEnhancedOrthogonalDifferentialEvolution") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV6") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV4") +registry.unregister("LLAMAEnhancedAdaptiveLevyHarmonySearchV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonyTabuSearchV3") +registry.unregister("LLAMAQuantumLevyAdaptiveDEHybridLocalSearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV4") +registry.unregister("LLAMADynamicAdaptiveFireworkAlgorithm") +registry.unregister("LLAMAImprovedEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV21") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalII") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligenceV22") +registry.unregister("LLAMACoordinatedAdaptiveHybridOptimizer") +registry.unregister("LLAMAImprovedEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV19") +registry.unregister("LLAMAEnhancedExplorationGravitationalSwarmOptimizationV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV2") +registry.unregister("LLAMAMemoryBasedSimulatedAnnealing") +registry.unregister("LLAMAAdvancedHybridMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveMultiMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmV2") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV22") +registry.unregister("LLAMAAdaptiveOrthogonalDifferentialEvolution") +registry.unregister("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithm") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV16") +registry.unregister("LLAMAAdvancedImprovedMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV9") +registry.unregister("LLAMAProgressivePopulationRefinementStrategy") +registry.unregister("LLAMAEnhancedOppositionBasedHarmonySearchDynamicBandwidthSADE") +registry.unregister("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmEnhanced") +registry.unregister("LLAMAEnhancedHarmonySearchWithAdaptiveLevyFlightInspiration") +registry.unregister("LLAMAAdaptiveHarmonySearchWithImprovedLevyFlightInspiration") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV2") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimization") +registry.unregister("LLAMAEnhancedAdaptiveDynamicHarmonySearch") +registry.unregister("LLAMAEnhancedQuantumFireworksAlgorithm") +registry.unregister("LLAMADynamicEliteAnnealingDE") +registry.unregister("LLAMAEnhancedDynamicMultiPhaseAnnealingPlus") +registry.unregister("LLAMAImprovedAdaptiveHybridOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveOppositionBasedHarmonySearchDynamicBandwidthSADE") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV10") +registry.unregister("LLAMAAdaptiveRefinementEvolutiveStrategy") +registry.unregister("LLAMAQuantumInspiredAdaptiveDifferentialEvolutionWithLocalSearch") +registry.unregister("LLAMAAdaptiveDEWithOrthogonalCrossover") +registry.unregister("LLAMAEnhancedAdvancedHyperParameterTunedMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV6") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV7") +registry.unregister("LLAMAAdaptiveQuantumAnnealingDE") +registry.unregister("LLAMAEnhancedAdaptiveLevyHarmonySearchV2") +registry.unregister("LLAMAAdaptiveFireworkAlgorithmEnhanced") +registry.unregister("LLAMAImprovedEnhancedDynamicLevyHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveDynamicDifferentialEvolution") +registry.unregister("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV7") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV4") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionEnhanced") +registry.unregister("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizerV16") +registry.unregister("LLAMAEnhancedOrthogonalDE") +registry.unregister("LLAMAAdaptiveDifferentialHarmonySearch") +registry.unregister("LLAMAAdaptiveEnhancedQuantumHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveTabuHarmonySearchV2") +registry.unregister("LLAMAEnhancedAdaptiveHybridGradientAnnealingWithDynamicMemory") +registry.unregister("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV8") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV9") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligence") +registry.unregister("LLAMAAdaptiveQuantumAnnealingDEv2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV9") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV8") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationV6") +registry.unregister("LLAMAEnhancedDynamicAdaptiveDifferentialEvolutionRefined") +registry.unregister("LLAMAAdaptiveHarmonySearchWithCuckooInspiration") +registry.unregister("LLAMAAdaptiveMemoryGradientAnnealingWithExplorationBoost") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicFireworksTabuSearchV2") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV19") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV11") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV6") +registry.unregister("LLAMAEnhancedAdaptiveMemeticHybridOptimizer") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV11") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithSelfAdaptiveParameters") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV10") +registry.unregister("LLAMAUltraRefinedAdaptiveConvergenceStrategy") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedHarmonySearchV2") +registry.unregister("LLAMADynamicMultiPhaseAnnealingPlus") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossover") +registry.unregister("LLAMAEliteDynamicHybridOptimizer") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithImprovedMutation") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithLocalSearchOptimization") +registry.unregister("LLAMAEnhancedAdaptiveTabuHarmonySearch") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalOptimized") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightV12") +registry.unregister("LLAMADynamicLocalSearchFireworkAlgorithm") +registry.unregister("LLAMAImprovedEnhancedHarmonySearchOB") +registry.unregister("LLAMARefinedDynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMARefinedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch") +registry.unregister("LLAMANovelAdaptiveHarmonicFireworksTabuSearch") +registry.unregister("LLAMAOptimizedDynamicGradientBoostedMemorySimulatedAnnealingPlus") +registry.unregister("LLAMAQuantumFireworksAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV7") +registry.unregister("LLAMAAdvancedRefinedDynamicGradientBoostedMemorySimulatedAnnealingPlus") +registry.unregister("LLAMAAdaptiveMultiPhaseAnnealing") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV7") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV19") +registry.unregister("LLAMAFurtherEnhancedHybridMetaHeuristicOptimizerV13") +registry.unregister("LLAMAUltimateDynamicFireworkAlgorithmImproved") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV11") +registry.unregister("LLAMAConvergentAdaptiveEvolutiveStrategy") +registry.unregister("LLAMAHybridMultiDimensionalAnnealing") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithLocalSearchFinalRefined") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV7") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimization") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLevyFlightInspirationV2") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV19") +registry.unregister("LLAMAEnhancedAdaptiveHarmonyTabuSearchV5") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV12") +registry.unregister("LLAMAAdaptiveHarmonicTabuSearchV20") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV10") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV6") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV14") +registry.unregister("LLAMAAdvancedEnhancedHybridMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV13") +registry.unregister("LLAMAEnhancedDifferentialEvolutionWithAdaptiveMutationControl") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV10") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV21") +registry.unregister("LLAMAEnhancedHarmonyTabuSearchV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV2") +registry.unregister("LLAMAGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV11") +registry.unregister("LLAMAAdaptiveHarmonicTabuSearchV8") +registry.unregister("LLAMAEnhancedDiversifiedGravitationalSwarmOptimizationV7") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV3") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedHybridInspirationV18") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV3") +registry.unregister("LLAMAAdaptiveMemoryGradientSimulatedAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV11") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicParametersV4") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV6") +registry.unregister("LLAMAEnhancedConvergentDifferentialEvolutionV4") +registry.unregister("LLAMAEnhancedAdaptiveMemoryHybridAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveEvolutionaryDifferentialPopulationStrategy") +registry.unregister("LLAMAHyperAdaptiveConvergenceStrategy") +registry.unregister("LLAMAQuantumStochasticGradientDescentFireworks") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV9") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV23") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGB") +registry.unregister("LLAMAHierarchicalAdaptiveAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV5") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV26") +registry.unregister("LLAMAEnhancedAdaptiveDiversifiedHarmonySearch") +registry.unregister("LLAMAAdaptiveGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAImprovedAdaptiveEnhancedQuantumHarmonySearch") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV21") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV5") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV30") +registry.unregister("LLAMAQuantumLevyEliteMemeticOptimizer") +registry.unregister("LLAMAEnhancedHarmonicSwarmOptimizationV2") +registry.unregister("LLAMAEnhancedFireworkAlgorithmWithDynamicMutation") +registry.unregister("LLAMAQuantumInspiredAdaptiveMemeticOptimizer") +registry.unregister("LLAMAEnhancedGravitationalSwarmOptimizationWithDynamicDiversityPreservationV3") +registry.unregister("LLAMAAdaptiveDynamicFireworkAlgorithmRedesigned") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearchAndDynamicMutationV13") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV17") +registry.unregister("LLAMAEnhancedHybridAdaptiveMultiPhaseEvolution") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV10") +registry.unregister("LLAMAAdaptiveHybridHarmonySearch") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV9") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV17") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithm") +registry.unregister("LLAMAEnhancedEnhancedAdaptiveHarmonySearchWithLocalOptimizationAndDiversificationV8") +registry.unregister("LLAMAEnhancedAdaptiveHarmonicTabuSearchV26") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedV2") +registry.unregister("LLAMAEnhancedAdaptiveDynamicFireworkAlgorithmImproved") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchOptimizedWithLocalOptimization") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchImproved") +registry.unregister("LLAMAQuantumDynamicallyAdaptiveFireworksAlgorithm") +registry.unregister("LLAMAImprovedEnhancedAdaptiveDynamicHarmonySearchV4") +registry.unregister("LLAMADualPhaseAdaptiveMemeticDifferentialEvolutionV2") +registry.unregister("LLAMASelfAdaptiveHybridOptimizer") +registry.unregister("LLAMAAdaptiveEliteMemeticOptimizerV5") +registry.unregister("LLAMASimulatedAnnealingOptimizer") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV1") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV2") +registry.unregister("LLAMAAdaptiveHarmonySearchWithLocalOptimizationV2") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithDynamicLevyFlightImprovementV3") +registry.unregister("LLAMAEnhancedOrthogonalDifferentialEvolutionV2") +registry.unregister("LLAMAEnhancedAdaptiveQuantumHarmonySearchDBGBFinalIII") +registry.unregister("LLAMAEnhancedDynamicDifferentialEvolutionWithAdaptiveCrossoverAndMutation") +registry.unregister("LLAMAImprovedPrecisionAdaptiveEvolutiveStrategy") +registry.unregister("LLAMAFinalEnhancedFireworkAlgorithmWithAdaptiveLocalSearch") +registry.unregister("LLAMAEnhancedDynamicLevyHarmonySearchV3") +registry.unregister("LLAMAAdaptiveEnhancedDynamicFireworkAlgorithmWithHybridSearch") +registry.unregister("LLAMAOptimizedGradientMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedDynamicFireworkAlgorithmWithAdaptiveLocalSearch") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV32") +registry.unregister("LLAMARefinedAdaptivePopulationEnhancedRobustDifferentialEvolutionWithEliteSearch") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligenceV15") +registry.unregister("LLAMAAdaptiveHarmonySearchWithDiversificationAndLocalOptimizationV2") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligenceV4") +registry.unregister("LLAMAEnhancedHarmonyTabuOptimizationV2") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV25") +registry.unregister("LLAMAAdaptiveEnhancedGravitationalSwarmIntelligence") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicStepSize") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV13") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV4") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionWithDynamicCrossoverAndMutation") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV7") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV9") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV5") +registry.unregister("LLAMAMultiStageHybridGradientBoostedAnnealing") +registry.unregister("LLAMAAdaptiveGravitationalSwarmIntelligenceV2") +registry.unregister("LLAMAEnhancedEnhancedHybridMetaHeuristicOptimizerV12") +registry.unregister("LLAMAEnhancedDynamicHarmonySearchV8") +registry.unregister("LLAMAAdvancedAdaptiveGradientBoostedMemoryExploration") +registry.unregister("LLAMAAdaptiveMemoryHybridAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionV20") +registry.unregister("LLAMAAdaptiveHybridGradientAnnealingWithVariableMemory") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV5") +registry.unregister("LLAMAEnhancedFireworkAlgorithm") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithSimulatedAnnealingV2") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionRefined") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithEnhancedLevyFlightInspirationV6") +registry.unregister("LLAMAEnhancedRefinedDynamicGradientBoostedMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedEnhancedEvolutionaryDifferentialSwarmOptimizerV10") +registry.unregister("LLAMAEnhancedAdaptiveDifferentialEvolutionRefinedImproved") +registry.unregister("LLAMAEnhancedAdaptiveGravitationalSwarmIntelligenceV20") +registry.unregister("LLAMAAdvancedDynamicMultimodalSimulatedAnnealing") +registry.unregister("LLAMAEnhancedRefinedAdaptiveHarmonySearch") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlight") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV24") +registry.unregister("LLAMAEnhancedEvolutionaryDifferentialSwarmOptimizerV24") +registry.unregister("LLAMAEnhancedHybridMetaHeuristicOptimizerV4") +registry.unregister("LLAMADynamicRefinementGradientBoostedMemoryAnnealing") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchV11") +registry.unregister("LLAMAImprovedHybridAdaptiveHarmonicFireworksTabuSearch") +registry.unregister("LLAMAEnhancedDualPhaseDifferentialEvolution") +registry.unregister("LLAMAEnhancedParticleSwarmOptimizerV5") +registry.unregister("LLAMARefinedCMADiffEvoPSO") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV27") +registry.unregister("LLAMAAdaptiveGradientBoostedMemoryAnnealingPlus") +registry.unregister("LLAMAEnhancedAdaptiveHarmonySearchWithImprovedLevyFlightInspirationV9") +registry.unregister("LLAMARefinedDynamicEliteAdaptiveHybridOptimizer") +registry.unregister("LLAMAEnhancedQuantumLevyMemeticOptimizer") +registry.unregister("LLAMAEnhancedAdaptiveHybridHarmonySearchV25") +registry.unregister("LLAMAEnhancedAdaptiveOrthogonalDifferentialEvolution") +registry.unregister("LLAMAEnhancedImprovedHyperParameterTunedMetaHeuristicOptimizer") +registry.unregister("LLAMAEnhancedAdvancedHybridMetaHeuristicOptimizerV18") +registry.unregister("LLAMAEnhancedAdaptivePopulationRobustDifferentialEvolutionWithEliteSearch") +registry.unregister("LLAMAAdaptiveMultiPhaseAnnealingV2") +registry.unregister("LLAMAEnhancedDynamicGradientBoostedMemorySimulatedAnnealingPlus") +registry.unregister("LLAMAHybridAdaptiveQuantumMemeticDifferentialEvolution") +registry.unregister("LLAMAEnhancedDynamicHarmonySearchV7") +registry.unregister("LLAMAEnhancedHarmonicTabuSearchV13") +registry.unregister("LLAMAEnhancedDynamicLocalSearchFireworkAlgorithmV2") +registry.unregister("LLAMARestartAdaptiveDifferentialEvolutionPSO") +registry.unregister("LLAMADualPhaseAdaptiveMemeticDifferentialEvolution") +registry.unregister("LLAMAAdaptiveMultiMemorySimulatedAnnealing") +registry.unregister("LLAMAEnhancedGravitationalSwarmIntelligenceV16") +registry.unregister("LLAMAAdaptiveDifferentialEvolutionPSO") +registry.unregister("LLAMAEnhancedDynamicHarmonySearchV6") +registry.unregister("LLAMAOptimizedEnhancedDynamicFireworkAlgorithm") diff --git a/nevergrad/optimization/recastlib.py b/nevergrad/optimization/recastlib.py index 5801c7b09..a8782e6c2 100644 --- a/nevergrad/optimization/recastlib.py +++ b/nevergrad/optimization/recastlib.py @@ -122,7 +122,9 @@ def ax_obj(p): def five_objective_function(x): return objective_function(10.0 * x - 5.0) - val, best_x = lama_register[method_name](budget)(five_objective_function) + lama = lama_register[method_name](budget) + lama.dim = weakself.dimension + val, best_x = lama(five_objective_function) best_x = 10.0 * best_x - 5.0 if weakself._normalizer is not None: best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32)) diff --git a/scripts/plot_dagstuhloid.sh b/scripts/plot_dagstuhloid.sh index 45f5b393e..fb4f79295 100755 --- a/scripts/plot_dagstuhloid.sh +++ b/scripts/plot_dagstuhloid.sh @@ -18,7 +18,10 @@ if compgen -G "*.csv" > /dev/null; then # First we run all nevergrad plotting. for i in `ls *.csv ` do - (python -m nevergrad.benchmark.plotting --nomanyxp=1 $i ; python -m nevergrad.benchmark.plotting --max_combsize=2 --competencemaps=1 --nomanyxp=1 $i ) & + #python -m nevergrad.benchmark.plotting --max_combsize=1 $i + (python -m nevergrad.benchmark.plotting --nomanyxp=1 $i ; + python -m nevergrad.benchmark.plotting --max_combsize=1 --competencemaps=0 --nomanyxp=1 $i ; + python -m nevergrad.benchmark.plotting --max_combsize=0 --competencemaps=0 --nomanyxp=0 $i ) & done wait @@ -41,7 +44,8 @@ wait fi # End of "there is something to do". # tar -zcvf ~/dag.tgz *_plots -scripts/latexize.sh +#scripts/latexize.sh tar -zcvf dagstuhloid.tgz dagstuhloid.pdf *.csv *plots/xpresults_all.png rnk_*.txt *plots/fight_all.png.cp.txt +tar -zcvf ~/lamamd.tgz *plots/xp*dimension5,*.png *plots/xpresults*.png *plots/figh*dimensions5.0.*pure.png *plots/fight_all*pure.png From 50379bc98033dae22aa87db53478c971127c66bd Mon Sep 17 00:00:00 2001 From: Olivier Teytaud Date: Mon, 1 Jul 2024 12:01:26 +0200 Subject: [PATCH 6/6] pouet --- nevergrad/benchmark/experiments.py | 8 +++++--- nevergrad/benchmark/plotting.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nevergrad/benchmark/experiments.py b/nevergrad/benchmark/experiments.py index 9673180d9..8ced88b1b 100644 --- a/nevergrad/benchmark/experiments.py +++ b/nevergrad/benchmark/experiments.py @@ -61,9 +61,9 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore # "SmallLognormalDiscreteOnePlusOne", # "XLognormalDiscreteOnePlusOne", # ])] - lama = ["NgIohTuned"] + [o for o in list(ng.optimizers.registry.keys()) if "LAMA" in o] + lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + (["NgIohTuned"] * 5) + [o for o in list(ng.optimizers.registry.keys()) if "LAMA" in o] optims = [o for o in ng.optimizers.registry.keys() if "LAMA" in o] - lama = ["NgIohTuned"] * 10 + [o for o in optims if any([(x in o) for x in ["ADEM", "ptiveHarmonySearch", "CMAESDE","bridDEPSOWithDyn", "CMA","ERADS_Q","EnhancedDynamicPrec","hancedFirew","QPSO","QuantumDifferentialPart"]])] + lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + (["NgIohTuned"] * 10) + [o for o in optims if any([(x in o) for x in ["ADEM", "ptiveHarmonySearch", "CMAESDE","bridDEPSOWithDyn", "CMA","ERADS_Q","EnhancedDynamicPrec","hancedFirew","QPSO","QuantumDifferentialPart"]])] return list(np.random.choice(lama, 55)) # "BigLognormalDiscreteOnePlusOne", # "DiscreteLenglerOnePlusOne", @@ -744,6 +744,7 @@ def keras_tuning( optims = refactor_optims(optims) datasets = ["kerasBoston", "diabetes", "auto-mpg", "red-wine", "white-wine"] optims = refactor_optims(optims) + optims = ["NgIohTuned"] for dimension in [None]: for dataset in datasets: function = MLTuning( @@ -3861,7 +3862,8 @@ def lsgo() -> tp.Iterator[Experiment]: optims = ["TinyQODE", "OpoDE", "OpoTinyDE"] optims = refactor_optims(optims) optims = [np.random.choice(optims)] - for i in range(1, 16): # [np.random.choice(list(range(1, 16)))]: + optims = ["NgIohTuned"] + for i in [np.random.choice(list(range(1, 16)))]: # [np.random.choice(list(range(1, 16)))]: for optim in optims: for budget in [120000, 600000, 3000000]: yield Experiment(lsgo_makefunction(i).instrumented(), optim, budget=budget) diff --git a/nevergrad/benchmark/plotting.py b/nevergrad/benchmark/plotting.py index deaf6d77c..4403945da 100644 --- a/nevergrad/benchmark/plotting.py +++ b/nevergrad/benchmark/plotting.py @@ -167,7 +167,7 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector: try: df.loc[np.isnan(df.loss), "loss"] = float("inf") except Exception as e: - print("pb with isnan(loss): {e}") + print(f"pb with isnan(loss): {e}") # assert ( not output.loc[:, "loss"].isnull().values.any()